code
stringlengths
281
23.7M
class StockTicker(GenPollUrl):
    """Qtile widget that polls the Alpha Vantage API and renders a quote as
    '<symbol>: <currency sign><price>'.

    NOTE(review): `self.symbol` (read in parse) is expected to arrive via
    **config as one of the extra API query parameters — confirm against the
    widget's documented usage.
    """

    defaults = [
        ('interval', '1min', 'The default latency to query'),
        ('func', 'TIME_SERIES_INTRADAY', 'The default API function to query'),
        ('function', 'TIME_SERIES_INTRADAY', 'DEPRECATED: Use `func`.'),
    ]

    def __init__(self, **config):
        # Back-compat shim: accept the deprecated `function` kwarg as `func`.
        if 'function' in config:
            logger.warning('`function` parameter is deprecated. Please rename to `func`')
            config['func'] = config.pop('function')
        GenPollUrl.__init__(self, **config)
        self.add_defaults(StockTicker.defaults)
        # Currency sign follows the process locale (e.g. '$').
        self.sign = locale.localeconv()['currency_symbol']
        self.query = {'interval': self.interval, 'outputsize': 'compact', 'function': self.func}
        # Any extra config keys (symbol, apikey, ...) become API query params.
        for k, v in config.items():
            self.query[k] = v

    @property
    def url(self):
        # BUG FIX: the endpoint literal was corrupted to "(' + urlencode(...)";
        # restore the Alpha Vantage query URL (and the @property form used
        # upstream so GenPollUrl can read `self.url` as an attribute).
        url = 'https://www.alphavantage.co/query?' + urlencode(self.query)
        return url

    def parse(self, body):
        """Extract the most recent price from an Alpha Vantage JSON response.

        Returns the formatted display string; `price` stays None if no
        price/close field is found.
        """
        # 'Meta Data' holds the timestamp key of the most recent sample.
        last = None
        for k, v in body['Meta Data'].items():
            if 'Last Refreshed' in k:
                last = v
        # The remaining top-level key holds the time series itself.
        other = None
        for k, v in body.items():
            if k != 'Meta Data':
                other = v
                break
        price = None
        for k, v in other[last].items():
            if ('price' in k) or ('close' in k):
                price = '{:0.2f}'.format(float(v))
                break
        return '{symbol}: {sign}{price}'.format(symbol=self.symbol, sign=self.sign, price=price)
class LowRankCrossNet(torch.nn.Module):
    """Low-rank cross network (DCN-v2 style).

    Each layer computes x_{l+1} = x_0 * (W_l @ (V_l @ x_l) + b_l) + x_l, where
    W_l is (in_features, low_rank) and V_l is (low_rank, in_features), so the
    full-rank cross matrix is approximated by a rank-`low_rank` factorization.
    """

    def __init__(self, in_features: int, num_layers: int, low_rank: int = 1) -> None:
        super().__init__()
        assert low_rank >= 1, 'Low rank must be larger or equal to 1'
        self._num_layers = num_layers
        self._low_rank = low_rank

        def _xavier(rows: int, cols: int) -> torch.nn.Parameter:
            # Xavier-normal initialized weight of the given shape.
            return torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(rows, cols)))

        # Parameter creation order (W, then V, then bias) is preserved so RNG
        # consumption matches the original implementation exactly.
        self.W_kernels: torch.nn.ParameterList = torch.nn.ParameterList(
            [_xavier(in_features, low_rank) for _ in range(num_layers)]
        )
        self.V_kernels: torch.nn.ParameterList = torch.nn.ParameterList(
            [_xavier(low_rank, in_features) for _ in range(num_layers)]
        )
        self.bias: torch.nn.ParameterList = torch.nn.ParameterList(
            [torch.nn.Parameter(torch.nn.init.zeros_(torch.empty(in_features)))
             for _ in range(num_layers)]
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply all cross layers; returns a tensor with the same shape as `input`."""
        x_0 = input
        x_l = x_0
        for idx in range(self._num_layers):
            # Project down to the low-rank space, then back up.
            low = torch.nn.functional.linear(x_l, self.V_kernels[idx])
            back = torch.nn.functional.linear(low, self.W_kernels[idx])
            # Elementwise cross term plus residual connection.
            x_l = x_0 * (back + self.bias[idx]) + x_l
        return x_l
class AttnDownEncoderBlock2D(nn.Module):
    """Encoder down-block: `num_layers` (ResnetBlock -> AttentionBlockNew) pairs,
    optionally followed by a single Downsample2D.

    NOTE(review): ResnetBlock / AttentionBlockNew / Downsample2D are defined
    elsewhere in the project; only the wiring visible here is documented.
    """

    def __init__(self, in_channels: int, out_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attn_num_head_channels=1, output_scale_factor=1.0, add_downsample=True, downsample_padding=1):
        super().__init__()
        resnets = []
        attentions = []
        for i in range(num_layers):
            # Only the first resnet maps in_channels -> out_channels; later ones
            # are out_channels -> out_channels. Note this rebinds `in_channels`.
            in_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock(in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            attentions.append(AttentionBlockNew(out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, num_groups=resnet_groups))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_downsample:
            # NOTE(review): `in_channels` here is the rebound loop value — it
            # equals out_channels when num_layers > 1 but the ORIGINAL
            # in_channels when num_layers == 1. Confirm this is intentional
            # rather than a stale reference.
            self.downsamplers = nn.ModuleList([Downsample2D(in_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')])
        else:
            self.downsamplers = None

    def forward(self, hidden_states):
        """Apply each resnet/attention pair (no time embedding), then any downsamplers."""
        for (resnet, attn) in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=None)
            hidden_states = attn(hidden_states)
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
        return hidden_states
class Discriminator():
    """GAIL discriminator built as a TF1 static graph.

    Two weight-sharing networks score (state, one-hot action) pairs — one fed
    expert data, one fed agent data. Training maximizes
    log D(expert) + log(1 - D(agent)); the policy reward exposed is
    log D(agent_s, agent_a).
    """

    def __init__(self, env):
        with tf.variable_scope('discriminator'):
            self.scope = tf.get_variable_scope().name
            self.expert_s = tf.placeholder(dtype=tf.float32, shape=([None] + list(env.observation_space.shape)))
            self.expert_a = tf.placeholder(dtype=tf.int32, shape=[None])
            expert_a_one_hot = tf.one_hot(self.expert_a, depth=env.action_space.n)
            # Gaussian noise (mean 0.2, stddev 0.1, scaled by 1/1.2) is added to
            # the one-hot actions — presumably label smoothing to stabilize
            # discriminator training; confirm.
            expert_a_one_hot += (tf.random_normal(tf.shape(expert_a_one_hot), mean=0.2, stddev=0.1, dtype=tf.float32) / 1.2)
            expert_s_a = tf.concat([self.expert_s, expert_a_one_hot], axis=1)
            self.agent_s = tf.placeholder(dtype=tf.float32, shape=([None] + list(env.observation_space.shape)))
            self.agent_a = tf.placeholder(dtype=tf.int32, shape=[None])
            agent_a_one_hot = tf.one_hot(self.agent_a, depth=env.action_space.n)
            agent_a_one_hot += (tf.random_normal(tf.shape(agent_a_one_hot), mean=0.2, stddev=0.1, dtype=tf.float32) / 1.2)
            agent_s_a = tf.concat([self.agent_s, agent_a_one_hot], axis=1)
            with tf.variable_scope('network') as network_scope:
                prob_1 = self.construct_network(input=expert_s_a)
                # Share weights between the expert and agent branches.
                network_scope.reuse_variables()
                prob_2 = self.construct_network(input=agent_s_a)
            with tf.variable_scope('loss'):
                # Clipping to [0.01, 1] guards against log(0).
                loss_expert = tf.reduce_mean(tf.log(tf.clip_by_value(prob_1, 0.01, 1)))
                loss_agent = tf.reduce_mean(tf.log(tf.clip_by_value((1 - prob_2), 0.01, 1)))
                loss = (loss_expert + loss_agent)
                # Negate: optimizers minimize, we want to maximize the sum.
                loss = (- loss)
                tf.summary.scalar('discriminator', loss)
            optimizer = tf.train.AdamOptimizer()
            self.train_op = optimizer.minimize(loss)
            # Policy reward: log D(agent) with a tighter clip to avoid -inf.
            self.rewards = tf.log(tf.clip_by_value(prob_2, 1e-10, 1))

    def construct_network(self, input):
        """Three leaky-ReLU hidden layers (20 units each) and a sigmoid output."""
        layer_1 = tf.layers.dense(inputs=input, units=20, activation=tf.nn.leaky_relu, name='layer1')
        layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.nn.leaky_relu, name='layer2')
        layer_3 = tf.layers.dense(inputs=layer_2, units=20, activation=tf.nn.leaky_relu, name='layer3')
        prob = tf.layers.dense(inputs=layer_3, units=1, activation=tf.sigmoid, name='prob')
        return prob

    def train(self, expert_s, expert_a, agent_s, agent_a):
        """Run one discriminator update in the default session."""
        return tf.get_default_session().run(self.train_op, feed_dict={self.expert_s: expert_s, self.expert_a: expert_a, self.agent_s: agent_s, self.agent_a: agent_a})

    def get_rewards(self, agent_s, agent_a):
        """Return log D(agent_s, agent_a) scores from the default session."""
        return tf.get_default_session().run(self.rewards, feed_dict={self.agent_s: agent_s, self.agent_a: agent_a})

    def get_trainable_variables(self):
        """All trainable variables created under this discriminator's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def interval_unpack(mds, timedelta=datetime.timedelta):
    """Convert a (months, days, (seconds, microseconds)) triple to a timedelta.

    timedelta has no notion of calendar months, so a non-zero month count is
    approximated as 30 days each, after emitting a TypeConversionWarning.
    """
    months, days, seconds_ms = mds
    if months != 0:
        # Lossy conversion: warn the caller that month granularity is approximated.
        warnings.warn(
            TypeConversionWarning(
                'datetime.timedelta cannot represent relative intervals',
                details={'hint': 'An interval was unpacked with a non-zero "month" field.'},
                source='DRIVER',
            )
        )
    return timedelta(
        days=days + months * 30,
        seconds=seconds_ms[0],
        microseconds=seconds_ms[1],
    )
class RandomFourierFeatures(nn.Module):
    """Random Fourier feature map: x -> cos(xW + b) / feature_scale.

    W is assembled from orthogonal blocks (via the project's `random_ortho`)
    and b is uniform on [0, 2*pi). Both are registered as buffers, so they are
    fixed after construction and move with the module's device/dtype.
    """

    def __init__(self, in_dim, num_random_features, feature_scale=None):
        super().__init__()
        if feature_scale is None:
            feature_scale = math.sqrt(num_random_features / 2)
        self.register_buffer('feature_scale', torch.tensor(feature_scale))
        if num_random_features <= in_dim:
            W = random_ortho(in_dim, num_random_features)
        else:
            # Concatenate square orthogonal blocks until the requested width is
            # reached, with a final partial block for the remainder.
            dim_left = num_random_features
            ws = []
            while dim_left > in_dim:
                ws.append(random_ortho(in_dim, in_dim))
                dim_left -= in_dim
            ws.append(random_ortho(in_dim, dim_left))
            W = torch.cat(ws, 1)
        # Rescale each column by a random norm — presumably to match the norm
        # distribution of fully random Gaussian features; confirm.
        feature_norm = torch.randn(W.shape) ** 2
        W = W * feature_norm.sum(0).sqrt()
        self.register_buffer('W', W)
        b = torch.empty(num_random_features).uniform_(0, 2 * math.pi)
        self.register_buffer('b', b)

    def forward(self, x):
        # BUG FIX: the projection was written `(x self.W)` — a syntax error with
        # the operator missing. The feature map is cos(x @ W + b) / scale, so
        # this must be a matrix multiplication.
        k = torch.cos(x @ self.W + self.b)
        k = k / self.feature_scale
        return k
class BlockPushHorizontalMultimodal(BlockPushMultimodal):
    """BlockPushMultimodal variant that resets blocks and targets around the
    workspace y-center, placing the two objects on opposite y offsets (+/-0.2)."""

    def _reset_object_poses(self, workspace_center_x, workspace_center_y):
        # workspace_center_x is unused; both helpers randomize around the y-center.
        self._reset_block_poses(workspace_center_y)
        self._reset_target_poses(workspace_center_y)

    def _reset_block_poses(self, workspace_center_y):
        """Randomize both block poses, retrying until their x separation exceeds
        MIN_BLOCK_DIST (raises ValueError after NUM_RESET_ATTEMPTS failures)."""

        def _reset_block_pose(idx, add=0.0, avoid=None):

            def _get_random_translation():
                # Random (x, y) around x=0.35 and y=center+add, z on the table.
                block_x = (0.35 + (0.5 * self._rng.uniform(low=(- RANDOM_X_SHIFT), high=RANDOM_X_SHIFT)))
                block_y = ((workspace_center_y + add) + (0.5 * self._rng.uniform(low=(- RANDOM_Y_SHIFT), high=RANDOM_Y_SHIFT)))
                block_translation = np.array([block_x, block_y, 0])
                return block_translation
            if (avoid is None):
                block_translation = _get_random_translation()
            else:
                # Best-effort rejection sampling on the x distance to `avoid`;
                # falls through with the last sample if no attempt succeeds.
                for _ in range(NUM_RESET_ATTEMPTS):
                    block_translation = _get_random_translation()
                    dist = np.linalg.norm((block_translation[0] - avoid[0]))
                    if (dist > MIN_BLOCK_DIST):
                        break
            # NOTE(review): `uniform(math.pi)` passes math.pi as `low` with the
            # default high — confirm the intended angle range.
            block_sampled_angle = self._rng.uniform(math.pi)
            block_rotation = transform.Rotation.from_rotvec([0, 0, block_sampled_angle])
            self._pybullet_client.resetBasePositionAndOrientation(self._block_ids[idx], block_translation.tolist(), block_rotation.as_quat().tolist())
            return block_translation
        for _ in range(NUM_RESET_ATTEMPTS):
            # Blocks are placed at opposite y offsets (+add / -add).
            add = (0.2 * self._rng.choice([(- 1), 1]))
            b0_translation = _reset_block_pose(0, add=add)
            b1_translation = _reset_block_pose(1, add=(- add), avoid=b0_translation)
            dist = np.linalg.norm((b0_translation[0] - b1_translation[0]))
            if (dist > MIN_BLOCK_DIST):
                break
        else:
            raise ValueError('could not find matching block')
        assert (dist > MIN_BLOCK_DIST)

    def _reset_target_poses(self, workspace_center_y):
        """Randomize both target poses (opposite y offsets) and record them in
        self._target_poses."""

        def _reset_target_pose(idx, add=0.0, avoid=None):

            def _get_random_translation():
                # Random (x, y) around x=0.5 and y=center+add, z slightly raised.
                target_x = (0.5 + self._rng.uniform(low=((- 0.05) * RANDOM_X_SHIFT), high=(0.05 * RANDOM_X_SHIFT)))
                target_y = ((workspace_center_y + add) + self._rng.uniform(low=((- 0.05) * RANDOM_Y_SHIFT), high=(0.05 * RANDOM_Y_SHIFT)))
                target_translation = np.array([target_x, target_y, 0.02])
                return target_translation
            if (avoid is None):
                target_translation = _get_random_translation()
            else:
                # Best-effort rejection sampling on the x distance to `avoid`.
                for _ in range(NUM_RESET_ATTEMPTS):
                    target_translation = _get_random_translation()
                    dist = np.linalg.norm((target_translation[0] - avoid[0]))
                    if (dist > MIN_TARGET_DIST):
                        break
            # Target yaw: pi +/- pi/30.
            target_sampled_angle = (math.pi + self._rng.uniform(low=((- math.pi) / 30), high=(math.pi / 30)))
            target_rotation = transform.Rotation.from_rotvec([0, 0, target_sampled_angle])
            self._pybullet_client.resetBasePositionAndOrientation(self._target_ids[idx], target_translation.tolist(), target_rotation.as_quat().tolist())
            self._target_poses[idx] = Pose3d(rotation=target_rotation, translation=target_translation)
        if (self._target_poses is None):
            # Lazily size the pose cache to the number of target bodies.
            self._target_poses = [None for _ in range(len(self._target_ids))]
        for _ in range(NUM_RESET_ATTEMPTS):
            add = (0.2 * self._rng.choice([(- 1), 1]))
            _reset_target_pose(0, add=add)
            _reset_target_pose(1, add=(- add), avoid=self._target_poses[0].translation)
            dist = np.linalg.norm((self._target_poses[0].translation[0] - self._target_poses[1].translation[0]))
            # NOTE(review): unconditional break — `dist` is computed but never
            # checked, so the `else: raise` below is unreachable; confirm intent.
            break
        else:
            raise ValueError('could not find matching target')
def LJ_force_1d(pos, dim=3):
    """Pairwise Lennard-Jones forces for a flattened coordinate vector.

    Args:
        pos: flat array of length N*dim holding atom coordinates.
        dim: spatial dimensionality per atom (default 3).

    Returns:
        Flat array of the same length with the per-atom force components
        (reduced units: epsilon = sigma = 1).
    """
    n_atoms = int(len(pos) / dim)
    coords = np.reshape(pos, [n_atoms, dim])
    forces = np.zeros([n_atoms, dim])
    for idx, center in enumerate(coords):
        # All atoms except the current one.
        neighbors = np.delete(deepcopy(coords), idx, 0)
        dists = cdist([center], neighbors)
        separations = neighbors - center
        # Powers of the pair distance used by the 12-6 potential.
        sq = np.power(dists, 2)
        sixth = np.power(sq, 3)
        twelfth = np.power(sixth, 2)
        # Scalar coefficient per neighbor; dotted with the separation vectors.
        scale = ((48 / twelfth) - (24 / sixth)) / sq
        forces[idx] = np.dot(scale, separations)
    return forces.flatten()
@require_torch
class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Test suite for the combined ChineseCLIP text+vision model."""

    all_model_classes = (ChineseCLIPModel,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ChineseCLIPModel} if is_torch_available() else {}
    fx_compatible = False
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        text_kwargs = {'use_labels': False, 'batch_size': 12}
        vision_kwargs = {'batch_size': 12}
        self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # BUG FIX: the skip reasons below were bare `(reason=...)` expressions (the
    # decorator name was lost in extraction), which would raise NameError at
    # class-body execution; restore `@unittest.skip` so the tests are skipped.
    @unittest.skip(reason='Hidden_states is tested in individual model tests')
    def test_hidden_states_output(self):
        pass

    @unittest.skip(reason='Inputs_embeds is tested in individual model tests')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Retain_grad is tested in individual model tests')
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip(reason='ChineseCLIPModel does not have input/output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_initialization(self):
        """Zero-init the configs and check every trainable parameter is exactly
        0 or 1 (except logit_scale, which must equal log(1/0.07))."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for sub_config_key in ('vision_config', 'text_config'):
            sub_config = getattr(configs_no_init, sub_config_key, {})
            setattr(configs_no_init, sub_config_key, _config_zero_init(sub_config))
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for (name, param) in model.named_parameters():
                if param.requires_grad:
                    if name == 'logit_scale':
                        self.assertAlmostEqual(param.data.item(), np.log(1 / 0.07), delta=0.001, msg=f'Parameter {name} of model {model_class} seems not properly initialized')
                    else:
                        # BUG FIX: the scale constant was corrupted to `.0`,
                        # which divides by zero; restore the 1e9 round-trip used
                        # to snap near-0/near-1 means before comparison.
                        self.assertIn(((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')

    def _create_and_check_torchscript(self, config, inputs_dict):
        """Trace the model with (input_ids, pixel_values), save/reload the script
        module, and verify the reloaded state dict matches parameter-for-parameter."""
        if not self.test_torchscript:
            return
        configs_no_init = _config_zero_init(config)
        configs_no_init.torchscript = True
        configs_no_init.return_dict = False
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            try:
                input_ids = inputs_dict['input_ids']
                pixel_values = inputs_dict['pixel_values']
                traced_model = torch.jit.trace(model, (input_ids, pixel_values))
            except RuntimeError:
                self.fail("Couldn't trace module.")
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, 'traced_model.pt')
                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")
                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")
            model.to(torch_device)
            model.eval()
            loaded_model.to(torch_device)
            loaded_model.eval()
            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()
            # Drop buffers that only exist in the scripted module before comparing.
            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]
            loaded_model_state_dict = {key: value for (key, value) in loaded_model_state_dict.items() if key not in non_persistent_buffers}
            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))
            models_equal = True
            for (layer_name, p1) in model_state_dict.items():
                p2 = loaded_model_state_dict[layer_name]
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False
            self.assertTrue(models_equal)

    def test_model_from_pretrained(self):
        for model_name in CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ChineseCLIPModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# BUG FIX: the marker was reduced to a bare `.skipif(...)` attribute expression
# (the `@pytest.mark` prefix was lost), which is a syntax error; restore it.
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_coveragerc_dist(testdir):
    """Run the excluded-lines sample under xdist (-n 2) with --cov-config
    pointing at a custom coveragerc, and check the coverage summary line."""
    testdir.makefile('', coveragerc=COVERAGERC)
    script = testdir.makepyfile(EXCLUDED_TEST)
    result = testdir.runpytest('-v', '--cov-config=coveragerc', f'--cov={script.dirpath()}', '--cov-report=term-missing', '-n', '2', max_worker_restart_0, script)
    assert result.ret == 0
    result.stdout.fnmatch_lines([f'test_coveragerc_dist* {EXCLUDED_RESULT}'])
class AverageMeter():
    """Tracks the latest value plus a running (optionally weighted) average.

    Attributes: val (last value), sum (weighted total), count (total weight),
    avg (sum / count).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` with weight `n` and refresh the running average."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
class WSGIWebServer(internet.TCPServer):
    """TCPServer that also manages a pool's lifecycle: the pool is started after
    the service starts and stopped after the service stops.

    NOTE(review): `pool` is presumably a twisted ThreadPool backing a WSGI
    resource — confirm against the caller.
    """

    def __init__(self, pool, *args, **kwargs):
        self.pool = pool
        super().__init__(*args, **kwargs)

    def startService(self):
        # Start listening first, then spin up the pool.
        super().startService()
        self.pool.start()

    def stopService(self):
        # Stop listening first, then shut down the pool.
        super().stopService()
        self.pool.stop()
class Instances():
    """Container for per-instance fields (boxes, masks, labels, ...) that all
    share the same length and belong to one image.

    Fields are accessed as attributes; `set` enforces that every field has the
    same __len__.
    """

    def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
        self._image_size = image_size
        self._fields: Dict[str, Any] = {}
        for (k, v) in kwargs.items():
            self.set(k, v)

    # NOTE(review): upstream declares this as a @property — the decorator may
    # have been lost in extraction; as written it must be called as a method.
    def image_size(self) -> Tuple[int, int]:
        return self._image_size

    def __setattr__(self, name: str, val: Any) -> None:
        # Underscore-prefixed names are real attributes; everything else is
        # routed into the field dict.
        if name.startswith('_'):
            super().__setattr__(name, val)
        else:
            self.set(name, val)

    def __getattr__(self, name: str) -> Any:
        if ((name == '_fields') or (name not in self._fields)):
            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
        return self._fields[name]

    def set(self, name: str, value: Any) -> None:
        """Store `value` as a field; its length must match the existing fields."""
        data_len = len(value)
        if len(self._fields):
            assert (len(self) == data_len), 'Adding a field of length {} to a Instances of length {}'.format(data_len, len(self))
        self._fields[name] = value

    def has(self, name: str) -> bool:
        """Whether a field called `name` exists."""
        return (name in self._fields)

    def remove(self, name: str) -> None:
        """Delete the field called `name`."""
        del self._fields[name]

    def get(self, name: str) -> Any:
        """Return the field called `name`."""
        return self._fields[name]

    def get_fields(self) -> Dict[str, Any]:
        """Return the underlying name -> field dict (not a copy)."""
        return self._fields

    def to(self, *args: Any, **kwargs: Any) -> 'Instances':
        """New Instances with .to(*args, **kwargs) applied to every field that has it."""
        ret = Instances(self._image_size)
        for (k, v) in self._fields.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            ret.set(k, v)
        return ret

    def numpy(self):
        """New Instances with .numpy() applied to every field that has it."""
        ret = Instances(self._image_size)
        for (k, v) in self._fields.items():
            if hasattr(v, 'numpy'):
                v = v.numpy()
            ret.set(k, v)
        return ret

    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> 'Instances':
        """Index every field with `item`; an int yields a length-1 Instances."""
        if (type(item) == int):
            if ((item >= len(self)) or (item < (- len(self)))):
                raise IndexError('Instances index out of range!')
            else:
                # slice(item, None, len(self)) selects exactly the element at
                # `item` while keeping each field's leading dimension.
                item = slice(item, None, len(self))
        ret = Instances(self._image_size)
        for (k, v) in self._fields.items():
            ret.set(k, v[item])
        return ret

    def __len__(self) -> int:
        # All fields share the same length (enforced by set), so any one works.
        for v in self._fields.values():
            return v.__len__()
        raise NotImplementedError('Empty Instances does not support __len__!')

    def __iter__(self):
        raise NotImplementedError('`Instances` object is not iterable!')

    # NOTE(review): upstream defines this as a @staticmethod; the decorator may
    # have been lost in extraction — confirm before calling via an instance.
    def cat(instance_lists: List['Instances']) -> 'Instances':
        """Concatenate several Instances (same image_size) into one, field by field."""
        assert all((isinstance(i, Instances) for i in instance_lists))
        assert (len(instance_lists) > 0)
        if (len(instance_lists) == 1):
            return instance_lists[0]
        image_size = instance_lists[0].image_size
        for i in instance_lists[1:]:
            assert (i.image_size == image_size)
        ret = Instances(image_size)
        for k in instance_lists[0]._fields.keys():
            values = [i.get(k) for i in instance_lists]
            v0 = values[0]
            # Dispatch on the field type: tensors, lists, or anything exposing
            # its own classmethod-style `cat`.
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            elif hasattr(type(v0), 'cat'):
                values = type(v0).cat(values)
            else:
                raise ValueError('Unsupported type {} for concatenation'.format(type(v0)))
            ret.set(k, values)
        return ret

    def __str__(self) -> str:
        s = (self.__class__.__name__ + '(')
        s += 'num_instances={}, '.format(len(self))
        s += 'image_height={}, '.format(self._image_size[0])
        s += 'image_width={}, '.format(self._image_size[1])
        s += 'fields=[{}])'.format(', '.join((f'{k}: {v}' for (k, v) in self._fields.items())))
        return s
    __repr__ = __str__
def pytest_xdist_auto_num_workers(config):
    """Decide how many xdist workers '-n auto'/'-n logical' should spawn.

    Resolution order: the PYTEST_XDIST_AUTO_NUM_WORKERS env var, then psutil's
    CPU count (logical when -n logical), then sched_getaffinity / os.cpu_count /
    multiprocessing.cpu_count. Always returns at least 1 (2 on Travis when no
    count is available).
    """
    env_var = os.environ.get('PYTEST_XDIST_AUTO_NUM_WORKERS')
    if env_var:
        try:
            return int(env_var)
        except ValueError:
            # BUG FIX: the message was a plain string, so {env_var!r} was never
            # interpolated; make it an f-string.
            warnings.warn(f'PYTEST_XDIST_AUTO_NUM_WORKERS is not a number: {env_var!r}. Ignoring it.')
    try:
        import psutil
    except ImportError:
        pass
    else:
        # psutil can count logical or physical cores; '-n logical' requests logical.
        use_logical = config.option.numprocesses == 'logical'
        count = psutil.cpu_count(logical=use_logical) or psutil.cpu_count()
        if count:
            return count
    try:
        from os import sched_getaffinity

        def cpu_count():
            # CPUs actually available to this process (respects affinity masks).
            return len(sched_getaffinity(0))
    except ImportError:
        if os.environ.get('TRAVIS') == 'true':
            # Travis CI historically reports a misleading count; hard-code 2.
            return 2
        try:
            from os import cpu_count
        except ImportError:
            from multiprocessing import cpu_count
    try:
        n = cpu_count()
    except NotImplementedError:
        return 1
    return n if n else 1
class CreateRepositoryPermission(QuayPermission):
    """Permission to create a repository in `namespace`: satisfied by holders of
    either the organization 'admin' need or the organization 'creator' need."""

    def __init__(self, namespace):
        admin_org = _OrganizationNeed(namespace, 'admin')
        create_repo_org = _OrganizationNeed(namespace, 'creator')
        self.namespace = namespace
        super(CreateRepositoryPermission, self).__init__(admin_org, create_repo_org)
def block_group(inputs, filters, block_fn, blocks, strides, is_training, name, data_format='channels_first', dropblock_keep_prob=None, dropblock_size=None):
    """Stack `blocks` residual units built by `block_fn` into one ResNet group.

    The first unit applies `strides` with a projection shortcut (to change the
    channel count); every following unit runs at stride 1 without projection.
    The group output is wrapped in tf.identity so the op is named `name`.
    """
    outputs = block_fn(
        inputs, filters, is_training, strides,
        use_projection=True, data_format=data_format,
        dropblock_keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)
    for _ in range(blocks - 1):
        outputs = block_fn(
            outputs, filters, is_training, 1,
            data_format=data_format,
            dropblock_keep_prob=dropblock_keep_prob, dropblock_size=dropblock_size)
    return tf.identity(outputs, name)
class XmlTokenizer():
    """Pull-style XML tokenizer built on expat.

    Reads `fp` in 16 KiB chunks and converts expat callbacks into XmlToken
    objects (element start/end, character data, EOF) consumed one at a time via
    next(). Whitespace-only character data is dropped when skip_ws is True.
    """

    def __init__(self, fp, skip_ws=True):
        self.fp = fp
        self.tokens = []            # tokens produced by the current parse chunk
        self.index = 0              # index of the next token to hand out
        self.final = False          # True once a short read signalled EOF
        self.skip_ws = skip_ws
        self.character_pos = (0, 0)  # (line, column) where pending text began
        self.character_data = ''     # accumulates text between element events
        self.parser = xml.parsers.expat.ParserCreate()
        self.parser.StartElementHandler = self.handle_element_start
        self.parser.EndElementHandler = self.handle_element_end
        self.parser.CharacterDataHandler = self.handle_character_data

    def handle_element_start(self, name, attributes):
        """expat callback: flush pending text, then queue an element-start token."""
        self.finish_character_data()
        (line, column) = self.pos()
        token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
        self.tokens.append(token)

    def handle_element_end(self, name):
        """expat callback: flush pending text, then queue an element-end token."""
        self.finish_character_data()
        (line, column) = self.pos()
        token = XmlToken(XML_ELEMENT_END, name, None, line, column)
        self.tokens.append(token)

    def handle_character_data(self, data):
        """expat callback: buffer text (expat may deliver it in several pieces)."""
        if (not self.character_data):
            # Remember where the text run started, not where it ended.
            self.character_pos = self.pos()
        self.character_data += data

    def finish_character_data(self):
        """Emit the buffered text as one token (unless whitespace is skipped)."""
        if self.character_data:
            if ((not self.skip_ws) or (not self.character_data.isspace())):
                (line, column) = self.character_pos
                token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
                self.tokens.append(token)
            self.character_data = ''

    def next(self):
        """Return the next token, parsing more input as needed; XML_EOF at end."""
        size = (16 * 1024)
        while ((self.index >= len(self.tokens)) and (not self.final)):
            # Previous chunk fully consumed: reset the queue and parse more.
            self.tokens = []
            self.index = 0
            data = self.fp.read(size)
            # A short read means the stream is exhausted.
            self.final = (len(data) < size)
            self.parser.Parse(data, self.final)
        if (self.index >= len(self.tokens)):
            (line, column) = self.pos()
            token = XmlToken(XML_EOF, None, None, line, column)
        else:
            token = self.tokens[self.index]
        self.index += 1
        return token

    def pos(self):
        """Current (line, column) of the expat parser."""
        return (self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber)
class ConditionalRealNVPFlow(bijectors.ConditionalBijector):
    """RealNVP normalizing flow whose coupling layers accept conditioning inputs
    (passed as **condition_kwargs to forward/inverse and the log-det methods).

    The chain is `num_coupling_layers` RealNVP bijectors with a reversal Permute
    between adjacent layers; batch normalization is declared but unsupported.
    """

    def __init__(self, num_coupling_layers=2, hidden_layer_sizes=(64,), use_batch_normalization=False, event_dims=None, is_constant_jacobian=False, validate_args=False, name='conditional_real_nvp_flow'):
        self._graph_parents = []
        self._name = name
        self._num_coupling_layers = num_coupling_layers
        self._hidden_layer_sizes = tuple(hidden_layer_sizes)
        if use_batch_normalization:
            raise NotImplementedError('TODO(hartikainen): Batch normalization is not yet supported for ConditionalRealNVPFlow.')
        self._use_batch_normalization = use_batch_normalization
        assert (event_dims is not None), event_dims
        self._event_dims = event_dims
        # Build the bijector chain before initializing the tfp base class.
        self.build()
        super(ConditionalRealNVPFlow, self).__init__(forward_min_event_ndims=1, inverse_min_event_ndims=1, is_constant_jacobian=is_constant_jacobian, validate_args=validate_args, name=name)

    def build(self):
        """Construct self.flow: the list of bijectors in forward order."""
        D = np.prod(self._event_dims)
        flow = []
        for i in range(self._num_coupling_layers):
            if self._use_batch_normalization:
                # Unreachable today: __init__ raises when batch norm is requested.
                batch_normalization_bijector = bijectors.BatchNormalization()
                flow.append(batch_normalization_bijector)
            real_nvp_bijector = bijectors.RealNVP(num_masked=(D // 2), shift_and_log_scale_fn=conditioned_real_nvp_template(hidden_layers=self._hidden_layer_sizes, activation=tf.nn.tanh), name='real_nvp_{}'.format(i))
            flow.append(real_nvp_bijector)
            if (i < (self._num_coupling_layers - 1)):
                # Reverse the event ordering between couplings so every
                # dimension eventually gets transformed.
                permute_bijector = bijectors.Permute(permutation=list(reversed(range(D))), name='permute_{}'.format(i))
                # NOTE(review): forcing _is_constant_jacobian False on a Permute
                # (whose Jacobian is normally constant) — presumably to defeat
                # tfp's caching; confirm intent.
                permute_bijector._is_constant_jacobian = False
                flow.append(permute_bijector)
        self.flow = flow

    def _get_flow_conditions(self, **condition_kwargs):
        """Map each RealNVP bijector's name to the shared conditioning kwargs."""
        conditions = {bijector.name: condition_kwargs for bijector in self.flow if isinstance(bijector, bijectors.RealNVP)}
        return conditions

    def _forward(self, x, **condition_kwargs):
        """Push x through the chain in forward order, conditioning RealNVP layers."""
        conditions = self._get_flow_conditions(**condition_kwargs)
        for bijector in self.flow:
            x = bijector.forward(x, **conditions.get(bijector.name, {}))
        return x

    def _inverse(self, y, **condition_kwargs):
        """Pull y back through the chain in reverse order."""
        conditions = self._get_flow_conditions(**condition_kwargs)
        for bijector in reversed(self.flow):
            y = bijector.inverse(y, **conditions.get(bijector.name, {}))
        return y

    def _forward_log_det_jacobian(self, x, **condition_kwargs):
        """Accumulate forward log|det J| over the chain, tracking the evolving
        event shape/ndims statically when possible (mirrors tfp's Chain)."""
        conditions = self._get_flow_conditions(**condition_kwargs)
        fldj = tf.cast(0.0, dtype=x.dtype.base_dtype)
        event_ndims = self._maybe_get_static_event_ndims(self.forward_min_event_ndims)
        if _use_static_shape(x, event_ndims):
            event_shape = x.shape[(x.shape.ndims - event_ndims):]
        else:
            event_shape = tf.shape(x)[(tf.rank(x) - event_ndims):]
        for b in self.flow:
            fldj += b.forward_log_det_jacobian(x, event_ndims=event_ndims, **conditions.get(b.name, {}))
            if _use_static_shape(x, event_ndims):
                # Static path: shapes known at graph-build time.
                event_shape = b.forward_event_shape(event_shape)
                event_ndims = self._maybe_get_static_event_ndims(event_shape.ndims)
            else:
                # Dynamic path: recover a static ndims if one becomes available.
                event_shape = b.forward_event_shape_tensor(event_shape)
                event_ndims = tf.size(event_shape)
                event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
                if (event_ndims_ is not None):
                    event_ndims = event_ndims_
            # Advance x so the next layer's log-det is evaluated at its input.
            x = b.forward(x, **conditions.get(b.name, {}))
        return fldj

    def _inverse_log_det_jacobian(self, y, **condition_kwargs):
        """Accumulate inverse log|det J| over the reversed chain (same
        shape-tracking pattern as _forward_log_det_jacobian)."""
        conditions = self._get_flow_conditions(**condition_kwargs)
        ildj = tf.cast(0.0, dtype=y.dtype.base_dtype)
        event_ndims = self._maybe_get_static_event_ndims(self.inverse_min_event_ndims)
        if _use_static_shape(y, event_ndims):
            event_shape = y.shape[(y.shape.ndims - event_ndims):]
        else:
            event_shape = tf.shape(y)[(tf.rank(y) - event_ndims):]
        for b in reversed(self.flow):
            ildj += b.inverse_log_det_jacobian(y, event_ndims=event_ndims, **conditions.get(b.name, {}))
            if _use_static_shape(y, event_ndims):
                event_shape = b.inverse_event_shape(event_shape)
                event_ndims = self._maybe_get_static_event_ndims(event_shape.ndims)
            else:
                event_shape = b.inverse_event_shape_tensor(event_shape)
                event_ndims = tf.size(event_shape)
                event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)
                if (event_ndims_ is not None):
                    event_ndims = event_ndims_
            # Advance y so the next layer's log-det is evaluated at its input.
            y = b.inverse(y, **conditions.get(b.name, {}))
        return ildj
class KnownValues(unittest.TestCase):
    """Regression tests for pyscf's range-separated GDF/nuclear builders.

    Expected values are fingerprints (lib.fp) of previously validated results
    computed from the module-level fixtures (cell, auxcell, auxcell1, kpts,
    cell_sr, auxcell_sr, nkpts, basis, auxbasis, load).

    NOTE(review): many reference numbers here are suspiciously round
    (0., 1., -9., -170., ...) — they appear truncated by extraction damage
    and should be checked against the upstream pyscf test file.
    """

    def test_get_2c2e_gamma(self):
        # Gamma-point 2c2e integrals; also with the diffused-aux exclusion
        # toggled off, and with the alternative auxiliary basis.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell).build()
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        self.assertAlmostEqual(lib.fp(j2c), 0., 9)
        dfbuilder.exclude_d_aux = False
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        self.assertAlmostEqual(lib.fp(j2c), 0., 9)
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell1).build()
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        self.assertAlmostEqual(lib.fp(j2c), (- 9.), 9)

    def test_get_2c2e(self):
        # k-point sampled 2c2e integrals for both auxiliary bases.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts).build()
        j2c = dfbuilder.get_2c2e(kpts)
        self.assertAlmostEqual(lib.fp(j2c), ((- 1.) + 2.j), 9)
        self.assertAlmostEqual(lib.fp(j2c[0]), 0., 9)
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell1, kpts).build()
        j2c = dfbuilder.get_2c2e(kpts)
        self.assertAlmostEqual(lib.fp(j2c), ((- 170.) - 1.j), 8)
        self.assertAlmostEqual(lib.fp(j2c[0]), (- 9.), 8)

    def test_get_2c2e_cart(self):
        # Cartesian GTOs must give the same fingerprint as the spherical case.
        with lib.temporary_env(cell, cart=True):
            dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts).build()
            j2c = dfbuilder.get_2c2e(kpts)
            self.assertAlmostEqual(lib.fp(j2c), ((- 1.) + 2.j), 9)

    def test_make_j3c_gamma(self):
        # Gamma-point 3c integrals: s2 vs s1 aosym must agree, and the result
        # must be independent of the exclude_dd_block/exclude_d_aux settings.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 1., 7)
            dfbuilder.make_j3c(tmpf.name, aosym='s1')
            v1 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(abs((v1 - lib.unpack_tril(v2).reshape(v1.shape))).max(), 0, 9)
            dfbuilder.exclude_dd_block = True
            dfbuilder.exclude_d_aux = False
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 1., 7)
            dfbuilder.exclude_dd_block = False
            dfbuilder.exclude_d_aux = True
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 1., 7)
            dfbuilder.exclude_dd_block = False
            dfbuilder.exclude_d_aux = False
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 1., 7)

    def test_make_j3c(self):
        # Full k-point 3c integrals; s1 results must match unpacked s2 ones.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2')
            v_s2 = []
            for ki in range(nkpts):
                for kj in range(nkpts):
                    v_s2.append(load(tmpf.name, kpts[[ki, kj]]))
            self.assertAlmostEqual(lib.fp(v_s2[0]), 1., 7)
            self.assertAlmostEqual(lib.fp(v_s2[((2 * nkpts) + 4)]), (3. + 0.j), 7)
            self.assertAlmostEqual(lib.fp(v_s2[((2 * nkpts) + 2)]), (1. + 0j), 7)
            dfbuilder.make_j3c(tmpf.name, aosym='s1')
            with df.CDERIArray(tmpf.name) as cderi_array:
                for ki in range(nkpts):
                    for kj in range(nkpts):
                        v1 = cderi_array[(ki, kj)]
                        if (ki == kj):
                            # Diagonal blocks are stored tril-packed under s2.
                            v2 = lib.unpack_tril(v_s2[((ki * nkpts) + kj)]).reshape(v1.shape)
                            self.assertAlmostEqual(abs((v1 - v2)).max(), 0, 9)
                        else:
                            self.assertAlmostEqual(abs((v1 - v_s2[((ki * nkpts) + kj)])).max(), 0, 9)

    def test_make_j3c_j_only(self):
        # j_only=True builds only the (ki, ki) diagonal blocks.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2', j_only=True)
            v_s2 = []
            for ki in range(nkpts):
                v_s2.append(load(tmpf.name, kpts[[ki, ki]]))
            self.assertAlmostEqual(lib.fp(v_s2[0]), 1., 6)
            self.assertAlmostEqual(lib.fp(v_s2[2]), (1. + 0j), 6)
            dfbuilder.make_j3c(tmpf.name, aosym='s1', j_only=True)
            for ki in range(nkpts):
                v1 = load(tmpf.name, kpts[[ki, ki]])
                v2 = lib.unpack_tril(v_s2[ki]).reshape(v1.shape)
                self.assertAlmostEqual(abs((v1 - v2)).max(), 0, 9)

    def test_make_j3c_kptij_lst(self):
        # Restricting make_j3c to an explicit kptij_lst must reproduce the
        # corresponding blocks of the full computation.
        kpts = cell.make_kpts([3, 3, 3])
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts)
        ki_idx = np.array([0, 3, 4, 5, 15, 8, 9])
        kj_idx = np.array([15, 18, 21, 1, 2, 4, 5])
        kij_idx = np.array([ki_idx, kj_idx]).T
        kptij_lst = kpts[kij_idx]
        with tempfile.NamedTemporaryFile() as tmpf:
            cderi = tmpf.name
            dfbuilder.make_j3c(cderi, aosym='s1')
            with df.CDERIArray(cderi) as cderi_array:
                ref = np.array([cderi_array[(ki, kj)] for (ki, kj) in kij_idx])
        with tempfile.NamedTemporaryFile() as tmpf:
            cderi = tmpf.name
            dfbuilder.make_j3c(cderi, aosym='s1', kptij_lst=kptij_lst)
            with df.CDERIArray(cderi) as cderi_array:
                v1 = np.array([cderi_array[(ki, kj)] for (ki, kj) in kij_idx])
        self.assertAlmostEqual(abs((ref - v1)).max(), 0, 9)
        self.assertAlmostEqual(lib.fp(v1), (0. - 0.j), 8)

    def test_make_j3c_gamma_2d(self):
        # 2D periodic system (vacuum along z).
        cell = pgto.M(atom='He 0 0 0; He 0.9 0 0', basis=basis, a='2.8 0 0; 0 2.8 0; 0 0 25', dimension=2)
        auxcell = df.make_auxcell(cell, auxbasis)
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2.T.dot(v2)), 0., 7)

    def test_make_j3c_gamma_1d(self):
        # 1D periodic system.
        cell = pgto.M(atom='He 0 0 0; He 0.9 0 0', basis=basis, a=(np.eye(3) * 2.8), dimension=1)
        auxcell = df.make_auxcell(cell, auxbasis)
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 1., 5)

    # NOTE(review): the bare string below is almost certainly the argument of
    # a stripped decorator (likely @unittest.skip(...)); restore from VCS.
    ('_RSGDFBuilder for dimension=0 not accurate')
    def test_make_j3c_gamma_0d(self):
        # Molecular (dimension=0) limit compared against molecular DF; only
        # loose agreement is expected (1 decimal place).
        from pyscf.df.incore import cholesky_eri
        cell = pgto.M(atom='He 0 0 0; He 0.9 0 0', basis=basis, a=(np.eye(3) * 2.8), dimension=0)
        auxcell = df.make_auxcell(cell, auxbasis)
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
        ref = cholesky_eri(cell, auxmol=auxcell)
        self.assertAlmostEqual(abs((v2 - ref)).max(), 0, 1)

    def test_get_nuc(self):
        dfbuilder = rsdf_builder._RSNucBuilder(cell).build()
        v1 = dfbuilder.get_nuc()
        self.assertAlmostEqual(lib.fp(v1), (- 2.), 7)

    def test_get_nuc_2d(self):
        # Nuclear attraction in a 2D cell with an elongated z axis.
        a = (np.eye(3) * 2.8)
        a[(2, 2)] = 10.0
        cell = pgto.M(atom='He 0 0 0; He 0.9 0 0', basis=basis, a=a, dimension=2)
        dfbuilder = rsdf_builder._RSNucBuilder(cell).build()
        v1 = dfbuilder.get_nuc()
        self.assertAlmostEqual(lib.fp(v1), (- 2.), 6)

    def test_get_nuc_0d(self):
        # dimension=0 must match the molecular int1e_nuc integrals.
        cell = pgto.M(atom='He 0 0 0; He 0.9 0 0', basis=basis, a=(np.eye(3) * 2.8), dimension=0)
        ref = cell.to_mol().intor('int1e_nuc')
        dfbuilder = rsdf_builder._RSNucBuilder(cell).build()
        v1 = dfbuilder.get_nuc()
        self.assertAlmostEqual(abs((v1 - ref)).max(), 0, 9)

    def test_get_pp(self):
        # Pseudopotential matrix for a Be atom, Gamma point and k-point mesh.
        L = 7
        a = (np.eye(3) * L)
        a[(1, 0)] = 5.0
        cell = pgto.M(atom=[['Be', ((L / 2.0), (L / 2.0), (L / 2.0))]], a=a, basis='gth-szv', pseudo='gth-pade-q2')
        dfbuilder = rsdf_builder._RSNucBuilder(cell).build()
        vpp = dfbuilder.get_pp()
        self.assertAlmostEqual(lib.fp(vpp), (- 0.), 8)
        kpts = cell.make_kpts([3, 3, 2])
        dfbuilder = rsdf_builder._RSNucBuilder(cell, kpts).build()
        vpp = dfbuilder.get_pp()
        self.assertAlmostEqual(lib.fp(vpp), (0. + 0j), 7)

    def test_vs_fft(self):
        # Cross-check the builder against a brute-force FFT/plane-wave
        # evaluation of the 2c2e and 3c2e integrals.
        cell = pgto.M(a=(np.eye(3) * 2.8), atom='He 0. 2.2 1.; He 1. 1. 1.', basis=[[0, [1.2, 1.0], [0.7, 0.5], [0.4, 0.5]], [1, [1.1, 0.5], [0.4, 0.5]]], mesh=([15] * 3), verbose=0)
        auxcell = df.make_auxcell(cell, auxbasis=[[0, [1.2, 1.0], [0.7, 0.5], [0.4, 0.5]], [1, [1.1, 0.5], [0.4, 0.5]], [2, [1.0, 1.0]]])
        kpts = np.zeros((1, 3))
        dfbuilder = rsdf_builder._RSGDFBuilder(cell, auxcell, kpts)
        dfbuilder.omega = 0.9
        dfbuilder.build()
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        (Gv, Gvbase, kws) = cell.get_Gv_weights()
        kpt = np.zeros(3)
        auxG = ft_ao.ft_ao(auxcell, Gv).T
        wcoulG = (pbctools.get_coulG(auxcell, kpt, mesh=cell.mesh) * kws)
        ref = lib.dot((auxG.conj() * wcoulG), auxG.T)
        self.assertAlmostEqual(abs((ref - j2c)).max(), 0, 8)
        aopair = ft_ao.ft_aopair(cell, Gv, aosym='s2')
        ngrids = Gv.shape[0]
        j3c = lib.dot((auxG.conj() * wcoulG), aopair.reshape(ngrids, (- 1)))
        # Reference CDERI tensor: L^-1 j3c with j2c = L L^T.
        j2c = scipy.linalg.cholesky(j2c[0], lower=True)
        ref = scipy.linalg.solve_triangular(j2c, j3c, lower=True)
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2', j_only=True)
            v1 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(abs((ref - v1)).max(), 0, 7)

    def test_get_2c2e_gamma_sr(self):
        # Short-range (SR) variants of the tests above, on the *_sr fixtures.
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr).build()
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        self.assertAlmostEqual(lib.fp(j2c), 0., 9)
        dfbuilder.exclude_d_aux = False
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        self.assertAlmostEqual(lib.fp(j2c), 0., 9)

    def test_get_2c2e_sr(self):
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr, kpts).build()
        j2c = dfbuilder.get_2c2e(kpts)
        self.assertAlmostEqual(lib.fp(j2c), (1. - 0.j), 9)
        self.assertAlmostEqual(lib.fp(j2c[0]), 0., 9)

    def test_get_2c2e_cart_sr(self):
        with lib.temporary_env(cell_sr, cart=True):
            dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr, kpts).build()
            j2c = dfbuilder.get_2c2e(kpts)
            self.assertAlmostEqual(lib.fp(j2c), (1. - 0.j), 9)

    def test_make_j3c_gamma_sr(self):
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 0., 8)
            dfbuilder.make_j3c(tmpf.name, aosym='s1')
            v1 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(abs((v1 - lib.unpack_tril(v2).reshape(v1.shape))).max(), 0, 9)
            dfbuilder.exclude_dd_block = True
            dfbuilder.exclude_d_aux = False
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 0., 8)
            dfbuilder.exclude_dd_block = False
            dfbuilder.exclude_d_aux = True
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 0., 8)
            dfbuilder.exclude_dd_block = False
            dfbuilder.exclude_d_aux = False
            dfbuilder.make_j3c(tmpf.name)
            v2 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(lib.fp(v2), 0., 7)

    def test_make_j3c_sr_high_cost(self):
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr, kpts).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2')
            v_s2 = []
            for ki in range(nkpts):
                for kj in range(nkpts):
                    v_s2.append(load(tmpf.name, kpts[[ki, kj]]))
            self.assertAlmostEqual(lib.fp(v_s2[0]), 0., 8)
            self.assertAlmostEqual(lib.fp(v_s2[((2 * nkpts) + 4)]), (2. - 0.j), 8)
            self.assertAlmostEqual(lib.fp(v_s2[((2 * nkpts) + 2)]), (0. + 0j), 8)
            dfbuilder.make_j3c(tmpf.name, aosym='s1')
            with df.CDERIArray(tmpf.name) as cderi_array:
                v_s1 = cderi_array[:]
            for ki in range(nkpts):
                for kj in range(nkpts):
                    v1 = v_s1[(ki, kj)]
                    if (ki == kj):
                        v2 = lib.unpack_tril(v_s2[((ki * nkpts) + kj)]).reshape(v1.shape)
                        self.assertAlmostEqual(abs((v1 - v2)).max(), 0, 9)
                    else:
                        self.assertAlmostEqual(abs((v1 - v_s2[((ki * nkpts) + kj)])).max(), 0, 9)

    def test_make_j3c_j_only_sr(self):
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr, kpts).build()
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2', j_only=True)
            v_s2 = []
            for ki in range(nkpts):
                v_s2.append(load(tmpf.name, kpts[[ki, ki]]))
            self.assertAlmostEqual(lib.fp(v_s2[0]), 0., 8)
            self.assertAlmostEqual(lib.fp(v_s2[2]), (0. + 0j), 8)
            dfbuilder.make_j3c(tmpf.name, aosym='s1', j_only=True)
            for ki in range(nkpts):
                v1 = load(tmpf.name, kpts[[ki, ki]])
                v2 = lib.unpack_tril(v_s2[ki]).reshape(v1.shape)
                self.assertAlmostEqual(abs((v1 - v2)).max(), 0, 9)

    def test_vs_fft_sr(self):
        # Same FFT cross-check as test_vs_fft but with an attenuated
        # (short-range, negative omega) Coulomb operator.
        cell_sr = pgto.M(a=(np.eye(3) * 2.8), atom='He 0. 2.2 1.; He 1. 1. 1.', basis=[[0, [1.2, 1.0], [0.7, 0.5], [0.4, 0.5]], [1, [1.1, 0.5], [0.4, 0.5]]], mesh=([14] * 3), verbose=0)
        cell_sr.omega = (- 0.9)
        auxcell_sr = df.make_auxcell(cell_sr, auxbasis=[[0, [1.2, 1.0], [0.7, 0.5], [0.4, 0.5]], [1, [1.1, 0.5], [0.4, 0.5]], [2, [1.0, 1.0]]])
        kpts = np.zeros((1, 3))
        dfbuilder = rsdf_builder._RSGDFBuilder(cell_sr, auxcell_sr, kpts)
        dfbuilder.build()
        j2c = dfbuilder.get_2c2e(np.zeros((1, 3)))
        (Gv, Gvbase, kws) = cell_sr.get_Gv_weights()
        kpt = np.zeros(3)
        auxG = ft_ao.ft_ao(auxcell_sr, Gv).T
        wcoulG = (pbctools.get_coulG(auxcell_sr, kpt, mesh=cell_sr.mesh, omega=cell_sr.omega) * kws)
        ref = lib.dot((auxG.conj() * wcoulG), auxG.T)
        self.assertAlmostEqual(abs((ref - j2c)).max(), 0, 8)
        aopair = ft_ao.ft_aopair(cell_sr, Gv, aosym='s2')
        ngrids = Gv.shape[0]
        j3c = lib.dot((auxG.conj() * wcoulG), aopair.reshape(ngrids, (- 1)))
        j2c = scipy.linalg.cholesky(j2c[0], lower=True)
        ref = scipy.linalg.solve_triangular(j2c, j3c, lower=True)
        with tempfile.NamedTemporaryFile() as tmpf:
            dfbuilder.make_j3c(tmpf.name, aosym='s2', j_only=True)
            v1 = load(tmpf.name, kpts[[0, 0]])
            self.assertAlmostEqual(abs((ref - v1)).max(), 0, 7)
def text_render(structure, resolution=100):
    """Render *structure* as a one-line ASCII sketch of its regions.

    Samples `resolution` evenly spaced positions across the structure's
    width and maps each to a character: bulk -> '' (omitted), well -> '_',
    interlayer -> '-', barrier (full or half) -> ' ', unknown -> '?'.
    """
    samples = np.linspace(0, structure.width(), resolution)
    bulk = locate_regions(samples, structure, 'bulk')
    barrier = set(locate_regions(samples, structure, 'barrier')) | set(
        locate_regions(samples, structure, 'half barrier'))
    interlayer = locate_regions(samples, structure, 'interlayer')
    well = locate_regions(samples, structure, 'well')

    def glyph(idx):
        # Precedence mirrors the original nested conditional.
        if idx in bulk:
            return ''
        if idx in well:
            return '_'
        if idx in interlayer:
            return '-'
        if idx in barrier:
            return ' '
        return '?'

    return ''.join(glyph(idx) for idx in range(resolution))
def _pfunc_param_to_in(param, strict=False, allow_downcast=None):
    """Normalize a single pfunc parameter specification into an ``In``.

    Constants are rejected; Variables are wrapped in an ``In`` carrying the
    strict/allow_downcast options; existing ``In`` objects pass through.
    Anything else raises ``TypeError``.
    """
    # Constant is tested before Variable: Constant is a Variable subclass
    # and must be rejected, not wrapped.
    if isinstance(param, Constant):
        raise TypeError('Constants not allowed in param list', param)
    if isinstance(param, Variable):
        return In(variable=param, strict=strict, allow_downcast=allow_downcast)
    if isinstance(param, In):
        return param
    raise TypeError(f'Unknown parameter type: {type(param)}')
class VideoChatScheduled(TelegramObject):
    """Service message about a video chat scheduled in the chat.

    Objects of this class compare equal when their ``start_date`` matches
    (see ``_id_attrs``). Instances are frozen after construction.
    """

    __slots__ = ('start_date',)

    def __init__(self, start_date: dtm.datetime, *, api_kwargs: Optional[JSONDict]=None) -> None:
        super().__init__(api_kwargs=api_kwargs)
        # Point in time when the video chat is supposed to be started.
        self.start_date: dtm.datetime = start_date
        self._id_attrs = (self.start_date,)
        self._freeze()

    # FIX: de_json takes `cls` and is meant to be called on the class
    # (it delegates to cls._parse_data / super().de_json), but the
    # @classmethod decorator was missing.
    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['VideoChatScheduled']:
        """Deserialize from a Telegram API dict, localizing the timestamp
        with the bot's default timezone (if any)."""
        data = cls._parse_data(data)
        if (not data):
            return None
        loc_tzinfo = extract_tzinfo_from_defaults(bot)
        data['start_date'] = from_timestamp(data['start_date'], tzinfo=loc_tzinfo)
        return super().de_json(data=data, bot=bot)
class TestFreeColors(EndianTest):
    """Pack/unpack round-trip tests for the X11 FreeColors request."""

    def setUp(self):
        # NOTE(review): the numeric values for 'cmap', each entry of 'pixels'
        # and 'plane_mask' are missing from this source (extraction damage);
        # restore from version control — they must serialize to req_bin_0.
        self.req_args_0 = {'cmap': , 'pixels': [, , , , , , , , , , , , , , , , ], 'plane_mask': }
        # Expected wire encoding of the request arguments above.
        self.req_bin_0 = b'X\x00\x00\x14\x14`ID_1\x19\xfbL8\xc8\x12(\x9e8)y\x9b\xe5\xd1`\xad\x08Ir\x1b>\xa88\xa7>\xfaNld)hS"\x19\\\x12+Dr.\xb8\\x9d\x92!6\xa0p\xeejQ\x17)t\x14\xd0\xdcF!\xab\r9\x14\xb2sr\xd8\xb7\x1c'

    def testPackRequest0(self):
        # Serializing the args must reproduce the reference binary exactly.
        bin = request.FreeColors._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference binary must yield the args, consuming it fully.
        (args, remain) = request.FreeColors._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build a Single-Path NAS network (pixel1 variant).

    `channel_multiplier` scales every stage's channel count; remaining
    keyword arguments are forwarded to the EfficientNet builder.
    """
    # Stage definitions: depthwise-separable stem stage followed by
    # inverted-residual stages (repeat/kernel/stride/expansion/channels).
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'],
        ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'],
        ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    # An explicitly supplied norm_layer wins; otherwise BatchNorm2d with
    # any bn-tuning kwargs resolved out of `kwargs`.
    norm_layer = kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        stem_size=32,
        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
        norm_layer=norm_layer,
        **kwargs,
    )
    return _create_effnet(variant, pretrained, **model_kwargs)
class TestNaiveClusterer(unittest.TestCase):
    """Unit tests for NaiveClusterer's batch and streaming prediction."""

    def setUp(self):
        super().setUp()
        pass

    def test_6by2_matrix(self):
        # Six 2-D points that form two clear clusters around (1, 0) and (0, 1).
        matrix = np.array([[1.0, 0.0], [1.1, 0.1], [0.0, 1.0], [0.1, 1.0], [0.9, (- 0.1)], [0.0, 1.2]])
        clusterer = NaiveClusterer(threshold=0.5)
        labels = clusterer.predict(matrix)
        # Normalize label numbering so the expectation is order-independent.
        labels = utils.enforce_ordered_labels(labels)
        expected = np.array([0, 0, 1, 1, 0, 1])
        np.testing.assert_equal(expected, labels)
        # Streaming predictions reuse the centroids learned by predict().
        label = clusterer.predict_next(np.array([1.2, (- 0.1)]))
        self.assertEqual(0, label)
        label = clusterer.predict_next(np.array([(- 0.1), 0.8]))
        self.assertEqual(1, label)
        # After reset() the first streamed point founds cluster 0 again.
        clusterer.reset()
        label = clusterer.predict_next(np.array([(- 0.1), 0.8]))
        self.assertEqual(0, label)

    def test_adaptation(self):
        # Checks how adaptation_threshold gates centroid count updates.
        clusterer = NaiveClusterer(threshold=0.5, adaptation_threshold=1.0)
        label = clusterer.predict_next(np.array([1.2, (- 0.1)]))
        self.assertEqual(0, label)
        self.assertEqual(1, clusterer.centroids[0].count)
        label = clusterer.predict_next(np.array([1.3, 0.2]))
        self.assertEqual(0, label)
        # With adaptation_threshold=1.0 this point does not change the count.
        self.assertEqual(1, clusterer.centroids[0].count)
        # With adaptation_threshold=0.5 the same point does.
        clusterer.adaptation_threshold = 0.5
        label = clusterer.predict_next(np.array([1.3, 0.2]))
        self.assertEqual(0, label)
        self.assertEqual(2, clusterer.centroids[0].count)
def get_similarity(text_a, text_b, k):
    """Build a word-by-word WordNet similarity matrix for sentence pair k.

    The k-th sentences of text_a and text_b are lowercased, stripped of
    punctuation, and padded with 'oov' boundary tokens. Returns a 2-D list
    sim[i][j] scoring token i of text_a[k] against token j of text_b[k]:
    1.0 for direct synonyms, an averaged Wu-Palmer similarity otherwise,
    exact-match fallback when WordNet has no coverage, 0 for 'oov'.
    """
    wordnet = nltk.corpus.wordnet

    def _tokens(text):
        # Lowercase, strip punctuation, pad with 'oov' boundary markers.
        words = text[k].lower().translate(str.maketrans('', '', string.punctuation)).split()
        return (['oov'] + words) + ['oov']

    def _score(word, targ):
        # 'oov' padding never matches anything.
        if (word == 'oov') or (targ == 'oov'):
            return 0
        left = wordnet.synsets(word)
        right = wordnet.synsets(targ)
        if (not left) or (not right):
            # No WordNet coverage: fall back to exact string equality.
            return 1 if (word == targ) else 0
        # Direct synonymy short-circuits the pairwise comparison.
        if (targ in get_synsets(word)) or (word in get_synsets(targ)):
            return 1.0
        # Average Wu-Palmer similarity over all synset pairs, in both
        # directions (wup_similarity is not symmetric).
        total, count = 0.0, 0
        for syn_a in left:
            for syn_b in right:
                for first, second in ((syn_a, syn_b), (syn_b, syn_a)):
                    try:
                        score = first.wup_similarity(second)
                    except Exception:  # FIX: was a bare `except:`
                        score = 0.0
                    if score is not None:
                        total += score
                        count += 1
        if count:
            return total / count
        return 1 if (word == targ) else 0

    left_lsent = _tokens(text_a)
    right_lsent = _tokens(text_b)
    print(k)  # progress indicator kept from the original
    return [[_score(word, targ) for targ in right_lsent] for word in left_lsent]
def pil_loader(path):
    """Load an image as RGB from raw bytes, a zip member, or a file path.

    Accepts: raw image bytes, a path inside a zip archive (per is_zip_path),
    or an ordinary filesystem path. Always returns a PIL Image in RGB mode.
    """
    if isinstance(path, bytes):
        # Raw, already-read image bytes.
        img = Image.open(io.BytesIO(path))
    elif is_zip_path(path):
        # Member of a zip archive; read it fully into memory first.
        data = ZipReader.read(path)
        img = Image.open(io.BytesIO(data))
    else:
        with open(path, 'rb') as f:
            img = Image.open(f)
            # FIX: Image.open is lazy — force decoding (convert) while the
            # file is still open; the original converted after the `with`
            # block had already closed the underlying file.
            return img.convert('RGB')
    return img.convert('RGB')
# NOTE(review): decorator reconstructed — the source had lost the leading `@`.
@_bp.app_errorhandler(V2RegistryException)
def handle_registry_v2_exception(error):
    """Convert a V2RegistryException into the registry's JSON error response."""
    response = jsonify({'errors': [error.as_dict()]})
    # NOTE(review): attribute name reconstructed — the source was truncated
    # after `error.`; the exception carries the HTTP status code to return.
    response.status_code = error.http_status_code
    if (response.status_code == 401):
        # 401 responses must advertise how to authenticate for the
        # requested repository/scopes (WWW-Authenticate et al.).
        response.headers.extend(get_auth_headers(repository=error.repository, scopes=error.scopes))
    logger.debug('sending response: %s', response.get_data())
    return response
class UnalignedDataLoader(BaseDataLoader):
    """Loads unpaired image sets from <dataroot>/<phase>A and <phase>B.

    Both domains share the same preprocessing pipeline; the two DataLoaders
    are wrapped in a PairedData iterator yielding one batch from each.
    """

    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        # Shared preprocessing: resize, random crop, tensorize, normalize to
        # roughly [-1, 1] per channel.
        # NOTE(review): transforms.Scale is the pre-0.2 torchvision name of
        # Resize — this file targets an old torchvision; confirm before upgrading.
        transformations = [transforms.Scale(opt.loadSize), transforms.RandomCrop(opt.fineSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        transform = transforms.Compose(transformations)
        # Domain A images.
        dataset_A = ImageFolder(root=(((opt.dataroot + '/') + opt.phase) + 'A'), transform=transform, return_paths=True)
        data_loader_A = torch.utils.data.DataLoader(dataset_A, batch_size=self.opt.batchSize, shuffle=(not self.opt.serial_batches), num_workers=int(self.opt.nThreads))
        # Domain B images.
        dataset_B = ImageFolder(root=(((opt.dataroot + '/') + opt.phase) + 'B'), transform=transform, return_paths=True)
        data_loader_B = torch.utils.data.DataLoader(dataset_B, batch_size=self.opt.batchSize, shuffle=(not self.opt.serial_batches), num_workers=int(self.opt.nThreads))
        self.dataset_A = dataset_A
        self.dataset_B = dataset_B
        # Random horizontal flips only during training, unless disabled.
        flip = (opt.isTrain and (not opt.no_flip))
        self.paired_data = PairedData(data_loader_A, data_loader_B, self.opt.max_dataset_size, flip)

    def name(self):
        return 'UnalignedDataLoader'

    def load_data(self):
        return self.paired_data

    def __len__(self):
        # Size of the larger domain, capped by max_dataset_size.
        return min(max(len(self.dataset_A), len(self.dataset_B)), self.opt.max_dataset_size)
# NOTE(review): decorator reconstructed — the source retained only the bare
# ('time.sleep') argument; the mock_time_sleep parameter implies
# unittest.mock.patch was stripped.
@patch('time.sleep')
def test_retry_loop_max_end_on_error_substitution(mock_time_sleep):
    """Retry decorator with a {PyString} max stops after max tries and re-raises."""
    rd = RetryDecorator({'max': PyString('3')})
    context = Context({'k1': 'v1'})
    mock = MagicMock()
    mock.side_effect = ValueError('arb')
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        with pytest.raises(ValueError) as err_info:
            rd.retry_loop(context, mock)
    # The original error surfaces once retries are exhausted.
    assert (str(err_info.value) == 'arb')
    # The step ran exactly `max` times and the counter reflects that.
    assert (context['retryCounter'] == 3)
    assert (rd.retry_counter == 3)
    assert (mock.call_count == 3)
    mock.assert_called_with({'k1': 'v1', 'retryCounter': 3})
    # Sleeps happen between tries only (max - 1 times), with 0s backoff.
    assert (mock_time_sleep.call_count == 2)
    mock_time_sleep.assert_called_with(0)
    assert (mock_logger_info.mock_calls == [call('retry decorator will try 3 times with fixed backoff starting at 0s intervals.'), call('retry: running step with counter 1'), call('retry: running step with counter 2'), call('retry: running step with counter 3')])
def _capture_subarguments(params: dict, arg_name: str, sub_arg_list: list[str]) -> Any: argument = params.get(arg_name) if (not isinstance(argument, dict)): return argument _validate_sub_arg_list(argument, arg_name, sub_arg_list) units = argument.pop('units', None) list_of_values = argument.pop(arg_name, []) for sub_arg in sub_arg_list: sub_arg_value = argument.get(sub_arg) if (sub_arg_value is not None): if ((sub_arg in ('lower_left_xy', 'upper_right_xy')) and isinstance(sub_arg_value, list)): list_of_values.extend(sub_arg_value) else: list_of_values.append(sub_arg_value) if (units is not None): return DataArray(list_of_values, attrs={'units': units}) return list_of_values
class F20_Upgrade(DeprecatedCommand, F11_Upgrade):
    """Fedora 20 'upgrade' kickstart command, flagged as deprecated."""

    def __init__(self):
        # DeprecatedCommand's initializer marks the command as deprecated.
        DeprecatedCommand.__init__(self)

    def _getParser(self):
        # Reuse the F11 parser, appending a deprecation notice to its help.
        op = F11_Upgrade._getParser(self)
        op.description += dedent(('\n\n .. deprecated:: %s\n\n Starting with F18, upgrades are no longer supported in\n anaconda and should be done with FedUp, the Fedora update\n tool. Starting with F21, the DNF system-upgrade plugin is\n recommended instead. Therefore, the upgrade command\n essentially does nothing.' % versionToLongString(F20)))
        return op
def get_all_tests():
    """Collect every test path used to build the CI job matrix.

    Returns repo-root-relative paths: one folder per model under
    tests/models (listed first), followed by the remaining top-level test
    directories and tests/test_*.py modules, each group sorted.
    """
    test_root_dir = os.path.join(PATH_TO_TRANFORMERS, 'tests')

    # Top-level test dirs and test_*.py modules (paths relative to repo root).
    tests = sorted(
        entry
        for entry in (f'tests/{name}' for name in os.listdir(test_root_dir))
        if os.path.isdir(entry) or entry.startswith('tests/test_')
    )

    # One folder per model under tests/models.
    model_test_folders = sorted(
        path
        for path in (f'tests/models/{name}' for name in os.listdir(os.path.join(test_root_dir, 'models')))
        if os.path.isdir(path)
    )

    # Replace the aggregate "tests/models" entry with its per-model folders.
    tests.remove('tests/models')
    return model_test_folders + tests
def parse_input():
    """Parse command-line arguments for the ActivityNet proposal evaluation.

    Positional arguments are the ground-truth and proposal JSON files;
    options select the subset and toggle verbosity / status checking.
    """
    description = 'This script allows you to evaluate the ActivityNet proposal task which is intended to evaluate the ability of algorithms to generate activity proposals that temporally localize activities in untrimmed video sequences.'
    p = argparse.ArgumentParser(description=description)
    p.add_argument('ground_truth_filename', help='Full path to json file containing the ground truth.')
    p.add_argument('proposal_filename', help='Full path to json file containing the proposals.')
    p.add_argument('--subset', default='validation', help='String indicating subset to evaluate: (training, validation)')
    # FIX: the original used type=bool, which is broken with argparse —
    # bool('False') is True, so any non-empty value parsed as True.
    # _str2bool interprets the usual spellings; defaults are unchanged.
    p.add_argument('--verbose', type=_str2bool, default=True)
    p.add_argument('--check_status', type=_str2bool, default=False)
    return p.parse_args()


def _str2bool(value):
    """Interpret common true/false spellings from the command line."""
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % (value,))
def _maybe_compute_length_per_key(keys: List[str], stride: int, stride_per_key: List[int], variable_stride_per_key: bool, length_per_key: Optional[List[int]], lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]) -> List[int]: if (length_per_key is None): if (len(keys) and (offsets is not None) and (len(offsets) > 0)): _length: List[int] = (_length_per_key_from_stride_per_key(torch.diff(offsets), stride_per_key) if variable_stride_per_key else torch.sum(torch.diff(offsets).view((- 1), stride), dim=1).tolist()) elif (len(keys) and (lengths is not None)): _length: List[int] = (_length_per_key_from_stride_per_key(lengths, stride_per_key) if variable_stride_per_key else (torch.sum(lengths.view((- 1), stride), dim=1).tolist() if (lengths.numel() != 0) else ([0] * len(keys)))) else: _length: List[int] = [] length_per_key = _length return length_per_key
class PrRoIPool2DFunction(ag.Function):
    """autograd Function wrapping the Precise RoI Pooling CUDA extension.

    Uses the ctx-based (new-style) autograd API, which requires forward and
    backward to be static methods invoked via ``.apply``.
    """

    # FIX: ctx-based autograd Functions must declare forward/backward as
    # static methods; the decorators were missing.
    @staticmethod
    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):
        _prroi_pooling = _import_prroi_pooling()
        assert (('FloatTensor' in features.type()) and ('FloatTensor' in rois.type())), 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())
        pooled_height = int(pooled_height)
        pooled_width = int(pooled_width)
        spatial_scale = float(spatial_scale)
        features = features.contiguous()
        rois = rois.contiguous()
        params = (pooled_height, pooled_width, spatial_scale)
        if features.is_cuda:
            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)
            # Stash hyper-parameters and tensors for the backward pass.
            ctx.params = params
            ctx.save_for_backward(features, rois, output)
        else:
            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.')
        return output

    @staticmethod
    def backward(ctx, grad_output):
        _prroi_pooling = _import_prroi_pooling()
        (features, rois, output) = ctx.saved_tensors
        grad_input = grad_coor = None
        if features.requires_grad:
            grad_output = grad_output.contiguous()
            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params)
        if rois.requires_grad:
            grad_output = grad_output.contiguous()
            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params)
        # One gradient per forward input; the three hyper-parameters get None.
        return (grad_input, grad_coor, None, None, None)
def scale_voltage_current_power(data, voltage=1, current=1):
    """Scale the voltage, current, and power columns of IV-curve results.

    Voltage columns (v_mp, v_oc) are multiplied by `voltage`, current
    columns (i_mp, i_x, i_xx, i_sc) by `current`, and power (p_mp) by both.
    The result keeps the caller's original column ordering.
    """
    voltage_cols = ['v_mp', 'v_oc']
    current_cols = ['i_mp', 'i_x', 'i_xx', 'i_sc']
    power_cols = ['p_mp']
    pieces = [
        data.filter(voltage_cols, axis=1) * voltage,
        data.filter(current_cols, axis=1) * current,
        (data.filter(power_cols, axis=1) * voltage) * current,
    ]
    combined = pd.concat(pieces, axis=1)
    # Restore the caller's original column ordering.
    return combined[data.columns]
def convert_image(image, export_path):
    """Export a Pinnacle image set as DICOM with patient identifiers rewritten.

    Reads each file of the image set's DICOM directory, overwrites the
    patient name/ID/birthdate with the Pinnacle patient record, and writes
    the result to `export_path` named <Modality>.<SOPInstanceUID>.dcm.
    Falls back to generating the image files when no DICOM directory exists.
    """
    image.logger.debug('Converting image patient name, birthdate and id to match pinnacle')
    dicom_directory = os.path.join(image.path, f"ImageSet_{image.image['ImageSetID']}.DICOM")
    if (not os.path.exists(dicom_directory)):
        # No exported DICOM yet: generate the image files from scratch.
        image.logger.info('Dicom Image files do not exist. Creating image files')
        create_image_files(image, export_path)
        return
    for file in os.listdir(dicom_directory):
        imageds = pydicom.read_file(os.path.join(dicom_directory, file), force=True)
        # Overwrite patient identity with the Pinnacle patient record.
        imageds.PatientName = image.pinnacle.patient_info['FullName']
        imageds.PatientID = image.pinnacle.patient_info['MedicalRecordNumber']
        imageds.PatientBirthDate = image.pinnacle.patient_info['DOB']
        if (not ('SOPInstanceUID' in imageds)):
            # FIX: logger.warn is a deprecated alias of logger.warning.
            image.logger.warning('Unable to process image: %s', file)
            continue
        # NOTE(review): `preamble` is computed but never written back to the
        # dataset; with write_like_original=False pydicom emits a standard
        # preamble anyway — confirm whether `imageds.preamble = preamble`
        # was intended here.
        preamble = getattr(imageds, 'preamble', None)
        if (not preamble):
            preamble = (b'\x00' * 128)
        output_file = os.path.join(export_path, f"{image.image['Modality']}.{imageds.SOPInstanceUID}.dcm")
        imageds.save_as(output_file, write_like_original=False)
        image.logger.info('Exported: %s to %s', file, output_file)
def create_unlock(channel_state: NettingChannelState, message_identifier: MessageID, payment_identifier: PaymentID, secret: Secret, lock: HashTimeLockState, block_number: BlockNumber, recipient_metadata: AddressMetadata=None) -> SendUnlockAndPendingLocksState:
    """Create the SendUnlock message that unlocks `lock` with `secret`,
    together with the updated set of pending locks.

    Mutates channel_state.our_state.nonce (consumes the next nonce). The
    asserts encode caller preconditions: the lock must be pending and
    unexpired, and the channel must be open.

    NOTE(review): the return annotation names SendUnlockAndPendingLocksState
    while the function returns a plain (SendUnlock, pending_locks) tuple —
    presumably the annotation is a tuple alias; confirm in the typing module.
    """
    our_state = channel_state.our_state
    msg = 'caller must make sure the lock is known'
    assert is_lock_pending(our_state, lock.secrethash), msg
    msg = 'caller must make sure the channel is open'
    assert (get_status(channel_state) == ChannelState.STATE_OPENED), msg
    expired = is_lock_expired(end_state=channel_state.our_state, lock=lock, block_number=block_number, lock_expiration_threshold=lock.expiration)
    msg = 'caller must make sure the lock is not expired'
    assert (not expired), msg
    our_balance_proof = our_state.balance_proof
    msg = 'the lock is pending, it must be in the pending locks'
    assert (our_balance_proof is not None), msg
    # Unlocking moves the lock's amount from locked to transferred.
    transferred_amount = TokenAmount((lock.amount + our_balance_proof.transferred_amount))
    pending_locks = compute_locks_without(our_state.pending_locks, EncodedData(bytes(lock.encoded)))
    msg = 'the lock is pending, it must be in the pending locks'
    assert (pending_locks is not None), msg
    locksroot = compute_locksroot(pending_locks)
    token_address = channel_state.token_address
    recipient = channel_state.partner_state.address
    locked_amount = LockedAmount((get_amount_locked(our_state) - lock.amount))
    # Consume the next nonce; this mutates the channel state in place.
    nonce = get_next_nonce(our_state)
    channel_state.our_state.nonce = nonce
    balance_proof = BalanceProofUnsignedState(nonce=nonce, transferred_amount=transferred_amount, locked_amount=locked_amount, locksroot=locksroot, canonical_identifier=channel_state.canonical_identifier)
    unlock_lock = SendUnlock(recipient=recipient, recipient_metadata=recipient_metadata, message_identifier=message_identifier, payment_identifier=payment_identifier, token_address=token_address, secret=secret, balance_proof=balance_proof, canonical_identifier=channel_state.canonical_identifier)
    return (unlock_lock, pending_locks)
def FitCompass(debug, compass_points, compass_calibration, norm):
    """Attempt a new compass calibration fit from accumulated sigma points.

    Returns the accepted calibration `c` (with the achieved coverage
    appended to its deviation list) or None/falsy when no acceptable fit
    exists. `debug` is a logging callable; `compass_points` supplies the
    accumulated measurement points.

    NOTE(review): this function's block structure was reconstructed from
    whitespace-damaged source — verify the nesting against upstream.
    """
    p = compass_points.Points(True)
    # Need a minimum number of points before fitting is meaningful.
    if (len(p) < 8):
        return
    fit = FitPointsCompass(debug, p, compass_calibration, norm)
    if (not fit):
        return
    g_required_dev = 0.25
    # Accelerometer components (columns 3+) gauge 3D spread of the samples.
    gpoints = []
    for q in p:
        gpoints.append(q[3:])
    (avg, g_dev, g_max_dev) = PointFit(gpoints)
    # Default to the 2D fit; use the 3D fit only with enough vertical spread.
    c = fit[1]
    if (g_max_dev < g_required_dev):
        debug('sigmapoints flat, 2D fit only', g_max_dev, g_required_dev)
    else:
        if fit[2]:
            c = fit[2]
        if (not c):
            debug('would be using 1d fit!')
    if (not c):
        debug('No Fit available', fit)
        return
    coverage = ComputeCoverage(p, c[0][:3], norm)
    if (coverage < 14):
        debug('insufficient coverage:', coverage, ' need 14')
        if (c == fit[1]):
            return
        # Fall back to the 1D fit when coverage is insufficient.
        c = fit[0]
        return debug('using 1d fit')
    # Sanity checks: field magnitude and inclination within earth-like range.
    mag = c[0][3]
    if ((mag < 12) or (mag > 120)):
        debug('fit found field outside of normal earth field strength', mag)
        return
    inc = c[0][4]
    if (abs(inc) > 82):
        debug('incline greater than 82 degrees, no fit')
        return
    # Compare fit deviation against the deviation of the current calibration.
    deviation = c[1]
    if ((deviation[0] > 0.15) or (deviation[1] > 3)):
        curdeviation = ComputeDeviation(p, compass_calibration)
        debug('bad fit:', deviation, 'cur dev:', curdeviation)
        if ((((deviation[0] / curdeviation[0]) + (deviation[1] / curdeviation[1])) < 2.5) or (curdeviation[0] > 0.2) or (curdeviation[1] > 10)):
            debug('allowing bad fit')
        else:
            compass_points.RemoveOldest()
            return
    # Skip updates that are essentially identical to the current calibration.
    if (vector.dist2(c[0], compass_calibration) < 0.1):
        debug('new calibration same as previous')
        return
    c[1].append(coverage)
    return c
def find_model(model_name):
    """Resolve `model_name` to a loaded model checkpoint.

    Known pretrained names are fetched via download_model; any other value
    is treated as a local checkpoint path loaded onto the CPU. Returns a
    (checkpoint, using_pretrained_model) pair.
    """
    if model_name in VALID_MODELS:
        # Known pretrained model: fetch from the model zoo.
        return (download_model(model_name), True)
    # Local checkpoint path; map all tensors to CPU storage.
    checkpoint = torch.load(model_name, map_location=(lambda storage, loc: storage))
    return (checkpoint, False)
class Accumulator(object):
    """Keeps a pointer (index plus remembered object) into a displayed list.

    Subclasses override get_list() and get_height() to supply the data and
    the page size used for page-wise movement.
    """

    def __init__(self):
        self.pointer = 0         # current index into get_list()
        self.pointed_obj = None  # the object the pointer last referred to

    def move(self, narg=None, **keywords):
        """Move the pointer per a Direction built from keywords; return the new index."""
        direction = Direction(keywords)
        lst = self.get_list()
        if (not lst):
            return self.pointer
        pointer = direction.move(direction=direction.down(), maximum=len(lst), override=narg, pagesize=self.get_height(), current=self.pointer)
        self.pointer = pointer
        self.correct_pointer()
        return pointer

    def move_to_obj(self, arg, attr=None):
        """Move the pointer to `arg` (compared directly, or via attribute
        `attr` when given); fall back to re-applying the current position."""
        if (not arg):
            return None
        lst = self.get_list()
        if (not lst):
            return None
        do_get_attr = isinstance(attr, str)
        good = arg
        if do_get_attr:
            try:
                good = getattr(arg, attr)
            except (TypeError, AttributeError):
                pass
        for i, obj in enumerate(lst):
            if do_get_attr:
                try:
                    test = getattr(obj, attr)
                except AttributeError:
                    continue
            else:
                test = obj
            if (test == good):
                self.move(to=i)
                return True
        return self.move(to=self.pointer)

    def correct_pointer(self):
        """Clamp the pointer into range and re-resolve pointed_obj."""
        lst = self.get_list()
        if (not lst):
            self.pointer = 0
            self.pointed_obj = None
            return
        i = self.pointer
        if (i is None):
            i = 0
        if (i >= len(lst)):
            i = (len(lst) - 1)
        i = max(0, i)
        self.pointer = i
        self.pointed_obj = lst[i]

    def pointer_is_synced(self):
        """True if the pointer index still refers to the remembered object."""
        lst = self.get_list()
        try:
            return (lst[self.pointer] == self.pointed_obj)
        except (IndexError, KeyError):
            return False

    def sync_index(self, **kw):
        """Re-locate the remembered object after the list changed."""
        self.move_to_obj(self.pointed_obj, **kw)

    def get_list(self):
        """Override: the list being pointed into."""
        return []

    # FIX: was defined without `self`, so self.get_height() in move()
    # raised TypeError ("takes 0 positional arguments but 1 was given").
    def get_height(self):
        """Override: page size used for page-wise movement."""
        return 25
# NOTE(review): decorator reconstructed — the source retained only the bare
# (Post) argument; admin.register is the conventional ModelAdmin registration.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for Post."""

    form = PostAdminForm
    list_display = ('title', 'published', 'author_display_name')
    # Foreign-key field name used for per-user filtering.
    user_fk = 'author_id'
    autocomplete_fields = ('author',)

    # NOTE(review): decorator reconstructed from the bare (description='Author')
    # left in the source; admin.display sets the list_display column header.
    @admin.display(description='Author')
    def author_display_name(self, obj):
        """Column renderer: the author's display name."""
        return obj.author.display_name
def test_fileformatjson_pass_with_substitutions(fs):
    """fileFormatJson substitutes {k*} tokens from context while writing out."""
    payload = '{\n "key1": "{k1}value !$% *",\n "key2_{k2}": {\n "k21": "value",\n "abc": "{k3} def {k4}",\n "def": [\n "l1",\n "l2 {k5}",\n "l3"\n ]\n }\n}\n'
    in_path = './tests/testfiles/testsubst.json'
    out_path = './tests/testfiles/out/outsubst.json'
    fs.create_file(in_path, contents=payload)

    context = Context({'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatJson': {'in': in_path, 'out': out_path}})
    fileformat.run_step(context)

    # The step must leave the context itself untouched.
    assert context, "context shouldn't be None"
    assert (len(context) == 6), 'context should have 6 items'
    assert (context['k1'] == 'v1')
    assert (context['fileFormatJson'] == {'in': in_path, 'out': out_path})

    # The written file carries the substituted values.
    with open(out_path) as produced:
        outcontents = json.load(produced)
    expected = {'key1': 'v1value !$% *', 'key2_v2': {'k21': 'value', 'abc': 'v3 def v4', 'def': ['l1', 'l2 v5', 'l3']}}
    assert (outcontents == expected)
class CPythonPosix(CPython, PosixSupports, metaclass=ABCMeta):
    """CPython virtual-environment creation on POSIX platforms."""

    @classmethod
    def _executables(cls, interpreter):
        """Yield (host executable, alias names, must, when) for the venv bin dir.

        Bug fix: the method takes ``cls`` and is plainly a classmethod, but the
        ``@classmethod`` decorator was missing — restored.
        """
        host_exe = Path(interpreter.system_executable)
        (major, minor) = (interpreter.version_info.major, interpreter.version_info.minor)
        # OrderedDict used as an ordered set to de-duplicate alias names.
        targets = OrderedDict(((i, None) for i in ['python', f'python{major}', f'python{major}.{minor}', host_exe.name]))
        (yield (host_exe, list(targets.keys()), RefMust.NA, RefWhen.ANY))
def make_commodity_future_info(first_sid, root_symbols, years, month_codes=None, multiplier=500):
    """Build futures-contract info with commodity-style date conventions.

    Notice date: 19 days into the month two months before the cycle month.
    Expiration: 19 days into the month one month before the cycle month.
    Start date: one year before the cycle month.
    """
    nineteen_days = pd.Timedelta(days=19)
    one_year = pd.Timedelta(days=365)

    def notice_date(dt):
        return (dt - MonthBegin(2)) + nineteen_days

    def expiration_date(dt):
        return (dt - MonthBegin(1)) + nineteen_days

    def start_date(dt):
        return (dt - one_year)

    return make_future_info(first_sid=first_sid, root_symbols=root_symbols, years=years, notice_date_func=notice_date, expiration_date_func=expiration_date, start_date_func=start_date, month_codes=month_codes, multiplier=multiplier)
def do_kmeans(n_anchors, boxes, centroids):
    """Run one k-means step over anchor boxes using (1 - IoU) as distance.

    Assigns each box to its nearest centroid, then returns the per-cluster
    mean box as the new centroids: ``(new_centroids, groups, loss)``.
    """
    total_loss = 0
    clusters = [[] for _ in range(n_anchors)]
    updated = [Box(0, 0, 0, 0) for _ in range(n_anchors)]
    for box in boxes:
        # Find the closest centroid (distances are bounded by 1).
        best_dist = 1
        best_idx = 0
        for (idx, centroid) in enumerate(centroids):
            dist = (1 - iou(box, centroid))
            if (dist < best_dist):
                best_dist = dist
                best_idx = idx
        clusters[best_idx].append(box)
        total_loss += best_dist
        updated[best_idx].w += box.w
        updated[best_idx].h += box.h
    for idx in range(n_anchors):
        # Guard against empty clusters to avoid division by zero.
        size = max(len(clusters[idx]), 1)
        updated[idx].w /= size
        updated[idx].h /= size
    return (updated, clusters, total_loss)
def test_upsert(local_client, remote_client):
    """Batch-upsert fixtures into both clients, then overwrite one point and
    verify local and remote stay identical throughout."""
    records = generate_fixtures(UPLOAD_NUM_VECTORS)
    ids = [record.id for record in records]
    payload = [record.payload for record in records]
    vectors = {}
    for record in records:
        for (vector_name, vector_values) in record.vector.items():
            vectors.setdefault(vector_name, []).append(vector_values)
    points = models.Batch(ids=ids, vectors=vectors, payloads=payload)
    local_client.upsert(COLLECTION_NAME, points)
    remote_client.upsert(COLLECTION_NAME, points)

    # Pick the first point and prepare its replacement.
    id_ = ids[0]
    vector = {name: values[0] for (name, values) in vectors.items()}
    old_payload = payload[0]
    id_filter = models.Filter(must=[models.HasIdCondition(has_id=[id_])])

    def fetch_point(client):
        return client.scroll(COLLECTION_NAME, scroll_filter=id_filter, limit=1)[0][0]

    assert (fetch_point(local_client) == fetch_point(remote_client))

    new_payload = one_random_payload_please(id_)
    assert (old_payload != new_payload)
    replacement = [models.PointStruct(id=id_, vector=vector, payload=new_payload)]
    local_client.upsert(COLLECTION_NAME, replacement)
    remote_client.upsert(COLLECTION_NAME, replacement)

    assert (fetch_point(local_client) == fetch_point(remote_client))
    compare_collections(local_client, remote_client, UPLOAD_NUM_VECTORS)
class SqlAlchemyControl(ORMControl):
    """ORM control backed by SQLAlchemy + Alembic.

    Manages the engine lifecycle, (nested) transactions on the scoped
    ``Session``, declarative instrumentation of persisted classes, and the
    per-egg schema-version bookkeeping used for migrations.
    """

    def __init__(self, echo=False):
        self.echo = echo      # forwarded to the engine's SQL echo setting
        self.engine = None    # set by connect(), cleared by disconnect()

    # NOTE(review): this generator is used as a context manager elsewhere —
    # it presumably carried an @contextmanager decorator upstream; confirm.
    def nested_transaction(self):
        """Run the with-body in a SAVEPOINT; commit unless it raises or the
        yielded TransactionVeto votes to roll back."""
        transaction = Session().begin_nested()
        transaction_veto = TransactionVeto()
        try:
            (yield transaction_veto)
        except Exception as ex:
            # An exception may still request a commit via its .commit attr.
            commit = getattr(ex, 'commit', False)
            raise
        else:
            commit = True
        finally:
            if transaction_veto.has_voted:
                # An explicit vote overrides the outcome inferred above.
                commit = transaction_veto.should_commit
            if transaction.is_active:
                if commit:
                    transaction.commit()
                else:
                    transaction.rollback()

    # NOTE(review): also used as a context manager — likely @contextmanager.
    def managed_transaction(self):
        """Run the with-body in the session's (possibly pre-existing)
        transaction; roll back on error, commit on success."""
        transaction = self.get_or_initiate_transaction()
        try:
            (yield transaction)
        except:
            if transaction.is_active:
                transaction.rollback()
            raise
        else:
            transaction.commit()

    def connect(self, auto_commit=False):
        """Create the engine from reahl config, bind the scoped Session, and
        instrument all persisted classes of the root egg."""
        # NOTE(review): `self.connected` is a plain method below, so this
        # assert tests a bound method (always truthy) — upstream `connected`
        # is presumably a @property; confirm before relying on this guard.
        assert (not self.connected)
        context = ExecutionContext.get_context()
        config = context.config
        db_api_connection_creator = context.system_control.db_control.get_dbapi_connection_creator()
        create_args = config.sqlalchemy.engine_create_args.copy()
        if auto_commit:
            create_args['isolation_level'] = 'AUTOCOMMIT'
            create_args['execution_options'] = {'isolation_level': 'AUTOCOMMIT'}
        if db_api_connection_creator:
            create_args['creator'] = db_api_connection_creator
        self.engine = create_engine(config.reahlsystem.connection_uri, **create_args)
        self.engine.echo = self.echo
        self.engine.connect()
        Session.configure(bind=self.engine)
        self.instrument_classes_for(config.reahlsystem.root_egg)

    def instrument_classes_for(self, root_egg):
        """Instrument the declarative subset of all persisted classes
        contributed by the egg's relevant interfaces."""
        all_classes = []
        for i in ReahlEgg.get_all_relevant_interfaces(root_egg):
            all_classes.extend(i.get_persisted_classes_in_order())
        declarative_classes = [i for i in all_classes if issubclass(i, Base)]
        self.instrument_declarative_classes(declarative_classes)

    def instrument_declarative_classes(self, all_classes):
        """Map each not-yet-mapped class declaratively; skip classes
        SQLAlchemy rejects."""
        registry = {}
        for cls in all_classes:
            try:
                if (not hasattr(cls, '__mapper__')):
                    instrument_declarative(cls, registry, metadata)
                    # NOTE(review): `cls.table` / `cls.mapper` look like they
                    # should be `cls.__table__` / `cls.__mapper__` — confirm.
                    logging.getLogger(__file__).info(('Instrumented %s: __tablename__=%s [polymorphic_identity=%s]' % (cls, cls.table, cls.mapper.polymorphic_identity)))
            except InvalidRequestError:
                logging.info(('skipping declarative instrumentation of %s' % cls))

    # NOTE(review): read as an attribute elsewhere in this class — likely a
    # stripped @property; as written it must be called.
    def connected(self):
        """Truthy when an engine exists."""
        return self.engine

    def get_or_initiate_transaction(self):
        """Return the session's current transaction, starting one if needed."""
        assert self.connected
        if Session().in_transaction():
            return Session().get_transaction()
        return Session().begin()

    def finalise_session(self):
        """Flush (when nested) or commit, then detach state appropriately for
        in-memory vs external databases."""
        nested = Session().in_nested_transaction()
        if nested:
            Session().flush()
            return
        self.commit()
        context = ExecutionContext.get_context()
        if context.system_control.db_control.is_in_memory:
            # Keep the session (and its connection) alive for in-memory DBs.
            Session().expunge_all()
        else:
            Session.remove()

    def disconnect(self):
        """Dispose the engine and discard the scoped session."""
        assert self.connected
        self.engine.dispose()
        self.engine = None
        Session.remove()

    def commit(self):
        Session.commit()

    def rollback(self):
        Session.rollback()

    def create_db_tables(self, transaction, eggs_in_order):
        """Create all mapped tables and record each egg's schema version."""
        metadata.create_all(bind=Session.connection())
        for egg in eggs_in_order:
            self.initialise_schema_version_for(egg)

    def drop_db_tables(self, transaction):
        metadata.drop_all(bind=Session.connection())

    def execute_one(self, sql):
        """Execute raw SQL and return its first row."""
        return Session.execute(sql).fetchone()

    def migrate_db(self, eggs_in_order, explain=False):
        """Run the superclass migration plan inside an Alembic Operations
        context bound to the current connection."""
        opts = {'target_metadata': metadata}
        with Operations.context(MigrationContext.configure(connection=Session.connection(), opts=opts)) as op:
            self.op = op
            return super().migrate_db(eggs_in_order, explain=explain)

    def prune_schemas_to_only(self, live_versions):
        """Drop tables/indexes no longer present in the metadata and delete
        SchemaVersion rows for eggs that are no longer installed."""
        opts = {'target_metadata': metadata}
        with Operations.context(MigrationContext.configure(connection=Session.connection(), opts=opts)) as op:
            self.op = op
            to_remove = [i for i in self.get_outstanding_migrations().upgrade_ops.as_diffs() if i[0].startswith('remove_')]
            tables_to_drop = []
            unhandled = []
            for migration in to_remove:
                name = migration[0]
                if (name == 'remove_table'):
                    table = migration[1]
                    tables_to_drop.append(table)
                    # FKs must go before the tables they reference.
                    for foreign_key in table.foreign_key_constraints:
                        op.drop_constraint(foreign_key.name, table.name)
                elif (name == 'remove_index'):
                    op.drop_index(migration[1].name)
                else:
                    unhandled.append(migration)
            for table in tables_to_drop:
                op.drop_table(table.name)
            if unhandled:
                print('These migrations have not been automatically done, please effect them by other means:')
                for migration in unhandled:
                    print(migration)
            installed_version_names = [version.name for version in live_versions]
            for created_schema_version in Session.query(SchemaVersion).all():
                if (created_schema_version.egg_name not in installed_version_names):
                    Session.delete(created_schema_version)

    def get_outstanding_migrations(self):
        """Diff the live database against the metadata via Alembic."""
        return produce_migrations(MigrationContext.configure(connection=Session.connection()), metadata)

    def diff_db(self, output_sql=False):
        """Report outstanding migrations; with output_sql, render and exec the
        generated Alembic code in as_sql mode to print the SQL."""
        migrations = self.get_outstanding_migrations()
        if output_sql:
            commented_source_code = render_python_code(migrations.upgrade_ops, alembic_module_prefix='op2.', sqlalchemy_module_prefix='sqlalchemy.')
            uncommented_source_code = [i.strip() for i in commented_source_code.split('\n') if (not i.strip().startswith('#'))]
            source_code = '\n'.join((['import sqlalchemy'] + uncommented_source_code))
            opts = {'as_sql': output_sql, 'target_metadata': metadata}
            with Operations.context(MigrationContext.configure(connection=Session.connection(), opts=opts)) as op2:
                # Executes only code generated by Alembic above, not user input.
                exec(source_code, globals(), locals())
            return uncommented_source_code
        else:
            migrations_required = migrations.upgrade_ops.as_diffs()
            if migrations_required:
                pprint.pprint(migrations_required, indent=2, width=20)
            return migrations_required

    def initialise_schema_version_for(self, egg=None, egg_name=None, egg_version=None):
        """Record a new SchemaVersion row; raises DomainException if one
        already exists for the egg."""
        assert (egg or (egg_name and egg_version))
        if egg:
            egg_name = egg.name
            egg_version = str(egg.installed_version.version_number)
        existing_versions = Session.query(SchemaVersion).filter_by(egg_name=egg_name)
        already_created = (existing_versions.count() > 0)
        if already_created:
            raise DomainException(message=('The schema for the "%s" egg has already been created previously at version %s' % (egg_name, existing_versions.one().version)))
        Session.add(SchemaVersion(version=egg_version, egg_name=egg_name))

    def remove_schema_version_for(self, egg=None, egg_name=None, fail_if_not_found=True):
        """Delete the egg's SchemaVersion row; .one() enforces existence when
        fail_if_not_found is set."""
        assert (egg or egg_name)
        if egg:
            egg_name = egg.name
        versions_to_delete = Session.query(SchemaVersion).filter_by(egg_name=egg_name)
        if (fail_if_not_found or (versions_to_delete.count() > 0)):
            schema_version_for_egg = versions_to_delete.one()
            Session.delete(schema_version_for_egg)

    def schema_version_for(self, egg, default=None):
        """Return the recorded schema version for the egg, or ``default`` when
        the version table does not exist or has no row for the egg."""
        engine = Session().get_bind()
        if (not sqlalchemy.inspect(engine).has_table(SchemaVersion.__tablename__)):
            return default
        existing_versions = Session.query(SchemaVersion).filter_by(egg_name=egg.name)
        number_versions_found = existing_versions.count()
        assert (number_versions_found <= 1), ('More than one existing schema version found for egg %s' % egg.name)
        if (number_versions_found == 1):
            return existing_versions.one().version
        else:
            assert default, ('No existing schema version found for egg %s, and you did not specify a default version' % egg.name)
            return default

    def set_schema_version_for(self, version):
        """Insert or update the SchemaVersion row for ``version``'s egg."""
        current_versions = Session.query(SchemaVersion).filter_by(egg_name=version.name)
        versions_count = current_versions.count()
        assert (versions_count <= 1), ('Expected 0 or 1 SchemaVersions for %s, found %s' % (version.name, versions_count))
        if (versions_count < 1):
            Session.add(SchemaVersion(version=str(version.version_number), egg_name=version.name))
        elif (versions_count == 1):
            current_version = current_versions.one()
            current_version.version = str(version.version_number)

    def assert_dialect(self, migration, *supported_dialects):
        """Raise DomainException when the active DB dialect is unsupported by
        the given migration."""
        dialect_name = self.engine.dialect.name
        if (dialect_name not in supported_dialects):
            raise DomainException(message=('Migration %s does not support the database dialect you are running on (%s), only one of %s' % (migration, dialect_name, supported_dialects)))
def main():
    """Entry point: parse args, build tokenizer/dataset/model for radiology
    report generation (IU X-Ray or MIMIC-CXR), then train and/or test."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A single .json argument is treated as a full config file.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    training_args.group_by_length = True
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    logger.info(training_args)
    # Resume from the newest checkpoint unless explicitly overridden.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    from tokenizer import Tokenizer
    # Dataset identity (and its token-frequency threshold) is inferred from
    # the annotation file path.
    data_args.dataset = ('iu_xray' if ('iu_xray' in data_args.annotation_file) else 'mimic_cxr')
    data_args.threshold = (3 if ('iu_xray' in data_args.annotation_file) else 10)
    # ImageNet-normalized transforms; IU X-Ray adds crop/flip augmentation
    # for training only.
    train_image_tokenizer = image_tokenizer = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    if (data_args.dataset == 'iu_xray'):
        train_image_tokenizer = transforms.Compose([transforms.Resize(256), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    text_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', **tokenizer_kwargs)
    (id2tags, headers) = Tokenizer.load_tag2ids(data_args.tag_path, None, True)
    tokenizer = Tokenizer(data_args, headers)
    logger.info('')
    logger.info('')
    print(data_args)
    logger.info('')
    logger.info('')
    # Small BART encoder-decoder configured around the custom vocabulary.
    num_layers = 3
    config = BartConfig(vocab_size=len(tokenizer.idx2token), max_position_embeddings=data_args.max_tgt_length, encoder_layers=num_layers, encoder_ffn_dim=512, encoder_attention_heads=8, decoder_layers=num_layers, decoder_ffn_dim=512, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='relu', d_model=512, dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=tokenizer.pad_token_id, bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id, decoder_start_token_id=tokenizer.bos_token_id, is_encoder_decoder=True, forced_eos_token_id=0, alpha=model_args.alpha)
    from src_plan.models.modeling_bart_custom import BartForConditionalGeneration
    # Visual-backbone settings; region_num differs per dataset (two views for
    # IU X-Ray — presumably 2x49 patches; confirm).
    visual_backbone = {'resnet101': ('resnet101', 2048)}
    (config.visual_extractor, config.d_visual) = visual_backbone['resnet101']
    config.visual_extractor_pretrained = True
    config.chexpert_model_name_or_path = model_args.chexpert_model_name_or_path
    config.dataset = data_args.dataset
    config.max_tgt_length = data_args.max_tgt_length
    config.obs_num = (len(tokenizer.idx2token) - 2)
    config.region_num = (98 if ('iu_xray' in data_args.annotation_file) else 49)
    with open(data_args.annotation_file, 'r', encoding='utf-8') as f:
        annotation = json.load(f)
    config.tag_size = config.obs_num
    model = BartForConditionalGeneration(config=config, tokenizer=tokenizer)
    logger.info('')
    logger.info('***** Model Structure *****')
    logger.info('')
    logger.info(model)
    logger.info('')
    logger.info('')
    logger.info('')
    train_dataset = eval_dataset = test_dataset = None
    id2tagpos = json.load(open(data_args.id2tagpos_path, 'r', encoding='utf-8'))
    if data_args.debug_model:
        # Shrink every split to 16 samples for quick debugging runs.
        for key in annotation:
            annotation[key] = annotation[key][:16]
    if training_args.do_train:
        train_dataset = DatasetCustom(data_args=data_args, annotation=annotation, split='train', status='train', image_tokenizer=train_image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers))
        eval_dataset = DatasetCustom(data_args=data_args, annotation=annotation, split='valid', image_tokenizer=image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers))
    if training_args.do_predict:
        test_dataset = DatasetCustom(data_args=data_args, annotation=annotation, split='test', image_tokenizer=image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers))
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, text_tokenizer=text_tokenizer, train_image_tokenizer=train_image_tokenizer, eval_image_tokenizer=image_tokenizer, model=model, padding=True, pad_to_multiple_of=8)
    # Propagate generation/search settings onto the trainer's args.
    training_args.max_tgt_length = data_args.max_tgt_length
    training_args.num_beams = model_args.num_beams
    training_args.fast_lr = model_args.fast_lr
    data_args.max_steps = training_args.max_steps
    from transformers import EarlyStoppingCallback
    trainer = Seq2SeqTrainerGenMetrics(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, callbacks=[EarlyStoppingCallback(early_stopping_patience=5)])
    trainer.data_args = data_args
    if training_args.do_train:
        logger.info('*** Train ***')
        train(training_args, data_args, last_checkpoint, trainer, train_dataset)
    if training_args.do_predict:
        logger.info('*** Test ***')
        if (model_args.test_model_name_or_path is not None):
            # Optionally load explicit test weights (strict=False tolerates
            # heads that are absent from the checkpoint).
            logger.info(('*** Test: Loading %s ***' % model_args.test_model_name_or_path))
            state_dict = torch.load(os.path.join(model_args.test_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
            model.load_state_dict(state_dict, strict=False)
        model = model.cuda()
        from train_eval_ende_full import eval_text
        print(model_args.num_beams)
        eval_text(max_tgt_length=data_args.max_tgt_length, model=model, tokenizer=tokenizer, test_dataset=trainer.get_test_dataloader(test_dataset), output_path=training_args.output_dir, num_beams=model_args.num_beams)
class Optimizer(object):
    """Wraps an optimizer and applies a step-count-based LR schedule.

    The scale is 1 until ``warmup_steps`` is reached; after that it decays as
    ``decay_learning_rate ** (current_steps / warmup_steps)``. The effective
    LR is clamped below at 1e-06.
    """

    def __init__(self, optimizer, init_lr, current_step=0, warmup_steps=50000, decay_learning_rate=0.5):
        self.optimizer = optimizer
        self.init_lr = init_lr
        self.current_steps = current_step
        self.warmup_steps = warmup_steps
        self.decay_learning_rate = decay_learning_rate

    def zero_grad(self):
        """Clear accumulated gradients on the wrapped optimizer."""
        self.optimizer.zero_grad()

    def step_and_update_lr(self):
        """Refresh the learning rate, then take an optimizer step."""
        self.update_learning_rate()
        self.optimizer.step()

    def get_lr_scale(self):
        """Return the current LR multiplier (1 during the initial phase)."""
        if self.current_steps < self.warmup_steps:
            return 1
        return np.power(self.decay_learning_rate, (self.current_steps / self.warmup_steps))

    def update_learning_rate(self):
        """Advance the step counter and push the clamped LR to all groups."""
        self.current_steps += 1
        new_lr = np.maximum(1e-06, (self.init_lr * self.get_lr_scale()))
        self.lr = new_lr
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr
def get_available_reporting_integrations():
    """Return names of reporting integrations importable in this environment.

    Order matters: it mirrors the historical checking order. ``azure_ml`` is
    only reported when mlflow is absent, since mlflow supersedes it.
    """
    integrations = []
    if is_azureml_available() and not is_mlflow_available():
        integrations.append('azure_ml')
    checks = (
        (is_comet_available, 'comet_ml'),
        (is_dagshub_available, 'dagshub'),
        (is_mlflow_available, 'mlflow'),
        (is_neptune_available, 'neptune'),
        (is_tensorboard_available, 'tensorboard'),
        (is_wandb_available, 'wandb'),
        (is_codecarbon_available, 'codecarbon'),
        (is_clearml_available, 'clearml'),
    )
    integrations.extend(name for available, name in checks if available())
    return integrations
class PickleProtocol():
    """Context manager that temporarily pins ``pickle.HIGHEST_PROTOCOL``.

    On entry the pickle module is reloaded and its advertised highest
    protocol forced to ``level``; on exit the original value (captured at
    construction time) is restored the same way.
    """

    def __init__(self, level):
        # Capture the real highest protocol before any patching happens.
        self.previous = pickle.HIGHEST_PROTOCOL
        self.level = level

    def _apply(self, level):
        # Reload to reset module state, then override the advertised maximum.
        importlib.reload(pickle)
        pickle.HIGHEST_PROTOCOL = level

    def __enter__(self):
        self._apply(self.level)

    def __exit__(self, *exc):
        self._apply(self.previous)
class GPT2Config(PretrainedConfig):
    """Configuration class for GPT-2: vocabulary/context sizes, transformer
    dimensions, dropout rates, and sequence-summary head settings."""

    pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        super(GPT2Config, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels

    # Bug fix: the four accessors below are read-only aliases consumed as
    # attributes by the generic PretrainedConfig interface; their @property
    # decorators had been stripped — restored.
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
class Sst2Processor(object):
    """Reads SST-2 sentiment TSV files and yields InputExample objects."""

    def get_train_examples(self, data_dir, num_train_samples=(- 1)):
        """Return training examples; truncate to num_train_samples if >= 0."""
        if (num_train_samples != (- 1)):
            return self._create_examples(self._read_tsv(os.path.join(data_dir, 'sst2_train.tsv')), 'train')[:num_train_samples]
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'sst2_train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Return the dev-split examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'sst2_dev.tsv')), 'dev')

    def get_labels(self):
        """Binary sentiment labels."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Build InputExamples from TSV rows; row 0 is the header."""
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue
            guid = i
            text_a = line[0]
            label = line[1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists.

        Bug fix: the method takes ``cls`` and is called without an instance,
        but the ``@classmethod`` decorator was missing — restored.
        """
        with open(input_file, 'r') as f:
            reader = csv.reader(f, delimiter='\t', quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
class SyslogWriter(object):
    """Radish extension: mirrors feature/scenario/step lifecycle events to the
    system syslog (loaded only when --syslog is given)."""

    OPTIONS = [('--syslog', 'log all of your features, scenarios, and steps to the syslog')]
    # Load this extension only when the --syslog flag was passed.
    LOAD_IF = staticmethod((lambda config: config.syslog))
    LOAD_PRIORITY = 40

    def __init__(self):
        if (os.name == 'nt'):
            # The syslog module does not exist on Windows; bail out without
            # registering any hooks.
            sys.stdout.write('Using --syslog on Windows is not supported.\n')
            return
        # Imported lazily (and repeatedly below) because it is POSIX-only.
        import syslog
        before.all(self.syslog_writer_before_all)
        before.each_feature(self.syslog_writer_before_each_feature)
        before.each_scenario(self.syslog_writer_before_each_scenario)
        before.each_step(self.syslog_writer_before_each_step)
        after.all(self.syslog_writer_after_all)
        after.each_feature(self.syslog_writer_after_each_feature)
        after.each_scenario(self.syslog_writer_after_each_scenario)
        after.each_step(self.syslog_writer_after_each_step)

    def get_scenario_feature(self, scenario):
        """Return the Feature owning ``scenario`` (one level up for scenarios
        nested in outlines/loops)."""
        if (not isinstance(scenario.parent, Feature)):
            return scenario.parent.parent
        return scenario.parent

    def log(self, message):
        """Write one informational line to the syslog."""
        import syslog
        syslog.syslog(syslog.LOG_INFO, message)

    def syslog_writer_before_all(self, features, marker):
        import syslog
        syslog.openlog('radish')
        self.log('begin run {0}'.format(marker))

    def syslog_writer_after_all(self, features, marker):
        import syslog
        self.log('end run {0}'.format(marker))
        syslog.closelog()

    def syslog_writer_before_each_feature(self, feature):
        self.log('begin feature {0}:{1} {2}'.format(world.config.marker, feature.id, feature.sentence))

    def syslog_writer_after_each_feature(self, feature):
        self.log('end feature {0}:{1} {2}'.format(world.config.marker, feature.id, feature.sentence))

    def syslog_writer_before_each_scenario(self, scenario):
        self.log('begin scenario {0}:{1}.{2} {3}'.format(world.config.marker, self.get_scenario_feature(scenario).id, scenario.id, scenario.sentence))

    def syslog_writer_after_each_scenario(self, scenario):
        self.log('end scenario {0}:{1}.{2} {3}'.format(world.config.marker, self.get_scenario_feature(scenario).id, scenario.id, scenario.sentence))

    def syslog_writer_before_each_step(self, step):
        self.log('begin step {0}:{1}.{2}.{3} {4}'.format(world.config.marker, self.get_scenario_feature(step.parent).id, step.parent.id, step.id, step.sentence))

    def syslog_writer_after_each_step(self, step):
        # After a step we also log its resulting state (passed/failed/...).
        self.log('{0} step {1}:{2}.{3}.{4} {5}'.format(step.state, world.config.marker, self.get_scenario_feature(step.parent).id, step.parent.id, step.id, step.sentence))
def get_args_parser():
    """Build the CLI parser for classification training/testing runs."""
    arg_parser = argparse.ArgumentParser('Train and test network for classification task')
    # Paths and run mode.
    arg_parser.add_argument('--data_img', help='path to directory with subdirectories with images', type=str)
    arg_parser.add_argument('--out', help='path to main directory with checkpoints', type=str)
    arg_parser.add_argument('--mode', default='train', help='type of procedure: test or train', choices=['train', 'test'], type=str)
    arg_parser.add_argument('--name', default='test.png', help='path to save test images', type=str)
    arg_parser.add_argument('--num', help='number of images to display', default=5, type=int)
    # Hardware / optimization hyper-parameters.
    arg_parser.add_argument('--device', help='specify device to use', default='cuda:0', type=str)
    arg_parser.add_argument('--epochs', default=30, type=int)
    arg_parser.add_argument('--lr', default=0.001, type=float)
    arg_parser.add_argument('--momentum', default=0.9, type=float)
    # Experiment tracking.
    arg_parser.add_argument('--neptune', action='store_true', default=False, help='Launch experiment on neptune (if avail)')
    return arg_parser
class SECURITY_DESCRIPTOR():
    """Windows self-relative SECURITY_DESCRIPTOR (de)serializer.

    Layout: 1-byte Revision, 1-byte Sbz1, 2-byte Control, then four 4-byte
    little-endian offsets (Owner, Group, Sacl, Dacl) relative to the start of
    the structure; an offset of 0 means the section is absent.

    Bug fix: ``from_bytes`` and ``from_buffer`` take no ``self`` and are
    invoked on the class — their stripped ``@staticmethod`` decorators have
    been restored.
    """

    def __init__(self, object_type=None):
        self.Revision = None
        self.Sbz1 = None
        self.Control = None   # SE_SACL control flags (int-like)
        self.Owner = None     # SID or None
        self.Group = None     # SID or None
        self.Sacl = None      # ACL or None
        self.Dacl = None      # ACL or None
        self.object_type = object_type

    @staticmethod
    def from_bytes(data, object_type=None):
        """Parse a security descriptor from raw bytes."""
        return SECURITY_DESCRIPTOR.from_buffer(io.BytesIO(data), object_type)

    def to_bytes(self):
        """Serialize to self-relative binary form."""
        buff = io.BytesIO()
        self.to_buffer(buff)
        buff.seek(0)
        return buff.read()

    def to_buffer(self, buff):
        """Write the descriptor into ``buff`` at its current position."""
        start = buff.tell()
        buff_data = io.BytesIO()
        OffsetOwner = 0
        OffsetGroup = 0
        OffsetSacl = 0
        OffsetDacl = 0
        # Sections are packed after the fixed 20-byte header, in the order
        # Owner, Group, Sacl, Dacl; offsets are recorded as they are written.
        if (self.Owner is not None):
            buff_data.write(self.Owner.to_bytes())
            OffsetOwner = (start + 20)
        if (self.Group is not None):
            OffsetGroup = ((start + 20) + buff_data.tell())
            buff_data.write(self.Group.to_bytes())
        if (self.Sacl is not None):
            OffsetSacl = ((start + 20) + buff_data.tell())
            buff_data.write(self.Sacl.to_bytes())
        if (self.Dacl is not None):
            OffsetDacl = ((start + 20) + buff_data.tell())
            buff_data.write(self.Dacl.to_bytes())
        buff.write(self.Revision.to_bytes(1, 'little', signed=False))
        buff.write(self.Sbz1.to_bytes(1, 'little', signed=False))
        buff.write(self.Control.to_bytes(2, 'little', signed=False))
        buff.write(OffsetOwner.to_bytes(4, 'little', signed=False))
        buff.write(OffsetGroup.to_bytes(4, 'little', signed=False))
        buff.write(OffsetSacl.to_bytes(4, 'little', signed=False))
        buff.write(OffsetDacl.to_bytes(4, 'little', signed=False))
        buff_data.seek(0)
        buff.write(buff_data.read())

    @staticmethod
    def from_buffer(buff, object_type=None):
        """Parse a descriptor from a seekable buffer positioned at its start.

        NOTE(review): offsets are relative to the descriptor start, but the
        seeks below are absolute — correct only when the descriptor begins at
        buffer position 0; confirm against upstream usage.
        """
        sd = SECURITY_DESCRIPTOR(object_type)
        sd.Revision = int.from_bytes(buff.read(1), 'little', signed=False)
        sd.Sbz1 = int.from_bytes(buff.read(1), 'little', signed=False)
        sd.Control = SE_SACL(int.from_bytes(buff.read(2), 'little', signed=False))
        OffsetOwner = int.from_bytes(buff.read(4), 'little', signed=False)
        OffsetGroup = int.from_bytes(buff.read(4), 'little', signed=False)
        OffsetSacl = int.from_bytes(buff.read(4), 'little', signed=False)
        OffsetDacl = int.from_bytes(buff.read(4), 'little', signed=False)
        if (OffsetOwner > 0):
            buff.seek(OffsetOwner)
            sd.Owner = SID.from_buffer(buff)
        if (OffsetGroup > 0):
            buff.seek(OffsetGroup)
            sd.Group = SID.from_buffer(buff)
        if (OffsetSacl > 0):
            buff.seek(OffsetSacl)
            sd.Sacl = ACL.from_buffer(buff, object_type)
        if (OffsetDacl > 0):
            buff.seek(OffsetDacl)
            sd.Dacl = ACL.from_buffer(buff, object_type)
        return sd

    def to_ssdl(self, object_type=None):
        """Render the descriptor in SDDL text form."""
        t = ('O:' + self.Owner.to_ssdl())
        t += ('G:' + self.Group.to_ssdl())
        if (self.Sacl is not None):
            t += ('S:' + self.Sacl.to_ssdl())
        if (self.Dacl is not None):
            t += (('D:' + sddl_acl_control(self.Control)) + self.Dacl.to_ssdl(object_type))
        return t

    def __str__(self):
        t = ('Revision: %s, ' % self.Revision)
        t += ('Control: %s, ' % self.Control)
        t += ('Owner: %s, ' % self.Owner)
        t += ('Group: %s, ' % self.Group)
        t += ('Sacl: %s, ' % self.Sacl)
        t += ('Dacl: %s' % self.Dacl)
        return t
def _calculate_T_star(rb, frame, kde_map, constraint_map, uaux):
    """Return the generalized inertia torque T* of rigid body ``rb`` in
    ``frame``: ``-alpha·I - (omega x I)·omega`` about the mass center.

    Substitution maps are applied in the order: auxiliary speeds (zeroed),
    kinematic differential equations, then motion constraints.
    """
    central_inertia = (rb.inertia[0] - inertia_of_point_mass(rb.mass, rb.masscenter.pos_from(rb.inertia[1]), rb.frame))
    ang_acc = rb.frame.ang_acc_in(frame)
    ang_vel = rb.frame.ang_vel_in(frame)
    substitution_maps = []
    if (uaux is not None):
        # Auxiliary generalized speeds are set to zero.
        substitution_maps.append(dict(zip(uaux, ([0] * len(uaux)))))
    if (kde_map is not None):
        substitution_maps.append(kde_map)
    if (constraint_map is not None):
        substitution_maps.append(constraint_map)
    for mapping in substitution_maps:
        ang_acc = subs(ang_acc, mapping)
        ang_vel = subs(ang_vel, mapping)
    return ((- dot(ang_acc, central_inertia)) - dot(cross(ang_vel, central_inertia), ang_vel))
class ChainRecordAdapter(IBaseTrace):
    """Wraps an McBackend ``Chain`` behind PyMC's ``IBaseTrace`` interface.

    Object-valued sampler stats (which the backend cannot store natively) are
    round-tripped through pickle + base64 text.
    """

    def __init__(self, chain: mcb.Chain, point_fn: PointFunc, stats_bijection: StatsBijection) -> None:
        self.chain = chain.cmeta.chain_number
        self.varnames = [v.name for v in chain.rmeta.variables]
        stats_dtypes = {s.name: np.dtype(s.dtype) for s in chain.rmeta.sample_stats}
        # One dtype dict per sampler, keyed by the sampler-local stat name.
        self.sampler_vars = [{sname: stats_dtypes[fname] for (fname, sname, is_obj) in sstats} for sstats in stats_bijection._stat_groups]
        self._chain = chain
        self._point_fn = point_fn
        self._statsbj = stats_bijection
        super().__init__()

    def record(self, draw: Mapping[(str, np.ndarray)], stats: Sequence[Mapping[(str, Any)]]):
        """Append one draw (plus its sampler stats) to the backend chain."""
        values = self._point_fn(draw)
        value_dict = {n: v for (n, v) in zip(self.varnames, values)}
        stats_dict = self._statsbj.map(stats)
        # Serialize object-typed stats to ascii text so the backend can
        # store them as strings.
        for fname in self._statsbj.object_stats.keys():
            val_bytes = pickle.dumps(stats_dict[fname])
            val = base64.encodebytes(val_bytes).decode('ascii')
            stats_dict[fname] = np.array(val, dtype=str)
        return self._chain.append(value_dict, stats_dict)

    def __len__(self):
        return len(self._chain)

    def get_values(self, varname: str, burn=0, thin=1) -> np.ndarray:
        """Return the (burned/thinned) draws of one variable."""
        return self._chain.get_draws(varname, slice(burn, None, thin))

    def _get_stats(self, fname: str, slc: slice) -> np.ndarray:
        """Fetch one flat-named stat, de-pickling object stats (see record)."""
        values = self._chain.get_stats(fname, slc)
        if (fname in self._statsbj.object_stats):
            objs = []
            for v in values:
                enc = str(v).encode('ascii')
                str_ = base64.decodebytes(enc)
                obj = pickle.loads(str_)
                objs.append(obj)
            return np.array(objs, dtype=object)
        return values

    def get_sampler_stats(self, stat_name: str, sampler_idx: Optional[int]=None, burn=0, thin=1) -> np.ndarray:
        """Return a stat for one sampler, or stacked across all samplers that
        recorded it; raises KeyError when no sampler recorded it."""
        slc = slice(burn, None, thin)
        # A single-sampler chain needs no explicit sampler index.
        if ((sampler_idx is None) and (self._statsbj.n_samplers == 1)):
            sampler_idx = 0
        if (sampler_idx is not None):
            return self._get_stats(flat_statname(sampler_idx, stat_name), slc)
        # No index given: gather the stat from every sampler that has it.
        stats_dict = {stat.name: self._get_stats(stat.name, slc) for stat in self._chain.rmeta.sample_stats if (stat_name in stat.name)}
        if (not stats_dict):
            raise KeyError(f"No stat '{stat_name}' was recorded.")
        stats_list = self._statsbj.rmap(stats_dict)
        stats_arrays = []
        is_ragged = False
        for sd in stats_list:
            if (not sd):
                # This sampler did not record the stat.
                is_ragged = True
                continue
            else:
                stats_arrays.append(tuple(sd.values())[0])
        if is_ragged:
            _log.debug("Stat '%s' was not recorded by all samplers.", stat_name)
        if (len(stats_arrays) == 1):
            return stats_arrays[0]
        return np.array(stats_arrays).T

    def _slice(self, idx: slice) -> 'IBaseTrace':
        """Return a new adapter over an in-memory copy of the sliced draws."""
        (start, stop, step) = idx.indices(len(self))
        indices = np.arange(start, stop, step)
        nchain = mcb.backends.numpy.NumPyChain(self._chain.cmeta, self._chain.rmeta, preallocate=len(indices))
        vnames = [v.name for v in nchain.variables.values()]
        snames = [s.name for s in nchain.sample_stats.values()]
        for i in indices:
            draw = self._chain.get_draws_at(i, var_names=vnames)
            stats = self._chain.get_stats_at(i, stat_names=snames)
            nchain.append(draw, stats)
        return ChainRecordAdapter(nchain, self._point_fn, self._statsbj)

    def point(self, idx: int) -> Dict[(str, np.ndarray)]:
        """Return all variable values of the draw at ``idx``."""
        return self._chain.get_draws_at(idx, [v.name for v in self._chain.variables.values()])
class BasicBlock(CNNBlockBase):
    """ResNet basic residual block: two 3x3 convs plus an (optionally projected) shortcut."""

    def __init__(self, in_channels, out_channels, *, stride=1, norm='BN'):
        super().__init__(in_channels, out_channels, stride)
        # A 1x1 projection is only required when the channel count changes;
        # otherwise the input is reused as-is for the residual connection.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        self.conv2 = Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        # MSRA (He) initialization for every conv that exists.
        for conv in (self.conv1, self.conv2, self.shortcut):
            if conv is not None:
                weight_init.c2_msra_fill(conv)

    def forward(self, x):
        out = F.relu_(self.conv1(x))
        out = self.conv2(out)
        identity = self.shortcut(x) if self.shortcut is not None else x
        out += identity
        return F.relu_(out)
class Effect4089(BaseEffect):
    """Remote shield booster multiplier: scales 'shieldBonus' on shield-emission modules."""

    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE(review): defined without `self`, matching the surrounding effect
    # classes — presumably invoked unbound/static by the effect framework.
    def handler(fit, module, context, projectionRange, **kwargs):
        # Applies to any module requiring (capital) shield emission skills.
        def affects(mod):
            item = mod.item
            return item.requiresSkill('Shield Emission Systems') or item.requiresSkill('Capital Shield Emission Systems')

        fit.modules.filteredItemMultiply(
            affects,
            'shieldBonus',
            module.getModifiedItemAttr('shieldBonusMultiplierRemote'),
            stackingPenalties=True,
            penaltyGroup='postMul',
            **kwargs,
        )
class Readable(EvscaperoomObject):
    """An object the player can read once its read-flag is set."""

    read_flag = 'readable'
    # Whether the object is readable right from creation.
    start_readable = True

    def at_object_creation(self):
        super().at_object_creation()
        if self.start_readable:
            self.set_flag(self.read_flag)

    def at_focus_read(self, caller, **kwargs):
        """Dispatch to at_read/at_cannot_read based on the read-flag state."""
        # A None read_flag means the object is always readable.
        if self.read_flag is not None and not self.check_flag(self.read_flag):
            self.at_cannot_read(caller)
            return
        self.at_read(caller)

    def at_read(self, caller, *args, **kwargs):
        """Hook called when reading succeeds."""
        self.msg_char(caller, f'You read from *{self.key}.')

    def at_cannot_read(self, caller, *args, **kwargs):
        """Hook called when the object cannot (yet) be read."""
        self.msg_char(caller, 'You cannot understand a thing!')
def test_update_error_questionset_page(db):
    """Updating a question must fail validation while its questionset's page is locked."""
    question = Question.objects.exclude(questionsets=None).first()
    page = question.questionsets.first().pages.first()
    # Lock the page so the validator has something to reject.
    page.locked = True
    page.save()
    with pytest.raises(ValidationError):
        QuestionLockedValidator(question)({'locked': False})
class ClassBalancedSampler(Sampler):
    """Sampler that orders dataset indices so class labels stay evenly interleaved.

    The label sequence is built greedily: at each position the class whose
    addition keeps the running label distribution closest to the dataset's
    overall class distribution is chosen.  Each class-label slot is then
    replaced by an actual dataset index belonging to that class.

    :param data_source: dataset whose ``labels`` attribute yields tuples with
        the class label at position 2.
    :param doShuffle: shuffle the per-class index lists before assignment.
    :param seed: seed for the sampler's private RNG.
    """

    def __init__(self, data_source, doShuffle=False, seed=31426):
        self.data_source = data_source
        self.seed = seed
        # Private RNG so the ordering is reproducible and independent of
        # global numpy state.
        self.rng = RandomState(self.seed)
        labels = [l[2] for l in self.data_source.labels]
        classes = list(set(labels))
        classN = Counter(labels)
        classProb = [classN[i] for i in classes]
        classProb = np.array(classProb) / sum(classProb)
        availableClasses = copy.deepcopy(classes)
        labelSeq = []
        for i in range(len(labels)):
            labelSeq.append(None)
            # Greedy choice: pick the class minimizing the L1 distance between
            # the running distribution and the target distribution.
            loss = float('inf')
            best = None
            for j in availableClasses:
                labelSeq[i] = j
                lossNow = self.probDiff(self.calculateProb(labelSeq, classes), classProb)
                if lossNow < loss:
                    loss = lossNow
                    best = j
            labelSeq[i] = best
            classN[best] -= 1
            if classN[best] == 0:
                availableClasses.remove(best)
        labels = np.array(labels)
        classIdx = [np.argwhere(labels == clas) for clas in classes]
        labelSeq = np.array(labelSeq)
        # Replace each class-label slot with a concrete dataset index.
        # NOTE(review): if an already-assigned index value collides with a
        # later class label, the equality test below could alias — holds only
        # when labels and indices don't overlap; verify for your label space.
        for (i, clas) in enumerate(classes):
            x = classIdx[i]
            if doShuffle:
                # BUGFIX: use the seeded private RNG (was np.random.shuffle),
                # so `seed` actually controls the shuffle.
                self.rng.shuffle(x)
            labelSeq[np.argwhere(labelSeq == clas)] = x
        self.idxList = list(labelSeq)

    def calculateProb(self, elements, classes=None):
        """Return the empirical distribution of ``elements`` over ``classes``."""
        classes = classes or list(set(elements))
        prob = np.zeros(np.size(classes))
        classC = Counter(elements)
        className = list(classC.keys())
        classN = np.array(list(classC.values()))
        classN = classN / sum(classN)
        for (i, key) in enumerate(className):
            if key in classes:
                prob[classes.index(key)] = classN[i]
        return prob

    def probDiff(self, probA, probB):
        """L1 distance between two probability vectors."""
        return sum(abs(probB - probA))

    def __iter__(self):
        return iter(self.idxList)

    def __len__(self):
        return len(self.idxList)
class FrozenBatchNorm(nn.Module):
    """BatchNorm2d whose batch statistics and affine parameters are fixed.

    All parameters are registered as buffers, so they are saved/loaded with
    the state dict but never updated by the optimizer.  Since version 3,
    ``running_var`` is stored with ``eps`` pre-subtracted and re-added in
    ``forward`` so the fused scale/bias path matches ``F.batch_norm`` exactly.
    """

    _version = 3

    def __init__(self, num_features, eps=1e-05, **kwargs):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.register_buffer('weight', torch.ones(num_features))
        self.register_buffer('bias', torch.zeros(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        # eps pre-subtracted here, re-added in forward (version-3 convention).
        self.register_buffer('running_var', (torch.ones(num_features) - eps))

    def forward(self, x):
        if x.requires_grad:
            # Fused affine form: computing scale/bias once is cheaper for
            # autograd than tracing through F.batch_norm's buffer handling.
            scale = (self.weight * (self.running_var + self.eps).rsqrt())
            bias = (self.bias - (self.running_mean * scale))
            scale = scale.reshape(1, (- 1), 1, 1)
            bias = bias.reshape(1, (- 1), 1, 1)
            return ((x * scale) + bias)
        else:
            return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Upgrade checkpoints produced by older versions of this class."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            # Version <2 checkpoints lack running stats; supply identity stats.
            if ((prefix + 'running_mean') not in state_dict):
                state_dict[(prefix + 'running_mean')] = torch.zeros_like(self.running_mean)
            if ((prefix + 'running_var') not in state_dict):
                state_dict[(prefix + 'running_var')] = torch.ones_like(self.running_var)
        if ((version is not None) and (version < 3)):
            logger = logging.getLogger(__name__)
            logger.info('FrozenBatchNorm {} is upgraded to version 3.'.format(prefix.rstrip('.')))
            # Version 3 stores running_var with eps pre-subtracted.
            state_dict[(prefix + 'running_var')] -= self.eps
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def __repr__(self):
        return 'FrozenBatchNorm2d(num_features={}, eps={})'.format(self.num_features, self.eps)

    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """Recursively replace BatchNorm2d/SyncBatchNorm in ``module`` with frozen copies.

        BUGFIX: must be a @classmethod — it recurses via
        ``cls.convert_frozen_batchnorm(child)`` and is called externally as
        ``FrozenBatchNorm.convert_frozen_batchnorm(model)``, both of which
        require ``cls`` to be bound to the class.

        Returns the converted module (``module`` itself when nothing matched).
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            for (name, child) in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if (new_child is not child):
                    res.add_module(name, new_child)
        return res
def make_fake_scene(content_dict, daskify=False, area=True, common_attrs=None):
    """Build a Scene from ``content_dict``, attaching a fake area per dataset when requested."""
    base_attrs = {} if common_attrs is None else common_attrs
    scene = Scene()
    for did, arr in content_dict.items():
        # Each dataset gets its own copy of the shared attrs.
        attrs = base_attrs.copy()
        if area:
            attrs['area'] = _get_fake_scene_area(arr, area)
        scene[did] = _get_did_for_fake_scene(area, arr, attrs, daskify)
    return scene
def rtn_sem_wait(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Model libc sem_wait(): decrement the semaphore if positive, else mark it locked.

    Always returns 0, mirroring a successful sem_wait call.
    """
    logger.debug('sem_wait hooked')
    sem_addr = pstate.get_argument_value(0)
    count = pstate.memory.read_ptr(sem_addr)
    if count > 0:
        # A slot is available: take it and record the unlocked state.
        logger.debug('semaphore still not locked')
        pstate.memory.write_ptr(sem_addr, count - 1)
        pstate.semaphore_locked = False
    else:
        logger.debug('semaphore locked')
        pstate.semaphore_locked = True
    return 0
class L2DisplacementYawReward(Reward):
    """Reward = negative (clipped) L2 displacement error, optionally minus a weighted yaw error.

    Metrics are computed per frame by slicing the simulation output down to a
    single timestep before evaluating the metric set.
    """

    def __init__(self, reward_prefix: str='L2DisplacementYaw', metric_set: Optional[L5MetricSet]=None, enable_clip: bool=True, rew_clip_thresh: float=15.0, use_yaw: Optional[bool]=True, yaw_weight: Optional[float]=1.0) -> None:
        """:param metric_set: must provide 'displacement_error_l2' and 'yaw_error_closest_angle';
        defaults to an L2DisplacementYawMetricSet.
        :raises RuntimeError: when either required metric is missing.
        """
        self.reward_prefix = reward_prefix
        self.metric_set = (metric_set if (metric_set is not None) else L2DisplacementYawMetricSet())
        # Fail fast if the metric set cannot supply the two metrics read in get_reward.
        if ('yaw_error_closest_angle' not in self.metric_set.evaluation_plan.metrics_dict()):
            raise RuntimeError("'yaw_error_closest_angle' missing in metric set")
        if ('displacement_error_l2' not in self.metric_set.evaluation_plan.metrics_dict()):
            raise RuntimeError("'displacement_error_l2' missing in metric set")
        self.use_yaw = use_yaw
        self.yaw_weight = yaw_weight
        self.enable_clip = enable_clip
        self.rew_clip_thresh = rew_clip_thresh

    def reset(self) -> None:
        """Reset the underlying metric set between episodes."""
        self.metric_set.reset()

    @staticmethod
    def slice_simulated_output(index: int, simulated_outputs: List[SimulationOutputCLE]) -> List[SimulationOutputCLE]:
        """Trim the first simulation output (in place) down to the single frame ``index``.

        BUGFIX: declared @staticmethod — the method takes no ``self`` but is
        invoked as ``self.slice_simulated_output(...)`` in get_reward, which
        would otherwise pass the instance as ``index``.
        """
        simulated_outputs[0].recorded_ego_states = simulated_outputs[0].recorded_ego_states[index:(index + 1)]
        simulated_outputs[0].simulated_ego_states = simulated_outputs[0].simulated_ego_states[index:(index + 1)]
        return simulated_outputs

    def get_reward(self, frame_index: int, simulated_outputs: List[SimulationOutputCLE]) -> Dict[(str, float)]:
        """Compute per-frame reward components: 'total', 'distance' and 'yaw'."""
        scene_id = simulated_outputs[0].scene_id
        # +1: the metrics compare the state reached *after* acting at frame_index.
        simulated_outputs = self.slice_simulated_output((frame_index + 1), simulated_outputs)
        self.metric_set.evaluate(simulated_outputs)
        scene_metrics = self.metric_set.evaluator.scene_metric_results[scene_id]
        dist_error = scene_metrics['displacement_error_l2']
        yaw_error = (self.yaw_weight * scene_metrics['yaw_error_closest_angle'])
        dist_reward = float((- dist_error.item()))
        if self.enable_clip:
            # Bound large displacement penalties to keep the reward scale stable.
            dist_reward = max((- self.rew_clip_thresh), (- dist_error.item()))
        yaw_reward = 0.0
        if self.use_yaw:
            yaw_reward -= yaw_error.item()
        total_reward = (dist_reward + yaw_reward)
        reward_dict = {'total': total_reward, 'distance': dist_reward, 'yaw': yaw_reward}
        return reward_dict
class LDC(nn.Module):
    """Lightweight Dense CNN edge detector: five multi-scale side outputs fused by CoFusion."""

    def __init__(self):
        super(LDC, self).__init__()
        self.block_1 = DoubleConvBlock(3, 16, 16, stride=2)
        self.block_2 = DoubleConvBlock(16, 32, use_act=False)
        self.dblock_3 = _DenseBlock(2, 32, 64)
        self.dblock_4 = _DenseBlock(3, 64, 96)
        self.dblock_5 = _DenseBlock(3, 96, 32)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Side ("skip") projections matching channels/strides between stages.
        self.side_1 = SingleConvBlock(16, 32, 2)
        self.side_2 = SingleConvBlock(32, 64, 2)
        self.side_3 = SingleConvBlock(64, 96, 1)
        # Projections preparing the second input of each dense block.
        self.pre_dense_2 = SingleConvBlock(32, 64, 2)
        self.pre_dense_3 = SingleConvBlock(32, 64, 1)
        self.pre_dense_4 = SingleConvBlock(64, 96, 1)
        self.pre_dense_5 = SingleConvBlock(96, 32, 1)
        # Upsampling heads producing one single-channel edge map per stage.
        self.up_block_1 = UpConvBlock(16, 1)
        self.up_block_2 = UpConvBlock(32, 1)
        self.up_block_3 = UpConvBlock(64, 2)
        self.up_block_4 = UpConvBlock(96, 3)
        self.up_block_5 = UpConvBlock(32, 3)
        # Learned fusion of the five side outputs.
        self.block_cat = CoFusion(5, 5)
        self.apply(weight_init)

    def slice(self, tensor, slice_shape):
        """Resize ``tensor`` to ``slice_shape`` (height, width) via bicubic interpolation when needed."""
        t_shape = tensor.shape
        (height, width) = slice_shape
        # Only the last (width) dimension is compared — assumes H and W
        # mismatch together.
        if (t_shape[(- 1)] != slice_shape[(- 1)]):
            new_tensor = F.interpolate(tensor, size=(height, width), mode='bicubic', align_corners=False)
        else:
            new_tensor = tensor
        return new_tensor

    def forward(self, x):
        # Expects NCHW input.
        assert (x.ndim == 4), x.shape
        # Stage 1
        block_1 = self.block_1(x)
        block_1_side = self.side_1(block_1)
        # Stage 2: downsample and merge with the stage-1 side path.
        block_2 = self.block_2(block_1)
        block_2_down = self.maxpool(block_2)
        block_2_add = (block_2_down + block_1_side)
        block_2_side = self.side_2(block_2_add)
        # Stage 3 (dense block)
        block_3_pre_dense = self.pre_dense_3(block_2_down)
        (block_3, _) = self.dblock_3([block_2_add, block_3_pre_dense])
        block_3_down = self.maxpool(block_3)
        block_3_add = (block_3_down + block_2_side)
        block_3_side = self.side_3(block_3_add)
        # Stage 4 (dense block), fed by stage-2 and stage-3 features.
        block_2_resize_half = self.pre_dense_2(block_2_down)
        block_4_pre_dense = self.pre_dense_4((block_3_down + block_2_resize_half))
        (block_4, _) = self.dblock_4([block_3_add, block_4_pre_dense])
        block_4_add = (block_4 + block_3_side)
        # Stage 5 (dense block)
        block_5_pre_dense = self.pre_dense_5(block_4)
        (block_5, _) = self.dblock_5([block_4_add, block_5_pre_dense])
        # Per-stage edge maps; the fused map is appended as the last result.
        out_1 = self.up_block_1(block_1)
        out_2 = self.up_block_2(block_2)
        out_3 = self.up_block_3(block_3)
        out_4 = self.up_block_4(block_4)
        out_5 = self.up_block_5(block_5)
        results = [out_1, out_2, out_3, out_4, out_5]
        block_cat = torch.cat(results, dim=1)
        block_cat = self.block_cat(block_cat)
        results.append(block_cat)
        return results
def main():
    """Entry point: build config, then either train+test or test only."""
    args = args_dict(get_config())
    print(args.ex_name)
    print(vars(args))
    seed_init()
    if args.action == 'train':
        common = {'matching': args.dataset['matching'], 'sample_rate': 16000}
        segment_samples = int(args.setting['segment'] * args.setting['sample_rate'])
        stride_samples = int(args.setting['stride'] * args.setting['sample_rate'])
        train_dataset = TrainDataset(
            args.dataset['train'],
            length=segment_samples,
            stride=stride_samples,
            valid=args.dataset['val'],
            pad=args.setting['pad'],
            **common,
        )
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_worker)
        val_dataset = ValDataset(args.dataset['train'], valid=args.dataset['val'], **common)
        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_worker)
        Trainer({'train': train_loader, 'val': val_loader}, args).train()
    # Always finish with a test pass, whether or not we trained.
    tester = Tester(args)
    print('---Test score---')
    tester.test()
class Yang2017(DFN):
    """DFN model with SEI growth and irreversible lithium plating, per Yang et al. (2017)."""

    def __init__(self, options=None, name='Yang2017', build=True):
        """:param options: extra model options merged with (but not overriding)
        the Yang2017-defining options below.  Previously any ``options``
        argument was silently discarded.
        :param name: model name.
        :param build: accepted for API compatibility; DFN builds on construction.
        """
        # Options that define the Yang2017 model; these always take precedence
        # so the named model cannot be altered into something else.
        model_options = {'SEI': ('ec reaction limited', 'none'), 'SEI film resistance': 'distributed', 'SEI porosity change': 'true', 'lithium plating': ('irreversible', 'none'), 'lithium plating porosity change': 'true'}
        options = {**(options or {}), **model_options}
        super().__init__(options=options, name=name)
        pybamm.citations.register('Yang2017')
class IterationTimeLogger(Callback):
    """Callback that writes moving-average per-step iteration times to TensorBoard."""

    # Only rank 0 holds a writer; all other ranks leave this as None.
    _writer: Optional[SummaryWriter] = None

    def __init__(self, logger: Union[(TensorBoardLogger, SummaryWriter)], moving_avg_window: int=1, log_every_n_steps: int=1) -> None:
        if isinstance(logger, TensorBoardLogger):
            logger = logger.writer
        if get_global_rank() == 0:
            self._writer = none_throws(logger, 'TensorBoardLogger.writer should not be None')
        self.moving_avg_window = moving_avg_window
        self.log_every_n_steps = log_every_n_steps

    def _log_step_metrics(self, writer: SummaryWriter, metric_label: str, iteration_timer: TimerProtocol, step_logging_for: int) -> None:
        """Write the windowed average of ``metric_label`` for the given step, honoring the cadence."""
        if step_logging_for % self.log_every_n_steps != 0:
            return
        human_metric_names = {
            'train_iteration_time': 'Train Iteration Time (seconds)',
            'eval_iteration_time': 'Eval Iteration Time (seconds)',
            'predict_iteration_time': 'Prediction Iteration Time (seconds)',
        }
        durations = iteration_timer.recorded_durations.get(metric_label, [])
        if not durations:
            return
        window = durations[-self.moving_avg_window:]
        writer.add_scalar(human_metric_names[metric_label], sum(window) / len(window), step_logging_for)

    def on_train_step_end(self, state: State, unit: TTrainUnit) -> None:
        timer = none_throws(state.train_state).iteration_timer
        writer = self._writer
        if writer is not None:
            self._log_step_metrics(writer, 'train_iteration_time', timer, unit.train_progress.num_steps_completed)

    def on_eval_step_end(self, state: State, unit: TEvalUnit) -> None:
        timer = none_throws(state.eval_state).iteration_timer
        writer = self._writer
        if writer is not None:
            self._log_step_metrics(writer, 'eval_iteration_time', timer, unit.eval_progress.num_steps_completed)

    def on_predict_step_end(self, state: State, unit: TPredictUnit) -> None:
        timer = none_throws(state.predict_state).iteration_timer
        writer = self._writer
        if writer is not None:
            self._log_step_metrics(writer, 'predict_iteration_time', timer, unit.predict_progress.num_steps_completed)
class IBNbResUnit(nn.Module):
    """ResNet bottleneck unit with an optional trailing instance norm (IBN-b style)."""

    def __init__(self, in_channels, out_channels, stride, use_inst_norm):
        super(IBNbResUnit, self).__init__()
        self.use_inst_norm = use_inst_norm
        # The identity path needs a 1x1 projection whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=False)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        if self.use_inst_norm:
            self.inst_norm = nn.InstanceNorm2d(num_features=out_channels, affine=True)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x) + identity
        if self.use_inst_norm:
            out = self.inst_norm(out)
        return self.activ(out)
class _HasAttrGuardMeta(type):
    """Metaclass enabling ``HasAttrGuard[var_name, attr_name, attr_type]`` subscription."""

    def __getitem__(self, params: Tuple[(str, str, object)]) -> 'HasAttrGuard':
        # Must be subscripted with exactly a 3-tuple; the isinstance check
        # short-circuits before len() so non-sized arguments are handled too.
        if not (isinstance(params, tuple) and len(params) == 3):
            raise TypeError('HasAttrGuard[...] should be instantiated with three arguments (a variable name, an attribute name, and a type).')
        (var_name, attr_name, attr_type) = params
        if not isinstance(var_name, str):
            raise TypeError('The first argument to HasAttrGuard must be a string')
        return HasAttrGuard(var_name, attr_name, attr_type)
(all_backends)
# NOTE(review): the bare "(all_backends)" above is almost certainly a decorator
# whose leading marker (e.g. "@parametrize") was lost in extraction; as written
# it is a no-op expression statement — confirm against the original test file.
def test_general(backend):
    """End-to-end check: sqrt of an SPD operator matches the known square-root factor."""
    xnp = get_xnp(backend)
    dtype = xnp.float32
    # Synthetic SPD matrix with a controlled, geometrically decaying spectrum.
    diag = generate_spectrum(coeff=0.75, scale=1.0, size=15)
    A = xnp.array(generate_pd_from_diag(diag, dtype=diag.dtype, seed=21), dtype=dtype, device=None)
    A = SelfAdjoint(lazify(A))
    # Ground truth: the same construction applied to the square-rooted spectrum.
    soln = xnp.array(generate_pd_from_diag((diag ** 0.5), dtype=diag.dtype, seed=21), dtype=dtype, device=None)
    approx = sqrt(A, Auto()).to_dense()
    rel_error = relative_error(soln, approx)
    assert (rel_error < _tol)
class MultiViewDataset(Dataset):
    """Multi-behavior recommendation dataset (buy/pv/cart/fav sequences) for MMCLR.

    Loads a pickled per-user behavior log and produces masked buy sequences
    plus pairs of auxiliary-behavior sequences for contrastive learning.
    ``eval`` selects the split: None (train), 'vaild' (sic, validation),
    'test', 'cold_start' or 'uncold_start'.
    """

    def __init__(self, args, neg_sample_num=1, root_dir='MMCLR/dataset/TIMA/UserBehavior.10%.seq.splited.pickle', eval=None):
        super(MultiViewDataset, self).__init__()
        self.root_dir = root_dir
        self.eval = eval
        self.args = args
        self.item_set = set(self.args.item_ids)
        # Counter of skipped users whose target item is outside item_set.
        self.count = 0
        print(root_dir)
        # NOTE(review): `eavl` duplicates `eval` (typo kept for compatibility);
        # both are read elsewhere in this class.
        self.eavl = eval
        if (eval is None):
            self.data = self.read_data(self.root_dir)
        else:
            self.data = self.read_data_eval(self.root_dir)
        self.rng = random.Random(args.random_seed)
        self.neg_sample_num = neg_sample_num
        self.raw_data = self.make_raw_data(root_dir)
        self.hardSet = self.make_hard_sample_item_set(root_dir)

    def make_hard_sample_item_set(self, file):
        """Collect pv/cart items seen only *after* each user's last buy (hard positives)."""
        f = open(file, 'rb')
        all_seq = {}
        b_id_set = []
        all_info = pickle.load(f)
        for (user, user_info) in tqdm(all_info.items()):
            buy_ids = user_info['buy']['item_id']
            buy_times = user_info['buy']['times']
            one_seq = {'user_id': user}
            bs = ['pv', 'cart']
            # Split each behavior at the time of the last buy.
            time = buy_times[(- 1)]
            for b in bs:
                if (len(user_info[b]['item_id']) == 0):
                    continue
                b_ids = np.array(user_info[b]['item_id'])
                b_times = np.array(user_info[b]['times'])
                index = (b_times > time)
                pos_b_ids = b_ids[index]
                b_ids = b_ids[(~ index)]
                # Keep only items not already interacted with before the cut.
                pos_b_ids = [later_item for later_item in pos_b_ids if (later_item not in b_ids)]
                b_id_set.extend(pos_b_ids)
        print(len(set(b_id_set)))
        return set(b_id_set)

    def make_raw_data(self, file):
        """Build a user->sequence dict splitting each behavior at the relevant buy time."""
        f = open(file, 'rb')
        all_seq = {}
        all_info = pickle.load(f)
        for (user, user_info) in tqdm(all_info.items()):
            buy_ids = user_info['buy']['item_id']
            buy_times = user_info['buy']['times']
            one_seq = {'user_id': user}
            bs = ['fav', 'pv', 'cart']
            # Training needs at least two buys (one is held out as target).
            if ((len(buy_ids) == 1) and (self.eavl is None)):
                continue
            if (self.eavl is None):
                time = buy_times[(- 2)]
                buy_sub_item_ids = buy_ids[:(- 1)]
            else:
                time = buy_times[(- 1)]
                buy_sub_item_ids = buy_ids
            for b in bs:
                if (b == 'buy'):
                    continue
                if ((b not in user_info) or (len(user_info[b]['item_id']) == 0)):
                    b_ids = []
                    pos_b_ids = []
                else:
                    b_ids = np.array(user_info[b]['item_id'])
                    b_times = np.array(user_info[b]['times'])
                    # "pos" items happen after the cut time; history before it.
                    index = (b_times > time)
                    pos_b_ids = b_ids[index].tolist()
                    index = (b_times <= time)
                    b_ids = b_ids[index].tolist()
                one_seq[b] = b_ids
                one_seq[('pos' + b)] = pos_b_ids
            one_seq['buy'] = buy_sub_item_ids
            all_seq[user] = one_seq
        return all_seq

    def read_data_eval(self, file):
        """Build evaluation samples; the split-specific branch picks the cut time and filters users."""
        f = open(file, 'rb')
        all_info = pickle.load(f)
        f.close()
        all_seq = []
        for (user, user_info) in tqdm(all_info.items()):
            buy_ids = user_info['buy']['item_id']
            buy_times = user_info['buy']['times']
            one_seq = {'user_id': user}
            one_seq['posbuy'] = [buy_ids[(- 1)]]
            # NOTE: 'vaild' is the validation split (typo is part of the API).
            if (self.eval == 'vaild'):
                # Validation target is the second-to-last buy; must be a known,
                # previously-unbought item.
                if (buy_ids[(- 2)] not in self.item_set):
                    continue
                if (buy_ids[(- 2)] in buy_ids[:(- 2)]):
                    continue
                time = buy_times[(- 2)]
                buy_sub_item_ids = buy_ids
                buy_sub_times = buy_times
            elif (self.eval == 'test'):
                if (buy_ids[(- 1)] not in self.item_set):
                    self.count += 1
                    continue
                if (buy_ids[(- 1)] in buy_ids[:(- 1)]):
                    continue
                time = buy_times[(- 1)]
                buy_sub_item_ids = buy_ids
                buy_sub_times = buy_times
            elif (self.eval == 'cold_start'):
                # Cold-start users: short histories only (<= 3 buys).
                if (buy_ids[(- 1)] not in self.item_set):
                    self.count += 1
                    continue
                if (buy_ids[(- 1)] in buy_ids[:(- 1)]):
                    continue
                if (len(buy_ids) > 3):
                    continue
                time = buy_times[(- 1)]
                buy_sub_item_ids = buy_ids
                buy_sub_times = buy_times
            elif (self.eval == 'uncold_start'):
                # Non-cold-start users: longer histories only (> 3 buys).
                if (buy_ids[(- 1)] not in self.item_set):
                    self.count += 1
                    continue
                if (buy_ids[(- 1)] in buy_ids[:(- 1)]):
                    continue
                if (len(buy_ids) <= 3):
                    continue
                time = buy_times[(- 1)]
                buy_sub_item_ids = buy_ids
                buy_sub_times = buy_times
            else:
                time = buy_times[(- 2)]
                buy_sub_item_ids = buy_ids
                buy_sub_times = buy_times
            bs = ['fav', 'pv', 'cart']
            for b in bs:
                if (b == 'buy'):
                    continue
                if ((b not in user_info) or (len(user_info[b]['item_id']) == 0)):
                    b_ids = []
                    pos_b_ids = []
                else:
                    b_ids = np.array(user_info[b]['item_id'])
                    b_times = np.array(user_info[b]['times'])
                    index = (b_times > time)
                    pos_b_ids = b_ids[index].tolist()
                    index = (b_times <= time)
                    b_ids = b_ids[index].tolist()
                # Future items already in the history are not positives.
                pos_b_ids = [later_item for later_item in pos_b_ids if (later_item not in b_ids)]
                one_seq[b] = b_ids
                one_seq[('pos' + b)] = pos_b_ids
            # Cross-behavior de-duplication of positive pv/cart items.
            one_seq['pospv'] = [later_item for later_item in one_seq['pospv'] if (later_item not in one_seq['cart'])]
            one_seq['poscart'] = [later_item for later_item in one_seq['poscart'] if (later_item not in one_seq['pv'])]
            one_seq['buy'] = buy_sub_item_ids
            # Drop users with a single buy and no pv/cart history.
            if ((len(buy_sub_item_ids) == 1) and (len(one_seq['cart']) == 0) and (len(one_seq['pv']) == 0)):
                continue
            all_seq.append(one_seq)
        return all_seq

    def read_data(self, file):
        """Build training samples: one sample per non-final buy of each user."""
        f = open(file, 'rb')
        all_info = pickle.load(f)
        f.close()
        all_seq = []
        for (user, user_info) in tqdm(all_info.items()):
            buy_ids = user_info['buy']['item_id']
            buy_times = user_info['buy']['times']
            for (i, item_id) in enumerate(buy_ids[:(- 1)]):
                if (item_id not in self.item_set):
                    print(user, item_id)
                    continue
                one_seq = {'user_id': user}
                # Window between this buy and the next bounds the positives.
                time = buy_times[i]
                next_time = buy_times[(i + 1)]
                buy_sub_item_ids = buy_ids[:(i + 1)]
                one_seq['posbuy'] = buy_ids[(i + 1):(- 1)]
                buy_sub_times = buy_times[:(i + 1)]
                bs = ['fav', 'pv', 'cart']
                for b in bs:
                    if (b == 'buy'):
                        continue
                    if (b not in user_info):
                        b_ids = []
                        pos_b_ids = []
                    else:
                        b_ids = np.array(user_info[b]['item_id'])
                        pos_b_ids = np.array([])
                        if (len(b_ids) != 0):
                            b_times = np.array(user_info[b]['times'])
                            # Positives fall strictly inside (time, next_time].
                            index = ((b_times > time) & (b_times <= next_time))
                            pos_b_ids = b_ids[index]
                            index = (b_times <= time)
                            b_ids = b_ids[index]
                        b_ids = b_ids.tolist()
                        pos_b_ids = [later_item for later_item in pos_b_ids if (later_item not in b_ids)]
                    one_seq[b] = b_ids
                    one_seq[('pos' + b)] = pos_b_ids
                one_seq['buy'] = buy_sub_item_ids
                if ((len(buy_sub_item_ids) == 1) and (len(one_seq['cart']) == 0) and (len(one_seq['pv']) == 0)):
                    continue
                all_seq.append(one_seq)
        return all_seq

    def __len__(self):
        return len(self.data)

    def encode_behavior(self, behvaior):
        """Map a behavior name to its integer code."""
        be2code = {'pv': 1, 'cart': 2, 'fav': 3, 'buy': 4}
        return be2code[behvaior]

    def mask_seq(self, mask_item):
        """BERT-style masking of a buy sequence (80% mask token / 10% random / 10% keep).

        Returns (masked sequence, positive sequence, negative sequence, #masked).
        The final position is always masked and given a sampled negative.
        """
        masked_item_seq = []
        negtive_seq = []
        mask_num = 1
        for i in mask_item[:(- 1)]:
            prob = self.rng.random()
            if (prob < self.args.mask_prob):
                # Rescale prob into [0,1) to pick among the three mask modes.
                prob = (prob / self.args.mask_prob)
                if (prob < 0.8):
                    mask_num += 1
                    masked_item_seq.append(self.args.mask_id)
                    neg = tools.neg_sample(set(mask_item), self.args.item_ids, self.neg_sample_num)
                    negtive_seq.append(neg[0])
                elif (prob < 0.9):
                    # Replace with a random item id instead of the mask token.
                    mask_num += 1
                    masked_item_seq.append(self.rng.randint(1, (self.args.item_size - 4)))
                    neg = tools.neg_sample(set(mask_item), self.args.item_ids, self.neg_sample_num)
                    negtive_seq.append(neg[0])
                else:
                    masked_item_seq.append(i)
                    negtive_seq.append(i)
            else:
                masked_item_seq.append(i)
                negtive_seq.append(i)
        pos_seq = mask_item
        # Final target position: always masked, with one sampled negative.
        negtive_seq.append(tools.neg_sample(set(mask_item), self.args.item_ids, self.neg_sample_num)[0])
        masked_item_seq.append(self.args.mask_id)
        return (masked_item_seq, pos_seq, negtive_seq, mask_num)

    def __getitem__(self, index):
        """Assemble one training/eval sample as a tuple of LongTensors."""
        user_id = self.data[index]['user_id']
        # Behavior sequences are wrapped in start/end tokens before padding.
        pv_item_seq = (([self.args.start_id] + self.data[index]['pv']) + [self.args.end_id])
        buy_item_seq = self.data[index]['buy']
        # NOTE(review): fav_item_seq is built from the 'buy' list, not 'fav' —
        # looks like a copy/paste slip; confirm against the original intent.
        fav_item_seq = (([self.args.start_id] + self.data[index]['buy']) + [self.args.end_id])
        cart_item_seq = (([self.args.start_id] + self.data[index]['cart']) + [self.args.end_id])
        pos_buy_item_seq = (([self.args.start_id] + self.data[index]['posbuy']) + [self.args.end_id])
        # Pick two distinct behaviors (buy/pv/cart) for the contrastive pair.
        multi_items = [self.data[index]['buy'], self.data[index]['pv'], self.data[index]['cart']]
        have_constra = 1
        have_click = 1
        if ((len(multi_items[0]) == 0) or (len(multi_items[1]) == 0) or (len(multi_items[2]) == 0)):
            # Some behavior is empty: choose among the non-empty ones.
            b = [i for (i, j) in enumerate(multi_items) if (len(j) > 0)]
            if (len(b) > 1):
                b3 = b
                c = self.rng.randint(0, 1)
                b1 = b3[c]
                b2 = b3[(1 - c)]
            else:
                # Fewer than two behaviors available: contrastive pair disabled.
                have_constra = 0
                have_click = 0
                b3 = [0, 1]
                (b1, b2) = (0, 0)
            if (1 not in b):
                have_click = 0
        else:
            have_cart = 1
            (b1, b2, b3) = (0, 1, [0])
            # All behaviors present: sample two distinct behavior indices.
            b1 = self.rng.randint(0, 2)
            b2 = b1
            while (b2 == b1):
                b2 = self.rng.randint(0, 2)
            b3 = [b1, b2]
        (b1, b2) = (multi_items[b1][(- self.args.max_seq_len):], multi_items[b2][(- self.args.max_seq_len):])
        # NOTE(review): b1/b2 are lists here, so `b1 == 0` can never be true —
        # these two branches appear to be dead code; confirm upstream.
        if (b1 == 0):
            b1 = (([self.args.start_id] + b1[((- self.args.max_seq_len) + 2):]) + [self.args.end_id])
        if (b2 == 0):
            b2 = (([self.args.start_id] + b2[((- self.args.max_seq_len) + 2):]) + [self.args.end_id])
        con_len = [len(b1), len(b2), len(b3)]
        # Left-pad the contrastive sequences to max_seq_len.
        b1 = (([0] * (self.args.max_seq_len - len(b1))) + b1)
        b2 = (([0] * (self.args.max_seq_len - len(b2))) + b2)
        if (len(b1) != len(b2)):
            print(len(b1), len(b2), b3)
        behavior_ctra_sample = (b1, b2)
        if (self.eval is None):
            (masked_item_seq, pos_seq, negtive_seq, mask_num) = self.mask_seq(buy_item_seq)
        else:
            # Evaluation: only the final (or validation) target is held out.
            mask_num = 1
            if ((self.eval == 'test') or (self.eval == 'cold_start') or (self.eval == 'uncold_start')):
                pos_seq = [buy_item_seq[(- 1)]]
                masked_item_seq = buy_item_seq[:(- 1)]
            elif (self.eval == 'vaild'):
                pos_seq = [buy_item_seq[(- 2)]]
                masked_item_seq = buy_item_seq[:(- 2)]
            negtive_seq = tools.neg_sample(set(buy_item_seq), self.args.item_ids, self.neg_sample_num)
            masked_item_seq.append(self.args.mask_id)
        # Sample up to one click per masked position (from at most the first 10).
        # NOTE(review): sampled_clicks has fixed size 50 — assumes mask_num <= 50.
        sampled_clicks = ([(- 1)] * 50)
        sample_item = self.data[index]['pospv']
        aragen = len(sample_item)
        for i in range(mask_num):
            if (aragen == 0):
                sampled_clicks[i] = 0
                continue
            aragen = min(len(sample_item), 10)
            sampled_click = self.rng.randint(0, (aragen - 1))
            sampled_click = sample_item[sampled_click]
            sampled_clicks[i] = sampled_click
        sample_items = (self.data[index]['poscart'] + self.data[index]['pospv'])
        if self.eavl:
            # Evaluation: deterministic fill from poscart+pospv instead of sampling.
            if (aragen != 0):
                aragen = min(len(sample_items), 10)
                for i in range(aragen):
                    sampled_clicks[i] = sample_items[i]
        # Left-pad/truncate every sequence to max_seq_len.
        pad_len = (self.args.max_seq_len - len(masked_item_seq))
        masked_item_seq = masked_item_seq[(- self.args.max_seq_len):]
        masked_item_seq = (([0] * pad_len) + masked_item_seq)
        pad_len = (self.args.max_seq_len - len(pv_item_seq))
        pv_item_seq = (([0] * pad_len) + pv_item_seq[(- self.args.max_seq_len):])
        pad_len = (self.args.max_seq_len - len(cart_item_seq))
        cart_item_seq = (([0] * pad_len) + cart_item_seq[(- self.args.max_seq_len):])
        pad_len = (self.args.max_seq_len - len(fav_item_seq))
        fav_item_seq = (([0] * pad_len) + fav_item_seq[(- self.args.max_seq_len):])
        pad_len = (self.args.max_seq_len - len(pos_buy_item_seq))
        pos_buy_item_seq = (([0] * pad_len) + pos_buy_item_seq[(- self.args.max_seq_len):])
        if (self.eval is None):
            pad_len = (self.args.max_seq_len - len(pos_seq))
            pos_seq = (([0] * pad_len) + pos_seq[(- self.args.max_seq_len):])
            negtive_seq = (([0] * pad_len) + negtive_seq[(- self.args.max_seq_len):])
        cur_tensor = (torch.LongTensor([user_id]), torch.LongTensor(masked_item_seq), torch.LongTensor(pv_item_seq), torch.LongTensor(cart_item_seq), torch.LongTensor(fav_item_seq), torch.tensor(pos_seq, dtype=torch.long), torch.tensor(negtive_seq, dtype=torch.long), torch.tensor(b1, dtype=torch.long), torch.tensor(b2, dtype=torch.long), torch.tensor(b3, dtype=torch.long), torch.tensor([have_click], dtype=torch.long), torch.tensor(sampled_clicks, dtype=torch.long), torch.tensor([have_constra], dtype=torch.long))
        return cur_tensor
class VideoSettings(QDialog):
    """Dialog for choosing audio/video codec, quality, sample rate, resolution,
    framerate and container format for a QMediaRecorder."""

    def __init__(self, mediaRecorder, parent=None):
        super(VideoSettings, self).__init__(parent)
        self.ui = Ui_VideoSettingsUi()
        self.mediaRecorder = mediaRecorder
        self.ui.setupUi(self)
        # --- audio options ---
        self.ui.audioCodecBox.addItem('Default audio codec', '')
        for codecName in self.mediaRecorder.supportedAudioCodecs():
            description = self.mediaRecorder.audioCodecDescription(codecName)
            self.ui.audioCodecBox.addItem(((codecName + ': ') + description), codecName)
        (supportedSampleRates, _) = self.mediaRecorder.supportedAudioSampleRates()
        for sampleRate in supportedSampleRates:
            self.ui.audioSampleRateBox.addItem(str(sampleRate), sampleRate)
        self.ui.audioQualitySlider.setRange(0, QMultimedia.VeryHighQuality)
        # --- video options ---
        self.ui.videoCodecBox.addItem('Default video codec', '')
        for codecName in self.mediaRecorder.supportedVideoCodecs():
            description = self.mediaRecorder.videoCodecDescription(codecName)
            self.ui.videoCodecBox.addItem(((codecName + ': ') + description), codecName)
        self.ui.videoQualitySlider.setRange(0, QMultimedia.VeryHighQuality)
        self.ui.videoResolutionBox.addItem('Default')
        (supportedResolutions, _) = self.mediaRecorder.supportedResolutions()
        for resolution in supportedResolutions:
            self.ui.videoResolutionBox.addItem(('%dx%d' % (resolution.width(), resolution.height())), resolution)
        self.ui.videoFramerateBox.addItem('Default')
        (supportedFrameRates, _) = self.mediaRecorder.supportedFrameRates()
        for rate in supportedFrameRates:
            self.ui.videoFramerateBox.addItem(('%0.2f' % rate), rate)
        # --- container formats ---
        self.ui.containerFormatBox.addItem('Default container', '')
        for format in self.mediaRecorder.supportedContainers():
            self.ui.containerFormatBox.addItem(((format + ':') + self.mediaRecorder.containerDescription(format)), format)

    def audioSettings(self):
        """Return QAudioEncoderSettings reflecting the dialog's audio selections."""
        settings = self.mediaRecorder.audioSettings()
        settings.setCodec(self.boxValue(self.ui.audioCodecBox))
        settings.setQuality(QMultimedia.EncodingQuality(self.ui.audioQualitySlider.value()))
        settings.setSampleRate(self.boxValue(self.ui.audioSampleRateBox))
        return settings

    def setAudioSettings(self, settings):
        """Populate the audio widgets from existing settings."""
        self.selectComboBoxItem(self.ui.audioCodecBox, settings.codec())
        self.selectComboBoxItem(self.ui.audioSampleRateBox, settings.sampleRate())
        self.ui.audioQualitySlider.setValue(settings.quality())

    def videoSettings(self):
        """Return QVideoEncoderSettings reflecting the dialog's video selections."""
        settings = self.mediaRecorder.videoSettings()
        settings.setCodec(self.boxValue(self.ui.videoCodecBox))
        settings.setQuality(QMultimedia.EncodingQuality(self.ui.videoQualitySlider.value()))
        settings.setResolution(self.boxValue(self.ui.videoResolutionBox))
        settings.setFrameRate(self.boxValue(self.ui.videoFramerateBox))
        return settings

    def setVideoSettings(self, settings):
        """Populate the video widgets from existing settings."""
        self.selectComboBoxItem(self.ui.videoCodecBox, settings.codec())
        self.selectComboBoxItem(self.ui.videoResolutionBox, settings.resolution())
        self.ui.videoQualitySlider.setValue(settings.quality())
        # Frame rates are floats: match with qFuzzyCompare instead of ==.
        for i in range(1, self.ui.videoFramerateBox.count()):
            itemRate = self.ui.videoFramerateBox.itemData(i)
            if qFuzzyCompare(itemRate, settings.frameRate()):
                self.ui.videoFramerateBox.setCurrentIndex(i)
                break

    def format(self):
        """Return the selected container format's identifier."""
        return self.boxValue(self.ui.containerFormatBox)

    def setFormat(self, format):
        self.selectComboBoxItem(self.ui.containerFormatBox, format)

    @staticmethod
    def boxValue(box):
        """Return the userData of the combo box's current item (None when none selected).

        BUGFIX: marked @staticmethod — the method takes only ``box`` but is
        invoked as ``self.boxValue(box)`` throughout this class, which would
        otherwise pass the dialog instance as ``box``.
        """
        idx = box.currentIndex()
        if (idx == (- 1)):
            return None
        return box.itemData(idx)

    @staticmethod
    def selectComboBoxItem(box, value):
        """Select the combo item whose userData equals ``value`` (no-op when absent).

        BUGFIX: marked @staticmethod for the same reason as boxValue.
        """
        for i in range(box.count()):
            if (box.itemData(i) == value):
                box.setCurrentIndex(i)
                break
def generate_sparse_fixtures(num: Optional[int]=NUM_VECTORS, random_ids: bool=False, vectors_sizes: Optional[Union[(Dict[(str, int)], int)]]=None, skip_vectors: bool=False, with_payload: bool=True) -> List[models.Record]:
    """Generate sparse-vector test records, defaulting to the standard text/image/code sizes."""
    default_sizes = {
        'sparse-text': sparse_text_vector_size,
        'sparse-image': sparse_image_vector_size,
        'sparse-code': sparse_code_vector_size,
    }
    sizes = default_sizes if vectors_sizes is None else vectors_sizes
    return generate_records(
        num_records=num or NUM_VECTORS,
        vector_sizes=sizes,
        with_payload=with_payload,
        random_ids=random_ids,
        skip_vectors=skip_vectors,
        sparse=True,
    )
class Env(object):
    """Toy environment emitting random transitions, for exercising RL plumbing."""

    def __init__(self):
        self.state_space = 1000000
        self.action_dim = 1
        self.timestep_limit = 10

    def read_data(self, f):
        # Placeholder: nothing to read.
        pass

    def reset(self):
        pass

    def step(self):
        """Return one fully random (state, action, reward, next_state, done) transition."""
        # State vectors have a random dimensionality between 4 and 20.
        state = [random.randint(0, self.state_space) for _ in range(random.randint(4, 20))]
        next_state = [random.randint(0, self.state_space) for _ in range(random.randint(4, 20))]
        action = random.random() - 0.5
        reward = random.random() - action
        done = random.randint(0, 1)
        return (state, action, reward, next_state, done)

    def pretrained_step(self):
        """Like step(), but reward and next_state are correlated with the action."""
        state = [random.uniform(- 1.0, 1.0) for _ in range(self.state_space)]
        next_state = [random.uniform(- 1.0, 1.0) for _ in range(self.state_space)]
        action = random.random() - 0.5
        reward = random.random() - 0.5
        next_state[0] += action
        # random.uniform accepts swapped bounds, so this draws from [0.2, 1.0].
        reward += random.uniform(1.0, 0.2) * action
        done = random.randint(0, 1)
        return (state, action, reward, next_state, done)

    def rand(self):
        """Return a random state vector of full state_space dimensionality."""
        return [random.uniform(- 1.0, 1.0) for _ in range(self.state_space)]

    def search(self, state, action):
        """One-step lookahead: action-biased random reward, random next state shifted by action."""
        reward = random.random() - 0.5
        reward += random.uniform(1.0, 0.2) * action
        next_state = [random.uniform(- 1.0, 1.0) for _ in range(self.state_space)]
        next_state[0] += action
        return (next_state, reward)
def test_reseed_rngs():
    """reseed_rngs must set each shared RNG to the bit generator spawned from the seed."""
    pcg = np.random.PCG64
    # Sanity check: the default bit generator is the one we spawn below.
    assert isinstance(np.random.default_rng().bit_generator, pcg)

    seed = 543
    expected = [pcg(sub_seed) for sub_seed in np.random.SeedSequence(seed).spawn(2)]
    shared_rngs = [pytensor.shared(factory(pcg())) for factory in (np.random.Generator, np.random.RandomState)]

    def state_of(shared_rng):
        # RandomState hides its bit generator behind a private attribute.
        value = shared_rng.get_value()
        if isinstance(shared_rng, RandomStateSharedVariable):
            return value._bit_generator.state
        return value.bit_generator.state

    # Before reseeding the states must differ from the spawned generators...
    for shared_rng, bit_generator in zip(shared_rngs, expected):
        assert state_of(shared_rng) != bit_generator.state

    reseed_rngs(shared_rngs, seed)

    # ...and match exactly afterwards.
    for shared_rng, bit_generator in zip(shared_rngs, expected):
        assert state_of(shared_rng) == bit_generator.state
def _test():
    """Smoke-test the DIA-PreResNet CIFAR/SVHN model constructors.

    Builds each model, checks its parameter count against the known expected
    value (when available), and runs a forward/backward pass on a dummy
    32x32 RGB batch to confirm the output shape.
    """
    import torch

    pretrained = False

    # (constructor, number of output classes)
    models = [
        (diapreresnet20_cifar10, 10),
        (diapreresnet20_cifar100, 100),
        (diapreresnet20_svhn, 10),
        (diapreresnet56_cifar10, 10),
        (diapreresnet56_cifar100, 100),
        (diapreresnet56_svhn, 10),
        (diapreresnet110_cifar10, 10),
        (diapreresnet110_cifar100, 100),
        (diapreresnet110_svhn, 10),
        (diapreresnet164bn_cifar10, 10),
        (diapreresnet164bn_cifar100, 100),
        (diapreresnet164bn_svhn, 10),
        (diapreresnet1001_cifar10, 10),
        (diapreresnet1001_cifar100, 100),
        (diapreresnet1001_svhn, 10),
        (diapreresnet1202_cifar10, 10),
        (diapreresnet1202_cifar100, 100),
        (diapreresnet1202_svhn, 10),
    ]

    # Known parameter counts.  The original source contained empty
    # (syntactically invalid) comparisons for the 1001/1202-layer variants;
    # their expected counts are unknown here, so those models are built and
    # shape-checked but their parameter count is not asserted.
    # TODO(review): fill in expected counts for the 1001/1202 variants.
    expected_weight_counts = {
        'diapreresnet20_cifar10': 286674,
        'diapreresnet20_cifar100': 292524,
        'diapreresnet20_svhn': 286674,
        'diapreresnet56_cifar10': 869970,
        'diapreresnet56_cifar100': 875820,
        'diapreresnet56_svhn': 869970,
        'diapreresnet110_cifar10': 1744914,
        'diapreresnet110_cifar100': 1750764,
        'diapreresnet110_svhn': 1744914,
        'diapreresnet164bn_cifar10': 1922106,
        'diapreresnet164bn_cifar100': 1945236,
        'diapreresnet164bn_svhn': 1922106,
    }

    for model, num_classes in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        expected = expected_weight_counts.get(model.__name__)
        if expected is not None:
            assert weight_count == expected
        # Forward/backward smoke test on a dummy batch.
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, num_classes)
class _Config():
    """Mutable configuration container for the Sequicity/TSD training scripts.

    A fresh instance holds only generic defaults; call `init_handler(m)` with
    one of the supported mode strings to load dataset-specific
    hyper-parameters and file paths.  `__str__` dumps every attribute so the
    effective configuration can be logged.
    """

    def __init__(self):
        # Logging is configured as a side effect of construction.
        self._init_logging_handler()
        self.cuda_device = 4            # GPU index used by the trainer
        self.eos_m_token = 'EOS_M'      # end-of-machine-response token
        self.beam_len_bonus = 0.6       # length bonus used during beam search
        self.mode = 'unknown'
        self.m = 'TSD'
        self.prev_z_method = 'none'
        self.dataset = 'unknown'
        self.seed = 0

    def init_handler(self, m):
        """Dispatch to the dataset-specific initializer for mode string `m`.

        Raises KeyError for an unsupported mode string.
        """
        init_method = {
            'tsdf-camrest': self._camrest_tsdf_init,
            'tsdf-kvret': self._kvret_tsdf_init,
            'tsdf-sys': self._sys_tsdf_init,
            'tsdf-usr': self._usr_tsdf_init,
            'tsdf-usr_act': self._usr_act_tsdf_init,
        }
        init_method[m]()

    def _camrest_tsdf_init(self):
        # CamRest676 restaurant-domain dataset.
        self.beam_len_bonus = 0.5
        self.prev_z_method = 'separate'
        self.vocab_size = 800
        self.embedding_size = 50
        self.hidden_size = 50
        self.split = (3, 1, 1)          # train/dev/test split ratio
        self.lr = 0.003
        self.lr_decay = 0.5
        self.layer_num = 1
        self.z_length = 8               # belief-span decoding length
        self.max_ts = 40                # max decoding time steps
        self.early_stop_count = 3
        self.cuda = True
        self.vocab_path = './vocab/vocab-camrest.pkl'
        self.data = './data/CamRest676/CamRest676.json'
        self.entity = './data/CamRest676/CamRestOTGY.json'
        self.db = './data/CamRest676/CamRestDB.json'
        self.model_path = './models/camrest.pkl'
        self.result_path = './results/camrest-rl.csv'
        self.glove_path = '../sequicity/data/glove/glove.6B.50d.txt'
        self.batch_size = 32
        self.degree_size = 5            # DB-match degree one-hot size
        self.dropout_rate = 0.5
        self.epoch_num = 100
        self.rl_epoch_num = 1
        self.spv_proportion = 100       # % of supervised data
        self.new_vocab = True
        self.teacher_force = 100        # % teacher forcing
        self.beam_search = False
        self.beam_size = 10
        self.sampling = False
        self.use_positional_embedding = False
        self.unfrz_attn_epoch = 0
        self.skip_unsup = False
        self.truncated = False
        self.pretrain = False

    def _sys_tsdf_init(self):
        # MultiWOZ restaurant-domain system-side model.
        self.vocab_size = 800
        self.embedding_size = 50
        self.hidden_size = 50
        self.lr = 0.003
        self.lr_decay = 0.5
        self.layer_num = 1
        self.z_length = 16
        self.max_ts = 50
        self.early_stop_count = 5
        self.cuda = True
        self.split = (9, 1, 1)
        self.model_path = './models/multiwoz_sys911.pkl'
        self.result_path = './results/multiwoz_sys.csv'
        self.vocab_path = './vocab/vocab-multiwoz_sys.pkl'
        self.data = './data/multiwoz-master/data/multi-woz/rest_sys.json'
        self.entity = './data/multiwoz-master/data/multi-woz/rest_OTGY.json'
        self.db = './data/multiwoz-master/data/multi-woz/restaurant_db.json'
        self.beam_len_bonus = 0.5
        self.prev_z_method = 'separate'
        self.glove_path = '../sequicity/data/glove/glove.6B.50d.txt'
        self.batch_size = 32
        self.degree_size = 5
        self.dropout_rate = 0.5
        self.epoch_num = 100
        self.rl_epoch_num = 1
        self.spv_proportion = 100
        self.new_vocab = True
        self.teacher_force = 100
        self.beam_search = False
        self.beam_size = 10
        self.sampling = False
        self.use_positional_embedding = False
        self.unfrz_attn_epoch = 0
        self.skip_unsup = False
        self.truncated = False
        self.pretrain = False

    def _usr_tsdf_init(self):
        # MultiWOZ restaurant-domain user simulator (goal-conditioned).
        self.vocab_size = 800
        self.embedding_size = 50
        self.hidden_size = 50
        self.lr = 0.003
        self.lr_decay = 0.5
        self.layer_num = 1
        self.z_length = 16
        self.max_ts = 50
        self.early_stop_count = 5
        self.cuda = True
        self.degree_size = 1
        self.split = (9, 1, 1)
        # NOTE: absolute, machine-specific root directory.
        self.root_dir = '/data/qkun/sequicity_multiwoz_0.4'
        self.model_path = (self.root_dir + '/models/multi_woz_simulator911_goal.pkl')
        self.result_path = (self.root_dir + '/results/multi_woz_simulator911_goal.csv')
        self.vocab_path = (self.root_dir + '/vocab/vocab-multi_woz_simulator911_goal.pkl')
        self.data = './data/multiwoz-master/data/multi-woz/rest_usr_simulator_goalkey.json'
        self.entity = './data/multiwoz-master/data/multi-woz/rest_OTGY.json'
        self.db = './data/multiwoz-master/data/multi-woz/restaurant_db.json'
        self.beam_len_bonus = 0.5
        self.prev_z_method = 'separate'
        self.glove_path = '../sequicity/data/glove/glove.6B.50d.txt'
        self.batch_size = 32
        self.dropout_rate = 0.5
        self.epoch_num = 100
        self.rl_epoch_num = 1
        self.spv_proportion = 100
        self.new_vocab = True
        self.teacher_force = 100
        self.beam_search = False
        self.beam_size = 10
        self.sampling = False
        self.use_positional_embedding = False
        self.unfrz_attn_epoch = 0
        self.skip_unsup = False
        self.truncated = False
        self.pretrain = False

    def _usr_act_tsdf_init(self):
        # MultiWOZ restaurant-domain user simulator (dialog-act variant).
        self.vocab_size = 800
        self.embedding_size = 50
        self.hidden_size = 50
        self.lr = 0.003
        self.lr_decay = 0.5
        self.layer_num = 1
        self.z_length = 16
        self.max_ts = 50
        self.early_stop_count = 5
        self.cuda = True
        self.degree_size = 1
        self.split = (9, 1, 1)
        # NOTE: absolute, machine-specific root directory.
        self.root_dir = '/data/qkun/sequicity_multiwoz_0.4'
        self.model_path = (self.root_dir + '/models/multi_woz_simulator911_act3.pkl')
        self.result_path = (self.root_dir + '/results/multi_woz_simulator911_act.csv')
        self.vocab_path = (self.root_dir + '/vocab/vocab-multi_woz_simulator911_act3.pkl')
        self.data = './data/multiwoz-master/data/multi-woz/rest_usr_simulator_act.json'
        self.entity = './data/multiwoz-master/data/multi-woz/rest_OTGY.json'
        self.db = './data/multiwoz-master/data/multi-woz/restaurant_db.json'
        self.beam_len_bonus = 0.5
        self.prev_z_method = 'separate'
        self.glove_path = '../sequicity/data/glove/glove.6B.50d.txt'
        self.batch_size = 32
        self.dropout_rate = 0.5
        self.epoch_num = 100
        self.rl_epoch_num = 1
        self.spv_proportion = 100
        self.new_vocab = True
        self.teacher_force = 100
        self.beam_search = False
        self.beam_size = 10
        self.sampling = False
        self.use_positional_embedding = False
        self.unfrz_attn_epoch = 0
        self.skip_unsup = False
        self.truncated = False
        self.pretrain = False

    def _kvret_tsdf_init(self):
        # KVRET (in-car assistant) dataset; note cuda defaults to False here.
        self.prev_z_method = 'separate'
        self.intent = 'all'
        self.vocab_size = 1400
        self.embedding_size = 50
        self.hidden_size = 50
        self.split = None               # KVRET ships pre-split train/dev/test
        self.lr = 0.003
        self.lr_decay = 0.5
        self.vocab_path = './vocab/vocab-kvret.pkl'
        self.train = './data/kvret/kvret_train_public.json'
        self.dev = './data/kvret/kvret_dev_public.json'
        self.test = './data/kvret/kvret_test_public.json'
        self.entity = './data/kvret/kvret_entities.json'
        self.glove_path = './data/glove/glove.6B.50d.txt'
        self.batch_size = 32
        self.degree_size = 5
        self.z_length = 8
        self.layer_num = 1
        self.dropout_rate = 0.5
        self.epoch_num = 100
        self.rl_epoch_num = 2
        self.cuda = False
        self.spv_proportion = 100
        self.alpha = 0.0
        self.max_ts = 40
        self.early_stop_count = 3
        self.new_vocab = True
        self.model_path = './models/kvret.pkl'
        self.result_path = './results/kvret.csv'
        self.teacher_force = 100
        self.beam_search = False
        self.beam_size = 10
        self.sampling = False
        self.use_positional_embedding = False
        self.unfrz_attn_epoch = 0
        self.skip_unsup = False
        self.truncated = False
        self.pretrain = False

    def __str__(self):
        # Dump every attribute as "name : value" lines (used for logging).
        s = ''
        for (k, v) in self.__dict__.items():
            s += '{} : {}\n'.format(k, v)
        return s

    def _init_logging_handler(self):
        # Log to stderr and to a timestamped file under ./log/
        # (the directory must already exist or FileHandler raises).
        current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
        stderr_handler = logging.StreamHandler()
        file_handler = logging.FileHandler('./log/log_{}.txt'.format(current_time))
        logging.basicConfig(handlers=[stderr_handler, file_handler])
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
class XLMTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for XLM models.

    Pipeline: language-specific pre-tokenization (Moses for most languages,
    KyTea for Japanese, Jieba for Chinese, PyThaiNLP for Thai), optional
    lowercasing/accent removal, then byte-pair encoding with an end-of-word
    marker ``</w>``.

    NOTE(review): several string literals below (install instructions) look
    truncated by the source extraction; they are kept verbatim.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>',
                 # NOTE(review): mutable list default is shared across calls; it
                 # is only read here, but confirm it is never mutated upstream.
                 additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'],
                 lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs):
        """Load vocabulary/merges from disk and set up per-language caches.

        `lang2id`/`id2lang`, when both given, must be inverse mappings of the
        same size.
        """
        super().__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, **kwargs)
        # Per-language caches for the costly-to-build Moses helpers.
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        # Languages that bypass Moses tokenization in _tokenize().
        self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
        # True for current models; False for pre-trained XLM-17 & XLM-100.
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if ((lang2id is not None) and (id2lang is not None)):
            assert (len(lang2id) == len(id2lang))
        # External word tokenizers are created lazily on first use.
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Drop the trailing empty line produced by split('\n').
            merges = merges_handle.read().split('\n')[:(-1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Merge priority: earlier merges in the file rank lower (higher priority).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> BPE string cache.
        self.cache = {}

    def do_lower_case(self):
        # NOTE(review): upstream exposes this as a @property; here it is a
        # plain method, so callers must invoke it — confirm intended.
        return self.do_lowercase_and_remove_accent

    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with a per-language cached Moses normalizer."""
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        """Tokenize with a per-language cached Moses tokenizer (list of tokens)."""
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        """Text cleanup: unicode punct replacement, punct norm, strip non-printing chars."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        """Tokenize Japanese text with a lazily constructed KyTea wrapper.

        Raises if Mykytea (or its model) is not installed.
        """
        if (self.ja_word_tokenizer is None):
            try:
                import Mykytea
                self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin")
            except (AttributeError, ImportError):
                logger.error("Make sure you install KyTea ( and it's python wrapper ( with the following steps")
                logger.error('1. git clone :neubig/kytea.git && cd kytea')
                logger.error('2. autoreconf -i')
                logger.error('3. ./configure --prefix=$HOME/local')
                logger.error('4. make && make install')
                logger.error('5. pip install kytea')
                raise
        return list(self.ja_word_tokenizer.getWS(text))

    def vocab_size(self):
        # NOTE(review): upstream exposes this as a @property — confirm callers.
        return len(self.encoder)

    def get_vocab(self):
        # Base vocabulary merged with any added tokens.
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single token.

        Returns the space-joined BPE pieces; the last piece carries the
        end-of-word marker ``</w>``.  Results are memoized in ``self.cache``.
        """
        word = (tuple(token[:(-1)]) + ((token[(-1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        # Greedily merge the lowest-ranked (highest priority) adjacent pair
        # until no known merge remains or the word collapses to one symbol.
        while True:
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case a bare newline token so it round-trips.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        """Tokenize `text` for language `lang` and apply BPE.

        When `bypass_tokenizer` is True the text is assumed pre-tokenized
        (whitespace separated) and language-specific tokenization is skipped.
        """
        if (lang and self.lang2id and (lang not in self.lang2id)):
            logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
        if bypass_tokenizer:
            text = text.split()
        elif (lang not in self.lang_with_custom_tokenizer):
            text = self.moses_pipeline(text, lang=lang)
            # Romanian needs extra diacritic normalization before Moses.
            if (lang == 'ro'):
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif (lang == 'th'):
            text = self.moses_pipeline(text, lang=lang)
            try:
                if ('pythainlp' not in sys.modules):
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules['pythainlp'].word_tokenize
            except (AttributeError, ImportError):
                logger.error('Make sure you install PyThaiNLP ( with the following steps')
                logger.error('1. pip install pythainlp')
                raise
            text = th_word_tokenize(text)
        elif (lang == 'zh'):
            try:
                if ('jieba' not in sys.modules):
                    import jieba
                else:
                    jieba = sys.modules['jieba']
            except (AttributeError, ImportError):
                logger.error('Make sure you install Jieba ( with the following steps')
                logger.error('1. pip install jieba')
                raise
            # Segment first, then run the Moses cleanup on the joined result.
            text = ' '.join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif (lang == 'ja'):
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError('It should not reach here')
        if (self.do_lowercase_and_remove_accent and (not bypass_tokenizer)):
            text = lowercase_and_remove_accent(text)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        # Unknown tokens map to the UNK id.
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        # Undo BPE: drop end-of-word markers, re-insert spaces.
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """XLM input format: <s> A </s> (pair: <s> A </s> B </s>)."""
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return ((bos + token_ids_0) + sep)
        return ((((bos + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab JSON and merges file; returns their paths.

        Returns None (implicitly) when `save_directory` is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            # Merges are written in rank order; warn on gaps (corruption sign).
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
# NOTE(review): the `@pytest.mark` prefix of this parametrize decorator was
# truncated in the extracted source; text kept verbatim.
.parametrize('input_type', [tuple, list])
def test_prepare_inputs_from_poa_wrong_number_arrays(sapm_dc_snl_ac_system_Array, location, total_irrad, weather, input_type):
    """prepare_inputs_from_poa must reject inputs whose arity does not match
    the two-Array system: a bare DataFrame raises TypeError, and tuples/lists
    of length != 2 raise ValueError."""
    len_error = 'Input must be same length as number of Arrays in system\\. Expected 2, got [0-9]+\\.'
    type_error = 'Input must be a tuple of length 2, got .*\\.'
    mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
    poa = pd.concat([weather, total_irrad], axis=1)
    # Not a tuple/list at all -> TypeError.
    with pytest.raises(TypeError, match=type_error):
        mc.prepare_inputs_from_poa(poa)
    # Too few elements -> ValueError.
    with pytest.raises(ValueError, match=len_error):
        mc.prepare_inputs_from_poa(input_type((poa,)))
    # Too many elements -> ValueError.
    with pytest.raises(ValueError, match=len_error):
        mc.prepare_inputs_from_poa(input_type((poa, poa, poa)))
def sample_from_gan(generator, out_dir, num_samples, out_shape, batch_size=50, noise_shape=None, rand_sampler=None, verbosity=make_verbose()):
    """Draw `num_samples` images from `generator` and save them as PNGs.

    Noise is taken from `rand_sampler()` when provided, otherwise sampled as
    standard-normal tensors of shape [batch_size] + noise_shape.  Images are
    written to `out_dir` as 0.png, 1.png, ...

    NOTE(review): `verbosity=make_verbose()` is evaluated once at definition
    time — presumably intentional; confirm.

    Raises:
        ValueError: if neither `noise_shape` nor `rand_sampler` is given.
    """
    if (noise_shape is None) and (rand_sampler is None):
        # ValueError (a subclass of Exception) keeps existing handlers working
        # while being more specific than the original bare Exception.
        raise ValueError('Either noise shape or randomizer should be provided')
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    sample_idx = 0
    with torch.no_grad():
        generator.cuda()
        # One extra batch so the final partial batch still covers num_samples.
        num_batches = (num_samples // batch_size) + 1
        for _ in wrap_with_tqdm(range(num_batches), verbosity):
            if rand_sampler is not None:
                noise = rand_sampler()
            else:
                noise = torch.randn([batch_size] + noise_shape).cuda()
            generated = generator(noise).cpu().view([batch_size] + out_shape)
            for sample in generated:
                to_image(sample).save(os.path.join(out_dir, '{}.png'.format(sample_idx)))
                sample_idx += 1
                # Fix: the original checked `i > num_samples` after the
                # increment and therefore wrote num_samples + 1 files.
                if sample_idx >= num_samples:
                    return
class NTC_Hyperprior(nn.Module):
    """Nonlinear transform coder with a hyperprior entropy model.

    `ga`/`gs` are the main analysis/synthesis transforms; `ha`/`hs` form the
    hyper codec that predicts per-element Gaussian scale/mean parameters for
    the latent `y`.  Quantization is simulated with straight-through rounding
    (`ste_round`) so gradients can flow through the quantizer.
    """

    def __init__(self, config):
        super().__init__()
        self.ga = AnalysisTransform(**config.ga_kwargs)
        self.gs = SynthesisTransform(**config.gs_kwargs)
        # Hyper-analysis: 256 -> 192 channels with two stride-2 downsamplings.
        self.ha = nn.Sequential(nn.Conv2d(256, 192, 3, stride=1, padding=1), nn.LeakyReLU(inplace=True), nn.Conv2d(192, 192, 5, stride=2, padding=2), nn.LeakyReLU(inplace=True), nn.Conv2d(192, 192, 5, stride=2, padding=2))
        # Hyper-synthesis mirrors `ha`; its 512 output channels are split into
        # 256 scales + 256 means for the Gaussian conditional model.
        self.hs = nn.Sequential(nn.ConvTranspose2d(192, 256, 5, stride=2, padding=2, output_padding=1), nn.ReLU(inplace=True), nn.ConvTranspose2d(256, 384, 5, stride=2, padding=2, output_padding=1), nn.ReLU(inplace=True), nn.ConvTranspose2d(384, 512, 3, stride=1, padding=1))
        self.entropy_bottleneck = EntropyBottleneck(192)
        self.gaussian_conditional = GaussianConditional(None)
        self.distortion = Distortion(config)
        # Cached input resolution; resolution-dependent transforms are
        # reconfigured lazily when it changes.
        self.H = self.W = 0

    def update_resolution(self, H, W):
        """Propagate a new input resolution to ga/gs (y lives at 1/16 scale)."""
        if ((H != self.H) or (W != self.W)):
            self.ga.update_resolution(H, W)
            self.gs.update_resolution((H // 16), (W // 16))
            self.H = H
            self.W = W

    def forward(self, input_image, require_probs=False):
        """Compress-and-reconstruct a batch.

        Returns (mse_loss, bpp_y, bpp_z, x_hat); with `require_probs` also the
        latent `y`, its likelihoods and the predicted Gaussian parameters.
        """
        (B, C, H, W) = input_image.shape
        self.update_resolution(H, W)
        y = self.ga(input_image)
        z = self.ha(y)
        (_, z_likelihoods) = self.entropy_bottleneck(z)
        # Straight-through rounding around the learned medians.
        z_offset = self.entropy_bottleneck._get_medians()
        z_tmp = (z - z_offset)
        z_hat = (ste_round(z_tmp) + z_offset)
        gaussian_params = self.hs(z_hat)
        (scales_hat, means_hat) = gaussian_params.chunk(2, 1)
        (_, y_likelihoods) = self.gaussian_conditional(y, scales_hat, means=means_hat)
        # Mean-centred straight-through rounding of the main latent.
        y_hat = (ste_round((y - means_hat)) + means_hat)
        x_hat = self.gs(y_hat)
        mse_loss = self.distortion(input_image, x_hat)
        # Natural-log likelihoods are converted to bits per pixel via -log(2),
        # normalized by spatial size and batch.
        bpp_y = ((torch.log(y_likelihoods).sum() / (((- math.log(2)) * H) * W)) / B)
        bpp_z = ((torch.log(z_likelihoods).sum() / (((- math.log(2)) * H) * W)) / B)
        if require_probs:
            return (mse_loss, bpp_y, bpp_z, x_hat, y, y_likelihoods, scales_hat, means_hat)
        else:
            return (mse_loss, bpp_y, bpp_z, x_hat)

    def aux_loss(self):
        # Sum of the auxiliary (quantile-fitting) losses of every entropy
        # bottleneck module in the model.
        aux_loss = sum((m.loss() for m in self.modules() if isinstance(m, EntropyBottleneck)))
        return aux_loss
# NOTE(review): the `@pytest.mark` prefix of this parametrize decorator was
# truncated in the extracted source; text kept verbatim.
.parametrize('constructor', [get_core_metadata_constructors()['2.1']])
class TestCoreMetadataV21():
    """Field-by-field checks of the core-metadata v2.1 text constructor.

    NOTE(review): the whitespace inside the expected `helpers.dedent(...)`
    strings appears collapsed by the source extraction; kept verbatim.
    """

    def test_default(self, constructor, isolation, helpers):
        # Minimal project table: only name and version are emitted.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0'}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n '))

    def test_description(self, constructor, isolation, helpers):
        # `description` maps to the Summary field.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'description': 'foo'}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Summary: foo\n '))

    def test_urls(self, constructor, isolation, helpers):
        # One Project-URL line per entry, in insertion order.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'urls': {'foo': 'bar', 'bar': 'baz'}}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Project-URL: foo, bar\n Project-URL: bar, baz\n '))

    def test_authors_name(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'authors': [{'name': 'foo'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Author: foo\n '))

    def test_authors_email(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'authors': [{'email': ''}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Author-email: \n '))

    def test_authors_name_and_email(self, constructor, isolation, helpers):
        # Name + email combine into the `Author-email: name <email>` form.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'authors': [{'email': '', 'name': 'foo'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Author-email: foo <>\n '))

    def test_authors_multiple(self, constructor, isolation, helpers):
        # Multiple plain names are comma-joined in a single Author field.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'authors': [{'name': 'foo'}, {'name': 'bar'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Author: foo, bar\n '))

    def test_maintainers_name(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'maintainers': [{'name': 'foo'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Maintainer: foo\n '))

    def test_maintainers_email(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'maintainers': [{'email': ''}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Maintainer-email: \n '))

    def test_maintainers_name_and_email(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'maintainers': [{'email': '', 'name': 'foo'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Maintainer-email: foo <>\n '))

    def test_maintainers_multiple(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'maintainers': [{'name': 'foo'}, {'name': 'bar'}]}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Maintainer: foo, bar\n '))

    def test_license(self, constructor, isolation, helpers):
        # Multi-line license text is emitted verbatim under the License field.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'license': {'text': 'foo\nbar'}}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n License: foo\n bar\n '))

    def test_license_expression(self, constructor, isolation, helpers):
        # SPDX expressions are normalized (mit -> MIT).
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'license': 'mit'}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n License-Expression: MIT\n '))

    def test_keywords_single(self, constructor, isolation, helpers):
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'keywords': ['foo']}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Keywords: foo\n '))

    def test_keywords_multiple(self, constructor, isolation, helpers):
        # Keywords are sorted alphabetically before joining.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'keywords': ['foo', 'bar']}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Keywords: bar,foo\n '))

    def test_classifiers(self, constructor, isolation, helpers):
        # Classifiers are emitted in sorted order (3.9 before 3.11).
        classifiers = ['Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.9']
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'classifiers': classifiers}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Classifier: Programming Language :: Python :: 3.9\n Classifier: Programming Language :: Python :: 3.11\n '))

    def test_requires_python(self, constructor, isolation, helpers):
        # Specifier sets are normalized/sorted (<2,>=1).
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'requires-python': '>=1,<2'}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Requires-Python: <2,>=1\n '))

    def test_dependencies(self, constructor, isolation, helpers):
        # Requirements are sorted alphabetically.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'dependencies': ['foo==1', 'bar==5']}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Requires-Dist: bar==5\n Requires-Dist: foo==1\n '))

    def test_optional_dependencies(self, constructor, isolation, helpers):
        # Extras are sorted; markers are combined with the extra marker.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'optional-dependencies': {'feature2': ['foo==1; python_version < "3"', 'bar==5'], 'feature1': ['foo==1', 'bar==5; python_version < "3"']}}})
        assert (constructor(metadata) == helpers.dedent("\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Provides-Extra: feature1\n Requires-Dist: bar==5; (python_version < '3') and extra == 'feature1'\n Requires-Dist: foo==1; extra == 'feature1'\n Provides-Extra: feature2\n Requires-Dist: bar==5; extra == 'feature2'\n Requires-Dist: foo==1; (python_version < '3') and extra == 'feature2'\n "))

    def test_extra_runtime_dependencies(self, constructor, isolation, helpers):
        # extra_dependencies are appended after the sorted project dependencies.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'dependencies': ['foo==1', 'bar==5']}})
        assert (constructor(metadata, extra_dependencies=['baz==9']) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Requires-Dist: bar==5\n Requires-Dist: foo==1\n Requires-Dist: baz==9\n '))

    def test_readme(self, constructor, isolation, helpers):
        # Readme text becomes the message body after a blank line.
        metadata = ProjectMetadata(str(isolation), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'readme': {'content-type': 'text/markdown', 'text': 'test content\n'}}})
        assert (constructor(metadata) == helpers.dedent('\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Description-Content-Type: text/markdown\n\n test content\n '))

    def test_all(self, constructor, helpers, temp_dir):
        # Everything at once; the touched LICENSE.txt is auto-detected and
        # emitted as a License-File entry.
        metadata = ProjectMetadata(str(temp_dir), None, {'project': {'name': 'My.App', 'version': '0.1.0', 'description': 'foo', 'urls': {'foo': 'bar', 'bar': 'baz'}, 'authors': [{'email': '', 'name': 'foo'}], 'maintainers': [{'email': '', 'name': 'foo'}], 'license': {'text': 'foo\nbar'}, 'keywords': ['foo', 'bar'], 'classifiers': ['Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.9'], 'requires-python': '>=1,<2', 'dependencies': ['foo==1', 'bar==5'], 'optional-dependencies': {'feature2': ['foo==1; python_version < "3"', 'bar==5'], 'feature1': ['foo==1', 'bar==5; python_version < "3"'], 'feature3': ['baz file:///path/to/project']}, 'readme': {'content-type': 'text/markdown', 'text': 'test content\n'}}, 'tool': {'hatch': {'metadata': {'allow-direct-references': True}}}})
        (temp_dir / 'LICENSE.txt').touch()
        assert (constructor(metadata) == helpers.dedent("\n Metadata-Version: 2.1\n Name: My.App\n Version: 0.1.0\n Summary: foo\n Project-URL: foo, bar\n Project-URL: bar, baz\n Author-email: foo <>\n Maintainer-email: foo <>\n License: foo\n bar\n License-File: LICENSE.txt\n Keywords: bar,foo\n Classifier: Programming Language :: Python :: 3.9\n Classifier: Programming Language :: Python :: 3.11\n Requires-Python: <2,>=1\n Requires-Dist: bar==5\n Requires-Dist: foo==1\n Provides-Extra: feature1\n Requires-Dist: bar==5; (python_version < '3') and extra == 'feature1'\n Requires-Dist: foo==1; extra == 'feature1'\n Provides-Extra: feature2\n Requires-Dist: bar==5; extra == 'feature2'\n Requires-Dist: foo==1; (python_version < '3') and extra == 'feature2'\n Provides-Extra: feature3\n Requires-Dist: file:///path/to/project ; extra == 'feature3'\n Description-Content-Type: text/markdown\n\n test content\n "))
class SegmentationTTAWrapper(nn.Module):
    """Test-time-augmentation wrapper for segmentation models.

    Runs the wrapped model once per transform, de-augments each prediction
    back to the original frame, and merges them (`merge_mode`).  When
    `output_mask_key` is set, the model output is treated as a mapping and the
    merged mask is returned under that same key.
    """

    def __init__(self, model: nn.Module, transforms: Compose, merge_mode: str='mean', output_mask_key: Optional[str]=None):
        super().__init__()
        self.model = model
        self.transforms = transforms
        self.merge_mode = merge_mode
        self.output_key = output_mask_key

    def forward(self, image: torch.Tensor, *args) -> Union[(torch.Tensor, Mapping[(str, torch.Tensor)])]:
        merger = Merger(type=self.merge_mode, n=len(self.transforms))
        for transform in self.transforms:
            # Augment input, predict, optionally pull the mask out of a dict,
            # then map the prediction back to the original geometry.
            prediction = self.model(transform.augment_image(image), *args)
            if self.output_key is not None:
                prediction = prediction[self.output_key]
            merger.append(transform.deaugment_mask(prediction))
        merged = merger.result
        return merged if self.output_key is None else {self.output_key: merged}
def test_activation(temp_dir, platform):
    """activate() must prepend the venv bin dir to PATH, point VIRTUAL_ENV at
    the venv, and scrub the ignored variables; deactivate() must restore the
    pre-activation environment exactly."""
    venv_path = temp_dir / 'venv'
    venv = VirtualEnv(venv_path, platform)
    venv.create(sys.executable)

    with EnvVars(exclude=VirtualEnv.IGNORED_ENV_VARS):
        # Seed a known environment to compare against after (de)activation.
        os.environ['PATH'] = str(temp_dir)
        os.environ['VIRTUAL_ENV'] = 'foo'
        for name in VirtualEnv.IGNORED_ENV_VARS:
            os.environ[name] = 'foo'

        venv.activate()
        assert os.environ['PATH'] == f'{venv.executables_directory}{os.pathsep}{temp_dir}'
        assert os.environ['VIRTUAL_ENV'] == str(venv_path)
        for name in VirtualEnv.IGNORED_ENV_VARS:
            assert name not in os.environ

        venv.deactivate()
        assert os.environ['PATH'] == str(temp_dir)
        assert os.environ['VIRTUAL_ENV'] == 'foo'
        for name in VirtualEnv.IGNORED_ENV_VARS:
            assert os.environ[name] == 'foo'
class Vgg_face_dag(nn.Module):
    """VGG-16 "VGG-Face" backbone (converted from a MatConvNet DAG model).

    ``return_layer`` selects which activation :meth:`forward` returns:

    * ``'conv'`` -- flattened pool5 features (25088-dim),
    * ``'fc6'``  -- ReLU'd fc6 activations (4096-dim),
    * ``'fc7'``  -- ReLU'd fc7 activations (4096-dim),
    * anything else -- raw fc8 logits (2622 outputs).

    Note: the full network (including dropout) is always evaluated, so the
    RNG is consumed identically regardless of ``return_layer``.
    """

    def __init__(self, return_layer):
        super(Vgg_face_dag, self).__init__()
        # Preprocessing statistics of the original model; std of 1 means the
        # network expects inputs roughly in the raw 0-255 range.
        # NOTE(review): mean values imply per-channel mean subtraction on
        # 224x224x3 images -- confirm channel order (RGB vs BGR) with callers.
        self.meta = {'mean': [129., 104., 93.], 'std': [1, 1, 1], 'imageSize': [224, 224, 3]}
        self.return_layer = return_layer
        # Block 1: 3 -> 64 channels, two 3x3 convs + 2x2 max-pool.
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
        # Block 2: 64 -> 128 channels.
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
        # Block 3: 128 -> 256 channels, three convs.
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
        # Block 4: 256 -> 512 channels, three convs.
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
        # Block 5: 512 -> 512 channels, three convs.
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
        # Classifier head: 25088 (= 512*7*7) -> 4096 -> 4096 -> 2622.
        self.fc6 = nn.Linear(in_features=25088, out_features=4096, bias=True)
        self.relu6 = nn.ReLU(inplace=True)
        self.dropout6 = nn.Dropout(p=0.5)
        self.fc7 = nn.Linear(in_features=4096, out_features=4096, bias=True)
        self.relu7 = nn.ReLU(inplace=True)
        self.dropout7 = nn.Dropout(p=0.5)
        self.fc8 = nn.Linear(in_features=4096, out_features=2622, bias=True)

    def forward(self, x0):
        """Run the full network and return the activation chosen by
        ``self.return_layer`` (see class docstring)."""
        # Convolutional stages.
        x1 = self.conv1_1(x0)
        x2 = self.relu1_1(x1)
        x3 = self.conv1_2(x2)
        x4 = self.relu1_2(x3)
        x5 = self.pool1(x4)
        x6 = self.conv2_1(x5)
        x7 = self.relu2_1(x6)
        x8 = self.conv2_2(x7)
        x9 = self.relu2_2(x8)
        x10 = self.pool2(x9)
        x11 = self.conv3_1(x10)
        x12 = self.relu3_1(x11)
        x13 = self.conv3_2(x12)
        x14 = self.relu3_2(x13)
        x15 = self.conv3_3(x14)
        x16 = self.relu3_3(x15)
        x17 = self.pool3(x16)
        x18 = self.conv4_1(x17)
        x19 = self.relu4_1(x18)
        x20 = self.conv4_2(x19)
        x21 = self.relu4_2(x20)
        x22 = self.conv4_3(x21)
        x23 = self.relu4_3(x22)
        x24 = self.pool4(x23)
        x25 = self.conv5_1(x24)
        x26 = self.relu5_1(x25)
        x27 = self.conv5_2(x26)
        x28 = self.relu5_2(x27)
        x29 = self.conv5_3(x28)
        x30 = self.relu5_3(x29)
        x31_preflatten = self.pool5(x30)
        # Flatten to (batch, 25088) for the fully-connected head.
        x31 = x31_preflatten.view(x31_preflatten.size(0), (- 1))
        x32 = self.fc6(x31)
        x33 = self.relu6(x32)
        x34 = self.dropout6(x33)
        x35 = self.fc7(x34)
        x36 = self.relu7(x35)
        x37 = self.dropout7(x36)
        x38 = self.fc8(x37)
        # Select the requested tap point ('fc6'/'fc7' taps are pre-dropout).
        if (self.return_layer == 'conv'):
            return x31
        elif (self.return_layer == 'fc6'):
            return x33
        elif (self.return_layer == 'fc7'):
            return x36
        else:
            return x38
class CSVBlotter(Blotter):
    """Blotter that appends each saved transaction as a row of a CSV file.

    The output directory and file are created lazily on construction; a
    header row (from ``Transaction.get_header()``) is written only when the
    file does not exist yet, so repeated runs append to the same file
    without duplicating the header.
    """

    def __init__(self, csv_file_path: str):
        self.file_path = csv_file_path
        self.logger = qf_logger.getChild(self.__class__.__name__)
        (self.file_handler, self.csv_writer) = self._init_csv_file()

    def save_transaction(self, transaction: Transaction):
        """Append one transaction row; a None transaction is ignored."""
        if (transaction is not None):
            self.csv_writer.writerow(transaction.get_row())

    def get_transactions(self, from_date: datetime=None, to_date: datetime=None) -> List[Transaction]:
        """Reading transactions back is not supported by this blotter."""
        raise NotImplementedError()

    def _init_csv_file(self) -> Tuple[(TextIO, csv.writer)]:
        """Open the CSV file for appending, creating the directory and the
        header row as needed.

        Fixes two defects of the previous version: ``makedirs('')`` raised
        when ``file_path`` had no directory component, and the file was
        opened by duplicated code in both branches.
        """
        output_dir = path.dirname(self.file_path)
        # dirname() returns '' for a bare file name; makedirs('') would raise.
        if (output_dir and (not path.exists(output_dir))):
            self.logger.info(f'directory {output_dir} does not exist, creating directory...')
            makedirs(output_dir)
        # Decide about the header BEFORE opening: mode 'a+' creates the file.
        needs_header = (not path.exists(self.file_path))
        file_handler = open(self.file_path, 'a+', newline='')
        if needs_header:
            csv.DictWriter(file_handler, fieldnames=Transaction.get_header()).writeheader()
        csv_writer = csv.writer(file_handler)
        return (file_handler, csv_writer)

    def close_file(self):
        """Close the underlying file handle (flushes pending rows)."""
        if (self.file_handler is not None):
            self.file_handler.close()
class FairseqAdamConfig(FairseqDataclass):
    """Dataclass-style configuration for the fairseq Adam optimizer.

    NOTE(review): upstream fairseq declares optimizer configs with the
    ``@dataclass`` decorator -- confirm it was not lost above this class.
    Field order is part of the generated ``__init__`` signature; do not
    reorder.
    """
    # Betas are parsed from a string literal such as '(0.9, 0.999)'.
    adam_betas: str = field(default='(0.9, 0.999)', metadata={'help': 'betas for Adam optimizer'})
    adam_eps: float = field(default=1e-08, metadata={'help': 'epsilon for Adam optimizer'})
    weight_decay: float = field(default=0.0, metadata={'help': 'weight decay'})
    # Fall back to the legacy (non-fused) Adam implementation.
    use_old_adam: bool = field(default=False, metadata={'help': 'Use fairseq.optim.adam.Adam'})
    # II(...) interpolates these values from the top-level fairseq config.
    tpu: bool = II('common.tpu')
    lr: List[float] = II('optimization.lr')
class WhooshSearchBackend(BaseSearchBackend):
    """Haystack search backend for Whoosh with a ChineseAnalyzer text field.

    Fixes applied relative to the previous revision:
    * ``POST_LIMIT`` was read with ``getattr`` on a dict and therefore always
      fell back to the default; it is now read with ``dict.get``.
    * A stray ``_query`` token before ``search`` (apparently a mangled
      ``@log_query`` decorator) was removed.
    * ``len(value) is 0`` identity comparison replaced with ``== 0``.
    * The bare ``except:`` in ``_to_python`` is narrowed.
    """

    RESERVED_WORDS = ('AND', 'NOT', 'OR', 'TO')
    RESERVED_CHARACTERS = ('\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', '*', '?', ':', '.')

    def __init__(self, connection_alias, **connection_options):
        super(WhooshSearchBackend, self).__init__(connection_alias, **connection_options)
        self.setup_complete = False
        self.use_file_storage = True
        # BUG FIX: connection_options is a dict; getattr() never found the
        # key, so POST_LIMIT silently always used the default.
        self.post_limit = connection_options.get('POST_LIMIT', ((128 * 1024) * 1024))
        self.path = connection_options.get('PATH')
        if (connection_options.get('STORAGE', 'file') != 'file'):
            self.use_file_storage = False
        if (self.use_file_storage and (not self.path)):
            raise ImproperlyConfigured(("You must specify a 'PATH' in your settings for connection '%s'." % connection_alias))
        self.log = logging.getLogger('haystack')

    def setup(self):
        """Create/open the Whoosh index lazily, building the schema from the
        unified index of this connection."""
        from haystack import connections
        new_index = False
        if (self.use_file_storage and (not os.path.exists(self.path))):
            os.makedirs(self.path)
            new_index = True
        if (self.use_file_storage and (not os.access(self.path, os.W_OK))):
            raise IOError(("The path to your Whoosh index '%s' is not writable for the current user/group." % self.path))
        if self.use_file_storage:
            self.storage = FileStorage(self.path)
        else:
            # RAM storage is shared per-thread via the module-level LOCALS.
            global LOCALS
            if (getattr(LOCALS, 'RAM_STORE', None) is None):
                LOCALS.RAM_STORE = RamStorage()
            self.storage = LOCALS.RAM_STORE
        (self.content_field_name, self.schema) = self.build_schema(connections[self.connection_alias].get_unified_index().all_searchfields())
        self.parser = QueryParser(self.content_field_name, schema=self.schema)
        if (new_index is True):
            self.index = self.storage.create_index(self.schema)
        else:
            try:
                self.index = self.storage.open_index(schema=self.schema)
            except index.EmptyIndexError:
                self.index = self.storage.create_index(self.schema)
        self.setup_complete = True

    def build_schema(self, fields):
        """Map haystack field definitions onto a Whoosh ``Schema``.

        Returns ``(content_field_name, schema)`` where the content field is
        the document field (used for parsing and spelling suggestions).
        """
        schema_fields = {ID: WHOOSH_ID(stored=True, unique=True), DJANGO_CT: WHOOSH_ID(stored=True), DJANGO_ID: WHOOSH_ID(stored=True)}
        # Grab the number of keys that are hard-coded into Whoosh.
        initial_key_count = len(schema_fields)
        content_field_name = ''
        for (field_name, field_class) in fields.items():
            if field_class.is_multivalued:
                if (field_class.indexed is False):
                    schema_fields[field_class.index_fieldname] = IDLIST(stored=True, field_boost=field_class.boost)
                else:
                    schema_fields[field_class.index_fieldname] = KEYWORD(stored=True, commas=True, scorable=True, field_boost=field_class.boost)
            elif (field_class.field_type in ['date', 'datetime']):
                schema_fields[field_class.index_fieldname] = DATETIME(stored=field_class.stored, sortable=True)
            elif (field_class.field_type == 'integer'):
                schema_fields[field_class.index_fieldname] = NUMERIC(stored=field_class.stored, numtype=int, field_boost=field_class.boost)
            elif (field_class.field_type == 'float'):
                schema_fields[field_class.index_fieldname] = NUMERIC(stored=field_class.stored, numtype=float, field_boost=field_class.boost)
            elif (field_class.field_type == 'boolean'):
                schema_fields[field_class.index_fieldname] = BOOLEAN(stored=field_class.stored)
            elif (field_class.field_type == 'ngram'):
                schema_fields[field_class.index_fieldname] = NGRAM(minsize=3, maxsize=15, stored=field_class.stored, field_boost=field_class.boost)
            elif (field_class.field_type == 'edge_ngram'):
                schema_fields[field_class.index_fieldname] = NGRAMWORDS(minsize=2, maxsize=15, at='start', stored=field_class.stored, field_boost=field_class.boost)
            else:
                # Default text field uses the Chinese analyzer.
                schema_fields[field_class.index_fieldname] = TEXT(stored=True, analyzer=ChineseAnalyzer(), field_boost=field_class.boost, sortable=True)
            if (field_class.document is True):
                content_field_name = field_class.index_fieldname
                schema_fields[field_class.index_fieldname].spelling = True
        if (len(schema_fields) <= initial_key_count):
            raise SearchBackendError('No fields were found in any search_indexes. Please correct this before attempting to search.')
        return (content_field_name, Schema(**schema_fields))

    def update(self, index, iterable, commit=True):
        """Index every object in ``iterable`` via the given search index.

        NOTE(review): ``len(iterable)`` below assumes a sized iterable (a
        generator would raise) -- matches the historical contract.
        """
        if (not self.setup_complete):
            self.setup()
        self.index = self.index.refresh()
        writer = AsyncWriter(self.index)
        for obj in iterable:
            try:
                doc = index.full_prepare(obj)
            except SkipDocument:
                self.log.debug(u'Indexing for object `%s` skipped', obj)
            else:
                # Really make sure it's unicode, because Whoosh won't have it any
                # other way.
                for key in doc:
                    doc[key] = self._from_python(doc[key])
                # Document boosts aren't supported in Whoosh 2.5.0+.
                if ('boost' in doc):
                    del doc['boost']
                try:
                    writer.update_document(**doc)
                except Exception as e:
                    if (not self.silently_fail):
                        raise
                    self.log.error((u'%s while preparing object for update' % e.__class__.__name__), exc_info=True, extra={'data': {'index': index, 'object': get_identifier(obj)}})
        if (len(iterable) > 0):
            # For now, commit no matter what, as we run into locking issues
            # otherwise.
            writer.commit()

    def remove(self, obj_or_string, commit=True):
        """Delete a single document, identified by object or identifier string."""
        if (not self.setup_complete):
            self.setup()
        self.index = self.index.refresh()
        whoosh_id = get_identifier(obj_or_string)
        try:
            self.index.delete_by_query(q=self.parser.parse((u'%s:"%s"' % (ID, whoosh_id))))
        except Exception as e:
            if (not self.silently_fail):
                raise
            self.log.error("Failed to remove document '%s' from Whoosh: %s", whoosh_id, e, exc_info=True)

    def clear(self, models=None, commit=True):
        """Delete the whole index, or only the documents of ``models``."""
        if (not self.setup_complete):
            self.setup()
        self.index = self.index.refresh()
        if (models is not None):
            assert isinstance(models, (list, tuple))
        try:
            if (models is None):
                self.delete_index()
            else:
                models_to_delete = []
                for model in models:
                    models_to_delete.append((u'%s:%s' % (DJANGO_CT, get_model_ct(model))))
                self.index.delete_by_query(q=self.parser.parse(u' OR '.join(models_to_delete)))
        except Exception as e:
            if (not self.silently_fail):
                raise
            if (models is not None):
                self.log.error("Failed to clear Whoosh index of models '%s': %s", ','.join(models_to_delete), e, exc_info=True)
            else:
                self.log.error('Failed to clear Whoosh index: %s', e, exc_info=True)

    def delete_index(self):
        """Physically remove the index storage, then re-create an empty one."""
        if (self.use_file_storage and os.path.exists(self.path)):
            shutil.rmtree(self.path)
        elif (not self.use_file_storage):
            self.storage.clean()
        # Recreate everything.
        self.setup()

    def optimize(self):
        if (not self.setup_complete):
            self.setup()
        self.index = self.index.refresh()
        self.index.optimize()

    def calculate_page(self, start_offset=0, end_offset=None):
        """Convert haystack's (start, end) offsets into Whoosh's 1-based
        (page_num, page_length)."""
        # Prevent against Whoosh throwing an error. Requires an end_offset
        # greater than 0.
        if ((end_offset is not None) and (end_offset <= 0)):
            end_offset = 1
        # Determine the page.
        page_num = 0
        if (end_offset is None):
            end_offset = 1000000
        if (start_offset is None):
            start_offset = 0
        page_length = (end_offset - start_offset)
        if (page_length and (page_length > 0)):
            page_num = int((start_offset / page_length))
        # Increment because Whoosh uses 1-based page numbers.
        page_num += 1
        return (page_num, page_length)

    # NOTE(review): the upstream backend decorates search() with @log_query;
    # a stray '_query' token here suggested the decorator line was mangled.
    # Restore '@log_query' once its import is confirmed at the top of the file.
    def search(self, query_string, sort_by=None, start_offset=0, end_offset=None, fields='', highlight=False, facets=None, date_facets=None, query_facets=None, narrow_queries=None, spelling_query=None, within=None, dwithin=None, distance_point=None, models=None, limit_to_registered_models=None, result_class=None, **kwargs):
        """Run a query and return ``{'results': [...], 'hits': N, ...}``."""
        if (not self.setup_complete):
            self.setup()
        # A zero-length query never matches anything.
        if (len(query_string) == 0):
            return {'results': [], 'hits': 0}
        query_string = force_text(query_string)
        # A one-character query (non-wildcard) gets nixed.
        if ((len(query_string) <= 1) and (query_string != u'*')):
            return {'results': [], 'hits': 0}
        reverse = False
        if (sort_by is not None):
            # Whoosh can only sort all fields in one direction.
            sort_by_list = []
            reverse_counter = 0
            for order_by in sort_by:
                if order_by.startswith('-'):
                    reverse_counter += 1
            if (reverse_counter and (reverse_counter != len(sort_by))):
                raise SearchBackendError('Whoosh requires all order_by fields to use the same sort direction')
            for order_by in sort_by:
                if order_by.startswith('-'):
                    sort_by_list.append(order_by[1:])
                    if (len(sort_by_list) == 1):
                        reverse = True
                else:
                    sort_by_list.append(order_by)
                    if (len(sort_by_list) == 1):
                        reverse = False
            sort_by = sort_by_list
        if (facets is not None):
            warnings.warn('Whoosh does not handle faceting.', Warning, stacklevel=2)
        if (date_facets is not None):
            warnings.warn('Whoosh does not handle date faceting.', Warning, stacklevel=2)
        if (query_facets is not None):
            warnings.warn('Whoosh does not handle query faceting.', Warning, stacklevel=2)
        narrowed_results = None
        self.index = self.index.refresh()
        if (limit_to_registered_models is None):
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
        if (models and len(models)):
            model_choices = sorted((get_model_ct(model) for model in models))
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []
        if (len(model_choices) > 0):
            if (narrow_queries is None):
                narrow_queries = set()
            narrow_queries.add(' OR '.join([('%s:%s' % (DJANGO_CT, rm)) for rm in model_choices]))
        narrow_searcher = None
        if (narrow_queries is not None):
            # Potentially expensive? I don't see another way to do it in Whoosh...
            narrow_searcher = self.index.searcher()
            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(self.parser.parse(force_text(nq)), limit=None)
                if (len(recent_narrowed_results) <= 0):
                    return {'results': [], 'hits': 0}
                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results
        self.index = self.index.refresh()
        if self.index.doc_count():
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query_string)
            if (parsed_query is None):
                return {'results': [], 'hits': 0}
            (page_num, page_length) = self.calculate_page(start_offset, end_offset)
            search_kwargs = {'pagelen': page_length, 'sortedby': sort_by, 'reverse': reverse}
            if (narrowed_results is not None):
                search_kwargs['filter'] = narrowed_results
            try:
                raw_page = searcher.search_page(parsed_query, page_num, **search_kwargs)
            except ValueError:
                if (not self.silently_fail):
                    raise
                return {'results': [], 'hits': 0, 'spelling_suggestion': None}
            # Because as of Whoosh 2.5.1, it will return the wrong page of
            # results if you request something too high.
            if (raw_page.pagenum < page_num):
                return {'results': [], 'hits': 0, 'spelling_suggestion': None}
            results = self._process_results(raw_page, highlight=highlight, query_string=query_string, spelling_query=spelling_query, result_class=result_class)
            searcher.close()
            if hasattr(narrow_searcher, 'close'):
                narrow_searcher.close()
            return results
        else:
            if self.include_spelling:
                if spelling_query:
                    spelling_suggestion = self.create_spelling_suggestion(spelling_query)
                else:
                    spelling_suggestion = self.create_spelling_suggestion(query_string)
            else:
                spelling_suggestion = None
            return {'results': [], 'hits': 0, 'spelling_suggestion': spelling_suggestion}

    def more_like_this(self, model_instance, additional_query_string=None, start_offset=0, end_offset=None, models=None, limit_to_registered_models=None, result_class=None, **kwargs):
        """Return documents similar to ``model_instance`` using Whoosh MLT."""
        if (not self.setup_complete):
            self.setup()
        field_name = self.content_field_name
        narrow_queries = set()
        narrowed_results = None
        self.index = self.index.refresh()
        if (limit_to_registered_models is None):
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
        if (models and len(models)):
            model_choices = sorted((get_model_ct(model) for model in models))
        elif limit_to_registered_models:
            model_choices = self.build_models_list()
        else:
            model_choices = []
        if (len(model_choices) > 0):
            if (narrow_queries is None):
                narrow_queries = set()
            narrow_queries.add(' OR '.join([('%s:%s' % (DJANGO_CT, rm)) for rm in model_choices]))
        if (additional_query_string and (additional_query_string != '*')):
            narrow_queries.add(additional_query_string)
        narrow_searcher = None
        if (narrow_queries is not None):
            narrow_searcher = self.index.searcher()
            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(self.parser.parse(force_text(nq)), limit=None)
                if (len(recent_narrowed_results) <= 0):
                    return {'results': [], 'hits': 0}
                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results
        (page_num, page_length) = self.calculate_page(start_offset, end_offset)
        self.index = self.index.refresh()
        raw_results = EmptyResults()
        searcher = None
        if self.index.doc_count():
            query = ('%s:%s' % (ID, get_identifier(model_instance)))
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query)
            results = searcher.search(parsed_query)
            if len(results):
                raw_results = results[0].more_like_this(field_name, top=end_offset)
        # Handle the case where the results have been narrowed.
        if ((narrowed_results is not None) and hasattr(raw_results, 'filter')):
            raw_results.filter(narrowed_results)
        try:
            raw_page = ResultsPage(raw_results, page_num, page_length)
        except ValueError:
            if (not self.silently_fail):
                raise
            return {'results': [], 'hits': 0, 'spelling_suggestion': None}
        # Because as of Whoosh 2.5.1, it will return the wrong page of
        # results if you request something too high.
        if (raw_page.pagenum < page_num):
            return {'results': [], 'hits': 0, 'spelling_suggestion': None}
        results = self._process_results(raw_page, result_class=result_class)
        if searcher:
            searcher.close()
        if hasattr(narrow_searcher, 'close'):
            narrow_searcher.close()
        return results

    def _process_results(self, raw_page, highlight=False, query_string='', spelling_query=None, result_class=None):
        """Convert a Whoosh results page into haystack ``SearchResult``s."""
        from haystack import connections
        results = []
        # It's important to grab the hits first before slicing. Otherwise, this
        # can cause pagination failures.
        hits = len(raw_page)
        if (result_class is None):
            result_class = SearchResult
        facets = {}
        spelling_suggestion = None
        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        for (doc_offset, raw_result) in enumerate(raw_page):
            score = (raw_page.score(doc_offset) or 0)
            (app_label, model_name) = raw_result[DJANGO_CT].split('.')
            additional_fields = {}
            model = haystack_get_model(app_label, model_name)
            if (model and (model in indexed_models)):
                # Loop-invariant: hoisted out of the per-field loop.
                index = unified_index.get_index(model)
                for (key, value) in raw_result.items():
                    string_key = str(key)
                    if ((string_key in index.fields) and hasattr(index.fields[string_key], 'convert')):
                        if index.fields[string_key].is_multivalued:
                            # BUG FIX: was 'len(value) is 0' (identity test).
                            if ((value is None) or (len(value) == 0)):
                                additional_fields[string_key] = []
                            else:
                                additional_fields[string_key] = value.split(',')
                        else:
                            additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = self._to_python(value)
                del additional_fields[DJANGO_CT]
                del additional_fields[DJANGO_ID]
                if highlight:
                    sa = StemmingAnalyzer()
                    formatter = WhooshHtmlFormatter('em')
                    terms = [token.text for token in sa(query_string)]
                    whoosh_result = whoosh_highlight(additional_fields.get(self.content_field_name), terms, sa, ContextFragmenter(), formatter)
                    additional_fields['highlighted'] = {self.content_field_name: [whoosh_result]}
                result = result_class(app_label, model_name, raw_result[DJANGO_ID], score, **additional_fields)
                results.append(result)
            else:
                hits -= 1
        if self.include_spelling:
            if spelling_query:
                spelling_suggestion = self.create_spelling_suggestion(spelling_query)
            else:
                spelling_suggestion = self.create_spelling_suggestion(query_string)
        return {'results': results, 'hits': hits, 'facets': facets, 'spelling_suggestion': spelling_suggestion}

    def create_spelling_suggestion(self, query_string):
        """Suggest a corrected spelling for each word of ``query_string``."""
        spelling_suggestion = None
        reader = self.index.reader()
        corrector = reader.corrector(self.content_field_name)
        cleaned_query = force_text(query_string)
        if (not query_string):
            return spelling_suggestion
        # Strip reserved words and characters before suggesting.
        for rev_word in self.RESERVED_WORDS:
            cleaned_query = cleaned_query.replace(rev_word, '')
        for rev_char in self.RESERVED_CHARACTERS:
            cleaned_query = cleaned_query.replace(rev_char, '')
        query_words = cleaned_query.split()
        suggested_words = []
        for word in query_words:
            suggestions = corrector.suggest(word, limit=1)
            if (len(suggestions) > 0):
                suggested_words.append(suggestions[0])
        spelling_suggestion = ' '.join(suggested_words)
        return spelling_suggestion

    def _from_python(self, value):
        """Convert a Python value to something Whoosh can store."""
        if hasattr(value, 'strftime'):
            if (not hasattr(value, 'hour')):
                # Promote a date to a midnight datetime.
                value = datetime(value.year, value.month, value.day, 0, 0, 0)
        elif isinstance(value, bool):
            if value:
                value = 'true'
            else:
                value = 'false'
        elif isinstance(value, (list, tuple)):
            value = u','.join([force_text(v) for v in value])
        elif isinstance(value, (six.integer_types, float)):
            # Leave it alone.
            pass
        else:
            value = force_text(value)
        return value

    def _to_python(self, value):
        """Best-effort conversion of a stored Whoosh value back to Python."""
        if (value == 'true'):
            return True
        elif (value == 'false'):
            return False
        if (value and isinstance(value, six.string_types)):
            possible_datetime = DATETIME_REGEX.search(value)
            if possible_datetime:
                date_values = possible_datetime.groupdict()
                for (dk, dv) in date_values.items():
                    date_values[dk] = int(dv)
                return datetime(date_values['year'], date_values['month'], date_values['day'], date_values['hour'], date_values['minute'], date_values['second'])
        try:
            # Attempt to interpret JSON-encoded container types.
            converted_value = json.loads(value)
            if isinstance(converted_value, (list, tuple, set, dict, six.integer_types, float, complex)):
                return converted_value
        except (TypeError, ValueError):
            # Best-effort only: fall through and return the raw value.
            pass
        return value
class ConvBnAct(nn.Module):
    """Convolution + norm/activation block with an optional residual skip.

    The skip connection (with stochastic depth) is only enabled when it is
    requested AND the tensor shape is preserved (stride 1, equal channels).
    """

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, dilation=1, group_size=0, pad_type='', skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.0):
        super(ConvBnAct, self).__init__()
        na_layer = get_norm_act_layer(norm_layer, act_layer)
        conv_groups = num_groups(group_size, in_chs)
        # A residual add is only valid when output shape == input shape.
        self.has_skip = skip and stride == 1 and in_chs == out_chs
        self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, groups=conv_groups, padding=pad_type)
        self.bn1 = na_layer(out_chs, inplace=True)
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()

    def feature_info(self, location):
        """Describe the feature tap point used by feature-extraction wrappers."""
        if location == 'expansion':
            return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
        return dict(module='', hook_type='', num_chs=self.conv.out_channels)

    def forward(self, x):
        out = self.bn1(self.conv(x))
        if not self.has_skip:
            return out
        # Stochastic depth on the residual branch, then add the shortcut.
        return self.drop_path(out) + x
def fmt_ria(ria, verbose=True, mip=False):
    """Format a repertoire irreducibility analysis (RIA) for display.

    Args:
        ria: The analysis object; provides phi values, mechanism/purview,
            partition, repertoires and tie counts.
        verbose (bool): Include the mechanism and direction lines.
        mip (bool): Label the partition as 'MIP' instead of 'Partition'.

    Returns:
        str: One newline-joined block of formatted lines.
    """
    if verbose:
        mechanism = f'Mechanism: {fmt_mechanism(ria.mechanism, ria.node_labels)}'
        direction = f'Direction: {ria.direction}'
    else:
        mechanism = ''
        direction = ''
    # Partition/repertoire details are only rendered at HIGH verbosity.
    if (config.REPR_VERBOSITY is HIGH):
        partition_name = ('MIP' if mip else 'Partition')
        partition = f'{partition_name}: '
        if ria.partition:
            partition += f'''
{indent(fmt_partition(ria.partition))}'''
        else:
            partition += 'empty'
        # States to mark inside the repertoire display (ties of the
        # specified state, when present).
        if (ria.specified_state is not None):
            mark_states = [specified.state for specified in ria.specified_state.ties]
        else:
            mark_states = []
        if (ria.repertoire is not None):
            # A size-1 repertoire is a single forward probability, not a
            # distribution worth tabulating.
            if (ria.repertoire.size == 1):
                repertoire = f'''Forward probability: {ria.repertoire}'''
                partitioned_repertoire = f'''Partitioned forward probability: {ria.partitioned_repertoire}'''
            else:
                repertoire = 'Repertoire:\n{}'.format(indent(fmt_repertoire(ria.repertoire, mark_states=mark_states)))
                partitioned_repertoire = 'Partitioned repertoire:\n{}'.format(indent(fmt_repertoire(ria.partitioned_repertoire, mark_states=mark_states)))
        else:
            repertoire = ''
            partitioned_repertoire = ''
    else:
        partition = ''
        repertoire = ''
        partitioned_repertoire = ''
    # Assemble output lines; selectivity is optional and spliced in between
    # the header lines and the repertoire/tie lines.
    data = (([f'{SMALL_PHI} = {fmt_number(ria.phi)}',
              f'Normalized {SMALL_PHI} = {fmt_number(ria.normalized_phi)}',
              f'{mechanism}',
              f'Purview: {fmt_mechanism(ria.purview, ria.node_labels)}',
              f'''Specified state: {ria.specified_state}''',
              f'{direction}',
              f'{partition}']
             + ([f'Selectivity: {ria.selectivity}'] if (ria.selectivity is not None) else []))
            + [f'{repertoire}',
               f'{partitioned_repertoire}',
               f'#(state ties): {ria.num_state_ties}',
               f'#(partition ties): {ria.num_partition_ties}'])
    # Not all RIA variants track purview ties.
    if hasattr(ria, 'num_purview_ties'):
        data.append(f'#(purview ties): {ria.num_purview_ties}')
    if (ria.reasons is not None):
        data.append(('Reasons: ' + ', '.join(map(str, ria.reasons))))
    return '\n'.join(data)
def main():
    """Run a daily moving-average alpha-model backtest (weekdays only).

    Wires up a backtest session (position sizing, commissions, data
    provider), preloads and verifies data for six dummy tickers, attaches
    the strategy to a daily order-placement event, and starts trading.
    """
    # Fraction of portfolio value risked per position by the sizer.
    initial_risk = 0.03
    start_date = str_to_date('2010-01-01')
    end_date = str_to_date('2011-12-31')
    data_provider = daily_data_provider
    # Configure the session before building it; order of set_* calls below
    # mirrors the builder's expected usage.
    session_builder = container.resolve(BacktestTradingSessionBuilder)
    session_builder.set_backtest_name('Moving Average Alpha Model Backtest no weekends')
    session_builder.set_position_sizer(InitialRiskPositionSizer, initial_risk=initial_risk)
    session_builder.set_commission_model(IBCommissionModel)
    session_builder.set_data_provider(data_provider)
    session_builder.set_frequency(Frequency.DAILY)
    ts = session_builder.build(start_date, end_date)
    # Fast/slow moving-average crossover model with a risk multiplier.
    model = MovingAverageAlphaModel(fast_time_period=5, slow_time_period=20, risk_estimation_factor=1.25, data_provider=ts.data_handler)
    model_tickers = [DummyTicker('AAA'), DummyTicker('BBB'), DummyTicker('CCC'), DummyTicker('DDD'), DummyTicker('EEE'), DummyTicker('FFF')]
    model_tickers_dict = {model: model_tickers}
    ts.use_data_preloading(model_tickers)
    # NOTE(review): this checksum is 31 hex characters -- one short of an
    # MD5 digest; confirm it was not truncated.
    ts.verify_preloaded_data('778bbaac65cb0a5ab88cf29a1cd8467')
    strategy = AlphaModelStrategy(ts, model_tickers_dict, use_stop_losses=True)
    # Trigger order calculation daily, skipping weekends.
    CalculateAndPlaceOrdersRegularEvent.set_daily_default_trigger_time()
    CalculateAndPlaceOrdersRegularEvent.exclude_weekends()
    strategy.subscribe(CalculateAndPlaceOrdersRegularEvent)
    ts.start_trading()
def add_matcher(output_dir, owner, data):
    """Write a GitHub Actions problem-matcher file and register it.

    Args:
        output_dir: Directory (``pathlib.Path``-like) to write into.
        owner: The matcher's owner name; also used as the file stem.
        data: The matcher definition; the ``owner`` key is filled in here.

    Side effects: creates ``<output_dir>/<owner>.json`` and prints the
    ``::add-matcher::`` workflow command registering it.
    """
    # Copy instead of mutating the caller's dict (previous version wrote
    # 'owner' into the argument in place).
    matcher = dict(data)
    matcher['owner'] = owner
    out_data = {'problemMatcher': [matcher]}
    output_file = (output_dir / '{}.json'.format(owner))
    with output_file.open('w', encoding='utf-8') as f:
        json.dump(out_data, f)
    print('::add-matcher::{}'.format(output_file))
class OggPage(object):
    """A single Ogg page: 27-byte header, lacing values, and packet data.

    Restored relative to the previous revision (decorators had apparently
    been stripped): ``size`` is a ``@property`` (it is used as an attribute
    throughout, e.g. ``page.size < default_size``), and the page-collection
    helpers are ``@staticmethod``/``@classmethod``. The CRC computation in
    :meth:`write` also lost its ``0xffffffff`` mask operand (``& )``),
    which is required to keep the inverted CRC in unsigned 32-bit range.
    """

    # Header defaults for a freshly constructed (empty) page.
    version = 0
    __type_flags = 0
    position = 0
    serial = 0
    sequence = 0
    offset = None
    complete = True

    def __init__(self, fileobj=None):
        """Parse one page from ``fileobj``, or create an empty page if None.

        Raises EOFError at end of stream, or ``error`` on malformed data.
        """
        self.packets = []
        if (fileobj is None):
            return
        self.offset = fileobj.tell()
        header = fileobj.read(27)
        if (len(header) == 0):
            raise EOFError
        try:
            (oggs, self.version, self.__type_flags, self.position, self.serial, self.sequence, crc, segments) = struct.unpack('<4sBBqIIiB', header)
        except struct.error:
            raise error(('unable to read full header; got %r' % header))
        if (oggs != b'OggS'):
            raise error(('read %r, expected %r, at 0x%x' % (oggs, b'OggS', (fileobj.tell() - 27))))
        if (self.version != 0):
            raise error(('version %r unsupported' % self.version))
        total = 0
        lacings = []
        lacing_bytes = fileobj.read(segments)
        if (len(lacing_bytes) != segments):
            raise error(('unable to read %r lacing bytes' % segments))
        # Lacing values < 255 terminate a packet; a trailing run summing to
        # a nonzero total with no terminator means the last packet continues
        # on the next page.
        for c in bytearray(lacing_bytes):
            total += c
            if (c < 255):
                lacings.append(total)
                total = 0
        if total:
            lacings.append(total)
            self.complete = False
        self.packets = [fileobj.read(l) for l in lacings]
        if ([len(p) for p in self.packets] != lacings):
            raise error('unable to read full data')

    def __eq__(self, other):
        """Two pages are equal iff they serialize to identical bytes."""
        try:
            return (self.write() == other.write())
        except AttributeError:
            return False

    __hash__ = object.__hash__

    def __repr__(self):
        attrs = ['version', 'position', 'serial', 'sequence', 'offset', 'complete', 'continued', 'first', 'last']
        values = [('%s=%r' % (attr, getattr(self, attr))) for attr in attrs]
        return ('<%s %s, %d bytes in %d packets>' % (type(self).__name__, ' '.join(values), sum(map(len, self.packets)), len(self.packets)))

    def write(self):
        """Serialize this page, computing the CRC over the zero-CRC header."""
        data = [struct.pack('<4sBBqIIi', b'OggS', self.version, self.__type_flags, self.position, self.serial, self.sequence, 0)]
        lacing_data = []
        for datum in self.packets:
            (quot, rem) = divmod(len(datum), 255)
            lacing_data.append(((b'\xff' * quot) + bchr(rem)))
        lacing_data = b''.join(lacing_data)
        # An incomplete final packet must not carry a 0 terminator lacing.
        if ((not self.complete) and lacing_data.endswith(b'\x00')):
            lacing_data = lacing_data[:(- 1)]
        data.append(bchr(len(lacing_data)))
        data.append(lacing_data)
        data.extend(self.packets)
        data = b''.join(data)
        # BUG FIX: the '& 0xffffffff' operand was missing; it masks the
        # inverted CRC into unsigned 32-bit range.
        crc = ((~ zlib.crc32(data.translate(cdata.bitswap), (- 1))) & 0xffffffff)
        crc = cdata.to_uint_be(crc).translate(cdata.bitswap)
        data = ((data[:22] + crc) + data[26:])
        return data

    @property
    def size(self) -> int:
        """Total byte size of the page (header + lacing + packet data)."""
        size = 27
        for datum in self.packets:
            (quot, rem) = divmod(len(datum), 255)
            size += (quot + 1)
        # An unterminated final packet of a multiple of 255 bytes has no
        # trailing 0 lacing byte.
        if ((not self.complete) and (rem == 0)):
            size -= 1
        size += sum(map(len, self.packets))
        return size

    def __set_flag(self, bit, val):
        mask = (1 << bit)
        if val:
            self.__type_flags |= mask
        else:
            self.__type_flags &= (~ mask)

    continued = property((lambda self: cdata.test_bit(self.__type_flags, 0)), (lambda self, v: self.__set_flag(0, v)), doc='The first packet is continued from the previous page.')
    first = property((lambda self: cdata.test_bit(self.__type_flags, 1)), (lambda self, v: self.__set_flag(1, v)), doc='This is the first page of a logical bitstream.')
    last = property((lambda self: cdata.test_bit(self.__type_flags, 2)), (lambda self, v: self.__set_flag(2, v)), doc='This is the last page of a logical bitstream.')

    @staticmethod
    def renumber(fileobj, serial, start):
        """Rewrite sequence numbers for stream ``serial``, starting at ``start``."""
        number = start
        while True:
            try:
                page = OggPage(fileobj)
            except EOFError:
                break
            else:
                if (page.serial != serial):
                    # Wrong stream, skip this page.
                    continue
                # Changing the sequence can change the CRC, so rewrite in place.
                fileobj.seek((- page.size), 1)
                page.sequence = number
                fileobj.write(page.write())
                fileobj.seek((page.offset + page.size), 0)
                number += 1

    @staticmethod
    def to_packets(pages, strict=False):
        """Join a list of pages (one logical stream) back into packets."""
        serial = pages[0].serial
        sequence = pages[0].sequence
        packets = []
        if strict:
            if pages[0].continued:
                raise ValueError('first packet is continued')
            if (not pages[(- 1)].complete):
                raise ValueError('last packet does not complete')
        elif (pages and pages[0].continued):
            packets.append([b''])
        for page in pages:
            if (serial != page.serial):
                raise ValueError(('invalid serial number in %r' % page))
            elif (sequence != page.sequence):
                raise ValueError(('bad sequence number in %r' % page))
            else:
                sequence += 1
            if page.packets:
                if page.continued:
                    packets[(- 1)].append(page.packets[0])
                else:
                    packets.append([page.packets[0]])
                packets.extend(([p] for p in page.packets[1:]))
        return [b''.join(p) for p in packets]

    @classmethod
    def _from_packets_try_preserve(cls, packets, old_pages):
        """Like from_packets, but preserve the old page layout when packet
        lengths are unchanged; otherwise fall back to from_packets."""
        old_packets = cls.to_packets(old_pages)
        if ([len(p) for p in packets] != [len(p) for p in old_packets]):
            # Doesn't match, fall back.
            return cls.from_packets(packets, old_pages[0].sequence)
        new_data = b''.join(packets)
        new_pages = []
        for old in old_pages:
            new = OggPage()
            new.sequence = old.sequence
            new.complete = old.complete
            new.continued = old.continued
            new.position = old.position
            for p in old.packets:
                (data, new_data) = (new_data[:len(p)], new_data[len(p):])
                new.packets.append(data)
            new_pages.append(new)
        assert (not new_data)
        return new_pages

    @staticmethod
    def from_packets(packets, sequence=0, default_size=4096, wiggle_room=2048):
        """Pack packets into pages of roughly ``default_size`` bytes.

        ``wiggle_room`` allows a page to overflow slightly rather than
        starting a new page for a small tail.
        """
        chunk_size = ((default_size // 255) * 255)
        pages = []
        page = OggPage()
        page.sequence = sequence
        for packet in packets:
            page.packets.append(b'')
            while packet:
                (data, packet) = (packet[:chunk_size], packet[chunk_size:])
                if ((page.size < default_size) and (len(page.packets) < 255)):
                    page.packets[(- 1)] += data
                else:
                    # Page full: finalize it and continue on a fresh page.
                    if page.packets[(- 1)]:
                        page.complete = False
                        if (len(page.packets) == 1):
                            page.position = (- 1)
                    else:
                        page.packets.pop((- 1))
                    pages.append(page)
                    page = OggPage()
                    page.continued = (not pages[(- 1)].complete)
                    page.sequence = (pages[(- 1)].sequence + 1)
                    page.packets.append(data)
                if (len(packet) < wiggle_room):
                    page.packets[(- 1)] += packet
                    packet = b''
        if page.packets:
            pages.append(page)
        return pages

    @classmethod
    def replace(cls, fileobj, old_pages, new_pages):
        """Overwrite ``old_pages`` in ``fileobj`` with ``new_pages``,
        renumbering and resizing the file as needed."""
        if ((not len(old_pages)) or (not len(new_pages))):
            raise ValueError('empty pages list not allowed')
        # Number the new pages starting from the first old page.
        first = old_pages[0].sequence
        for (page, seq) in zip(new_pages, range(first, (first + len(new_pages)))):
            page.sequence = seq
            page.serial = old_pages[0].serial
        new_pages[0].first = old_pages[0].first
        new_pages[0].last = old_pages[0].last
        new_pages[0].continued = old_pages[0].continued
        new_pages[(- 1)].first = old_pages[(- 1)].first
        new_pages[(- 1)].last = old_pages[(- 1)].last
        new_pages[(- 1)].complete = old_pages[(- 1)].complete
        if ((not new_pages[(- 1)].complete) and (len(new_pages[(- 1)].packets) == 1)):
            new_pages[(- 1)].position = (- 1)
        new_data = [cls.write(p) for p in new_pages]
        # Pad with empty chunks or merge the tail so old and new line up 1:1.
        pages_diff = (len(old_pages) - len(new_data))
        if (pages_diff > 0):
            new_data.extend(([b''] * pages_diff))
        elif (pages_diff < 0):
            new_data[(pages_diff - 1):] = [b''.join(new_data[(pages_diff - 1):])]
        # Replace pages one by one, adjusting offsets as sizes change.
        offset_adjust = 0
        new_data_end = None
        assert (len(old_pages) == len(new_data))
        for (old_page, data) in zip(old_pages, new_data):
            offset = (old_page.offset + offset_adjust)
            data_size = len(data)
            resize_bytes(fileobj, old_page.size, data_size, offset)
            fileobj.seek(offset, 0)
            fileobj.write(data)
            new_data_end = (offset + data_size)
            offset_adjust += (data_size - old_page.size)
        # The page count changed, so renumber the pages that follow.
        if (len(old_pages) != len(new_pages)):
            fileobj.seek(new_data_end, 0)
            serial = new_pages[(- 1)].serial
            sequence = (new_pages[(- 1)].sequence + 1)
            cls.renumber(fileobj, serial, sequence)

    @staticmethod
    def find_last(fileobj, serial, finishing=False):
        """Find the last page of stream ``serial``.

        Tries the tail of the file first; falls back to a full scan. When
        ``finishing`` is true, pages with position -1 are skipped.
        """
        # For non-muxed streams, look at the last page.
        seek_end(fileobj, (256 * 256))
        data = fileobj.read()
        try:
            index = data.rindex(b'OggS')
        except ValueError:
            raise error('unable to find final Ogg header')
        bytesobj = BytesIO(data[index:])

        def is_valid(page):
            return ((not finishing) or (page.position != (- 1)))

        best_page = None
        try:
            page = OggPage(bytesobj)
        except error:
            pass
        else:
            if ((page.serial == serial) and is_valid(page)):
                if page.last:
                    return page
                else:
                    best_page = page
            else:
                best_page = None
        # The stream is muxed, or the tail page was invalid: scan the file.
        fileobj.seek(0)
        try:
            page = OggPage(fileobj)
            while True:
                if (page.serial == serial):
                    if is_valid(page):
                        best_page = page
                    if page.last:
                        break
                page = OggPage(fileobj)
            return best_page
        except error:
            return best_page
        except EOFError:
            return best_page
class ROKS(rks.KohnShamDFT, rohf.ROHF):
    """Restricted open-shell Kohn-Sham DFT for a periodic cell at one k-point."""
    # Reuse the closed-shell RKS implementation of the SAP initial guess.
    get_vsap = rks.RKS.get_vsap
    init_guess_by_vsap = rks.RKS.init_guess_by_vsap
    # get_veff is the module-level function defined in this file.
    get_veff = get_veff
    # The electronic energy is evaluated with the unrestricted-KS formula.
    energy_elec = pyscf.dft.uks.energy_elec

    def __init__(self, cell, kpt=numpy.zeros(3), xc='LDA,VWN', exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
        # Initialize the SCF machinery first, then the DFT layer on top.
        rohf.ROHF.__init__(self, cell, kpt, exxdiv=exxdiv)
        rks.KohnShamDFT.__init__(self, xc)

    def dump_flags(self, verbose=None):
        # Dump both parents' settings; returns self for chaining.
        rohf.ROHF.dump_flags(self, verbose)
        rks.KohnShamDFT.dump_flags(self, verbose)
        return self

    def to_hf(self):
        """Convert to the corresponding Hartree-Fock (ROHF) object,
        transferring compatible attributes."""
        from pyscf.pbc import scf
        return self._transfer_attrs_(scf.ROHF(self.cell, self.kpt))