code
stringlengths
101
5.91M
class Pipeline:
    """Queues transfer jobs and dispatches them onto a provisioned Dataplane.

    Collects CopyJob/SyncJob instances via queue_copy/queue_sync, plans a
    gateway topology with the configured planner, and runs the jobs either
    synchronously (`start`) or asynchronously (`start_async`).
    """

    def __init__(self, clientid: str, provisioner: 'Provisioner', transfer_config: TransferConfig, max_instances: Optional[int] = 1, n_connections: Optional[int] = 64, planning_algorithm: Optional[str] = 'direct', debug: Optional[bool] = False):
        """Initialize the pipeline.

        Args:
            clientid: identifier of the owning client.
            provisioner: cloud resource provisioner used to create gateways.
            transfer_config: transfer tuning/configuration options.
            max_instances: max gateway VMs per region.
            n_connections: parallel connections per gateway.
            planning_algorithm: one of 'direct', 'src_one_sided', 'dst_one_sided'.
            debug: enable verbose debugging (gateway log copies, etc.).

        Raises:
            ValueError: if planning_algorithm is not recognized.
        """
        self.clientid = clientid
        self.max_instances = max_instances
        self.n_connections = n_connections
        self.provisioner = provisioner
        self.transfer_config = transfer_config
        # BUG FIX: original read "self. = urllib3.PoolManager(...)" — the
        # attribute name was missing entirely (a syntax error). Named http_pool.
        self.http_pool = urllib3.PoolManager(retries=urllib3.Retry(total=3))
        self.provisioning_lock = threading.Lock()
        self.provisioned = False
        # Per-run directory for transfer logs (was created twice in the original;
        # the duplicate could even produce two different timestamped dirs).
        self.transfer_dir = (tmp_log_dir / 'transfer_logs') / datetime.now().strftime('%Y%m%d_%H%M%S')
        self.transfer_dir.mkdir(exist_ok=True, parents=True)
        self.dataplane = None
        self.planning_algorithm = planning_algorithm
        if self.planning_algorithm == 'direct':
            self.planner = MulticastDirectPlanner(self.max_instances, self.n_connections, self.transfer_config)
        elif self.planning_algorithm == 'src_one_sided':
            self.planner = DirectPlannerSourceOneSided(self.max_instances, self.n_connections, self.transfer_config)
        elif self.planning_algorithm == 'dst_one_sided':
            self.planner = DirectPlannerDestOneSided(self.max_instances, self.n_connections, self.transfer_config)
        else:
            raise ValueError(f'No such planning algorithm {planning_algorithm}')
        self.debug = debug
        self.jobs_to_dispatch: List[TransferJob] = []
        self.pending_transfers: List[TransferProgressTracker] = []
        # FIX: original annotation was Dict[(TopologyPlanGateway, compute.Server)]
        # (a tuple key, which Dict does not accept) — corrected to key/value form.
        self.bound_nodes: Dict[TopologyPlanGateway, compute.Server] = {}

    def create_dataplane(self, debug):
        """Plan the queued jobs and build (but do not provision) a Dataplane."""
        topo = self.planner.plan(self.jobs_to_dispatch)
        dp = Dataplane(self.clientid, topo, self.provisioner, self.transfer_config, str(self.transfer_dir), debug=debug)
        return dp

    def start(self, debug=False, progress=False):
        """Provision, run all queued jobs to completion, then deprovision.

        Returns the Dataplane used for the run. Re-raises any transfer error
        (the original silently swallowed exceptions); gateways are always
        deprovisioned via the finally block.
        """
        dp = self.create_dataplane(debug)
        try:
            dp.provision(spinner=True)
            if progress:
                from skyplane.cli.impl.progress_bar import ProgressBarTransferHook
                tracker = dp.run_async(self.jobs_to_dispatch, hooks=ProgressBarTransferHook(dp.topology.dest_region_tags))
            else:
                tracker = dp.run_async(self.jobs_to_dispatch)
            tracker.join()
            if debug:
                dp.copy_gateway_logs()
        except Exception:
            # Preserve logs for post-mortem, then propagate instead of
            # silently returning a half-finished dataplane.
            dp.copy_gateway_logs()
            raise
        finally:
            dp.deprovision(spinner=True)
        return dp

    def start_async(self, debug=False):
        """Provision and kick off the queued jobs without waiting.

        Returns the TransferProgressTracker. On failure, gateway logs are
        copied and the exception propagates (the original returned None,
        which made callers fail later with an opaque AttributeError).
        """
        dp = self.create_dataplane(debug)
        try:
            dp.provision(spinner=False)
            tracker = dp.run_async(self.jobs_to_dispatch)
            if debug:
                dp.copy_gateway_logs()
            return tracker
        except Exception:
            dp.copy_gateway_logs()
            raise

    def queue_copy(self, src: str, dst: 'Union[str, List[str]]', recursive: bool = False) -> str:
        """Queue a copy job from src to one or more destinations; returns job uuid.

        NOTE: the original annotation `(str or List[str])` evaluated to plain
        `str`; the string annotation above documents the real contract without
        requiring a new import.
        """
        if isinstance(dst, str):
            dst = [dst]
        job = CopyJob(src, dst, recursive, requester_pays=self.transfer_config.requester_pays)
        logger.fs.debug(f'[SkyplaneClient] Queued copy job {job}')
        self.jobs_to_dispatch.append(job)
        return job.uuid

    def queue_sync(self, src: str, dst: 'Union[str, List[str]]') -> str:
        """Queue a sync job from src to one or more destinations; returns job uuid."""
        if isinstance(dst, str):
            dst = [dst]
        job = SyncJob(src, dst, requester_pays=self.transfer_config.requester_pays)
        logger.fs.debug(f'[SkyplaneClient] Queued sync job {job}')
        self.jobs_to_dispatch.append(job)
        return job.uuid

    def estimate_total_cost(self):
        """Estimate transfer cost in dollars: total GB times the topology's $/GB."""
        total_size = 0
        for job in self.jobs_to_dispatch:
            total_size += job.size_gb()
        topo = self.planner.plan(self.jobs_to_dispatch)
        return total_size * topo.cost_per_gb
def set_framebuffer_size_callback(window, cbfun):
    """Install a GLFW framebuffer-size callback for *window*.

    Keeps the ctypes callback object alive in a module-level repository
    (keyed by the window's address) so it is not garbage-collected while
    GLFW still holds the raw function pointer. Returns the previously
    registered Python callback, if one existed and was non-null.
    """
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
    previous_callback = _framebuffer_size_callback_repository.get(window_addr)
    # GLFW treats a null pointer as "remove callback".
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWframebuffersizefun(cbfun)
    _framebuffer_size_callback_repository[window_addr] = (cbfun, c_cbfun)
    _glfw.glfwSetFramebufferSizeCallback(window, c_cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
# NOTE(review): the original source began with a truncated decorator fragment
# ("..._module()"), most likely a stripped "@MODELS.register_module()" (or
# similar registry decorator) — restore it from the upstream project.
class BITHead(BaseDecodeHead):
    """BIT (Bitemporal Image Transformer) decode head for change detection.

    Splits the input feature along channels into two temporal branches,
    tokenizes each (semantic attention tokens or pooled tokens), runs a
    transformer encoder over the concatenated tokens and a decoder per
    branch, and predicts change from the absolute branch difference.
    """

    def __init__(self, in_channels=256, channels=32, embed_dims=64, enc_depth=1, enc_with_pos=True, dec_depth=8, num_heads=8, drop_rate=0.0, pool_size=2, pool_mode='max', use_tokenizer=True, token_len=4, pre_upsample=2, upsample_size=4, norm_cfg=dict(type='LN'), act_cfg=dict(type='ReLU', inplace=True), **kwargs):
        super(BITHead, self).__init__(in_channels, channels, **kwargs)
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.embed_dims = embed_dims
        self.use_tokenizer = use_tokenizer
        self.num_heads = num_heads
        if not use_tokenizer:
            # Reshaped-token mode: tokens come from pooling to a fixed grid.
            self.pool_size = pool_size
            self.pool_mode = pool_mode
            self.token_len = pool_size * pool_size
        else:
            self.token_len = token_len
            # FIX: in_channels of the attention conv was hard-coded to 32,
            # which only worked with the default channels=32; use
            # self.channels so any channel width is supported.
            self.conv_att = ConvModule(self.channels, self.token_len, 1, conv_cfg=self.conv_cfg)
        self.enc_with_pos = enc_with_pos
        if enc_with_pos:
            # Learned positional embedding over both branches' tokens.
            self.enc_pos_embedding = nn.Parameter(torch.randn(1, self.token_len * 2, self.channels))
        # Upsample then project each temporal branch to `channels`.
        self.pre_process = Sequential(Upsample(scale_factor=pre_upsample, mode='bilinear', align_corners=self.align_corners), ConvModule(self.in_channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg))
        self.encoder = ModuleList()
        for _ in range(enc_depth):
            block = TransformerEncoder(self.channels, self.embed_dims, self.num_heads, drop_rate=drop_rate, norm_cfg=self.norm_cfg)
            self.encoder.append(block)
        self.decoder = ModuleList()
        for _ in range(dec_depth):
            block = TransformerDecoder(self.channels, self.embed_dims, self.num_heads, drop_rate=drop_rate, norm_cfg=self.norm_cfg)
            self.decoder.append(block)
        self.upsample = Upsample(scale_factor=upsample_size, mode='bilinear', align_corners=self.align_corners)

    def _forward_semantic_tokens(self, x):
        """Compute token_len attention-weighted tokens from feature map x."""
        (b, c) = x.shape[:2]
        att_map = self.conv_att(x)
        att_map = att_map.reshape((b, self.token_len, 1, -1))
        att_map = F.softmax(att_map, dim=-1)
        x = x.reshape((b, 1, c, -1))
        # Weighted sum over spatial positions -> (b, token_len, c).
        tokens = (x * att_map).sum(-1)
        return tokens

    def _forward_reshaped_tokens(self, x):
        """Pool x to a pool_size x pool_size grid and flatten into tokens."""
        if self.pool_mode == 'max':
            x = F.adaptive_max_pool2d(x, (self.pool_size, self.pool_size))
        elif self.pool_mode == 'avg':
            x = F.adaptive_avg_pool2d(x, (self.pool_size, self.pool_size))
        else:
            x = x
        tokens = x.permute((0, 2, 3, 1)).flatten(1, 2)
        return tokens

    def _forward_feature(self, inputs):
        """Produce the change feature map from the bitemporal input."""
        inputs = self._transform_inputs(inputs)
        # Channel-wise split into the two temporal branches.
        (x1, x2) = torch.chunk(inputs, 2, dim=1)
        x1 = self.pre_process(x1)
        x2 = self.pre_process(x2)
        if self.use_tokenizer:
            token1 = self._forward_semantic_tokens(x1)
            token2 = self._forward_semantic_tokens(x2)
        else:
            token1 = self._forward_reshaped_tokens(x1)
            token2 = self._forward_reshaped_tokens(x2)
        token = torch.cat([token1, token2], dim=1)
        if self.enc_with_pos:
            token += self.enc_pos_embedding
        for (i, _encoder) in enumerate(self.encoder):
            token = _encoder(token)
        (token1, token2) = torch.chunk(token, 2, dim=1)
        for _decoder in self.decoder:
            (b, c, h, w) = x1.shape
            # Flatten to sequences for cross-attention, then restore maps.
            x1 = x1.permute((0, 2, 3, 1)).flatten(1, 2)
            x2 = x2.permute((0, 2, 3, 1)).flatten(1, 2)
            x1 = _decoder(x1, token1)
            x2 = _decoder(x2, token2)
            x1 = x1.transpose(1, 2).reshape((b, c, h, w))
            x2 = x2.transpose(1, 2).reshape((b, c, h, w))
        y = torch.abs(x1 - x2)
        y = self.upsample(y)
        return y

    def forward(self, inputs):
        """Run the head and the classification segment layer."""
        output = self._forward_feature(inputs)
        output = self.cls_seg(output)
        return output
def move_early_end_date_to_start_date(patient: RawPatient) -> RawPatient:
    """Clamp inconsistent visit_occurrence events so end never precedes start.

    Mutates the patient's events in place and returns the same patient.
    """
    for ev in patient.events:
        ends_before_start = (
            ev.end is not None
            and ev.omop_table == 'visit_occurrence'
            and ev.end < ev.start
        )
        if ends_before_start:
            ev.end = ev.start
    return patient
def get_version():
    """Return the Z3 library version as (major, minor, build, revision)."""
    # Z3_get_version fills the four out-parameters in place.
    fields = [ctypes.c_uint(0) for _ in range(4)]
    Z3_get_version(*fields)
    return tuple(f.value for f in fields)
class Skeletonize3d:
    """ASV benchmark for 3-D skeletonization across skimage versions."""

    def setup(self, *args):
        """Pick the right skeletonize function and build a small 3-D stack."""
        try:
            # skeletonize_3d was folded into skeletonize in skimage >= 0.16.
            old_api = Version(skimage.__version__) < Version('0.16.0')
            self.skeletonize = morphology.skeletonize_3d if old_api else morphology.skeletonize
        except AttributeError:
            raise NotImplementedError('3d skeletonize unavailable')
        self.image = np.stack([util.invert(data.horse())] * 5)

    def time_skeletonize_3d(self):
        """Time a single 3-D skeletonization."""
        self.skeletonize(self.image)

    def peakmem_reference(self, *args):
        """Baseline peak-memory measurement (intentionally empty)."""
        pass

    def peakmem_skeletonize_3d(self):
        """Peak memory of a single 3-D skeletonization."""
        self.skeletonize(self.image)
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Merge *input_feature* with *residual*, projecting the shortcut if needed.

    When the spatial size or channel count differs, the shortcut is reshaped
    with a 1x1 ConvLSTM2D (plus BatchNormalization) before the element-wise add.
    """
    in_shape = K.int_shape(input_feature)
    res_shape = K.int_shape(residual)
    # Strides are the (rounded) spatial downsampling ratios between the two tensors.
    stride_w = int(round(in_shape[ROW_AXIS] / res_shape[ROW_AXIS]))
    stride_h = int(round(in_shape[COL_AXIS] / res_shape[COL_AXIS]))
    channels_match = in_shape[CHANNEL_AXIS] == res_shape[CHANNEL_AXIS]

    shortcut = input_feature
    if stride_w > 1 or stride_h > 1 or not channels_match:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = ConvLSTM2D(filters=res_shape[CHANNEL_AXIS],
                              kernel_size=(1, 1),
                              strides=(stride_w, stride_h),
                              padding='valid',
                              kernel_initializer='he_normal',
                              kernel_regularizer=l2(0.0001),
                              return_sequences=True,
                              name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut)
    return add([shortcut, residual])
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
    """Export a detection model to Caffe2 protobufs via ONNX.

    Traces the model with the given example inputs, converts the ONNX graph to
    Caffe2 (init_net, predict_net), applies a series of graph optimizations,
    and returns the pair (predict_net, init_net).
    """
    # Work on a copy so tracing/graph rewrites never touch the caller's model.
    model = copy.deepcopy(model)
    assert isinstance(model, torch.nn.Module)
    # The model must know how to stamp its metadata into the exported nets.
    assert hasattr(model, 'encode_additional_info')
    logger.info('Exporting a {} model via ONNX ...'.format(type(model).__name__))
    onnx_model = export_onnx_model(model, (tensor_inputs,))
    (init_net, predict_net) = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    # Log a human-readable operator table of the raw converted graph.
    ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
    table = tabulate(ops_table, headers=['type', 'input', 'output'], tablefmt='pipe')
    logger.info(('ONNX export Done. Exported predict_net (before optimizations):\n' + colored(table, 'cyan')))
    fuse_alias_placeholder(predict_net, init_net)
    # GPU-specific cleanup is only needed when any example input is non-CPU.
    if any(((t.device.type != 'cpu') for t in tensor_inputs)):
        fuse_copy_between_cpu_and_gpu(predict_net)
        remove_dead_end_ops(init_net)
        _assign_device_option(predict_net, init_net, tensor_inputs)
    (params, device_options) = get_params_from_init_net(init_net)
    (predict_net, params) = remove_reshape_for_fc(predict_net, params)
    # Rebuild init_net from the (possibly modified) parameter set.
    init_net = construct_init_net_from_params(params, device_options)
    group_norm_replace_aten_with_caffe2(predict_net)
    model.encode_additional_info(predict_net, init_net)
    logger.info('Operators used in predict_net: \n{}'.format(_op_stats(predict_net)))
    logger.info('Operators used in init_net: \n{}'.format(_op_stats(init_net)))
    return (predict_net, init_net)
class LeakyReluChannel(PiecewiseLinearChannel):
    """Leaky-ReLU channel expressed as two linear regions.

    Negative inputs are scaled by *slope*; non-negative inputs pass through
    with unit slope. Both regions share x0 = 0.
    """

    def __init__(self, slope):
        self.slope = slope
        negative_region = dict(zmin=-np.inf, zmax=0, slope=slope, x0=0)
        positive_region = dict(zmin=0, zmax=np.inf, slope=1, x0=0)
        # Region order (positive first) matches the parent's expectations.
        super().__init__(name='l-relu', regions=[positive_region, negative_region])
def register_types(module):
    """Register ns-3 propagation-module types with the pybindgen module.

    Generated-style binding code: declares the module's enums and classes
    (including ns.core types the bindings depend on), wires up class
    hierarchies and template instantiations, adds Vector type aliases, and
    registers nested C++ namespaces.
    """
    root_module = module.get_root()
    # Enums belonging to the propagation module itself.
    module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'])
    module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'])
    # Core-support classes imported from ns.core.
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    module.add_class('CallbackBase', import_from_module='ns.core')
    # DefaultDeleter template instantiations for each ref-counted base.
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    module.add_class('Hasher', import_from_module='ns.core')
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('PropagationCache', template_parameters=['ns3::JakesProcess'])
    # Ref-counted Object base with ns-3's Ref/Unref memory policy.
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('Vector2D', import_from_module='ns.core')
    module.add_class('Vector3D', import_from_module='ns.core')
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    # Propagation model hierarchy (this module's own classes).
    module.add_class('PropagationDelayModel', parent=root_module['ns3::Object'])
    module.add_class('PropagationLossModel', parent=root_module['ns3::Object'])
    module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
    module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # SimpleRefCount instantiations for each attribute/callback/trace base.
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
    module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ItuR1411LosPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('ItuR1411NlosOverRooftopPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('JakesProcess', parent=root_module['ns3::Object'])
    module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('Kun2600MhzPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
    module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('OkumuraHataPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    # Concrete CallbackImpl template instantiations used by this module.
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    # Vector3D* aliases: ns-3 exposes them under the short 'Vector' names.
    typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
    typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
    typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
    typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    # Nested C++ namespaces registered by their own helper functions.
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
def test_quantity_configuration_namespace(config_ns):
    """Quantity config items accept bare numbers and unit-tagged values alike."""
    # A bare number is interpreted in the item's default unit (km).
    config_ns.a.b.param3 = 3
    stored = config_ns['a']['b']['param3']
    assert_almost_equal(stored.to(u.km).value, 3)
    # A Quantity in another unit is converted on read.
    config_ns.a.b.param3 = 5000 * u.m
    stored = config_ns['a']['b']['param3']
    assert_almost_equal(stored.to(u.km).value, 5)
class CaseInsensitiveDict(MutableMapping):
    """A dict whose key lookups are case-insensitive.

    The casing of the most recently *set* key is preserved and is what
    iteration yields; lookups, deletion, and equality all compare keys
    case-insensitively. Equality against any Mapping is supported.
    """

    def __init__(self, data=None, **kwargs):
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowered key, but remember the caller's casing.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the originally-cased keys.
        return (cased for cased, _ in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Yield (lowercased_key, value) pairs — useful for comparisons."""
        return ((lowered, pair[1]) for lowered, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        other_ci = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other_ci.lower_items())

    def copy(self):
        # _store.values() is an iterable of (cased_key, value) pairs,
        # which the MutableMapping.update in __init__ consumes directly.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
# NOTE(review): the original began with the bare fragment
# "_BUILDERS.register_module()"; the leading "@" (and likely part of the
# registry name, e.g. OPTIMIZER_BUILDERS) appears truncated. Reconstructed
# here as a decorator — confirm the registry name against upstream.
@_BUILDERS.register_module()
class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor):
    """Optimizer constructor applying layer-wise learning-rate decay for ViT.

    Each parameter is assigned to a group keyed by (layer id, decay/no_decay);
    groups closer to the input get smaller lr via layer_decay_rate ** depth.
    """

    def add_params(self, params, module, prefix='', is_dcn_module=None):
        """Populate *params* with per-layer parameter groups from *module*.

        Reads 'num_layers' and 'layer_decay_rate' from self.paramwise_cfg;
        biases, 1-D params, pos_embed and cls_token get zero weight decay.
        """
        parameter_groups = {}
        print(self.paramwise_cfg)
        # +2 accounts for the embedding layer and the final (post-backbone) layer.
        num_layers = (self.paramwise_cfg.get('num_layers') + 2)
        layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate')
        print(('Build LayerDecayOptimizerConstructor %f - %d' % (layer_decay_rate, num_layers)))
        weight_decay = self.base_wd
        for (name, param) in module.named_parameters():
            if (not param.requires_grad):
                continue
            # Norm weights / biases / special tokens are exempt from weight decay.
            if ((len(param.shape) == 1) or name.endswith('.bias') or (name in ('pos_embed', 'cls_token'))):
                group_name = 'no_decay'
                this_weight_decay = 0.0
            else:
                group_name = 'decay'
                this_weight_decay = weight_decay
            layer_id = get_num_layer_for_vit(name, num_layers)
            group_name = ('layer_%d_%s' % (layer_id, group_name))
            if (group_name not in parameter_groups):
                # Deeper layers (larger layer_id) decay less: rate ** (depth from top).
                scale = (layer_decay_rate ** ((num_layers - layer_id) - 1))
                parameter_groups[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': (scale * self.base_lr)}
            parameter_groups[group_name]['params'].append(param)
            parameter_groups[group_name]['param_names'].append(name)
        (rank, _) = get_dist_info()
        if (rank == 0):
            # Log group composition (without the tensors themselves) on rank 0 only.
            to_display = {}
            for key in parameter_groups:
                to_display[key] = {'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay']}
            print(('Param groups = %s' % json.dumps(to_display, indent=2)))
        params.extend(parameter_groups.values())
def Prune2Sparse(cur, id2node, name2id, ops, model):
    """Depth-first traversal converting unvisited FC_Prune nodes to sparse form.

    Visits *cur* (calling transFCRelu and marking it visited when it is an
    unvisited 'FC_Prune' node), then recurses into all of its child ops.

    Args:
        cur: current graph node (has .visited, .optype, .ops mapping).
        id2node, name2id, ops, model: context forwarded to transFCRelu.
    """
    if (not cur.visited) and (cur.optype == 'FC_Prune'):
        transFCRelu(cur, id2node, name2id, ops, model)
        cur.visited = True
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError
    # on Python 3 — replaced with .items().
    for (name, child) in cur.ops.items():
        Prune2Sparse(child, id2node, name2id, ops, model)
def get_args():
    """Parse CLI arguments for training/evaluation and prepare the output dir.

    Returns the parsed namespace with `dout` (output directory, created if
    missing) and `dropout` (parsed into a name->rate dict) filled in.
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dexp', help='root experiment folder', default='exp')
    parser.add_argument('--model', help='which model to use', default='glad', choices=get_models())
    parser.add_argument('--epoch', help='max epoch to run for', default=50, type=int)
    parser.add_argument('--demb', help='word embedding size', default=400, type=int)
    parser.add_argument('--dhid', help='hidden state size', default=200, type=int)
    parser.add_argument('--batch_size', help='batch size', default=50, type=int)
    parser.add_argument('--lr', help='learning rate', default=0.001, type=float)
    parser.add_argument('--stop', help='slot to early stop on', default='joint_goal')
    parser.add_argument('--resume', help='save directory to resume from')
    parser.add_argument('-n', '--nick', help='nickname for model', default='default')
    parser.add_argument('--seed', default=42, help='random seed', type=int)
    parser.add_argument('--test', action='store_true', help='run in evaluation only mode')
    parser.add_argument('--gpu', type=int, help='which GPU to use')
    parser.add_argument('--dropout', nargs='*', help='dropout rates', default=['emb=0.2', 'local=0.2', 'global=0.2'])
    args = parser.parse_args()

    args.dout = os.path.join(args.dexp, args.model, args.nick)
    # Turn "name=rate" specs into a name -> float mapping.
    rates = {}
    for spec in args.dropout:
        name, _, rate = spec.partition('=')
        rates[name] = float(rate)
    args.dropout = rates
    if not os.path.isdir(args.dout):
        os.makedirs(args.dout)
    return args
def test_nested_sdfg_with_return_value():
    """A nested SDFG's return value should compose with outer arithmetic.

    FIX: the inner and outer functions call .to_sdfg() / are invoked as DaCe
    programs, which requires the @dace.program decorator — it was evidently
    stripped from the original (plain functions have no .to_sdfg()).
    """
    @dace.program
    def nested(A: dace.float64[20]):
        return A + 20

    sdfg = nested.to_sdfg()

    @dace.program
    def mainprog(A: dace.float64[30], B: dace.float64[20]):
        # Call the pre-built nested SDFG on a slice of A.
        return sdfg(A[10:]) + B

    A = np.random.rand(30)
    B = np.random.rand(20)
    expected = (A[10:] + 20) + B
    assert np.allclose(mainprog(A, B), expected)
def test_direct_schema(testdir):
    """End-to-end check that a generated test runs one Hypothesis case against POST /v1/users.

    NOTE(review): the embedded test-module source below looks mangled by
    extraction — the leading '\\(method="POST")\\(max_examples=1)' fragments
    appear to be decorator argument lists whose decorator names were lost
    (plausibly '@schema.parametrize(...)' and '@settings(...)'). The string
    is deliberately left byte-for-byte unchanged here; restore it from the
    upstream test suite before relying on this fixture.
    """
    testdir.make_test('\(method="POST")\(max_examples=1)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.full_path == "/v1/users"\n assert case.method == "POST"\n assert_list(case.body)\n assert_str(case.body[0])\n', paths={'/users': {'post': {'parameters': [{'schema': {'type': 'array', 'items': {'type': 'string'}, 'minItems': 1}, 'in': 'body', 'name': 'object', 'required': True}], 'responses': {'200': {'description': 'OK'}}}}})
    result = testdir.runpytest('-v', '-s')
    # Exactly one generated test should pass, having executed one example.
    result.assert_outcomes(passed=1)
    result.stdout.re_match_lines(['Hypothesis calls: 1$'])
class InvalidParameterError(Exception):
    """Raised when a supplied parameter fails validation."""

    # Name of the offending parameter.
    param: str

    def __init__(self, param: str) -> None:
        super().__init__()
        self.param = param

    def __str__(self) -> str:
        return 'the parameter {} is invalid, refer info method'.format(self.param)
class SphericalBesselJ(BuiltinFunction):
    """Symbolic spherical Bessel function of the first kind, j_n(z).

    Registered as 'spherical_bessel_J' with two arguments (order n, argument z)
    and mapped onto the Mathematica / Maxima / SymPy equivalents.
    """

    def __init__(self):
        conversions = dict(mathematica='SphericalBesselJ', maxima='spherical_bessel_j', sympy='jn')
        BuiltinFunction.__init__(self, 'spherical_bessel_J', nargs=2, conversions=conversions)

    def _evalf_(self, n, z, parent, algorithm=None):
        """Numerical evaluation, delegated to mpmath's besselj via the spherical wrapper."""
        return _mpmath_utils_call(spherical_bessel_f, 'besselj', n, z, parent=parent)

    def _latex_(self):
        # Name used when the function itself (not a call) is typeset.
        return 'j_n'

    def _print_latex_(self, n, z):
        """LaTeX for a call: j_{n}\\left(z\\right)."""
        return 'j_{{{}}}\\left({}\\right)'.format(latex(n), latex(z))

    def _derivative_(self, n, z, diff_param):
        """d/dz j_n(z) = j_{n-1}(z) - ((n+1)/z) j_n(z).

        Only the derivative with respect to z (diff_param == 1) and integer
        (or symbolic non-numeric) order is supported.
        """
        if (SR(n).is_numeric() and (not SR(n).is_integer())):
            raise NotImplementedError('derivative of spherical function with noninteger index')
        if (diff_param == 1):
            return (spherical_bessel_J((n - 1), z) - (((n + 1) / z) * spherical_bessel_J(n, z)))
        else:
            raise NotImplementedError('derivative with respect to order')
def generate_outputs_neural_style_transfer(content_paths, style_paths, output_path, args):
    """Generate paired real/fake validation images for style-transfer evaluation.

    For each content image: saves the matching ground-truth image from
    args.data_path/valB under output_path/real, then fits the selected
    style-transfer model against a randomly chosen style image and writes the
    stylized result under output_path/fake.

    NOTE(review): indentation reconstructed from collapsed source — the
    model-fitting branch is taken to be inside the per-content loop; confirm
    against upstream.
    """
    # Ensure the two output subdirectories exist.
    for img_type in ['real', 'fake']:
        img_path = os.path.join(output_path, img_type)
        if (not os.path.isdir(img_path)):
            os.mkdir(img_path)
    count = 0
    for content_path in content_paths:
        count += 1
        # Ground-truth target shares its filename with the content image.
        target_name = content_path[(content_path.rfind('/') + 1):]
        target_path = os.path.join(args.data_path, 'valB', target_name)
        target_image = NeuralStyleTransfer_DataLoader(None, None).load(target_path)
        img_name = ((((('val' + '_img') + str(count).zfill(4)) + '_') + 'real') + '.jpg')
        img_path = os.path.join(output_path, 'real', img_name)
        # Values appear to be in [0, 255]; normalized before JPEG encoding.
        image_jpg = tensor_to_image((target_image / 255))
        image_jpg.save(img_path)
        # Pair the content with a random style image for this sample.
        style_path = random.choice(style_paths)
        dataloader = NeuralStyleTransfer_DataLoader(content_path, style_path)
        (content_image, style_image) = dataloader.load_dataset()
        img_name = ((((('val' + '_img') + str(count).zfill(4)) + '_') + 'fake') + '.jpg')
        img_path = os.path.join(output_path, 'fake', img_name)
        if (args.model == 'neural_style_transfer'):
            model = NeuralStyleTransfer()
            model.build_model()
            model.configure_optimizers()
            model.fit(content_image, style_image, output_path=img_path)
        elif (args.model == 'fast_neural_style_transfer'):
            model = FastNeuralStyleTransfer()
            model.build_model()
            model.fit(content_image, style_image, output_path=img_path)
class All(LoopBasedReplacement):
    """Replacement for the Fortran ``ALL`` intrinsic, lowered to a loop."""

    class Transformation(AnyAllCountTransformation):

        def __init__(self, ast):
            super().__init__(ast)

        def _result_init_value(self):
            # ALL starts out true (1) and is falsified on the first miss.
            return '1'

        def _result_loop_update(self, node: ast_internal_classes.FNode):
            # When the loop condition fires, assign 0 to the result variable.
            return ast_internal_classes.BinOp_Node(lval=copy.deepcopy(node.lval), op='=', rval=ast_internal_classes.Int_Literal_Node(value='0'), line_number=node.line_number)

        def _loop_condition(self):
            # ALL fails when the element-wise condition does NOT hold.
            return ast_internal_classes.UnOp_Node(op='not', lval=self.cond)

    # NOTE(review): defined without self/cls -- presumably only ever called on
    # the class itself; confirm before adding @staticmethod.
    def func_name() -> str:
        return '__dace_all'
def main():
    """Cythonize ``sys.argv[1]`` into ``sys.argv[2]``.

    Any remaining CLI arguments are forwarded to ``cython`` verbatim; the
    current working directory is added as an include dir.  Raises
    ``CalledProcessError`` if cython fails (``check=True``).
    """
    (in_fname, out_fname) = (op.abspath(p) for p in sys.argv[1:3])
    sbp.run(((['cython', '-3', '--fast-fail', '--output-file', out_fname, '--include-dir', os.getcwd()] + sys.argv[3:]) + [in_fname]), check=True)
class MatVarReader():
    """Abstract reader interface for a single MAT-file variable.

    Concrete subclasses implement header parsing and array construction;
    this base class only fixes the interface.
    """

    def __init__(self, file_reader):
        # file_reader: owning file-level reader (unused in the base class).
        pass

    def read_header(self):
        """Read and return the variable header (placeholder)."""
        pass

    def array_from_header(self, header):
        """Build an array from a previously read header (placeholder)."""
        pass
def process_shape(axis_list, axis_num, in_shape_orig, shape_dims_orig):
    """Normalise a reduction's input shape and axis list to exactly 4 dims.

    Size-1 dims are squeezed out, adjacent non-reduced dims are merged while
    more than 4 dims remain, and short shapes are right-aligned by padding the
    front with 1s.  ``axis_list`` is rewritten in place to index the
    normalised shape.  Returns ``(pos, in_shape)`` -- the normalised rank and
    the padded shape (a FW_MAX_SHAPE_DIMS-long list).
    """
    is_reduce_orig = ([False] * FW_MAX_SHAPE_DIMS)
    is_reduce = ([False] * FW_MAX_SHAPE_DIMS)
    in_shape = ([0] * FW_MAX_SHAPE_DIMS)
    for i in range(FW_MAX_SHAPE_DIMS):
        in_shape[i] = 1
        is_reduce[i] = is_reduce_orig[i] = False
    for i in range(axis_num):
        is_reduce_orig[axis_list[i]] = True
    (pos, reduce_pos) = (0, 0)
    # Squeeze out size-1 dims; remap reduce axes onto the squeezed layout.
    for i in range(shape_dims_orig):
        if (in_shape_orig[i] == 1):
            # Reducing a size-1 dim is a no-op, drop it entirely.
            is_reduce_orig[i] = False
            continue
        if is_reduce_orig[i]:
            axis_list[reduce_pos] = pos
            reduce_pos += 1
            is_reduce[pos] = True
        in_shape[pos] = in_shape_orig[i]
        pos += 1
    if (pos < 4):
        # Right-align into 4 dims (pad the front with 1s) and shift the
        # reduce axes by the same offset.
        for i in range(3, (- 1), (- 1)):
            if (i < (4 - pos)):
                in_shape[i] = 1
            else:
                in_shape[i] = in_shape[((i + pos) - 4)]
        for i in range(reduce_pos):
            axis_list[i] += (4 - pos)
        pos = 4
    elif (pos > 4):
        # Merge runs of adjacent non-reduced dims until only 4 remain.
        shape_dims = pos
        pos = 0
        reduce_pos = 0
        minimum_merged_dims = shape_dims
        cur_dims = 0
        for i in range(1, shape_dims):
            if ((not is_reduce[(i - 1)]) and (not is_reduce[i])):
                minimum_merged_dims -= 1
        for i in range(1, shape_dims):
            if ((not is_reduce[(i - 1)]) and (not is_reduce[i]) and ((shape_dims - cur_dims) > 4)):
                # Fold dim i into the current merged dim.
                in_shape[pos] *= in_shape[i]
                cur_dims += 1
            else:
                if is_reduce[(i - 1)]:
                    axis_list[reduce_pos] = pos
                    reduce_pos += 1
                pos += 1
                in_shape[pos] = in_shape[i]
        if is_reduce[(shape_dims - 1)]:
            axis_list[reduce_pos] = pos
            reduce_pos += 1
        pos += 1
    shape_sum = 0
    for i in range(FW_MAX_SHAPE_DIMS):
        shape_sum += in_shape[i]
    # Special case: shape looks like (1,1,1,W) with a last-axis-only reduce
    # (assumes FW_MAX_SHAPE_DIMS == 8 so the unused slots each contribute 1
    # to shape_sum -- TODO confirm).  Split W into H*W when it exceeds the
    # single-pass local-memory reduce capacity.
    if (((shape_sum - in_shape[3]) == 7) and (axis_list[0] == 3)):
        imm_res1 = int(((tpu_local_mem_size_per_npu - (3 * bank_size)) / 2))
        imm_res2 = int((((imm_res1 / bank_size) * bank_size) - 64))
        reduce_hw_min = int(((imm_res2 / 64) * 16))
        if (in_shape[3] > reduce_hw_min):
            shape_h = int(find_smaller_factor(in_shape[3]))
            shape_w = int((in_shape[3] / shape_h))
            in_shape[2] = shape_h
            in_shape[3] = shape_w
            axis_list[0] = 2
            axis_list[1] = 3
            # NOTE(review): axis_num is a local rebind only -- the caller
            # never sees this update; confirm intent.
            axis_num = 2
            return (pos, in_shape)
    axis_num = reduce_pos
    return (pos, in_shape)
def bfill_lowertriangle(A: torch.Tensor, vec: torch.Tensor):
    """Write ``vec`` into the strictly-lower-triangular entries of ``A``.

    The entries below the main diagonal of the last two dimensions (row-major
    ``tril`` order with ``k=-1``) are overwritten in place; ``A`` is returned
    for convenience.
    """
    rows, cols = np.tril_indices(A.size(-2), k=-1, m=A.size(-1))
    A[..., rows, cols] = vec
    return A
def get_normalization(dataset):
    """Return the episode-return range of ``dataset`` scaled by 1/1000.

    Episode returns are accumulated from ``dataset['rewards']`` and segmented
    wherever ``dataset['dones_float']`` is truthy; trailing partial episodes
    are ignored.
    """
    episode_returns = []
    running = 0
    for reward, done in zip(dataset['rewards'], dataset['dones_float']):
        running += reward
        if done:
            episode_returns.append(running)
            running = 0
    return (max(episode_returns) - min(episode_returns)) / 1000
class SENet(nn.Module):
    """SENet-style ResNet for 32x32 inputs (e.g. CIFAR-10).

    Conv stem, four stages of ``block`` at 64/128/256/512 planes, 4x4 average
    pool, then a linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stages 2-4 halve the spatial resolution via a stride-2 first block.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first uses ``stride``."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 pool assumes a 32x32 input (4x4 feature map after 3 stride-2
        # stages) -- TODO confirm against the training pipeline.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
class Geometric(Distribution):
    """Geometric distribution: number of failures before the first success in
    Bernoulli(p) trials, supported on {0, 1, 2, ...}.

    Exactly one of ``probs`` (success probability in (0, 1]) or ``logits``
    (log-odds) must be given; the other parametrization is derived lazily on
    first access and cached.
    """

    arg_constraints = {'probs': constraints.unit_interval}
    support = constraints.nonnegative_integer

    class _LazyProperty:
        """Non-data descriptor: computes the value on first access and caches
        it in the instance ``__dict__``.

        Being non-data (no ``__set__``) lets ``__init__`` assign the attribute
        directly, shadowing the descriptor -- which is how the user-supplied
        parametrization is stored.  (The source contained corrupted bare
        ``_property`` statements here; this restores the intended
        lazy-property behaviour.)
        """

        def __init__(self, fn):
            self.fn = fn

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.fn(obj)
            obj.__dict__[self.fn.__name__] = value
            return value

    def __init__(self, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError('Either `probs` or `logits` must be specified, but not both.')
        if probs is not None:
            (self.probs,) = broadcast_all(probs)
            if not self.probs.gt(0).all():
                raise ValueError('All elements of probs must be greater than 0')
        else:
            (self.logits,) = broadcast_all(logits)
        probs_or_logits = probs if probs is not None else logits
        # Scalar parameters yield an empty batch shape.
        if isinstance(probs_or_logits, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = probs_or_logits.size()
        super(Geometric, self).__init__(batch_shape, validate_args=validate_args)

    def mean(self):
        """E[X] = 1/p - 1.  Kept as a plain method (not a property) so the
        existing call sites ``dist.mean()`` keep working."""
        return (1.0 / self.probs) - 1.0

    def variance(self):
        """Var[X] = (1 - p) / p^2."""
        return ((1.0 / self.probs) - 1.0) / self.probs

    @_LazyProperty
    def logits(self):
        # Derived from probs on first access, then cached on the instance.
        return probs_to_logits(self.probs, is_binary=True)

    @_LazyProperty
    def probs(self):
        # Derived from logits on first access, then cached on the instance.
        return logits_to_probs(self.logits, is_binary=True)

    def sample(self, sample_shape=torch.Size()):
        """Inverse-CDF sampling: floor(log(U) / log(1 - p))."""
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            # tiny lower bound avoids log(0); _finfo comes from the module.
            u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
            return (u.log() / (-self.probs).log1p()).floor()

    def log_prob(self, value):
        """log P(X = value) = value * log(1 - p) + log(p)."""
        if self._validate_args:
            self._validate_sample(value)
        (value, probs) = broadcast_all(value, self.probs.clone())
        # At p == 1, log(1 - p) = -inf; the value == 0 term should contribute
        # 0 there, so zero the offending probs to keep log_prob finite.
        probs[(probs == 1) & (value == 0)] = 0
        return value * (-probs).log1p() + self.probs.log()

    def entropy(self):
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none') / self.probs
def main():
    """CLI entry point for training/evaluating the coreference model.

    Parses hyper-parameters, derives a deterministic model directory name from
    the important options (md5 of their ordered repr), sets up data/log/config
    paths, and launches an ``Experiment``.
    """
    parser = argparse.ArgumentParser()
    # --- Paths and dataset selection ---
    parser.add_argument('-base_data_dir', default='../data/', help='Root directory of data', type=str)
    parser.add_argument('-base_model_dir', default='../models', help='Root folder storing model runs', type=str)
    parser.add_argument('-dataset', default='litbank', choices=['litbank', 'ontonotes'], type=str)
    parser.add_argument('-conll_scorer', type=str, help='Root folder storing model runs', default='../resources/lrec2020-coref/reference-coreference-scorers/scorer.pl')
    # --- Encoder / mention options ---
    parser.add_argument('-model_size', default='large', type=str, help='BERT model type')
    parser.add_argument('-doc_enc', default='overlap', type=str, choices=['independent', 'overlap'], help='BERT model type')
    parser.add_argument('-pretrained_bert_dir', default='../resources', type=str, help='SpanBERT model location')
    parser.add_argument('-max_segment_len', default=512, type=int, help='Max segment length of BERT segments.')
    parser.add_argument('-max_span_width', default=20, type=int, help='Max span width.')
    parser.add_argument('-ment_emb', default='attn', choices=['attn', 'endpoint'], type=str)
    parser.add_argument('-top_span_ratio', default=0.3, type=float, help='Ratio of top spans proposed as mentions.')
    # --- Memory / clustering options ---
    parser.add_argument('-mem_type', default='learned', choices=['learned', 'lru', 'unbounded', 'unbounded_no_ignore'], help='Memory type.')
    parser.add_argument('-num_cells', default=20, type=int, help='Number of memory cells.')
    parser.add_argument('-mlp_size', default=3000, type=int, help='MLP size used in the model')
    parser.add_argument('-mlp_depth', default=1, type=int, help='Number of hidden layers in other MLPs')
    parser.add_argument('-entity_rep', default='wt_avg', type=str, choices=['learned_avg', 'wt_avg'], help='Entity representation.')
    parser.add_argument('-emb_size', default=20, type=int, help='Embedding size of features.')
    parser.add_argument('-cross_val_split', default=0, type=int, help='Cross validation split to be used.')
    # --- Training options ---
    parser.add_argument('-new_ent_wt', help='Weight of new entity term in coref loss', default=1.0, type=float)
    parser.add_argument('-num_train_docs', default=None, type=int, help='Number of training docs.')
    parser.add_argument('-max_training_segments', default=None, type=int, help='Maximum number of BERT segments in a document.')
    parser.add_argument('-sample_invalid', help='Sample prob. of invalid mentions during training', default=0.2, type=float)
    parser.add_argument('-dropout_rate', default=0.3, type=float, help='Dropout rate')
    parser.add_argument('-label_smoothing_wt', default=0.0, type=float, help='Label Smoothing')
    parser.add_argument('-max_epochs', help='Maximum number of epochs', default=30, type=int)
    parser.add_argument('-seed', default=0, help='Random seed to get different runs', type=int)
    parser.add_argument('-init_lr', help='Initial learning rate', default=0.0002, type=float)
    parser.add_argument('-no_singletons', help='No singletons.', default=False, action='store_true')
    parser.add_argument('-eval', help='Evaluate model', default=False, action='store_true')
    parser.add_argument('-slurm_id', help='Slurm ID', default=None, type=str)
    args = parser.parse_args()
    # Force the dataset-appropriate max span width regardless of CLI value.
    if (args.dataset == 'litbank'):
        args.max_span_width = 20
    elif (args.dataset == 'ontonotes'):
        args.max_span_width = 30
    else:
        args.max_span_width = 20
    # Only these options participate in the run's identity hash; changing any
    # of them yields a different model directory.
    opt_dict = OrderedDict()
    imp_opts = ['model_size', 'max_segment_len', 'ment_emb', 'doc_enc', 'max_span_width', 'top_span_ratio', 'mem_type', 'num_cells', 'entity_rep', 'mlp_size', 'mlp_depth', 'emb_size', 'dropout_rate', 'seed', 'init_lr', 'new_ent_wt', 'sample_invalid', 'max_training_segments', 'label_smoothing_wt', 'dataset', 'num_train_docs', 'cross_val_split']
    for (key, val) in vars(args).items():
        if (key in imp_opts):
            opt_dict[key] = val
    # Stable hash of the important options -> deterministic run directory.
    str_repr = str(opt_dict.items())
    hash_idx = hashlib.md5(str_repr.encode('utf-8')).hexdigest()
    model_name = ('coref_' + str(hash_idx))
    model_dir = path.join(args.base_model_dir, model_name)
    args.model_dir = model_dir
    print(model_dir)
    best_model_dir = path.join(model_dir, 'best_models')
    args.best_model_dir = best_model_dir
    if (not path.exists(model_dir)):
        os.makedirs(model_dir)
    if (not path.exists(best_model_dir)):
        os.makedirs(best_model_dir)
    # Litbank uses cross-validation splits; ontonotes has a single split.
    if (args.dataset == 'litbank'):
        args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}/{args.cross_val_split}')
        args.conll_data_dir = path.join(args.base_data_dir, f'{args.dataset}/conll/{args.cross_val_split}')
    else:
        args.data_dir = path.join(args.base_data_dir, f'{args.dataset}/{args.doc_enc}')
        args.conll_data_dir = path.join(args.base_data_dir, f'{args.dataset}/conll')
    print(args.data_dir)
    # Mention detector checkpoint from a previous (mention-model) run.
    args.pretrained_mention_model = path.join(path.join(args.base_model_dir, get_mention_model_name(args)), 'best_models/model.pth')
    print(args.pretrained_mention_model)
    log_dir = path.join(model_dir, 'logs')
    if (not path.exists(log_dir)):
        os.makedirs(log_dir)
    # Persist the important options for reproducibility.
    config_file = path.join(model_dir, 'config')
    with open(config_file, 'w') as f:
        for (key, val) in opt_dict.items():
            logging.info(('%s: %s' % (key, val)))
            f.write(('%s: %s\n' % (key, val)))
    Experiment(args, **vars(args))
class PseKNC():
    """Pseudo k-tupler nucleotide composition (type-1 PseKNC) feature encoder.

    k: k-tuple length; lamada: number of sequence-order correlation tiers;
    w: weight of the sequence-order effect.
    """

    def __init__(self, k=3, lamada=1, w=0.5):
        self.k = k
        self.lamada = lamada
        self.w = w
        # Validates the parameter combination up front (raises on invalid).
        check_psenac(self.lamada, self.w, self.k)

    def make_vec(self, input_data, extra_phyche_index=None):
        """Encode the sequences in ``input_data`` into PseKNC vectors.

        ``extra_phyche_index`` optionally supplies additional physicochemical
        indices merged with the built-in ones.
        """
        (sequence_list, phyche_value) = get_sequence_list_and_phyche_value_pseknc(input_data, extra_phyche_index)
        return make_old_pseknc_vector(sequence_list, self.lamada, self.w, self.k, phyche_value, theta_type=1)
def _get_gold_ctx_dict(file: str) -> Tuple[(Dict[(str, ReaderPassage)], Dict[(str, str)])]:
    """Load gold passage info from a SQuAD-style JSON file.

    Returns ``(question -> ReaderPassage, tokenized_question -> question)``.
    Both the raw question and its token-joined form are used as keys so
    lookups work from either representation.  Duplicate questions are logged
    and the last occurrence wins.
    """
    gold_passage_infos = {}
    original_questions = {}
    with open(file, 'r', encoding='utf-8') as f:
        logger.info(('Reading file %s' % file))
        data = json.load(f)['data']
    for sample in data:
        question = sample['question']
        # Fall back to the raw question when no tokenized form is present.
        question_from_tokens = (sample['question_tokens'] if ('question_tokens' in sample) else question)
        original_questions[question_from_tokens] = question
        title = sample['title'].lower()
        context = sample['context']
        rp = ReaderPassage(sample['example_id'], text=context, title=title)
        if (question in gold_passage_infos):
            # Log collisions for debugging; the new entry overwrites below.
            logger.info('Duplicate question %s', question)
            rp_exist = gold_passage_infos[question]
            logger.info('Duplicate question gold info: title new =%s | old title=%s', title, rp_exist.title)
            logger.info('Duplicate question gold info: new ctx =%s ', context)
            logger.info('Duplicate question gold info: old ctx =%s ', rp_exist.passage_text)
        gold_passage_infos[question] = rp
        gold_passage_infos[question_from_tokens] = rp
    return (gold_passage_infos, original_questions)
class HeuristicalFunction():
    """Wrap a function ``f`` so a heuristic pre-processing step can fill in /
    adjust its keyword arguments before each call.

    Calling the wrapper with ``heuristic=False`` skips the heuristic step.
    Positional arguments are mapped onto ``f``'s parameter names, merged with
    the keyword arguments, and ``f`` is invoked with the resulting dict.
    """

    def __call__(self, *args, **kwds):
        complete_dict = copy(kwds)
        # Pop the control flag so it is never forwarded to ``f`` (forwarding
        # it used to raise TypeError for functions without a ``heuristic``
        # parameter).
        heuristic = complete_dict.pop('heuristic', True)
        # Map positional arguments onto f's parameter names.
        for (k, v) in zip(self.argnames, args):
            complete_dict[k] = v
        if heuristic:
            complete_dict = self.heuristicFunction(complete_dict)
        return self.f(**complete_dict)

    def __init__(self, f, heuristic_function):
        # getargspec was removed in Python 3.11; getfullargspec returns the
        # same first four fields (args, varargs, varkw, defaults).
        from inspect import getfullargspec
        spec = getfullargspec(f)
        (self.argnames, self.varargs, self.varopts, self.defaults) = spec[:4]
        if hasattr(f, 'options'):
            self.options = f.options
        elif self.defaults:
            # Map the trailing parameter names onto their default values.
            self.options = dict(zip(self.argnames[-len(self.defaults):], self.defaults))
        else:
            # No defaults at all -> getfullargspec reports None; the old code
            # crashed on len(None) here.
            self.options = {}
        self.heuristicFunction = heuristic_function
        self.f = f
        self.__doc__ = f.__doc__
def random_select_images(path, save_path, num=100):
    """Copy ``num`` random COCO images and dump their annotations to JSON.

    ``path`` is the COCO annotation file; images are copied from the
    hard-coded train2017 root to ``save_path/image{num}/`` and the per-file
    annotation dict is written to ``save_path/image{num}_annotation.json``.
    """
    coco_api = COCO(path)
    # NOTE(review): image root is hard-coded to this machine's layout.
    data_path = '/data1/coco2017/train2017/'
    img_ids = list(coco_api.imgs.keys())
    random.shuffle(img_ids)
    img_ids = img_ids[:num]
    image_save_path = (save_path + 'image{}/'.format(num))
    if (not os.path.exists(image_save_path)):
        os.makedirs(image_save_path)
    data = {}
    for img_id in img_ids:
        image_info = coco_api.imgs[img_id]
        name = image_info['file_name']
        # Keep the full annotation list keyed by file name.
        data[name] = coco_api.imgToAnns[img_id]
        src_file = (data_path + name)
        tgt_file = (image_save_path + name)
        shutil.copyfile(src_file, tgt_file)
    print('copy images to {}'.format(image_save_path))
    save_info_path = (save_path + 'image{}_annotation.json'.format(num))
    json_data = json.dumps(data, indent=4)
    with open(save_info_path, 'w') as f:
        f.write(json_data)
    print('save the annotation to {}'.format(save_info_path))
class OccrelNet(nn.Module):
    """Predicts per-voxel occupancy-relevance logits from a 3-D feature grid.

    A small 3-D conv head (feat3D_dim -> 64 -> 1) produces logits; training
    uses a balanced logistic loss over positive/negative voxels, split by
    occupied/free space.
    """

    def __init__(self):
        super(OccrelNet, self).__init__()
        print('OccrelNet...')
        self.hidden_dim = 64
        self.net = nn.Sequential(nn.Conv3d(in_channels=hyp.feat3D_dim, out_channels=self.hidden_dim, kernel_size=3, padding=1), nn.LeakyReLU(), nn.Conv3d(in_channels=self.hidden_dim, out_channels=1, kernel_size=1, padding=0)).cuda()

    def compute_loss(self, pred, pos, neg, occ, free, valid, summ_writer):
        """Balanced logistic loss on logits ``pred``.

        ``pos``/``neg`` are the label masks, ``occ``/``free`` split space into
        occupied/free, ``valid`` masks out unsupervised voxels.  The four
        (pos/neg x occ/free) partitions are averaged separately and summed so
        each partition contributes equally regardless of voxel counts.
        """
        label = ((pos * 2.0) - 1.0)
        # Numerically stable log(1 + exp(-label * pred)).
        a = ((- label) * pred)
        b = F.relu(a)
        loss = (b + torch.log((torch.exp((- b)) + torch.exp((a - b)))))
        mask_ = ((pos + neg) > 0.0).float()
        # Visualisation: mean over one spatial axis for a 2-D summary image.
        loss_vis = torch.mean(((loss * mask_) * valid), dim=3)
        summ_writer.summ_oned('occrel/prob_loss', loss_vis)
        pos_occ_loss = utils_basic.reduce_masked_mean(loss, ((pos * valid) * occ))
        pos_free_loss = utils_basic.reduce_masked_mean(loss, ((pos * valid) * free))
        neg_occ_loss = utils_basic.reduce_masked_mean(loss, ((neg * valid) * occ))
        neg_free_loss = utils_basic.reduce_masked_mean(loss, ((neg * valid) * free))
        balanced_loss = (((pos_occ_loss + pos_free_loss) + neg_occ_loss) + neg_free_loss)
        return balanced_loss

    def forward(self, feat, occrel_g=None, occ_g=None, free_g=None, valid=None, summ_writer=None, suffix=''):
        """Return ``(total_loss, occrel_e)``.

        ``occrel_e`` are sigmoid probabilities; when ground truth ``occrel_g``
        is given, accuracy stats are summarised and the balanced loss is added
        into ``total_loss`` with weight ``hyp.occrel_coeff``.
        """
        total_loss = torch.tensor(0.0).cuda()
        occrel_e_ = self.net(feat)
        occrel_e = F.sigmoid(occrel_e_)
        occrel_e_binary = torch.round(occrel_e)
        if (occrel_g is not None):
            # Per-voxel agreement masks for positives and negatives.
            pos_match = (occrel_g * torch.eq(occrel_e_binary, occrel_g).float())
            neg_match = ((1.0 - occrel_g) * torch.eq((1.0 - occrel_e_binary), (1.0 - occrel_g)).float())
            either_match = torch.clamp((pos_match + neg_match), 0.0, 1.0)
            either_have = occrel_g.clone()
            acc_pos = utils_basic.reduce_masked_mean(pos_match, (occrel_g * valid))
            acc_neg = utils_basic.reduce_masked_mean(neg_match, ((1.0 - occrel_g) * valid))
            acc_total = utils_basic.reduce_masked_mean(either_match, (either_have * valid))
            acc_bal = ((acc_pos + acc_neg) * 0.5)
            summ_writer.summ_scalar('unscaled_occrel/acc_pos', acc_pos.cpu().item())
            summ_writer.summ_scalar('unscaled_occrel/acc_neg', acc_neg.cpu().item())
            summ_writer.summ_scalar('unscaled_occrel/acc_total', acc_total.cpu().item())
            summ_writer.summ_scalar('unscaled_occrel/acc_bal', acc_bal.cpu().item())
            prob_loss = self.compute_loss(occrel_e_, occrel_g, (1.0 - occrel_g), occ_g, free_g, valid, summ_writer)
            total_loss = utils_misc.add_loss('occrel/prob_loss', total_loss, prob_loss, hyp.occrel_coeff, summ_writer)
        if (summ_writer is not None):
            if (occrel_g is not None):
                summ_writer.summ_occ('occrel/occrel_g', occrel_g)
                summ_writer.summ_oned('occrel/occrel_g_', occrel_g, bev=True, norm=False)
            summ_writer.summ_occ('occrel/occrel_e', occrel_e)
            summ_writer.summ_oned('occrel/occrel_e', occrel_e, bev=True, norm=False)
        return (total_loss, occrel_e)
def test_simple_dataset():
    """Constructing an InstructionDataset from a malformed example dict must
    raise AssertionError."""
    with pytest.raises(AssertionError):
        dataset = InstructionDataset(DATASET_WRONG_EXAMPLE_DICT)
class DIVO(ImageDataset):
    """DIVO person re-identification dataset in Market-1501-style layout.

    Image file names encode ``<pid>_c<camid>``; pid -1 marks junk images and
    is skipped.  Query and gallery both come from ``bounding_box_test``.
    """

    # pids treated as distractors/junk by the evaluation protocol.
    _junk_pids = [0, (- 1)]
    dataset_dir = 'ReID_format'

    def __init__(self, root='', divo=False, **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.data_dir = self.dataset_dir
        self.train_dir = osp.join(self.data_dir, 'bounding_box_train')
        # Query and gallery intentionally share the same test folder.
        self.query_dir = osp.join(self.data_dir, 'bounding_box_test')
        self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test')
        self.extra_gallery_dir = osp.join(self.data_dir, 'images')
        self.divo = divo
        required_files = [self.data_dir, self.train_dir, self.query_dir, self.gallery_dir]
        self.check_before_run(required_files)
        # Training ids are relabelled to a contiguous 0..N-1 range.
        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)
        super(DIVO, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, relabel=False):
        """Parse ``<pid>_c<camid>...jpg`` files into (path, pid, camid) tuples."""
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        data = []
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                # Junk image, ignored entirely.
                continue
            if relabel:
                pid = pid2label[pid]
            data.append((img_path, pid, camid))
        return data
class _nchypergeom_gen(rv_discrete):
    """Shared base for Fisher's and Wallenius' noncentral hypergeometric
    distributions.

    Subclasses set ``rvs_name`` (the BiasedUrn sampler method name) and
    ``dist`` (the BiasedUrn distribution class).  Shapes: population size M,
    number of "type 1" objects n, draws N, odds ratio odds.
    """

    rvs_name = None
    dist = None

    def _shape_info(self):
        return [_ShapeInfo('M', True, (0, np.inf), (True, False)), _ShapeInfo('n', True, (0, np.inf), (True, False)), _ShapeInfo('N', True, (0, np.inf), (True, False)), _ShapeInfo('odds', False, (0, np.inf), (False, False))]

    def _get_support(self, M, n, N, odds):
        # Rename into urn terms: N total, m1 of type 1, n draws.
        (N, m1, n) = (M, n, N)
        m2 = (N - m1)
        # Can't draw more type-1 objects than exist, nor fewer than forced.
        x_min = np.maximum(0, (n - m2))
        x_max = np.minimum(n, m1)
        return (x_min, x_max)

    def _argcheck(self, M, n, N, odds):
        (M, n) = (np.asarray(M), np.asarray(n))
        (N, odds) = (np.asarray(N), np.asarray(odds))
        # M, n, N must be non-negative integers; odds positive; n, N <= M.
        cond1 = ((M.astype(int) == M) & (M >= 0))
        cond2 = ((n.astype(int) == n) & (n >= 0))
        cond3 = ((N.astype(int) == N) & (N >= 0))
        cond4 = (odds > 0)
        cond5 = (N <= M)
        cond6 = (n <= M)
        return (((((cond1 & cond2) & cond3) & cond4) & cond5) & cond6)

    def _rvs(self, M, n, N, odds, size=None, random_state=None):
        # FIX: the decorator below was a corrupted bare expression statement
        # (`_vectorize_rvs_over_shapes` with no `@`), so _rvs1 was never
        # vectorized over broadcastable shape parameters.
        @_vectorize_rvs_over_shapes
        def _rvs1(M, n, N, odds, size, random_state):
            length = np.prod(size)
            urn = _PyStochasticLib3()
            rv_gen = getattr(urn, self.rvs_name)
            rvs = rv_gen(N, n, M, odds, length, random_state)
            rvs = rvs.reshape(size)
            return rvs
        return _rvs1(M, n, N, odds, size=size, random_state=random_state)

    def _pmf(self, x, M, n, N, odds):
        (x, M, n, N, odds) = np.broadcast_arrays(x, M, n, N, odds)
        if (x.size == 0):
            # Broadcasting produced an empty shape; nothing to evaluate.
            return np.empty_like(x)

        def _pmf1(x, M, n, N, odds):
            # BiasedUrn takes (total, draws, type-1 count, odds, accuracy).
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.probability(x)
        return _pmf1(x, M, n, N, odds)

    def _stats(self, M, n, N, odds, moments):
        def _moments1(M, n, N, odds):
            urn = self.dist(N, n, M, odds, 1e-12)
            return urn.moments()
        # Mean/variance from BiasedUrn; skewness/kurtosis unavailable.
        (m, v) = (_moments1(M, n, N, odds) if (('m' in moments) or ('v' in moments)) else (None, None))
        (s, k) = (None, None)
        return (m, v, s, k)
def sanitize_test_filename(filename):
    """Normalise a test filename into a dotted module path.

    Strips the CI prefix (plus its trailing separator) when present, removes a
    trailing ``.py`` extension, and converts path separators to dots.
    """
    if filename.startswith(CI_TEST_PREFIX):
        # +1 also drops the separator character that follows the prefix.
        filename = filename[len(CI_TEST_PREFIX) + 1:]
    # FIX: the dot must be escaped -- the old pattern '.py$' matched ANY
    # character followed by 'py' at the end, truncating names like 'happy'.
    without_ext = re.sub(r'\.py$', '', filename)
    return re.sub(r'/', '.', without_ext)
class CarliniWagnerL2(Attack):
    """Carlini & Wagner L2 adversarial attack (cleverhans TF1 wrapper).

    Wraps the numpy-based CWL2 optimiser in a ``tf.py_func`` so it can be used
    inside a TensorFlow graph.  ``feedable_kwargs`` may change per call;
    ``structural_kwargs`` alter the built attack graph.
    """

    def __init__(self, model, sess, dtypestr='float32', **kwargs):
        if (not isinstance(model, Model)):
            # Accept bare callables returning logits by wrapping them.
            wrapper_warning_logits()
            model = CallableModelWrapper(model, 'logits')
        super(CarliniWagnerL2, self).__init__(model, sess, dtypestr, **kwargs)
        self.feedable_kwargs = ('y', 'y_target')
        self.structural_kwargs = ['batch_size', 'confidence', 'targeted', 'learning_rate', 'binary_search_steps', 'max_iterations', 'abort_early', 'initial_const', 'clip_min', 'clip_max']

    def generate(self, x, **kwargs):
        """Return a symbolic tensor of adversarial examples for ``x``.

        Targeted mode is enabled when ``y_target`` is supplied.  The heavy
        lifting runs in numpy via ``tf.py_func``.
        """
        assert (self.sess is not None), 'Cannot use `generate` when no `sess` was provided'
        self.parse_params(**kwargs)
        (labels, nb_classes) = self.get_or_guess_labels(x, kwargs)
        attack = CWL2(self.sess, self.model, self.batch_size, self.confidence, ('y_target' in kwargs), self.learning_rate, self.binary_search_steps, self.max_iterations, self.abort_early, self.initial_const, self.clip_min, self.clip_max, nb_classes, x.get_shape().as_list()[1:])

        def cw_wrap(x_val, y_val):
            return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
        wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
        # py_func loses static shape info; restore it from the input.
        wrap.set_shape(x.get_shape())
        return wrap

    def parse_params(self, y=None, y_target=None, batch_size=1, confidence=0, learning_rate=0.005, binary_search_steps=5, max_iterations=1000, abort_early=True, initial_const=0.01, clip_min=0, clip_max=1):
        """Store attack hyper-parameters.

        ``y``/``y_target`` are accepted here for signature compatibility but
        are consumed in ``generate`` via ``get_or_guess_labels``.
        """
        self.batch_size = batch_size
        self.confidence = confidence
        self.learning_rate = learning_rate
        self.binary_search_steps = binary_search_steps
        self.max_iterations = max_iterations
        self.abort_early = abort_early
        self.initial_const = initial_const
        self.clip_min = clip_min
        self.clip_max = clip_max
def main(args):
    """Zero-shot text-to-code retrieval evaluation.

    Loads the test/code datasets, embeds both with a pretrained HF model,
    scores text-code contrastive retrieval, and appends the result to
    ``output_dir/result.txt`` (rank 0 / single-process only).
    """
    print('\nCreating retrieval dataset')
    (_, _, test_dataset, code_dataset) = create_dataset(args.data_dir, args.lang)
    (test_loader, code_loader) = create_loader([test_dataset, code_dataset], [None, None], batch_size=[args.batch_size, args.batch_size], num_workers=[4, 4], is_trains=[False, False], collate_fns=[None, None])
    tokenizer = AutoTokenizer.from_pretrained(args.model_name, trust_remote_code=True)
    model = AutoModel.from_pretrained(args.model_name, trust_remote_code=True)
    print(f'Loaded {args.model_name} model (#para={model.num_parameters()})')
    print('\nStart zero-shot evaluation...')
    device = torch.device(args.device)
    model = model.to(device)
    model.eval()
    # Embed queries (text) and candidates (code) separately.
    text_embeds = get_feats(model, tokenizer, test_loader, args.max_text_len, device, desc='Get text feats')
    code_embeds = get_feats(model, tokenizer, code_loader, args.max_code_len, device, desc='Get code feats')
    test_result = contrast_evaluation(text_embeds, code_embeds, test_loader.dataset.text2code)
    print(f'''
====> zero-shot test result: ''', test_result)
    # Only the main process writes results.
    if (args.local_rank in [(- 1), 0]):
        log_stats = {**{f'test_{k}': v for (k, v) in test_result.items()}, 'epoch': (- 1)}
        with open(os.path.join(args.output_dir, 'result.txt'), 'a') as f:
            f.write((json.dumps(log_stats) + '\n'))
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """Return the named diffusion beta schedule as a float64 numpy array.

    'linear': linearly spaced betas, rescaled so any step count matches the
    original 1000-step reference range [1e-4, 2e-2].
    'cosine': betas derived from the cosine alpha-bar of Nichol & Dhariwal.
    'vp': variance-preserving SDE discretisation (b_min=0.1, b_max=10).
    Raises NotImplementedError for unknown names.
    """
    if schedule_name == 'linear':
        scale = 1000 / num_diffusion_timesteps
        return np.linspace(scale * 0.0001, scale * 0.02, num_diffusion_timesteps, dtype=np.float64)
    if schedule_name == 'cosine':
        return betas_for_alpha_bar(num_diffusion_timesteps, (lambda t: (math.cos(((((t + 0.008) / 1.008) * math.pi) / 2)) ** 2)))
    if schedule_name == 'vp':
        T = num_diffusion_timesteps
        steps = np.arange(1, T + 1)
        b_max = 10.0
        b_min = 0.1
        # alpha_t = exp(-b_min/T - (b_max - b_min)(2t - 1)/(2 T^2))
        alpha = np.exp(-b_min / T - 0.5 * (b_max - b_min) * (2 * steps - 1) / T ** 2)
        return 1 - alpha
    raise NotImplementedError(f'unknown beta schedule: {schedule_name}')
def test_bad_bounds():
    """``_clean_inputs`` must reject bounds whose shape doesn't match ``c``.

    Covers: a single malformed tuple, malformed tuples in a list, mixed
    valid/invalid entries, too many pairs, and pairs of the wrong width.
    """
    lp = _LPProblem(c=[1, 2])
    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2)))
    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)]))
    with np.testing.suppress_warnings() as sup:
        # Ragged nested sequences trigger a deprecation warning before the
        # ValueError we actually care about; silence it.
        sup.filter(VisibleDeprecationWarning, 'Creating an ndarray from ragged')
        assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2, 2)]))
    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2), (1, 2)]))
    lp = _LPProblem(c=[1, 2, 3, 4])
    assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)]))
def convert_clip_window_to_seconds(window, clip_len=2):
    """Convert an inclusive clip-index window to a [start, end) span in seconds.

    ``window`` is ``(first_clip, last_clip)``; each clip spans ``clip_len``
    seconds, and the end bound covers the whole last clip (hence the +1).
    """
    start_sec = window[0] * clip_len
    end_sec = (window[1] + 1) * clip_len
    return [start_sec, end_sec]
class TranspositionCryptosystem(SymmetricKeyCryptosystem):
    """Transposition cipher over a string monoid, keyed by S_n permutations.

    Blocks of length ``n`` are permuted according to the key; decryption uses
    the inverse permutation.
    """

    def __init__(self, S, n):
        # S: plaintext/ciphertext alphabet monoid; n: block length.
        if (not isinstance(S, StringMonoid_class)):
            raise TypeError(('S (= %s) must be a string monoid.' % S))
        key_space = SymmetricGroup(n)
        SymmetricKeyCryptosystem.__init__(self, S, S, key_space, block_length=n)

    def __call__(self, K):
        """Return the cipher for key ``K`` (a permutation or a list form)."""
        G = self.key_space()
        if isinstance(K, list):
            try:
                K = G(K)
            except Exception:
                raise TypeError(('K (= %s) must specify a permutation.' % K))
        # NOTE(review): this guard raises only when K is NOT a
        # PermutationGroupElement AND its parent equals G -- the conjunction
        # looks inverted (one would expect `or K.parent() != G`); confirm
        # against the upstream implementation before changing.
        if ((not isinstance(K, PermutationGroupElement)) and (K.parent() == G)):
            raise TypeError(('K (= %s) must be a permutation or list specifying a permutation.' % K))
        return TranspositionCipher(self, K)

    def _repr_(self):
        return ('Transposition cryptosystem on %s of block length %s' % (self.cipher_domain(), self.block_length()))

    def random_key(self):
        """Return a uniformly random permutation of the block positions."""
        n = self.block_length()
        return SymmetricGroup(n).random_element()

    def inverse_key(self, K, check=True):
        """Return the decryption key, i.e. the inverse permutation."""
        if check:
            if (K not in self.key_space()):
                raise TypeError(('Argument K (= %s) is not in the key space.' % K))
        return (K ** (- 1))

    def encoding(self, M):
        """Encode message ``M`` into the cipher domain."""
        S = self.cipher_domain()
        if isinstance(S, AlphabeticStringMonoid):
            return S(strip_encoding(M))
        try:
            return S.encoding(M)
        except Exception:
            raise TypeError(('Argument M = %s does not encode in the cipher domain' % M))

    def deciphering(self, K, C):
        """Decrypt ciphertext ``C`` with key ``K``."""
        i = self(self.inverse_key(K))
        return i(C)

    def enciphering(self, K, M):
        """Encrypt message ``M`` with key ``K``."""
        e = self(K)
        return e(M)
def _validate_trainable_layers(pretrained, trainable_backbone_layers, max_value, default_value): if (not pretrained): if (trainable_backbone_layers is not None): warnings.warn('Changing trainable_backbone_layers has not effect if neither pretrained nor pretrained_backbone have been set to True, falling back to trainable_backbone_layers={} so that all layers are trainable'.format(max_value)) trainable_backbone_layers = max_value if (trainable_backbone_layers is None): trainable_backbone_layers = default_value assert (0 <= trainable_backbone_layers <= max_value) return trainable_backbone_layers
def main():
    """Print a formatted summary of the runtime environment (versions, CUDA,
    OS, ...) for bug reports."""
    print('Collecting environment information...')
    output = get_pretty_env_info()
    print(output)
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Either parameter is skipped when the module lacks it or it is None
    (e.g. a conv created with ``bias=False``).
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def dma_gather_base(context, reg: DMA_gather_reg):
    """Decode a DMA gather command's register file into IR operands.

    Returns ``(results, attr, operands)`` where operands are the source
    tensor and the uint32 index tensor, results the destination tensor, and
    ``attr`` carries the fill constant used for out-of-range indices
    (presumably -- TODO confirm against hardware docs).
    """
    # Lane enable mask assembled from the two 32-bit register halves.
    lane_mask = ((reg.localmem_mask_h32 * (2 ** 32)) + reg.localmem_mask_l32)
    (c, h, w) = (reg[f'src_{d}size'] for d in 'chw')
    d_h = reg.dst_hsize
    if reg.nchw_copy:
        # Plain NCHW copy: destination height mirrors the source height.
        d_h = h
    stride = (((c * h) * w), (h * w), w, 1)
    # Source operand: strided view at the 45-bit (13+32) DMA address.
    opd0 = dict(address=dma_addr(reg.src_start_addr_h13, reg.src_start_addr_l32), dtype=DType(reg.src_data_format), shape=(1, c, h, w), stride=(0, reg.src_cstride, reg.src_hstride, 1), layout=Layout.stride)
    # Destination: channel dim is the larger of source channels and index
    # channels; laid out per enabled lane.
    res0 = dict(address=dma_addr(reg.dst_start_addr_h13, reg.dst_start_addr_l32), dtype=DType(reg.src_data_format), shape=(1, max(c, reg.index_csize), d_h, w), stride=(0, reg.dst_cstride, reg.dst_hstride, 1), layout=Layout.DMAstride(lane_mask))
    # Gather indices: one uint32 per gathered row.
    opd1 = dict(address=dma_addr(reg.index_start_addr_h13, reg.index_start_addr_l32), dtype=DType.ui32, shape=(1, reg.index_csize, d_h, 1), stride=(0, reg.index_cstride, reg.index_hstride, 1), layout=Layout.stride)
    const = get_value(context, address=reg.constant_value, dtype=DType(reg.src_data_format), is_const=True).data
    attr = dict(const=const)
    operands = [get_value(context, **x) for x in (opd0, opd1)]
    results = [get_value(context, **res0)]
    return (results, attr, operands)
class TestLinearSimulation(unittest.TestCase):
    """Smoke tests for ExponentialSinusoidSimulation on a 1-D tensor mesh."""

    def setUp(self):
        # 100-cell 1-D mesh; identity mapping from model to physical property.
        mesh = discretize.TensorMesh([100])
        self.sim = simulation.ExponentialSinusoidSimulation(mesh=mesh, model_map=maps.IdentityMap(mesh))
        # Piecewise-constant true model: 0 / 1 / -0.5 / 0 along x.
        mtrue = np.zeros(mesh.nC)
        mtrue[(mesh.cell_centers_x > 0.3)] = 1.0
        mtrue[(mesh.cell_centers_x > 0.45)] = (- 0.5)
        mtrue[(mesh.cell_centers_x > 0.6)] = 0
        self.mtrue = mtrue

    def test_forward(self):
        # Regression check against previously computed predicted data.
        data = np.r_[(0.075, 0., 0., (- 0.), (- 0.), (- 0.), 0., 0., 0., (- 0.), (- 0.), (- 0.), (- 0.), 0., 0., 0., 0., 7.e-05, (- 0.), (- 0.))]
        assert np.allclose(data, self.sim.dpred(self.mtrue))

    def test_make_synthetic_data(self):
        # Synthetic data should default to a 5% relative error floor.
        dclean = self.sim.dpred(self.mtrue)
        data = self.sim.make_synthetic_data(self.mtrue)
        assert np.all((data.relative_error == (0.05 * np.ones_like(dclean))))
class TransferModule(nn.Module):
    """Propagates node attention through edge attention via a matrix product."""

    def forward(self, node_attn, edge_attn):
        # new attention = node_attn @ edge_attn (batched matmul semantics).
        propagated = torch.matmul(node_attn, edge_attn)
        return propagated
# NOTE(review): the parenthesised line below appears to be a corrupted
# decorator -- keyword arguments outside any call are a SyntaxError as
# written.  It was presumably something like
# `@requires(ipyvuetify=_HAS_IPYVUETIFY, IPython=_HAS_IPYTHON)`; confirm
# against the original source before repairing.
(ipyvuetify=_HAS_IPYVUETIFY, IPython=_HAS_IPYTHON)
def init_noise_param_floattextfield(noise_param: str) -> ui.ValidatedNumberField:
    """Build a float input field pre-filled with the default value of the
    given noise parameter (step 0.001, fixed max width)."""
    return ui.ValidatedNumberField(num_type=float, v_model=noise.NOISE_PARAMS[noise_param], name=noise_param, label=noise_param, step=0.001, style_='max-width: 180px')
class G11(nn.Module):
    """Dual-domain encoder/decoder generator (MNIST <-> SVHN style).

    Shares the middle layers across domains while keeping per-domain input
    (1-channel vs 3-channel) and output convolutions; ``svhn=True`` selects
    the 3-channel path.
    """

    def __init__(self, conv_dim=64):
        super(G11, self).__init__()
        # Domain-specific input convs: 1-channel (e.g. MNIST) vs 3-channel SVHN.
        self.conv1 = conv(1, conv_dim, 4)
        self.conv1_svhn = conv(3, conv_dim, 4)
        self.conv2 = conv(conv_dim, (conv_dim * 2), 4)
        # Two shared stride-1 convs in the bottleneck.
        res_dim = (conv_dim * 2)
        self.conv3 = conv(res_dim, res_dim, 3, 1, 1)
        self.conv4 = conv(res_dim, res_dim, 3, 1, 1)
        self.deconv1 = deconv((conv_dim * 2), conv_dim, 4)
        # Domain-specific output deconvs (no batch norm before tanh).
        self.deconv2 = deconv(conv_dim, 1, 4, bn=False)
        self.deconv2_svhn = deconv(conv_dim, 3, 4, bn=False)

    def forward(self, x, svhn=False):
        """Full encode+decode pass; ``svhn`` picks the 3-channel in/out convs."""
        if svhn:
            out = F.leaky_relu(self.conv1_svhn(x), 0.05)
        else:
            out = F.leaky_relu(self.conv1(x), 0.05)
        out = F.leaky_relu(self.conv2(out), 0.05)
        out = F.leaky_relu(self.conv3(out), 0.05)
        out = F.leaky_relu(self.conv4(out), 0.05)
        out = F.leaky_relu(self.deconv1(out), 0.05)
        if svhn:
            out = F.tanh(self.deconv2_svhn(out))
        else:
            out = F.tanh(self.deconv2(out))
        return out

    def encode(self, x, svhn=False):
        """Encoder half: input conv through conv3 (stops before conv4)."""
        if svhn:
            out = F.leaky_relu(self.conv1_svhn(x), 0.05)
        else:
            out = F.leaky_relu(self.conv1(x), 0.05)
        out = F.leaky_relu(self.conv2(out), 0.05)
        out = F.leaky_relu(self.conv3(out), 0.05)
        return out

    def decode(self, out, svhn=False):
        """Decoder half: conv4 through the domain-specific output deconv."""
        out = F.leaky_relu(self.conv4(out), 0.05)
        out = F.leaky_relu(self.deconv1(out), 0.05)
        if svhn:
            out = F.tanh(self.deconv2_svhn(out))
        else:
            out = F.tanh(self.deconv2(out))
        return out

    def encode_params(self):
        """Parameters of the encoder half (both domains' input convs)."""
        layers_basic = (list(self.conv1_svhn.parameters()) + list(self.conv1.parameters()))
        layers_basic += list(self.conv2.parameters())
        layers_basic += list(self.conv3.parameters())
        return layers_basic

    def decode_params(self):
        """Parameters of the decoder half (both domains' output deconvs)."""
        layers_basic = (list(self.deconv2_svhn.parameters()) + list(self.deconv2.parameters()))
        layers_basic += list(self.deconv1.parameters())
        layers_basic += list(self.conv4.parameters())
        return layers_basic

    def unshared_parameters(self):
        """Parameters NOT shared between the two domains (in/out convs only)."""
        return (((list(self.deconv2_svhn.parameters()) + list(self.conv1_svhn.parameters())) + list(self.deconv2.parameters())) + list(self.conv1.parameters()))
class TestMiniWoBPolicy(object):
    """Unit tests for MiniWoBPolicy's static tensor helpers."""

    def test_states_to_image_var(self):
        """Stacking per-state image observations must preserve values/order."""
        # Build a minimal state object exposing .observation.image.
        arr_to_state = (lambda arr: Bunch(observation=Bunch(image=np.array(arr, dtype=np.float32))))
        arrs = [[[[1, 2], [3, 4]], [[1, 0], [3, 0]], [[0, 2], [0, 4]]], [[[10, 20], [30, 40]], [[10, 0], [30, 0]], [[0, 20], [0, 40]]], [[[100, 200], [300, 400]], [[100, 0], [300, 0]], [[0, 200], [0, 400]]]]
        correct = np.array(arrs, dtype=np.float32)
        states = [arr_to_state(arr) for arr in arrs]
        image_var = MiniWoBPolicy._states_to_image_var(states)
        assert_tensor_equal(correct, image_var)

    def test_sample_elements(self):
        """Sampling from one-hot probabilities must pick the hot index."""
        element_probs_arr = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
        element_probs = GPUVariable(torch.from_numpy(element_probs_arr))
        element_indices = MiniWoBPolicy._sample_elements(element_probs)
        assert np.array_equal(element_indices, [0, 1, 2])
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block.

    Expands channels by ``expand_ratio``, applies a depthwise conv, then
    projects back down.  A residual shortcut is used only when the block is
    stride-1 and input/output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = int(round(inp * expand_ratio))
        # Shortcut only valid when shapes are preserved.
        self.use_res_connect = (stride == 1) and (inp == oup)

        body = []
        if expand_ratio != 1:
            # Pointwise expansion.
            body.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        body.extend([
            # Depthwise conv (groups == channels).
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # Linear projection back to `oup` channels (no activation).
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ])
        self.conv = nn.Sequential(*body)

    def forward(self, x):
        result = self.conv(x)
        return x + result if self.use_res_connect else result
# NOTE(review): the decorator prefix appears truncated in this source
# (presumably `@pytest.mark` before `.parametrize`) — confirm upstream.
.parametrize('tp,name0,name1', [(int, 'int_0', 'int_1'), (str, 'str_0', 'str_1'), (Callable[([int, str], bool)], 'callable_0', 'callable_1'), (dict[(int, str)], 'dict_0', 'dict_1')])
def test_variable_type_counter(variable_type_naming_scope, tp, name0, name1, type_system):
    """Names for same-typed variables get an incrementing per-type counter suffix."""
    var = vr.VariableReference(MagicMock(), type_system.convert_type_hint(tp))
    assert (variable_type_naming_scope.get_name(var) == name0)
    # A second variable of the same type gets the next counter value.
    var = vr.VariableReference(MagicMock(), type_system.convert_type_hint(tp))
    assert (variable_type_naming_scope.get_name(var) == name1)
def RPSLS():
    """Return the Rock-Paper-Scissors-Lizard-Spock game as a NormalFormGame.

    The antisymmetric payoff matrix encodes win (+1) / loss (-1) / draw (0)
    for the row player against each of the five moves.
    """
    from sage.matrix.constructor import matrix
    payoffs = matrix([
        [0, (- 1), 1, 1, (- 1)],
        [1, 0, (- 1), (- 1), 1],
        [(- 1), 1, 0, 1, (- 1)],
        [(- 1), 1, (- 1), 0, 1],
        [1, (- 1), 1, (- 1), 0],
    ])
    game = NormalFormGame([payoffs])
    game.rename('Rock-Paper-Scissors-Lizard-Spock - ' + repr(game))
    return game
def mask_loss(views, gbuffers, loss_function=torch.nn.MSELoss()):
    """Average per-view loss between each view's mask and its rendered g-buffer mask.

    Args:
        views: objects exposing a ``mask`` tensor (one per view).
        gbuffers: dicts with a ``'mask'`` tensor, aligned with ``views``.
        loss_function: element-wise loss applied per view (default MSE; the
            shared default instance is stateless, so reuse is safe).

    Returns:
        Mean loss over all views, or 0.0 for empty input (the original raised
        ZeroDivisionError in that case).
    """
    if not views:
        return 0.0
    total = sum(loss_function(view.mask, gbuffer['mask'])
                for view, gbuffer in zip(views, gbuffers))
    return total / len(views)
def average_df(df, metric_names=('l-val_top1', 'l-val_base_top1'), take_average=True):
    """Aggregate metric columns of ``df`` per (data, feature, type) group.

    For each group, reports the number of runs and either the mean with a
    ``<metric>-std`` column (``take_average=True``) or the median (no std
    column).  Values are formatted as 2-decimal strings.  Any column other
    than the bookkeeping ones ('data', 'feature', 'type', 'file',
    'best_epoch') and the metrics is treated as a hyper-parameter and copied
    from the group's first row.

    Fix: the default was a mutable list (shared across calls); it is now a
    tuple, converted to a list internally for concatenation.
    """
    metric_names = list(metric_names)
    data_names = set(df['data'])
    f_names = set(df['feature'])
    t_names = set(df['type'])
    bookkeeping = ['data', 'feature', 'type', 'file', 'best_epoch'] + metric_names
    hp_names = [c for c in df.columns if c not in bookkeeping]
    data_dict = defaultdict(list)
    for d_name in data_names:
        for f_name in f_names:
            for t_name in t_names:
                # Single boolean-mask filter instead of three chained copies.
                result = df[(df.data == d_name) & (df.feature == f_name) & (df.type == t_name)]
                if len(result) == 0:
                    continue
                data_dict['data'].append(d_name)
                data_dict['feature'].append(f_name)
                data_dict['type'].append(t_name)
                data_dict['total_runs'].append(len(result))
                for m in metric_names:
                    if take_average:
                        data_dict[m].append('{:.2f}'.format(np.mean(result[m])))
                        data_dict[f'{m}-std'].append('{:.2f}'.format(np.std(result[m])))
                    else:
                        # NOTE: no std column in the median branch (as before).
                        data_dict[m].append('{:.2f}'.format(np.median(result[m])))
                for h_name in hp_names:
                    data_dict[h_name].append(result[h_name].iloc[0])
    out = pd.DataFrame(data_dict)
    return out.sort_values(['data', 'feature', 'type'])
def test_fails_without_run_subtask():
    """'run' with no subtask must make argparse bail out (SystemExit)."""
    parser = _get_command_line_parser([], [], [])
    assert_raises(SystemExit, parser.parse_args, ['run'])
def create_input_tvis(parser):
    """Build ONNX tensor-value-infos for every 'top.Input' op in the parsed model."""
    tvis = []
    for op in parser.ops:
        if op.type != 'top.Input':
            continue
        # Element type comes from the op result's MLIR shaped type.
        element_type = mlir.ir.ShapedType(op.op.results[0].type).element_type
        tvis.append(helper.make_tensor_value_info(op.name, type_map(element_type), op.shape))
    return tvis
# NOTE(review): the decorator appears truncated in this source ("_method",
# likely "@matrix_method" or similar) — confirm upstream.
_method
def identity_matrix(ring, n=0, sparse=False):
    """Return the n x n identity matrix over ``ring``.

    If ``ring`` is itself an integer, it is taken as the size and the
    matrix is constructed over ZZ instead.
    """
    if isinstance(ring, (Integer, int)):
        n = ring
        ring = ZZ
    # MatrixSpace(...)(1) yields the multiplicative identity of the space.
    return matrix_space.MatrixSpace(ring, n, n, sparse)(1)
class ToricVarietyFactory(SageObject):
    """Factory of predefined toric varieties.

    Constructed varieties are cached on the instance ``__dict__`` keyed by
    (name, base_ring[, coordinate names]) so repeated requests return the
    same object.
    """
    # Whether to run consistency checks when building fans.
    _check = True

    def _make_ToricVariety(self, name, coordinate_names, base_ring):
        """Build (and cache) a plain ToricVariety from the rays/cones table."""
        (rays, cones) = toric_varieties_rays_cones[name]
        if (coordinate_names is None):
            dict_key = (name, base_ring)
        else:
            coordinate_names = normalize_names(coordinate_names, len(rays), DEFAULT_PREFIX)
            dict_key = ((name, base_ring) + tuple(coordinate_names))
        if (dict_key not in self.__dict__):
            fan = Fan(cones, rays, check=self._check)
            self.__dict__[dict_key] = ToricVariety(fan, coordinate_names=coordinate_names, base_ring=base_ring)
        return self.__dict__[dict_key]

    def _make_CPRFanoToricVariety(self, name, coordinate_names, base_ring):
        """Build (and cache) a CPR-Fano toric variety from the rays/cones table."""
        (rays, cones) = toric_varieties_rays_cones[name]
        if (coordinate_names is None):
            dict_key = (name, base_ring)
        else:
            coordinate_names = normalize_names(coordinate_names, len(rays), DEFAULT_PREFIX)
            dict_key = ((name, base_ring) + tuple(coordinate_names))
        if (dict_key not in self.__dict__):
            polytope = LatticePolytope(rays, lattice=ToricLattice(len(rays[0])))
            points = [tuple(_) for _ in polytope.points()]
            # Map each ray to its index among the polytope's lattice points.
            ray2point = [points.index(r) for r in rays]
            charts = [[ray2point[i] for i in c] for c in cones]
            self.__dict__[dict_key] = CPRFanoToricVariety(Delta_polar=polytope, coordinate_points=ray2point, charts=charts, coordinate_names=coordinate_names, base_ring=base_ring, check=self._check)
        return self.__dict__[dict_key]

    # --- predefined del Pezzo surfaces and products -----------------------
    def dP6(self, names='x u y v z w', base_ring=QQ):
        return self._make_CPRFanoToricVariety('dP6', names, base_ring)

    def dP7(self, names='x u y v z', base_ring=QQ):
        return self._make_CPRFanoToricVariety('dP7', names, base_ring)

    def dP8(self, names='t x y z', base_ring=QQ):
        return self._make_CPRFanoToricVariety('dP8', names, base_ring)

    def P1xP1(self, names='s t x y', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P1xP1', names, base_ring)

    def P1xP1_Z2(self, names='s t x y', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P1xP1_Z2', names, base_ring)

    def P1(self, names='s t', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P1', names, base_ring)

    def P2(self, names='x y z', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P2', names, base_ring)

    def P(self, n, names='z+', base_ring=QQ):
        """Projective space of dimension ``n`` as a CPR-Fano toric variety."""
        try:
            n = ZZ(n)
        except TypeError:
            raise TypeError(('dimension of the projective space must be a positive integer!\nGot: %s' % n))
        if (n <= 0):
            raise ValueError(('only projective spaces of positive dimension can be constructed!\nGot: %s' % n))
        # Rays: standard basis plus the all-(-1) vector.
        m = identity_matrix(n).augment(matrix(n, 1, ([(- 1)] * n)))
        charts = [(list(range(i)) + list(range((i + 1), (n + 1)))) for i in range((n + 1))]
        return CPRFanoToricVariety(Delta_polar=LatticePolytope(m.columns(), lattice=ToricLattice(n)), charts=charts, check=self._check, coordinate_names=names, base_ring=base_ring)

    def A1(self, names='z', base_ring=QQ):
        return self._make_ToricVariety('A1', names, base_ring)

    def A2(self, names='x y', base_ring=QQ):
        return self._make_ToricVariety('A2', names, base_ring)

    def A(self, n, names='z+', base_ring=QQ):
        """Affine space of dimension ``n`` as a toric variety."""
        try:
            n = ZZ(n)
        except TypeError:
            raise TypeError(('dimension of the affine space must be a positive integer!\nGot: %s' % n))
        if (n <= 0):
            raise ValueError(('only affine spaces of positive dimension can be constructed!\nGot: %s' % n))
        rays = identity_matrix(n).columns()
        cones = [list(range(n))]
        fan = Fan(cones, rays, check=self._check)
        # NOTE(review): base_ring is accepted but not forwarded here — confirm
        # whether this is intentional.
        return ToricVariety(fan, coordinate_names=names)

    def A2_Z2(self, names='x y', base_ring=QQ):
        return self._make_ToricVariety('A2_Z2', names, base_ring)

    def P1xA1(self, names='s t z', base_ring=QQ):
        return self._make_ToricVariety('P1xA1', names, base_ring)

    def Conifold(self, names='u x y v', base_ring=QQ):
        return self._make_ToricVariety('Conifold', names, base_ring)

    def dP6xdP6(self, names='x0 x1 x2 x3 x4 x5 y0 y1 y2 y3 y4 y5', base_ring=QQ):
        return self._make_CPRFanoToricVariety('dP6xdP6', names, base_ring)

    def Cube_face_fan(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('Cube_face_fan', names, base_ring)

    def Cube_sublattice(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('Cube_sublattice', names, base_ring)

    def Cube_nonpolyhedral(self, names='z+', base_ring=QQ):
        return self._make_ToricVariety('Cube_nonpolyhedral', names, base_ring)

    def Cube_deformation(self, k, names=None, base_ring=QQ):
        """Deformation family X_k of the cube fan, parameterized by integer k >= 0."""
        try:
            k = ZZ(k)
        except TypeError:
            raise TypeError(('cube deformations X_k are defined only for non-negative integer k!\nGot: %s' % k))
        if (k < 0):
            raise ValueError(('cube deformations X_k are defined only for non-negative k!\nGot: %s' % k))

        def rays(kappa):
            # Cube vertices with one vertex displaced by 2*kappa in z.
            return matrix([[1, 1, ((2 * kappa) + 1)], [1, (- 1), 1], [(- 1), 1, 1], [(- 1), (- 1), 1], [(- 1), (- 1), (- 1)], [(- 1), 1, (- 1)], [1, (- 1), (- 1)], [1, 1, (- 1)]])
        cones = [[0, 1, 2, 3], [4, 5, 6, 7], [0, 1, 7, 6], [4, 5, 3, 2], [0, 2, 5, 7], [4, 6, 1, 3]]
        fan = Fan(cones, rays(k))
        return ToricVariety(fan, coordinate_names=names)

    def BCdlOG(self, names='v1 v2 c1 c2 v4 v5 b e1 e2 e3 f g v6', base_ring=QQ):
        return self._make_CPRFanoToricVariety('BCdlOG', names, base_ring)

    def BCdlOG_base(self, names='d4 d3 r2 r1 d2 u d1', base_ring=QQ):
        return self._make_ToricVariety('BCdlOG_base', names, base_ring)

    def P2_112(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P2_112', names, base_ring)

    def P2_123(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P2_123', names, base_ring)

    def P4_11169(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P4_11169', names, base_ring)

    def P4_11169_resolved(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P4_11169_resolved', names, base_ring)

    def P4_11133(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P4_11133', names, base_ring)

    def P4_11133_resolved(self, names='z+', base_ring=QQ):
        return self._make_CPRFanoToricVariety('P4_11133_resolved', names, base_ring)

    def WP(self, *q, **kw):
        """Weighted projective space with positive, coprime integer weights ``q``.

        Keyword arguments: ``K``/``base_ring`` for the base ring, ``names``
        for coordinate names; anything else raises TypeError.
        """
        if (len(q) == 1):
            # Allow WP([a, b, c]) as well as WP(a, b, c).
            if isinstance(q[0], (list, tuple)):
                q = q[0]
        q = list(q)
        m = len(q)
        if (m < 2):
            raise ValueError(('more than one weight must be provided (got %s)' % q))
        for i in range(m):
            try:
                q[i] = ZZ(q[i])
            except TypeError:
                raise TypeError(('the weights (=%s) must be integers' % q))
            if (q[i] <= 0):
                raise ValueError(('the weights (=%s) must be positive integers' % q))
        if (not (gcd(q) == 1)):
            raise ValueError(('the weights (=%s) must be relatively prime' % q))
        base_ring = QQ
        names = 'z+'
        for key in kw:
            if (key == 'K'):
                base_ring = kw['K']
            elif (key == 'base_ring'):
                base_ring = kw['base_ring']
            elif (key == 'names'):
                names = kw['names']
                names = normalize_names(names, m, DEFAULT_PREFIX)
            else:
                raise TypeError(('got an unexpected keyword argument %r' % key))
        # Rays are images of the standard basis in the quotient lattice L / <q>.
        L = ToricLattice(m)
        L_sub = L.submodule([L(q)])
        Q = (L / L_sub)
        rays = []
        cones = []
        w = list(range(m))
        L_basis = L.basis()
        for i in w:
            b = L_basis[i]
            v = Q.coordinate_vector(Q(b))
            rays = (rays + [v])
            # Each maximal cone omits one ray.
            w_c = (w[:i] + w[(i + 1):])
            cones = (cones + [tuple(w_c)])
        fan = Fan(cones, rays)
        return ToricVariety(fan, coordinate_names=names, base_ring=base_ring)

    def torus(self, n, names='z+', base_ring=QQ):
        """The n-dimensional algebraic torus (toric variety of the empty fan)."""
        try:
            n = ZZ(n)
        except TypeError:
            raise TypeError('dimension of the torus must be an integer')
        if (n < 0):
            raise ValueError('dimension must be non-negative')
        N = ToricLattice(n)
        fan = Fan([], lattice=N)
        # NOTE(review): passed as base_field here (vs base_ring elsewhere) —
        # presumably an accepted alias; confirm against ToricVariety's signature.
        return ToricVariety(fan, coordinate_names=names, base_field=base_ring)
def parse_win(case_name, spinLable):
    """Parse a Wannier90 ``.win`` file for lattice, grid and k-point data.

    The file is re-opened for each section parser so each one reads from the
    start of the file.  Fix: use context managers so handles are closed even
    if a parser raises (the original leaked open files on error).

    Returns:
        (real_lattice, recip_lattice, dimensions, kpoints)
    """
    file_name = case_name + '.win' + spinLable
    with open(file_name, 'r') as f:
        (real_lattice, recip_lattice) = parse_win_unit_cell_cart(f)
    with open(file_name, 'r') as f:
        dimensions = parse_win_mp_grid(f)
    with open(file_name, 'r') as f:
        kpoints = parse_win_kpoints(f)
    return (real_lattice, recip_lattice, dimensions, kpoints)
def has_tensor(obj) -> bool:
    """Recursively check whether ``obj`` contains a torch.Tensor anywhere.

    Descends into dict values and list/tuple elements; anything else is
    treated as a leaf.
    """
    if isinstance(obj, torch.Tensor):
        return True
    if isinstance(obj, dict):
        return any(has_tensor(v) for v in obj.values())
    if isinstance(obj, (list, tuple)):
        return any(has_tensor(element) for element in obj)
    return False
# NOTE(review): the decorator prefixes appear truncated in this source
# (presumably `@pytest.mark` before each `.parametrize`) — confirm upstream.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('center_point_box', [0, 1])
.parametrize('max_output_boxes', [0, 1, 13])
.parametrize('iou_threshold', [0.0, 0.1, 0.5, 1.0])
.parametrize('score_threshold', [0.4])
.parametrize('batch_size, num_boxes, num_classes', [(1, 1, 1), (7, 1, 1), (1, 7, 1), (1, 1, 7), (2, 3, 4), (3, 129, 65)])
def test_onnx_non_max_suppression_forward(seed, batch_size, num_boxes, num_classes, center_point_box, max_output_boxes, iou_threshold, score_threshold, ctx, func_name):
    """Compare F.onnx_non_max_suppression against a reference implementation."""
    # Skipped in CI: requires onnx/onnxruntime to be installed locally.
    pytest.skip('This test needs onnx, test locally.')
    import onnx
    import onnxruntime as ort
    rng = np.random.RandomState(seed)
    boxes = rng.rand(batch_size, num_boxes, 4).astype(np.float32)
    scores = rng.rand(batch_size, num_classes, num_boxes).astype(np.float32)
    vboxes = nn.Variable.from_numpy_array(boxes)
    vscores = nn.Variable.from_numpy_array(scores)
    with nn.context_scope(ctx), nn.auto_forward():
        voutput = F.onnx_non_max_suppression(vboxes, vscores, center_point_box, max_output_boxes, iou_threshold, score_threshold)
    ref = ref_non_max_suppression(boxes, scores, center_point_box, max_output_boxes, iou_threshold, score_threshold)
    assert_allclose(voutput.d, ref)
    assert (func_name == voutput.parent.name)
def initialize_linear(linear, nonlinearity, bias):
    """Initialize a linear layer for ReLU-family activations.

    Applies Kaiming-normal init to the weight and a uniform init on
    [0, 1/sqrt(2*bias)] to the bias.  Other nonlinearities are left untouched.
    """
    if nonlinearity not in ('relu', 'leaky_relu'):
        return
    nn.init.kaiming_normal_(linear.weight, nonlinearity=nonlinearity)
    upper = 1 / ((bias * 2) ** 0.5)
    nn.init.uniform_(linear.bias, 0, upper)
def set_unit_cap(unit: str) -> None:
    """Set the module-level capacitance unit after validating it.

    ``unit`` must appear in either ``freq_list`` or ``farad_list``.
    """
    global _unit_cap
    if (unit not in freq_list) and (unit not in farad_list):
        raise ValueError('The input unit is not correct. Look at the documentation for the correct input format.')
    _unit_cap = unit
def rotation(x, degs):
    """Return rotated copies of ``x`` for each supported angle in ``degs``.

    Supported angles are 0 (identity), 90, 180 and 270 degrees; any other
    value is silently skipped, matching the original behavior.
    """
    deg_to_rad = math.pi / 180
    rotated = []
    for deg in degs:
        if deg == 0:
            rotated.append(x)
        elif deg in (90, 180, 270):
            rotated.append(tf.contrib.image.rotate(x, deg * deg_to_rad))
    return rotated
def write_file(file_name, list_to_write):
    """Write each string in ``list_to_write`` to ``file_name``, one per line."""
    with open(file_name, 'w') as f:
        f.writelines(line + '\n' for line in list_to_write)
class DatasetProcessor():
    """Abstract interface for dataset readers.

    Subclasses must provide the three split accessors and the label set;
    every method here raises NotImplementedError.
    """

    def get_train_examples(self):
        """Return examples for the training split."""
        raise NotImplementedError

    def get_dev_examples(self):
        """Return examples for the dev/validation split."""
        raise NotImplementedError

    def get_test_examples(self):
        """Return examples for the test split."""
        raise NotImplementedError

    def get_labels(self):
        """Return the list of possible labels."""
        raise NotImplementedError
def has_person_mention(x) -> int:
    """Labeling function: 0 if the doc has a PERSON entity, else -1 (abstain)."""
    for ent in x.doc.ents:
        if ent.label_ == 'PERSON':
            return 0
    return -1
class Accumulator(object):
    """A dict-like accumulator of named float metrics.

    Supports adding single values or whole dicts, item access, and division
    either by a scalar or by one of its own keys (normalization), which
    returns a new Accumulator.
    """

    def __init__(self):
        # Missing keys read as 0.0.
        self.metrics = defaultdict(lambda: 0.0)

    def add(self, key, value):
        """Accumulate ``value`` into ``key``."""
        self.metrics[key] += value

    def add_dict(self, dict):
        """Accumulate every (key, value) pair of ``dict``."""
        for key, value in dict.items():
            self.add(key, value)

    def __getitem__(self, item):
        return self.metrics[item]

    def __setitem__(self, key, value):
        self.metrics[key] = value

    def get_dict(self):
        """Return a deep-copied plain dict of the metrics."""
        return copy.deepcopy(dict(self.metrics))

    def items(self):
        return self.metrics.items()

    def __str__(self):
        return str(dict(self.metrics))

    def __truediv__(self, other):
        """Divide by a scalar, or normalize by one of our keys if ``other`` is a str.

        When normalizing by a key, that key itself is copied unchanged.
        """
        quotient = Accumulator()
        for key, value in self.items():
            if isinstance(other, str):
                quotient[key] = value if key == other else value / self[other]
            else:
                quotient[key] = value / other
        return quotient
def create_rollout_worker(mocker: MockerFixture, env_desc: Dict[(str, Any)], rollout_config: Dict[(str, Any)]):
    """Build a PBRolloutWorker with its Ray inference client/server mocked out.

    The fakes are patched in before the import so the worker never touches Ray.
    """
    mocker.patch('malib.rollout.rolloutworker.RayInferenceClient', new=FakeInferenceClient)
    mocker.patch('malib.rollout.rolloutworker.RayInferenceServer', new=FakeInferenceServer)
    # Import after patching so the worker picks up the fakes.
    from malib.rollout.pb_rolloutworker import PBRolloutWorker
    worker = PBRolloutWorker(experiment_tag='test_rollout_worker', env_desc=env_desc, agent_mapping_func=(lambda agent: agent), rollout_config=rollout_config, log_dir='./logs')
    return worker
def train_and_evaluate_sentiment(train_data, train_labels, val_data, val_labels, test_data=None, test_labels=None, parser_output_path=None, perl_script_path=None):
    """Train an SVM classifier and report validation (and optional test) accuracy.

    Returns:
        (val_accuracy, test_accuracy) — test_accuracy is None when no test
        data/labels were supplied.
    """
    print('Training the SVM on %d examples...' % train_data.shape[0])
    classifier = svm.SVC()
    classifier.fit(train_data, train_labels)

    val_accuracy = accuracy_score(val_labels, classifier.predict(val_data))
    print('Val acc: %.5f' % val_accuracy)

    test_accuracy = None
    if test_data is not None and test_labels is not None:
        test_accuracy = accuracy_score(test_labels, classifier.predict(test_data))
        print('Test acc: %.5f' % test_accuracy)
    return (val_accuracy, test_accuracy)
# NOTE(review): decorator appears truncated in the source; rendered here as a
# decorator application — confirm the original name upstream.
@_decorator('')
def _get_header(html):
    """Extract the header fragment from a Weibo page's FM.view script blocks.

    Scans every <script> tag for an ``FM.view({...})`` payload whose source
    mentions 'pl.header.head.index' and returns that payload's 'html' field,
    or '' if none is found.
    """
    soup = BeautifulSoup(html, 'lxml')
    pattern = re.compile('FM.view\\((.*)\\)')
    cont = ''
    for script in soup.find_all('script'):
        text = script.string
        # Fix: script.string is None when the tag has no single string child;
        # the original passed None to pattern.search() and raised TypeError.
        if text is None:
            continue
        m = pattern.search(text)
        if m and ('pl.header.head.index' in text):
            all_info = m.group(1)
            cont = json.loads(all_info)['html']
    return cont
def test_obtain_exact_trajectories():
    """Each of n_workers must contribute exactly n_traj_per_worker rollouts."""
    max_path_length = 15
    n_workers = 8
    env = GarageEnv(PointEnv())
    # One fixed action per worker so rollouts are attributable to their worker.
    per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
    policies = [FixedPolicy(env.spec, ([action] * max_path_length)) for action in per_worker_actions]
    workers = WorkerFactory(seed=100, max_path_length=max_path_length, n_workers=n_workers)
    sampler = LocalSampler.from_worker_factory(workers, policies, envs=env)
    n_traj_per_worker = 3
    rollouts = sampler.obtain_exact_trajectories(n_traj_per_worker, agent_update=policies)
    # Total steps and exact trajectory count.
    assert (sum(rollouts.lengths) >= (n_workers * n_traj_per_worker))
    assert (len(rollouts.lengths) == (n_workers * n_traj_per_worker))
    # Trajectories arrive grouped by worker; verify via the per-worker action.
    worker = (- 1)
    for (count, rollout) in enumerate(rollouts.split()):
        if ((count % n_traj_per_worker) == 0):
            worker += 1
        assert (rollout.actions == per_worker_actions[worker]).all()
def calculate_mu_friction_and_height_distribution(bin_width, height_histogram):
    """Fill ``height_histogram`` with the Gibbs-Boltzmann height density and
    return the height-averaged mobility and friction.

    Returns:
        [average_mu, average_gamma]
    """
    num_bins = len(height_histogram)
    # Populate (then normalize) the equilibrium height distribution.
    for k in range(num_bins):
        h = sph.A + bin_width * (k + 0.5)
        height_histogram[k] = gibbs_boltzmann_distribution([0.0, 0.0, h])
    height_histogram /= sum(height_histogram) * bin_width

    average_mu = 0.0
    average_gamma = 0.0
    initial_orientation = [Quaternion([1.0, 0.0, 0.0, 0.0])]
    # Integrate mobility / friction against the height density.
    for k in range(num_bins):
        h = sph.A + bin_width * (k + 0.5)
        mobility = sph.sphere_mobility([np.array([0.0, 0.0, h])], initial_orientation)
        weight = height_histogram[k] * bin_width
        average_mu += (mobility[(0, 0)] + mobility[(1, 1)]) * weight
        average_gamma += weight / mobility[(0, 0)]
    return [average_mu, average_gamma]
def scatter(inputs, target_gpus, dim=0):
    """Scatter ``inputs`` across ``target_gpus``, with DataContainer support.

    Tensors are split via the autograd Scatter ops; DataContainers marked
    cpu_only keep their data on CPU; tuples/lists/dicts are scattered
    recursively; anything else is replicated once per target GPU.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if (isinstance(obj, dict) and (len(obj) > 0)):
            # Rebuild a dict of the same type per target from scattered items.
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Fallback: replicate the object once per target GPU (loop var unused).
        return [obj for targets in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # Break the reference cycle created by the recursive closure.
        scatter_map = None
def add_ResNet_roi_conv5_head(model, blob_in, dim_in, spatial_scale):
    """Add a ResNet res5/conv5 head (RoI transform + stage + avg pool).

    Returns:
        (output blob, output channel count 2048)
    """
    # RoI-pool/align features into a fixed resolution 'pool5' blob.
    model.RoIFeatureTransform(blob_in, 'pool5', blob_rois='rois', method=cfg.FAST_RCNN.ROI_XFORM_METHOD, resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION, sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
    dim_bottleneck = (cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP)
    # Stride chosen so the stage output ends up 7x7 before the final pool.
    stride_init = int((cfg.FAST_RCNN.ROI_XFORM_RESOLUTION / 7))
    (s, dim_in) = add_stage(model, 'res5', 'pool5', 3, dim_in, 2048, (dim_bottleneck * 8), 1, stride_init)
    s = model.AveragePool(s, 'res5_pool', kernel=7)
    return (s, 2048)
class ExpParameter(message.Message):
    """Protobuf message class (generated-style); do not edit by hand.

    The metaclass wires in fields/serialization from the descriptor.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    # Descriptor generated from the .proto definition.
    DESCRIPTOR = _EXPPARAMETER
def psi(x, *args, **kwds):
    """Dispatch to the digamma function psi1(x) or polygamma psi2(x, n).

    With no extra positional arguments, evaluates psi1; with exactly one,
    evaluates psi2; more than one raises TypeError.
    """
    if args:
        if (len(args) > 1):
            raise TypeError(('Symbolic function psi takes at most 2 arguments (%s given)' % (len(args) + 1)))
        return psi2(x, args[0], **kwds)
    return psi1(x, **kwds)
class SKExampleTitleSortKey(ExampleTitleSortKey):
    """Sort gallery examples by title, except release highlights.

    Release-highlight examples ('plot_release_highlights_<major>_<minor>_...')
    sort by version number, newest first (negated float key).
    """

    def __call__(self, filename):
        title = super().__call__(filename)
        prefix = 'plot_release_highlights_'
        # Fix: coerce once — the original checked str(filename) but then
        # sliced `filename` directly, which breaks for Path-like inputs.
        fname = str(filename)
        if not fname.startswith(prefix):
            return title
        major_minor = fname[len(prefix):].split('_')[:2]
        version_float = float('.'.join(major_minor))
        return -version_float
# NOTE(review): the decorator name appears truncated in this source
# (presumably `@dataclass(frozen=True)`) — confirm upstream.
(frozen=True)
class DataOverlapStatsKey():
    """Immutable key identifying overlap stats for one scenario + protocol."""
    # The scenario these overlap statistics belong to.
    light_scenario_key: LightScenarioKey
    # The overlap-detection protocol used to compute them.
    overlap_protocol_spec: OverlapProtocolSpec
def write_audio(path: Union[(str, Path)], music: 'Music', audio_format: str='auto', soundfont_path: Union[(str, Path, None)]=None, rate: int=44100, gain: float=1, options: Union[(str, None)]=None):
    """Synthesize ``music`` to an audio file at ``path`` using FluidSynth.

    The music is first written to a temporary MIDI file, then rendered with
    the ``fluidsynth`` CLI.  ``options`` is a space-separated string of extra
    fluidsynth arguments.  Raises CalledProcessError if fluidsynth fails.
    """
    # Resolve (or validate) the soundfont; None falls back to the default.
    soundfont_path = _check_soundfont(soundfont_path)
    with tempfile.TemporaryDirectory() as temp_dir:
        midi_path = (Path(temp_dir) / 'temp.mid')
        write_midi(midi_path, music)
        option_list = (options.split(' ') if (options is not None) else [])
        # -ni: no MIDI input/shell; -F: render to file; -T: output format.
        subprocess.run(((['fluidsynth', '-ni', '-F', str(path), '-T', audio_format, '-r', str(rate), '-g', str(gain), str(soundfont_path)] + option_list) + [str(midi_path)]), check=True, stdout=subprocess.DEVNULL)
class _Parser():
    """LALR parser driver: feeds lexed tokens into a parse table."""

    def __init__(self, parse_table, callbacks, debug=False):
        self.parse_table = parse_table
        self.callbacks = callbacks
        self.debug = debug

    def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False):
        """Parse from ``start``; optionally return an InteractiveParser instead."""
        parse_conf = ParseConf(self.parse_table, self.callbacks, start)
        parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)
        if start_interactive:
            return InteractiveParser(self, parser_state, parser_state.lexer)
        return self.parse_from_state(parser_state)

    def parse_from_state(self, state):
        """Run the token loop on an existing ParserState and return the result."""
        try:
            token = None
            for token in state.lexer.lex(state):
                state.feed_token(token)
            # Synthesize the end-of-input token, carrying position info from
            # the last real token when there was one.
            end_token = (Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1))
            return state.feed_token(end_token, True)
        except UnexpectedInput as e:
            try:
                # Attach an interactive parser for error recovery, if available.
                e.interactive_parser = InteractiveParser(self, state, state.lexer)
            except NameError:
                pass
            raise e
        except Exception as e:
            if self.debug:
                # Dump the state stack to aid debugging, then re-raise.
                print('')
                print('STATE STACK DUMP')
                print('')
                for (i, s) in enumerate(state.state_stack):
                    print(('%d)' % i), s)
                print('')
            raise
def _option_required_str(x): if (not x): raise ValueError('value is required') return str(x)
def mcfarland_1973_construction(q, s):
    """McFarland (1973) difference-set construction.

    Builds the group G = F^(s+1) x Z_{r+1} with r = (q^(s+1)-1)/(q-1) and the
    difference set made of pairs (v, k) for v in the k-th s-dimensional
    subspace of F^(s+1).

    Returns:
        (G, [D]) — the ambient group and a one-element list with the set.
    """
    from sage.rings.finite_rings.finite_field_constructor import GF
    from sage.modules.free_module import VectorSpace
    from sage.rings.finite_rings.integer_mod_ring import Zmod
    from sage.categories.cartesian_product import cartesian_product
    # r counts the hyperplanes (s-dim subspaces) of F^(s+1).
    r = (((q ** (s + 1)) - 1) // (q - 1))
    F = GF(q, 'a')
    V = VectorSpace(F, (s + 1))
    K = Zmod((r + 1))
    G = cartesian_product((([F] * (s + 1)) + [K]))
    D = []
    # Pair the k-th element of Z_{r+1} with the k-th hyperplane.
    for (k, H) in zip(K, V.subspaces(s)):
        for v in H:
            D.append(G((tuple(v) + (k,))))
    return (G, [D])
class PaddedGatherOp(torch.autograd.Function):
    """Autograd wrapper for padded gather, with padded scatter as its gradient."""

    # NOTE(review): decorators appear truncated in this source ("_fwd"/"_bwd",
    # likely custom_fwd/custom_bwd or staticmethod) — confirm upstream.
    _fwd
    def forward(ctx, x, indices, bin_ids, bins, padded_bins, top_k):
        # Save routing metadata needed to reverse the gather in backward().
        ctx.save_for_backward(indices, bin_ids, bins, padded_bins)
        ctx.top_k = top_k
        return kernels.padded_gather(x, indices, bin_ids, None, bins, padded_bins, top_k)

    _bwd
    def backward(ctx, grad):
        grad = grad.contiguous()
        (indices, bin_ids, bins, padded_bins) = ctx.saved_tensors
        # Scatter the incoming gradient back to the original token order.
        out = kernels.padded_scatter(grad, indices, bin_ids, None, bins, padded_bins, ctx.top_k)
        # Gradients only for `x`; the routing arguments are non-differentiable.
        return (out, None, None, None, None, None)
class MobileBertTokenizerFast(metaclass=DummyObject):
    """Placeholder that raises a helpful error when 'tokenizers' is missing."""
    # Backends that must be installed before this class can be used.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def _initialize_override(module, override, cfg): if (not isinstance(override, (dict, list))): raise TypeError(f'override must be a dict or a list of dict, but got {type(override)}') override = ([override] if isinstance(override, dict) else override) for override_ in override: cp_override = copy.deepcopy(override_) name = cp_override.pop('name', None) if (name is None): raise ValueError(f'`override` must contain the key "name",but got {cp_override}') if (not cp_override): cp_override.update(cfg) elif ('type' not in cp_override.keys()): raise ValueError(f'`override` need "type" key, but got {cp_override}') if hasattr(module, name): _initialize(getattr(module, name), cp_override, wholemodule=True) else: raise RuntimeError(f'module did not have attribute {name}, but init_cfg is {cp_override}.')
class DaceModule(sys.modules[__name__].__class__):
    """Module subclass that makes the module itself callable.

    Calling the module delegates to the module-level ``function`` with the
    same arguments (a common trick for `import x; x(...)` APIs).
    """

    def __call__(self, *args, **kwargs):
        return function(*args, **kwargs)
def luminosity_distance(z, k=0):
    """Luminosity distance for redshift(s) ``z``.

    For a zero cosmological constant (const.lam == 0) uses the closed-form
    Mattig-style relation; otherwise numerically integrates the comoving
    distance and applies the curvature correction for k != 0.

    Args:
        z: redshift, scalar or ndarray (scalars are promoted to 1-element arrays).
        k: curvature sign/magnitude; 0 means flat.
    """
    if (not (type(z) is np.ndarray)):
        z = np.array([z])
    n = len(z)
    if (const.lam == 0):
        # Closed-form expression valid when the cosmological constant vanishes.
        denom = ((np.sqrt((1 + ((2 * const.q0) * z))) + 1) + (const.q0 * z))
        dlum = (((const.c * z) / const.H0) * (1 + ((z * (1 - const.q0)) / denom)))
        return dlum
    else:
        # General case: integrate _ldist from 0 to each z.
        dlum = np.zeros(n)
        for i in range(n):
            if (z[i] <= 0):
                dlum[i] = 0.0
            else:
                dlum[i] = quadrature(_ldist, 0, z[i])[0]
        # Curvature correction: sinh for open (k>0 here), sin for closed.
        if (k > 0):
            dlum = (np.sinh((np.sqrt(k) * dlum)) / np.sqrt(k))
        elif (k < 0):
            dlum = (np.sin((np.sqrt((- k)) * dlum)) / np.sqrt((- k)))
        return outputify((((const.c * (1 + z)) * dlum) / const.H0))
# NOTE(review): the decorator prefix appears truncated in this source
# (presumably a processor-registry decorator) — confirm upstream.
_processor('masked_roberta_tokenizer')
class MaskedRobertaTokenizer(MaskedTokenProcessor):
    """RoBERTa tokenizer with masked-LM preprocessing.

    Tokenizes one or two text segments, randomly masks tokens with the
    configured probability, pads to ``max_seq_length``, and returns the
    tensors needed for MLM training.
    """

    def __init__(self, config, *args, **kwargs):
        tokenizer_config = config.tokenizer_config
        self._tokenizer = AutoTokenizer.from_pretrained(tokenizer_config.type, **tokenizer_config.params)
        # RoBERTa special tokens (bos plays the role of CLS).
        self._CLS_TOKEN = self._tokenizer.bos_token
        self._SEP_TOKEN = self._tokenizer.sep_token
        self._MASK_TOKEN = self._tokenizer.mask_token
        self._PAD_TOKEN_ID = self._tokenizer.pad_token_id
        self._max_seq_length = config.max_seq_length
        self._probability = getattr(config, 'mask_probability', 0.15)

    def _truncate_seq_pair(self, tokens_a: List[str], tokens_b: List[str], max_length: int):
        """Trim the longer of the two lists (in place) until they fit.

        Reserves 2 special-token slots for a single sentence and 4 for a pair.
        """
        if (tokens_b is None):
            tokens_b = []
            max_length -= 2
        else:
            max_length -= 4
        assert (max_length >= 0), ('Max length should be minimum 2 in case of single sentence' + ' and 4 in case of two sentences.')
        while True:
            total_length = (len(tokens_a) + len(tokens_b))
            if (total_length <= max_length):
                break
            # Always pop from the longer sequence to keep them balanced.
            if (len(tokens_a) > len(tokens_b)):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _convert_to_indices(self, tokens_a: List[str], tokens_b: Optional[List[str]]=None, probability: float=0.15):
        """Mask, wrap with special tokens, pad, and tensorize one example.

        lm_label_ids is -1 everywhere except at masked positions, which hold
        the original token id.
        """
        (tokens_a, label_a) = self._random_word(tokens_a, probability=probability)
        tokens = (([self._CLS_TOKEN] + tokens_a) + [self._SEP_TOKEN])
        # NOTE: RoBERTa uses no segment embeddings, so segment ids are all 0.
        segment_ids = (([0] + ([0] * len(tokens_a))) + [0])
        lm_label_ids = (([(- 1)] + label_a) + [(- 1)])
        if tokens_b:
            (tokens_b, label_b) = self._random_word(tokens_b, probability=probability)
            assert (len(tokens_b) > 0)
            tokens += (([self._SEP_TOKEN] + tokens_b) + [self._SEP_TOKEN])
            segment_ids += (([0] + ([0] * len(tokens_b))) + [0])
            lm_label_ids += (([(- 1)] + label_b) + [(- 1)])
        input_ids = self._convert_tokens_to_ids(tokens)
        input_mask = ([1] * len(input_ids))
        # Right-pad everything to the fixed sequence length.
        while (len(input_ids) < self._max_seq_length):
            input_ids.append(self._PAD_TOKEN_ID)
            input_mask.append(0)
            segment_ids.append(0)
            lm_label_ids.append((- 1))
        assert (len(input_ids) == self._max_seq_length)
        assert (len(input_mask) == self._max_seq_length)
        assert (len(segment_ids) == self._max_seq_length)
        assert (len(lm_label_ids) == self._max_seq_length)
        input_ids = torch.tensor(input_ids, dtype=torch.long)
        input_mask = torch.tensor(input_mask, dtype=torch.long)
        segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        lm_label_ids = torch.tensor(lm_label_ids, dtype=torch.long)
        return {'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, 'lm_label_ids': lm_label_ids, 'tokens': tokens}
def test_arrow_chunked_strings():
    """A chunked pyarrow string array should flatten to one awkward list."""
    a = pyarrow.chunked_array([pyarrow.array(['one', 'two', 'three', 'four', 'five']), pyarrow.array(['six', 'seven', 'eight'])])
    assert (to_list(ak._connect.pyarrow.handle_arrow(a)) == ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight'])
class Normalize(object):
    """Transform: convert an RGB image to a normalized tensor, and the label
    map (if given) to a LongTensor."""

    def __call__(self, rgb_img, label_img=None):
        rgb_img = F.to_tensor(rgb_img)
        rgb_img = F.normalize(rgb_img, MEAN, STD)
        # Fix: the default label_img=None crashed in the tensor conversion
        # below; pass it through unchanged instead.
        if label_img is None:
            return (rgb_img, None)
        label_img = torch.LongTensor(np.array(label_img).astype(np.int64))
        return (rgb_img, label_img)
# NOTE(review): the decorator prefix appears truncated in this source
# (presumably `@pytest.mark` before `.parametrize`) — confirm upstream.
.parametrize('dtype', [np.uint8, np.int32, np.float16, np.float32, np.float64])
def test_mssim_vs_legacy(dtype):
    """structural_similarity must match the legacy (skimage 0.17) value for all dtypes."""
    # NOTE(review): this reference value looks truncated ('0.') — it should be
    # the MSSIM computed by skimage 0.17; confirm upstream.
    mssim_skimage_0pt17 = 0.
    assert (cam.dtype == np.uint8)
    assert (cam_noisy.dtype == np.uint8)
    # data_range fixed at 255 since the underlying images are uint8.
    mssim = structural_similarity(cam.astype(dtype), cam_noisy.astype(dtype), data_range=255)
    assert_almost_equal(mssim, mssim_skimage_0pt17)
class WeightsHessianTraceReuseModelTest(BaseHessianTraceBasicModelTest):
    """Hessian-trace approximation test over all weighted nodes of a model."""

    def __init__(self, unit_test):
        super().__init__(unit_test)
        self.val_batch_size = 1

    def run_test(self, seed=0):
        (graph, pytorch_impl) = self._setup()
        hessian_service = hessian_common.HessianInfoService(graph=graph, representative_dataset=self.representative_data_gen, fw_impl=pytorch_impl)
        # Interest points: every node that carries weights.
        ipts = [n for n in graph.get_topo_sorted_nodes() if (len(n.weights) > 0)]
        for ipt in ipts:
            # Exercise each granularity with a different number of scores.
            self.test_hessian_trace_approx(hessian_service, interest_point=ipt, num_scores=1, granularity=hessian_common.HessianInfoGranularity.PER_OUTPUT_CHANNEL, mode=hessian_common.HessianMode.WEIGHTS)
            self.test_hessian_trace_approx(hessian_service, interest_point=ipt, num_scores=2, granularity=hessian_common.HessianInfoGranularity.PER_TENSOR, mode=hessian_common.HessianMode.WEIGHTS)
            self.test_hessian_trace_approx(hessian_service, interest_point=ipt, num_scores=3, granularity=hessian_common.HessianInfoGranularity.PER_ELEMENT, mode=hessian_common.HessianMode.WEIGHTS)
def Linf_PGD_alpha(model, X, y, epsilon, steps=7, random_start=True):
    """L-infinity PGD attack on the model's architecture parameters.

    Runs ``steps`` of sign-gradient ascent on the loss w.r.t. the
    architecture parameters, projecting into the epsilon-ball around the
    starting values after every step.  If the perturbation did not increase
    the loss, the original parameters are restored.  Restores train mode at
    the end if the model was training.
    """
    training = model.training
    if training:
        model.eval()
    # Snapshot parameters so we can project onto the eps-ball / roll back.
    saved_params = [p.clone() for p in model.arch_parameters()]
    optimizer = Linf_SGD(model.arch_parameters(), lr=((2 * epsilon) / steps))
    with torch.no_grad():
        loss_before = model._loss(X, y)
    if random_start:
        # Random init inside the eps-ball, then clip to valid range.
        for p in model.arch_parameters():
            p.data.add_(torch.zeros_like(p).uniform_((- epsilon), epsilon))
        model.clip()
    for _ in range(steps):
        optimizer.zero_grad()
        model.zero_grad()
        # Negated loss: the optimizer minimizes, so this ascends the loss.
        loss = (- model._loss(X, y))
        loss.backward()
        optimizer.step()
        # Project the accumulated perturbation back into the eps-ball.
        diff = [(model.arch_parameters()[i] - saved_params[i]).clamp_((- epsilon), epsilon) for i in range(len(saved_params))]
        for (i, p) in enumerate(model.arch_parameters()):
            p.data.copy_((diff[i] + saved_params[i]))
        model.clip()
    optimizer.zero_grad()
    model.zero_grad()
    with torch.no_grad():
        loss_after = model._loss(X, y)
    # Keep the attack only if it actually increased the loss.
    if (loss_before > loss_after):
        for (i, p) in enumerate(model.arch_parameters()):
            p.data.copy_(saved_params[i])
    if training:
        model.train()
def atomic_data_dict(counts, linear):
    """Package per-species counts, their offsets, and linear terms into a dict."""
    return {
        'linear': linear,
        'offsets': get_offsets(counts),
        'counts': counts,
    }
def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True):
    """Initialize Conv3d (MSRA fill), BatchNorm3d and Linear layers of ``model``.

    BatchNorm weights marked with ``transform_final_bn`` are zeroed when
    ``zero_init_final_bn`` is set; Linear weights get a normal init with
    std ``fc_init_std`` and zero bias.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv3d):
            c2_msra_fill(module)
        elif isinstance(module, nn.BatchNorm3d):
            # Zero the final BN of residual blocks (when flagged) so each
            # block starts as identity.
            is_final = getattr(module, 'transform_final_bn', False)
            bn_weight = 0.0 if (is_final and zero_init_final_bn) else 1.0
            if module.weight is not None:
                module.weight.data.fill_(bn_weight)
            if module.bias is not None:
                module.bias.data.zero_()
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=fc_init_std)
            if module.bias is not None:
                module.bias.data.zero_()