code
stringlengths
101
5.91M
class AveragePoolingDataGrad(UnaryDataGrad):
    """Data-gradient of AveragePooling.

    The backward pass is implemented by reusing the forward AveragePooling
    function with identical settings (average pooling is linear, so its
    adjoint has the same pooling structure) — presumably the base class
    applies ``self._func`` to the incoming gradient; confirm in UnaryDataGrad.
    """

    def __init__(self, ctx, kernel, stride=None, ignore_border=True, pad=None, channel_last=False, including_pad=True):
        super(AveragePoolingDataGrad, self).__init__(ctx)
        # Same configuration as the forward op whose gradient this computes.
        self._func = _F.AveragePooling(ctx, kernel, stride, ignore_border, pad, channel_last, including_pad)
def norm(a, ord=None, axis=None, keepdims=False, check_finite=True):
    """Matrix or vector norm with BLAS/LAPACK fast paths.

    Fast paths apply only to non-empty float/complex arrays with the default
    ``axis=None``/``keepdims=False``:

    * 1-D, ``ord`` in (None, 2): BLAS ``nrm2`` (Euclidean norm).
    * 2-D, ``ord`` 1 or inf: LAPACK ``lange``, using the identity
      ``norm(A, 1) == norm(A.T, inf)`` so either memory layout avoids a copy.

    Everything else falls back to ``numpy.linalg.norm``.  When
    ``check_finite`` is true the input is validated for NaN/Inf first.
    """
    if check_finite:
        a = np.asarray_chkfinite(a)
    else:
        a = np.asarray(a)
    # Restrict fast paths to real/complex float dtypes ('fdFD') and the
    # default reduction semantics.
    if (a.size and (a.dtype.char in 'fdFD') and (axis is None) and (not keepdims)):
        if ((ord in (None, 2)) and (a.ndim == 1)):
            nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred')
            return nrm2(a)
        if (a.ndim == 2):
            # lange's '1' = max column sum, 'i' = max row sum; pick the
            # variant matching the array's memory order.
            lange_args = None
            if (ord == 1):
                if np.isfortran(a):
                    lange_args = ('1', a)
                elif np.isfortran(a.T):
                    lange_args = ('i', a.T)
            elif (ord == np.inf):
                if np.isfortran(a):
                    lange_args = ('i', a)
                elif np.isfortran(a.T):
                    lange_args = ('1', a.T)
            if lange_args:
                lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred')
                return lange(*lange_args)
    # Generic fallback for all other cases.
    return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)
@dataclass(frozen=True)
class DistributedConfig:
    """Configuration for initialising ``jax.distributed``.

    Fix: the decorator had been truncated to a bare ``(frozen=True)`` in this
    copy, which is a syntax error; restored as ``@dataclass(frozen=True)`` so
    the annotated fields become constructor parameters.
    """
    coordinator_address: Optional[str] = None
    num_processes: Optional[int] = None
    process_id: Optional[int] = None
    local_device_ids: Optional[Union[(int, List[int])]] = None

    def _is_distributed(self):
        """Return True when any field is set or a known cluster env is detected."""
        if ((self.coordinator_address is not None) or (self.num_processes is not None) or (self.process_id is not None) or (self.local_device_ids is not None)):
            return True
        if any((env.is_env_present() for env in clusters.ClusterEnv._cluster_types)):
            return True
        return False

    def initialize(self):
        """Initialise jax.distributed, filling missing values from SLURM when present."""
        if self._is_distributed():
            device_ids = self.local_device_ids
            coordinator_address = self.coordinator_address
            # SLURM can supply the pieces the user did not configure explicitly.
            if LevanterSlurmCluster.is_env_present():
                if (device_ids is None):
                    device_ids = LevanterSlurmCluster.get_local_device_ids_for_process()
                if (coordinator_address is None):
                    coordinator_address = LevanterSlurmCluster.get_coordinator_address()
            jax.distributed.initialize(coordinator_address, self.num_processes, self.process_id, device_ids)
            logger.info(f'Initialized jax.distributed with {jax.device_count()} devices, {jax.process_count()} processes, coordinator_address={coordinator_address}, process_id={self.process_id}, my device_ids={device_ids}.')
        else:
            logger.info('Not initializing jax.distributed because no distributed config was provided, and no cluster was detected.')
def train_std_scaler(X):
    """Fit a StandardScaler on all samples pooled from every sequence in X.

    X is an iterable of sample sequences; they are flattened into a single
    list before fitting so the scaler sees every row.
    """
    scaler = prep.StandardScaler()
    pooled = [sample for sequence in X for sample in sequence]
    scaler.fit(pooled)
    return scaler
class GaussianMLPRegressorModel(GaussianMLPModel):
    """GaussianMLPModel wrapped with input/output normalisation variables,
    for use as a regressor (TF1 compat graph code)."""

    def __init__(self, input_shape, output_dim, name='GaussianMLPRegressorModel', **kwargs):
        super().__init__(output_dim=output_dim, name=name, **kwargs)
        self._input_shape = input_shape

    def network_output_spec(self):
        """Names of the tensors returned by _build, in order."""
        return ['means', 'log_stds', 'std_param', 'normalized_means', 'normalized_log_stds', 'x_mean', 'x_std', 'y_mean', 'y_std', 'dist']

    def _build(self, state_input, name=None):
        # Non-trainable running statistics used to whiten the inputs and
        # de-normalise the outputs; presumably updated externally by the
        # regressor's fit procedure — confirm at the call site.
        with tf.compat.v1.variable_scope('normalized_vars'):
            x_mean_var = tf.compat.v1.get_variable(name='x_mean', shape=((1,) + self._input_shape), dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False)
            x_std_var = tf.compat.v1.get_variable(name='x_std_var', shape=((1,) + self._input_shape), dtype=np.float32, initializer=tf.ones_initializer(), trainable=False)
            y_mean_var = tf.compat.v1.get_variable(name='y_mean_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.zeros_initializer(), trainable=False)
            y_std_var = tf.compat.v1.get_variable(name='y_std_var', shape=(1, self._output_dim), dtype=np.float32, initializer=tf.ones_initializer(), trainable=False)
        # Whitened input fed to the parent Gaussian MLP.
        normalized_xs_var = ((state_input - x_mean_var) / x_std_var)
        (normalized_mean, normalized_log_std, std_param, dist) = super()._build(normalized_xs_var)
        # Undo output normalisation: scale/shift the mean, shift the log-std.
        with tf.name_scope('mean_network'):
            means_var = ((normalized_mean * y_std_var) + y_mean_var)
        with tf.name_scope('std_network'):
            log_stds_var = (normalized_log_std + tf.math.log(y_std_var))
        return (means_var, log_stds_var, std_param, normalized_mean, normalized_log_std, x_mean_var, x_std_var, y_mean_var, y_std_var, dist)
def _valid_accessor(acc): if (not isinstance(acc, tuple)): return False if (len(acc) != 2): return False return (isinstance(acc[0], str) and (isinstance(acc[1], Datatype) or is_sort(acc[1])))
def test_constructor_param_count_of_type_none(default_test_case, constructor_mock):
    """A constructor statement reports zero parameters of type AnyType."""
    statement = stmt.ConstructorStatement(default_test_case, constructor_mock)
    assert statement._param_count_of_type(AnyType()) == 0
def verbosity_to_loglevel(verbosity):
    """Map a verbosity count to a ``logging`` level.

    0 or less also silences all warnings via the warnings filter.
    """
    if verbosity <= 0:
        warnings.filterwarnings('ignore')
        return logging.ERROR
    # 1 -> WARNING, 2 -> INFO, anything higher (or unrecognised) -> DEBUG.
    return {1: logging.WARNING, 2: logging.INFO}.get(verbosity, logging.DEBUG)
class QuantEmbedding(nn.Module):
    """Drop-in ``nn.Embedding`` with optional symmetric weight quantization.

    When ``quant_mode`` is True the weight is quantized to ``weight_bit`` bits
    with a per-tensor symmetric scale recomputed each forward call; the call
    returns ``(embeddings, scaling_factor)``.  When quantization is disabled
    the scaling factor is None.
    """

    def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False):
        super().__init__()
        self.num_ = num_embeddings
        self.dim = embedding_dim
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        # NOTE(review): _weight is accepted for nn.Embedding API parity but
        # ignored here — the weight always starts at zero; confirm intentional.
        self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
        # Buffers holding the quantized view of the weight (filled in forward).
        self.register_buffer('weight_scaling_factor', torch.zeros(1))
        self.register_buffer('weight_integer', torch.zeros_like(self.weight))
        self.weight_bit = weight_bit
        self.momentum = momentum
        self.quant_mode = quant_mode
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def forward(self, x, positions=None, incremental_state=None):
        # Float path: behave exactly like nn.functional.embedding.
        if (not self.quant_mode):
            return (nn.functional.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None)
        # Quantized path: derive the scale from the current weight min/max.
        w = self.weight
        w_transform = w.data.detach()
        w_min = w_transform.min().expand(1)
        w_max = w_transform.max().expand(1)
        self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
        self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor)
        emb_int = nn.functional.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
        # De-quantize on the way out so callers receive float embeddings.
        return ((emb_int * self.weight_scaling_factor), self.weight_scaling_factor)
class ConfigurationError(Exception):
    """Raised when an invalid configuration is detected.

    The message is now also forwarded to the ``Exception`` base class so
    that ``str(err)`` and tracebacks display it (previously the base was
    initialised empty and ``str(err)`` returned '').
    """

    def __init__(self, msg):
        super(ConfigurationError, self).__init__(msg)
        self._msg = msg

    def message(self):
        """Return the configuration error message."""
        return self._msg
def random_str(length: int = 4) -> str:
    """Return a random alphanumeric string of *length* characters.

    Bug fix: the previous implementation ignored *length* and always
    produced exactly 4 characters (``k=4`` was hard-coded).
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
class _BatchNorm(nn.Module):
    """Base batch-norm layer with a ``freezed`` switch that stops batch-stat
    usage/updates without putting the whole module in eval mode."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # When True, forward uses running stats even while self.training.
        self.freezed = False
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
        self.reset_parameters()

    def reset_parameters(self):
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
        if self.affine:
            # NOTE(review): uniform_() weight init matches old-style torch BN
            # (modern torch initialises weight to ones) — confirm intentional.
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        # Subclasses validate input dimensionality (1d/2d/3d variants).
        return NotImplemented

    def forward(self, input):
        self._check_input_dim(input)
        # Batch statistics are used (and running stats updated) only when
        # training, tracking is enabled, and the layer is not frozen.
        compute_stats = ((not self.freezed) and self.training and self.track_running_stats)
        ret = F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, compute_stats, self.momentum, self.eps)
        return ret

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, track_running_stats={track_running_stats}'.format(**self.__dict__)
def draw_rectangle(img, bbox, bbox_color=(255, 255, 255), thickness=3, is_opaque=False, alpha=0.5):
    """Draw *bbox* (x1, y1, x2, y2) on a copy of *img*.

    With ``is_opaque`` False an outlined rectangle is drawn; otherwise a
    filled rectangle is alpha-blended onto the image.
    """
    output = img.copy()
    top_left = (bbox[0], bbox[1])
    bottom_right = (bbox[2], bbox[3])
    if is_opaque:
        # Fill on an overlay, then blend: output = alpha*overlay + (1-alpha)*output.
        overlay = img.copy()
        cv2.rectangle(overlay, top_left, bottom_right, bbox_color, -1)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    else:
        cv2.rectangle(output, top_left, bottom_right, bbox_color, thickness)
    return output
def exp(field):
    """Return an edge-UDF that maps edge feature *field* to
    exp(clamp(sum over last dim, -5, 5)), keeping the reduced dim."""
    def _edge_udf(edges):
        summed = edges.data[field].sum(-1, keepdim=True)
        return {field: torch.exp(summed.clamp(-5, 5))}
    return _edge_udf
class RobertaForMultipleChoice(metaclass=DummyObject):
    """Placeholder emitted when torch is unavailable; any use raises a
    helpful backend-missing error instead of an opaque ImportError."""
    # Backends required before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register the C++ constructors of ns3::Ipv4MaskChecker on the binding
    class (auto-generated PyBindGen registration code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
class LEDForQuestionAnswering():
    """Dummy placeholder used when PyTorch is not installed; every entry
    point raises a torch-required error."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method here, while the real class
    # exposes from_pretrained as a classmethod — confirm against the dummy
    # object generator.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class DebugUnderflowOverflow():
    """Debug helper that hooks every module's forward pass, records
    abs-min/abs-max frames for parameters, inputs and outputs, and aborts
    when an inf/nan is detected.

    Fix: ``trace_batch_nums`` previously used a mutable default argument
    (``[]``); it now defaults to ``None`` and a fresh list is created per
    instance (backward-compatible for all callers).
    """

    def __init__(self, model, max_frames_to_save=21, trace_batch_nums=None, abort_after_batch_num=None):
        self.model = model
        # Avoid the shared-mutable-default pitfall.
        self.trace_batch_nums = trace_batch_nums if (trace_batch_nums is not None) else []
        self.abort_after_batch_num = abort_after_batch_num
        # Ring buffer of the most recent formatted frames.
        self.frames = collections.deque([], max_frames_to_save)
        self.frame = []
        self.batch_number = 0
        self.total_calls = 0
        self.detected_overflow = False
        self.prefix = ' '
        self.analyse_model()
        self.register_forward_hook()

    def save_frame(self, frame=None):
        """Finalise the current frame (optionally appending *frame* first)."""
        if (frame is not None):
            self.expand_frame(frame)
        self.frames.append('\n'.join(self.frame))
        self.frame = []

    def expand_frame(self, line):
        self.frame.append(line)

    def trace_frames(self):
        print('\n'.join(self.frames))
        self.frames = []

    def reset_saved_frames(self):
        self.frames = []

    def dump_saved_frames(self):
        """Print the saved frames leading up to the detected inf/nan."""
        print(f''' Detected inf/nan during batch_number={self.batch_number}''')
        print(f'Last {len(self.frames)} forward frames:')
        print(f"{'abs min':8} {'abs max':8} metadata")
        print('\n'.join(self.frames))
        print('\n\n')
        self.frames = []

    def analyse_model(self):
        # Map module object -> fully-qualified name for frame labelling.
        self.module_names = {m: name for (name, m) in self.model.named_modules()}

    def analyse_variable(self, var, ctx):
        """Record one variable's stats; flag overflow when detected."""
        if torch.is_tensor(var):
            self.expand_frame(get_abs_min_max(var, ctx))
            if detect_overflow(var, ctx):
                self.detected_overflow = True
        elif (var is None):
            self.expand_frame(f"{'None':>17} {ctx}")
        else:
            self.expand_frame(f"{'not a tensor':>17} {ctx}")

    def batch_start_frame(self):
        self.expand_frame(f''' {self.prefix} *** Starting batch number={self.batch_number} ***''')
        self.expand_frame(f"{'abs min':8} {'abs max':8} metadata")

    def batch_end_frame(self):
        self.expand_frame(f'''{self.prefix} *** Finished batch number={(self.batch_number - 1)} *** ''')

    def create_frame(self, module, input, output):
        """Record one module's parameters, inputs and outputs as a frame."""
        self.expand_frame(f'{self.prefix} {self.module_names[module]} {module.__class__.__name__}')
        # Parameters of this module only; children get their own frames.
        for (name, p) in module.named_parameters(recurse=False):
            self.analyse_variable(p, name)
        if isinstance(input, tuple):
            for (i, x) in enumerate(input):
                self.analyse_variable(x, f'input[{i}]')
        else:
            self.analyse_variable(input, 'input')
        if isinstance(output, tuple):
            for (i, x) in enumerate(output):
                # Outputs may themselves be tuples (e.g. attention returns).
                if isinstance(x, tuple):
                    for (j, y) in enumerate(x):
                        self.analyse_variable(y, f'output[{i}][{j}]')
                else:
                    self.analyse_variable(x, f'output[{i}]')
        else:
            self.analyse_variable(output, 'output')
        self.save_frame()

    def register_forward_hook(self):
        self.model.apply(self._register_forward_hook)

    def _register_forward_hook(self, module):
        module.register_forward_hook(self.forward_hook)

    def forward_hook(self, module, input, output):
        last_frame_of_batch = False
        trace_mode = (True if (self.batch_number in self.trace_batch_nums) else False)
        if trace_mode:
            self.reset_saved_frames()
        if (self.total_calls == 0):
            self.batch_start_frame()
        self.total_calls += 1
        # The top-level model's hook fires last, so its call marks the end
        # of the current batch.
        if (module == self.model):
            self.batch_number += 1
            last_frame_of_batch = True
        self.create_frame(module, input, output)
        if trace_mode:
            self.trace_frames()
        if last_frame_of_batch:
            self.batch_start_frame()
        if (self.detected_overflow and (not trace_mode)):
            self.dump_saved_frames()
            raise ValueError('DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. Please scroll up above this traceback to see the activation values prior to this event.')
        if ((self.abort_after_batch_num is not None) and (self.batch_number > self.abort_after_batch_num)):
            raise ValueError(f'DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg')
class NoTransformerFoundationCache(FoundationCache):
    """FoundationCache variant that performs no caching: every request loads
    the transformer afresh via the module-level ``load_bert`` helper."""

    def load_bert(self, transformer_name):
        # Delegates to the global load_bert function (no memoisation).
        return load_bert(transformer_name)
def LatticePoset(data=None, *args, **options):
    """Construct a finite lattice poset from *data*.

    Returns *data* unchanged when it is already a FiniteLatticePoset and no
    extra arguments were given.  Otherwise a Poset is built and verified to
    be a lattice: a bottom element must exist, and (unless ``check=False``)
    every pair of elements must have a join.

    Raises ValueError when there is no bottom element; re-raises
    LatticeError (with elements translated from internal vertex indices)
    when joins are missing.
    """
    if (isinstance(data, FiniteLatticePoset) and (not args) and (not options)):
        return data
    # 'check' is consumed here so it is not forwarded to Poset().
    if ('check' in options):
        check = options.pop('check')
    else:
        check = True
    P = Poset(data, *args, **options)
    if (P.cardinality() != 0):
        # A non-empty lattice must have a bottom element.
        if (not P.has_bottom()):
            raise ValueError('not a meet-semilattice: no bottom element')
        if check:
            try:
                P._hasse_diagram.join_matrix()
            except LatticeError as error:
                # Translate internal vertex indices back to poset elements so
                # the error message is meaningful to the caller.
                error.x = P._vertex_to_element(error.x)
                error.y = P._vertex_to_element(error.y)
                raise
    return FiniteLatticePoset(P, category=FiniteLatticePosets(), facade=P._is_facade)
class Bottleneck_depthwise_ip(Bottleneck):
    """3D ResNet bottleneck variant: two 1x1x1 pointwise convs, a depthwise
    (groups=planes) 3x3x3 conv, then a 1x1x1 expansion conv."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck_depthwise_ip, self).__init__(inplanes, planes, stride, downsample, dilation)
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        # Depthwise spatio-temporal conv: one filter per channel.
        self.conv3 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False, groups=planes)
        self.bn3 = nn.BatchNorm3d(planes)
        self.conv4 = nn.Conv3d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn4 = nn.BatchNorm3d((planes * 4))

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        # NOTE(review): there is no ReLU between bn2 and conv3 — confirm this
        # is intentional for the depthwise-interleaved design.
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv4(out)
        out = self.bn4(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
# NOTE(review): the decorator on this class was truncated to a bare "_jieba"
# marker in this copy; restored as @require_jieba — confirm against upstream.
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CPM-Ant (jieba-based pre-tokenization)."""
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal vocabulary file written into the per-test tmp dir.
        vocab_tokens = ['<d>', '</d>', '<s>', '</s>', '</_>', '<unk>', '<pad>', '</n>', '', '', 'C', 'P', 'M', 'A', 'n', 't']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))

    def test_pre_tokenization(self):
        """Tokenize with the pretrained model and round-trip through ids."""
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '!'
        jieba_tokens = ['', '', '', '', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = '!'
        input_tokens = ([tokenizer.bos_token] + tokens)
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
def get_paws_loss(multicrop=6, tau=0.1, T=0.25, me_max=True):
    """Build the PAWS loss closure (TensorFlow port).

    Fix: the two matrix multiplications in ``snn`` had been lost during
    extraction (``query tf.transpose(supports)`` and ``... labels``);
    restored with the ``@`` operator.
    """
    def sharpen(proba):
        # Temperature sharpening, renormalised per row.
        sharp_p = (proba ** (1.0 / T))
        sharp_p /= tf.reduce_sum(sharp_p, axis=1, keepdims=True)
        return sharp_p

    def snn(query, supports, labels):
        # Soft nearest neighbours: cosine-similarity softmax over supports,
        # projected onto the support labels.
        query = tf.math.l2_normalize(query, axis=1)
        supports = tf.math.l2_normalize(supports, axis=1)
        return tf.nn.softmax((query @ tf.transpose(supports)) / tau, axis=1) @ labels

    def loss(anchor_views, anchor_supports, anchor_support_labels, target_views, target_supports, target_support_labels, sharpen=sharpen, snn=snn):
        # Views are stacked as [2 global views, multicrop crops] per batch.
        batch_size = (len(anchor_views) // (2 + multicrop))
        probs = snn(anchor_views, anchor_supports, anchor_support_labels)
        targets = tf.stop_gradient(snn(target_views, target_supports, target_support_labels))
        targets = tf.stop_gradient(sharpen(targets))
        if (multicrop > 0):
            # Average the two global-view targets and reuse them per crop.
            mc_target = tf.stop_gradient((0.5 * (targets[:batch_size] + targets[batch_size:])))
            targets = tf.stop_gradient(tf.concat([targets, *[mc_target for _ in range(multicrop)]], axis=0))
        # Zero out near-zero target probabilities.
        mask = tf.stop_gradient(tf.math.greater(targets, 0.0001))
        mask = tf.stop_gradient(tf.cast(mask, dtype=targets.dtype))
        targets *= tf.stop_gradient(mask)
        # Cross-entropy via log(p ** -t) == -t * log(p).
        loss = tf.reduce_mean(tf.reduce_sum(tf.math.log((probs ** (- targets))), axis=1))
        # Mean-entropy-maximisation regulariser (encourages uniform mean prediction).
        rloss = 0.0
        if me_max:
            avg_probs = tf.reduce_mean(sharpen(probs), axis=0)
            rloss -= tf.reduce_sum(tf.math.log((avg_probs ** (- avg_probs))))
        return (loss, rloss)
    return loss
class HubertFeatureReaderS2T(HubertFeatureReader):
    """HubertFeatureReader that reads audio stored inside an uncompressed
    zip shard; paths have the form 'archive.zip:offset:length'."""

    def read_audio(self, path, ref_len=None):
        # Split 'archive.zip:offset:length' into its three components.
        (path, *extra) = path.split(':')
        assert (len(extra) == 2)
        assert path.endswith('.zip')
        data = read_from_uncompressed_zip(path, int(extra[0]), int(extra[1]))
        f = io.BytesIO(data)
        (wav, sr) = get_waveform(f)
        assert (sr == self.task.cfg.sample_rate), sr
        # Down-mix multi-channel audio by averaging over channels.
        if (wav.ndim == 2):
            wav = wav.mean((- 1))
        assert (wav.ndim == 1), wav.ndim
        # Warn on length mismatch beyond 160 samples (presumably 10 ms at
        # 16 kHz — confirm against the task sample rate).
        if ((ref_len is not None) and (abs((ref_len - len(wav))) > 160)):
            logging.warning(f'ref {ref_len} != read {len(wav)} ({path})')
        return wav
class IntegralProjectivePlaneCurve_finite_field(IntegralProjectiveCurve_finite_field, ProjectivePlaneCurve_finite_field):
    """Integral (i.e. reduced and irreducible) projective plane curve over a
    finite field; behaviour comes entirely from the two bases."""
    # Class used to construct rational points of this curve.
    _point = IntegralProjectivePlaneCurvePoint_finite_field
def entropy(p):
    """Shannon entropy of *p* along the last dimension, with the convention
    0 * log(0) == 0 (natural log)."""
    contributions = torch.where(p == 0, torch.zeros_like(p), p * torch.log(p))
    return -contributions.sum(dim=-1)
class LogFormatter(logging.Formatter):
    """Tornado-style log formatter with optional curses terminal colours.

    Colours the ``[level time module:line]`` prefix per level when stdout
    supports colour; exception text is appended indented under the record.
    """
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Curses colour indices per level: DEBUG=blue, INFO=green,
    # WARNING=yellow, ERROR=red.
    DEFAULT_COLORS = {logging.DEBUG: 4, logging.INFO: 2, logging.WARNING: 3, logging.ERROR: 1}

    def __init__(self, color=True, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt
        self._colors = {}
        if (color and _stdout_supports_color()):
            # Prefer the ANSI escape (setaf), fall back to setf.
            fg_color = (curses.tigetstr('setaf') or curses.tigetstr('setf') or '')
            if ((3, 0) < sys.version_info < (3, 2, 3)):
                # Work around a bytes/str mismatch in early Python 3 curses.
                fg_color = unicode_type(fg_color, 'ascii')
            for (levelno, code) in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), 'ascii')
            self._normal = unicode_type(curses.tigetstr('sgr0'), 'ascii')
        else:
            self._normal = ''

    def format(self, record):
        try:
            message = record.getMessage()
            assert isinstance(message, basestring_type)
            record.message = _safe_unicode(message)
        except Exception as e:
            # Never let a malformed log record break logging itself.
            record.message = ('Bad message (%r): %r' % (e, record.__dict__))
        record.asctime = self.formatTime(record, self.datefmt)
        if (record.levelno in self._colors):
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''
        formatted = (self._fmt % record.__dict__)
        if record.exc_info:
            if (not record.exc_text):
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # Append the traceback, one line per entry, under the record.
            lines = [formatted.rstrip()]
            lines.extend((_safe_unicode(ln) for ln in record.exc_text.split('\n')))
            formatted = '\n'.join(lines)
        # Indent continuation lines so multi-line records stay readable.
        return formatted.replace('\n', '\n ')
def MannWhitney(data_A, data_B):
    """One-sided (less) Mann-Whitney U test p-value for two samples.

    Returns 1.0 (no significance) with a warning when either sample has
    fewer than 20 observations, since the test is only recommended for
    larger samples.

    Bug fix: ``n`` and ``m`` were previously referenced without being
    defined (NameError on every call); they are the sample sizes.
    """
    n = len(data_A)
    m = len(data_B)
    if ((n < 20) or (m < 20)):
        print('Use only when the number of observation in each sample is > 20')
        return 1.0
    (_, pval) = Utest(data_A, data_B, alternative='less')
    return pval
def buffered_db_writer(conn, table_name, table_schema, buff_size=100, slice_id=0):
    """Yield a buffered writer for *table_name*, chosen by conn.driver.

    Supported drivers: maxcompute, mysql, hive, paiio.  The writer is always
    closed on exit, even if the caller raises.

    Raises ValueError for an unrecognized driver.

    NOTE(review): this is a generator intended for ``with`` usage; it is
    presumably decorated with @contextlib.contextmanager at the original
    definition site — confirm before calling it directly.
    """
    driver = conn.driver
    if (driver == 'maxcompute'):
        w = db_writer.MaxComputeDBWriter(conn, table_name, table_schema, buff_size)
    elif (driver == 'mysql'):
        w = db_writer.MySQLDBWriter(conn, table_name, table_schema, buff_size)
    elif (driver == 'hive'):
        w = db_writer.HiveDBWriter(conn, table_name, table_schema, buff_size)
    elif (driver == 'paiio'):
        # PAI writer takes no connection; it is addressed by slice_id.
        w = db_writer.PAIMaxComputeDBWriter(table_name, table_schema, buff_size, slice_id)
    else:
        raise ValueError(('unrecognized database driver: %s' % driver))
    try:
        (yield w)
    finally:
        # Flush and release the writer regardless of caller errors.
        w.close()
class InputFeatures(object):
    """Plain container for the per-example features fed to a QA model:
    tokenisation products, id/mask/segment arrays, and answer positions.
    Every constructor argument is stored verbatim as an attribute."""

    _FIELDS = ('unique_id', 'example_index', 'paragraph_index', 'doc_span_index',
               'doc_tokens', 'tokens', 'token_to_orig_map', 'token_is_max_context',
               'input_ids', 'input_mask', 'segment_ids', 'start_position',
               'end_position', 'switch', 'answer_mask')

    def __init__(self, unique_id, example_index, paragraph_index=None,
                 doc_span_index=None, doc_tokens=None, tokens=None,
                 token_to_orig_map=None, token_is_max_context=None,
                 input_ids=None, input_mask=None, segment_ids=None,
                 start_position=None, end_position=None, switch=None,
                 answer_mask=None):
        # Copy each argument onto the instance under its own name.
        values = locals()
        for field in self._FIELDS:
            setattr(self, field, values[field])
def main(all_settings):
    """Drive the SORE pipeline: optional data prep, narrow-IE prediction
    parsing, OpenIE extraction, IDF/cluster-based filtering, and conversion
    of the results back to BRAT annotation format.

    *all_settings* is the parsed configuration dict; each stage runs only
    when its toggle flag is set.
    """
    # Fixed working directories for intermediate and final outputs.
    oie_data_dir = 'SORE/data/OpenIE/processed/'
    sore_output_dir = 'SORE/data/processed_data/'
    brat_output_dir = 'SORE/data/brat_annotations/'
    # Stage toggles.  NOTE(review): 'filter' shadows the builtin of the same
    # name; consider renaming when the interface may change.
    prep = all_settings['Prepare_data']
    parse_narrow = all_settings['Parse_narrowIE_predictions']
    runOIE = all_settings['Run_OIE']
    filter = all_settings['Filter_OIE']
    # Data-prep / narrow-IE / OpenIE parameters.
    max_num_docs_narrowIE = all_settings['data_prep']['max_num_docs_narrowIE']
    narrowIE_input_files = all_settings['narrowIE']['narrowIE_input_files']
    RELATIONS_TO_STORE = all_settings['narrowIE']['RELATIONS_TO_STORE']
    path_to_OIE_jar = all_settings['OpenIE']['path_to_OIE_jar']
    # Filtering parameters (sentencepiece size, normalisation, clustering).
    sp_size = all_settings['Filtering']['sp_size']
    SUBWORDUNIT = all_settings['Filtering']['SUBWORDUNIT']
    STEMMING = all_settings['Filtering']['STEMMING']
    STOPWORDS = all_settings['Filtering']['STOPWORDS']
    prefix = all_settings['Filtering']['prefix']
    file_names = all_settings['Filtering']['file_names']
    number_of_clusters = all_settings['Filtering']['number_of_clusters']
    num_clusters_to_drop = all_settings['Filtering']['num_largest_clusters_to_drop']
    irrelevant_cluster_ids = all_settings['Filtering']['irrelevant_cluster_ids']
    SUBWORD_UNIT_COMBINATION = all_settings['Filtering']['SUBWORD_UNIT_COMBINATION']
    print_stats = all_settings['Filtering']['print_stats']
    filter_settings = all_settings['Filtering']['filter_settings']
    convert_back_to_BRAT = all_settings['convert_back_to_BRAT']
    if prep:
        prep_obj = DataPreparation()
        prep_obj.start(max_num_docs_narrowIE)
    # Map the relation-selection option to the matching CSV suffix.
    suffix_options = ['ALL', 'TRADEOFFS', 'TRADEOFFS_AND_ARGMODS']
    suffixes = ['_all_arguments.csv', '_tradeoffs.csv', '_tradeoffs_and_argmods.csv']
    output_suffix = suffixes[suffix_options.index(RELATIONS_TO_STORE)]
    narrowIE_parser = NarrowIEParser(RELATIONS_TO_STORE=RELATIONS_TO_STORE)
    combined_name = ((narrowIE_parser.output_csv_path + prefix) + output_suffix)
    if parse_narrow:
        # Parse each narrow-IE prediction file to its own CSV, merge the
        # CSVs into one combined file, then delete the per-file outputs.
        parse_files = [[input_filename, ('predictions_' + input_filename)] for input_filename in narrowIE_input_files]
        csv_files = []
        for (input_filename, predictions_filename) in parse_files:
            output_csv = ((narrowIE_parser.output_csv_path + input_filename.rsplit('.', maxsplit=1)[0]) + output_suffix)
            narrowIE_parser.start(input_filename, predictions_filename, output_csv)
            csv_files.append(output_csv)
        combined_csv = pd.concat([pd.read_csv(f, engine='python') for f in csv_files])
        combined_csv.to_csv(combined_name, index=False, encoding='utf-8')
        print('Written all predictions to {}.'.format(combined_name))
        for file_to_remove in csv_files:
            os.remove(file_to_remove)
    if runOIE:
        run_OIE5.run_OpenIE_5(combined_name, path_to_OIE_jar)
    prepper = FilterPrep()
    my_SORE_filter = SORE_filter(combined_name, sore_output_dir)
    if filter:
        # Reuse cached IDF weights / sentencepiece model when already built.
        IDF_weights_path = prepper.determine_output_name(prefix, SUBWORDUNIT, STEMMING, STOPWORDS)
        if os.path.exists(IDF_weights_path):
            print('Assuming IDF weights and sentencepiece model exist, since path exists: {} '.format(IDF_weights_path))
        else:
            prepper.start(prefix, file_names, sp_size, SUBWORDUNIT, STEMMING, STOPWORDS)
        my_SORE_filter.start(prefix, filter_settings, IDF_weights_path, SUBWORDUNIT, irrelevant_cluster_ids, oie_data_dir, sp_size, number_of_clusters, num_clusters_to_drop, STEMMING, STOPWORDS, SUBWORD_UNIT_COMBINATION, print_stats)
    if convert_back_to_BRAT:
        # Collect all processed JSON docs matching the configured prefixes.
        dataset_paths = []
        for input_data_prefix in file_names:
            dataset_paths += glob.glob('SORE/data/unprocessed_data/processed/{}*.json'.format(input_data_prefix))
        converter = SORE_to_BRAT.BratConverter(dataset_paths, combined_name, sore_output_dir, brat_output_dir)
        converter.convert_to_BRAT(prefix)
class AST_RangeExpression(AST_Node):
    """AST node for a MATLAB-style range ``lhs:rhs`` (implicit step 1)."""

    def __init__(self, context, lhs, rhs):
        AST_Node.__init__(self, context)
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return (((('AST_RangeExpression(' + str(self.lhs)) + ', ') + str(self.rhs)) + ')')

    def get_children(self):
        # lhs/rhs may be None; only present children are returned.
        L = [self.lhs, self.rhs]
        return [x for x in L if (x is not None)]

    def get_dims(self):
        """Return [1, rhs-lhs+1] when both bounds are constants; otherwise
        warn and fall back to [1, 1]."""
        from .ast_values import AST_Constant
        if (isinstance(self.lhs, AST_Constant) and isinstance(self.rhs, AST_Constant)):
            l = ((self.rhs.get_value() - self.lhs.get_value()) + 1)
            return [1, l]
        else:
            print((('Dimensionality of ' + str(self)) + ' cannot be inferred'))
            return [1, 1]

    def get_basetype(self):
        # Ranges are treated as double-precision floats.
        return dace.dtypes.float64

    def replace_child(self, old, new):
        """Replace lhs or rhs with *new*; raise if *old* is neither."""
        if (old == self.lhs):
            self.lhs = new
            return
        if (old == self.rhs):
            self.rhs = new
            return
        raise ValueError(((('The child ' + str(old)) + ' is not a child of ') + str(self)))

    def specialize(self):
        return None

    def generate_code(self, sdfg, state):
        """Expand a constant-bounded range into an explicit row matrix and
        generate code for it; non-constant bounds are unsupported."""
        from .ast_values import AST_Constant
        from .ast_matrix import AST_Matrix_Row, AST_Matrix
        if (isinstance(self.lhs, AST_Constant) and isinstance(self.rhs, AST_Constant)):
            lval = self.lhs.get_value()
            rval = self.rhs.get_value()
            vals = [AST_Constant(self.context, v) for v in list(range(lval, (rval + 1)))]
            new = AST_Matrix(self.context, [AST_Matrix_Row(self.context, vals)])
            # Link the replacement node where this node sat in the tree.
            new.parent = self.parent
            new.prev = self.prev
            new.next = self.next
            new.generate_code(sdfg, state)
        else:
            raise NotImplementedError('Code generation for Range with non-constant bounds not implemented')
    __str__ = __repr__
class LIM(Model):
    """Local Info Max worker: scores concatenated sample pairs with a
    'minion' network built from *cfg*; input dim is twice the embedding
    dim because pairs are concatenated."""

    def __init__(self, cfg, emb_dim):
        super().__init__(name=cfg['name'])
        # Pairs of embeddings are concatenated before scoring.
        cfg['num_inputs'] = (2 * emb_dim)
        if ('augment' in cfg.keys()):
            self.augment = cfg['augment']
        else:
            self.augment = False
        self.minion = minion_maker(cfg)
        self.loss = self.minion.loss
        self.loss_weight = self.minion.loss_weight

    def forward(self, x, alpha=1, device=None):
        # Build positive and negative pairs, score the stacked batch, and
        # derive labels via make_labels — presumably marking the positive
        # half vs the negative half; confirm in make_labels.
        (x_pos, x_neg) = make_samples(x, self.augment)
        x = torch.cat((x_pos, x_neg), dim=0).to(device)
        y = self.minion(x, alpha)
        label = make_labels(y).to(device)
        return (y, label)
def create_sin_dataset(n, p):
    """Generate *n* samples with p features: the first two drive the target
    ``y = sin(x1) * cos(x2)**3`` and the remaining p-2 are Gaussian noise.

    Returns ``(data, y)`` where data has shape (n, p) and y has shape
    (n, 1) as float32.
    """
    x1 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    x2 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    target = np.sin(x1) * np.cos(x2) ** 3
    noise = norm.rvs(loc=0, scale=1, size=[n, p - 2])
    data = np.hstack((x1, x2, noise))
    return data, target.astype(np.float32)
class TestLRN(test_util.TestCase):
    """Device-consistency and gradient checks for the caffe2 LRN operator
    in NHWC order."""

    def setUp(self):
        # (spatial input_size, channel depth) combinations to test.
        self.test_configs = [(6, 10), (3, 13)]

    def testLRN(self):
        for (input_size, depth) in self.test_configs:
            op = core.CreateOperator('LRN', ['X'], ['Y', 'Y_scale'], size=11, alpha=0.001, beta=0.5, bias=2.0, order='NHWC')
            X = np.random.rand(2, input_size, input_size, depth).astype(np.float32)
            # Forward results must agree across available devices.
            res = device_checker.CheckSimple(op, [X], [0])
            self.assertTrue(res)
            # Numerical gradient check of input 0 against output 0.
            for checker in gradient_checkers:
                (res, grad, grad_estimated) = checker.CheckSimple(op, [X], 0, [0])
                self.assertTrue(res)
def test_SincConv(device):
    """SincConv smoke test: single- and multi-channel inputs with 'same'
    padding, checking output channel count and TorchScript traceability."""
    from speechbrain.nnet.CNN import SincConv
    # Single-channel waveform batch: (batch, time).
    input = torch.rand([4, 16000], device=device)
    convolve = SincConv(input_shape=input.shape, out_channels=8, kernel_size=65, padding='same').to(device)
    output = convolve(input)
    assert (output.shape[(- 1)] == 8)
    # Tracing must succeed (a ScriptModule is truthy).
    assert torch.jit.trace(convolve, input)
    # Multi-channel input: (batch, time, channels).
    input = torch.rand([10, 16000, 8], device=device)
    convolve = SincConv(input_shape=input.shape, out_channels=16, kernel_size=11, padding='same').to(device)
    output = convolve(input)
    assert (output.shape[(- 1)] == 16)
    assert torch.jit.trace(convolve, input)
def local_density_congruence(self, p, m, Zvec=None, NZvec=None):
    """Total local density at p: the sum of the good-, zero- and bad-type
    contributions for the same congruence conditions."""
    good = self.local_good_density_congruence(p, m, Zvec, NZvec)
    zero = self.local_zero_density_congruence(p, m, Zvec, NZvec)
    bad = self.local_bad_density_congruence(p, m, Zvec, NZvec)
    return good + zero + bad
class FiniteWordPath_dyck_callable(WordDatatype_callable, FiniteWordPath_dyck, FiniteWord_class):
    """Finite word path on the Dyck alphabet whose letters are produced by a
    callable; all behaviour is inherited from the three bases (the MRO
    combines the callable storage with the Dyck path semantics)."""
    pass
class PairwiseDistance(Module):
    """Legacy (lua-torch port) module computing the batchwise p-norm
    distance between two inputs: forward takes [x1, x2], each (batch, dim),
    and returns a (batch,) vector of distances."""

    def __init__(self, p):
        super(PairwiseDistance, self).__init__()
        # Only integer norms are supported.
        assert ((p % 1) == 0)
        self.gradInput = []
        self.diff = torch.Tensor()
        self.norm = p
        # Lazily-allocated work buffers, reused across calls.
        self.outExpand = None
        self.grad = None
        self.ones = None

    def updateOutput(self, input):
        self.output.resize_(1)
        assert (input[0].dim() == 2)
        if (self.diff is None):
            self.diff = input[0].new()
        # diff = |x1 - x2| (element-wise), computed in place.
        torch.add(input[0], (- 1), input[1], out=self.diff).abs_()
        self.output.resize_(input[0].size(0))
        self.output.zero_()
        # output = (sum |d|^p)^(1/p) per batch row.
        self.output.add_(self.diff.pow_(self.norm).sum(1, keepdim=False))
        self.output.pow_((1.0 / self.norm))
        return self.output

    def updateGradInput(self, input, gradOutput):
        assert (input[0].dim() == 2)
        if (len(self.gradInput) != 2):
            self.gradInput[:] = [None, None]
        if (self.gradInput[0] is None):
            self.gradInput[0] = input[0].new()
        self.gradInput[0].resize_(input[0].size())
        if (self.gradInput[1] is None):
            self.gradInput[1] = input[1].new()
        self.gradInput[1].resize_(input[1].size())
        # Gradient w.r.t. x1 starts from (x1 - x2).
        self.gradInput[0].copy_(input[0])
        self.gradInput[0].add_((- 1), input[1])
        if (self.norm == 1):
            # L1: gradient is just the sign of the difference.
            self.gradInput[0].sign_()
        else:
            # General p: d * |d|^(p-2) / dist^(p-1).
            if (self.norm > 2):
                self.gradInput[0].mul_(self.gradInput[0].abs().pow_((self.norm - 2)))
            if (self.outExpand is None):
                self.outExpand = self.output.new()
            self.outExpand.resize_(self.output.size(0), 1)
            self.outExpand.copy_(self.output.view(self.output.size(0), 1))
            # Epsilon keeps the division stable when the distance is zero.
            self.outExpand.add_(1e-06)
            self.outExpand.pow_((- (self.norm - 1)))
            self.gradInput[0].mul_(self.outExpand.expand(self.gradInput[0].size(0), self.gradInput[0].size(1)))
        if (self.grad is None):
            self.grad = gradOutput.new()
        if (self.ones is None):
            self.ones = gradOutput.new()
        self.grad.resize_as_(input[0]).zero_()
        self.ones.resize_(input[0].size(1)).fill_(1)
        # Outer product broadcasts gradOutput across the feature dimension.
        self.grad.addr_(gradOutput, self.ones)
        self.gradInput[0].mul_(self.grad)
        # Gradient w.r.t. x2 is the negation of the x1 gradient.
        self.gradInput[1].zero_().add_((- 1), self.gradInput[0])
        return self.gradInput

    def clearState(self):
        # Drop the reusable work buffers to free memory.
        clear(self, 'diff', 'outExpand', 'grad', 'ones')
        return super(PairwiseDistance, self).clearState()
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, overwrite_b=False, check_finite=True):
    """QZ (generalized Schur) decomposition of the pair (A, B).

    Thin public wrapper over the private ``_qz`` driver; returns the tuple
    ``(AA, BB, Q, Z)``, discarding the driver's extra outputs.
    """
    result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort,
                    overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                    check_finite=check_finite)
    # The driver's result tuple carries the Schur forms first and the
    # transformation matrices fourth- and third-from-last.
    AA, BB = result[0], result[1]
    Q, Z = result[-4], result[-3]
    return AA, BB, Q, Z
class IfAllStructural(Visitor):
    """SQL-AST visitor that decides whether every A_Expr in the tree is
    'structural', i.e. neither operand is a call to one of the free-text
    functions in SET_FREE_TEXT_FCNS.  The verdict is left in ``self.res``."""

    def __init__(self) -> None:
        super().__init__()
        # Assume structural until a counter-example is found.
        self.res = True

    def __call__(self, node):
        super().__call__(node)

    def visit_A_Expr(self, ancestors, node: A_Expr):
        # Short-circuit once a non-structural expression has been found.
        if (self.res is False):
            return

        def is_structural(expr):
            # A call to a free-text function (matched by dotted name) makes
            # the operand non-structural; anything else passes.
            if (isinstance(expr, FuncCall) and ('.'.join(map((lambda x: x.sval), expr.funcname)) in SET_FREE_TEXT_FCNS)):
                return False
            return True
        if (not (is_structural(node.lexpr) and is_structural(node.rexpr))):
            self.res = False
# NOTE(review): the decorator on this function was truncated to a bare
# "_model" marker in this copy; restored as @register_model — confirm
# against the upstream model registry.
@register_model
def ig_resnext101_32x16d(pretrained=True, **kwargs):
    """ResNeXt-101 32x16d: weakly-supervised Instagram-pretrained ResNeXt
    (cardinality 32, base width 16).  Extra kwargs are forwarded to the
    ResNet constructor."""
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args)
class ZeroEvenOpTest(unittest.TestCase):
    """Tests for the custom Caffe2 ``ZeroEven`` operator.

    ``ZeroEven`` copies its 1-D float input to the output while setting the
    values at even indices to zero. Each behavior is checked on CPU and on
    CUDA device 0.
    """

    def _run_zero_even_op(self, X):
        # Run the operator once on CPU and fetch the produced blob.
        op = core.CreateOperator('ZeroEven', ['X'], ['Y'])
        workspace.FeedBlob('X', X)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')
        return Y

    def _run_zero_even_op_gpu(self, X):
        # Same as _run_zero_even_op, but scoped to CUDA device 0.
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('ZeroEven', ['X'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.RunOperatorOnce(op)
            Y = workspace.FetchBlob('Y')
            return Y

    def test_throws_on_non_1D_arrays(self):
        X = np.zeros((2, 2), dtype=np.float32)
        # FIX: assertRaisesRegexp was deprecated since Python 3.2 and removed
        # in 3.12 — use assertRaisesRegex instead.
        with self.assertRaisesRegex(RuntimeError, r'X\.ndim\(\) == 1'):
            self._run_zero_even_op(X)

    def test_handles_empty_arrays(self):
        X = np.array([], dtype=np.float32)
        Y_exp = np.copy(X)
        Y_act = self._run_zero_even_op(X)
        np.testing.assert_allclose(Y_act, Y_exp)

    def test_sets_vals_at_even_inds_to_zero(self):
        X = np.array([0, 1, 2, 3, 4], dtype=np.float32)
        Y_exp = np.array([0, 1, 0, 3, 0], dtype=np.float32)
        Y_act = self._run_zero_even_op(X)
        np.testing.assert_allclose(Y_act[0::2], Y_exp[0::2])

    def test_preserves_vals_at_odd_inds(self):
        X = np.array([0, 1, 2, 3, 4], dtype=np.float32)
        Y_exp = np.array([0, 1, 0, 3, 0], dtype=np.float32)
        Y_act = self._run_zero_even_op(X)
        np.testing.assert_allclose(Y_act[1::2], Y_exp[1::2])

    def test_handles_even_length_arrays(self):
        X = np.random.rand(64).astype(np.float32)
        Y_exp = np.copy(X)
        Y_exp[0::2] = 0.0
        Y_act = self._run_zero_even_op(X)
        np.testing.assert_allclose(Y_act, Y_exp)

    def test_handles_odd_length_arrays(self):
        X = np.random.randn(77).astype(np.float32)
        Y_exp = np.copy(X)
        Y_exp[0::2] = 0.0
        Y_act = self._run_zero_even_op(X)
        np.testing.assert_allclose(Y_act, Y_exp)

    def test_gpu_throws_on_non_1D_arrays(self):
        X = np.zeros((2, 2), dtype=np.float32)
        # FIX: assertRaisesRegex (see CPU variant above).
        with self.assertRaisesRegex(RuntimeError, r'X\.ndim\(\) == 1'):
            self._run_zero_even_op_gpu(X)

    def test_gpu_handles_empty_arrays(self):
        X = np.array([], dtype=np.float32)
        Y_exp = np.copy(X)
        Y_act = self._run_zero_even_op_gpu(X)
        np.testing.assert_allclose(Y_act, Y_exp)

    def test_gpu_sets_vals_at_even_inds_to_zero(self):
        X = np.array([0, 1, 2, 3, 4], dtype=np.float32)
        Y_exp = np.array([0, 1, 0, 3, 0], dtype=np.float32)
        Y_act = self._run_zero_even_op_gpu(X)
        np.testing.assert_allclose(Y_act[0::2], Y_exp[0::2])

    def test_gpu_preserves_vals_at_odd_inds(self):
        X = np.array([0, 1, 2, 3, 4], dtype=np.float32)
        Y_exp = np.array([0, 1, 0, 3, 0], dtype=np.float32)
        Y_act = self._run_zero_even_op_gpu(X)
        np.testing.assert_allclose(Y_act[1::2], Y_exp[1::2])

    def test_gpu_handles_even_length_arrays(self):
        X = np.random.rand(64).astype(np.float32)
        Y_exp = np.copy(X)
        Y_exp[0::2] = 0.0
        Y_act = self._run_zero_even_op_gpu(X)
        np.testing.assert_allclose(Y_act, Y_exp)

    def test_gpu_handles_odd_length_arrays(self):
        X = np.random.randn(77).astype(np.float32)
        Y_exp = np.copy(X)
        Y_exp[0::2] = 0.0
        Y_act = self._run_zero_even_op_gpu(X)
        np.testing.assert_allclose(Y_act, Y_exp)
def compute_stab_reg(args, model, meter, eps, eps_scheduler):
    """ReLU-stability regularizer (tanh-based, per Xiao et al. style).

    For every ``BoundRelu`` layer, penalizes "unstable" neurons whose
    pre-activation bounds straddle zero: the loss term is
    ``-tanh(1 + lower * upper)`` summed per sample and averaged over the
    batch. Returns the accumulated loss scaled by ``args.xiao_coeff``.
    Also records the unscaled loss in ``meter`` under ``relu_stab_Loss``.

    NOTE(review): ``eps`` and ``eps_scheduler`` are unused here — possibly
    kept for a uniform regularizer signature; confirm with callers.
    """
    loss = torch.zeros(()).to(args.device)
    # BoundDataParallel wraps the real module; unwrap to reach its children.
    if isinstance(model, BoundDataParallel):
        modules = list(model._modules.values())[0]._modules
    else:
        modules = model._modules
    nodes = {}
    for m in modules.values():
        if isinstance(m, BoundRelu):
            # Under data-parallel, bounds live on replicas and must be
            # fetched through the wrapper's get_property protocol.
            if isinstance(model, BoundDataParallel):
                lower = model(get_property=True, node_name=m.name, att_name='lower')
                upper = model(get_property=True, node_name=m.name, att_name='upper')
            else:
                (lower, upper) = (m.lower, m.upper)
            nodes[m.name] = Node(m, lower, upper)
    for (k, v) in nodes.items():
        # lower*upper < 0 marks an unstable neuron; -tanh(1 + l*u) rewards
        # pushing bounds to the same sign.
        loss += (- torch.tanh((1 + (v.lower * v.upper))).view(v.lower.size(0), (- 1)).sum(dim=(- 1)).mean())
    meter.update('relu_stab_Loss', loss)
    return (loss * args.xiao_coeff)
def helper_variable_scope():
    """Generator that opens the absolute ``IO`` name scope and yields it.

    Intended to be driven as a context manager: the scope remains active
    for the caller until the generator is resumed or closed.
    """
    scope_cm = tf_util.reuse_name_scope('IO', absolute=True)
    with scope_cm as scope:
        yield scope
class ArrayDim(AstNode):
    """One dimension of an array member declaration in the IDL AST.

    ``sizes_as_declared`` holds the raw tokens from the declaration, e.g.
    ``['4']`` (constant), ``['count']`` (reference to a sibling member), or
    ``['int32_t', 'count']`` / ``['int32_t']`` (dynamic with explicit or
    auto-generated length member). ``reference_check`` resolves these into
    ``size_str``/``size_int``/``_dynamic``/``_auto_member``.
    """

    def __init__(self, sizes):
        super(ArrayDim, self).__init__()
        self.sizes_as_declared = sizes
        self.size_str = None      # textual size (constant or member name)
        self.size_int = None      # integer size when statically known
        self._dynamic = None      # None until reference_check() resolves it
        self._auto_member = None  # synthesized length member, if any

    def dynamic(self):
        # NOTE(review): used as ``self.dynamic`` (no call) in compute_hash —
        # likely a stripped @property decorator; confirm upstream.
        if (self._dynamic is None):
            raise RuntimeError('Cannot determine until reference_check() is called')
        return self._dynamic

    def auto_member(self):
        # NOTE(review): likely a stripped @property as well; confirm.
        if (self._dynamic is None):
            raise RuntimeError('Cannot determine until reference_check() is called')
        return self._auto_member

    def compute_hash(self, type_hash):
        """Fold this dimension's dynamic flag and size into a type hash."""
        type_hash.update((1 if self.dynamic else 0))
        assert (self.size_str is not None), 'size_str must be set before calling compute_hash'
        type_hash.update_string(self.size_str)

    def reference_check(self, struct, member):
        """Resolve the declared size tokens against ``struct``'s members.

        Handles, in order: integer literal sizes, auto-generated length
        members (``num_<member>``), explicit typed length members, and
        references to previously declared integer members (const or not).
        Raises TypeError/NameError/ValueError on invalid declarations.
        """
        size_declaration = '[{}]'.format(' '.join(self.sizes_as_declared))
        if (len(self.sizes_as_declared) == 1):
            # Case 1: a plain integer literal (any base, via base=0).
            try:
                self.size_int = int(self.sizes_as_declared[0], base=0)
                self.size_str = self.sizes_as_declared[0]
                self._dynamic = False
                return
            except ValueError:
                pass
        # An empty declaration defaults to an auto-sized int32_t dimension.
        decl_tuple = (self.sizes_as_declared or ('int32_t',))
        if ((len(decl_tuple) == 1) and (decl_tuple[0] in INTEGER_TYPES)):
            # Bare integer type: synthesize a ``num_<member>`` length member.
            auto_name = 'num_{}'.format(member.name)
            decl_tuple = (decl_tuple[0], auto_name)
        if (len(decl_tuple) == 2):
            # Case 2: dynamic dimension with an (auto-)generated length member.
            (type_name, member_name) = decl_tuple
            if (member.ndim > 1):
                raise TypeError('ArrayDim {} with auto-size is not allowed on multi-dimensional arrays'.format(size_declaration))
            if (type_name not in INTEGER_TYPES):
                raise TypeError('ArrayDim {} does not specify a valid integer type: {}'.format(size_declaration, type_name))
            if (member_name in struct.member_map):
                raise NameError('ArrayDim {} with auto-size conflicts with existing member {}'.format(size_declaration, member_name))
            self._dynamic = True
            self.size_int = None
            self.size_str = member_name
            self._auto_member = Member(TypeRef(type_name), member_name)
            return
        assert (len(decl_tuple) == 1)
        # Case 3: reference to an existing member of the struct.
        member_name = decl_tuple[0]
        if (member_name not in struct.member_map):
            raise NameError('ArrayDim {} is not a defined member'.format(size_declaration))
        size_member = struct.member_map[member_name]
        if (size_member.type_ref.name not in INTEGER_TYPES):
            raise TypeError('ArrayDim {} is not a valid array type'.format(size_declaration))
        if isinstance(size_member, ConstMember):
            # Const member: the size is statically known.
            self._dynamic = False
            self.size_int = size_member.value
            self.size_str = str(self.size_int)
            return
        # Non-const length member must be serialized before the array itself.
        if (struct.members.index(size_member) >= struct.members.index(member)):
            raise ValueError('ArrayDim {} must appear before the array {}'.format(member_name, member.name))
        self._dynamic = True
        self.size_int = None
        self.size_str = member_name
        return

    def __repr__(self):
        return '[{}]'.format(' '.join(self.sizes_as_declared))
def layer_norm_linear_fn(x, norm_weight, norm_bias, linear_weight, linear_bias, residual=None, eps=1e-06, prenorm=False, residual_in_fp32=False, is_rms_norm=False):
    """Functional entry point for the fused layer-norm + linear autograd op.

    All arguments are forwarded verbatim, in positional order, to
    ``LayerNormLinearFn.apply``; see that Function for the semantics of
    ``prenorm`` / ``residual_in_fp32`` / ``is_rms_norm``.
    """
    forwarded = (x, norm_weight, norm_bias, linear_weight, linear_bias,
                 residual, eps, prenorm, residual_in_fp32, is_rms_norm)
    return LayerNormLinearFn.apply(*forwarded)
class Config(object):
    """mmcv-style config object backed by a ``ConfigDict``.

    Supports loading from .py/.yml/.yaml/.json files, recursive ``_base_``
    inheritance with duplicate-key checking, attribute-style access, and
    merging command-line option dicts.

    NOTE(review): several methods here take no ``self``/``cls``
    (``_file2dict``, ``_merge_a_into_b``, ``fromfile``, ``auto_argparser``)
    and ``filename``/``text`` look like accessors — decorators
    (@staticmethod / @property) appear to have been stripped; confirm
    against the mmcv source.
    """

    def _file2dict(filename):
        """Load a config file into (cfg_dict, cfg_text), resolving _base_."""
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        if filename.endswith('.py'):
            # Import the .py config through a temp copy so arbitrary config
            # paths/names work; clean sys.path and sys.modules afterwards.
            with tempfile.TemporaryDirectory() as temp_config_dir:
                shutil.copyfile(filename, osp.join(temp_config_dir, '_tempconfig.py'))
                sys.path.insert(0, temp_config_dir)
                mod = import_module('_tempconfig')
                sys.path.pop(0)
                cfg_dict = {name: value for (name, value) in mod.__dict__.items() if (not name.startswith('__'))}
                del sys.modules['_tempconfig']
        elif filename.endswith(('.yml', '.yaml', '.json')):
            import mmcv
            cfg_dict = mmcv.load(filename)
        else:
            raise IOError('Only py/yml/yaml/json type are supported now!')
        # Keep the raw text (prefixed by the path) for dump/debugging.
        cfg_text = (filename + '\n')
        with open(filename, 'r') as f:
            cfg_text += f.read()
        if ('_base_' in cfg_dict):
            # Recursively load base configs and merge this one on top.
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop('_base_')
            base_filename = (base_filename if isinstance(base_filename, list) else [base_filename])
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                (_cfg_dict, _cfg_text) = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                # Bases must not define overlapping keys.
                if (len((base_cfg_dict.keys() & c.keys())) > 0):
                    raise KeyError('Duplicate key is not allowed among bases')
                base_cfg_dict.update(c)
            Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return (cfg_dict, cfg_text)

    def _merge_a_into_b(a, b):
        """Recursively merge dict ``a`` into ``b`` (``a`` wins).

        A nested dict in ``a`` carrying DELETE_KEY replaces (rather than
        merges into) the corresponding entry in ``b``.
        """
        for (k, v) in a.items():
            if (isinstance(v, dict) and (k in b) and (not v.pop(DELETE_KEY, False))):
                if (not isinstance(b[k], dict)):
                    raise TypeError('Cannot inherit key {} from base!'.format(k))
                Config._merge_a_into_b(v, b[k])
            else:
                b[k] = v

    def fromfile(filename):
        """Build a Config from a file path."""
        (cfg_dict, cfg_text) = Config._file2dict(filename)
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    def auto_argparser(description=None):
        """Build an ArgumentParser whose options mirror the config keys.

        First parses only the positional ``config`` path, loads it, then
        generates one CLI argument per config entry via ``add_args``.
        """
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return (parser, cfg)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if (cfg_dict is None):
            cfg_dict = dict()
        elif (not isinstance(cfg_dict, dict)):
            raise TypeError('cfg_dict must be a dict, but got {}'.format(type(cfg_dict)))
        # Bypass our own __setattr__ (which writes into _cfg_dict) for the
        # three internal attributes.
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    def filename(self):
        # NOTE(review): probably a stripped @property — __repr__ uses
        # ``self.filename`` without calling it; confirm.
        return self._filename

    def text(self):
        # NOTE(review): probably a stripped @property as well.
        return self._text

    def __repr__(self):
        return 'Config (path: {}): {}'.format(self.filename, self._cfg_dict.__repr__())

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying ConfigDict.
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        # All writes go into the ConfigDict; plain dicts are wrapped first.
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def dump(self):
        """Serialize the config dict as pretty-printed JSON text."""
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        format_text = json.dumps(cfg_dict, indent=2)
        return format_text

    def merge_from_dict(self, options):
        """Merge a flat {'a.b.c': value} options dict into this config.

        Dotted keys are expanded into nested ConfigDicts, then merged via
        ``_merge_a_into_b`` (option values win over existing entries).
        """
        option_cfg_dict = {}
        for (full_key, v) in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:(- 1)]:
                d[subkey] = ConfigDict()
                d = d[subkey]
            subkey = key_list[(- 1)]
            d[subkey] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        Config._merge_a_into_b(option_cfg_dict, cfg_dict)
def test__get_qualified_name_class():
    """``_get_qualified_name`` should return a class's full dotted path."""
    result = _get_qualified_name(Constraint)
    assert result == 'sdv.constraints.base.Constraint'
class DepthWiseConv1d(nn.Module):
    """Depthwise 1-D convolution with explicit manual padding.

    ``groups=chan_in`` gives each input channel its own filter(s) (so
    ``chan_out`` must be a multiple of ``chan_in``). ``padding`` is the
    pad spec passed to ``F.pad`` — typically a ``(left, right)`` tuple —
    applied before the (unpadded) convolution.
    """

    def __init__(self, chan_in, chan_out, kernel_size, padding):
        super().__init__()
        self.padding = padding
        self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in)

    def forward(self, x):
        # Pad explicitly, then convolve; keeps causal/asymmetric padding
        # possible, which Conv1d's symmetric ``padding=`` arg cannot express.
        return self.conv(F.pad(x, self.padding))
_on_pypy  # NOTE(review): truncated decorator — presumably a pytest PyPy skip/xfail marker; confirm upstream
def test_cyclic_gc():
    """Cyclic garbage collection must reclaim dynamic pybind11 instances.

    Checks both a self-referencing instance and a two-instance reference
    cycle via the ConstructorStats alive-count bookkeeping.
    """
    # One instance referring to itself.
    instance = m.DynamicClass()
    instance.circular_reference = instance
    cstats = ConstructorStats.get(m.DynamicClass)
    assert (cstats.alive() == 1)
    del instance
    assert (cstats.alive() == 0)
    # Two instances in a reference cycle.
    i1 = m.DynamicClass()
    i2 = m.DynamicClass()
    i1.cycle = i2
    i2.cycle = i1
    assert (cstats.alive() == 2)
    del i1, i2
    assert (cstats.alive() == 0)
class Caffe2Tracer():
    """Wraps a detectron2 model into a Caffe2-traceable form and exports it.

    The constructor builds a Caffe2-compatible meta-architecture around a
    deep copy of ``model`` and converts ``inputs`` into the traceable
    format; the ``export_*`` methods then emit Caffe2 protobufs, ONNX, or
    TorchScript from that traceable model.
    """

    def __init__(self, cfg: CfgNode, model: nn.Module, inputs):
        assert isinstance(cfg, CfgNode), cfg
        assert isinstance(model, torch.nn.Module), type(model)
        # Ensure export-related config keys exist before lookup.
        if ('EXPORT_CAFFE2' not in cfg):
            cfg = add_export_config(cfg)
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
        # Deep-copy so export-time mutations never touch the caller's model.
        self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model))
        self.inputs = inputs
        self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs)

    def export_caffe2(self):
        """Export as a Caffe2 model (predict_net + init_net)."""
        from .caffe2_export import export_caffe2_detection_model
        (predict_net, init_net) = export_caffe2_detection_model(self.traceable_model, self.traceable_inputs)
        return Caffe2Model(predict_net, init_net)

    def export_onnx(self):
        """Export as an ONNX model proto."""
        from .caffe2_export import export_onnx_model as export_onnx_model_impl
        return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,))

    def export_torchscript(self):
        """Export as a TorchScript module via torch.jit.trace (no grad)."""
        logger = logging.getLogger(__name__)
        logger.info('Tracing the model with torch.jit.trace ...')
        with torch.no_grad():
            return torch.jit.trace(self.traceable_model, (self.traceable_inputs,))
class EarlyStopping():
    """Stop training when a monitored validation metric stops improving.

    Tracks the best value of ``early_stop_criteria`` seen so far, saving a
    "best" checkpoint on every improvement. Once no improvement has been
    observed for ``patience`` updates, the stopper activates and (when
    ``should_stop`` is True) restores/finalizes the checkpoint and tells
    the caller to stop.
    """

    def __init__(self, model, checkpoint_instance, early_stop_criteria='total_loss', patience=1000, minimize=False, should_stop=True):
        self.minimize = minimize
        self.patience = patience
        self.model = model
        self.checkpoint = checkpoint_instance
        self.early_stop_criteria = early_stop_criteria
        # Metrics live under the ``val/`` namespace in the meter.
        if 'val' not in self.early_stop_criteria:
            self.early_stop_criteria = f'val/{self.early_stop_criteria}'
        # Initialize "best" so that any first observation is an improvement.
        self.best_monitored_value = np.inf if minimize else (-np.inf)
        self.best_monitored_iteration = 0
        self.best_monitored_update = 0
        self.should_stop = should_stop
        self.activated = False
        self.metric = self.early_stop_criteria

    def __call__(self, update, iteration, meter):
        """Record the current metric; return True when training should stop."""
        # Only the main (or XLA) process drives early stopping.
        if not (is_main() or is_xla()):
            return False
        entry = meter.meters.get(self.early_stop_criteria, None)
        if entry is None:
            raise ValueError('Criteria used for early stopping ({}) is not present in meter.'.format(self.early_stop_criteria))
        value = entry.global_avg
        if isinstance(value, torch.Tensor):
            value = value.item()
        improved = (value < self.best_monitored_value) if self.minimize else (value > self.best_monitored_value)
        if improved:
            self.best_monitored_value = value
            self.best_monitored_iteration = iteration
            self.best_monitored_update = update
            self.checkpoint.save(update, iteration, update_best=True)
            return False
        if update > (self.best_monitored_update + self.patience):
            # Patience exhausted.
            self.activated = True
            if self.should_stop is True:
                self.checkpoint.restore()
                self.checkpoint.finalize()
                return True
            return False
        # No improvement yet, but still within patience: keep checkpointing.
        self.checkpoint.save(update, iteration, update_best=False)
        return False

    def is_activated(self):
        """Whether the patience threshold has ever been exceeded."""
        return self.activated

    def init_from_checkpoint(self, load):
        """Restore best-so-far bookkeeping from a checkpoint dict."""
        if 'best_iteration' in load:
            self.best_monitored_iteration = load['best_iteration']
        if 'best_metric_value' in load:
            self.best_monitored_value = load['best_metric_value']

    def get_info(self):
        """Summary of the best update/iteration/metric seen so far."""
        return {
            'best_update': self.best_monitored_update,
            'best_iteration': self.best_monitored_iteration,
            f'best_{self.metric}': f'{self.best_monitored_value:.6f}',
        }
def add_rotation_to_pcloud(pcloud):
    """Apply one random rotation to a point cloud (or batch of clouds).

    A 2-D array is treated as a single (n_points, 3) cloud; any other rank
    is treated as a batch, with every cloud rotated by the same random
    matrix.
    """
    rotation = rand_rotation_matrix()
    if pcloud.ndim == 2:
        return pcloud.dot(rotation)
    return np.asarray([cloud.dot(rotation) for cloud in pcloud])
def register_Ns3LteRrcSapAntennaInfoCommon_methods(root_module, cls):
    """Register PyBindGen bindings for ``ns3::LteRrcSap::AntennaInfoCommon``.

    Auto-generated ns-3 binding registration: default constructor, copy
    constructor, and the mutable ``antennaPortsCount`` (uint16_t) attribute.
    ``root_module`` is unused here but kept for the generator's uniform
    registration signature.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::AntennaInfoCommon const &', 'arg0')])
    cls.add_instance_attribute('antennaPortsCount', 'uint16_t', is_const=False)
    return
class WeightNorm(object):
    """Forward-pre-hook implementing weight normalization.

    Splits a parameter ``name`` into magnitude ``name_g`` and direction
    ``name_v`` and recomputes ``name = g * v / ||v||`` (along ``dim``)
    before every forward pass.
    """

    name: str  # name of the re-parametrized attribute on the module
    dim: int   # dimension kept by the norm; -1 means norm over all dims

    def __init__(self, name: str, dim: int) -> None:
        if (dim is None):
            dim = (- 1)
        self.name = name
        self.dim = dim

    def compute_weight(self, module: Module) -> Any:
        """Recombine the g/v parameters into the effective weight."""
        g = getattr(module, (self.name + '_g'))
        v = getattr(module, (self.name + '_v'))
        return _weight_norm(v, g, self.dim)

    def apply(module, name: str, dim: int) -> 'WeightNorm':
        # NOTE(review): no ``self`` — almost certainly a stripped
        # @staticmethod (matches torch.nn.utils.weight_norm); confirm.
        # Refuse to double-register on the same parameter.
        for (k, hook) in module._forward_pre_hooks.items():
            if (isinstance(hook, WeightNorm) and (hook.name == name)):
                raise RuntimeError('Cannot register two weight_norm hooks on the same parameter {}'.format(name))
        if (dim is None):
            dim = (- 1)
        fn = WeightNorm(name, dim)
        weight = getattr(module, name)
        # Replace the original parameter with the g/v decomposition.
        del module._parameters[name]
        module.register_parameter((name + '_g'), Parameter(norm_except_dim(weight, 2, dim).data))
        module.register_parameter((name + '_v'), Parameter(weight.data))
        setattr(module, name, fn.compute_weight(module))
        # Recompute the weight before every forward() call.
        module.register_forward_pre_hook(fn)
        return fn

    def remove(self, module: Module) -> None:
        """Undo the re-parametrization, restoring a plain Parameter."""
        weight = self.compute_weight(module)
        delattr(module, self.name)
        del module._parameters[(self.name + '_g')]
        del module._parameters[(self.name + '_v')]
        setattr(module, self.name, Parameter(weight.data))

    def __call__(self, module: Module, inputs: Any) -> None:
        # Forward-pre-hook body: refresh the effective weight.
        setattr(module, self.name, self.compute_weight(module))
def _decompression_bomb_check(size):
    """Guard against decompression-bomb images.

    Given a ``(width, height)`` pair: raises ``DecompressionBombError``
    above twice ``MAX_IMAGE_PIXELS``, emits a ``DecompressionBombWarning``
    above ``MAX_IMAGE_PIXELS``, and does nothing when the limit is
    disabled (``None``).
    """
    if MAX_IMAGE_PIXELS is None:
        return
    width, height = size[0], size[1]
    pixels = width * height
    if pixels > 2 * MAX_IMAGE_PIXELS:
        raise DecompressionBombError(
            'Image size (%d pixels) exceeds limit of %d pixels, could be decompression bomb DOS attack.'
            % (pixels, 2 * MAX_IMAGE_PIXELS))
    if pixels > MAX_IMAGE_PIXELS:
        warnings.warn(
            'Image size (%d pixels) exceeds limit of %d pixels, could be decompression bomb DOS attack.'
            % (pixels, MAX_IMAGE_PIXELS),
            DecompressionBombWarning)
(wandb=True, sh=True)  # NOTE(review): truncated decorator — presumably ``@RunIf(wandb=True, sh=True)``; confirm upstream
.slow  # NOTE(review): truncated decorator — presumably ``@pytest.mark.slow``
def test_optuna_sweep_ddp_sim_wandb(tmp_path):
    """Smoke-test an Optuna hyperparameter sweep (5 trials) using the
    simulated-DDP trainer and the wandb logger, driven through the shell
    helper with tiny batch limits so it finishes quickly."""
    command = [startfile, '-m', 'hparams_search=mnist_optuna', ('hydra.sweep.dir=' + str(tmp_path)), 'hydra.sweeper.n_trials=5', 'trainer=ddp_sim', 'trainer.max_epochs=3', '+trainer.limit_train_batches=0.01', '+trainer.limit_val_batches=0.1', '+trainer.limit_test_batches=0.1', 'logger=wandb']
    run_sh_command(command)
_module()  # NOTE(review): truncated decorator — presumably ``@DETECTORS.register_module()`` (mmdetection); confirm upstream
class YOLOV3(SingleStageDetector):
    """YOLOv3 detector: a thin SingleStageDetector subclass that simply
    forwards its config components (backbone, neck, head, train/test cfgs,
    pretrained weights) to the base class."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
def colorize_mask(image_array):
    """Convert a label-map array into a palettized ("P" mode) PIL image,
    colored with the module-level ``color_mapping`` palette."""
    mask = Image.fromarray(image_array.astype(np.uint8))
    palettized = mask.convert('P')
    palettized.putpalette(color_mapping)
    return palettized
class Config(object):
    """Flat configuration object populated from INI files and kwargs.

    Values parsed from the config file are run through
    ``ast.literal_eval`` so numbers/booleans/containers come back typed;
    explicit ``kwargs`` override file values. Attributes double as the
    mapping interface (keys/items/getitem), and the object is picklable.
    """

    def __init__(self, conf=None, **kwargs):
        super(Config, self).__init__()
        config = ConfigParser()
        # ``conf`` may be None, a path, or a list of paths.
        config.read((conf or []))
        # File values first, kwargs win on conflicts.
        self.update({**dict(((name, literal_eval(value)) for section in config.sections() for (name, value) in config.items(section))), **kwargs})

    def __repr__(self):
        # Render a two-column Param | Value table.
        s = line = (((('-' * 20) + '-+-') + ('-' * 30)) + '\n')
        s += (f'''{'Param':20} | {'Value':^30}
''' + line)
        for (name, value) in vars(self).items():
            s += f'''{name:20} | {str(value):^30}
'''
        s += line
        return s

    def __getitem__(self, key):
        return getattr(self, key)

    def __getstate__(self):
        # Pickle the attribute dict directly.
        return vars(self)

    def __setstate__(self, state):
        self.__dict__.update(state)

    def keys(self):
        return vars(self).keys()

    def items(self):
        return vars(self).items()

    def update(self, kwargs):
        """Set each (name, value) pair as an attribute.

        Drops self/cls/__class__ entries (artifacts of passing locals())
        and flattens a nested 'kwargs' entry first. Returns self.
        """
        for key in ('self', 'cls', '__class__'):
            kwargs.pop(key, None)
        kwargs.update(kwargs.pop('kwargs', dict()))
        for (name, value) in kwargs.items():
            setattr(self, name, value)
        return self

    def pop(self, key, val=None):
        return self.__dict__.pop(key, val)
_cmd('lint')  # NOTE(review): truncated decorator — presumably a command-registration decorator like ``@register_cmd('lint')``; confirm upstream
class Lint():
    """Aggregate lint command: runs the ``lint``, ``unicode-check`` and
    ``check-testname`` doit tasks (each with empty argument dicts)."""

    def run():
        # NOTE(review): no ``self``/``cls`` — probably a stripped
        # @staticmethod; confirm upstream.
        run_doit_task({'lint': {}, 'unicode-check': {}, 'check-testname': {}})
def load_solve_state_from_h5(nnp, filename):
    """Load per-parameter solver states from an HDF5 checkpoint.

    Walks every dataset in the file; datasets group by their parent path
    (one group per parameter, ordered by the parent's ``index`` attribute).
    Within a group, a trailing ``/t`` dataset becomes the iteration counter
    and every other dataset becomes an nn.Variable in ``pstate``.
    Returns an OrderedDict mapping parameter path -> SolverState.
    """
    class SolverState():
        # Lightweight record: iteration count + per-name parameter state.
        def __init__(self):
            self.t = 0
            self.pstate = {}

    states = OrderedDict()
    with get_file_handle_load(nnp, filename, '.h5') as f:
        skeys = []      # all dataset paths found in the file
        pkeys = set()   # (index, parent-path) pairs for ordering

        def _get_skeys(name, obj):
            # visititems callback: record dataset paths and their parents.
            if (not isinstance(obj, h5py.Dataset)):
                return
            skeys.append(name)
            index = obj.parent.attrs.get('index', None)
            pname = name[:name.rindex('/')]
            pkeys.add((index, pname))

        f.visititems(_get_skeys)
        # Iterate parameter groups in index order.
        for (_, pkey) in sorted(pkeys):
            state = SolverState()
            for skey in skeys:
                if (not skey.startswith(pkey)):
                    continue
                ds = f[skey]
                if skey.endswith('/t'):
                    state.t = ds[...]
                else:
                    sname = skey.split('/')[(- 1)]
                    var = nn.Variable.from_numpy_array(ds[...])
                    state.pstate[sname] = var
            states[pkey] = state
    return states
def reduction_test_3(A: dace.float64[(M, N)], B: dace.float64[(M, N)], C: dace.float64[N]):
    """DaCe program computing C[i] = max_rows(A)[i] + sum_rows(B)[i].

    Two axis-0 reductions (max over A with a large-negative identity, sum
    over B with identity 0) followed by a map of tasklets combining the
    per-column partial results.
    """
    tmp = dace.reduce((lambda a, b: max(a, b)), A, identity=(- 9999999), axis=0)
    tmp2 = dace.reduce((lambda a, b: (a + b)), B, identity=0, axis=0)
    for i in dace.map[0:N]:
        with dace.tasklet:
            # DaCe memlet syntax: ``<<`` declares reads, ``>>`` writes.
            (in1 << tmp[i])
            (in2 << tmp2[i])
            (out1 >> C[i])
            out1 = (in1 + in2)
_driver.jit  # NOTE(review): truncated decorator — presumably ``@numba_driver.jit`` (CUDA kernel); confirm upstream
def NumbaClassicControlAcrobotEnvStep(state_arr, action_arr, done_arr, reward_arr, observation_arr, env_timestep_arr, episode_length):
    """One Acrobot environment step per CUDA block (WarpDrive-style kernel).

    Block index selects the environment, thread index the agent (exactly
    one agent per env). Integrates the dynamics with RK4, wraps/bounds the
    state, writes the observation, and marks done on termination or when
    the episode length is reached. Reward is -1 per step, 0 on termination.
    """
    kEnvId = numba_driver.blockIdx.x
    kThisAgentId = numba_driver.threadIdx.x
    # Torque lookup table lives in constant memory.
    TORQUE = numba_driver.const.array_like(AVAIL_TORQUE)
    assert (kThisAgentId == 0), 'We only have one agent per environment'
    env_timestep_arr[kEnvId] += 1
    assert (0 < env_timestep_arr[kEnvId] <= episode_length)
    # Default per-step reward; overwritten below on termination.
    reward_arr[(kEnvId, kThisAgentId)] = (- 1.0)
    action = action_arr[(kEnvId, kThisAgentId, 0)]
    torque = TORQUE[action]
    # Next-state scratch buffer in thread-local memory.
    ns = numba_driver.local.array(shape=4, dtype=numba.float32)
    rk4(state_arr[(kEnvId, kThisAgentId)], torque, ns)
    # Angles wrap to [-pi, pi]; velocities clip to their limits.
    ns[0] = wrap(ns[0], (- pi), pi)
    ns[1] = wrap(ns[1], (- pi), pi)
    ns[2] = bound(ns[2], (- MAX_VEL_1), MAX_VEL_1)
    ns[3] = bound(ns[3], (- MAX_VEL_2), MAX_VEL_2)
    for i in range(4):
        state_arr[(kEnvId, kThisAgentId, i)] = ns[i]
    terminated = _terminal(state_arr, kEnvId, kThisAgentId)
    if terminated:
        reward_arr[(kEnvId, kThisAgentId)] = 0.0
    _get_ob(state_arr, observation_arr, kEnvId, kThisAgentId)
    if ((env_timestep_arr[kEnvId] == episode_length) or terminated):
        done_arr[kEnvId] = 1
def unregister():
    """Blender add-on teardown: unregister this module's classes and remove
    the importer entry from the File > Import menu."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
_start_docstrings('The bare MMBT Model outputting raw hidden-states without any specific head on top.', MMBT_START_DOCSTRING)  # NOTE(review): truncated decorator — presumably ``@add_start_docstrings(...)``; confirm upstream
class MMBTModel(nn.Module, ModuleUtilsMixin):
    """MMBT multimodal transformer (Kiela et al.): fuses a modal encoder's
    embeddings with a text transformer's token embeddings and runs the
    concatenated sequence through the transformer encoder."""

    def __init__(self, config, transformer, encoder):
        super().__init__()
        self.config = config
        self.transformer = transformer
        # Projects the non-text modality into the transformer's embedding
        # space, reusing the transformer's embedding tables.
        self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)

    _start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)  # NOTE(review): truncated decorator — presumably ``@add_start_docstrings_to_model_forward``; confirm
    _return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)  # NOTE(review): truncated decorator — presumably ``@replace_return_docstrings``; confirm

    def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Run the fused [modal ; text] sequence through the encoder.

        Returns ``(sequence_output, pooled_output, *extras)`` as a tuple,
        or a ``BaseModelOutputWithPooling`` when ``return_dict`` is True.
        """
        # Fall back to config defaults for unspecified output flags.
        output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        # Exactly one of input_ids / inputs_embeds must be provided.
        if ((input_ids is not None) and (inputs_embeds is not None)):
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif (input_ids is not None):
            input_txt_shape = input_ids.size()
        elif (inputs_embeds is not None):
            input_txt_shape = inputs_embeds.size()[:(- 1)]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = (input_ids.device if (input_ids is not None) else inputs_embeds.device)
        # Embed the non-text modality (with optional start/end tokens).
        modal_embeddings = self.modal_encoder(input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids)
        input_modal_shape = modal_embeddings.size()[:(- 1)]
        if (token_type_ids is None):
            # Text tokens default to segment id 1 (modal part uses 0).
            token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
        txt_embeddings = self.transformer.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        # Concatenate along the sequence dimension: modal first, then text.
        embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
        input_shape = embedding_output.size()[:(- 1)]
        # Extend the text attention mask with all-ones over the modal part.
        if (attention_mask is None):
            attention_mask = torch.ones(input_shape, device=device)
        else:
            attention_mask = torch.cat([torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1)
        if (encoder_attention_mask is None):
            encoder_attention_mask = torch.ones(input_shape, device=device)
        else:
            encoder_attention_mask = torch.cat([torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        encoder_outputs = self.transformer.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        pooled_output = self.transformer.pooler(sequence_output)
        if (not return_dict):
            return ((sequence_output, pooled_output) + encoder_outputs[1:])
        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def get_input_embeddings(self):
        # NOTE(review): ``self.embeddings`` is not assigned in __init__ —
        # this mirrors the upstream transformers code but looks reachable
        # only if an attribute is set elsewhere; confirm before relying on it.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        # NOTE(review): same ``self.embeddings`` caveat as above.
        self.embeddings.word_embeddings = value
def percent_good_ring(x_fake, var=0.0001, n_clusters=8, radius=2.0):
    """Fraction of generated points lying near any mode of a Gaussian ring.

    Places ``n_clusters`` modes evenly on a circle of the given radius and
    delegates the per-point check to ``percent_good_pts`` with a 3-sigma
    per-axis threshold.
    """
    std = np.sqrt(var)
    angles = np.linspace(0, 2 * np.pi, n_clusters + 1)[:n_clusters]
    xs = radius * np.sin(angles)
    ys = radius * np.cos(angles)
    # "Good" means within 3 standard deviations on each axis of some mode.
    threshold = np.array([3 * std, 3 * std])
    means = [np.array([xs[i], ys[i]]) for i in range(n_clusters)]
    return percent_good_pts(x_fake, means, threshold)
def matplotlib_imshow(img, one_channel=False):
    """Display a CHW image tensor with matplotlib.

    NOTE(review): ``one_channel`` is accepted but currently ignored —
    tutorial variants of this helper convert to grayscale when it is True;
    confirm the intended behavior. The created figure/axes are not
    returned, and ``fig`` is unused after creation.
    """
    (fig, ax) = plt.subplots(figsize=(10, 6))
    # CHW -> HWC, as imshow expects channels last.
    ax.imshow(img.permute(1, 2, 0).numpy())
def do_title(s):
    """Title-case a string: uppercase the first character of each word and
    lowercase the rest, using ``_word_beginning_split_re`` to find word
    boundaries (delimiters are preserved by the capturing split)."""
    pieces = []
    for chunk in _word_beginning_split_re.split(soft_unicode(s)):
        if chunk:
            pieces.append(chunk[0].upper() + chunk[1:].lower())
    return ''.join(pieces)
class RoIAwarePool3dFunction(Function):
    """Autograd Function for RoI-aware 3-D pooling (mmcv CUDA extension).

    Pools per-point features into a fixed (out_x, out_y, out_z) voxel grid
    for each RoI; ``mode`` selects the pooling op passed to the extension.

    NOTE(review): ``forward``/``backward`` take ``ctx`` but carry no
    decorator — @staticmethod appears stripped by extraction; confirm
    against the mmcv source.
    """

    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, mode):
        # out_size may be a single int (cubic grid) or an (x, y, z) triple.
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert (len(out_size) == 3)
            assert mmcv.is_tuple_of(out_size, int)
            (out_x, out_y, out_z) = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[(- 1)]
        num_pts = pts.shape[0]
        # Output buffers filled by the CUDA kernel.
        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_per_voxel), dtype=torch.int)
        ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, mode)
        # Stash what backward needs (custom attribute, not save_for_backward).
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode, num_pts, num_channels)
        return pooled_features

    def backward(ctx, grad_out):
        ret = ctx.roiaware_pool3d_for_backward
        (pts_idx_of_voxels, argmax, mode, num_pts, num_channels) = ret
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, mode)
        # Gradient only w.r.t. pts_feature (3rd forward argument).
        return (None, None, grad_in, None, None, None)
def log_normal_diag(x, mean, log_var, average=False, dim=None):
    """Elementwise log-density of a diagonal Gaussian, reduced over ``dim``.

    Uses the module-level ``log_2_pi`` constant; the reduction is a mean
    when ``average`` is True, otherwise a sum.
    """
    quadratic = torch.pow(x - mean, 2) / torch.exp(log_var)
    log_density = (-0.5) * (log_var + log_2_pi + quadratic)
    reduce_fn = torch.mean if average else torch.sum
    return reduce_fn(log_density, dim)
class EmbeddingImagenet(nn.Layer):
    """Paddle CNN embedding backbone for (mini-)ImageNet few-shot tasks.

    Four conv blocks (each: conv -> BN -> 2x2 max-pool -> LeakyReLU, with
    dropout on the last two) followed by a fully-connected projection to
    ``emb_size`` with 1-D batch norm.
    """

    def __init__(self, emb_size):
        super(EmbeddingImagenet, self).__init__()
        self.emb_size = emb_size
        self.ndf = 64  # base channel count; later blocks scale it up
        self.conv1 = nn.Conv2D(3, self.ndf, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(self.ndf)
        # Block 2 has no padding, shrinking the spatial size by 2.
        self.conv2 = nn.Conv2D(self.ndf, int((self.ndf * 1.5)), kernel_size=3, bias_attr=False)
        self.bn2 = nn.BatchNorm2D(int((self.ndf * 1.5)))
        self.conv3 = nn.Conv2D(int((self.ndf * 1.5)), (self.ndf * 2), kernel_size=3, padding=1, bias_attr=False)
        self.bn3 = nn.BatchNorm2D((self.ndf * 2))
        self.drop_3 = nn.Dropout2D(0.4)
        self.conv4 = nn.Conv2D((self.ndf * 2), (self.ndf * 4), kernel_size=3, padding=1, bias_attr=False)
        self.bn4 = nn.BatchNorm2D((self.ndf * 4))
        self.drop_4 = nn.Dropout2D(0.5)
        # Flattened feature size assumes a 5x5 spatial map at this point
        # (i.e. 84x84-style inputs) — NOTE(review): confirm input size.
        self.fc1 = nn.Linear((((self.ndf * 4) * 5) * 5), self.emb_size, bias_attr=True)
        self.bn_fc = nn.BatchNorm1D(self.emb_size)

    def forward(self, input):
        # Each block: conv -> BN -> max-pool(2) -> LeakyReLU(0.2).
        e1 = F.max_pool2d(self.bn1(self.conv1(input)), 2)
        x = F.leaky_relu(e1, 0.2)
        e2 = F.max_pool2d(self.bn2(self.conv2(x)), 2)
        x = F.leaky_relu(e2, 0.2)
        e3 = F.max_pool2d(self.bn3(self.conv3(x)), 2)
        x = F.leaky_relu(e3, 0.2)
        x = self.drop_3(x)
        e4 = F.max_pool2d(self.bn4(self.conv4(x)), 2)
        x = F.leaky_relu(e4, 0.2)
        x = self.drop_4(x)
        # Flatten and project to the embedding dimension.
        x = x.reshape([(- 1), (((self.ndf * 4) * 5) * 5)])
        output = self.bn_fc(self.fc1(x))
        return output
def price_sum(x: list) -> int:
    """Total price over a sequence of records keyed by ``C.Keys.PRICE``;
    each value is coerced to int before summing."""
    return sum(int(entry[C.Keys.PRICE]) for entry in x)
def apply_bias_correction_to_graph(graph_to_apply_bias_correction: Graph, core_config: CoreConfig, fw_impl: FrameworkImplementation) -> Graph:
    """Return a deep copy of the graph with bias correction applied to every
    eligible node.

    A node is corrected when its weights quantization is enabled, bias
    correction is turned on both globally (core config) and in the node's
    final quantization config, and second-moment correction is NOT used for
    that node (the two corrections are mutually exclusive).
    """
    corrected_graph = copy.deepcopy(graph_to_apply_bias_correction)
    global_bias_correction = core_config.quantization_config.weights_bias_correction
    for node in corrected_graph.nodes:
        if not (node.is_weights_quantization_enabled() and global_bias_correction):
            continue
        node_cfg = node.final_weights_quantization_cfg
        if node_cfg.weights_second_moment_correction:
            continue
        if node_cfg.weights_bias_correction:
            _apply_bias_correction_to_node(node, fw_impl)
    return corrected_graph
_model  # NOTE(review): truncated decorator — presumably ``@register_model`` (timm); confirm upstream
def seresnext26d_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """SE-ResNeXt-26 32x4d with a ResNet-D stem (deep stem, avg-down).

    Builds the Bottleneck/[2, 2, 2, 2] configuration with Squeeze-and-
    Excitation attention, optionally loading pretrained weights.
    """
    default_cfg = default_cfgs['seresnext26d_32x4d']
    model = ResNet(Bottleneck, [2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True, num_classes=num_classes, in_chans=in_chans, block_args=dict(attn_layer='se'), **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def state_form_z(z, q, y, v, geometry):
    """UFL weak form for the adjoint/state variable z.

    Stiffness term ``grad(z) . grad(q)`` minus the load term ``(y + v) * q``,
    both integrated over ``geometry.dx``.
    """
    dx = geometry.dx
    stiffness = dot(grad(z), grad(q)) * dx
    load = (y + v) * q * dx
    return stiffness - load
class TestTorch(test_inference.TestInference):
    """Runs the shared inference test-suite against the torch backend."""

    def setUp(self):
        # PyTorch is optional; skip the whole class when it's unavailable.
        if skip:
            raise unittest.SkipTest('PyTorch not installed')
        super().setUp()
        self.engine = FactoredInference(self.domain, backend='torch', log=True)
def r_cond2(t):
    """Build a negated condition evaluator from parse node ``t``.

    ``t[2]`` is the inner condition callable; the returned wrapper enforces
    the global call budget (``MAX_FUNC_CALL``) and inverts the inner
    condition's truth value. Result is a one-element program fragment list.
    """
    inner = t[2]

    def negated(world, n):
        if n > MAX_FUNC_CALL:
            # Call budget exhausted: fail without evaluating further.
            return (world, n, False, False)
        world, n, ok, truth = inner(world, n)
        return (world, n, ok, not truth)

    return [('cond', negated)]
def rpn(base_layers, num_anchors):
    """Region Proposal Network head (Faster R-CNN, Keras).

    Adds a shared 3x3 conv over the backbone feature map, then two sibling
    1x1 convs: per-anchor objectness scores (sigmoid) and 4 box-regression
    deltas per anchor (linear). Returns [class branch, regression branch,
    base feature map].
    """
    shared = Convolution2D(512, (3, 3), padding='same', activation='relu',
                           kernel_initializer='normal', name='rpn_conv1')(base_layers)
    cls_branch = Convolution2D(num_anchors, (1, 1), activation='sigmoid',
                               kernel_initializer='uniform', name='rpn_out_class')(shared)
    regr_branch = Convolution2D(num_anchors * 4, (1, 1), activation='linear',
                                kernel_initializer='zero', name='rpn_out_regress')(shared)
    return [cls_branch, regr_branch, base_layers]
class TestFeatureColumnBase(unittest.TestCase):
    """Shared helper base for feature-column tests.

    ``check`` wraps a column in a ComposedColumnTransformer, feeds one batch
    of inputs through it, and compares the first output against the
    expectation (array-aware).
    """

    def check(self, column, column_names, inputs, expected_outputs):
        # Normalize scalars to one-element tuples.
        if not isinstance(inputs, (list, tuple)):
            inputs = (inputs,)
        if not isinstance(expected_outputs, (list, tuple)):
            expected_outputs = (expected_outputs,)
        self.assertEqual(len(inputs), len(expected_outputs))
        transformer = fc.ComposedColumnTransformer(column_names, column)
        outputs = transformer((inputs,))[0]
        self.assertEqual(len(inputs), len(outputs))
        actual, expected = outputs[0], expected_outputs[0]
        # ndarray equality needs array_equal; plain values use assertEqual.
        if isinstance(actual, np.ndarray):
            self.assertTrue(np.array_equal(actual, expected))
        else:
            self.assertEqual(actual, expected)
class ComputeBucketAssignmentTest(TestCase):
    """Tests for ``dist._compute_bucket_assignment_by_size``.

    The function groups gradient tensors into buckets by byte-size limit,
    keeping tensors of different dtypes in separate buckets; when multiple
    limits are given, each successive limit applies to the next buckets.
    Expected index groupings below are pinned against that private API.
    """

    def test_single_limit_single_dtype(self):
        # 400-byte limit: each float tensor (>=200 bytes) gets its own bucket.
        tensors = [torch.empty([100], dtype=torch.float), torch.empty([200], dtype=torch.float), torch.empty([100], dtype=torch.float), torch.empty([50], dtype=torch.float)]
        result = dist._compute_bucket_assignment_by_size(tensors, [400])
        self.assertEqual([[0], [1], [2], [3]], result)

    def test_single_limit_multi_dtype(self):
        # float and double tensors never share a bucket.
        tensors = [torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double)]
        result = dist._compute_bucket_assignment_by_size(tensors, [400])
        self.assertEqual([[0, 2], [1, 3], [4], [5]], result)

    def test_multi_limit_single_dtype(self):
        # First bucket uses the 40-byte limit, later ones the 80-byte limit.
        tensors = [torch.empty([10], dtype=torch.float), torch.empty([10], dtype=torch.float), torch.empty([10], dtype=torch.float), torch.empty([10], dtype=torch.float)]
        result = dist._compute_bucket_assignment_by_size(tensors, [40, 80])
        self.assertEqual([[0], [1, 2], [3]], result)

    def test_multi_limit_multi_dtype(self):
        # Per-dtype bucketing combined with successive size limits.
        tensors = [torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double)]
        result = dist._compute_bucket_assignment_by_size(tensors, [200, 400])
        self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
class UnaryBinaryExpressionGen():
    """Generates random symbolic expressions with a target number of operations.

    Builds a table ``D`` where ``D[e][n]`` counts subtrees with ``e`` empty
    slots and ``n`` remaining operators, then samples tree positions/arities
    proportionally to those counts (the unary-binary tree sampling scheme).
    Trees are produced in prefix order as a flat sequence, then converted to
    symbolic expressions via the configured op functions.
    """

    def __init__(self, unary_ops: T.Sequence[OpProbability], binary_ops: T.Sequence[OpProbability], leaves: T.Sequence[sf.Scalar]):
        self.unary_ops = unary_ops
        self.binary_ops = binary_ops
        self.leaves = leaves
        self.ops = (list(self.unary_ops) + list(self.binary_ops))
        self.ops_dict = {op.name: op for op in self.ops}
        # Normalize user-supplied op weights into probability vectors.
        self.unary_ops_probs = np.array([op.prob for op in self.unary_ops])
        self.unary_ops_probs = (self.unary_ops_probs / sum(self.unary_ops_probs))
        self.binary_ops_probs = np.array([op.prob for op in self.binary_ops])
        self.binary_ops_probs = (self.binary_ops_probs / sum(self.binary_ops_probs))
        # Lazily built (and transposed) count table; see generate_D().
        self.D: T.Optional[T.List[np.ndarray]] = None

    # FIX: restored @staticmethod — this is invoked as
    # UnaryBinaryExpressionGen._next_row_of_D(...) with no instance argument.
    @staticmethod
    def _next_row_of_D(num_leaves: int, max_ops: int, n: int, prev_row: np.ndarray, p1: float, p2: float) -> np.ndarray:
        """Compute row ``n`` of the count table from row ``n - 1``.

        Recurrence: a slot is either filled with one of ``num_leaves`` leaves,
        a unary op (weight ``p1``), or a binary op (weight ``p2``).
        """
        s = np.zeros((((2 * max_ops) - n) + 1))
        for e in range(1, (((2 * max_ops) - n) + 1)):
            s[e] = (((num_leaves * s[(e - 1)]) + (p1 * prev_row[e])) + (p2 * prev_row[(e + 1)]))
        return s

    # FIX: restored @staticmethod — called as self.generate_D(num_ops_target);
    # without the decorator, `self` would be bound to `max_ops` and crash.
    @staticmethod
    def generate_D(max_ops: int, num_leaves: int = 1, p1: int = 1, p2: int = 1) -> T.List[np.ndarray]:
        """Build and transpose the D table for trees with up to ``max_ops`` operators.

        Returns D transposed so that it is indexed as ``D[e][n]`` (empty slots
        first, operator count second), matching sample_next_pos().
        """
        # Row 0: with no operators left, e empty slots can only be filled by leaves.
        D = [np.array(([0] + [(num_leaves ** e) for e in range(1, ((2 * max_ops) + 1))]))]
        for n in range(1, (max_ops + 1)):
            D.append(UnaryBinaryExpressionGen._next_row_of_D(num_leaves, max_ops, n, D[(- 1)], p1, p2))
        assert all(((len(D[e]) >= len(D[(e + 1)])) for e in range((len(D) - 1))))
        D_transpose = [np.array([D[e][n] for e in range(len(D)) if (n < len(D[e]))])
                       for n in range(max((len(x) for x in D)))]
        return D_transpose

    def sample_next_pos(self, nb_empty: int, nb_ops: int, num_leaves: int = 1, p1: int = 1, p2: int = 1) -> T.Tuple[(int, int)]:
        """Sample the position (0-based empty-slot index) and arity of the next op.

        Probabilities are proportional to the number of completions counted in
        self.D after skipping ``i`` slots with leaves and placing a unary
        (first half) or binary (second half) operator.
        """
        assert (nb_empty > 0)
        assert (nb_ops > 0)
        assert (self.D is not None)
        probs: T.List[float] = []
        for i in range(nb_empty):
            probs.append((((num_leaves ** i) * p1) * self.D[(nb_empty - i)][(nb_ops - 1)]))
        for i in range(nb_empty):
            probs.append((((num_leaves ** i) * p2) * self.D[((nb_empty - i) + 1)][(nb_ops - 1)]))
        np_probs = np.array([(p / self.D[nb_empty][nb_ops]) for p in probs], dtype=np.float64)
        e = np.random.choice((2 * nb_empty), p=np_probs)
        arity = (1 if (e < nb_empty) else 2)
        e = (e % nb_empty)
        return (e, arity)

    def build_tree_sequence(self, num_ops_target: int) -> T.List:
        """Sample a random prefix-order tree with exactly ``num_ops_target`` ops.

        Returns a flat list where operators appear by name and leaves are drawn
        from self.leaves.
        """
        # (Re)build the count table if missing or too small for this target.
        if ((self.D is None) or (num_ops_target >= (len(self.D[0]) - 1))):
            self.D = self.generate_D(num_ops_target)
        e = 1          # number of currently-empty slots
        l_leaves = 0   # leaves committed to the left of the insertion point
        t_leaves = 1   # total leaves the finished tree will have
        stack = [None] # prefix-order skeleton; None marks an unfilled slot
        for n in range(num_ops_target, 0, (- 1)):
            (k, arity) = self.sample_next_pos(e, n, p1=1, p2=1)
            if (arity == 1):
                op = np.random.choice(self.unary_ops, p=self.unary_ops_probs)
            else:
                op = np.random.choice(self.binary_ops, p=self.binary_ops_probs)
            e += ((arity - 1) - k)
            t_leaves += (arity - 1)
            l_leaves += k
            # Replace the (l_leaves)-th empty slot with the op plus fresh child slots.
            pos = [i for (i, v) in enumerate(stack) if (v is None)][l_leaves]
            stack = (((stack[:pos] + [op.name]) + [None for _ in range(arity)]) + stack[(pos + 1):])
        assert (len([1 for v in stack if (v in self.ops_dict)]) == num_ops_target)
        assert (len([1 for v in stack if (v is None)]) == t_leaves)
        leaves = [np.random.choice(self.leaves) for _ in range(t_leaves)]
        for i in range(len(stack)):
            if (stack[i] is None):
                stack[i] = leaves.pop()
        assert (len(leaves) == 0)
        return stack

    def seq_to_expr(self, seq: T.Sequence[T.Union[(str, sf.Scalar)]]) -> sf.Expr:
        """Convert a prefix-order token sequence into a symbolic expression."""

        def _seq_to_expr(seq: T.Sequence[T.Union[(str, sf.Scalar)]]) -> T.Tuple[(sf.Scalar, T.Sequence[T.Union[(str, sf.Scalar)]])]:
            # Returns (expression, remaining-unconsumed tokens).
            assert (len(seq) > 0)
            t = seq[0]
            if (t in self.ops_dict):
                op = self.ops_dict[T.cast(str, t)]
                args = []
                l1 = seq[1:]
                for _ in range(op.arity):
                    (i1, l1) = _seq_to_expr(l1)
                    args.append(i1)
                return (op.func(*args), l1)
            elif (t in self.leaves):
                return (T.cast(sf.Scalar, t), seq[1:])
            else:
                # FIX: was `assert f'Unknown: {t}'`, which is always true (a
                # non-empty string), silently returning (0, []) for bad tokens.
                raise AssertionError(f'Unknown: {t}')

        return _seq_to_expr(seq)[0]

    def build_expr(self, num_ops_target: int) -> sf.Scalar:
        """Sample one random expression with ``num_ops_target`` operations."""
        seq = self.build_tree_sequence(num_ops_target=num_ops_target)
        return self.seq_to_expr(seq)

    def build_expr_vec(self, num_ops_target: int, num_exprs: int = None) -> sf.M:
        """Sample a vector of expressions totalling roughly ``num_ops_target`` ops.

        Expressions that fail to build (division by zero, runtime errors) are
        skipped and re-sampled until ``num_exprs`` succeed.
        """
        # Oversample by 10% to compensate for simplification during construction.
        num_ops_target = int((1.1 * num_ops_target))
        if (num_exprs is None):
            num_exprs = max(1, int(np.sqrt(num_ops_target)))
        target_per_expr = int((num_ops_target / num_exprs))
        exprs: T.List[sf.Scalar] = []
        while (len(exprs) < num_exprs):
            try:
                exprs.append(self.build_expr(target_per_expr))
            except (ZeroDivisionError, RuntimeError) as e:
                print(e)
                print('Skipping.')
        return sf.M(exprs)

    # FIX: restored @classmethod — the first parameter is `cls` and the method
    # constructs an instance of the class.
    @classmethod
    def default(cls, unary_ops: T.Sequence[OpProbability] = DEFAULT_UNARY_OPS, binary_ops: T.Sequence[OpProbability] = DEFAULT_BINARY_OPS, leaves: T.Sequence[sf.Scalar] = DEFAULT_LEAVES) -> 'UnaryBinaryExpressionGen':
        """Construct a generator with the module's default op/leaf sets."""
        # Return annotation quoted: it is a forward reference inside the class body.
        return cls(unary_ops=unary_ops, binary_ops=binary_ops, leaves=leaves)
# NOTE(review): `_args('v', 'i', 'v', 'v', 'v', 'v')` reads like a decorator whose
# leading '@' was lost in formatting (presumably `@parse_args('v', 'i', ...)`); as
# written it is a bare call expression — confirm against the original source.
_args('v', 'i', 'v', 'v', 'v', 'v')
def empty(g, sizes, dtype, layout, device, pin_memory=False, memory_format=None):
    """Symbolic for `empty`: lowered to `zeros`, since an uninitialized-memory
    tensor has no representation in the target graph. `memory_format` is
    accepted for signature compatibility but ignored.
    """
    return zeros(g, sizes, dtype, layout, device, pin_memory)
class IsMaleLabeler(Labeler):
    """Labels each inpatient-admission event with whether the patient carries
    the male gender code anywhere in their event history."""

    def __init__(self, ontology: extension_datasets.Ontology):
        # The ontology is accepted for interface compatibility but not consulted.
        self.male_code: str = 'Gender/M'

    def label(self, patient: Patient) -> List[Label]:
        """Return one boolean Label per admission event, timestamped at event start."""
        is_male: bool = any(e.code == self.male_code for e in patient.events)
        return [
            Label(time=event.start, value=is_male)
            for event in patient.events
            if event.code in get_inpatient_admission_concepts()
        ]

    def get_labeler_type(self) -> LabelType:
        """All labels produced by this labeler are boolean."""
        return 'boolean'
class MobileViTFeatureExtractor(metaclass=DummyObject):
    """Import-time placeholder: instantiating raises unless the 'vision'
    backend is available."""

    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error if the backend is missing.
        requires_backends(self, ['vision'])
# NOTE(review): `_to_string_io` reads like a decorator that lost its leading '@'
# during formatting (likely `@coerce_to_string_io` or similar); as written it is
# a bare name reference — confirm against the original source.
_to_string_io
def load_events(fhandle: TextIO) -> annotations.Events:
    """Parse a tab-separated events file into an Events annotation.

    Expected columns per row: start, end, label, confidence (no header line —
    the field names are supplied explicitly below). Times are interpreted as
    seconds; intervals are "open" per the Events constructor arguments.
    """
    times = []
    labels = []
    confidence = []
    default_headers = ['start', 'end', 'label', 'confidence']
    reader = csv.DictReader(fhandle, delimiter='\t', fieldnames=default_headers)
    for line in reader:
        times.append([float(line['start']), float(line['end'])])
        labels.append(line['label'])
        # Clamp confidence to at most 1.0 for downstream consumers.
        confidence.append(min(float(line['confidence']), 1.0))
    events_data = annotations.Events(np.array(times), 'seconds', labels, 'open', np.array(confidence))
    return events_data
def test():
    """Visual smoke test: plot the dipole unit-vector field and sample field lines.

    Relies on module-level helpers `b_hat` (unit field vectors) and `b_line`
    (field-line trace) — presumably defined elsewhere in this file; TODO confirm
    their exact contracts.
    """
    # Regular grid of sample points for the quiver plot.
    x = np.arange((- 100.0), 101.0, 5.0)
    y = np.arange((- 100.0), 101.0, 5.0)
    (x_vec, y_vec) = b_hat(x, y)
    fig = plt.figure(figsize=(10, 8))
    ax1 = plt.subplot('111')
    ax1.quiver(x, y, x_vec, y_vec)
    # Blue field lines seeded along the x-axis every 10 units.
    for i in range((- 120), 121, 10):
        (x, y) = b_line(float(i), 0.0, 100)
        ax1.plot(x, y, 'b')
    # Red field lines seeded on a half-circle of unit radius.
    for theta in np.arange((math.pi / 2.0), ((3.0 * math.pi) / 2.0), (math.pi / 100.0)):
        x = np.sin(theta)
        y = np.cos(theta)
        (x, y) = b_line(x, y, 100)
        ax1.plot(x, y, 'r')
    ax1.set_xlim([(- 100), 100])
    ax1.set_ylim([(- 100), 100])
    plt.title('Unit vectors for an arbitrary dipole field')
    fig.show()
class QuantAct(nn.Module):
    """Tracks activation statistics and (optionally) quantizes activations.

    In training mode the running min/max of the activation (after an optional
    residual add) is updated; when `quant_mode` is enabled, the activation is
    symmetrically quantized to `activation_bit` bits using those statistics.
    """

    def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
        # activation_bit: bit-width for activation quantization.
        # act_range_momentum: EMA momentum for the running min/max; -1 selects
        #   running extrema instead of an EMA (see forward()).
        # per_channel / channel_len: per-channel statistics are rejected below.
        # quant_mode: when False, forward() only tracks statistics.
        super().__init__()
        self.activation_bit = activation_bit
        self.act_range_momentum = act_range_momentum
        self.quant_mode = quant_mode
        self.per_channel = per_channel
        self.percentile = False  # percentile-based range tracking unsupported
        self.act_function = SymmetricQuantFunction.apply
        if (not self.per_channel):
            # Buffers (not parameters): persisted in the state dict, not optimized.
            self.register_buffer('x_min', torch.zeros(1))
            self.register_buffer('x_max', torch.zeros(1))
            self.register_buffer('act_scaling_factor', torch.zeros(1))
            # Seed the range just off zero; forward() uses the +/-1.1e-05 band
            # to detect the "not yet initialized" state.
            self.x_min -= 1e-05
            self.x_max += 1e-05
        else:
            raise NotImplementedError('per-channel mode is not currently supported for activation.')

    def __repr__(self):
        """Summary including the currently tracked activation range."""
        return '{0}(activation_bit={1}, quant_mode: {2}, Act_min: {3:.2f}, Act_max: {4:.2f})'.format(self.__class__.__name__, self.activation_bit, self.quant_mode, self.x_min.item(), self.x_max.item())

    def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None):
        """Track activation range (training) and optionally quantize.

        Returns (activation, scaling_factor); scaling_factor is None when
        quant_mode is off.
        """
        # Fold an optional residual branch into the activation first.
        x_act = (x if (identity is None) else (identity + x))
        if self.training:
            assert (not self.percentile), 'percentile mode is not currently supported for activation.'
            assert (not self.per_channel), 'per-channel mode is not currently supported for activation.'
            x_min = x_act.data.min()
            x_max = x_act.data.max()
            assert ((x_max.isnan().sum() == 0) and (x_min.isnan().sum() == 0)), 'NaN detected when computing min/max of the activation'
            # Range still at its +/-1e-05 seed: initialize from this batch.
            if ((self.x_min.min() > (- 1.1e-05)) and (self.x_max.max() < 1.1e-05)):
                self.x_min = (self.x_min + x_min)
                self.x_max = (self.x_max + x_max)
            elif (self.act_range_momentum == (- 1)):
                # Momentum of -1: keep the running extrema rather than an EMA.
                self.x_min = torch.min(self.x_min, x_min)
                self.x_max = torch.max(self.x_max, x_max)
            else:
                # Exponential moving average of the per-batch min/max.
                self.x_min = ((self.x_min * self.act_range_momentum) + (x_min * (1 - self.act_range_momentum)))
                self.x_max = ((self.x_max * self.act_range_momentum) + (x_max * (1 - self.act_range_momentum)))
        if (not self.quant_mode):
            # Statistics-only mode: pass the float activation through.
            return (x_act, None)
        # Callers may pin the quantization range explicitly.
        x_min = (self.x_min if (specified_min is None) else specified_min)
        x_max = (self.x_max if (specified_max is None) else specified_max)
        self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel)
        if (pre_act_scaling_factor is None):
            # Floating-point input: quantize directly.
            quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
        else:
            # Already-quantized input: requantize via fixed-point multiplication.
            quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor)
        correct_output_scale = self.act_scaling_factor.view((- 1))
        # Return the dequantized activation alongside its scaling factor.
        return ((quant_act_int * correct_output_scale), self.act_scaling_factor)
def assure_array_length(array, size, value=128):
    """Pad `array` in place with `value` until it has at least `size` elements.

    Arrays already at or above `size` are left untouched. Mutates the list and
    returns None, matching the original in-place contract.
    """
    # Single extend() instead of a one-element append loop.
    if len(array) < size:
        array.extend([value] * (size - len(array)))
def main(args, model):
    """Federated fine-tuning driver: per-client local training, model averaging,
    and periodic validation.

    NOTE(review): depends on many project helpers (misc, Partial_Client_Selection,
    DatasetFLFinetune, train_one_epoch, valid, average_model) and on numerous
    `args` attributes; comments below describe only what this code visibly does.
    """
    # --- distributed / device / seed setup ---
    misc.init_distributed_mode(args)
    device = torch.device(args.device)
    misc.fix_random_seeds(args)
    cudnn.benchmark = True
    create_dataset_and_evalmetrix(args, mode='finetune')
    # --- validation / test datasets (both use the 'test' phase) ---
    if args.disable_eval_during_finetuning:
        dataset_val = None
    else:
        dataset_val = DatasetFLFinetune(args=args, phase='test')
    if args.eval:
        dataset_test = DatasetFLFinetune(args=args, phase='test')
    else:
        dataset_test = None
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    if args.dist_eval:
        if ((len(dataset_val) % num_tasks) != 0):
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    if (dataset_val is not None):
        data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_val = None
    if (dataset_test is not None):
        data_loader_test = torch.utils.data.DataLoader(dataset_test, sampler=sampler_test, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_test = None
    # --- per-client training state plus the CPU-resident averaged model ---
    (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all, mixupfn_all) = Partial_Client_Selection(args, model, mode='finetune')
    model_avg = deepcopy(model).cpu()
    if (args.log_dir is not None):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = misc.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None
    print(' Running fine-tuning ')
    tot_clients = args.dis_cvs_files
    print('total_clients: ', tot_clients)
    epoch = (- 1)
    start_time = time.time()
    max_accuracy = 0.0
    # --- communication rounds: select clients, train locally, average, evaluate ---
    while True:
        print('epoch: ', epoch)
        epoch += 1
        # Full participation when every client fits; otherwise sample a subset.
        if (args.num_local_clients == len(args.dis_cvs_files)):
            cur_selected_clients = args.proxy_clients
        else:
            cur_selected_clients = np.random.choice(tot_clients, args.num_local_clients, replace=False).tolist()
        cur_tot_client_Lens = 0
        for client in cur_selected_clients:
            cur_tot_client_Lens += args.clients_with_len[client]
        for (cur_single_client, proxy_single_client) in zip(cur_selected_clients, args.proxy_clients):
            print('cur_single_client: ', cur_single_client)
            print('proxy_single_client: ', proxy_single_client)
            args.single_client = cur_single_client
            # Aggregation weight proportional to this client's share of the data.
            args.clients_weightes[proxy_single_client] = (args.clients_with_len[cur_single_client] / cur_tot_client_Lens)
            dataset_train = DatasetFLFinetune(args=args, phase='train')
            num_tasks = misc.get_world_size()
            global_rank = misc.get_rank()
            print(f'client: {proxy_single_client} ')
            if args.distributed:
                sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
            else:
                sampler_train = torch.utils.data.RandomSampler(dataset_train)
            print(('Sampler_train = %s' % str(sampler_train)))
            data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
            # Pull this client's training state prepared by Partial_Client_Selection().
            model = model_all[proxy_single_client]
            optimizer = optimizer_all[proxy_single_client]
            criterion = criterion_all[proxy_single_client]
            lr_schedule_values = lr_scheduler_all[proxy_single_client]
            wd_schedule_values = wd_scheduler_all[proxy_single_client]
            loss_scaler = loss_scaler_all[proxy_single_client]
            mixup_fn = mixupfn_all[proxy_single_client]
            if args.distributed:
                model_without_ddp = model.module
            else:
                model_without_ddp = model
            n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
            total_batch_size = ((args.batch_size * args.update_freq) * misc.get_world_size())
            num_training_steps_per_inner_epoch = (len(dataset_train) // total_batch_size)
            print(('LR = %.8f' % args.lr))
            print(('Batch size = %d' % total_batch_size))
            print(('Update frequent = %d' % args.update_freq))
            print(('Number of training examples = %d' % len(dataset_train)))
            print(('Number of training training per epoch = %d' % num_training_steps_per_inner_epoch))
            if args.distributed:
                data_loader_train.sampler.set_epoch(epoch)
            if (log_writer is not None):
                log_writer.set_step(epoch)
            if args.eval:
                # Evaluation-only path: load a checkpoint, report, and exit.
                misc.auto_load_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=None)
                test_stats = valid(args, model, data_loader_test)
                print(f"Accuracy of the network on the {len(dataset_test)} test images: {test_stats['acc1']:.1f}%")
                model.cpu()
                exit(0)
            # Local training: E_epoch passes over this client's data.
            for inner_epoch in range(args.E_epoch):
                train_stats = train_one_epoch(args, model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler=loss_scaler, cur_single_client=cur_single_client, max_norm=args.clip_grad, proxy_single_client=proxy_single_client, model_ema=None, mixup_fn=mixup_fn, log_writer=log_writer, start_steps=((epoch + inner_epoch) * num_training_steps_per_inner_epoch), lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, num_training_steps_per_inner_epoch=num_training_steps_per_inner_epoch, update_freq=args.update_freq)
                log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'client': cur_single_client, 'epoch': epoch, 'inner_epoch': inner_epoch, 'n_parameters': n_parameters}
                if (args.output_dir and misc.is_main_process()):
                    if (log_writer is not None):
                        log_writer.flush()
                    with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps(log_stats) + '\n'))
        # Federated averaging of the per-client models into model_avg.
        average_model(args, model_avg, model_all)
        if (args.output_dir and args.save_ckpt):
            if ((((epoch + 1) % args.save_ckpt_freq) == 0) or ((epoch + 1) == args.max_communication_rounds)):
                # NOTE(review): `optimizer` and `loss_scaler` here are whatever
                # the last client in the loop above left bound — confirm intended.
                misc.save_model(args=args, model=model_avg, model_without_ddp=model_avg, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        if (data_loader_val is not None):
            # Validate the averaged model; keep the best checkpoint by top-1.
            model_avg.to(args.device)
            test_stats = valid(args, model_avg, data_loader_val)
            print(f"Accuracy of the network on the {len(dataset_val)} validation images: {test_stats['acc1']:.1f}%")
            if (max_accuracy < test_stats['acc1']):
                max_accuracy = test_stats['acc1']
                if (args.output_dir and args.save_ckpt):
                    misc.save_model(args=args, model=model_avg, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch='best', model_ema=None)
            print(f'Max accuracy: {max_accuracy:.2f}%')
            if (log_writer is not None):
                log_writer.update(test_acc1=test_stats['acc1'], head='perf', step=epoch)
                log_writer.update(test_acc5=test_stats['acc5'], head='perf', step=epoch)
                log_writer.update(test_loss=test_stats['loss'], head='perf', step=epoch)
            log_stats = {**{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
            if (args.output_dir and misc.is_main_process()):
                if (log_writer is not None):
                    log_writer.flush()
                with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                    f.write((json.dumps(log_stats) + '\n'))
            # Keep the averaged model off-device between rounds.
            model_avg.to('cpu')
        print('global_step_per_client: ', args.global_step_per_client[proxy_single_client])
        print('t_total: ', args.t_total[proxy_single_client])
        # Stop once the last-seen client has exhausted its step budget.
        if (args.global_step_per_client[proxy_single_client] >= args.t_total[proxy_single_client]):
            total_time = (time.time() - start_time)
            total_time_str = str(datetime.timedelta(seconds=int(total_time)))
            print('Training time {}'.format(total_time_str))
            break
class BLEUScore(NGramScore):
    """Accumulating corpus-level BLEU scorer.

    Candidate/reference pairs are added via append(); score()/bleu() computes
    BLEU over everything accumulated since the last reset().

    FIX: replaced the Python-2-only `xrange` and `dict.iteritems` with `range`
    and `dict.items` — both raise under Python 3, which the rest of this file
    (f-strings) targets.
    """

    TINY = 1e-15   # floor for n-gram hit counts, avoids log(0)
    SMALL = 1e-09  # floor for candidate lengths, avoids division by zero

    def __init__(self, max_ngram=4, case_sensitive=False, smoothing=0.0):
        super(BLEUScore, self).__init__(max_ngram, case_sensitive)
        self.smoothing = smoothing
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.ref_len = 0
        self.cand_lens = ([0] * self.max_ngram)
        self.hits = ([0] * self.max_ngram)

    def append(self, pred_sent, ref_sents):
        """Accumulate n-gram hits and length statistics for one candidate."""
        (pred_sent, ref_sents) = self.check_tokenized(pred_sent, ref_sents)
        for i in range(self.max_ngram):
            self.hits[i] += self.compute_hits((i + 1), pred_sent, ref_sents)
            self.cand_lens[i] += (len(pred_sent) - i)
        # Effective reference length: the reference closest in length to the
        # candidate, ties broken toward the shorter reference.
        closest_ref = min(ref_sents, key=(lambda ref_sent: (abs((len(ref_sent) - len(pred_sent))), len(ref_sent))))
        self.ref_len += len(closest_ref)

    def score(self):
        """Alias for bleu()."""
        return self.bleu()

    def compute_hits(self, n, pred_sent, ref_sents):
        """Count clipped n-gram matches of the candidate against merged references."""
        merged_ref_ngrams = self.get_ngram_counts(n, ref_sents)
        pred_ngrams = self.get_ngram_counts(n, [pred_sent])
        hits = 0
        for (ngram, cnt) in pred_ngrams.items():
            hits += min(merged_ref_ngrams.get(ngram, 0), cnt)
        return hits

    def bleu(self):
        """Return BLEU: brevity penalty times the n-gram precision geometric mean."""
        bp = 1.0
        if (self.cand_lens[0] <= self.ref_len):
            # Guard against a zero candidate length with a tiny denominator.
            bp = math.exp((1.0 - (self.ref_len / (float(self.cand_lens[0]) if self.cand_lens[0] else 1e-05))))
        return (bp * self.ngram_precision())

    def ngram_precision(self):
        """Geometric mean of per-order precisions, with optional add-k smoothing."""
        prec_log_sum = 0.0
        for (n_hits, n_len) in zip(self.hits, self.cand_lens):
            n_hits += self.smoothing
            n_len += self.smoothing
            n_hits = max(n_hits, self.TINY)
            n_len = max(n_len, self.SMALL)
            prec_log_sum += math.log((n_hits / n_len))
        return math.exp(((1.0 / self.max_ngram) * prec_log_sum))
def get_bag_of_words(cmd):
    """Clean/anonymize `cmd` and return its whitespace-separated tokens."""
    cleaned = clean_anonymize_command(cmd)
    return cleaned.strip().split()
class TestBasicConv():
    """Scaffolded tests for BasicConv; only construction is exercised so far."""

    def test_init(self):
        # Constructing the model must not raise.
        BasicConv([5], 5)

    def test_fill(self):
        pass  # TODO: exercise fill behavior

    def test_unfill(self):
        pass  # TODO: exercise unfill behavior

    def test_forward(self):
        pass  # TODO: exercise the forward pass
def save_checkpoint(its, model_state, optim_state, logdir):
    """Persist the latest model/optimizer states plus the iteration count.

    Writes three files under `logdir`: last.model, last.optim, and a pickled
    last.config holding {'its': its}.
    """
    model_path = os.path.join(logdir, 'last.model')
    optim_path = os.path.join(logdir, 'last.optim')
    config_path = os.path.join(logdir, 'last.config')
    torch.save(model_state, model_path)
    torch.save(optim_state, optim_path)
    with open(config_path, 'wb') as handle:
        pickle.dump({'its': its}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def create_parsers():
    """Build the English text pipeline: NLTK tokenization, then sentence splitting.

    FIX: removed the unreachable `return ppl` that followed the first return and
    referenced an undefined name.
    """
    return PipelineCommon([
        (ProcessorTokenizerNltkEn(), ['text'], {0: 'tokens'}),
        (ProcessorSentenceSplitter(), ['tokens'], {0: 'sentences'}),
    ])
def dump_table(table: Table) -> None:
    """Pickle `table` to DATA_ROOT/<dataset>/<version>.table.pkl."""
    out_path = (DATA_ROOT / table.dataset) / f'{table.version}.table.pkl'
    with open(out_path, 'wb') as f:
        pickle.dump(table, f, protocol=PKL_PROTO)
class ValidateSpans(object):
    """Runs span validation over a system file by temporarily swapping in
    per-issue validation levels on Document.VALIDATION."""

    def __init__(self, system, duplicate='error', crossing='warn', nested='ignore'):
        # Each of duplicate/crossing/nested is one of 'ignore'/'warn'/'error'.
        self.system = system
        self.duplicate = duplicate
        self.crossing = crossing
        self.nested = nested

    def __call__(self):
        """Read the whole system file with the configured validation levels.

        FIX: restoration of Document.VALIDATION now happens in a finally block,
        so the global is not left mutated when validation raises (the 'error'
        level presumably raises — previously that skipped the restore).
        """
        OLD_VALIDATION = Document.VALIDATION
        Document.VALIDATION = {k: getattr(self, k) for k in OLD_VALIDATION}
        try:
            # Consume the reader fully so every document is validated.
            list(Reader(self.system))
        finally:
            Document.VALIDATION = OLD_VALIDATION

    # FIX: restored @classmethod — the first parameter is `cls` and the method
    # registers the class itself as the argparse default.
    @classmethod
    def add_arguments(cls, p):
        """Attach this command's CLI arguments to parser `p`."""
        CHOICES = ['ignore', 'warn', 'error']
        p.add_argument('system', nargs='?', default=sys.stdin, type=argparse.FileType('r'), metavar='FILE')
        p.add_argument('--duplicate', default='error', choices=CHOICES)
        p.add_argument('--crossing', default='warn', choices=CHOICES)
        p.add_argument('--nested', default='ignore', choices=CHOICES)
        p.set_defaults(cls=cls)
        return p
def main():
    """Convert scene-graph JSON into pickled graph matrices and descriptions.

    Reads scenes from --input-scene, merges the edge vocabulary into
    --vocab-json, and writes four pickled dicts (connectivity matrices, edge
    matrices, vertex one-hot vectors, textual descriptions) keyed by image
    index to --output-scene.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-scene', required=True, help='scene graph json file')
    parser.add_argument('--vocab-json', required=True, help='vocab file')
    parser.add_argument('--output-scene', required=True, help='output file')
    args = parser.parse_args()

    print('Loading data')
    with open(args.input_scene, 'r') as f:
        ori_scenes = json.load(f)['scenes']

    vocab = {'edge_token_to_idx': edge_wtoi}
    if (not os.path.exists(args.vocab_json)):
        raise Exception('must give vocab.json produced by questions')
    # FIX: read the existing vocab through a context manager; the previous
    # json.load(open(...)) leaked the file handle.
    with open(args.vocab_json, 'r') as f:
        old_vocab = json.load(f)
    vocab.update(old_vocab)
    print('Update existed vocab')
    with open(args.vocab_json, 'w') as f:
        json.dump(vocab, f, indent=4)

    print('Construct')
    conn_matrixes = {}
    edge_matrixes = {}
    vertex_vectors = {}
    scene_descs = {}
    for scene in ori_scenes:
        image_index = scene['image_index']
        (conn_M, edge_M) = get_graph_matrix(edge_wtoi, scene['objects'], scene['relationships'])
        scene_vertex_vector = get_onehot_attributes_objects(scene['objects'])
        scene_desc = get_descriptions(scene['objects'], scene['relationships'])
        # Image indices must be unique across scenes.
        assert (image_index not in scene_descs)
        conn_matrixes[image_index] = conn_M
        edge_matrixes[image_index] = edge_M
        vertex_vectors[image_index] = scene_vertex_vector
        scene_descs[image_index] = scene_desc

    print('Writing output')
    # Four sequential pickle records; readers must unpickle in the same order.
    with open(args.output_scene, 'wb') as f:
        pickle.dump(conn_matrixes, f)
        pickle.dump(edge_matrixes, f)
        pickle.dump(vertex_vectors, f)
        pickle.dump(scene_descs, f)
def error(alpha, n):
    """Return (relative error, relative std) of the ndd entropy estimate.

    Draws category probabilities from Dirichlet(alpha), samples n multinomial
    counts, and compares the estimated entropy against the true entropy of the
    drawn distribution.
    """
    k = len(alpha)
    pvals = dirichlet(alpha)
    counts = multinomial(n, pvals)
    true_h = sp_entropy(pvals)
    est_h, est_std = ndd.entropy(counts, k=k, return_std=True)
    return ((est_h - true_h) / true_h, est_std / true_h)