code stringlengths 101 5.91M |
|---|
class RandomTranslate(object):
    """Jointly translate an image/mask pair by a random (x, y) offset.

    ``offset`` is a 2-tuple (max_x, max_y); each call draws a uniform shift
    in [-max_x, max_x] x [-max_y, max_y].  The image is cropped and
    reflection-padded back to its original size, while the mask is shifted
    by an affine transform whose exposed area is filled with 250
    (presumably the dataset's ignore label -- TODO confirm).
    """
    def __init__(self, offset):
        # offset: (max_horizontal_shift, max_vertical_shift) in pixels.
        self.offset = offset
    def __call__(self, img, mask):
        assert (img.size == mask.size)
        # Uniformly sample signed shifts in [-offset, offset].
        x_offset = int(((2 * (random.random() - 0.5)) * self.offset[0]))
        y_offset = int(((2 * (random.random() - 0.5)) * self.offset[1]))
        # For a negative shift the crop starts at 0; the strip removed on the
        # opposite side is restored by the padding below.
        x_crop_offset = x_offset
        y_crop_offset = y_offset
        if (x_offset < 0):
            x_crop_offset = 0
        if (y_offset < 0):
            y_crop_offset = 0
        # PIL size is (width, height); tf.crop takes (top, left, height, width).
        cropped_img = tf.crop(img, y_crop_offset, x_crop_offset, (img.size[1] - abs(y_offset)), (img.size[0] - abs(x_offset)))
        # Padding tuple is (left, top, right, bottom): pad on the side the
        # content moved away from -- one case per sign combination.
        if ((x_offset >= 0) and (y_offset >= 0)):
            padding_tuple = (0, 0, x_offset, y_offset)
        elif ((x_offset >= 0) and (y_offset < 0)):
            padding_tuple = (0, abs(y_offset), x_offset, 0)
        elif ((x_offset < 0) and (y_offset >= 0)):
            padding_tuple = (abs(x_offset), 0, 0, y_offset)
        elif ((x_offset < 0) and (y_offset < 0)):
            padding_tuple = (abs(x_offset), abs(y_offset), 0, 0)
        # The mask is translated (not reflected) so labels stay aligned with
        # the image content; newly exposed pixels get fill value 250.
        return (tf.pad(cropped_img, padding_tuple, padding_mode='reflect'), tf.affine(mask, translate=((- x_offset), (- y_offset)), scale=1.0, angle=0.0, shear=0.0, fillcolor=250))
def format_submit(X, sub_id, submit_dir='../submissions/'):
    """Serialize the prediction dict ``X`` and write it out as a submission CSV.

    List values are flattened to comma-separated strings in place; iteration
    stops at the first non-list value (same early exit as before).
    """
    header = ['user_id', 'items']
    for key in X:
        value = X[key]
        if not isinstance(value, list):
            break
        X[key] = ','.join(str(item) for item in value)
    frame = pd.DataFrame(X.items())
    write_csv(frame, join(submit_dir, sub_id), header)
    return
def get_same_padding_conv2d(image_size=None):
    """Choose the 'same padding' Conv2d flavour for the given image size.

    Returns the dynamic-padding class when ``image_size`` is None, otherwise
    a partial of the static-padding class bound to that size.
    """
    if image_size is None:
        return Conv2dDynamicSamePadding
    return partial(Conv2dStaticSamePadding, image_size=image_size)
class TFData2VecVisionModel(metaclass=DummyObject):
    """Import placeholder that raises a helpful error unless TensorFlow is installed."""

    # Backend(s) that must be available before this class can be used.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fail fast with an informative message when the 'tf' backend is missing.
        requires_backends(self, ['tf'])
class PReLU_SENet(nn.Module):
    """SENet-style backbone that uses PReLU as the stem activation.

    ``block`` is the residual block class and ``num_blocks`` gives the block
    count per stage; the four stages run at 64/128/256/512 channels with
    strides 1/2/2/2, followed by 4x4 average pooling and a linear classifier.
    """

    def __init__(self, block, num_blocks, num_classes=100):
        super(PReLU_SENet, self).__init__()
        self.in_planes = 64
        # Stem: 3x3 conv keeping spatial size; BN + PReLU applied in forward().
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
        self.prelu = nn.PReLU()

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.prelu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def _dora_array(state: State, riichi):
    """Return a 34-length boolean array marking the current dora tiles.

    With ``riichi`` set, ura-dora indicators (one extra pair per kan) are
    included in addition to the face-up indicators.
    """
    def next(tile):
        # Map a dora *indicator* to the actual dora tile.  Suited tiles
        # (0..26) wrap within their own 9-tile suit -- note the `* 9` that
        # restores the suit base (the previous code used `tile // 9` alone,
        # collapsing every suited indicator >= 9 onto the wrong tile, e.g.
        # indicator 9 mapped to 2 instead of 10).  Winds (27..30) wrap mod 4
        # and dragons (31..33) wrap mod 3.  jax.lax.cond keeps it traceable.
        return jax.lax.cond((tile < 27), (lambda : (((tile // 9) * 9) + ((tile + 1) % 9))), (lambda : jax.lax.cond((tile < 31), (lambda : (27 + ((tile + 1) % 4))), (lambda : (31 + ((tile + 1) % 3))))))
    dora = jnp.zeros(34, dtype=jnp.bool_)
    # fori_loop keeps the computation jittable; one extra indicator pair is
    # revealed per kan.  NOTE(review): the riichi branch reads indicators from
    # state._deck[5 + 2i] and state._doras[4 + 2i] (asymmetric with the
    # non-riichi branch) -- confirm the intended deck layout upstream.
    return jax.lax.cond(riichi, (lambda : jax.lax.fori_loop(0, (state._n_kan + 1), (lambda i, arr: arr.at[next(state._deck[(5 + (2 * i))])].set(TRUE).at[next(state._doras[(4 + (2 * i))])].set(TRUE)), dora)), (lambda : jax.lax.fori_loop(0, (state._n_kan + 1), (lambda i, arr: arr.at[next(state._doras[(5 + (2 * i))])].set(TRUE)), dora)))
def rank_ZZ(n=700, min=0, max=9, system='sage'):
    """Benchmark: CPU time to compute the rank of a random n x (n+10) integer matrix.

    Entries are uniform in [min, max]; ``system`` selects the backend
    ('sage' or 'magma') and the elapsed CPU seconds are returned.

    NOTE(review): ``min``/``max`` shadow the builtins (kept for caller
    compatibility), and ``verbose`` is read from an enclosing/global scope
    not visible here -- confirm it is defined at module level.
    """
    if (system == 'sage'):
        # y is an exclusive upper bound in Sage's random_matrix, hence max + 1.
        A = random_matrix(ZZ, n, (n + 10), x=min, y=(max + 1))
        t = cputime()
        v = A.rank()
        return cputime(t)
    elif (system == 'magma'):
        # Equivalent computation expressed as a Magma program; 's' holds the
        # Magma-side CPU time for the rank computation alone.
        code = ('\nn := %s;\nA := RMatrixSpace(IntegerRing(), n, n+10)![Random(%s,%s) : i in [1..n*(n+10)]];\nt := Cputime();\nK := Rank(A);\ns := Cputime(t);\n' % (n, min, max))
        if verbose:
            print(code)
        magma.eval(code)
        return float(magma.eval('s'))
    else:
        raise ValueError(('unknown system "%s"' % system))
def VGG16(include_top=True, weights='imagenet', input_tensor=None):
    """Instantiate the VGG16 architecture (legacy Keras 1 API).

    Optionally loads ImageNet weights for either Theano or TensorFlow
    dimension ordering; without the classification top the spatial
    dimensions are left unconstrained.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either `None` (random initialization) or `imagenet` (pre-training on ImageNet).')
    # Resolve the expected input shape from the backend's dimension ordering.
    if K.image_dim_ordering() == 'th':
        input_shape = (3, 224, 224) if include_top else (3, None, None)
    else:
        input_shape = (224, 224, 3) if include_top else (None, None, 3)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    elif not K.is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor)
    else:
        img_input = input_tensor
    # Five conv blocks: (#filters, #convs) per block, each ending in a 2x2 max-pool.
    x = img_input
    for block_idx, (filters, n_convs) in enumerate([(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start=1):
        for conv_idx in range(1, n_convs + 1):
            x = Convolution2D(filters, 3, 3, activation='relu', border_mode='same', name='block%d_conv%d' % (block_idx, conv_idx))(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block%d_pool' % block_idx)(x)
    if include_top:
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dense(1000, activation='softmax', name='predictions')(x)
    model = Model(img_input, x)
    if weights == 'imagenet':
        print('K.image_dim_ordering:', K.image_dim_ordering())
        if K.image_dim_ordering() == 'th':
            if include_top:
                weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5', TH_WEIGHTS_PATH, cache_subdir='models')
            else:
                weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', TH_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'tensorflow':
                # Kernels were saved in Theano layout; convert for TF execution.
                warnings.warn('You are using the TensorFlow backend, yet you are using the Theano image dimension ordering convention (`image_dim_ordering="th"`). For best performance, set `image_dim_ordering="tf"` in your Keras config at ~/.keras/keras.json.')
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models')
            else:
                weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model
def generate_points_for_circle(centerx, centery, r, density_factor):
    """Sample points along a circle of radius ``r`` centred at (centerx, centery).

    The sample count scales with the circumference times ``density_factor``.
    Fixes over the previous version: uses ``math.pi`` instead of the
    truncated constant 3.141 (which skewed both the count and the final
    angle), and drops the unreachable ``pass`` after the return.
    """
    num_points = int(2 * math.pi * r * density_factor)
    angles = np.linspace(0, 2.0 * math.pi, num_points)
    # (sin, cos) parametrisation preserves the original orientation and
    # start point (top of the circle at angle 0).
    return [Point((math.sin(angle) * r) + centerx, (math.cos(angle) * r) + centery)
            for angle in angles]
class TransformerInitModel(nn.Module):
    """Base module holding the config plus the standard Transformer weight init."""

    def __init__(self, config, output_attentions, *inputs, **kwargs):
        super(TransformerInitModel, self).__init__()
        self.config = config
        self.output_attentions = output_attentions

    def init_Transformer_weights(self, module):
        """Initialise one submodule: normal(0, initializer_range) weights,
        identity LayerNorm, and zeroed Linear biases."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, TransformerLayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and (module.bias is not None):
            module.bias.data.zero_()
# NOTE(review): this bare call looks like a mangled `@add_start_docstrings(...)`
# decorator from the upstream source -- as written it evaluates the docstring
# constant and discards the result; confirm against the original.
_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin():
    """Tokenizer mixin for DPR readers.

    Encodes (question, title, passage) triples into single input sequences
    and decodes the best answer spans from a reader's start/end/relevance
    logits.
    """
    def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[(bool, str)]=False, truncation: Union[(bool, str)]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding:
        """Tokenize questions against titles/texts.

        With neither titles nor texts this falls back to plain tokenization;
        with only one of the two, it is used as the text pair.  Otherwise each
        question+title is encoded with special tokens, the passage text is
        appended without them, and the pair is truncated to ``max_length``
        when truncation is requested.
        """
        if ((titles is None) and (texts is None)):
            return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif ((titles is None) or (texts is None)):
            text_pair = (titles if (texts is None) else texts)
            return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        # Normalize str arguments to lists; a single question is broadcast
        # over all passages.
        titles = (titles if (not isinstance(titles, str)) else [titles])
        texts = (texts if (not isinstance(texts, str)) else [texts])
        n_passages = len(titles)
        questions = (questions if (not isinstance(questions, str)) else ([questions] * n_passages))
        assert (len(titles) == len(texts)), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        # Concatenate question+title ids with passage ids; the combined
        # sequence is truncated only when both max_length and truncation are set.
        encoded_inputs = {'input_ids': [((encoded_question_and_title + encoded_text)[:max_length] if ((max_length is not None) and truncation) else (encoded_question_and_title + encoded_text)) for (encoded_question_and_title, encoded_text) in zip(encoded_question_and_titles, encoded_texts)]}
        if (return_attention_mask is not False):
            # 1 for real tokens, 0 for padding.
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int((input_id != self.pad_token_id)) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> List[DPRSpanPrediction]:
        """Pick the best answer spans across passages, ordered by relevance.

        Passages are visited from most to least relevant; within each, up to
        ``num_spans_per_passage`` non-overlapping spans are scored, stopping
        once ``num_spans`` predictions have been collected.
        """
        input_ids = reader_input['input_ids']
        (start_logits, end_logits, relevance_logits) = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # Passage text starts right after the separator that ends
            # question+title (first sep token found at index >= 2).
            passage_offset = (sequence_ids.index(self.sep_token_id, 2) + 1)
            # Ignore trailing padding when slicing the logits.
            if (sequence_ids[(- 1)] == self.pad_token_id):
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for (start_index, end_index) in best_spans:
                # Shift passage-relative indices back to sequence-relative ones.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(DPRSpanPrediction(span_score=(start_logits[doc_id][start_index] + end_logits[doc_id][end_index]), relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:(end_index + 1)])))
            if (len(nbest_spans_predictions) >= num_spans):
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        """Return up to ``top_spans`` non-overlapping (start, end) pairs ranked
        by start_logit + end_logit, with spans capped at ``max_answer_length``."""
        scores = []
        for (start_index, start_score) in enumerate(start_logits):
            for (answer_length, end_score) in enumerate(end_logits[start_index:(start_index + max_answer_length)]):
                scores.append(((start_index, (start_index + answer_length)), (start_score + end_score)))
        scores = sorted(scores, key=(lambda x: x[1]), reverse=True)
        chosen_span_intervals = []
        for ((start_index, end_index), score) in scores:
            assert (start_index <= end_index), f'Wrong span indices: [{start_index}:{end_index}]'
            length = ((end_index - start_index) + 1)
            assert (length <= max_answer_length), f'Span is too long: {length} > {max_answer_length}'
            # Reject spans nested inside (or containing) an already chosen one.
            if any([((start_index <= prev_start_index <= prev_end_index <= end_index) or (prev_start_index <= start_index <= end_index <= prev_end_index)) for (prev_start_index, prev_end_index) in chosen_span_intervals]):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if (len(chosen_span_intervals) == top_spans):
                break
        return chosen_span_intervals
def module_init():
    """Create the pybindgen root module for the ns-3 tap-bridge bindings."""
    return Module('ns.tap_bridge', cpp_namespace='::ns3')
def Gamma_constructor(N):
    """Return the principal congruence subgroup Gamma(N), one cached instance per level.

    Level 1 is the full modular group SL2Z.
    """
    if N == 1:
        return SL2Z
    if N not in _gamma_cache:
        _gamma_cache[N] = Gamma_class(N)
    return _gamma_cache[N]
# NOTE(review): this bare call looks like a mangled `@run_spec_function('viz_wiz')`
# decorator -- as written its result is discarded; confirm against upstream.
_spec_function('viz_wiz')
def get_viz_wiz_spec() -> RunSpec:
    """Build the HELM RunSpec for the VizWiz vision-language scenario."""
    name = 'viz_wiz'
    return RunSpec(
        name=name,
        scenario_spec=ScenarioSpec(class_name='helm.benchmark.scenarios.vision_language.viz_wiz_scenario.VizWizScenario', args={}),
        adapter_spec=get_vlm_generation_adapter_spec(input_prefix='User: ', input_suffix='<end_of_utterance>', output_prefix='\nAssistant: ', output_suffix='<end_of_utterance>', stop_sequences=['<end_of_utterance>']),
        metric_specs=get_exact_match_metric_specs(),
        groups=[name],
    )
class Score(nn.Module):
    """Two-hidden-layer MLP scorer mapping an embedding to a single scalar."""

    def __init__(self, embeds_dim, hidden_dim=150):
        super().__init__()
        layers = [
            nn.Linear(embeds_dim, hidden_dim), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(hidden_dim, 1),
        ]
        self.score = nn.Sequential(*layers)

    def forward(self, x):
        # (batch, embeds_dim) -> (batch, 1)
        return self.score(x)
class Stream_zero(Stream):
    """The stream of all zeros -- the zero element for lazy series arithmetic."""

    def __init__(self):
        # Sparse flag True; the order of the zero stream is +infinity.
        super().__init__(True)
        self._approximate_order = infinity

    def __getitem__(self, n):
        # Every coefficient is the ring zero.
        return ZZ.zero()

    def order(self):
        return self._approximate_order

    def __eq__(self, other):
        return (self is other) or isinstance(other, Stream_zero)

    def __ne__(self, other):
        # Identical objects / other zero streams are never unequal; anything
        # else is unequal exactly when it is nonzero.
        if (self is other) or isinstance(other, Stream_zero):
            return False
        return other.is_nonzero()

    def __hash__(self):
        # All zero streams hash alike (consistent with __eq__).
        return 0
class TsStruct(StructBuilder, TsBase):
    """TypeScript code-generation wrapper around an LCM struct definition."""

    def __init__(self, package, struct, args):
        super(TsStruct, self).__init__(package, struct, args)
        # Re-wrap the generic members/constants as TypeScript-aware members.
        self.members = [TsMember(m) for m in self.members]
        self.constants = [TsMember(m) for m in self.constants]

    def complex_members(self):
        """Yield members whose type is not a primitive."""
        return (m for m in self.members if not m.type_ref.is_primitive_type())

    def has_complex_members(self):
        """True when at least one member is non-primitive."""
        return any(True for _ in self.complex_members())

    def include_list(self):
        """Collect the sorted import list: primitive library imports first,
        followed by imports for other LCM-generated types."""
        primitive_includes = set()
        lcm_includes = set()
        for member in self.members + self.constants:
            # Array dimensions sized by an auto member need that member's
            # primitive import as well.
            if member.ndim:
                for dim in member.dims:
                    if dim.auto_member:
                        primitive_includes.add(TsInclude(member=dim.auto_member, is_primitive=True, prefix=self.args.typescript_library_path))
            if member.type_ref.is_primitive_type():
                # Skip duplicated primitive type names and raw byte arrays.
                if (member.type_ref.name in TS_DUPLICATE_TYPES) or member.is_byte_array:
                    continue
                primitive_includes.add(TsInclude(member=member, is_primitive=True, prefix=self.args.typescript_library_path))
            elif member.type_ref.name != self.struct.name:
                # Self-references need no import.
                lcm_includes.add(TsInclude(member=member, prefix=self.args.typescript_import_path))
        return sorted(primitive_includes, key=str) + sorted(lcm_includes, key=str)
class FilteredModuleTestCluster(TestCluster):
    """TestCluster wrapper that focuses generation on not-yet-covered targets.

    Delegates everything to a wrapped ModuleTestCluster, but tracks which
    accessible objects still have uncovered fitness targets; once every
    target inside an accessible is covered (reported via the archive
    callback) the accessible is dropped from the set offered for generation.

    NOTE(review): several delegating methods (`type_system`, `linenos`,
    `generators`, `modifiers`, `function_data_for_accessibles`,
    `accessible_objects_under_test`) return delegate *attributes* without
    calling them, suggesting they are `@property`s whose decorators were lost
    in extraction -- confirm against the original module.
    """
    def type_system(self) -> TypeSystem:
        return self.__delegate.type_system
    def update_return_type(self, accessible: GenericCallableAccessibleObject, new_type: ProperType) -> None:
        self.__delegate.update_return_type(accessible, new_type)
    def update_parameter_knowledge(self, accessible: GenericCallableAccessibleObject, param_name: str, knowledge: tt.UsageTraceNode) -> None:
        self.__delegate.update_parameter_knowledge(accessible, param_name, knowledge)
    def linenos(self) -> int:
        return self.__delegate.linenos
    def log_cluster_statistics(self) -> None:
        self.__delegate.log_cluster_statistics()
    def add_generator(self, generator: GenericAccessibleObject) -> None:
        self.__delegate.add_generator(generator)
    def add_accessible_object_under_test(self, objc: GenericAccessibleObject, data: _CallableData) -> None:
        self.__delegate.add_accessible_object_under_test(objc, data)
    def add_modifier(self, typ: TypeInfo, obj: GenericAccessibleObject) -> None:
        self.__delegate.add_modifier(typ, obj)
    def function_data_for_accessibles(self) -> dict[(GenericAccessibleObject, _CallableData)]:
        return self.__delegate.function_data_for_accessibles
    def track_statistics_values(self, tracking_fun: Callable[([RuntimeVariable, Any], None)]) -> None:
        self.__delegate.track_statistics_values(tracking_fun)
    def __init__(self, delegate: ModuleTestCluster, archive: arch.Archive, subject_properties: SubjectProperties, targets: OrderedSet[ff.TestCaseFitnessFunction]) -> None:
        """Index accessibles by code-object id and map each to its targets."""
        self.__delegate = delegate
        self.__subject_properties = subject_properties
        # Instrumented code objects carry their id as JSON in co_consts[0];
        # use that to map code-object ids back to accessible objects.
        self.__code_object_id_to_accessible_objects: dict[(int, GenericCallableAccessibleObject)] = {json.loads(acc.callable.__code__.co_consts[0])[CODE_OBJECT_ID_KEY]: acc for acc in delegate.accessible_objects_under_test if (isinstance(acc, GenericCallableAccessibleObject) and hasattr(acc.callable, '__code__'))}
        self.__accessible_to_targets: dict[(GenericCallableAccessibleObject, OrderedSet)] = {acc: OrderedSet() for acc in self.__code_object_id_to_accessible_objects.values()}
        for target in targets:
            if ((acc := self.__get_accessible_object_for_target(target)) is not None):
                targets_for_acc = self.__accessible_to_targets[acc]
                targets_for_acc.add(target)
        # Get notified whenever the archive marks a target as covered.
        archive.add_on_target_covered(self.on_target_covered)
    def __get_accessible_object_for_target(self, target: ff.TestCaseFitnessFunction) -> (GenericCallableAccessibleObject | None):
        # Walk up the code-object parent chain until an accessible is found.
        code_object_id: (int | None) = target.code_object_id
        while (code_object_id is not None):
            if ((acc := self.__code_object_id_to_accessible_objects.get(code_object_id, None)) is not None):
                return acc
            code_object_id = self.__subject_properties.existing_code_objects[code_object_id].parent_code_object_id
        return None
    def on_target_covered(self, target: ff.TestCaseFitnessFunction) -> None:
        """Archive callback: forget the covered target, dropping its accessible
        entirely once no uncovered targets remain in it."""
        acc = self.__get_accessible_object_for_target(target)
        if (acc is not None):
            targets_for_acc = self.__accessible_to_targets.get(acc)
            assert (targets_for_acc is not None)
            targets_for_acc.remove(target)
            if (len(targets_for_acc) == 0):
                self.__accessible_to_targets.pop(acc)
                LOGGER.debug('Removed %s from test cluster because all targets within it have been covered.', acc)
    def accessible_objects_under_test(self) -> OrderedSet[GenericAccessibleObject]:
        # Fall back to the full delegate set once everything is covered.
        accessibles = self.__accessible_to_targets.keys()
        if (len(accessibles) == 0):
            return self.__delegate.accessible_objects_under_test
        return OrderedSet(accessibles)
    def num_accessible_objects_under_test(self) -> int:
        return self.__delegate.num_accessible_objects_under_test()
    def get_generators_for(self, typ: ProperType) -> tuple[(OrderedSet[GenericAccessibleObject], bool)]:
        return self.__delegate.get_generators_for(typ)
    def get_modifiers_for(self, typ: ProperType) -> OrderedSet[GenericAccessibleObject]:
        return self.__delegate.get_modifiers_for(typ)
    def generators(self) -> dict[(ProperType, OrderedSet[GenericAccessibleObject])]:
        return self.__delegate.generators
    def modifiers(self) -> dict[(TypeInfo, OrderedSet[GenericAccessibleObject])]:
        return self.__delegate.modifiers
    def get_random_accessible(self) -> (GenericAccessibleObject | None):
        # Prefer accessibles that still have uncovered targets.
        accessibles = self.__accessible_to_targets.keys()
        if (len(accessibles) == 0):
            return self.__delegate.get_random_accessible()
        return randomness.choice(OrderedSet(accessibles))
    def get_random_call_for(self, typ: ProperType) -> GenericAccessibleObject:
        return self.__delegate.get_random_call_for(typ)
    def get_all_generatable_types(self) -> list[ProperType]:
        return self.__delegate.get_all_generatable_types()
    def select_concrete_type(self, typ: ProperType) -> ProperType:
        return self.__delegate.select_concrete_type(typ)
def test_countless2d():
    """Exercise every countless2d downsampling variant on 2x2 corner cases."""
    def run_cases(fn, zero_aware):
        shape = (2, 2, 1, 1)
        make = lambda rows, dtype=None: np.array(rows, dtype=dtype).reshape(shape)
        case1 = make([[1, 2], [3, 4]])
        case2 = make([[1, 1], [2, 3]])
        case1z = make([[0, 1], [2, 3]])
        case2z = make([[0, 0], [2, 3]])
        case3 = make([[1, 1], [2, 2]])
        case4 = make([[1, 2], [2, 2]])
        case5 = make([[5, 5], [5, 5]])
        is_255_handled = make([[255, 255], [1, 2]], dtype=np.uint8)
        # Zero-aware variants resolve ties involving 0 differently.
        if zero_aware:
            assert fn(case1z) == [[[[3]]]]
            assert fn(case2z) == [[[[0]]]]
        else:
            assert fn(case1) == [[[[4]]]]
            assert fn(case2) == [[[[1]]]]
        assert fn(case3) == [[[[1]]]]
        assert fn(case4) == [[[[2]]]]
        assert fn(case5) == [[[[5]]]]
        # 255 must not be confused with an internal sentinel for uint8 input.
        assert fn(is_255_handled) == [[[[255]]]]
        # Output dtype must match the input dtype.
        assert fn(case1).dtype == case1.dtype
    for fn in (countless2d.simplest_countless, countless2d.quick_countless,
               countless2d.quickest_countless, countless2d.stippled_countless):
        run_cases(fn, False)
    for fn in (countless2d.zero_corrected_countless, countless2d.countless,
               countless2d.countless_if):
        print(fn.__name__)
        run_cases(fn, True)
def register_Ns3RngRsp_methods(root_module, cls):
    """Register pybindgen constructors and methods for ns-3's `RngRsp` class.

    Generated-style binding code: one `add_method` call per C++ member
    function of the WiMAX ranging-response header, mirroring its public API.
    """
    ## Copy and default constructors.
    cls.add_constructor([param('ns3::RngRsp const &', 'arg0')])
    cls.add_constructor([])
    ## Header (de)serialization interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## Const getters for all header fields.
    cls.add_method('GetAasBdcastPermission', 'uint8_t', [], is_const=True)
    cls.add_method('GetBasicCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetDlFreqOverride', 'uint32_t', [], is_const=True)
    cls.add_method('GetDlOperBurstProfile', 'uint16_t', [], is_const=True)
    cls.add_method('GetFrameNumber', 'uint32_t', [], is_const=True)
    cls.add_method('GetInitRangOppNumber', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetMacAddress', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetOffsetFreqAdjust', 'uint32_t', [], is_const=True)
    cls.add_method('GetPowerLevelAdjust', 'uint8_t', [], is_const=True)
    cls.add_method('GetPrimaryCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetRangStatus', 'uint8_t', [], is_const=True)
    cls.add_method('GetRangSubchnl', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTimingAdjust', 'uint32_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUlChnlIdOverride', 'uint8_t', [], is_const=True)
    ## Debug printing and serialization.
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## Setters mirroring the getters above.
    cls.add_method('SetAasBdcastPermission', 'void', [param('uint8_t', 'aasBdcastPermission')])
    cls.add_method('SetBasicCid', 'void', [param('ns3::Cid', 'basicCid')])
    cls.add_method('SetDlFreqOverride', 'void', [param('uint32_t', 'dlFreqOverride')])
    cls.add_method('SetDlOperBurstProfile', 'void', [param('uint16_t', 'dlOperBurstProfile')])
    cls.add_method('SetFrameNumber', 'void', [param('uint32_t', 'frameNumber')])
    cls.add_method('SetInitRangOppNumber', 'void', [param('uint8_t', 'initRangOppNumber')])
    cls.add_method('SetMacAddress', 'void', [param('ns3::Mac48Address', 'macAddress')])
    cls.add_method('SetOffsetFreqAdjust', 'void', [param('uint32_t', 'offsetFreqAdjust')])
    cls.add_method('SetPowerLevelAdjust', 'void', [param('uint8_t', 'powerLevelAdjust')])
    cls.add_method('SetPrimaryCid', 'void', [param('ns3::Cid', 'primaryCid')])
    cls.add_method('SetRangStatus', 'void', [param('uint8_t', 'rangStatus')])
    cls.add_method('SetRangSubchnl', 'void', [param('uint8_t', 'rangSubchnl')])
    cls.add_method('SetTimingAdjust', 'void', [param('uint32_t', 'timingAdjust')])
    cls.add_method('SetUlChnlIdOverride', 'void', [param('uint8_t', 'ulChnlIdOverride')])
    return
def get_init_with_noise(model, X, y):
    """Perturb X with Gaussian noise until the model misclassifies every sample.

    Returns a starting point for attacks that need a misclassified init:
    each sample still predicted as its true label ``y`` is replaced by a
    noisy, [0, 1]-clipped copy of the original input until none remain.
    """
    init = X.clone()
    preds = model(X).argmax(1)
    while any(preds == y):
        still_correct = atleast_kdim(preds == y, len(X.shape))
        # Fresh noise is always applied to the *original* X, not to init.
        noisy = (X + (0.5 * torch.randn_like(X))).clip(0, 1)
        init = torch.where(still_correct, noisy, init)
        preds = model(init).argmax(1)
    return init
def main(fn=None, *, args: Optional[List[str]]=None, config_dir: Optional[str]=DEFAULT_CONFIG_DIR):
    """Decorator that parses a config object and passes it as ``fn``'s first argument.

    Usable bare (``@main``) or with arguments (``@main(args=..., config_dir=...)``);
    the with-arguments form is handled by returning a partial of this function.
    The config class is taken from the annotation of ``fn``'s first parameter.
    """
    if fn is None:
        # Called with arguments only: return a decorator awaiting the function.
        return functools.partial(main, args=args, config_dir=config_dir)
    _cmdline_args = sys.argv[1:] if args is None else args

    # Fix: the original had a bare `(fn)` expression statement here -- a no-op
    # residue of this decorator -- so wrapper_inner lost fn's metadata.
    @functools.wraps(fn)
    def wrapper_inner(*args, **kwargs):
        (config_path, cmdline_args) = _maybe_get_config_path_and_cmdline_args(_cmdline_args)
        # Accept the path as given or with a yaml/yml suffix, and also look
        # for each candidate under config_dir.
        paths_to_check = [config_path, f'{config_path}.yaml', f'{config_path}.yml']
        if (config_path is not None) and (config_dir is not None):
            paths_to_check.extend([os.path.join(config_dir, p) for p in paths_to_check])
        for path in paths_to_check:
            if (path is not None) and os.path.exists(path):
                config_path = path
                break
        # The config class comes from the first parameter's annotation.
        argspec = inspect.getfullargspec(fn)
        argtype = argspec.annotations[argspec.args[0]]
        cfg = parse(config_class=argtype, config_path=config_path, args=cmdline_args)
        response = fn(cfg, *args, **kwargs)
        return response
    return wrapper_inner
class ConstantConvReuseSubstitutionTest(BaseConstantConvSubstitutionTest):
    """Test net exercising the constant-conv substitution when the same
    constant weights are reused by several functional conv calls."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    class ConvNet(nn.Module):
        # Convolutions are applied via F.conv2d with weights held by otherwise
        # unused nn.Conv2d modules, so the weights appear as constants in the
        # traced graph.
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 16, kernel_size=(3, 3), bias=False)
            self.conv2 = nn.Conv2d(16, 8, kernel_size=(1, 1), bias=True)
            self.conv3 = nn.Conv2d(8, 8, kernel_size=(1, 1), bias=True)
        def forward(self, x):
            # conv1 was created with bias=False, so self.conv1.bias is None here.
            x1 = F.conv2d(x, weight=self.conv1.weight, bias=self.conv1.bias)
            x1 = (F.hardswish(x1) + x1)
            x2 = F.conv2d(x1, weight=self.conv2.weight, bias=self.conv2.bias)
            x3 = (F.relu(x2) - x2)
            # conv3's constant weights are used twice below -- the "reuse" under test.
            x4 = (F.conv2d(x3, weight=self.conv3.weight, bias=self.conv3.bias) + x2)
            x4 = F.gelu(x4)
            y = (F.conv2d(x4, weight=self.conv3.weight, bias=self.conv3.bias) + x3)
            return y
    def create_networks(self):
        # The framework calls this to obtain the network under test.
        return self.ConvNet()
def main():
    """Regenerate the Chebyshev coefficient tables in cephes' expn.h.

    Writes the tables produced by ``generate_A`` into ``expn.h.new`` and then
    atomically replaces ``expn.h``.  Reconstructed fix: the original had
    syntactically invalid bare ``.join(...)`` statements (the ``tmp = ', '``
    assignments were lost) and wrote the literal text ``{tmp}`` into the
    header instead of the joined coefficient lists.
    """
    print(__doc__)
    fn = os.path.join('..', 'cephes', 'expn.h')
    K = 12
    A = generate_A(K)
    with open(fn + '.new', 'w') as f:
        f.write(WARNING)
        f.write(f'#define nA {len(A)}\n')
        # One static coefficient array per polynomial A_k.
        for k, Ak in enumerate(A):
            tmp = ', '.join(str(x.evalf(18)) for x in Ak.coeffs())
            f.write(f'static const double A{k}[] = {{{tmp}}};\n')
        # Pointer table over all coefficient arrays.
        tmp = ', '.join(f'A{k}' for k in range(K + 1))
        f.write(f'static const double *A[] = {{{tmp}}};\n')
        # Degrees of the polynomials, parallel to the pointer table.
        tmp = ', '.join(str(Ak.degree()) for Ak in A)
        f.write(f'static const int Adegs[] = {{{tmp}}};\n')
    os.rename(fn + '.new', fn)
class FooInterpreter(PostOrderInterpreter):
    """Post-order interpreter for the toy Foo DSL: `mult` and `plus` nodes."""

    def eval_mult(self, node, args):
        # args holds the already-evaluated operand values.
        lhs, rhs = args[0], args[1]
        return lhs * rhs

    def eval_plus(self, node, args):
        lhs, rhs = args[0], args[1]
        return lhs + rhs
class ListOffsetMeta(Meta, Generic[T]):
    """Metadata mixin for list-offset style layouts wrapping a content of type T.

    NOTE(review): several methods here (`purelist_depth`, `minmax_depth`,
    `branch_depth`, `fields`, `is_tuple`, `content`) access the same names on
    ``self._content`` as plain attributes, suggesting they are `@property`s
    whose decorators were lost in extraction -- confirm against upstream.
    """
    is_list = True
    _content: T
    def purelist_parameters(self, *keys: str) -> JSONSerializable:
        """Return the first matching parameter among ``keys``, checking this
        node's parameters first and then delegating to the content."""
        if (self._parameters is not None):
            for key in keys:
                if (key in self._parameters):
                    return self._parameters[key]
        return self._content.purelist_parameters(*keys)
    def purelist_isregular(self) -> bool:
        # Offset-based lists are ragged, never regular.
        return False
    def purelist_depth(self) -> int:
        """List depth: strings count as leaves (depth 1), otherwise content depth + 1."""
        if (self.parameter('__array__') in ('string', 'bytestring')):
            return 1
        else:
            return (self._content.purelist_depth + 1)
    def is_identity_like(self) -> bool:
        return False
    def minmax_depth(self) -> tuple[(int, int)]:
        """(min, max) nesting depth, treating strings as leaves."""
        if (self.parameter('__array__') in ('string', 'bytestring')):
            return (1, 1)
        else:
            (mindepth, maxdepth) = self._content.minmax_depth
            return ((mindepth + 1), (maxdepth + 1))
    def branch_depth(self) -> tuple[(bool, int)]:
        """(branches?, depth) -- a string terminates the recursion at depth 1."""
        if (self.parameter('__array__') in ('string', 'bytestring')):
            return (False, 1)
        else:
            (branch, depth) = self._content.branch_depth
            return (branch, (depth + 1))
    def fields(self):
        # Record fields come from the wrapped content.
        return self._content.fields
    def is_tuple(self) -> bool:
        return self._content.is_tuple
    def dimension_optiontype(self) -> bool:
        # The list dimension itself carries no option-type.
        return False
    def content(self) -> T:
        # The wrapped child layout/metadata.
        return self._content
class VideoGroundingDataset(Dataset):
    """Video temporal-grounding dataset over precomputed features or raw frames."""
    # Videos excluded per dataset/split (e.g. known-bad validation items).
    EXCLUDE_FILES = {'activitynet': {'train': [], 'val': ['v_0dkIbKXXFzI', 'v_j73Wh1olDsA']}, 'charades': {'train': [], 'val': []}}
    def __init__(self, root, dataset='activitynet', data_type='features', backbone='clip', phase='train', num_input_frames=32, num_input_sentences=16, normalize_txt_feats=True, normalize_vid_feats=True, txt_drop_ratio=0.0, exlude_short=False, glove_type='glove.6B.300d.txt'):
        """Load annotations and index per-video feature/frame locations.

        NOTE(review): the parameter name `exlude_short` (sic) is kept as-is
        because callers may pass it by keyword.
        """
        super().__init__()
        self.root = root
        self.dataset = dataset
        self.data_type = data_type
        self.backbone = backbone
        self.phase = phase
        assert (self.phase in ['train', 'val', 'test']), 'phase should be one of train/val/test.'
        self.num_input_frames = num_input_frames
        self.num_input_sentences = num_input_sentences
        self.normalize_input_txt_feats = normalize_txt_feats
        self.normalize_input_vid_feats = normalize_vid_feats
        # Sentence-feature dropout is a train-time-only augmentation.
        self.txt_drop_ratio = (txt_drop_ratio if (phase == 'train') else 0)
        self.glove_dir = os.path.join(root, 'glove', glove_type)
        dir_to_annotations = os.path.join(self.root, self.dataset, 'annotations')
        with open(os.path.join(dir_to_annotations, (self.phase + '.json'))) as j:
            self.annotations = json.load(j)
        if ('features' in self.data_type):
            # Precomputed (CLIP) feature directories, one per video id.
            dir_to_feats = os.path.join(self.root, self.dataset, 'clip_features', self.phase)
            video_ids = os.listdir(dir_to_feats)
            self.vid2data = {vid: os.path.join(dir_to_feats, vid) for vid in video_ids}
        else:
            # Raw frame directories, bucketed by the number of input frames.
            dir_to_frames = os.path.join(self.root, self.dataset, 'frames', str(num_input_frames))
            video_ids = list(self.annotations.keys())
            self.vid2data = {vid: os.path.join(dir_to_frames, vid) for vid in video_ids}
        if ('clip' in self.backbone):
            # Only the preprocessing transform is kept; the model is discarded.
            (_, self.clip_preprocess) = clip.load('ViT-B/32')
        if exlude_short:
            for exclude in self.EXCLUDE_FILES[self.dataset][self.phase]:
                self.annotations.pop(exclude)
                self.vid2data.pop(exclude)
def __len__(self):
return len(self.vid2data)
    def __getitem__(self, idx):
        """Return ``dict(annos=..., model_inputs=...)`` for the idx-th video.

        At train time, videos with more sentences than the model accepts are
        randomly subsampled; sentence/span pairs are then sorted by the sum
        of their span endpoints (earlier segments first).
        """
        video_id = list(self.annotations.keys())[idx]
        annos = self.annotations[video_id]
        annos['video_id'] = video_id
        assert (len(annos['timestamps']) == len(annos['sentences'])), 'The number of target spans and input sentences does not matches.'
        if ((self.phase == 'train') and (len(annos['sentences']) > self.num_input_sentences)):
            # Subsample (sorted to keep temporal order) and remember the
            # indices so the cached text features can be subsampled identically.
            random_idxs = np.random.choice(range(len(annos['sentences'])), self.num_input_sentences, replace=False)
            random_idxs = sorted(random_idxs)
            annos['sentences'] = [annos['sentences'][i] for i in random_idxs]
            annos['timestamps'] = [annos['timestamps'][i] for i in random_idxs]
        else:
            random_idxs = None
        sorted_by_time = sorted(zip(annos['sentences'], annos['timestamps']), key=(lambda x: (x[1][0] + x[1][1])))
        (annos['sentences'], annos['timestamps']) = map(list, zip(*sorted_by_time))
        model_inputs = dict()
        if ('features' in self.data_type):
            dir_to_feats = self.vid2data[video_id]
            model_inputs['input_txt'] = self._get_txt_feats(dir_to_feats, random_idxs=random_idxs)
            model_inputs['input_vid'] = self._get_vid_feats(dir_to_feats)
        else:
            dir_to_frames = self.vid2data[video_id]
            model_inputs['input_txt'] = self._get_txt(annos['sentences'])
            model_inputs['input_vid'] = self._get_vid(dir_to_frames)
        if (self.dataset in ['activitynet', 'charades']):
            duration = annos['duration']
        else:
            raise NotImplementedError(f'{self.dataset} is not supported.')
        # Normalize raw timestamps into model targets w.r.t. video duration.
        model_inputs['target_spans'] = self._get_target_spans(spans=annos['timestamps'], duration=duration)
        return dict(annos=annos, model_inputs=model_inputs)
def _get_txt_feats(self, dir_to_feats, random_idxs=None):
    """Load pre-extracted text features ('txt_feats.pt') for one video.

    Args:
        dir_to_feats: directory holding the feature files.
        random_idxs: optional row indices to keep (matches the sentence
            subsampling performed in __getitem__).
    """
    feature_dir = os.path.join(dir_to_feats, 'txt_feats.pt')
    txt_feats = torch.load(feature_dir).float()
    if (random_idxs is not None):
        txt_feats = txt_feats[random_idxs]
    if self.normalize_input_txt_feats:
        txt_feats = l2_normalize_tensor(txt_feats)
    if (self.txt_drop_ratio > 0):
        # Zero random rows as input dropout. NOTE(review): not gated on
        # self.phase here — presumably only enabled for training via config.
        txt_feats = self._random_drop_rows(txt_feats)
    return txt_feats
def _get_vid_feats(self, dir_to_feats):
    """Load pre-extracted video features ('vid_feats_<num_input_frames>.pt')."""
    feature_dir = os.path.join(dir_to_feats, (('vid_feats_' + str(self.num_input_frames)) + '.pt'))
    vid_feats = torch.load(feature_dir).float()
    if self.normalize_input_vid_feats:
        vid_feats = l2_normalize_tensor(vid_feats)
    return vid_feats
def _get_txt(self, sentences):
    """Encode raw sentences for the configured text backbone.

    Returns GloVe embeddings for LSTM backbones or CLIP token ids for CLIP
    backbones.

    Raises:
        NotImplementedError: for any other backbone. (BUG FIX: the original
        evaluated the bare expression `NotImplementedError` without raising,
        silently returning None.)
    """
    if ('lstm' in self.backbone):
        return self._to_glove_embeddings(sentences)
    elif ('clip' in self.backbone):
        return clip.tokenize(sentences, truncate=True)
    else:
        raise NotImplementedError(f'unsupported backbone: {self.backbone}')
def _to_glove_embeddings(self, sentences):
    """Convert each sentence to a tensor of per-word GloVe vectors.

    Returns a list of tensors, one per sentence, shaped (num_words, glove_dim).
    NOTE(review): words missing from word2glove raise KeyError — presumably
    the compact per-dataset dump covers the full vocabulary; verify.
    """
    word2glove = self._load_glove(self.dataset, self.glove_dir)
    sentences_glove = []
    for words in sentences:
        words_glove = []
        words = word_tokenize(words)
        for word in words:
            if ('/' in word):
                # Split slash-joined tokens ('a/b') and embed each part.
                word = word.replace('/', ' / ')
                for chars in word.split():
                    glove = word2glove[chars.lower()]
                    words_glove.append(glove)
            else:
                glove = word2glove[word.lower()]
                words_glove.append(glove)
        sentences_glove.append(torch.tensor(words_glove))
    return sentences_glove
def _load_glove(self, dataset, dir_to_glove, use_compact_glove=True):
    """Load GloVe vectors, preferring a dataset-specific compact JSON dump.

    When use_compact_glove is True, loads '<glove>.{dataset}.json' (a word ->
    vector mapping); otherwise parses the raw GloVe text file.
    """
    if not use_compact_glove:
        return get_raw_glove(dir_to_glove)
    compact_path = dir_to_glove.replace('.txt', f'.{dataset}.json')
    with open(compact_path) as j:
        return json.load(j)
def _get_vid(self, dir_to_frames):
    """Load and transform the video frames for one video.

    Returns a stacked frame tensor for CLIP, or zero-padded 16-frame clips
    shaped (num_clips, C, 16, H, W) for the C3D+LSTM backbone.

    Raises:
        ValueError: when no frames are found. (BUG FIX: the original error
        message referenced an undefined name `dir_to_frame`, so the raise
        itself crashed with NameError.)
    """
    frames = sorted(glob.glob(os.path.join((dir_to_frames + '*'), '*.png')))
    if (len(frames) == 0):
        raise ValueError(f'No valid frames exist in {dir_to_frames}.')
    frames = [self._transform(Image.open(frame).convert('RGB')) for frame in frames]
    if (self.backbone == 'clip'):
        return torch.stack(frames, dim=0)
    elif (self.backbone == 'c3d_lstm'):
        if ((len(frames) % 16) != 0):
            # Pad with black frames so the clip count divides evenly by 16.
            (C, H, W) = frames[0].shape
            frames = torch.cat([torch.stack(frames), torch.stack(([torch.zeros(3, 112, 112)] * (16 - (len(frames) % 16))))])
            clips = torch.stack([frames[(16 * i):(16 * (i + 1))] for i in range((len(frames) // 16))])
            clips = clips.permute(0, 2, 1, 3, 4)
        else:
            clips = torch.stack([torch.stack(frames[(16 * i):(16 * (i + 1))], dim=1) for i in range((len(frames) // 16))])
        return clips
    else:
        raise NotImplementedError
def _transform(self, img):
    """Apply the backbone-specific preprocessing to a single PIL image.

    C3D backbones get a 112x112 random crop + flip + ImageNet normalization;
    CLIP uses the preprocess pipeline returned by clip.load().
    """
    if ('c3d' in self.backbone):
        transform = transforms.Compose([transforms.Resize(112), transforms.RandomResizedCrop(112), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        return transform(img)
    elif ('clip' in self.backbone):
        return self.clip_preprocess(img)
    else:
        raise NotImplementedError
def _get_target_spans(self, spans, duration):
    """Normalize [start, end] spans by video duration, then convert to
    (center, width) form via span_xx_to_cw."""
    spans = (torch.Tensor(spans) / duration)
    spans = span_xx_to_cw(spans)
    return spans
def _random_drop_rows(self, feats):
    """Zero out a random txt_drop_ratio fraction of feature rows, in place."""
    num_drop = round(len(feats) * self.txt_drop_ratio)
    if num_drop > 0:
        dropped = np.random.choice(len(feats), size=num_drop, replace=False)
        feats[dropped] = 0
    return feats
def get_gt_with_vids(self, video_ids):
    """Collect flat ground-truth entries for the given video ids.

    Returns one dict per (sentence, timestamp) pair with keys 'video_id',
    'query', 'gt_timespan' and 'duration'.
    """
    ground_truth = []
    for vid in video_ids:
        annos = self.annotations[vid]
        for (query, timespan) in zip(annos['sentences'], annos['timestamps']):
            if self.dataset not in ['activitynet', 'charades']:
                raise NotImplementedError(f'{self.dataset} is not supported.')
            ground_truth.append({
                'video_id': vid,
                'query': query,
                'gt_timespan': [timespan],
                'duration': annos['duration'],
            })
    return ground_truth
def evaluate_design(input_list, testbench, ground_truth, output, filename, path, liberty):
    """Simulate and synthesize an approximate design; report error and area.

    Compiles the design with iverilog, runs vvp to dump its truth table,
    synthesizes it with yosys (via synth_design) to obtain the circuit area,
    and compares the truth table against ground_truth with assess_HD.

    Returns:
        (f, area): simulation (Hamming-distance) error and circuit area.
    """
    truth_dir = os.path.join(output, 'truthtable', (filename + '.truth'))
    # truth_dir[:-5] strips the '.truth' extension's last 5 chars; '+ iv'
    # names the compiled iverilog simulator next to the truth table.
    subprocess.call(([path['iverilog'], '-o', (truth_dir[:(- 5)] + 'iv'), testbench] + input_list))
    with open(truth_dir, 'w') as f:
        subprocess.call([path['vvp'], (truth_dir[:(- 5)] + 'iv')], stdout=f)
    os.remove((truth_dir[:(- 5)] + 'iv'))
    output_syn = os.path.join(output, 'approx_design', filename)
    area = synth_design(' '.join(input_list), output_syn, liberty, path['script'], path['yosys'])
    f = assess_HD(ground_truth, truth_dir)
    print(((('Simulation error: ' + str(f)) + '\tCircuit area: ') + str(area)))
    return (f, area)
# NOTE(review): the line below looks like a truncated '@pytest.mark.parametrize'.
.parametrize('estimator, build_dataset', metric_learners, ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
    """The callable from get_metric() must raise ValueError on malformed inputs:
    mismatched dimensions, 2D arrays instead of vectors, or both arguments bad."""
    (input_data, labels, _, X) = build_dataset()
    model = clone(estimator)
    set_random_state(model)
    model.fit(*remove_y(model, input_data, labels))
    metric = model.get_metric()
    # Cases: u has an extra dimension; u and v are 2D slices; both too long.
    list_test_get_metric_raises = [((X[0].tolist() + [5.2]), X[1]), (X[0:4], X[1:5]), ((X[0].tolist() + [5.2]), (X[1] + [7.2]))]
    for (u, v) in list_test_get_metric_raises:
        with pytest.raises(ValueError):
            metric(u, v)
# NOTE(review): the line below looks like a truncated decorator (e.g. '@pytest.mark.datainstrument').
.datainstrument
def test_symbol_dump_conditional():
    """Conditional symbol instrumentation should record 'i' exactly once,
    only when the condition 'i == 18' holds."""
    def dinstr(A: dace.float64[20]):
        for i in range(19):
            A[(i + 1)] = (A[i] + 1)
    sdfg = dinstr.to_sdfg(simplify=True)
    for state in sdfg.states():
        state.symbol_instrument = dace.DataInstrumentationType.Save
        state.symbol_instrument_condition = CodeBlock('i == 18', language=dace.Language.Python)
    A = np.ones((20,))
    sdfg(A)
    dreport = sdfg.get_instrumented_data()
    assert (len(dreport.keys()) == 1)
    assert ('i' in dreport.keys())
    assert (len(dreport.files['i']) == 1)
    assert (dreport['i'] == 18)
def test_calinski_harabasz_score():
    """Unit tests for calinski_harabasz_score: degenerate-label errors,
    the two saturation cases, and the closed-form value on a 4-cluster grid."""
    assert_raises_on_only_one_label(calinski_harabasz_score)
    assert_raises_on_all_points_same_cluster(calinski_harabasz_score)
    # Zero within-cluster dispersion -> score saturates at 1.0.
    assert (1.0 == calinski_harabasz_score(np.ones((10, 2)), (([0] * 5) + ([1] * 5))))
    # Identical cluster centroids -> zero between-cluster dispersion.
    assert (0.0 == calinski_harabasz_score(([[(- 1), (- 1)], [1, 1]] * 10), (([0] * 10) + ([1] * 10))))
    X = (((([[0, 0], [1, 1]] * 5) + ([[3, 3], [4, 4]] * 5)) + ([[0, 4], [1, 3]] * 5)) + ([[3, 1], [4, 0]] * 5))
    labels = (((([0] * 10) + ([1] * 10)) + ([2] * 10)) + ([3] * 10))
    # BUG FIX: the original computed pytest.approx(...) and discarded the
    # result, asserting nothing. Actually compare against the expected value.
    assert calinski_harabasz_score(X, labels) == pytest.approx((45 * (40 - 4)) / (5 * (4 - 1)))
class _PreprocessorInfo(object):
    """Bookkeeping for one preprocessor conditional (#if/#else) block."""

    def __init__(self, stack_before_if):
        # Snapshot of the state stack taken when the '#if' was seen.
        self.stack_before_if = stack_before_if
        # Populated when (and if) the matching '#else' is reached.
        self.stack_before_else = []
        self.seen_else = False
class TinyNetwork(nn.Module):
    """Cell-based CNN built from a fixed genotype (NAS-Bench-201 style).

    The backbone is a stem conv followed by 3*N normal cells interleaved with
    two reduction blocks (channel doubling), then BN+ReLU, global pooling and
    a linear classifier.
    """

    def __init__(self, C, N, genotype, num_classes):
        super(TinyNetwork, self).__init__()
        self._C = C
        self._layerN = N
        # Single-channel input for the 18-class case, RGB otherwise.
        # NOTE(review): 18 presumably identifies a specific grayscale dataset.
        self.channel = (1 if (num_classes == 18) else 3)
        self.stem = nn.Sequential(nn.Conv2d(self.channel, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
        # N cells at C, reduction to 2C, N cells, reduction to 4C, N cells.
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        C_prev = C
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2, True)
            else:
                cell = InferCell(genotype, C_prev, C_curr, 1)
            self.cells.append(cell)
            C_prev = cell.out_dim
        self._Layer = len(self.cells)
        self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def get_message(self):
        """Return a multi-line description of the network and every cell."""
        string = self.extra_repr()
        for (i, cell) in enumerate(self.cells):
            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
        return string

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        """Return (pooled feature vector, classification logits)."""
        feature = self.stem(inputs)
        for (i, cell) in enumerate(self.cells):
            feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        return (out, logits)
def get_norm_layer(norm_type):
    """Map a normalization name to the corresponding torch.nn layer class.

    Args:
        norm_type: 'batch' or 'instance'.

    Returns:
        nn.BatchNorm2d or nn.InstanceNorm2d.

    Raises:
        NotImplementedError: for any other norm_type. (BUG FIX: the original
        printed a message referencing the undefined name `norm` — a NameError —
        and then hit UnboundLocalError returning the never-assigned norm_layer.)
    """
    if (norm_type == 'batch'):
        return nn.BatchNorm2d
    if (norm_type == 'instance'):
        return nn.InstanceNorm2d
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
class LikeZapp(tvm.relay.dataflow_pattern.DFPatternCallback):
    """Relay rewrite: replace '*_like' ops with their shape-explicit forms.

    zeros_like/ones_like become zeros/ones with the input's static shape;
    collapse_sum_like / reshape_like / broadcast_to_like become
    collapse_sum_to / reshape / broadcast_to with the pattern tensor's shape
    (or are elided entirely when input and pattern types already match).
    """

    def __init__(self):
        self.translations_with_dt = {'zeros_like': tvm.relay.zeros, 'ones_like': tvm.relay.ones}
        self.data_tensor = tvm.relay.dataflow_pattern.wildcard()
        self.pattern_tensor = tvm.relay.dataflow_pattern.wildcard()
        # Match unary (zeros_like|ones_like) OR binary (*_like with a pattern arg).
        self.pattern = ((tvm.relay.dataflow_pattern.is_op('zeros_like') | tvm.relay.dataflow_pattern.is_op('ones_like'))(self.data_tensor) | ((tvm.relay.dataflow_pattern.is_op('collapse_sum_like') | tvm.relay.dataflow_pattern.is_op('reshape_like')) | tvm.relay.dataflow_pattern.is_op('broadcast_to_like'))(self.data_tensor, self.pattern_tensor))

    def callback(self, pre, post, node_map):
        data = node_map[self.data_tensor][0]
        res = node_map[self.pattern][0]
        if (res.op.name in self.translations_with_dt):
            # zeros_like/ones_like: rebuild with the static shape and dtype.
            ret = self.translations_with_dt[res.op.name](list(res.type_args[0].shape), res.type_args[0].dtype)
            return ret
        if ((res.type_args[0] is not None) and (res.type_args[0] == res.type_args[1])):
            # Same type on both sides: the *_like op is a no-op; drop it.
            return data
        if (res.op.name == 'broadcast_to_like'):
            return tvm.relay.broadcast_to(data, list(res.type_args[1].shape))
        if (res.op.name == 'reshape_like'):
            return tvm.relay.reshape(data, list(res.type_args[1].shape))
        if (res.op.name == 'collapse_sum_like'):
            return tvm.relay.collapse_sum_to(data, list(res.type_args[1].shape))
        return res
def append_replace_return_docstrings(model_class, output_type, config_class):
    """Rebind model_class.__call__ to a fresh copy whose return-docstring is
    rewritten for output_type/config_class (copy first so the shared base
    method is left untouched)."""
    call_copy = copy_func(model_class.__call__)
    decorate = replace_return_docstrings(output_type=output_type, config_class=config_class)
    model_class.__call__ = decorate(call_copy)
def setup_logging(log_file='log.txt', resume=False, dummy=False):
    """Configure root logging: DEBUG to log_file plus INFO to the console.

    Args:
        log_file: destination file for the detailed log.
        resume: append to an existing log file instead of truncating.
        dummy: skip configuration entirely.
    """
    if dummy:
        # NOTE(review): the created logger is discarded, so this branch
        # effectively leaves logging unconfigured — presumably intentional.
        logging.getLogger('dummy')
    else:
        if (os.path.isfile(log_file) and resume):
            file_mode = 'a'
        else:
            file_mode = 'w'
        root_logger = logging.getLogger()
        # Drop a pre-existing handler so basicConfig can take effect.
        if root_logger.handlers:
            root_logger.removeHandler(root_logger.handlers[0])
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', filename=log_file, filemode=file_mode)
        # Mirror INFO-and-above to stderr with a bare message format.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
def get_abi3_suffix():
    """Return the extension-module suffix usable for abi3 wheels, if any.

    Scans C-extension suffixes for one containing '.abi3' (CPython on POSIX)
    or equal to '.pyd' (Windows). Returns None implicitly when neither exists.
    NOTE(review): the `imp` module is deprecated and removed in Python 3.12 —
    a port to importlib.machinery would be needed for modern interpreters.
    """
    for (suffix, _, _) in (s for s in imp.get_suffixes() if (s[2] == imp.C_EXTENSION)):
        if ('.abi3' in suffix):
            return suffix
        elif (suffix == '.pyd'):
            return suffix
def get_margins(clusters: List[Cluster], min_occurances: int):
    """Return the first cluster holding at least min_occurances items,
    falling back to the first cluster when none qualifies."""
    big_enough = (c for c in clusters if len(c) >= min_occurances)
    return next(big_enough, clusters[0])
class RK4(FixedGridODESolver):
    """Fixed-grid classic Runge-Kutta solver (alternate RK4 tableau)."""
    order = 4

    def _step_func(self, func, t0, dt, t1, y0):
        """Advance one step from (t0, y0) to t1; returns (increment, f0).

        f0 is evaluated once here and reused by rk4_alt_step_func; the
        perturbation flag nudges evaluation across discontinuities.
        """
        f0 = func(t0, y0, perturb=(Perturb.NEXT if self.perturb else Perturb.NONE))
        return (rk4_alt_step_func(func, t0, dt, t1, y0, f0=f0, perturb=self.perturb), f0)
class PUndirNet(object):
    """SWIG-generated smart-pointer wrapper around SNAP's TUndirNet.

    Every method below delegates directly to the corresponding _snap C++
    binding; there is no Python-side logic. Instances are created via the
    static New() factory, not the constructor.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Construction from Python is disabled; use PUndirNet.New().
        raise AttributeError('No constructor defined')
    __repr__ = _swig_repr

    def New():
        return _snap.PUndirNet_New()
    New = staticmethod(New)
    __swig_destroy__ = _snap.delete_PUndirNet

    # --- Serialization ---
    def Save(self, SOut):
        return _snap.PUndirNet_Save(self, SOut)

    def __deref__(self):
        return _snap.PUndirNet___deref__(self)

    def __ref__(self):
        return _snap.PUndirNet___ref__(self)

    def __call__(self):
        return _snap.PUndirNet___call__(self)

    def Empty(self):
        return _snap.PUndirNet_Empty(self)

    def Clr(self):
        return _snap.PUndirNet_Clr(self)

    def GetRefs(self):
        return _snap.PUndirNet_GetRefs(self)

    def Save_V1(self, SOut):
        return _snap.PUndirNet_Save_V1(self, SOut)

    def Load(self, SIn):
        return _snap.PUndirNet_Load(self, SIn)

    def Load_V1(self, SIn):
        return _snap.PUndirNet_Load_V1(self, SIn)

    def HasFlag(self, Flag):
        return _snap.PUndirNet_HasFlag(self, Flag)

    # --- Node operations ---
    def GetNodes(self):
        return _snap.PUndirNet_GetNodes(self)

    def AddNode(self, *args):
        return _snap.PUndirNet_AddNode(self, *args)

    def AddNodeUnchecked(self, NId=(- 1)):
        return _snap.PUndirNet_AddNodeUnchecked(self, NId)

    def DelNode(self, *args):
        return _snap.PUndirNet_DelNode(self, *args)

    def IsNode(self, NId):
        return _snap.PUndirNet_IsNode(self, NId)

    def BegNI(self, *args):
        return _snap.PUndirNet_BegNI(self, *args)

    def EndNI(self, *args):
        return _snap.PUndirNet_EndNI(self, *args)

    def GetNI(self, *args):
        return _snap.PUndirNet_GetNI(self, *args)

    def GetMxNId(self):
        return _snap.PUndirNet_GetMxNId(self)

    # --- Edge operations ---
    def GetEdges(self):
        return _snap.PUndirNet_GetEdges(self)

    def AddEdge(self, *args):
        return _snap.PUndirNet_AddEdge(self, *args)

    def AddEdgeUnchecked(self, SrcNId, DstNId):
        return _snap.PUndirNet_AddEdgeUnchecked(self, SrcNId, DstNId)

    def DelEdge(self, SrcNId, DstNId):
        return _snap.PUndirNet_DelEdge(self, SrcNId, DstNId)

    def IsEdge(self, SrcNId, DstNId):
        return _snap.PUndirNet_IsEdge(self, SrcNId, DstNId)

    def BegEI(self, *args):
        return _snap.PUndirNet_BegEI(self, *args)

    def EndEI(self, *args):
        return _snap.PUndirNet_EndEI(self, *args)

    def GetEI(self, SrcNId, DstNId):
        return _snap.PUndirNet_GetEI(self, SrcNId, DstNId)

    def GetRndNId(self, *args):
        return _snap.PUndirNet_GetRndNId(self, *args)

    def GetRndNI(self, *args):
        return _snap.PUndirNet_GetRndNI(self, *args)

    def GetNIdV(self, NIdV):
        return _snap.PUndirNet_GetNIdV(self, NIdV)

    # --- Capacity / maintenance ---
    def Reserve(self, Nodes, Edges):
        return _snap.PUndirNet_Reserve(self, Nodes, Edges)

    def ReserveNIdDeg(self, NId, Deg):
        return _snap.PUndirNet_ReserveNIdDeg(self, NId, Deg)

    def SortNodeAdjV(self):
        return _snap.PUndirNet_SortNodeAdjV(self)

    def Defrag(self, OnlyNodeLinks=False):
        return _snap.PUndirNet_Defrag(self, OnlyNodeLinks)

    def IsOk(self, ThrowExcept=True):
        return _snap.PUndirNet_IsOk(self, ThrowExcept)

    def Dump(self, *args):
        return _snap.PUndirNet_Dump(self, *args)

    def GetSmallGraph(self):
        return _snap.PUndirNet_GetSmallGraph(self)

    # --- Sparse node attributes ---
    def AddSAttrDatN(self, *args):
        return _snap.PUndirNet_AddSAttrDatN(self, *args)

    def GetSAttrDatN(self, *args):
        return _snap.PUndirNet_GetSAttrDatN(self, *args)

    def DelSAttrDatN(self, *args):
        return _snap.PUndirNet_DelSAttrDatN(self, *args)

    def GetSAttrVN(self, *args):
        return _snap.PUndirNet_GetSAttrVN(self, *args)

    def GetIdVSAttrN(self, *args):
        return _snap.PUndirNet_GetIdVSAttrN(self, *args)

    def AddSAttrN(self, Name, AttrType, AttrId):
        return _snap.PUndirNet_AddSAttrN(self, Name, AttrType, AttrId)

    def GetSAttrIdN(self, Name, AttrIdX, AttrTypeX):
        return _snap.PUndirNet_GetSAttrIdN(self, Name, AttrIdX, AttrTypeX)

    def GetSAttrNameN(self, AttrId, NameX, AttrTypeX):
        return _snap.PUndirNet_GetSAttrNameN(self, AttrId, NameX, AttrTypeX)

    # --- Sparse edge attributes ---
    def AddSAttrDatE(self, *args):
        return _snap.PUndirNet_AddSAttrDatE(self, *args)

    def GetSAttrDatE(self, *args):
        return _snap.PUndirNet_GetSAttrDatE(self, *args)

    def DelSAttrDatE(self, *args):
        return _snap.PUndirNet_DelSAttrDatE(self, *args)

    def GetSAttrVE(self, *args):
        return _snap.PUndirNet_GetSAttrVE(self, *args)

    def GetIdVSAttrE(self, *args):
        return _snap.PUndirNet_GetIdVSAttrE(self, *args)

    def AddSAttrE(self, Name, AttrType, AttrId):
        return _snap.PUndirNet_AddSAttrE(self, Name, AttrType, AttrId)

    def GetSAttrIdE(self, Name, AttrIdX, AttrTypeX):
        return _snap.PUndirNet_GetSAttrIdE(self, Name, AttrIdX, AttrTypeX)

    def GetSAttrNameE(self, AttrId, NameX, AttrTypeX):
        return _snap.PUndirNet_GetSAttrNameE(self, AttrId, NameX, AttrTypeX)
class ResultList():
    """Accumulates Prolog rules; the last rule added receives the final effect.

    Used while compiling a PDDL rule into a chain of intermediate rules whose
    effects are freshly named atoms, except the last which keeps the original
    rule's effect.
    """

    def __init__(self, rule, name_generator):
        # Effect to install on the final rule when get_result() is called.
        self.final_effect = rule.effect
        self.result = []
        # Yields fresh predicate names for intermediate effects.
        self.name_generator = name_generator

    def get_result(self):
        """Finalize: overwrite the last rule's effect, then return all rules."""
        self.result[(- 1)].effect = self.final_effect
        return self.result

    def add_rule(self, type, conditions, effect_vars):
        """Append a rule with a freshly named effect atom; returns that atom."""
        effect = pddl.Atom(next(self.name_generator), effect_vars)
        rule = pddl_to_prolog.Rule(conditions, effect)
        rule.type = type
        self.result.append(rule)
        return rule.effect
def get_informed_denoiser(diffusion):
    """Build a DDRM-style denoiser that exploits a known per-pixel noise map.

    The returned closure runs the full reverse diffusion chain, at each step
    blending two update rules depending on whether the diffusion noise level
    exceeds the (scaled) measurement noise at each pixel.
    """
    def informed_denoiser(model, noisy_data, noise_map, clip_denoised=False, model_kwargs=None, etaA_ddrm=1.0, etaB_ddrm=1.0):
        device = next(model.parameters()).device
        etaA_ddrm = torch.tensor(etaA_ddrm, device=device).float()
        etaB_ddrm = torch.tensor(etaB_ddrm, device=device).float()
        x_t = torch.randn_like(noisy_data)
        # Reverse-time iteration: T-1 ... 0.
        indices = list(range(diffusion.num_timesteps))[::(- 1)]
        (b, c, h, w) = noisy_data.shape
        for i in tqdm.tqdm(indices):
            t = torch.tensor(([i] * b), device=device)
            with torch.no_grad():
                # sqrt(1 - alpha_bar_{t-1}) and sqrt(alpha_bar_{t-1}).
                sqrt_1_m_cumalpha = torch.sqrt(torch.tensor((1.0 - diffusion.alphas_cumprod_prev[i]), device=device)).float()
                sqrt_cumalpha = torch.sqrt(torch.tensor(diffusion.alphas_cumprod_prev[i], device=device).float())
                # Per-pixel switch: 1 where diffusion noise dominates measurement noise.
                mask_sigmat_is_larger = (1.0 * (sqrt_1_m_cumalpha[(None, None, None, None)] > (sqrt_cumalpha * noise_map)))
                scale_x0_at_t = sqrt_cumalpha
                scaled_noisy_data = (noisy_data * scale_x0_at_t)
                noise = torch.randn_like(x_t)
                # Suppress added noise at the final step (t == 0).
                nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x_t.shape) - 1)))
                res_p_mean_variance = diffusion.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
                est_x_0 = (res_p_mean_variance['pred_xstart'] * scale_x0_at_t)
                # Branch A: diffusion noise larger — mix estimate with measurement (etaB).
                sigma_for_larger_sigmat = torch.sqrt((mask_sigmat_is_larger * ((sqrt_1_m_cumalpha[(None, None, None, None)] ** 2) - ((etaB_ddrm ** 2) * ((sqrt_cumalpha ** 2) * (noise_map ** 2))))))
                data_for_larger_sigmat = ((((1.0 - etaB_ddrm) * est_x_0) + (etaB_ddrm * scaled_noisy_data)) + ((nonzero_mask * sigma_for_larger_sigmat) * torch.randn_like(x_t)))
                # Branch B: measurement noise larger — pull estimate toward data (etaA).
                sigma_for_smaller_sigmat = torch.sqrt((((1.0 - mask_sigmat_is_larger) * (sqrt_1_m_cumalpha ** 2)) * (etaA_ddrm ** 2)))
                coef = ((sqrt_1_m_cumalpha / sqrt_cumalpha) / (noise_map + 1e-05))
                data_for_smaller_sigmat = ((est_x_0 + ((torch.sqrt((1 - (etaA_ddrm ** 2))) * coef) * (scaled_noisy_data - est_x_0))) + ((nonzero_mask * sigma_for_smaller_sigmat) * torch.randn_like(x_t)))
                x_t = ((data_for_smaller_sigmat * (1.0 - mask_sigmat_is_larger)) + (data_for_larger_sigmat * mask_sigmat_is_larger))
        return x_t
    return informed_denoiser
def _make_sdfg(node, parent_state, parent_sdfg, implementation):
    """Expand a matrix-inverse library node into a GETRF -> GETRI SDFG.

    Builds a nested SDFG that LU-factorizes the input (Getrf) and then inverts
    it from the factors (Getri), wiring pivot and info arrays between them.
    When node.overwrite is set, the inverse is computed in place on '_ain';
    otherwise the input is first copied into a separate '_aout' array.
    """
    arr_desc = node.validate(parent_sdfg, parent_state)
    if node.overwrite:
        (in_shape, in_dtype, in_strides, n) = arr_desc
    else:
        (in_shape, in_dtype, in_strides, out_shape, out_dtype, out_strides, n) = arr_desc
    dtype = in_dtype
    sdfg = dace.SDFG('{l}_sdfg'.format(l=node.label))
    a_arr = sdfg.add_array('_ain', in_shape, dtype=in_dtype, strides=in_strides)
    if (not node.overwrite):
        # Keep the input descriptor; '_aout' becomes the working/output array.
        ain_arr = a_arr
        a_arr = sdfg.add_array('_aout', out_shape, dtype=out_dtype, strides=out_strides)
    # LAPACK scratch: pivot indices and the status/info scalar.
    ipiv_arr = sdfg.add_array('_pivots', [n], dtype=dace.int32, transient=True)
    info_arr = sdfg.add_array('_info', [1], dtype=dace.int32, transient=True)
    state = sdfg.add_state('{l}_state'.format(l=node.label))
    getrf_node = Getrf('getrf')
    getrf_node.implementation = implementation
    getri_node = Getri('getri')
    getri_node.implementation = implementation
    if node.overwrite:
        ain = state.add_read('_ain')
        ainout = state.add_access('_ain')
        aout = state.add_write('_ain')
    else:
        a = state.add_read('_ain')
        ain = state.add_read('_aout')
        ainout = state.add_access('_aout')
        aout = state.add_write('_aout')
        # Copy the input into the output array before factorizing.
        state.add_nedge(a, ain, Memlet.from_array(*ain_arr))
    ipiv = state.add_access('_pivots')
    info1 = state.add_write('_info')
    info2 = state.add_write('_info')
    # GETRF: factorize; emits LU factors, pivots and an info code.
    state.add_memlet_path(ain, getrf_node, dst_conn='_xin', memlet=Memlet.from_array(*a_arr))
    state.add_memlet_path(getrf_node, info1, src_conn='_res', memlet=Memlet.from_array(*info_arr))
    state.add_memlet_path(getrf_node, ipiv, src_conn='_ipiv', memlet=Memlet.from_array(*ipiv_arr))
    state.add_memlet_path(getrf_node, ainout, src_conn='_xout', memlet=Memlet.from_array(*a_arr))
    # GETRI: compute the inverse from the LU factors and pivots.
    state.add_memlet_path(ainout, getri_node, dst_conn='_xin', memlet=Memlet.from_array(*a_arr))
    state.add_memlet_path(ipiv, getri_node, dst_conn='_ipiv', memlet=Memlet.from_array(*ipiv_arr))
    state.add_memlet_path(getri_node, info2, src_conn='_res', memlet=Memlet.from_array(*info_arr))
    state.add_memlet_path(getri_node, aout, src_conn='_xout', memlet=Memlet.from_array(*a_arr))
    return sdfg
def main():
    """Parse command-line options for partial-VAE training and launch it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=3, help='random seed')
    parser.add_argument('--plot-interval', type=int, default=50, help='plot interval. 0 to disable plotting.')
    parser.add_argument('--save-interval', type=int, default=50, help='interval to save models. 0 to disable saving.')
    # Missing-data simulation options.
    parser.add_argument('--mask', default='block', help='missing data mask. (options: block, indep)')
    parser.add_argument('--block-len', type=int, default=12, help='size of observed block. Set to 0 to use variable size')
    parser.add_argument('--block-len-max', type=int, default=None, help='max size of observed block. Use fixed-size observed block if unspecified.')
    parser.add_argument('--obs-prob', type=float, default=0.2, help='observed probability for independent dropout')
    parser.add_argument('--obs-prob-max', type=float, default=None, help='max observed probability for independent dropout. Use fixed probability if unspecified.')
    # Model / optimization options.
    parser.add_argument('--flow', type=int, default=2, help='number of IAF layers')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
    parser.add_argument('--min-lr', type=float, default=(- 1), help='min learning rate for LR scheduler. -1 to disable annealing')
    parser.add_argument('--epoch', type=int, default=4000, help='number of training epochs')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--k', type=int, default=5, help='number of importance weights')
    parser.add_argument('--prefix', default='pvae', help='prefix of output directory')
    parser.add_argument('--latent', type=int, default=128, help='dimension of latent variable')
    # KL annealing schedule: weight ramps from 0 (at --kl-off) to 1 (at --kl-on).
    parser.add_argument('--kl-off', type=int, default=200, help='epoch to start tune up KL weight from zero')
    parser.add_argument('--kl-on', type=int, default=0, help='start epoch to use KL weight 1')
    args = parser.parse_args()
    train_pvae(args)
def set_seeds(seed):
    """Seed every RNG in use (python, numpy, torch CPU + CUDA) and force
    deterministic cuDNN behavior for reproducible runs."""
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
# NOTE(review): the line below looks like a truncated '@pytest.mark.parametrize'.
.parametrize('value, expected', (({'exclusiveMinimum': True, 'minimum': 5}, {'exclusiveMinimum': True, 'minimum': 5}), ({'exclusiveMinimum': 5}, {'exclusiveMinimum': True, 'minimum': 5}), ({'exclusiveMaximum': 5}, {'exclusiveMaximum': True, 'maximum': 5}), ({'schema': {'exclusiveMaximum': 5}}, {'schema': {'exclusiveMaximum': True, 'maximum': 5}}), ([{'schema': {'exclusiveMaximum': 5}}], [{'schema': {'exclusiveMaximum': True, 'maximum': 5}}])))
def test_fastapi_schema_conversion(value, expected):
    """The FastAPI fixup should rewrite numeric 'exclusiveMinimum/Maximum'
    (Draft 2019-09 style) into the boolean + 'minimum/maximum' pair, in place,
    recursing into nested 'schema' objects and lists."""
    fixups.fast_api.before_load_schema(None, value)
    assert (value == expected)
def list_functions():
    """Return the sorted tuple of documented global function names.

    Starts from GLOBALS minus KEYWORDS, then keeps only names accepted by
    the IsDocumentedWord filter (Filtered/.sage() are project helpers).
    """
    fnames = set(GLOBALS).difference(KEYWORDS)
    documented = Filtered(list(fnames), IsDocumentedWord)
    return tuple(sorted(documented.sage()))
# NOTE(review): the line below looks like a truncated '@pytest.mark.parametrize'.
.parametrize('param_range, xscale', [([5, 10, 15], 'linear'), ([(- 50), 5, 50, 500], 'symlog'), ([5, 50, 500], 'log')])
def test_validation_curve_xscale_from_param_range_provided_as_a_list(pyplot, data, param_range, xscale):
    """ValidationCurveDisplay should infer the x-axis scale from the spacing
    and sign of the list-provided param_range (linear/symlog/log)."""
    (X, y) = data
    estimator = DecisionTreeClassifier(random_state=0)
    param_name = 'max_depth'
    display = ValidationCurveDisplay.from_estimator(estimator, X, y, param_name=param_name, param_range=param_range)
    assert (display.ax_.get_xscale() == xscale)
def GL(n, R, var='a'):
    """Construct the general linear group GL(n, R).

    Tries to categorize the group (finite / infinite), then prefers the
    GAP-backed implementation when available, falling back to the generic
    matrix-group implementation.
    """
    (degree, ring) = normalize_args_vectorspace(n, R, var='a')
    try:
        if ring.is_finite():
            cat = Groups().Finite()
        elif ((n > 1) or (ring in Fields())):
            cat = Groups().Infinite()
        else:
            cat = Groups()
    except AttributeError:
        # Ring lacks is_finite(); leave the category unspecialized.
        cat = Groups()
    name = 'General Linear Group of degree {0} over {1}'.format(degree, ring)
    ltx = 'GL({0}, {1})'.format(degree, latex(ring))
    try:
        from .linear_gap import LinearMatrixGroup_gap
    except ImportError:
        # GAP backend not installed; fall through to the generic class.
        pass
    else:
        try:
            cmd = 'GL({0}, {1})'.format(degree, ring._gap_init_())
            return LinearMatrixGroup_gap(degree, ring, False, name, ltx, cmd, category=cat)
        except ValueError:
            # Ring has no GAP representation; use the generic class.
            pass
    return LinearMatrixGroup_generic(degree, ring, False, name, ltx, category=cat)
def _toggle_dropout(cell_params, mode):
    """Return a deep copy of cell_params with dropout disabled (keep
    probabilities forced to 1.0) for any mode other than training."""
    params = copy.deepcopy(cell_params)
    if mode != tf.contrib.learn.ModeKeys.TRAIN:
        params['dropout_input_keep_prob'] = 1.0
        params['dropout_output_keep_prob'] = 1.0
    return params
def select_indices(data_source: ds.PymiaDatasource, selection_strategy: SelectionStrategy):
    """Return the indices of all samples accepted by selection_strategy."""
    return [idx for idx, sample in enumerate(data_source) if selection_strategy(sample)]
class PLBartForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder: instantiating raises unless torch is installed
    (generated dummy object pattern)."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_kmeans_model(n_clusters, init, max_iter, batch_size, tol, max_no_improvement, n_init, reassignment_ratio, random_state):
    """Build a verbose MiniBatchKMeans with labels computed and default init_size;
    all clustering hyperparameters are passed straight through."""
    return MiniBatchKMeans(n_clusters=n_clusters, init=init, max_iter=max_iter, batch_size=batch_size, tol=tol, max_no_improvement=max_no_improvement, n_init=n_init, reassignment_ratio=reassignment_ratio, random_state=random_state, verbose=1, compute_labels=True, init_size=None)
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
    """Parse every non-empty line of a detection file via get_tl_line_values.

    Returns (pointsList, confidencesList, transcriptionsList), optionally
    sorted by descending confidence when confidences are present.
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []
    separator = '\r\n' if CRLF else '\n'
    for raw_line in content.split(separator):
        line = raw_line.replace('\r', '').replace('\n', '')
        if line == '':
            continue
        points, confidence, transcription = get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
        pointsList.append(points)
        transcriptionsList.append(transcription)
        confidencesList.append(confidence)
    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        order = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in order]
        pointsList = [pointsList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]
    return (pointsList, confidencesList, transcriptionsList)
# NOTE(review): the line below looks like a truncated registration decorator
# (e.g. '@register_toolkit()').
_toolkit()
class InvestmentManager(FunctionToolkit):
    """Toolkit bundling the investment-management tools: holdings lookup,
    investment search, transactions, and transaction history."""
    name_for_human = 'Investment Manager'
    description_for_human = 'Toolkit for managing personal investments.'
    name_for_model = 'InvestmentManager'
    description_for_model = 'A comprehensive toolkit for managing personal investments, including retrieving information about holdings, searching for investment options, making transactions and searching transaction history.'
    tool_classes = [InvestmentManagerGetHoldingsInformation, InvestmentManagerListTransactions, InvestmentManagerSearchInvestments, InvestmentManagerMakeTransaction]
class QDynamicLinearBenchmark(_QLinearBenchmarkBase):
    """Benchmark for dynamically-quantized nn.Linear (nnqd.Linear).

    Unlike the static-quantized variant, the input stays the float tensor
    self.X prepared by the base class.
    """

    def init(self, N, IN, OUT, device):
        super(QDynamicLinearBenchmark, self).init(N, IN, OUT, nnqd.Linear(IN, OUT))
        self.input = self.X
        self.set_module_name('QDynamicLinear')
class Algorithm():
    """Scikit-learn-style base class providing parameter introspection.

    Subclasses store their constructor arguments as same-named attributes;
    get_params/set_params/__repr__ then work automatically.
    """

    def get_params(self):
        """Return the constructor parameters currently set on the instance.

        Parameters are read from the __init__ signature; 'self',
        'random_state' and 'verbose' are excluded, and parameters never
        stored as instance attributes are silently skipped.
        """
        signature = inspect.signature(self.__class__.__init__)
        params_exclude = ('self', 'random_state', 'verbose')
        params = dict()
        for param in signature.parameters.values():
            name = param.name
            if name in params_exclude:
                continue
            try:
                params[name] = self.__dict__[name]
            except KeyError:
                # Parameter was never stored as an attribute; skip it.
                continue
        return params

    def set_params(self, params: dict) -> 'Algorithm':
        """Set parameters from a dict and return self.

        Raises:
            ValueError: if params is not a dict or contains an unknown name.
        """
        # Validate the argument type up front (was checked after work began,
        # with a non-idiomatic `type(params) is not dict`).
        if not isinstance(params, dict):
            raise ValueError('The parameters must be given as a dictionary.')
        valid_params = self.get_params()
        for (name, value) in params.items():
            if name not in valid_params:
                raise ValueError(f'Invalid parameter: {name}.')
            setattr(self, name, value)
        return self

    def __repr__(self):
        params_string = []
        for (name, value) in self.get_params().items():
            # isinstance (not `type(value) == str`) so str subclasses are quoted too.
            rendered = f"'{value}'" if isinstance(value, str) else str(value)
            params_string.append(f'{name}={rendered}')
        return f"{self.__class__.__name__}({', '.join(params_string)})"

    def fit(self, *args, **kwargs):
        """Subclasses must implement fitting."""
        raise NotImplementedError
class LastKFramesSelector(Callable):
    """Selects the trailing k frame timestamps of a sequence."""

    def __init__(self, k: int):
        self.k = k

    def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
        # Negative-index slice; note k == 0 yields the whole list ([-0:] == [0:]).
        tail_start = -self.k
        return frame_tss[tail_start:]
# NOTE(review): the line below looks like a truncated dace program decorator
# (e.g. '@dace.program(datatype[N], datatype[N])').
(datatype[N], datatype[N])
def jacobi1d(A, B):
    """1-D Jacobi stencil (Polybench): tsteps alternating sweeps averaging
    each interior point with its two neighbors, A -> B then B -> A.

    The inner defs are dace tasklet maps: '<<' reads and '>>' writes memlets.
    """
    for t in range(tsteps):
        def a(i: _[1:(N - 1)]):
            (a1 << A[(i - 1)])
            (a2 << A[i])
            (a3 << A[(i + 1)])
            (b >> B[i])
            b = (0.33333 * ((a1 + a2) + a3))
        def b(i: _[1:(N - 1)]):
            (a1 << B[(i - 1)])
            (a2 << B[i])
            (a3 << B[(i + 1)])
            (b >> A[i])
            b = (0.33333 * ((a1 + a2) + a3))
def bert_masking(sentence, mask, tokens, pad, mask_id):
    """Apply BERT-style corruption to the positions listed in `mask`.

    Per masked position: 80% replace with mask_id, 10% replace with a random
    token, otherwise leave the input untouched but set the target to pad.
    Returns (corrupted_sentence, target, None).
    """
    corrupted = np.copy(sentence)
    target = np.copy(sentence)
    masked_positions = set(mask)
    for idx in range(len(corrupted)):
        if idx not in masked_positions:
            continue
        rand = np.random.random()
        if rand < 0.8:
            corrupted[idx] = mask_id
        elif rand < 0.9:
            corrupted[idx] = np.random.choice(tokens)
        else:
            # NOTE(review): padding the *target* on the 10% keep case (rather
            # than on unmasked positions) is unusual — behavior preserved as-is.
            target[idx] = pad
    return (corrupted, target, None)
def infer_abbr(class_type):
    """Infer an abbreviation for a class: its explicit `_abbr_` attribute if
    present, otherwise its name converted from CamelCase to snake_case
    (hyphens become underscores).

    Raises:
        TypeError: if class_type is not a class.
    """
    if not inspect.isclass(class_type):
        raise TypeError(f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # CamelCase -> snake_case, handling acronym runs like 'HTTPServer'.
    name = re.sub('([A-Z]+)([A-Z][a-z])', '\\1_\\2', class_type.__name__)
    name = re.sub('([a-z\\d])([A-Z])', '\\1_\\2', name)
    return name.replace('-', '_').lower()
def seed_test_case2():
    """Generated seed test: i_take_bools on two equal booleans."""
    var0 = True
    var1 = True
    var2 = module0.i_take_bools(var0, var1)
    assert (var2 == 'Bools are equal!')
def scale_stats_container(sc, num_of_scaling_factors):
    """Scale a statistics container by freshly drawn random factors.

    Returns:
        (scaled_container, scaling_factor): the scaled statistics and the
        uniform random factors used, so callers can verify the scaling.
    """
    factors = np.random.random(num_of_scaling_factors)
    return (scale_statistics(sc, factors), factors)
def test_initialize_from_files_not_lazy():
    """Database.from_files(lazy_load=False) should eagerly read all three
    files into lists matching the raw file contents.

    FIX: the original leaked three file handles via bare open(...).read();
    files are now read with context managers.
    """
    _pos = 'datasets/ToyFather/train/pos.pl'
    _neg = 'datasets/ToyFather/train/neg.pl'
    _facts = 'datasets/ToyFather/train/facts.pl'
    _db = Database.from_files(pos=_pos, neg=_neg, facts=_facts, lazy_load=False)
    assert isinstance(_db.pos, list)
    assert isinstance(_db.neg, list)
    assert isinstance(_db.facts, list)
    for loaded, path in ((_db.pos, _pos), (_db.neg, _neg), (_db.facts, _facts)):
        with open(path) as fh:
            assert loaded == fh.read().splitlines()
class CryptoMiniSatEncoder(CNFEncoder):
    """CNF encoder emitting CryptoMiniSat XOR clauses for linear polynomials.

    Linear polynomials with more than one term become native 'x ...' XOR
    clauses instead of an exponential CNF expansion; each emitted clause is
    tagged with a group comment for traceability.
    """
    group_counter = 0

    def dimacs_encode_polynomial(self, p):
        """Encode one polynomial; linear multi-term polynomials use XOR clauses."""
        if ((p.deg() != 1) or (len(p) <= 1)):
            # Non-linear or single-term: fall back to plain CNF encoding.
            res = super().dimacs_encode_polynomial(p)
        else:
            # A constant part flips the parity: negate the last literal.
            invert_last = bool(p.has_constant_part())
            variables = list(p.vars_as_monomial().variables())
            indices = [self.to_dimacs_index(v) for v in variables]
            if invert_last:
                indices[(- 1)] = (- indices[(- 1)])
            indices.append(0)
            # CryptoMiniSat XOR clause syntax: 'x <lit> <lit> ... 0'.
            res = [('x' + ' '.join((str(v) for v in indices)))]
        self.group_counter = (self.group_counter + 1)
        group_comment = ('\nc g %s %s' % (self.group_counter, str(p)[:30]))
        return [(c + group_comment) for c in res]

    def dimacs_cnf(self, polynomial_system):
        """Standard DIMACS output plus 'c v' comments mapping indices to variables."""
        uv = list(used_vars_set(polynomial_system).variables())
        res = super().dimacs_cnf(polynomial_system)
        res = ((res + '\n') + '\n'.join([('c v %s %s' % (self.to_dimacs_index(v), v)) for v in uv]))
        return res
class SumMeter(Meter):
    """Meter that accumulates a running sum, with optional display rounding."""

    def __init__(self, round: Optional[int]=None):
        # Number of digits used by smoothed_value(); None disables rounding.
        self.round = round
        self.reset()

    def reset(self):
        self.sum = 0

    def update(self, val):
        # None values are ignored; type_as keeps the accumulator on the same
        # device/dtype as incoming tensor values.
        if (val is not None):
            self.sum = (type_as(self.sum, val) + val)

    def state_dict(self):
        return {'sum': self.sum, 'round': self.round}

    def load_state_dict(self, state_dict):
        self.sum = state_dict['sum']
        # Older checkpoints may lack 'round'.
        self.round = state_dict.get('round', None)

    def smoothed_value(self) -> float:
        """Return the current sum, rounded when configured."""
        val = self.sum
        if ((self.round is not None) and (val is not None)):
            val = safe_round(val, self.round)
        return val
def test_receiver_properties():
    """Receiver validation: orientation handling on BaseRx, and Dipole
    rejecting missing/mis-shaped location arguments for both dc and sip."""
    xyz_1 = np.c_[(0.0, 0.0, 0.0)]
    xyz_2 = np.c_[(10.0, 0.0, 0.0)]
    times = np.logspace((- 4), (- 2), 3)
    rx = dc.receivers.BaseRx(xyz_1)
    assert (rx.orientation is None)
    rx = dc.receivers.BaseRx(xyz_1, orientation='x')
    assert (rx.orientation == 'x')
    # Dipole requires exactly two location arrays of matching shape.
    with pytest.raises(AttributeError):
        dc.receivers.Dipole(locations=None)
    with pytest.raises(ValueError):
        dc.receivers.Dipole(locations=[xyz_1, xyz_2, xyz_1])
    with pytest.raises(ValueError):
        dc.receivers.Dipole(locations=[xyz_1, np.r_[(xyz_2, xyz_1)]])
    # Same constraints hold for the time-domain (sip) dipole receiver.
    with pytest.raises(AttributeError):
        sip.receivers.Dipole(times=times, locations=None)
    with pytest.raises(ValueError):
        sip.receivers.Dipole(times=times, locations=[xyz_1, xyz_2, xyz_1])
    with pytest.raises(ValueError):
        sip.receivers.Dipole(times=times, locations=[xyz_1, np.r_[(xyz_2, xyz_1)]])
def test_contraction_perturbation():
    """Contraction should rewrite the input text but leave references alone."""
    augmenter = DataAugmenter(perturbations=[ContractionPerturbation()])
    original = Instance(
        id='id0',
        input=Input(text='She is a doctor, and I am a student'),
        references=[Reference(Output(text='he is a teacher'), tags=[])],
    )
    augmented: List[Instance] = augmenter.generate([original], include_original=True)
    assert len(augmented) == 2
    perturbed = augmented[1]
    assert perturbed.id == 'id0'
    assert perturbed.perturbation.name == 'contraction'
    assert perturbed.input.text == "She's a doctor, and I'm a student"
    assert perturbed.references[0].output.text == 'he is a teacher'
class ConsoleFormatter(logging.Formatter):
    """Formatter that prefixes every message with this module's current
    `global_indent` (read dynamically so indent changes take effect)."""

    def __init__(self, fmt=None, datefmt=None):
        super(ConsoleFormatter, self).__init__(fmt=fmt, datefmt=datefmt)

    def format(self, record=None):
        # Look the indent up at format time, not construction time.
        indent_width = sys.modules[__name__].global_indent
        record.msg = ' ' * indent_width + record.msg
        return super(ConsoleFormatter, self).format(record)
class BP1SNmat(SpectralMatrix):
    """Spectral matrix coupling a P1 test basis against an SN trial basis.

    Assembled from Jacobi recursion matrices with alpha = beta = -1/2
    (Chebyshev weight) and the trial basis' stencil matrix.
    """

    def assemble(self, method):
        """Build and return the nonzero diagonals of the matrix.

        `method` is unused here; it is part of the SpectralMatrix.assemble
        interface.
        """
        from shenfun.jacobi.recursions import Lmat, half, cn
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SN)
        N = (test[0].N - 2)
        # Stencil matrix of the SN trial basis, temporarily viewed as N x (N+2).
        K = trial[0].stencil_matrix()
        K.shape = (N, (N + 2))
        # Recursion matrix for the (-1/2, -1/2) Jacobi family.
        B2 = Lmat(2, 0, 2, N, (N + 2), (- half), (- half), cn)
        if (not test[0].is_scaled()):
            k = np.arange(test[0].N)
            # Compensate for the missing 1/(k+2) scaling of the unscaled basis.
            # NOTE(review): assumes the scaled basis divides by (k+2) — confirm
            # against the P1 basis definition.
            B2 = (SparseMatrix({0: (k[:(- 2)] + 2)}, (N, N)).diags('csr') * B2)
        M = (B2 * K.diags('csr').T)
        # Restore the stencil matrix shape; it may be shared with the basis object.
        K.shape = ((N + 2), (N + 2))
        # Only diagonals -2, 0, 2 and 4 of M are nonzero for this pairing.
        d = {(- 2): M.diagonal((- 2)), 0: M.diagonal(0), 2: M.diagonal(2), 4: M.diagonal(4)}
        return d
def move_directory(src_dir, dst_dir):
    """Move every entry of `src_dir` into `dst_dir`, creating `dst_dir` first.

    Args:
        src_dir: existing directory whose contents are moved.
        dst_dir: destination directory; created if it does not exist.
    """
    import shutil
    print('Moving to {}'.format(dst_dir))
    os.makedirs(dst_dir, exist_ok=True)
    for file_name in os.listdir(src_dir):
        # shutil.move handles cross-filesystem moves, unlike os.rename,
        # which raises OSError when src and dst are on different devices.
        shutil.move(os.path.join(src_dir, file_name), os.path.join(dst_dir, file_name))
def test_schema_consistency():
    """Parse a SQL query against the Spider schema and check consistency.

    Reads the data directory from sys.argv[1] (this is a script-style test).
    """
    data_dir = sys.argv[1]
    db_name = 'flight_2'
    schema_graphs = load_schema_graphs_spider(data_dir, 'spider')
    schema = schema_graphs[db_name]
    schema.pretty_print()
    # Alternative queries previously exercised (kept for reference):
    #   SELECT singer.Name FROM concert JOIN singer_in_concert ON singer_in_concert.Singer_ID = singer.Singer_ID WHERE concert.Year = 2014
    #   SELECT singer.concert FROM singer WHERE singer.age > (SELECT avg(singer.age) FROM singer)
    #   SELECT singer.Name, singer.Country FROM singer INTERSECT SELECT singer.Name, singer.Country, singer.Age FROM singer WHERE singer.Age = "?" ORDER BY singer.Age DESC
    #   SELECT COUNT(*) FROM singer
    #   SELECT concert.concert_Name, concert.Theme, COUNT(*) FROM concert GROUP BY concert.Theme, concert.Theme
    #   SELECT T2.name FROM singer_in_concert AS T1 JOIN singer AS T2 ON T1.singer_id = T2.singer_id JOIN concert AS T3 ON T1.concert_id = T3.concert_id WHERE T3.year = 2014
    in_sql = 'SELECT AIRPORTS.AirportCode FROM AIRPORTS JOIN FLIGHTS ON AIRPORTS.AirportCode = FLIGHTS.DestAirport OR AIRPORTS.AirportCode = FLIGHTS.SourceAirport GROUP BY AIRPORTS.AirportCode ORDER BY COUNT(*) DESC LIMIT 1'
    ast = parse(in_sql)
    check_schema_consistency(ast, schema, verbose=True)
class EvalConsumer(Consumer):
    """Builds the evaluation-time placeholder graph for the embedding network.

    NOTE(review): written against TensorFlow 1.x graph mode (tf.placeholder);
    will not run under TF2 eager execution without compat shims — confirm.
    """

    def __init__(self, dataset, data_sequencer, support, disk_images=True):
        # dataset supplies img_shape, time_horizon, state_size and action_size.
        self.dataset = dataset
        # data_sequencer.load turns raw trajectory tensors into frame sequences.
        self.data_sequencer = data_sequencer
        # Number of support trajectories fed per task.
        self.support = support
        # When True, images are fed as file-path strings and decoded by the
        # sequencer; otherwise raw float image tensors are fed directly.
        self.disk_images = disk_images
        super().__init__()

    def consume(self, inputs):
        """Create the placeholders and stacked support tensors.

        Returns a dict mapping tensor names to placeholders / derived tensors;
        `inputs` is unused here (kept for the Consumer interface).
        """
        if self.disk_images:
            input_image = tf.placeholder(tf.string, (self.support,))
        else:
            input_image = tf.placeholder(tf.float32, ((None, None) + self.dataset.img_shape))
        input_states = tf.placeholder(tf.float32, (self.support, self.dataset.time_horizon, self.dataset.state_size))
        input_outputs = tf.placeholder(tf.float32, (self.support, self.dataset.time_horizon, self.dataset.action_size))
        # Control-network inputs: a single timestep per trajectory.
        input_ctr_image = tf.placeholder(tf.float32, ((None, 1) + self.dataset.img_shape))
        input_ctr_state = tf.placeholder(tf.float32, (None, 1, self.dataset.state_size))
        training = tf.placeholder_with_default(False, None)
        # Load and preprocess each support trajectory, then stack along a new
        # leading (task) axis of size 1.
        (stacked_embnet_images, bs, cs) = ([], [], [])
        for i in range(self.support):
            (embnet_images, embnet_states, embnet_outputs) = self.data_sequencer.load(input_image[i], input_states[i], input_outputs[i])
            embnet_images = utils.preprocess(embnet_images)
            stacked_embnet_images.append(embnet_images)
            bs.append(embnet_states)
            cs.append(embnet_outputs)
        embnet_images = tf.stack(stacked_embnet_images)
        embnet_images = tf.expand_dims(embnet_images, axis=0)
        embnet_states = tf.stack(bs)
        embnet_states = tf.expand_dims(embnet_states, axis=0)
        embnet_outputs = tf.stack(cs)
        embnet_outputs = tf.expand_dims(embnet_outputs, axis=0)
        # Pin the static shapes (task, support, frames, ...) for graph building.
        embnet_images.set_shape(((None, None, self.data_sequencer.frames) + self.dataset.img_shape))
        embnet_states.set_shape((None, None, self.data_sequencer.frames, self.dataset.state_size))
        embnet_outputs.set_shape((None, None, self.data_sequencer.frames, self.dataset.action_size))
        return {'embnet_images': embnet_images, 'embnet_states': embnet_states, 'embnet_outputs': embnet_outputs, 'input_image_files': input_image, 'input_states': input_states, 'input_outputs': input_outputs, 'ctrnet_images': input_ctr_image, 'ctrnet_states': input_ctr_state, 'training': training, 'support': tf.placeholder_with_default(self.support, None), 'query': tf.placeholder_with_default(0, None)}
def get_random_predictions(reference_file, preds_per_sent=3, do_eval=False):
    """Generate random cause-effect / signal span predictions per sentence.

    Writes predictions as JSON lines to outs/submission_random_st2.json and,
    when do_eval is True, scores them with seqeval and prints the result.
    """
    ref_df = pd.read_csv(reference_file)
    if do_eval:
        # NOTE(review): these two metrics only ever receive the LAST random
        # prediction per sentence (see the add() calls below) and are then
        # re-created from scratch before scoring — the adds here appear to be
        # dead work; confirm intent.
        ce_metric = load_metric('seqeval')
        sig_metric = load_metric('seqeval')
        refs = [get_BIO_all(i) for i in ref_df['text_w_pairs']]
    else:
        refs = [get_BIO_all(i) for i in ref_df['text']]
    pred_list = []
    for (i, ref) in enumerate(refs):
        (tokens, ce_ref, sig_ref) = ref
        p = []
        # Draw preds_per_sent independent random predictions for this sentence.
        for _ in range(preds_per_sent):
            ce_pred = get_random_ce_pred(ce_ref, verbose=False)
            sig_pred = get_random_sig_pred(sig_ref, verbose=False)
            p.append(get_text_w_pairs(tokens, ce_pred, sig_pred))
        pred_list.append({'index': i, 'prediction': p})
        if do_eval:
            # Only the last ce_pred/sig_pred of the inner loop is recorded.
            ce_metric.add(prediction=ce_pred, reference=ce_ref)
            sig_metric.add(prediction=sig_pred, reference=sig_ref)
    if do_eval:
        # NOTE(review): get_BIO_all is applied to the prediction LIST here,
        # while above it is applied to single strings — verify it accepts both.
        preds = [get_BIO_all(i['prediction']) for i in pred_list]
        # Group sentences belonging to the same (corpus, doc, sent) example so
        # multi-example sentences can be scored with their best combination.
        grouped_df = ref_df.copy()
        grouped_df['id'] = [[i] for i in grouped_df.index]
        grouped_df = grouped_df.groupby(['corpus', 'doc_id', 'sent_id'])[['eg_id', 'id']].agg({'eg_id': 'count', 'id': 'sum'}).reset_index()
        grouped_df = grouped_df[(grouped_df['eg_id'] > 1)]
        req_combi_ids = [item for sublist in grouped_df['id'] for item in sublist]
        regular_ids = list((set(range(len(preds))) - set(req_combi_ids)))
        # Fresh metrics for the actual scoring pass (discards the adds above).
        ce_metric = load_metric('seqeval')
        sig_metric = load_metric('seqeval')
        for i in regular_ids:
            (_, ce_ref, sig_ref) = refs[i]
            (_, ce_pred, sig_pred) = preds[i]
            ce_metric.add(prediction=ce_pred, reference=ce_ref)
            sig_metric.add(prediction=sig_pred, reference=sig_ref)
        final_result = format_results(ce_metric, sig_metric)
        # Multi-example sentences: keep only the best-scoring combination.
        for (_, row) in grouped_df.iterrows():
            best_results = keep_best_combinations_only(row, refs, preds)
            final_result = combine_dicts(final_result, best_results)
        print(final_result)
    save_file_path = 'outs/submission_random_st2.json'
    with open(save_file_path, 'w') as fp:
        fp.write('\n'.join((json.dumps(i) for i in pred_list)))
def collect_dataframes(run_id_to_filename_dictionary):
    """Load each run's CSV file into a DataFrame.

    Args:
        run_id_to_filename_dictionary: mapping of run id -> CSV file path.

    Returns:
        dict mapping each run id to the DataFrame read from its file.
    """
    return {
        run_id: pd.read_csv(filename)
        for run_id, filename in run_id_to_filename_dictionary.items()
    }
def get_auto_soundness_ret_types_offsets_and_casts(func: LeanFunctionInfo, lean_info: LeanProgramInfo, cast_end_separator: str='') -> List[Tuple[(CairoType, int, str)]]:
    """Collect (type, offset, cast) triples for a function's return arguments.

    Explicit return-argument entries are computed first; the implicit-args
    entries are then offset past them and prepended to the result.
    """
    end_offset = 0
    explicit_offsets_etc = lean_info.struct_defs.get_offsets_and_casts_by_types(func.func_scope, func.ret_arg_types, end_offset, lean_info.open_namespaces, cast_end_separator)
    if (len(explicit_offsets_etc) > 0):
        # NOTE(review): the running offset is taken from the FIRST explicit
        # entry — presumably offsets are returned furthest-first; confirm
        # against get_offsets_and_casts_by_types.
        (_, end_offset, _) = explicit_offsets_etc[0]
    implicit_offsets_etc = lean_info.struct_defs.get_arg_types_offsets_and_casts(func.func_scope, func.implicit_args_struct, end_offset, lean_info.open_namespaces, cast_end_separator)
    # implicit_offsets_etc appears to be a mapping; its values precede the
    # explicit entries in the returned list.
    return (list(implicit_offsets_etc.values()) + explicit_offsets_etc)
def test_superb_sid():
    """End-to-end smoke test of the SuperbSID problem on tiny synthetic audio."""
    with tempfile.TemporaryDirectory() as tempdir:
        # Five fake wav files of 10, 2, 1, 8 and 5 seconds.
        with pseudo_audio([10, 2, 1, 8, 5]) as (wav_paths, num_samples):

            class TestSID(SuperbSID):
                def default_config(self) -> dict:
                    # Disable the real prepare_data arguments; the override
                    # below generates its own CSVs.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool=False):
                    # Build identical train/valid/test splits from the fake audio.
                    # NOTE(review): target_dir is annotated str but used with the
                    # `/` operator — assumes the framework passes a pathlib.Path.
                    ids = [Path(path).stem for path in wav_paths]
                    label = ['a', 'b', 'a', 'c', 'd']
                    # None start/end means "use the whole utterance".
                    start_secs = [0.0, 0.1, 0.2, None, 0.0]
                    end_secs = [5.2, 1.0, 0.3, None, 4.9]
                    df = pd.DataFrame(data={'id': ids, 'wav_path': wav_paths, 'label': label, 'start_sec': start_secs, 'end_sec': end_secs})
                    train_csv = (target_dir / 'train.csv')
                    valid_csv = (target_dir / 'valid.csv')
                    test_csv = (target_dir / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])

            # Run a minimal 4-step training on CPU with fbank features.
            problem = TestSID()
            config = problem.default_config()
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
def test_from_cupy():
    """Round-trip CuPy arrays through ak.from_cupy and compare element-wise."""
    vec = cp.arange(10)
    mat = cp.array([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8]])

    ak_vec = ak.from_cupy(vec)
    ak_mat = ak.from_cupy(mat)

    # 1-D: every element must survive the conversion unchanged.
    for idx in range(10):
        assert ak_vec[idx] == vec[idx]
    # 2-D: compare each cell individually.
    for row in range(4):
        for col in range(2):
            assert ak_mat[row][col] == mat[row][col]
def get_identifier(s):
    """Map an 'identifier=<name>' token to 'SimpleName_<name>'.

    Args:
        s: raw token string.

    Returns:
        'SimpleName_' + the part after 'identifier=', or None when `s`
        does not start with the prefix.
    """
    prefix = 'identifier='
    # startswith + len(prefix) replaces the magic slice s[:11], which silently
    # broke if the prefix ever changed length.
    if s.startswith(prefix):
        return 'SimpleName_' + s[len(prefix):]
    return None
def integrate(expression, v=None, a=None, b=None, algorithm=None, hold=False):
    """Integrate `expression` with respect to `v`, optionally over [a, b].

    When `algorithm` is given, dispatch to that registered integrator;
    otherwise fall back to the symbolic indefinite/definite integral.
    Raises ValueError for an unknown algorithm name.
    """
    expression, v, a, b = _normalize_integral_input(expression, v, a, b)

    if algorithm is not None:
        integrator = available_integrators.get(algorithm)
        if not integrator:
            raise ValueError('Unknown algorithm: %s' % algorithm)
        return integrator(expression, v, a, b)

    # No bounds -> indefinite integral; otherwise definite over [a, b].
    if a is None:
        return indefinite_integral(expression, v, hold=hold)
    return definite_integral(expression, v, a, b, hold=hold)
def train(args, model, tokenizer, ngram_dict, processor, label_list):
    """Train a ZEN-style sequence-labeling model.

    Handles fp16 (apex), distributed (DDP) and multi-GPU (DataParallel)
    setups, gradient accumulation, and periodic checkpoint saving.
    """
    train_dataset = load_examples(args, tokenizer, ngram_dict, processor, label_list, mode='train')
    # --- model placement / parallelism -------------------------------------
    if args.fp16:
        model.half()
    if (args.local_rank != (- 1)):
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError('Please install apex from to use distributed and fp16 training.')
        model = DDP(model)
    elif (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    # Total optimizer steps across all epochs (after gradient accumulation).
    num_train_optimization_steps = (int(((len(train_dataset) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
    if (args.local_rank != (- 1)):
        num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
    # --- optimizer setup: no weight decay on biases / LayerNorm ------------
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError('Please install apex from to use distributed and fp16 training.')
        optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
        if (args.loss_scale == 0):
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        # fp16 path schedules the LR manually inside the step loop below.
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
    global_step = 0
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Batch size = %d', args.train_batch_size)
    logger.info('  Num steps = %d', num_train_optimization_steps)
    if (args.local_rank == (- 1)):
        train_sampler = RandomSampler(train_dataset)
    else:
        train_sampler = DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # NOTE(review): best_f1/best_epoch/epoch_after_best_one are initialized
    # but never updated in this function — presumably early stopping was
    # planned or lives in removed code; confirm.
    best_f1 = (- 1)
    best_epoch = (- 1)
    epoch_after_best_one = 3
    # --- training loop ------------------------------------------------------
    for epoch_num in trange(int(args.num_train_epochs), desc='Epoch'):
        model.train()
        tr_loss = 0
        (nb_tr_examples, nb_tr_steps) = (0, 0)
        for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
            batch = tuple((t.to(args.device) for t in batch))
            (input_ids, input_mask, segment_ids, label_ids, ngram_ids, ngram_positions, ngram_lengths, ngram_seg_ids, ngram_masks, valid_ids, l_mask) = batch
            loss = model(input_ids, token_type_ids=None, attention_mask=None, labels=label_ids, valid_ids=valid_ids, attention_mask_label=None, ngram_ids=ngram_ids, ngram_positions=ngram_positions)
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            # Step the optimizer only every gradient_accumulation_steps batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    # Manual warmup-linear LR schedule for the fp16 optimizer.
                    lr_this_step = (args.learning_rate * warmup_linear((global_step / num_train_optimization_steps), args.warmup_proportion))
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                # Periodic checkpointing from the main process only.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    save_zen_model(output_dir, model, tokenizer, ngram_dict, args)
def kmax_pooling(inputs, dim, k):
    """Keep the k largest entries along `dim`, preserving their original order.

    Args:
        inputs: tensor to pool.
        dim: dimension along which the top-k values are taken.
        k: number of values to keep.

    Returns:
        A tensor like `inputs` but with size k along `dim`.
    """
    top_indices = inputs.topk(k, dim=dim)[1]
    # Sorting the indices restores the input's original ordering.
    ordered_indices = top_indices.sort(dim=dim)[0]
    return inputs.gather(dim, ordered_indices)
class MnistShardDescriptor(ShardDescriptor):
    """Shard descriptor serving a rank's slice of the MNIST dataset."""

    def __init__(self, rank_worldsize: str='1, 1', **kwargs) -> None:
        """Parse 'rank, worldsize' and download/partition the data."""
        (self.rank, self.worldsize) = tuple((int(num) for num in rank_worldsize.split(',')))
        ((x_train, y_train), (x_val, y_val)) = self.download_data()
        self.data_by_type = {'train': (x_train, y_train), 'val': (x_val, y_val)}

    def get_shard_dataset_types(self) -> List[Dict[(str, Any)]]:
        # NOTE(review): actually returns a list of str keys ('train', 'val'),
        # not List[Dict] as annotated — annotation looks wrong; confirm callers.
        return list(self.data_by_type)

    def get_dataset(self, dataset_type: str='train') -> MnistShardDataset:
        """Return this rank's dataset of the requested split ('train'/'val')."""
        if (dataset_type not in self.data_by_type):
            raise Exception(f'Wrong dataset type: {dataset_type}')
        return MnistShardDataset(*self.data_by_type[dataset_type], data_type=dataset_type, rank=self.rank, worldsize=self.worldsize)

    def sample_shape(self) -> List[str]:
        """Shape of one input sample (28x28 image), as strings."""
        return ['28', '28']

    def target_shape(self) -> List[str]:
        """Shape of one target (a single label), as strings."""
        return ['1']

    def dataset_description(self) -> str:
        """Human-readable description including this shard's rank."""
        return f'Mnist dataset, shard number {self.rank} out of {self.worldsize}'

    def download_data(self) -> Tuple[(Tuple[(Any, Any)], Tuple[(Any, Any)])]:
        """Download MNIST and return ((x_train, y_train), (x_val, y_val))."""
        (train_data, val_data) = (datasets.MNIST('data', train=train, download=True) for train in (True, False))
        # NOTE(review): .train_data/.train_labels/.test_data/.test_labels are
        # deprecated torchvision attributes (superseded by .data/.targets);
        # still works but emits warnings — confirm torchvision version.
        (x_train, y_train) = (train_data.train_data, train_data.train_labels)
        (x_val, y_val) = (val_data.test_data, val_data.test_labels)
        print('Mnist data was loaded!')
        return ((x_train, y_train), (x_val, y_val))
def register_Ns3LteEnbComponentCarrierManager_methods(root_module, cls):
    """Register pybindgen bindings for ns3::LteEnbComponentCarrierManager.

    Generated-style binding code: declares the copy/default constructors and
    the SAP accessor/mutator methods of the component carrier manager.
    `root_module` is unused here but kept for the generator's uniform signature.
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::LteEnbComponentCarrierManager const &', 'arg0')])
    cls.add_constructor([])
    # SAP getters.
    cls.add_method('GetLteCcmMacSapUser', 'ns3::LteCcmMacSapUser *', [], is_virtual=True)
    cls.add_method('GetLteCcmRrcSapProvider', 'ns3::LteCcmRrcSapProvider *', [], is_virtual=True)
    cls.add_method('GetLteMacSapProvider', 'ns3::LteMacSapProvider *', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # SAP setters / configuration.
    cls.add_method('SetCcmMacSapProviders', 'bool', [param('uint8_t', 'componentCarrierId'), param('ns3::LteCcmMacSapProvider *', 'sap')], is_virtual=True)
    cls.add_method('SetLteCcmRrcSapUser', 'void', [param('ns3::LteCcmRrcSapUser *', 's')], is_virtual=True)
    cls.add_method('SetMacSapProvider', 'bool', [param('uint8_t', 'componentCarrierId'), param('ns3::LteMacSapProvider *', 'sap')], is_virtual=True)
    cls.add_method('SetNumberOfComponentCarriers', 'void', [param('uint16_t', 'noOfComponentCarriers')], is_virtual=True)
    # Protected virtuals (DoReportUeMeas is pure virtual).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoReportUeMeas', 'void', [param('uint16_t', 'rnti'), param('ns3::LteRrcSap::MeasResults', 'measResults')], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction (PVT-style)
    and an optional learned transform over the attention maps.

    Args:
        dim: embedding dimension.
        head: number of attention heads.
        sr_ratio: spatial-reduction ratio for keys/values (>1 enables a
            strided conv that shrinks the key/value sequence).
    """

    def __init__(self, dim, head, sr_ratio=1):
        super().__init__()
        self.head = head
        self.sr_ratio = sr_ratio
        self.scale = (dim // head) ** -0.5
        self.q = nn.Linear(dim, dim, bias=True)
        self.kv = nn.Linear(dim, dim * 2, bias=True)
        self.proj = nn.Linear(dim, dim)
        if sr_ratio > 1:
            # Depthwise strided conv reduces the spatial resolution of k/v.
            self.sr = nn.Conv2d(dim, dim, sr_ratio + 1, sr_ratio, sr_ratio // 2, groups=dim)
            self.sr_norm = nn.LayerNorm(dim)
        # With >1 head, mix attention maps across heads before/after softmax.
        self.apply_transform = head > 1
        if self.apply_transform:
            self.transform_conv = nn.Conv2d(head, head, 1, 1)
            self.transform_norm = nn.InstanceNorm2d(head)

    def forward(self, x: Tensor, H, W) -> Tensor:
        """x: (B, N, C) tokens; H, W: spatial dims with N == H * W
        (only required when sr_ratio > 1)."""
        (B, N, C) = x.shape
        q = self.q(x).reshape(B, N, self.head, C // self.head).permute(0, 2, 1, 3)
        if self.sr_ratio > 1:
            # Reduce the token grid before computing keys/values.
            x = x.permute(0, 2, 1).reshape(B, C, H, W)
            x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1)
            x = self.sr_norm(x)
        (k, v) = self.kv(x).reshape(B, -1, 2, self.head, C // self.head).permute(2, 0, 3, 1, 4)
        # BUG FIX: the matmul operators were missing ("q k.transpose(...)" and
        # "(attn v)" were syntax errors); restored as `@` matrix products.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if self.apply_transform:
            attn = self.transform_conv(attn)
            attn = attn.softmax(dim=-1)
            attn = self.transform_norm(attn)
        else:
            attn = attn.softmax(dim=-1)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        return x
def _GetPrincipleQuantumNumber(atNum):
if (atNum <= 2):
return 1
elif (atNum <= 10):
return 2
elif (atNum <= 18):
return 3
elif (atNum <= 36):
return 4
elif (atNum <= 54):
return 5
elif (atNum <= 86):
return 6
else:
return 7 |
class data_reader():
    """Reads the PAMAP2 activity-recognition dataset and caches it to HDF5."""

    def __init__(self, train_test_files, use_columns, output_file_name):
        # NOTE(review): when output_file_name already exists, neither the
        # cache is loaded nor self.data/self.idToLabel set, so train()/test()
        # will raise AttributeError — looks like a missing else-branch that
        # should read the HDF5 cache back; confirm.
        if (not os.path.exists(output_file_name)):
            (self.data, self.idToLabel) = self.readPamap2(train_test_files, use_columns)
            self.save_data(output_file_name)

    def save_data(self, output_file_name):
        """Write self.data (split -> field -> array) into an HDF5 file."""
        # NOTE(review): h5py.File without an explicit mode — older h5py
        # defaulted to 'a'; newer versions require a mode. Confirm h5py version.
        f = h5py.File(output_file_name)
        for key in self.data:
            f.create_group(key)
            for field in self.data[key]:
                f[key].create_dataset(field, data=self.data[key][field])
        f.close()

    def train(self):
        """Training split: {'inputs': ..., 'targets': ...}."""
        return self.data['train']

    def test(self):
        """Test split: {'inputs': ..., 'targets': ...}."""
        return self.data['test']

    def readPamap2(self, train_test_files, use_columns):
        """Read every split's raw files; return (data dict, id->label list)."""
        files = train_test_files
        label_map = [(1, 'lying'), (2, 'sitting'), (3, 'standing'), (4, 'walking'), (5, 'running'), (6, 'cycling'), (7, 'Nordic walking'), (9, 'watching TV'), (10, 'computer work'), (11, 'car driving'), (12, 'ascending stairs'), (13, 'descending stairs'), (16, 'vacuum cleaning'), (17, 'ironing'), (18, 'folding laundry'), (19, 'house cleaning'), (20, 'playing soccer'), (24, 'rope jumping')]
        # Map the raw activity id (as a string) to a dense 0-based index.
        labelToId = {str(x[0]): i for (i, x) in enumerate(label_map)}
        idToLabel = [x[1] for x in label_map]
        cols = use_columns
        data = {dataset: self.readPamap2Files(files[dataset], cols, labelToId) for dataset in ('train', 'test', 'validation')}
        return (data, idToLabel)

    def readPamap2Files(self, filelist, cols, labelToId):
        """Parse raw PAMAP2 protocol files into inputs/targets arrays.

        Skips transient-activity rows (activity id '0') and rows with 9 or
        more NaN sensor values.
        """
        data = []
        labels = []
        for (i, filename) in enumerate(filelist):
            with open(('data/raw/pamap2/PAMAP2_Dataset/Protocol/%s' % filename), 'r') as f:
                reader = csv.reader(f, delimiter=' ')
                for line in reader:
                    elem = []
                    # Column 1 holds the activity id; '0' marks transient data.
                    if (line[1] == '0'):
                        continue
                    for ind in cols:
                        elem.append(line[ind])
                    if (sum([(x == 'NaN') for x in elem]) < 9):
                        # elem[0] is the label; the rest are sensor readings
                        # scaled by 1/1000. NOTE(review): labels are shifted
                        # by +1 below — presumably to reserve 0; confirm
                        # downstream expectation.
                        data.append([(float(x) / 1000) for x in elem[:(- 1)]])
                        labels.append(labelToId[elem[0]])
        return {'inputs': np.asarray(data), 'targets': (np.asarray(labels, dtype=int) + 1)}
class TransformerBaseline(nn.Module):
    """Transformer baseline over per-post word embeddings.

    Pools words within each post, projects query/key/value, adds a time-delay
    embedding, and classifies the transformer's pooled output.
    """

    def init_weights(layer):
        # Xavier init for linear layers (used via .apply()).
        if (type(layer) == nn.Linear):
            nn.init.xavier_normal_(layer.weight)

    def __init__(self, config):
        super(TransformerBaseline, self).__init__()
        self.config = config
        self.transformer_post = Transformer.Transformer(self.config, self.config.n_mha_layers, self.config.d_model, self.config.n_head)
        # Separate projection stacks for query / key / value.
        self.emb_layer_query = nn.ModuleList([nn.Linear(self.config.emb_dim, self.config.emb_dim) for _ in range(self.config.num_emb_layers)])
        self.emb_layer_val = nn.ModuleList([nn.Linear(self.config.emb_dim, self.config.emb_dim) for _ in range(self.config.num_emb_layers)])
        self.emb_layer_key = nn.ModuleList([nn.Linear(self.config.emb_dim, self.config.emb_dim) for _ in range(self.config.num_emb_layers)])
        self.layer_norm = nn.LayerNorm(normalized_shape=self.config.emb_dim)
        self.final_layer = nn.Sequential(nn.Linear(self.config.d_model, self.config.num_classes), nn.LogSoftmax(dim=1))
        # NOTE(review): weights are initialized with Transformer.init_weights,
        # not this class's init_weights — confirm which one is intended.
        self.emb_layer_query.apply(Transformer.init_weights)
        self.emb_layer_val.apply(Transformer.init_weights)
        self.emb_layer_key.apply(Transformer.init_weights)
        self.final_layer.apply(Transformer.init_weights)

    def forward(self, X, time_delay):
        """X: (batch, num_posts, num_words, emb_dim); time_delay is added to
        q/k/v after projection. Returns (log-probs, attention weights)."""
        (batch_size, num_posts, num_words, emb_dim) = X.shape
        # Max-pool over words to get one vector per post.
        X = X.view(-1, num_words, emb_dim)
        X = X.permute(0, 2, 1).contiguous()
        X = F.adaptive_max_pool1d(X, 1).squeeze(-1)
        # BUG FIX: `num_tweets` was undefined (NameError); the post dimension
        # is `num_posts`, unpacked from X.shape above.
        X = X.view(batch_size, num_posts, -1)
        query = X
        key = X
        val = X
        for i in range(self.config.num_emb_layers):
            query = self.layer_norm(self.emb_layer_query[i](query))
            key = self.layer_norm(self.emb_layer_key[i](key))
            val = self.layer_norm(self.emb_layer_val[i](val))
        # Inject temporal information before attention.
        query = (query + time_delay)
        key = (key + time_delay)
        val = (val + time_delay)
        (self_atten_output, self_atten_weights_dict) = self.transformer_post(query, key, val)
        # Max-pool over posts, then classify.
        self_atten_output = self_atten_output.permute(0, 2, 1).contiguous()
        self_atten_output = F.adaptive_max_pool1d(self_atten_output, 1).squeeze(-1)
        output = self.final_layer(self_atten_output)
        return (output, self_atten_weights_dict)

    def __repr__(self):
        return str(vars(self))
def get_mb_mpo_agent(dim_state, dim_action, params, reward_model, transformations, action_scale, input_transform=None, termination_model=None, initial_distribution=None):
    """Build a model-based MPO agent from hyper-parameters.

    Assembles the learned dynamics model, value function and policy, wires
    their optimizers, and returns a configured MBMPOAgent.
    """
    dynamical_model = _get_model(dim_state, dim_action, params, input_transform, transformations)
    model_optimizer = optim.Adam(dynamical_model.parameters(), lr=params.model_opt_lr, weight_decay=params.model_opt_weight_decay)
    value_function = _get_value_function(dim_state, params, input_transform)
    policy = _get_nn_policy(dim_state, dim_action, params, action_scale=action_scale, input_transform=input_transform)
    # Policy and value network must consume the same (transformed) state size.
    assert (policy.nn.hidden_layers[0].in_features == value_function.nn.hidden_layers[0].in_features)
    # Custom initialization of the dynamics model heads.
    zero_bias(dynamical_model)
    init_head_weight(dynamical_model)
    # One optimizer jointly updates policy and value function (MPO-style).
    optimizer = optim.Adam(chain(policy.parameters(), value_function.parameters()), lr=params.mpo_opt_lr, weight_decay=params.mpo_opt_weight_decay)
    model_name = dynamical_model.base_model.name
    # Tensorboard/log comment tag identifying this configuration.
    comment = f'{model_name} {params.exploration.capitalize()} {params.action_cost}'
    agent = MBMPOAgent(policy=policy, value_function=value_function, reward_model=reward_model, dynamical_model=dynamical_model, model_optimizer=model_optimizer, model_learn_num_iter=params.model_learn_num_iter, model_learn_batch_size=params.model_learn_batch_size, bootstrap=(not params.not_bootstrap), optimizer=optimizer, termination_model=termination_model, plan_horizon=params.plan_horizon, plan_samples=params.plan_samples, plan_elites=params.plan_elites, mpo_value_learning_criterion=nn.MSELoss, mpo_epsilon=params.mpo_epsilon, mpo_epsilon_mean=params.mpo_epsilon_mean, mpo_epsilon_var=params.mpo_epsilon_var, mpo_regularization=params.mpo_regularization, mpo_num_action_samples=params.mpo_num_action_samples, mpo_num_iter=params.mpo_num_iter, mpo_gradient_steps=params.mpo_gradient_steps, mpo_batch_size=params.mpo_batch_size, mpo_target_update_frequency=params.mpo_target_update_frequency, sim_num_steps=params.sim_num_steps, sim_initial_states_num_trajectories=params.sim_initial_states_num_trajectories, sim_initial_dist_num_trajectories=params.sim_initial_dist_num_trajectories, sim_memory_num_trajectories=params.sim_memory_num_trajectories, sim_num_subsample=params.sim_num_subsample, sim_max_memory=params.sim_max_memory, sim_refresh_interval=params.sim_refresh_interval, thompson_sampling=(params.exploration == 'thompson'), initial_distribution=initial_distribution, max_memory=params.max_memory, gamma=params.gamma, comment=comment)
    return agent
def ResNet34(num_classes=10):
    """Build a ResNet-34: BasicBlock with the standard (3, 4, 6, 3) stage
    depths and (64, 128, 256, 512) channel widths."""
    stage_blocks = [3, 4, 6, 3]
    stage_filters = [64, 128, 256, 512]
    return ResNet(BasicBlock, layers=stage_blocks, filters=stage_filters, num_classes=num_classes)
class DatasetEvaluator():
    """Abstract base for dataset evaluators.

    Subclasses override any subset of these hooks; every default is a no-op.
    """

    def reset(self):
        """Clear accumulated state before a new evaluation run."""
        pass

    def preprocess_inputs(self, inputs):
        """Optionally transform inputs before they reach the model."""
        pass

    def process(self, inputs, outputs):
        """Accumulate statistics from one batch of inputs/outputs."""
        pass

    def evaluate(self):
        """Compute and return the final metrics."""
        pass
def AztecDiamondGraph(n):
    """Return the Aztec Diamond graph of order n.

    Built as the induced subgraph of a 2n x 2n grid restricted to the
    diamond-shaped region; order 0 gives the empty graph.
    """
    from sage.graphs.generators.basic import Grid2dGraph
    if n:
        N = (2 * n)
        G = Grid2dGraph(N, N)
        # Keep grid vertices (i, j) inside the diamond: the two inequalities
        # bound j between the diagonals i - n .. i + n and their reflections.
        H = G.subgraph([(i, j) for i in range(N) for j in range(N) if (((i - n) <= j <= (n + i)) and (((n - 1) - i) <= j <= (((3 * n) - i) - 1)))])
    else:
        H = Graph()
    H.rename('Aztec Diamond graph of order {}'.format(n))
    return H
class GroupOps(object):
    """Auto-generated (symforce-style) group operations for sym.Unit3.

    Elements are stored as 4-component quaternions [x, y, z, w]. The code is
    machine-generated from symbolic expressions — the _tmp* intermediates are
    common subexpressions and must not be hand-edited. Jacobians are 2x2
    because Unit3's tangent space is 2-dimensional.
    """

    def identity():
        """Identity element: the quaternion (0, 0, 0, 1)."""
        _res = ([0.0] * 4)
        _res[0] = 0
        _res[1] = 0
        _res[2] = 0
        _res[3] = 1
        return sym.Unit3.from_storage(_res)

    def inverse(a):
        """Quaternion conjugate of `a` (negated vector part)."""
        _a = a.data
        _res = ([0.0] * 4)
        _res[0] = (- _a[0])
        _res[1] = (- _a[1])
        _res[2] = (- _a[2])
        _res[3] = _a[3]
        return sym.Unit3.from_storage(_res)

    def compose(a, b):
        """Hamilton product a * b."""
        _a = a.data
        _b = b.data
        _res = ([0.0] * 4)
        _res[0] = ((((_a[0] * _b[3]) + (_a[1] * _b[2])) - (_a[2] * _b[1])) + (_a[3] * _b[0]))
        _res[1] = (((((- _a[0]) * _b[2]) + (_a[1] * _b[3])) + (_a[2] * _b[0])) + (_a[3] * _b[1]))
        _res[2] = ((((_a[0] * _b[1]) - (_a[1] * _b[0])) + (_a[2] * _b[3])) + (_a[3] * _b[2]))
        _res[3] = (((((- _a[0]) * _b[0]) - (_a[1] * _b[1])) - (_a[2] * _b[2])) + (_a[3] * _b[3]))
        return sym.Unit3.from_storage(_res)

    def between(a, b):
        """Relative element a^{-1} * b."""
        _a = a.data
        _b = b.data
        _res = ([0.0] * 4)
        _res[0] = (((((- _a[0]) * _b[3]) - (_a[1] * _b[2])) + (_a[2] * _b[1])) + (_a[3] * _b[0]))
        _res[1] = ((((_a[0] * _b[2]) - (_a[1] * _b[3])) - (_a[2] * _b[0])) + (_a[3] * _b[1]))
        _res[2] = (((((- _a[0]) * _b[1]) + (_a[1] * _b[0])) - (_a[2] * _b[3])) + (_a[3] * _b[2]))
        _res[3] = ((((_a[0] * _b[0]) + (_a[1] * _b[1])) + (_a[2] * _b[2])) + (_a[3] * _b[3]))
        return sym.Unit3.from_storage(_res)

    def inverse_with_jacobian(a):
        """Inverse of `a` plus the 2x2 Jacobian w.r.t. `a`'s tangent space."""
        _a = a.data
        _tmp0 = (_a[0] ** 2)
        _tmp1 = (_a[1] ** 2)
        _tmp2 = ((_a[2] ** 2) - (_a[3] ** 2))
        _tmp3 = ((2 * _a[2]) * _a[3])
        _tmp4 = ((2 * _a[0]) * _a[1])
        _res = ([0.0] * 4)
        _res[0] = (- _a[0])
        _res[1] = (- _a[1])
        _res[2] = (- _a[2])
        _res[3] = _a[3]
        _res_D_a = numpy.zeros((2, 2))
        _res_D_a[(0, 0)] = ((_tmp0 - _tmp1) + _tmp2)
        _res_D_a[(1, 0)] = ((- _tmp3) + _tmp4)
        _res_D_a[(0, 1)] = (_tmp3 + _tmp4)
        _res_D_a[(1, 1)] = (((- _tmp0) + _tmp1) + _tmp2)
        return (sym.Unit3.from_storage(_res), _res_D_a)

    def compose_with_jacobians(a, b):
        """Compose a * b and return the result with Jacobians w.r.t. a and b."""
        _a = a.data
        _b = b.data
        # Common subexpressions for the product components.
        _tmp0 = (_a[3] * _b[0])
        _tmp1 = (_a[2] * _b[1])
        _tmp2 = (_a[0] * _b[3])
        _tmp3 = (_a[1] * _b[2])
        _tmp4 = (((_tmp0 - _tmp1) + _tmp2) + _tmp3)
        _tmp5 = (_a[3] * _b[1])
        _tmp6 = (_a[2] * _b[0])
        _tmp7 = (_a[0] * _b[2])
        _tmp8 = (_a[1] * _b[3])
        _tmp9 = (((_tmp5 + _tmp6) - _tmp7) + _tmp8)
        _tmp10 = (_a[3] * _b[2])
        _tmp11 = (_a[2] * _b[3])
        _tmp12 = (_a[0] * _b[1])
        _tmp13 = (_a[1] * _b[0])
        _tmp14 = (((_tmp10 + _tmp11) + _tmp12) - _tmp13)
        _tmp15 = (_a[3] * _b[3])
        _tmp16 = (_a[2] * _b[2])
        _tmp17 = (_a[0] * _b[0])
        _tmp18 = (_a[1] * _b[1])
        _tmp19 = (((_tmp15 - _tmp16) - _tmp17) - _tmp18)
        # Common subexpressions for the Jacobian entries.
        _tmp20 = ((1.0 / 2.0) * _tmp10)
        _tmp21 = (((- 1.0) / 2.0) * _tmp11)
        _tmp22 = ((1.0 / 2.0) * _tmp12)
        _tmp23 = (- _tmp22)
        _tmp24 = ((1.0 / 2.0) * _tmp13)
        _tmp25 = (((_tmp20 + _tmp21) + _tmp23) - _tmp24)
        _tmp26 = (2 * _tmp14)
        _tmp27 = ((1.0 / 2.0) * _tmp0)
        _tmp28 = ((1.0 / 2.0) * _tmp1)
        _tmp29 = (- _tmp28)
        _tmp30 = ((1.0 / 2.0) * _tmp2)
        _tmp31 = ((1.0 / 2.0) * _tmp3)
        _tmp32 = (- _tmp31)
        _tmp33 = ((((- _tmp27) + _tmp29) + _tmp30) + _tmp32)
        _tmp34 = (2 * _tmp4)
        _tmp35 = ((1.0 / 2.0) * _tmp6)
        _tmp36 = ((1.0 / 2.0) * _tmp7)
        _tmp37 = (_tmp35 - _tmp36)
        _tmp38 = ((1.0 / 2.0) * _tmp5)
        _tmp39 = (- _tmp38)
        _tmp40 = ((1.0 / 2.0) * _tmp8)
        _tmp41 = (_tmp39 - _tmp40)
        _tmp42 = (_tmp37 + _tmp41)
        _tmp43 = (2 * _tmp9)
        _tmp44 = ((1.0 / 2.0) * _tmp16)
        _tmp45 = ((1.0 / 2.0) * _tmp17)
        _tmp46 = (_tmp44 + _tmp45)
        _tmp47 = ((1.0 / 2.0) * _tmp15)
        _tmp48 = ((1.0 / 2.0) * _tmp18)
        _tmp49 = (- _tmp48)
        _tmp50 = (_tmp47 + _tmp49)
        _tmp51 = (_tmp46 + _tmp50)
        _tmp52 = (2 * _tmp19)
        _tmp53 = (- _tmp47)
        _tmp54 = (- _tmp44)
        _tmp55 = (((_tmp45 + _tmp49) + _tmp53) + _tmp54)
        _tmp56 = (((_tmp35 + _tmp36) + _tmp39) + _tmp40)
        _tmp57 = (_tmp27 + _tmp30)
        _tmp58 = ((_tmp28 + _tmp32) + _tmp57)
        _tmp59 = (_tmp21 + _tmp24)
        _tmp60 = ((_tmp20 + _tmp22) + _tmp59)
        _tmp61 = (((- _tmp35) + _tmp36) + _tmp41)
        _tmp62 = (((- _tmp45) + _tmp50) + _tmp54)
        _tmp63 = (((- _tmp20) + _tmp23) + _tmp59)
        _tmp64 = ((_tmp29 + _tmp31) + _tmp57)
        _tmp65 = (((- _tmp26) * _tmp63) + (_tmp34 * _tmp64))
        _tmp66 = (_tmp43 * _tmp64)
        _tmp67 = (_tmp52 * _tmp63)
        _tmp68 = (((2 * _tmp46) + (2 * _tmp48)) + (2 * _tmp53))
        _tmp69 = ((_tmp37 + _tmp38) + _tmp40)
        _res = ([0.0] * 4)
        _res[0] = _tmp4
        _res[1] = _tmp9
        _res[2] = _tmp14
        _res[3] = _tmp19
        _res_D_a = numpy.zeros((2, 2))
        _res_D_a[(0, 0)] = (((((- _tmp25) * _tmp26) + (_tmp33 * _tmp34)) - (_tmp42 * _tmp43)) + (_tmp51 * _tmp52))
        _res_D_a[(1, 0)] = (((((- _tmp25) * _tmp52) - (_tmp26 * _tmp51)) + (_tmp33 * _tmp43)) + (_tmp34 * _tmp42))
        _res_D_a[(0, 1)] = (((((- _tmp26) * _tmp55) + (_tmp34 * _tmp56)) - (_tmp43 * _tmp58)) + (_tmp52 * _tmp60))
        _res_D_a[(1, 1)] = (((((- _tmp26) * _tmp60) + (_tmp34 * _tmp58)) + (_tmp43 * _tmp56)) - (_tmp52 * _tmp55))
        _res_D_b = numpy.zeros((2, 2))
        _res_D_b[(0, 0)] = ((((- _tmp43) * _tmp61) + (_tmp52 * _tmp62)) + _tmp65)
        _res_D_b[(1, 0)] = (((((- _tmp26) * _tmp62) + (_tmp34 * _tmp61)) + _tmp66) - _tmp67)
        _res_D_b[(0, 1)] = (((((- _tmp14) * _tmp68) + (_tmp34 * _tmp69)) - _tmp66) + _tmp67)
        _res_D_b[(1, 1)] = ((((- _tmp19) * _tmp68) + (_tmp43 * _tmp69)) + _tmp65)
        return (sym.Unit3.from_storage(_res), _res_D_a, _res_D_b)

    def between_with_jacobians(a, b):
        """Compute a^{-1} * b and return it with Jacobians w.r.t. a and b."""
        _a = a.data
        _b = b.data
        # Common subexpressions for the result components.
        _tmp0 = (_a[3] * _b[0])
        _tmp1 = (_a[2] * _b[1])
        _tmp2 = (_a[0] * _b[3])
        _tmp3 = (_a[1] * _b[2])
        _tmp4 = (((_tmp0 + _tmp1) - _tmp2) - _tmp3)
        _tmp5 = (_a[3] * _b[1])
        _tmp6 = (_a[2] * _b[0])
        _tmp7 = (_a[0] * _b[2])
        _tmp8 = (_a[1] * _b[3])
        _tmp9 = (((_tmp5 - _tmp6) + _tmp7) - _tmp8)
        _tmp10 = (_a[3] * _b[2])
        _tmp11 = (_a[2] * _b[3])
        _tmp12 = (_a[0] * _b[1])
        _tmp13 = (_a[1] * _b[0])
        _tmp14 = (((_tmp10 - _tmp11) - _tmp12) + _tmp13)
        _tmp15 = (_a[3] * _b[3])
        _tmp16 = (_a[2] * _b[2])
        _tmp17 = (_a[0] * _b[0])
        _tmp18 = (_a[1] * _b[1])
        _tmp19 = (((_tmp15 + _tmp16) + _tmp17) + _tmp18)
        # Common subexpressions for the Jacobian entries.
        _tmp20 = ((1.0 / 2.0) * _tmp5)
        _tmp21 = ((1.0 / 2.0) * _tmp6)
        _tmp22 = ((1.0 / 2.0) * _tmp7)
        _tmp23 = ((1.0 / 2.0) * _tmp8)
        _tmp24 = (((_tmp20 - _tmp21) + _tmp22) - _tmp23)
        _tmp25 = (2 * _tmp9)
        _tmp26 = (_tmp24 * _tmp25)
        _tmp27 = ((1.0 / 2.0) * _tmp15)
        _tmp28 = ((1.0 / 2.0) * _tmp16)
        _tmp29 = ((1.0 / 2.0) * _tmp17)
        _tmp30 = ((1.0 / 2.0) * _tmp18)
        _tmp31 = ((((- _tmp27) - _tmp28) - _tmp29) - _tmp30)
        _tmp32 = (2 * _tmp19)
        _tmp33 = (_tmp31 * _tmp32)
        _tmp34 = ((((((- 1.0) / 2.0) * _tmp10) + ((1.0 / 2.0) * _tmp11)) + ((1.0 / 2.0) * _tmp12)) - ((1.0 / 2.0) * _tmp13))
        _tmp35 = (2 * _tmp14)
        _tmp36 = ((- _tmp34) * _tmp35)
        _tmp37 = ((1.0 / 2.0) * _tmp0)
        _tmp38 = ((1.0 / 2.0) * _tmp1)
        _tmp39 = ((1.0 / 2.0) * _tmp2)
        _tmp40 = ((1.0 / 2.0) * _tmp3)
        _tmp41 = (((_tmp37 + _tmp38) - _tmp39) - _tmp40)
        _tmp42 = (2 * _tmp4)
        _tmp43 = (_tmp36 + (_tmp41 * _tmp42))
        _tmp44 = (_tmp24 * _tmp42)
        _tmp45 = ((- _tmp31) * _tmp35)
        _tmp46 = (_tmp25 * _tmp41)
        _tmp47 = (_tmp32 * _tmp34)
        _tmp48 = (_tmp46 - _tmp47)
        _tmp49 = (((_tmp27 + _tmp28) + _tmp29) + _tmp30)
        _tmp50 = ((- _tmp35) * _tmp49)
        _tmp51 = ((((- _tmp37) - _tmp38) + _tmp39) + _tmp40)
        _tmp52 = (_tmp44 + _tmp47)
        _tmp53 = (_tmp32 * _tmp49)
        _tmp54 = ((((- _tmp20) + _tmp21) - _tmp22) + _tmp23)
        _res = ([0.0] * 4)
        _res[0] = _tmp4
        _res[1] = _tmp9
        _res[2] = _tmp14
        _res[3] = _tmp19
        _res_D_a = numpy.zeros((2, 2))
        _res_D_a[(0, 0)] = (((- _tmp26) + _tmp33) + _tmp43)
        _res_D_a[(1, 0)] = ((_tmp44 + _tmp45) + _tmp48)
        _res_D_a[(0, 1)] = ((((- _tmp25) * _tmp51) + _tmp50) + _tmp52)
        _res_D_a[(1, 1)] = (((_tmp26 + _tmp36) + (_tmp42 * _tmp51)) - _tmp53)
        _res_D_b = numpy.zeros((2, 2))
        _res_D_b[(0, 0)] = ((((- _tmp25) * _tmp54) + _tmp43) + _tmp53)
        _res_D_b[(1, 0)] = (((_tmp42 * _tmp54) + _tmp48) + _tmp50)
        _res_D_b[(0, 1)] = ((_tmp45 - _tmp46) + _tmp52)
        _res_D_b[(1, 1)] = ((_tmp26 - _tmp33) + _tmp43)
        return (sym.Unit3.from_storage(_res), _res_D_a, _res_D_b)
def main():
    """Preprocess MultiWOZ dialogues and build the vocabularies.

    Reads the mode from ``sys.argv[1]``:
      * ``'delex'``   -- create (or load cached) delexicalized dialogues
      * ``'lexical'`` -- create (or load cached) lexicalized dialogues
    then divides the corpus into usr/sys/db/bs parts and builds dictionaries.

    Raises:
        TypeError: if ``sys.argv[1]`` is neither ``'delex'`` nor ``'lexical'``.
    """
    mode = sys.argv[1]
    if mode == 'delex':
        print('MultiWoz Create delexicalized dialogues. Get yourself a coffee, this might take a while.')
        cache_path = os.path.join(DATA_DIR, 'multi-woz/delex.json')
        if not os.path.isfile(cache_path):
            data = createDelexData()
        else:
            # Reuse the cached corpus; close the handle promptly instead of
            # leaking it via json.load(open(...)).
            with open(cache_path) as f:
                data = json.load(f)
    elif mode == 'lexical':
        print('MultiWoz Create lexicalized dialogues. Get yourself a coffee, this might take a while.')
        cache_path = os.path.join(DATA_DIR, 'multi-woz/lex.json')
        if not os.path.isfile(cache_path):
            data = createLexicalData()
        else:
            with open(cache_path) as f:
                data = json.load(f)
    else:
        raise TypeError('unknown preprocessing')
    lexicalize = (mode == 'lexical')
    print('Divide dialogues for separate bits - usr, sys, db, bs')
    (word_freqs_usr, word_freqs_sys, word_freqs_history) = divideData(data, lexicalize=lexicalize)
    print('Building dictionaries')
    buildDictionaries(word_freqs_usr, word_freqs_sys, word_freqs_history, lexicalize=lexicalize)
def _get_word_cluster_features(query_tokens, clusters_name, resources):
if (not clusters_name):
return []
ngrams = get_all_ngrams(query_tokens)
cluster_features = []
for ngram in ngrams:
cluster = get_word_cluster(resources, clusters_name).get(ngram[NGRAM].lower(), None)
if (cluster is not None):
cluster_features.append(cluster)
return cluster_features |
def get_module_type(graph):
    """Classify a modular-decomposition node from graph connectivity.

    A disconnected graph yields a PARALLEL node; a connected graph whose
    complement is also connected yields PRIME; otherwise SERIES.
    """
    if not graph.is_connected():
        return NodeType.PARALLEL
    if graph.complement().is_connected():
        return NodeType.PRIME
    return NodeType.SERIES
def ref_pow2_quantize(x, sign, with_zero, n, m, quantize, ste_fine_grained):
    """Reference power-of-two quantization of ``x``.

    Magnitudes are snapped to the nearest power of two and clamped to the
    range representable with ``n`` bits and exponent offset ``m``.  ``sign``
    reserves one bit for the sign; ``with_zero`` reserves one code for an
    exact zero (magnitudes below the pruning threshold become 0).
    ``ste_fine_grained`` is accepted for signature parity and unused here.
    When ``quantize`` is false, ``x`` is returned untouched.
    """
    assert n > 0
    # Bits left for the exponent after reserving the sign / zero codes.
    exp_bits = (n - 1) if sign else n
    exp_bits = (exp_bits - 1) if with_zero else exp_bits
    p_max = 2 ** m
    p_min = 2 ** (m - ((1 << exp_bits) - 1))
    prune_thresh = p_min * (2.0 ** (-0.5))
    if not quantize:
        return x
    # Snap each magnitude to the nearest power of two, then clamp above.
    q = 2.0 ** np.round(np.log2(np.abs(x)))
    q[q > p_max] = p_max
    if with_zero:
        # Record which entries sit below the minimum BEFORE clamping them,
        # then zero out entries whose original magnitude is prunable.
        below_min = q < p_min
        q[below_min] = p_min
        q[np.abs(x) < prune_thresh] = 0.0
    else:
        q[q < p_min] = p_min
    if sign:
        q = np.sign(x) * q
    elif with_zero:
        # Unsigned with zero: negative inputs collapse to the zero code.
        q[np.sign(x) < 0.0] = 0.0
    else:
        # Unsigned without zero: negative inputs clamp to the smallest code.
        q[np.sign(x) < 0.0] = p_min
    return q
def changeEgoInTwoStar(G, A, i):
    """Number of two-stars centered at ego node ``i``: C(indegree(i), 2).

    Args:
        G: graph object exposing ``indegree(node)``.
        A: adjacency matrix; unused, kept for the shared change-statistic
            signature.
        i: index of the ego node.

    Returns:
        float: ``d * (d - 1) / 2`` where ``d = G.indegree(i)``, or 0 when
        the indegree is at most 1.
    """
    # Query the degree once instead of three times.
    deg = G.indegree(i)
    return (deg * (deg - 1)) / 2.0 if deg > 1 else 0
def kmeans_tensor(tensor_data: np.ndarray, p: int, n_bits: int, per_channel: bool=False, channel_axis: int=1, n_iter: int=10, min_threshold: float=MIN_THRESHOLD, quant_error_method: qc.QuantizationErrorMethod=None) -> dict:
    """Build a k-means lookup-table quantizer for a weight tensor.

    The tensor is scaled to [-1, 1] by its per-channel (or global) max-abs
    value, then clustered into at most ``2 ** n_bits`` centroids.

    Args:
        tensor_data: tensor of values to quantize.
        p: unused here; kept for the shared quantizer-search signature.
        n_bits: number of bits -> at most ``2 ** n_bits`` clusters.
        per_channel: whether to compute one scale per channel.
        channel_axis: axis holding the channels when ``per_channel``.
        n_iter: unused here; kept for the shared signature.
        min_threshold: unused here; kept for the shared signature.
        quant_error_method: unused here; kept for the shared signature.

    Returns:
        dict: ``{LUT_VALUES: cluster centers, SCALE_PER_CHANNEL: scales}``.
    """
    # np.unique flattens its input anyway, so one call suffices; cap the
    # cluster count at the number of distinct values so KMeans stays
    # well-posed (it cannot produce more clusters than unique samples).
    n_clusters = min(len(np.unique(tensor_data)), 2 ** n_bits)
    kmeans = KMeans(n_clusters=n_clusters)
    # Reduce over every axis except the channel axis for per-channel scales.
    axis_not_channel = [i for i in range(len(tensor_data.shape))]
    if channel_axis in axis_not_channel:
        axis_not_channel.remove(channel_axis)
    if per_channel:
        scales_per_channel = np.max(np.abs(tensor_data), axis=tuple(axis_not_channel), keepdims=True)
    else:
        scales_per_channel = np.max(np.abs(tensor_data), keepdims=True)
    # EPS guards against division by zero for all-zero tensors/channels.
    tensor_for_kmeans = tensor_data / (scales_per_channel + EPS)
    kmeans.fit(tensor_for_kmeans.reshape(-1, 1))
    return {LUT_VALUES: kmeans.cluster_centers_, SCALE_PER_CHANNEL: scales_per_channel}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.