code stringlengths 101 5.91M |
|---|
class FlaxDistilBertPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the flax backend is unavailable.

    Any attempt to instantiate it raises a helpful "install flax" error via
    ``requires_backends`` instead of a bare ImportError.
    """

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Fail fast with an informative message when flax is missing.
        requires_backends(self, ['flax'])
def pretty_eta(seconds_left):
    """Render a duration in seconds as a short human-readable ETA.

    Only the two most significant units are shown ('1 day and 3 hours',
    '2 hours and 1 minute', '5 minutes'); anything under a minute becomes
    'less than a minute'.
    """
    minutes_left, seconds_left = divmod(seconds_left, 60)
    hours_left, minutes_left = divmod(minutes_left, 60)
    days_left, hours_left = divmod(hours_left, 24)

    def fmt(count, unit):
        # Pluralize the unit name when the count exceeds one.
        return '{} {}{}'.format(str(count), unit, 's' if count > 1 else '')

    if days_left > 0:
        parts = [fmt(days_left, 'day')]
        if hours_left > 0:
            parts.append(fmt(hours_left, 'hour'))
        return ' and '.join(parts)
    if hours_left > 0:
        parts = [fmt(hours_left, 'hour')]
        if minutes_left > 0:
            parts.append(fmt(minutes_left, 'minute'))
        return ' and '.join(parts)
    if minutes_left > 0:
        return fmt(minutes_left, 'minute')
    return 'less than a minute'
def save_figure(destination, obj=None):
    """Write the current matplotlib figure to *destination* and close it.

    The *obj* parameter is accepted for interface compatibility but unused.
    """
    plt.tight_layout()  # tighten margins so labels are not clipped
    plt.savefig(destination)
    plt.close()
class FeatureExtractor(ch.nn.Module):
    """Wraps a submodule and captures intermediate activations via forward hooks.

    Each entry of *layers* is a callable mapping the submodule to one of its
    layers; after every forward pass that layer's output is available as its
    ``activations`` buffer.
    """

    def __init__(self, submod, layers):
        super(FeatureExtractor, self).__init__()
        self.submod = submod
        self.layers = layers
        self.n = 0
        for layer_getter in layers:
            target_layer = layer_getter(self.submod)

            def save_activations(module, _, output):
                # Stash the most recent output on the hooked module itself.
                module.register_buffer('activations', output)

            target_layer.register_forward_hook(save_activations)

    def forward(self, *args, **kwargs):
        """Run the submodule, returning [output, activation_1, activation_2, ...]."""
        result = self.submod(*args, **kwargs)
        collected = [getter(self.submod).activations for getter in self.layers]
        return [result] + collected
class install_lib(orig.install_lib):
    """install_lib that skips namespace-package ``__init__`` files on install."""

    def run(self):
        """Build, install, then byte-compile whatever was installed."""
        self.build()
        outfiles = self.install()
        if outfiles is not None:
            self.byte_compile(outfiles)

    def get_exclusions(self):
        """Return the set of install paths (namespace-package init files) to skip."""
        all_packages = (pkg for ns_pkg in self._get_SVEM_NSPs() for pkg in self._all_packages(ns_pkg))
        excl_specs = product(all_packages, self._gen_exclusion_paths())
        return set(starmap(self._exclude_pkg_path, excl_specs))

    def _exclude_pkg_path(self, pkg, exclusion_path):
        """Join install_dir, the dotted package path, and exclusion_path."""
        parts = pkg.split('.') + [exclusion_path]
        return os.path.join(self.install_dir, *parts)

    @staticmethod
    def _all_packages(pkg_name):
        """Yield pkg_name and every parent package ('a.b.c' -> 'a.b.c', 'a.b', 'a').

        Must be static: it takes no ``self``, yet is invoked as
        ``self._all_packages(ns_pkg)`` in get_exclusions -- without the
        decorator that call passes two positional arguments and raises
        TypeError.
        """
        while pkg_name:
            yield pkg_name
            pkg_name, sep, child = pkg_name.rpartition('.')

    def _get_SVEM_NSPs(self):
        """Return namespace packages, but only for single-version-externally-managed installs."""
        if not self.distribution.namespace_packages:
            return []
        install_cmd = self.get_finalized_command('install')
        svem = install_cmd.single_version_externally_managed
        return self.distribution.namespace_packages if svem else []

    @staticmethod
    def _gen_exclusion_paths():
        """Yield candidate ``__init__`` filenames: source, bytecode, and tagged __pycache__ forms.

        Static for the same reason as ``_all_packages``: it takes no ``self``
        but is called as ``self._gen_exclusion_paths()``.
        """
        yield '__init__.py'
        yield '__init__.pyc'
        yield '__init__.pyo'
        if not hasattr(imp, 'get_tag'):
            return
        base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
        yield base + '.pyc'
        yield base + '.pyo'
        yield base + '.opt-1.pyc'
        yield base + '.opt-2.pyc'

    def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1):
        """Copy the build tree, filtering out excluded namespace-package files."""
        assert preserve_mode and preserve_times and (not preserve_symlinks)
        exclude = self.get_exclusions()
        if not exclude:
            # Nothing to filter: defer entirely to the distutils implementation.
            return orig.install_lib.copy_tree(self, infile, outfile)
        from setuptools.archive_util import unpack_directory
        from distutils import log
        outfiles = []

        def pf(src, dst):
            # Skip excluded namespace-package files; record everything else.
            if dst in exclude:
                log.warn('Skipping installation of %s (namespace package)', dst)
                return False
            log.info('copying %s -> %s', src, os.path.dirname(dst))
            outfiles.append(dst)
            return dst

        unpack_directory(infile, outfile, pf)
        return outfiles

    def get_outputs(self):
        """Base-class outputs minus any excluded namespace-package files."""
        outputs = orig.install_lib.get_outputs(self)
        exclude = self.get_exclusions()
        if exclude:
            return [f for f in outputs if f not in exclude]
        return outputs
class MarioDataset(Dataset):
    """Tokenized Mario-level dataset sampled as fixed-length token windows.

    The level is flipped/transposed to column-major order before tokenizing,
    so consecutive tokens run down columns of height ``height``.
    """

    def __init__(self, tokenizer: Optional[PreTrainedTokenizer]=None, level_string: Optional[str]=None, context_len: int=700, height: int=14, remove_start_end_tokens: bool=False, sample_all_indices: bool=False):
        if (level_string is None):
            print('No level string specified, using default string FULL_LEVEL_STR_WITH_PATHS...')
            level_string = FULL_LEVEL_STR_WITH_PATHS
        elif ('.txt' in level_string):
            # The argument is a path, not the level itself: load it from disk.
            with open(level_string, 'r') as file:
                level_string = file.read()
        self.character_set = set(level_string)
        if ('\n' in self.character_set):
            self.character_set.remove('\n')
        self.vocab_size = len(self.character_set)
        self.sample_all_indices = sample_all_indices

        def get_training_corpus():
            # Single-document corpus for training a character-level tokenizer.
            yield list(level_string)

        if (tokenizer is None):
            tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL)
        self.tokenizer = tokenizer
        if (getattr(tokenizer, 'train_new_from_iterator', None) is not None):
            self.tokenizer = self.tokenizer.train_new_from_iterator(get_training_corpus(), 52000)
        elif (getattr(tokenizer, 'train_from_iterator', None) is not None):
            # Wrap slow tokenizers so they gain train_new_from_iterator.
            self.tokenizer = PreTrainedTokenizerFast(tokenizer_object=self.tokenizer)
            self.tokenizer = self.tokenizer.train_new_from_iterator(get_training_corpus(), self.vocab_size)
        self.context_len = context_len
        self.height = height
        (x, self.str_arr) = self.convert_level_to_tensor(level_string.split('\n'))
        # Keep the raw tokenizer output: __str__ decodes self.x['input_ids'].
        # (Previously x was only a local variable, so __str__ raised
        # AttributeError.)
        self.x = x
        self.input_ids = x['input_ids'].squeeze()
        self.attention_masks = x['attention_mask'].squeeze()
        if remove_start_end_tokens:
            self.input_ids = self.input_ids[1:(- 1)]
            self.attention_masks = self.attention_masks[1:(- 1)]
        self.indices = self.generate_indices()
        (self.unique_tokens, self.unique_counts) = self.input_ids.unique(return_counts=True)
        # Inverse-frequency weighting so rare tokens can be up-weighted.
        self.weighted_unique_counts = ((1.0 / self.unique_counts) / torch.sum(self.unique_counts))
        self.token_dict = {}
        string_tokens = list(self.tokenizer.decode(self.unique_tokens))
        for (int_token, string_token) in zip(self.unique_tokens, string_tokens):
            self.token_dict[string_token] = int_token

    def convert_level_to_tensor(self, level: List[str]):
        """Tokenize the level rows (column-major); return (encoding, flat string)."""
        str_arr = flip_and_transpose(np.array(characterize(level)))
        str_arr = ''.join(join_list_of_list(str_arr))
        x = self.tokenizer(str_arr, return_tensors='pt')
        return (x, str_arr)

    def __len__(self):
        return self.indices.shape[0]

    def __getitem__(self, idx):
        """Return (input_ids, attention_masks) for one window or a batch of windows."""
        if isinstance(idx, int):
            indices = self.indices[idx]
        else:
            indices = torch.stack([self.indices[i] for i in idx])
        return (self.input_ids[indices], self.attention_masks[indices])

    def generate_indices(self):
        """Precompute context windows starting at column boundaries (or everywhere)."""
        out = []
        for idx in range((self.input_ids.shape[0] - self.context_len)):
            if (((idx % self.height) == 0) or self.sample_all_indices):
                arange = torch.arange(idx, (idx + self.context_len))
                out.append(arange)
        return torch.stack(out)

    def sample_indices(self, batch_size):
        """Sample batch_size random context windows of token indices."""
        out = []
        for _ in range(batch_size):
            start_idx = np.random.randint(0, (self.__len__() - self.context_len))
            indices = torch.arange(start_idx, (start_idx + self.context_len))
            out.append(indices)
        return torch.stack(out)

    def __str__(self):
        """Decode the stored encoding back into the original level layout."""
        str_list = characterize(self.tokenizer.batch_decode(self.x['input_ids']))
        string = '\n'.join(join_list_of_list(flip_and_transpose(np.array(str_list), True)))
        return string
def get_cgroup_path(private=True):
    """Return the container's cgroup path.

    With *private* (the default) the root path '/' is returned; otherwise the
    path is read from PID 1's cpuset entry and must be non-empty.
    """
    if private:
        return '/'
    with open('/proc/1/cpuset', 'r') as f:
        cgroup = f.read().strip()
    assert cgroup
    return cgroup
class LabeledPatients(MutableMapping[(int, List[Label])]):
    """Mutable mapping of patient id -> list of Labels, with (de)serialization helpers."""

    def __init__(self, patients_to_labels: Dict[(int, List[Label])], labeler_type: LabelType):
        self.patients_to_labels: Dict[(int, List[Label])] = patients_to_labels
        self.labeler_type: LabelType = labeler_type

    def save(self, target_filename) -> None:
        """Write all labels to CSV; survival labels get an extra is_censored column.

        Survival time_to_event is serialized in minutes (timedelta division).
        """
        with open(target_filename, 'w') as f:
            writer = csv.writer(f)
            header = ['patient_id', 'prediction_time', 'label_type', 'value']
            if (self.labeler_type == 'survival'):
                header.append('is_censored')
            writer.writerow(header)
            for (patient, labels) in self.patients_to_labels.items():
                for label in labels:
                    if (self.labeler_type == 'survival'):
                        assert isinstance(label.value, SurvivalValue)
                        writer.writerow([patient, label.time.isoformat(), self.labeler_type, (label.value.time_to_event / datetime.timedelta(minutes=1)), label.value.is_censored])
                    else:
                        writer.writerow([patient, label.time.isoformat(), self.labeler_type, label.value])

    def get_labels_from_patient_idx(self, idx: int) -> List[Label]:
        return self.patients_to_labels[idx]

    def get_all_patient_ids(self) -> List[int]:
        return sorted(list(self.patients_to_labels.keys()))

    def get_patients_to_labels(self) -> Dict[(int, List[Label])]:
        return self.patients_to_labels

    def get_labeler_type(self) -> LabelType:
        return self.labeler_type

    def as_numpy_arrays(self) -> Tuple[(NDArray[(Literal['n_patients, 1'], np.int64)], NDArray[(Literal['n_patients, 1 or 2'], Any)], NDArray[(Literal['n_patients, 1'], np.datetime64)])]:
        """Flatten into (patient_ids, label_values, label_times) arrays.

        Survival labels contribute [time_to_event, is_censored] pairs as the
        value column; other supported types contribute scalar values.
        """
        patient_ids: List[int] = []
        label_values: List[Any] = []
        label_times: List[datetime.datetime] = []
        if (self.labeler_type in ['boolean', 'numerical', 'categorical']):
            for (patient_id, labels) in self.patients_to_labels.items():
                for label in labels:
                    patient_ids.append(patient_id)
                    label_values.append(label.value)
                    label_times.append(label.time)
        elif (self.labeler_type in ['survival']):
            for (patient_id, labels) in self.patients_to_labels.items():
                for label in labels:
                    survival_value: SurvivalValue = cast(SurvivalValue, label.value)
                    patient_ids.append(patient_id)
                    label_values.append([survival_value.time_to_event, survival_value.is_censored])
                    label_times.append(label.time)
        else:
            raise ValueError('Other label types are not implemented yet for this method')
        return (np.array(patient_ids), np.array(label_values), np.array(label_times))

    def get_num_patients(self) -> int:
        return len(self)

    def get_num_labels(self) -> int:
        """Total number of labels across all patients."""
        total: int = 0
        for labels in self.patients_to_labels.values():
            total += len(labels)
        return total

    def as_list_of_label_tuples(self) -> List[Tuple[(int, Label)]]:
        """Flatten into a list of (patient_id, Label) pairs."""
        result: List[Tuple[(int, Label)]] = []
        for (patient_id, labels) in self.patients_to_labels.items():
            for label in labels:
                result.append((int(patient_id), label))
        return result

    @classmethod
    def load_from_numpy(cls, patient_ids: NDArray[(Literal['n_patients, 1'], np.int64)], label_values: NDArray[(Literal['n_patients, 1 or 2'], Any)], label_times: NDArray[(Literal['n_patients, 1'], datetime.datetime)], labeler_type: LabelType) -> LabeledPatients:
        """Inverse of as_numpy_arrays: rebuild a LabeledPatients from flat arrays.

        Declared a classmethod: it receives ``cls`` and constructs a new
        instance (the original was missing the decorator, which made calls
        through an instance pass the instance as ``patient_ids``).
        """
        patients_to_labels: DefaultDict[(int, List[Label])] = collections.defaultdict(list)
        for (patient_id, l_value, l_time) in zip(patient_ids, label_values, label_times):
            if (labeler_type in ['boolean', 'numerical', 'categorical']):
                patients_to_labels[patient_id].append(Label(time=l_time, value=l_value))
            elif (labeler_type in ['survival']):
                patients_to_labels[patient_id].append(Label(time=l_time, value=SurvivalValue(time_to_event=l_value[0], is_censored=l_value[1])))
            else:
                raise ValueError('Other label types are not implemented yet for this method')
        return LabeledPatients(dict(patients_to_labels), labeler_type)

    def __str__(self):
        return ('LabeledPatients:\n' + pprint.pformat(self.patients_to_labels))

    def __getitem__(self, key):
        return self.patients_to_labels[key]

    def __setitem__(self, key, item):
        self.patients_to_labels[key] = item

    def __delitem__(self, key):
        del self.patients_to_labels[key]

    def __iter__(self):
        return iter(self.patients_to_labels)

    def __len__(self):
        return len(self.patients_to_labels)
class TensorflowImporter():
    """Loads a TensorFlow model (frozen pb, SavedModel, or checkpoint),
    converts it to ONNX via tf2onnx, and executes it with OnnxImporter."""

    def __init__(self, *args, **kwargs):
        # First positional argument is the model path; format and IO tensor
        # names come in through keyword arguments.
        self._tf_file = args[0]
        self._tf_format = kwargs.get('tf_format')
        self._outputs = kwargs.get('outputs')
        self._inputs = kwargs.get('inputs')

    def convert_to_onnx(self, graph_def, inputs, outputs):
        """Convert a frozen GraphDef to an optimized ONNX ModelProto (opset 11)."""
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graph_def, name='')
        with tf_loader.tf_session(graph=tf_graph):
            g = process_tf_graph(tf_graph, continue_on_error=False, target=','.join(constants.DEFAULT_TARGET), opset=11, input_names=inputs, output_names=outputs, inputs_as_nchw=None)
        onnx_graph = optimizer.optimize_graph(g)
        model_proto = onnx_graph.make_model('converted from {}'.format(self._tf_file))
        return model_proto

    def load_checkpoint_v1(self):
        """Restore the latest TF1 checkpoint next to self._tf_file and freeze it
        into a GraphDef with variables folded in as constants."""
        ckpt_path = os.path.dirname(self._tf_file)
        latest_ckpt = tf.train.latest_checkpoint(ckpt_path)
        saver = tf.train.import_meta_graph((latest_ckpt + '.meta'))
        with tf.Session() as session:
            session.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            saver.restore(session, latest_ckpt)
            graph_def = session.graph.as_graph_def(add_shapes=True)
        frozen_graph = freeze_graph.freeze_graph_with_def_protos(input_graph_def=graph_def, input_saver_def=None, input_checkpoint=latest_ckpt, output_node_names=self._outputs, restore_op_name='', filename_tensor_name='', output_graph=None, clear_devices=True, initializer_nodes='')
        return frozen_graph

    def execute(self):
        """Dispatch on self._tf_format, convert the loaded graph to ONNX, and run it."""
        if (self._tf_format == 'TF_PB'):
            graph_def = graph_pb2.GraphDef()
            with tf.io.gfile.GFile(self._tf_file, 'rb') as f:
                graph_def.ParseFromString(f.read())
            # Infer IO node names from the graph itself, then reload via tf2onnx.
            (inputs, outputs) = find_out_terminal_node(graph_def, postfix=True)
            (graph_def, inputs, outputs) = tf_loader.from_graphdef(self._tf_file, inputs, outputs)
        elif (self._tf_format == 'SAVED_MODEL'):
            (graph_def, inputs, outputs) = tf_loader.from_saved_model(self._tf_file, None, None)
        else:
            # Checkpoint formats require explicit tensor names; ':0' selects
            # the first output of each named op.
            if (self._outputs is None):
                raise ImportError("Missing '--outputs' parameter.")
            if (self._inputs is None):
                raise ImportError("Missing '--inputs' parameter.")
            inputs = [(i + ':0') for i in self._inputs.split(',')]
            outputs = [(i + ':0') for i in self._outputs.split(',')]
            if (self._tf_format == 'TF_CKPT_V1'):
                graph_def = self.load_checkpoint_v1()
            elif (self._tf_format == 'TF_CKPT_V2'):
                (graph_def, inputs, outputs) = tf_loader.from_checkpoint(self._tf_file, inputs, outputs)
            # NOTE(review): any other format value reaches convert_to_onnx
            # with graph_def unbound (NameError) -- confirm the format is
            # validated upstream.
        onnx_model = self.convert_to_onnx(graph_def, inputs, outputs)
        onnx_importer = OnnxImporter()
        onnx_importer.import_from_onnx_model(onnx_model)
        return onnx_importer.execute()
.skip
def test_lambda_nested_call():
    # Checks that a lambda can be passed through a nested call and applied
    # element-wise: A must end up equal to B + C.
    # NOTE(review): the stray '.skip' residue preceding this function suggests
    # it carried @pytest.mark.skip; the dace type annotations suggest lamb1
    # (and possibly lamb2) were @dace.program-decorated originally -- confirm
    # against the dace test suite.
    def lamb2(A, B, C, f):
        A[:] = f(B, C)

    def lamb1(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]):
        f = (lambda a, b: (a + b))
        lamb2(A, B, C, f)

    A = np.random.rand(20)
    B = np.random.rand(20)
    C = np.random.rand(20)
    lamb1(A, B, C)
    assert np.allclose(A, (B + C))
class UnboundedMemory(BaseMemory):
    """Entity-memory for coreference that grows by one slot per new entity
    (no fixed capacity, so there is never an eviction decision)."""

    def __init__(self, **kwargs):
        super(UnboundedMemory, self).__init__(**kwargs)

    def initialize_memory(self):
        """Return an empty memory: one zero slot, zero entity count, and a
        zeroed last-mention index."""
        mem = torch.zeros(1, self.mem_size).to(self.device)
        ent_counter = torch.tensor([0.0]).to(self.device)
        last_mention_idx = torch.zeros(1).long().to(self.device)
        return (mem, ent_counter, last_mention_idx)

    def predict_action(self, query_vector, ment_score, mem_vectors, ent_counter, feature_embs):
        """Score coref-vs-new per memory cell, plus an overwrite-vs-ignore pair."""
        coref_new_scores = self.get_coref_new_scores(query_vector, ment_score, mem_vectors, ent_counter, feature_embs)
        # Ignoring the mention is as attractive as the mention score is low.
        not_a_ment_score = (- ment_score)
        over_ign_score = torch.cat([torch.tensor([0.0]).to(self.device), not_a_ment_score], dim=0).to(self.device)
        return (coref_new_scores, over_ign_score)

    def interpret_scores(self, coref_new_scores, overwrite_ign_scores, first_overwrite):
        """Map argmax scores to (cell_idx, action): 'c' coref, 'o' overwrite
        (new entity), 'i' ignore."""
        if first_overwrite:
            # Memory still holds only the initial dummy slot.
            num_ents = 0
            num_cells = 1
        else:
            num_ents = (coref_new_scores.shape[0] - 1)
            num_cells = num_ents
        pred_max_idx = torch.argmax(coref_new_scores).item()
        if (pred_max_idx < num_cells):
            return (pred_max_idx, 'c')
        elif (pred_max_idx == num_cells):
            # The "new entity" slot won; decide between overwrite and ignore.
            over_max_idx = torch.argmax(overwrite_ign_scores).item()
            if (over_max_idx == 0):
                return (num_ents, 'o')
            else:
                return ((- 1), 'i')
        else:
            raise NotImplementedError

    def forward(self, mention_emb_list, mention_scores, gt_actions, metadata, rand_fl_list, teacher_forcing=False):
        """Run the memory over a mention sequence.

        Returns (action_logit_list, action_list). Ground-truth actions drive
        the memory updates when training or teacher forcing (``follow_gt``);
        otherwise the predicted actions do.
        """
        (mem_vectors, ent_counter, last_mention_idx) = self.initialize_memory()
        action_logit_list = []
        action_list = []
        first_overwrite = True
        last_action_str = '<s>'
        follow_gt = (self.training or teacher_forcing)
        for (ment_idx, (ment_emb, ment_score, (gt_cell_idx, gt_action_str))) in enumerate(zip(mention_emb_list, mention_scores, gt_actions)):
            query_vector = ment_emb
            metadata['last_action'] = self.action_str_to_idx[last_action_str]
            feature_embs = self.get_feature_embs(ment_idx, last_mention_idx, ent_counter, metadata)
            # During training, randomly subsample ground-truth 'ignore'
            # mentions (kept with probability <= self.sample_invalid).
            if (not (follow_gt and (gt_action_str == 'i') and (rand_fl_list[ment_idx] > self.sample_invalid))):
                (coref_new_scores, overwrite_ign_scores) = self.predict_action(query_vector, ment_score, mem_vectors, ent_counter, feature_embs)
                (pred_cell_idx, pred_action_str) = self.interpret_scores(coref_new_scores, overwrite_ign_scores, first_overwrite)
                action_logit_list.append((coref_new_scores, overwrite_ign_scores))
                action_list.append((pred_cell_idx, pred_action_str))
            else:
                continue
            if follow_gt:
                action_str = gt_action_str
                cell_idx = gt_cell_idx
            else:
                action_str = pred_action_str
                cell_idx = pred_cell_idx
            last_action_str = action_str
            if (first_overwrite and (action_str == 'o')):
                # First real entity: replace the dummy slot entirely.
                first_overwrite = False
                mem_vectors = torch.unsqueeze(query_vector, dim=0)
                ent_counter = torch.tensor([1.0]).to(self.device)
                last_mention_idx[0] = ment_idx
            else:
                num_ents = mem_vectors.shape[0]
                # One-hot mask over cells, broadcast across the memory width.
                cell_mask = (torch.arange(0, num_ents) == cell_idx).float().to(self.device)
                mask = torch.unsqueeze(cell_mask, dim=1)
                mask = mask.repeat(1, self.mem_size)
                if (action_str == 'c'):
                    # Coreference: merge the mention into the chosen cell.
                    mem_vectors = self.coref_update(mem_vectors, query_vector, cell_idx, mask, ent_counter)
                    ent_counter = (ent_counter + cell_mask)
                    last_mention_idx[cell_idx] = ment_idx
                elif (action_str == 'o'):
                    # Unbounded growth: append a brand-new slot for the entity.
                    mem_vectors = torch.cat([mem_vectors, torch.unsqueeze(query_vector, dim=0)], dim=0)
                    ent_counter = torch.cat([ent_counter, torch.tensor([1.0]).to(self.device)], dim=0)
                    last_mention_idx = torch.cat([last_mention_idx, torch.tensor([ment_idx]).to(self.device)], dim=0)
        return (action_logit_list, action_list)
def test_clean_up_not_old(nparray, tensor_key):
    """A freshly cached tensor must survive clean_up and round-trip intact."""
    tensor_db = TensorDB()
    payload = {tensor_key: nparray}
    tensor_db.cache_tensor(payload)
    tensor_db.clean_up()
    restored = tensor_db.get_tensor_from_cache(tensor_key)
    assert np.array_equal(nparray, restored)
def is_integer(s):
    """Return True if *s* can be converted to an int, False otherwise.

    Catches only the conversion errors int() actually raises (ValueError for
    unparsable strings, TypeError for non-numeric objects) instead of the
    original blanket ``except Exception``, and drops the dead reassignment
    of *s*.
    """
    try:
        int(s)
    except (TypeError, ValueError):
        return False
    return True
class TestLocalProjectCheckout():
    """Tests LocalProjectCheckout against a temporary on-disk 'origin' dir."""

    def setup(self):
        # Build a fake origin directory containing a single marker file.
        self.shell = Shell()
        self.temp_dir = mkdtemp(prefix='mubench-checkout-local_')
        self.local_url = join(self.temp_dir, 'origin')
        os.makedirs(self.local_url)
        open(join(self.local_url, 'some.file'), 'w').close()
        self.checkouts_dir = join(self.temp_dir, 'checkouts')

    def teardown(self):
        rmtree(self.temp_dir)

    def test_create(self):
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        uut.create(0)
        expected_checkout_path = join(self.checkouts_dir, '-project-', 'checkout')
        assert_equals(expected_checkout_path, uut.checkout_dir)
        # The origin's content must have been copied into the checkout.
        assert exists(join(expected_checkout_path, 'some.file'))

    def test_not_exists(self):
        # Before create() there is nothing on disk.
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        assert (not uut.exists())

    def test_exists_after_create(self):
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        uut.create(0)
        assert uut.exists()

    def test_not_exists_empty(self):
        # An empty checkout directory does not count as an existing checkout.
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        os.makedirs(uut.checkout_dir)
        assert (not uut.exists())

    def test_delete(self):
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        uut.create(0)
        uut.delete()
        assert (not exists(uut.checkout_dir))

    def test_to_string(self):
        uut = LocalProjectCheckout(self.local_url, self.checkouts_dir, '-project-')
        assert_equals('local:{}'.format(self.local_url), str(uut))
from dataclasses import dataclass


@dataclass(frozen=True)
class CritiqueTaskTemplate():
    """Immutable template describing a human-critique task.

    The stray ``(frozen=True)`` preceding the class in the original is
    decorator residue: the annotated fields only act as constructor
    parameters (and the class is only immutable) with ``@dataclass`` applied.
    """
    # Short identifier for the task.
    name: str
    # Instructions shown to respondents.
    instructions: str
    # Number of respondents asked to complete the task.
    num_respondents: int
    # Questions presented for each critique.
    questions: List[CritiqueQuestionTemplate]
class SerializationMixin(object):
    """Adds JSON save/load of the solver's state for classes exposing a
    ``_state_data`` attribute."""

    def save_instance(self, filepath):
        """Serialize the current state (arrays as lists) to *filepath* as JSON."""
        save_dict = dict(auto_fix_time_shifts=self._state_data.auto_fix_time_shifts, power_signals_d=self._state_data.power_signals_d.tolist(), rank_k=self._state_data.rank_k, matrix_l0=self._state_data.matrix_l0.tolist(), matrix_r0=self._state_data.matrix_r0.tolist(), l_value=self._state_data.l_value.tolist(), r_value=self._state_data.r_value.tolist(), beta_value=float(self._state_data.beta_value), component_r0=self._state_data.component_r0.tolist(), mu_l=self._state_data.mu_l, mu_r=self._state_data.mu_r, tau=self._state_data._tau, is_solver_error=self._state_data.is_solver_error, is_problem_status_error=self._state_data.is_problem_status_error, f1_increase=self._state_data.f1_increase, obj_increase=self._state_data.obj_increase, residuals_median=self._state_data.residuals_median, residuals_variance=self._state_data.residuals_variance, residual_l0_norm=self._state_data.residual_l0_norm, weights=self._state_data.weights.tolist())
        with open(filepath, 'w') as file:
            json.dump(save_dict, file)

    @classmethod
    def load_instance(cls, filepath):
        """Rebuild an instance from a JSON file written by save_instance.

        Declared a classmethod: it receives ``cls`` and constructs the result
        with ``cls(...)``; without the decorator it could not be called as
        ``SomeClass.load_instance(path)``.

        NOTE(review): save reads ``self._state_data`` (and ``_tau``) while
        load writes ``instance.state_data`` (and ``tau``); also save stores
        ``l_value``/``r_value`` that load assigns to ``l_cs_value``/
        ``r_cs_value`` -- presumably properties aliasing the same state;
        confirm against the host class.
        """
        with open(filepath, 'r') as file:
            load_dict = json.load(file)
        power_signals_d = np.array(load_dict['power_signals_d'])
        rank_k = load_dict['rank_k']
        instance = cls(np.array(power_signals_d), rank_k=rank_k)
        instance.state_data.power_signals_d = power_signals_d
        instance.state_data.rank_k = rank_k
        instance.state_data.matrix_l0 = np.array(load_dict['matrix_l0'])
        instance.state_data.matrix_r0 = np.array(load_dict['matrix_r0'])
        instance.state_data.l_cs_value = np.array(load_dict['l_value'])
        instance.state_data.r_cs_value = np.array(load_dict['r_value'])
        instance.state_data.beta_value = load_dict['beta_value']
        instance.state_data.component_r0 = np.array(load_dict['component_r0'])
        instance.state_data.mu_l = load_dict['mu_l']
        instance.state_data.mu_r = load_dict['mu_r']
        instance.state_data.tau = load_dict['tau']
        instance.state_data.is_solver_error = load_dict['is_solver_error']
        instance.state_data.is_problem_status_error = load_dict['is_problem_status_error']
        instance.state_data.f1_increase = load_dict['f1_increase']
        instance.state_data.obj_increase = load_dict['obj_increase']
        instance.state_data.residuals_median = load_dict['residuals_median']
        instance.state_data.residuals_variance = load_dict['residuals_variance']
        instance.state_data.residual_l0_norm = load_dict['residual_l0_norm']
        instance.state_data.weights = np.array(load_dict['weights'])
        instance._keep_result_variables_as_properties(instance.state_data.l_cs_value, instance.state_data.r_cs_value, instance.state_data.beta_value)
        instance._keep_supporting_parameters_as_properties(instance.state_data.weights)
        return instance
('prefix')
class PrefixFactory(SingleFeatureFactory):
    """Feature factory that emits the first ``prefix_size`` characters of a token.

    The stray ``('prefix')`` preceding this class in the original looks like
    stripped registration-decorator residue.
    """

    def feature_name(self):
        # Relies on prefix_size being a plain value, hence the @property below.
        return ('prefix_%s' % self.prefix_size)

    @property
    def prefix_size(self):
        """Prefix length, read from the factory's args dict.

        Restored as a property: both call sites use ``self.prefix_size`` as a
        value (string-formatted into the feature name and passed as a chunk
        size), which would otherwise hand them the bound method object.
        """
        return self.args['prefix_size']

    def compute_feature(self, tokens, token_index):
        """Return the normalized token's leading prefix_size characters."""
        return get_word_chunk(normalize_token(tokens[token_index]), self.prefix_size, 0)
class _VisibleDeprecationTestCase(_DeprecationTestCase):
    # Specializes the deprecation test harness to expect
    # np.VisibleDeprecationWarning instead of the base class's default.
    warning_cls = np.VisibleDeprecationWarning
def getProductions(code):
    """Parse *code* as a Java member declaration and return its production rules.

    Walks the parse tree depth-first (left-to-right via a stack), collecting
    the grammar rule at every non-terminal node. Returns None if an error
    node is encountered, otherwise the rule sequence with the first six
    entries dropped.
    """
    stream = antlr4.InputStream(code)
    lexer = JavaLexer(stream)
    token_stream = antlr4.CommonTokenStream(lexer)
    parser = JavaParserModified(token_stream)
    stack = [parser.memberDeclaration()]
    rules = []
    while stack:
        node = stack.pop()
        name, kind = nname(node)
        if name == 'ErrorN':
            # Parse error somewhere in the tree: no usable rule sequence.
            return None
        if kind != 'T':
            # Non-terminal: record its rule and push children right-to-left
            # so they are visited left-to-right.
            rules.append(getRuleAtNode(node))
            for child_idx in range(node.getChildCount() - 1, -1, -1):
                stack.append(node.getChild(child_idx))
    return rules[6:]
def convert_to_color(arr_2d, palette):
    """Map each label in a 2-D array to its RGB colour.

    *palette* maps label value -> colour triple; unmapped labels stay black
    (the zero-initialised default).
    """
    height, width = arr_2d.shape[0], arr_2d.shape[1]
    arr_3d = np.zeros((height, width, 3), dtype=np.uint8)
    for label, colour in palette.items():
        arr_3d[arr_2d == label] = colour
    return arr_3d
def get_noise(data, dist='G', noise_std=(float(25) / 255.0), mode='S', min_noise=(float(5) / 255.0), max_noise=(float(55) / 255.0)):
    """Sample a noise tensor shaped like *data*.

    dist='G': Gaussian noise. mode='B' draws a per-sample std uniformly from
    [min_noise, max_noise] (blind denoising); any other mode scales by a
    fixed noise_std. dist='P' with mode='S': Poisson shot noise expressed as
    an additive residual.

    NOTE(review): the defaults are already divided by 255 and are divided by
    255 again inside the function -- presumably callers pass levels on the
    0-255 scale; confirm the intended scale of the parameters.
    NOTE(review): for a dist value other than 'G' or 'P', `noise` is returned
    unbound (NameError).
    """
    if (dist == 'G'):
        noise_std /= 255.0
        min_noise /= 255.0
        max_noise /= 255.0
        noise = torch.randn_like(data)
        if (mode == 'B'):
            # One noise level per batch element.
            n = noise.shape[0]
            noise_tensor_array = (((max_noise - min_noise) * torch.rand(n)) + min_noise)
            for i in range(n):
                noise.data[i] = (noise.data[i] * noise_tensor_array[i])
        else:
            noise.data = (noise.data * noise_std)
    elif (dist == 'P'):
        noise = torch.randn_like(data)
        if (mode == 'S'):
            noise_std /= 255.0
            # Residual form: poisson(data * k) / k - data.
            noise = ((torch.poisson((data * noise_std)) / noise_std) - data)
    return noise
class GaloisGroup_v2(GaloisGroup_perm):
    """Galois group of a number field, realised as a permutation group.

    NOTE(review): the bare ``_method(...)`` / ``_attribute`` expressions in
    this class body look like stripped decorators (``@cached_method`` /
    ``@lazy_attribute`` in the Sage source) that belong on the immediately
    following definitions -- confirm against Sage before relying on caching
    (e.g. ``self.order.cache`` and attribute-style access to ``_elts`` /
    ``_gens`` / ``_pari_data`` / ``_gcdata`` only work when those decorators
    are in place).
    """

    def __init__(self, number_field, algorithm='pari', names=None, gc_numbering=None, _type=None):
        """Initialize from *number_field*; gc_numbering defaults per backend."""
        if (not number_field.is_absolute()):
            deprecation(28782, 'Use .absolute_field().galois_group() if you want the Galois group of the absolute field')
        if (gc_numbering is None):
            # Magma numbers roots differently, so Galois-closure numbering is
            # the default only for the other backends.
            gc_numbering = (algorithm != 'magma')
        self._type = _type
        super().__init__(number_field, algorithm, names, gc_numbering)
    # Stripped decorator residue (see class docstring).
    _method(key=GaloisGroup_perm._get_algorithm)

    def _pol_galgp(self, algorithm=None):
        """Galois group of the defining polynomial, via pari (or gap per _type)."""
        algorithm = self._get_algorithm(algorithm)
        f = self._field.absolute_polynomial()
        pari_group = (self._type != 'gap')
        return f.galois_group(pari_group=pari_group, algorithm=algorithm)

    def group(self):
        """Deprecated accessor for the polynomial Galois group."""
        deprecation(28782, 'the group method is deprecated; you can use _pol_galgp if you really need it')
        return self._pol_galgp()
    # Stripped decorator residue (see class docstring).
    _method(key=_alg_key)

    def order(self, algorithm=None, recompute=False):
        """Order of the group; large-degree fields use the Galois closure degree."""
        algorithm = self._get_algorithm(algorithm)
        K = self._field
        if ((K.absolute_degree() < 12) or (algorithm != 'pari')):
            return self._pol_galgp(algorithm=algorithm).order()
        else:
            return self._galois_closure.absolute_degree()

    def easy_order(self, algorithm=None):
        """Return the order only when it is cheap (already cached or small degree)."""
        algorithm = self._get_algorithm(algorithm)
        if self.order.cache:
            # Reuse whatever a previous order() call cached.
            return next(iter(self.order.cache.values()))
        K = self._field
        if ((K.absolute_degree() < 12) or (algorithm != 'pari')):
            size = self._pol_galgp(algorithm=algorithm).order()
            self.order.cache[None] = size
            return size
    # Stripped decorator residue (see class docstring).
    _method(key=_alg_key)

    def transitive_number(self, algorithm=None, recompute=False):
        """The 'T' number in the nTt transitive-group label."""
        algorithm = self._get_algorithm(algorithm)
        K = self._field
        if ((K.absolute_degree() < 12) or (algorithm != 'pari')):
            return self._pol_galgp(algorithm=algorithm).transitive_number()
        else:
            if self._gc_numbering:
                # Identification must happen on the non-closure numbering.
                G = self._field.galois_group(algorithm=self._default_algorithm, names=self._gc_names, gc_numbering=False)
            else:
                G = self
            return ZZ(G.gap().TransitiveIdentification())

    def pari_label(self):
        """Pari's label for the polynomial Galois group."""
        return self._pol_galgp().label()
    # Stripped decorator residue (see class docstring).
    _method

    def signature(self):
        """Return 1 or -1; for degree >= 12 this is decided by whether the
        discriminant is a square (i.e. the group lies in the alternating group)."""
        if (self._field.absolute_degree() < 12):
            return self._pol_galgp().signature()
        elif self._field.absolute_polynomial().discriminant().is_square():
            return ZZ(1)
        else:
            return ZZ((- 1))
    # Stripped decorator residue (see class docstring).
    _attribute

    def _gcdata(self):
        """Return (Galois closure, embedding of the field into it)."""
        K = self._field
        if self.is_galois():
            # The field is its own closure; use the identity embedding.
            return (K, K.hom(K.gen(), K))
        else:
            if K.is_relative():
                K = K.absolute_field((K.variable_name() + 'a'))
                (from_abs, to_abs) = K.structure()
            else:
                to_abs = None
            (L, emb) = K.galois_closure(names=self._gc_names, map=True)
            if (to_abs is not None):
                # Compose so the embedding starts from the original field.
                emb = (emb * to_abs)
            return (L, emb)
    # Stripped decorator residue (see class docstring).
    _attribute

    def _pari_data(self):
        """Pari ``galoisinit`` structure for the Galois closure."""
        return self._galois_closure.__pari__().galoisinit()
    # Stripped decorator residue (see class docstring).
    _attribute

    def _elts(self):
        """Sorted list of all group elements."""
        if self._gc_numbering:
            return sorted([self(x, check=False) for x in self._pari_data[5]])
        else:
            return sorted(list(self.iteration()))
    # Stripped decorator residue (see class docstring).
    _attribute

    def _gens(self):
        """Generators of the group as permutation-group elements."""
        if self._gc_numbering:
            gens = [standardize_generator(x, as_cycles=True) for x in self._pari_data[6]]
            if (not gens):
                # Trivial group: use the identity as sole generator.
                gens = [()]
            gens = [self.element_class(x, self, check=False) for x in gens]
            return sorted(set(gens))
        else:
            # Compute generators on the closure, then translate each
            # automorphism into a permutation of this field's roots.
            G = self._field.galois_group(algorithm=self._default_algorithm, names=self._gc_names, gc_numbering=True)
            self._galois_closure = L = G._galois_closure
            gens = [g.as_hom() for g in G._gens]
            if gens:
                # roots[0] is a placeholder so root indices start at 1.
                roots = ([None] + self._field.absolute_polynomial().roots(L, multiplicities=False))
                new_gens = []
                for g in gens:
                    # Build the cycle decomposition of g's action on the roots.
                    seen = set()
                    cycles = []
                    for start in range(1, len(roots)):
                        if (start in seen):
                            continue
                        cycle = [start]
                        r = roots[start]
                        while True:
                            r = g(r)
                            i = roots.index(r)
                            seen.add(i)
                            if (i == start):
                                break
                            cycle.append(i)
                        cycles.append(tuple(cycle))
                    new_gens.append(cycles)
            else:
                new_gens = [()]
            return [self.element_class(x, self, check=False) for x in new_gens]

    def _element_constructor_(self, x, check=True):
        """Coerce *x* (1, a field automorphism, or raw data) into a group element."""
        if (x == 1):
            return self.identity()
        if (isinstance(x, NumberFieldHomomorphism_im_gens) and (x.parent() == self.number_field().Hom(self.number_field()))):
            # Match the automorphism against the stored elements.
            l = [g for g in self if (g.as_hom() == x)]
            if (len(l) != 1):
                raise ArithmeticError
            return l[0]
        return self.element_class(x, self, check=check)

    def is_galois(self):
        """Whether the field is Galois over QQ (group order equals degree)."""
        K = self._field
        d = K.absolute_degree()
        if (d < 12):
            return (self._pol_galgp().order() == d)
        else:
            return (len(K.automorphisms()) == d)

    def _repr_(self):
        """String representation; includes the nTt label for small degrees."""
        K = self.number_field()
        f = K.defining_polynomial()
        d = K.absolute_degree()
        if (d < 12):
            plabel = self.pari_label().split('=')[(- 1)].strip()
            tlabel = ('%sT%s (%s) with order %s ' % (d, self.transitive_number(), plabel, self.order()))
        else:
            tlabel = ''
        if ((d < 12) or self.is_galois()):
            return ('Galois group %sof %s' % (tlabel, f))
        else:
            return ('Galois group %sof (non-Galois) %s' % (tlabel, f))

    def number_field(self):
        """The number field this is the Galois group of."""
        return self._field

    def list(self):
        """All elements, in sorted order."""
        return self._elts

    def unrank(self, i):
        """The i-th element in sorted order."""
        return self._elts[i]

    def __iter__(self):
        return iter(self._elts)
    # Stripped decorator residue (see class docstring).
    _method

    def _ramgroups(self, P):
        """Pari ``idealramgroups`` data for the prime *P*."""
        K = self.number_field()
        P = K.ideal_monoid()(P).pari_prime()
        return pari(K).idealramgroups(self._pari_data, P)

    def decomposition_group(self, P):
        """Decomposition group at a prime ideal or an infinite place *P*."""
        if (not self.is_galois()):
            raise TypeError('Decomposition groups only defined for Galois extensions')
        if isinstance(P, NumberFieldHomomorphism_im_gens):
            # Infinite place: trivial if real, else generated by conjugation.
            if self.number_field().is_totally_real():
                return self.subgroup([])
            else:
                return self.subgroup([self.complex_conjugation(P)])
        else:
            # v = -1 gives the full decomposition group.
            return self.ramification_group(P, (- 1))

    def complex_conjugation(self, P=None):
        """The unique element acting as complex conjugation under embedding *P*."""
        if (P is None):
            Q = self.number_field().specified_complex_embedding()
            if (Q is None):
                raise ValueError('No default complex embedding specified')
            P = Q
        # Refine to infinite precision so conjugate comparison is exact.
        P = refine_embedding(P, infinity)
        if (not self.number_field().is_galois()):
            raise TypeError('Extension is not Galois')
        if self.number_field().is_totally_real():
            raise TypeError('No complex conjugation (field is real)')
        g = self.number_field().gen()
        gconj = P(g).conjugate()
        elts = [s for s in self if (P(s(g)) == gconj)]
        if (len(elts) != 1):
            raise ArithmeticError('Something has gone very wrong here')
        return elts[0]

    def ramification_group(self, P, v):
        """The *v*-th ramification group at prime *P* (v >= -1)."""
        if (not self.is_galois()):
            raise TypeError('Ramification groups only defined for Galois extensions')
        ramdata = self._ramgroups(P)
        if (v < (- 1)):
            raise ValueError('v must be at least -1')
        elif ((v + 1) >= len(ramdata)):
            # Beyond the last recorded group the filtration is trivial.
            return self.subgroup([])
        else:
            return self.subgroup(ramdata[(v + 1)][0])

    def inertia_group(self, P):
        """Inertia group at *P*: the 0-th ramification group."""
        if (not self.is_galois()):
            raise TypeError('Inertia groups only defined for Galois extensions')
        return self.ramification_group(P, 0)

    def ramification_breaks(self, P):
        """Indices where consecutive ramification groups differ in size."""
        if (not self.is_galois()):
            raise TypeError('Ramification breaks only defined for Galois extensions')
        ramdata = self._ramgroups(P)
        n = len(ramdata)
        from sage.sets.set import Set
        return Set(([(i - 1) for i in range((n - 1)) if (ramdata[i][1] != ramdata[(i + 1)][1])] + [(n - 2)]))

    def artin_symbol(self, P):
        """Artin symbol (Frobenius) at the unramified prime *P*."""
        if (not self.is_galois()):
            raise TypeError('Artin symbols only defined for Galois extensions')
        P = self.number_field().ideal_monoid()(P)
        if (not P.is_prime()):
            raise ValueError(('%s is not prime' % P))
        p = P.smallest_integer()
        t = []
        gens = self.number_field().ring_of_integers().ring_generators()
        for s in self.decomposition_group(P):
            # Frobenius acts as x -> x^p modulo P on ring generators.
            w = [(s(g) - (g ** p)).valuation(P) for g in gens]
            if (min(w) >= 1):
                t.append(s)
        if (len(t) > 1):
            raise ValueError(('%s is ramified' % P))
        return t[0]
def add_comm_rewrites(ctx: LeanGenContext, expr: Expression) -> List[str]:
    """Build an 'add_comm a b' rewrite string for every reversed addition
    pair found in *expr* by the context's simplifier."""
    rewrites = []
    for (a_expr, b_expr) in get_reversed_add_exprs(expr=expr, simplifier=ctx.simplifier):
        rewrites.append((('add_comm ' + a_expr) + ' ') + b_expr)
    return rewrites
def create_wham_whamr_csv(datapath, savepath, fs, version='min', savename='whamr_', set_types=None, add_reverb=True, task='separation', dereverberate=True):
    """Write one ``<savename><set_type>.csv`` manifest per data split.

    Each row lists the mixture, both sources, and the noise file for one
    utterance of the WHAM/WHAMR corpus under *datapath*.

    The mutable default ``set_types=['tr', 'cv', 'tt']`` was replaced by a
    None sentinel (same effective default, no shared-list hazard).

    NOTE(review): 'duration' is written as the constant 1.0 -- presumably a
    placeholder never used downstream; confirm before relying on it.
    """
    if set_types is None:
        set_types = ['tr', 'cv', 'tt']
    if (fs == 8000):
        sample_rate = '8k'
    elif (fs == 16000):
        sample_rate = '16k'
    else:
        raise ValueError('Unsupported sampling rate')
    for set_type in set_types:
        # Pick the sub-directories matching the reverb/task configuration.
        if add_reverb:
            mix_both = ('mix_both_reverb/' if (task == 'separation') else 'mix_single_reverb/')
            if (dereverberate and (set_type != 'tr')):
                s1 = 's1_reverb/'
                s2 = 's2_reverb/'
            else:
                s1 = 's1_anechoic/'
                s2 = 's2_anechoic/'
        else:
            mix_both = ('mix_both/' if (task == 'separation') else 'mix_single/')
            s1 = 's1/'
            s2 = 's2/'
        mix_path = os.path.join(datapath, 'wav{}'.format(sample_rate), version, set_type, mix_both)
        s1_path = os.path.join(datapath, 'wav{}'.format(sample_rate), version, set_type, s1)
        s2_path = os.path.join(datapath, 'wav{}'.format(sample_rate), version, set_type, s2)
        noise_path = os.path.join(datapath, 'wav{}'.format(sample_rate), version, set_type, 'noise/')
        # The same filenames exist in every sub-directory.
        files = os.listdir(mix_path)
        mix_fl_paths = [(mix_path + fl) for fl in files]
        s1_fl_paths = [(s1_path + fl) for fl in files]
        s2_fl_paths = [(s2_path + fl) for fl in files]
        noise_fl_paths = [(noise_path + fl) for fl in files]
        csv_columns = ['ID', 'duration', 'mix_wav', 'mix_wav_format', 'mix_wav_opts', 's1_wav', 's1_wav_format', 's1_wav_opts', 's2_wav', 's2_wav_format', 's2_wav_opts', 'noise_wav', 'noise_wav_format', 'noise_wav_opts']
        with open(os.path.join(savepath, ((savename + set_type) + '.csv')), 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            # Note: the loop variables shadow the directory paths above;
            # the directories are not needed again within this iteration.
            for (i, (mix_path, s1_path, s2_path, noise_path)) in enumerate(zip(mix_fl_paths, s1_fl_paths, s2_fl_paths, noise_fl_paths)):
                row = {'ID': i, 'duration': 1.0, 'mix_wav': mix_path, 'mix_wav_format': 'wav', 'mix_wav_opts': None, 's1_wav': s1_path, 's1_wav_format': 'wav', 's1_wav_opts': None, 's2_wav': s2_path, 's2_wav_format': 'wav', 's2_wav_opts': None, 'noise_wav': noise_path, 'noise_wav_format': 'wav', 'noise_wav_opts': None}
                writer.writerow(row)
class AutoTokenCounter(TokenCounter):
    """Token counter that dispatches to an organization-specific counter."""

    def __init__(self, huggingface_tokenizer: HuggingFaceTokenizer):
        # Lazily populated cache of per-organization counters.
        self.token_counters: Dict[str, TokenCounter] = {}
        self.huggingface_tokenizer: HuggingFaceTokenizer = huggingface_tokenizer

    def get_token_counter(self, organization: str) -> TokenCounter:
        """Return the counter for `organization`, building and caching it on first use."""
        if organization not in self.token_counters:
            if organization == 'openai':
                built = OpenAITokenCounter(self.huggingface_tokenizer)
            elif organization == 'ai21':
                built = AI21TokenCounter()
            elif organization == 'gooseai':
                built = GooseAITokenCounter()
            elif organization == 'cohere':
                built = CohereTokenCounter()
            else:
                # Unknown organizations are treated as free.
                built = FreeTokenCounter()
            self.token_counters[organization] = built
        return self.token_counters[organization]

    def count_tokens(self, request: Request, completions: List[Sequence]) -> int:
        """Count tokens with the counter matching the request's host organization."""
        counter: TokenCounter = self.get_token_counter(request.model_host)
        return counter.count_tokens(request, completions)
class RandomDataset(data.Dataset):
    """Dataset of `num_random` freshly sampled random images, all labeled 0."""

    def __init__(self, num_random=10000, shape=(3, 224, 224)):
        self.size = num_random
        self.shape = shape

    def __len__(self):
        return self.size

    def __repr__(self):
        return self.__class__.__name__

    def __getitem__(self, index):
        # A new random tensor is drawn on every access; `index` is
        # deliberately ignored.
        sample = torch.rand(*self.shape)
        return (F.normalize(sample, normalizing_mean, normalizing_std), 0)
def new_query(table: Table, ncols) -> Query:
    """Create a Query over every column of `table` with all predicates unset."""
    empty_predicates = OrderedDict.fromkeys(table.data.columns, None)
    return Query(predicates=empty_predicates, ncols=ncols)
def read_from_hdf5(fd, group, cache=None):
    """Recursively deserialize an object stored in a pytables HDF5 group.

    The group's 'type' node selects the decoder: raw arrays, strings,
    pickles, lists/tuples/dicts, Struct, sparse matrices, IGDomain, Mesh,
    or the literal byte markers b'True'/b'False'/b'None'.

    Args:
        fd: open pytables file handle (used to iterate dict-like groups).
        group: pytables Group (or SoftLink chain) to decode.
        cache: optional dict keyed by group path / cache id so objects
            referenced several times in the file are decoded only once.

    Returns:
        The reconstructed Python object.

    Raises:
        Exception: on an unknown group type tag.
    """
    if (cache is None):
        cache = {}
    from sfepy.discrete.iga.domain import IGDomain
    from sfepy.discrete.fem.meshio import HDF5MeshIO
    # Byte markers stored in place of data for these literal values.
    types = {b'True': True, b'False': False, b'None': None}
    def _read_from_hdf5(group):
        # Resolve chains of soft links down to the concrete group.
        while isinstance(group, pt.link.SoftLink):
            group = group()
        path = path_of_hdf5_group(group)
        if (path in cache):
            return cache[path]
        cache_ids = tuple()
        if ('cache' in group):
            cache_ids = group.cache.read().reshape((- 1))
            cache_ids = [dec(i) for i in cache_ids]
            # Reuse any alias of this object that was already decoded...
            for cache_id in cache_ids:
                if (cache_id in cache):
                    out = cache[cache_id]
                    break
            else:
                # ...for/else: no alias cached yet, so decode now.
                out = _read_value_from_hdf5(group)
        else:
            out = _read_value_from_hdf5(group)
        # Only non-scalar objects are worth caching; scalars are cheap.
        if (not isinstance(out, (int, float, str, bool, None.__class__))):
            cache[path] = out
            for cache_id in cache_ids:
                cache[cache_id] = out
        return out
    def _read_value_from_hdf5(group):
        # NOTE: 'type' deliberately mirrors the on-disk node name and
        # shadows the builtin; kept as-is.
        type = group.type.read().item()
        if (type in types):
            return types[type]
        data = group.data
        while isinstance(data, pt.link.SoftLink):
            data = data()
        def load_list():
            # List children are named by their integer index.
            out = ([None] * group.len.read())
            for i in data:
                out[int(i._v_name)] = _read_from_hdf5(i)
            return out
        def load_dict():
            out = {}
            for i in fd.iter_nodes(data):
                out[i._v_name] = _read_from_hdf5(i)
            return out
        def load_object(fn=None):
            # fn, when given, decodes (fd, data); otherwise read raw data.
            if fn:
                out = fn(fd, data)
            else:
                out = data.read()
            return out
        if (type == b'raw'):
            return data.read()
        if (type == b'object'):
            return load_object()
        if (type == b'str'):
            return dec(data.read().item())
        if (type == b'pickle'):
            return pickle.loads(data.read().item())
        if (type == b'dict'):
            return load_dict()
        if (type == b'Struct'):
            return Struct(**load_dict())
        if (type == b'list'):
            return load_list()
        if (type == b'tuple'):
            return tuple(load_list())
        if (type == b'sparse_matrix'):
            return load_object(read_sparse_matrix_from_hdf5)
        if (type == b'IGDomain'):
            return load_object(IGDomain.read_domain_from_hdf5)
        if (type == b'Mesh'):
            return load_object(HDF5MeshIO.read_mesh_from_hdf5)
        raise Exception('Unknown h5 group type {}'.format(type.decode('utf8')))
    return _read_from_hdf5(group)
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
    """Apply several spatial transforms per batch element.

    Args:
        U: input tensor whose first dimension is the batch.
        thetas: transform parameters of shape [num_batch, num_transforms, ...].
        out_size: output size forwarded to `transformer`.
        name: TF variable scope name.

    Returns:
        Result of `transformer` applied to the input repeated once per
        transform (so its leading dim is num_batch * num_transforms).
    """
    with tf.variable_scope(name):
        (num_batch, num_transforms) = map(int, thetas.get_shape().as_list()[:2])
        # Fix: `xrange` is Python 2 only and raises NameError under the
        # Python 3 used elsewhere in this file; `range` is equivalent here.
        indices = [([i] * num_transforms) for i in range(num_batch)]
        input_repeated = tf.gather(U, tf.reshape(indices, [(- 1)]))
        return transformer(input_repeated, thetas, out_size)
def Jacobian(C):
    """Return the Jacobian of ``C``, falling back to the generic construction.

    Tries ``C.jacobian()`` first (EAFP).  NOTE(review): the except clause
    also catches AttributeErrors raised *inside* ``C.jacobian()`` — the
    fallback then silently masks such internal errors; presumably
    intentional, confirm before tightening.
    """
    try:
        return C.jacobian()
    except AttributeError:
        return Jacobian_generic(C)
def sum_task(mixture_or_task_name, dataset_split='train', add_percentiles=True):
    """Summarize packing/padding statistics for one seqio mixture or task.

    Prints describe() tables for packing and padding at a fixed 512/512
    sequence length, then builds a summary record.

    Args:
        mixture_or_task_name: registered seqio mixture/task name.
        dataset_split: split to analyze.
        add_percentiles: include 50/75/90/99% input/target length percentiles.

    Returns:
        Dict with the max input/target lengths needed to avoid truncation,
        the mean packing factor, the example count, and (optionally) the
        sequence-length percentiles.
    """
    sequence_length = {'inputs': 512, 'targets': 512}
    df_packing = analyze_packing(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split)
    df_padding = analyze_padding(mixture_or_task_name=mixture_or_task_name, sequence_length=sequence_length, dataset_split=dataset_split)
    print((40 * '='))
    print('-I- mixture_or_task_name', mixture_or_task_name)
    print('-I- packing:')
    print(df_packing.describe(percentiles=[0.5, 0.75, 0.9, 0.99]))
    print('-I- padding:')
    described_padding = df_padding.describe(percentiles=[0.5, 0.75, 0.9, 0.99])
    print(described_padding)
    # (removed unused local `splits` that was never read)
    npacked = df_packing['npacked'].mean()
    ntrain = len(df_padding)
    sequence_length_req = infer_no_truncation_padding_seq_length(df_padding)
    record = {'mixture_or_task_name': mixture_or_task_name, 'max_input': sequence_length_req['inputs'], 'max_targets': sequence_length_req['targets'], 'npacked': npacked, 'examples': ntrain}
    if add_percentiles:
        percs_input = {f'input_seq_length_{i}%': described_padding['input_seq_length'][f'{i}%'] for i in [50, 75, 90, 99]}
        percs_target = {f'target_seq_length_{i}%': described_padding['target_seq_length'][f'{i}%'] for i in [50, 75, 90, 99]}
        record.update(percs_input)
        record.update(percs_target)
    print('-I summary:')
    pprint(record)
    print((40 * '='))
    return record
def test_phi_plus_phi_plus():
    """Measuring phi+ against phi+ must always reproduce the phi+ state."""
    for trial in range(400):
        k1, _k2, _k3, _k4, _a3 = create_scenario(phi_plus, phi_plus, trial)
        reordered = correct_order(k1.state, k1.keys)
        assert numpy.array_equal(reordered, phi_plus)
def read_relation_from_id(filename='./data/WN18RR/relation2id.txt'):
    """Parse a whitespace-separated "<relation> <id>" file into a dict.

    Lines with fewer than two whitespace-separated fields (including blank
    lines) are skipped.

    Args:
        filename: path to the relation2id mapping file.

    Returns:
        Dict mapping relation name (str) to its integer id.
    """
    relation2id = {}
    with open(filename, 'r') as f:
        for line in f:
            # Split once instead of three times per line (the original
            # re-split the same line for the check and both fields).
            parts = line.strip().split()
            if (len(parts) > 1):
                relation2id[parts[0]] = int(parts[1])
    return relation2id
def __lagrange_bounds_phc(n, m, a, tmpfile=None):
    """Bound real critical values of a Lagrange-resolvent system via PHCpack.

    For each partition P of n-1 into m-1 parts, writes a polynomial system
    to a scratch file, solves it with the external ``phc`` binary, and
    scrapes the real ``xn`` solutions from its output.

    Args:
        n, m: problem dimensions.
        a: coefficients, converted to power sums first.
        tmpfile: optional base path for scratch files (a Sage temp file is
            created when omitted).

    Returns:
        ``[min, max]`` over all real critical values found, or ``[]``.

    Raises:
        RuntimeError: if the ``phc`` executable is not on PATH.
    """
    import shutil
    S = coefficients_to_power_sums(n, m, a)
    # Fix: os.popen2 was removed in Python 3; use shutil.which to detect
    # the phc binary instead of shelling out to `which`.
    if (shutil.which('phc') is None):
        raise RuntimeError('PHCpack not installed.')
    if (tmpfile is None):
        tmpfile = sage.misc.misc.tmp_filename()
    f = open((tmpfile + '.phc'), 'w')
    f.close()
    output_data = []
    for P in sage.combinat.partition.Partitions((n - 1), length=(m - 1)):
        # Write one equation per power j: sum_i P[i]*x_i**j + xn**j - S[j].
        f = open(tmpfile, 'w')
        f.write((('%d' % m) + '\n'))
        for j in range(1, (m + 1)):
            for i in range((m - 1)):
                f.write((((('%d' % P[i]) + ('*x%d' % i)) + ('**%d' % j)) + ' + '))
            f.write(((('xn**%d' % j) + (' - (%d' % S[j])) + ');\n'))
        f.close()
        os.remove((tmpfile + '.phc'))
        # NOTE(review): os.popen does not wait for phc to finish before the
        # result file is reopened below — presumably phc completes quickly
        # enough in practice; confirm before relying on this.
        os.popen((((('phc -b ' + tmpfile) + ' ') + tmpfile) + '.phc'))
        f = open((tmpfile + '.phc'), 'r')
        f_str = f.read()
        # Scrape the value preceding every '= real ' marker; the float is
        # the third token after the last 'xn' before the marker.
        pos = f_str.find('= real ')
        crits = []
        while (pos != (- 1)):
            posl = f_str.rfind('xn', 0, pos)
            f_str_split = f_str[posl:pos].split()
            crits += [float(f_str_split[2])]
            pos = f_str.find('= real ', (pos + 1))
        if (len(crits) > 0):
            output_data += [[P, min(crits), max(crits)]]
    if (len(output_data) > 0):
        return [min([v[1] for v in output_data]), max([v[2] for v in output_data])]
    else:
        return []
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0.0, l1_ratio=0.0, sigma=0.01, beta=0.1):
    """Non-negative least squares subproblem: minimize ||X - WH|| over H >= 0.

    Projected-gradient method with a backtracking (Armijo-style) step-size
    search, optionally with L1/L2 regularization on H.

    Args:
        X: data matrix.
        W: fixed factor.
        H: initial value of the free factor (updated iteratively).
        tol: stopping tolerance on the projected-gradient norm.
        max_iter: maximum outer iterations.
        alpha: overall regularization strength (0 disables it).
        l1_ratio: mix between L1 (1.0) and L2 (0.0) penalties.
        sigma: sufficient-decrease constant of the line search.
        beta: step-size shrink/grow factor.

    Returns:
        (H, grad, n_iter): solution, last gradient, iterations performed.
    """
    WtX = safe_sparse_dot(W.T, X)
    WtW = np.dot(W.T, W)
    gamma = 1
    for n_iter in range(1, (max_iter + 1)):
        grad = (np.dot(WtW, H) - WtX)
        if ((alpha > 0) and (l1_ratio == 1.0)):
            # Pure L1 penalty: constant shift of the gradient.
            grad += alpha
        elif (alpha > 0):
            # Elastic net: constant L1 part plus L2 part proportional to H.
            grad += (alpha * (l1_ratio + ((1 - l1_ratio) * H)))
        # Projected-gradient norm: only components free to move count
        # (negative gradient, or positive entries of H).
        if (_norm((grad * np.logical_or((grad < 0), (H > 0)))) < tol):
            break
        Hp = H
        for inner_iter in range(20):
            # Gradient step followed by projection onto H >= 0.
            Hn = (H - (gamma * grad))
            Hn *= (Hn > 0)
            d = (Hn - H)
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            suff_decr = ((((1 - sigma) * gradd) + (0.5 * dQd)) < 0)
            if (inner_iter == 0):
                # First trial fixes the search direction: shrink gamma until
                # sufficient decrease, or grow it while decrease holds.
                decr_gamma = (not suff_decr)
            if decr_gamma:
                if suff_decr:
                    H = Hn
                    break
                else:
                    gamma *= beta
            elif ((not suff_decr) or (Hp == Hn).all()):
                # Growth phase ended: keep the last sufficiently-decreasing H.
                H = Hp
                break
            else:
                gamma /= beta
                Hp = Hn
    if (n_iter == max_iter):
        warnings.warn('Iteration limit reached in nls subproblem.', ConvergenceWarning)
    return (H, grad, n_iter)
class PathScaleCCompiler(UnixCCompiler):
    """UnixCCompiler configured for the PathScale suite (pathcc / pathCC)."""

    compiler_type = 'pathcc'
    cc_exe = 'pathcc'
    cxx_exe = 'pathCC'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        cc = self.cc_exe
        # The C driver handles compiling and linking; shared objects are
        # linked with '-shared' appended.
        self.set_executables(compiler=cc, compiler_so=cc, compiler_cxx=self.cxx_exe, linker_exe=cc, linker_so=(cc + ' -shared'))
def load_checkpoint(step, model, optimizer, scheduler):
    """Restore model/optimizer/scheduler state from the checkpoint at `step`.

    Also restores the module-level `global_step` / `global_epoch` counters.
    The checkpoint path is derived from the module-level `args`.
    """
    global global_step
    global global_epoch
    checkpoint_path = os.path.join(args.save, args.model_name, 'checkpoint_step{:09d}.pth'.format(step))
    print('Load checkpoint from: {}'.format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path)
    try:
        model.load_state_dict(checkpoint['state_dict'])
    except RuntimeError:
        # Checkpoints saved from a DataParallel model prefix every key with
        # 'module.'; strip those 7 characters and retry.
        print('INFO: this model is trained with DataParallel. Creating new state_dict without module...')
        from collections import OrderedDict
        stripped = OrderedDict((key[7:], value) for (key, value) in checkpoint['state_dict'].items())
        model.load_state_dict(stripped)
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    global_step = checkpoint['global_step']
    global_epoch = checkpoint['global_epoch']
    return (model, optimizer, scheduler)
def save_args(original_args):
    """Serialize the training-args dict (mutated in place) to output_dir/args.json.

    Log levels are mapped back to their string names; runtime-only keys and
    None/empty-string values are removed before writing.
    """
    level_names = {v: k for (k, v) in trainer_log_levels.items()}
    original_args['log_level'] = level_names[original_args['log_level']]
    original_args['log_level_replica'] = level_names[original_args['log_level_replica']]
    # Runtime-only keys have no place in a saved config.
    for runtime_key in ('_n_gpu', 'local_rank'):
        original_args.pop(runtime_key, None)
    # Collect first, delete after: never mutate a dict while iterating it.
    empty_keys = [key for (key, value) in original_args.items() if (value is None) or (value == '')]
    for key in empty_keys:
        del original_args[key]
    with open(os.path.join(original_args['output_dir'], 'args.json'), mode='w') as f:
        json.dump(original_args, f, indent=4)
def get_from_to_our_keys(model_name: str) -> Dict[(str, str)]:
    """Build a mapping from source (vissl) RegNet state-dict keys to ours.

    Traces both a tiny fake source model and our model with the same dummy
    input, flattens their traced parameters, and pairs keys positionally —
    this relies on both traversals visiting parameters in matching order.
    For 'in1k' checkpoints the classification head is mapped explicitly.

    Args:
        model_name: checkpoint name; 'in1k' selects the classification model.

    Returns:
        Dict mapping each source key to the corresponding destination key.
    """
    our_config = RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[8, 8, 8, 8], groups_width=8)
    if ('in1k' in model_name):
        our_model = RegNetForImageClassification(our_config)
    else:
        our_model = RegNetModel(our_config)
    # Source side: a fake (small) vissl RegNet wrapper, used only for tracing.
    from_model = FakeRegNetVisslWrapper(RegNet(FakeRegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)))
    with torch.no_grad():
        from_model = from_model.eval()
        our_model = our_model.eval()
        x = torch.randn((1, 3, 32, 32))
        dest_tracker = Tracker(our_model)
        dest_traced = dest_tracker(x).parametrized
        pprint(dest_tracker.name2module)
        src_tracker = Tracker(from_model)
        src_traced = src_tracker(x).parametrized
    def to_params_dict(dict_with_modules):
        # Flatten {module_name: module} into {module_name.param_name: tensor}.
        params_dict = OrderedDict()
        for (name, module) in dict_with_modules.items():
            for (param_name, param) in module.state_dict().items():
                params_dict[f'{name}.{param_name}'] = param
        return params_dict
    from_to_ours_keys = {}
    src_state_dict = to_params_dict(src_traced)
    dst_state_dict = to_params_dict(dest_traced)
    # Positional pairing — assumes identical parameter visit order on both sides.
    for ((src_key, src_param), (dest_key, dest_param)) in zip(src_state_dict.items(), dst_state_dict.items()):
        from_to_ours_keys[src_key] = dest_key
        logger.info(f'{src_key} -> {dest_key}')
    if ('in1k' in model_name):
        # The classifier head is not traced; map its weights by hand.
        from_to_ours_keys['0.clf.0.weight'] = 'classifier.1.weight'
        from_to_ours_keys['0.clf.0.bias'] = 'classifier.1.bias'
    return from_to_ours_keys
class TransductiveFinetuning(Finetune):
    """Transductive fine-tuning few-shot classifier.

    Refines the class prototypes by gradient descent on the support-set
    cross-entropy plus the conditional entropy of the (soft) query
    predictions, then classifies queries by L2 distance to the prototypes.
    """

    def __init__(self, *args, fine_tuning_steps: int=25, fine_tuning_lr: float=5e-05, temperature: float=1.0, **kwargs):
        super().__init__(*args, fine_tuning_steps=fine_tuning_steps, fine_tuning_lr=fine_tuning_lr, temperature=temperature, **kwargs)

    def forward(self, query_images: Tensor) -> Tensor:
        """Fine-tune prototypes on support CE + query entropy, then classify queries."""
        query_features = self.compute_features(query_images)
        with torch.enable_grad():
            self.prototypes.requires_grad_()
            optimizer = torch.optim.Adam([self.prototypes], lr=self.fine_tuning_lr)
            for _ in range(self.fine_tuning_steps):
                # Supervised term on the support set...
                support_cross_entropy = nn.functional.cross_entropy((self.temperature * self.l2_distance_to_prototypes(self.support_features)), self.support_labels)
                # ...plus the conditional entropy of query predictions
                # (the transductive part: unlabeled queries shape the loss).
                query_conditional_entropy = entropy((self.temperature * self.l2_distance_to_prototypes(query_features)))
                loss = (support_cross_entropy + query_conditional_entropy)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        return self.softmax_if_specified(self.l2_distance_to_prototypes(query_features), temperature=self.temperature).detach()

    @staticmethod
    def is_transductive() -> bool:
        # Fix: this was declared without @staticmethod (and without `self`),
        # so calling it on an instance raised TypeError; class-level calls
        # keep working exactly as before.
        return True
def test_argcombinations():
    """Pairwise index combinations without replacement over a jagged array."""
    layout = [[0.0, 1.1, 2.2, 3.3], [], [4.4, 5.5, 6.6], [7.7], [8.8, 9.9, 10.0, 11.1, 12.2]]
    array = ak.Array(layout)
    expected = [
        [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)],
        [],
        [(0, 1), (0, 2), (1, 2)],
        [],
        [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)],
    ]
    assert (to_list(ak.operations.argcombinations(array, 2, replacement=False)) == expected)
class AI21TokenCostEstimator(TokenCostEstimator):
    """Estimates AI21 token cost as completions times the per-completion budget."""

    def estimate_tokens(self, request: Request, metric_service: MetricService) -> int:
        """Upper-bound estimate: assume every completion uses its full token budget."""
        per_completion_budget = request.max_tokens
        return request.num_completions * per_completion_budget
class CosineClassifier(_Classifier):
    """Classifier producing scaled cosine similarities between features and weights."""

    def __init__(self, feat_dim=None, num_classes=None, dtype=None, scale=30, **kwargs):
        super().__init__(feat_dim, num_classes, dtype)
        self.scale = scale  # multiplier applied to the cosine logits

    def forward(self, x):
        # Unit-normalizing both operands turns the dot product into a cosine.
        unit_x = F.normalize(x, dim=(- 1))
        unit_w = F.normalize(self.weight, dim=(- 1))
        return self.scale * F.linear(unit_x, unit_w)
class BQCorpusBertPipe(MatchingBertPipe):
    """BERT matching pipeline for the BQ Corpus (Chinese sentence pairs)."""

    def __init__(self, tokenizer='cn-char'):
        super().__init__(tokenizer=tokenizer)

    def process_from_file(self, paths=None):
        """Load BQ Corpus from `paths` and run the full preprocessing chain."""
        bundle = BQCorpusLoader().load(paths)
        # Rename -> base processing -> truncate -> rename back, in this order.
        for stage in (RenamePipe(task='cn-nli-bert'), self, TruncateBertPipe(task='cn'), RenamePipe(task='cn-nli-bert')):
            bundle = stage.process(bundle)
        return bundle
# GitHub webhook actions handled by this service; each member's value
# equals its name (the raw string received in the webhook payload).
Action = Enum(
    'Action',
    [(action, action) for action in (
        'opened',
        'reopened',
        'closed',
        'labeled',
        'unlabeled',
        'ready_for_review',
        'synchronize',
        'review_requested',
        'converted_to_draft',
        'submitted',
    )],
)
def train(args, trainer, task, epoch_itr):
    """Train for one epoch, periodically validating and saving checkpoints.

    Reads batches from `epoch_itr`, groups them per `args.update_freq`,
    tracks extra per-key meters, logs running stats through `progress_bar`,
    and (unless disabled) validates and checkpoints every
    `args.save_interval_updates` updates.  Stops early at `args.max_update`.
    """
    # Per-epoch update frequency: indexed by epoch when a schedule is given,
    # otherwise the last entry applies to all remaining epochs.
    update_freq = (args.update_freq[(epoch_itr.epoch - 1)] if (epoch_itr.epoch <= len(args.update_freq)) else args.update_freq[(- 1)])
    # Shuffle only once the curriculum phase is over.
    itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum))
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
    extra_meters = collections.defaultdict((lambda : AverageMeter()))
    valid_subsets = args.valid_subset.split(',')
    max_update = (args.max_update or math.inf)
    for (i, samples) in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if (log_output is None):
            continue
        stats = get_training_stats(trainer)
        for (k, v) in log_output.items():
            if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']):
                # Already covered by get_training_stats.
                continue
            if (('loss' in k) or (k == 'accuracy')):
                # Loss-like quantities are weighted by sample size.
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats, tag='train', step=stats['num_updates'])
        if (i == 0):
            # Exclude the warm-up mini-batch from throughput meters.
            trainer.get_meter('wps').reset()
            trainer.get_meter('ups').reset()
        num_updates = trainer.get_num_updates()
        if ((not args.disable_validation) and (args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if (num_updates >= max_update):
            break
    stats = get_training_stats(trainer)
    for (k, meter) in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag='train', step=stats['num_updates'])
    # Reset epoch-level meters so the next epoch starts from scratch.
    for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip']:
        meter = trainer.get_meter(k)
        if (meter is not None):
            meter.reset()
def register_Ns3DsrDsrMaintainBuffEntry_methods(root_module, cls):
    """Register constructors and accessors of ns3::dsr::DsrMaintainBuffEntry on `cls`."""
    # Copy constructor, then the full constructor with every parameter defaulted.
    cls.add_constructor([param('ns3::dsr::DsrMaintainBuffEntry const &', 'arg0')])
    cls.add_constructor([param('ns3::Ptr< ns3::Packet const >', 'pa', default_value='0'), param('ns3::Ipv4Address', 'us', default_value='ns3::Ipv4Address()'), param('ns3::Ipv4Address', 'n', default_value='ns3::Ipv4Address()'), param('ns3::Ipv4Address', 's', default_value='ns3::Ipv4Address()'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint16_t', 'ackId', default_value='0'), param('uint8_t', 'segs', default_value='0'), param('ns3::Time', 'exp', default_value='ns3::Simulator::Now()')])
    # Const getters, registered in the original (alphabetical) order.
    getters = [
        ('GetAckId', 'uint16_t'),
        ('GetDst', 'ns3::Ipv4Address'),
        ('GetExpireTime', 'ns3::Time'),
        ('GetNextHop', 'ns3::Ipv4Address'),
        ('GetOurAdd', 'ns3::Ipv4Address'),
        ('GetPacket', 'ns3::Ptr< ns3::Packet const >'),
        ('GetSegsLeft', 'uint8_t'),
        ('GetSrc', 'ns3::Ipv4Address'),
    ]
    for (method_name, return_type) in getters:
        cls.add_method(method_name, return_type, [], is_const=True)
    # Matching setters: (method name, parameter C++ type, parameter name).
    setters = [
        ('SetAckId', 'uint16_t', 'ackId'),
        ('SetDst', 'ns3::Ipv4Address', 'n'),
        ('SetExpireTime', 'ns3::Time', 'exp'),
        ('SetNextHop', 'ns3::Ipv4Address', 'n'),
        ('SetOurAdd', 'ns3::Ipv4Address', 'us'),
        ('SetPacket', 'ns3::Ptr< ns3::Packet const >', 'p'),
        ('SetSegsLeft', 'uint8_t', 'segs'),
        ('SetSrc', 'ns3::Ipv4Address', 's'),
    ]
    for (method_name, arg_type, arg_name) in setters:
        cls.add_method(method_name, 'void', [param(arg_type, arg_name)])
    return
def keypoint_rcnn(model):
    """Deprecated alias: build a generalized R-CNN with keypoints enabled."""
    deprecation_msg = 'Deprecated: use `MODEL.TYPE: generalized_rcnn` with `MODEL.KEYPOINTS_ON: True`'
    logger.warn(deprecation_msg)
    return generalized_rcnn(model)
class Attention(nn.Module):
    """Convolutional additive attention producing a one-channel energy map.

    energy = V(tanh(H(hidden) + W(input))), where H, W, V are bias-free
    same-padding convolutions.
    """

    def __init__(self, input_dim, hidden_dim, attn_channel, kernel_size):
        super(Attention, self).__init__()
        self.kernel_size = kernel_size
        # Half-kernel padding keeps spatial size unchanged for odd kernels.
        self.padding = kernel_size // 2
        self.H = nn.Conv2d(in_channels=hidden_dim, out_channels=attn_channel, kernel_size=kernel_size, padding=self.padding, bias=False)
        self.W = nn.Conv2d(in_channels=input_dim, out_channels=attn_channel, kernel_size=kernel_size, padding=self.padding, bias=False)
        self.V = nn.Conv2d(in_channels=attn_channel, out_channels=1, kernel_size=kernel_size, padding=self.padding, bias=False)

    def forward(self, input_tensor, hidden):
        # Only the first element of `hidden` (the hidden state) is used.
        combined = self.H(hidden[0]) + self.W(input_tensor)
        return self.V(combined.tanh())
def main():
    """Re-rank retrieval results with GNN re-ranking and print evaluation metrics."""
    data = load_pickle(args.data_path)
    query_cam, query_label = data['query_cam'], data['query_label']
    gallery_cam, gallery_label = data['gallery_cam'], data['gallery_label']
    # Move both feature matrices to the GPU before re-ranking.
    query_feature = torch.FloatTensor(data['query_f']).cuda()
    gallery_feature = torch.FloatTensor(data['gallery_f']).cuda()
    ranking = gnn_reranking(query_feature, gallery_feature, args.k1, args.k2)
    evaluate_ranking_list(ranking, query_label, query_cam, gallery_label, gallery_cam)
# Fix: the skipIf/given decorators had been mangled into bare tuples in the
# original source (L1258 was not even valid Python syntax); restored to the
# conventional caffe2 hypothesis-test form.  `st`/`mu` (hypothesis strategies
# and mkl test utils) are already imported by this file.
import unittest
from hypothesis import given


@unittest.skipIf(not workspace.C.has_mkldnn, 'Skipping as we do not have mkldnn.')
class MKLReluTest(hu.HypothesisTestCase):
    """Device-consistency test for the Relu operator under MKL-DNN."""

    @given(size=st.integers(8, 20), input_channels=st.integers(1, 3), batch_size=st.integers(1, 3), inplace=st.booleans(), **mu.gcs)
    def test_mkl_relu(self, size, input_channels, batch_size, inplace, gc, dc):
        """Check Relu output agrees across devices for random NCHW inputs."""
        op = core.CreateOperator('Relu', ['X'], (['Y'] if (not inplace) else ['X']))
        # Inputs centered around zero so both sides of the Relu are exercised.
        X = (np.random.rand(batch_size, input_channels, size, size).astype(np.float32) - 0.5)
        self.assertDeviceChecks(dc, op, [X], [0])
def dag2pag(dag, islatent):
    """Convert a DAG with latent variables into the corresponding PAG.

    Builds the skeleton over the observed nodes by checking, via simple
    paths in the undirected version of the DAG, whether each observed pair
    is connected by a collider-free path through latents only; records
    separating sets from collider-free paths through observed nodes;
    orients unshielded colliders; finally applies the FCI orientation
    rules R1/R2 (cycle) and R3 until a fixpoint.

    Args:
        dag: graph object exposing get_nodes()/get_edge() (causal-learn style).
        islatent: collection of latent nodes to marginalize out.

    Returns:
        GeneralGraph: the PAG over the observed nodes.
    """
    udg = nx.Graph()
    nodes = dag.get_nodes()
    nodes_ids = {node: i for (i, node) in enumerate(nodes)}
    n = len(nodes)
    # Undirected skeleton of the DAG, indexed by node position.
    for (x, y) in combinations(range(n), 2):
        if dag.get_edge(nodes[x], nodes[y]):
            udg.add_edge(x, y)
    observed_nodes = list((set(nodes) - set(islatent)))
    PAG = GeneralGraph(observed_nodes)
    # Start from the complete graph with circle endpoints everywhere.
    for (nodex, nodey) in combinations(observed_nodes, 2):
        edge = Edge(nodex, nodey, Endpoint.CIRCLE, Endpoint.CIRCLE)
        edge.set_endpoint1(Endpoint.CIRCLE)
        edge.set_endpoint2(Endpoint.CIRCLE)
        PAG.add_edge(edge)
    sepset = {(nodex, nodey): set() for (nodex, nodey) in permutations(observed_nodes, 2)}
    for (nodex, nodey) in combinations(observed_nodes, 2):
        if (nodex in islatent):
            continue
        if (nodey in islatent):
            continue
        all_paths = nx.all_simple_paths(udg, nodes_ids[nodex], nodes_ids[nodey])
        noncolider_path = []
        is_connected = False
        for path in all_paths:
            path_sep = True
            has_nonlatent = False
            for i in range(1, (len(path) - 1)):
                if (nodes[path[i]] in observed_nodes):
                    has_nonlatent = True
                # Collider at path[i]: both adjacent DAG edges point into it.
                has_collider = (is_endpoint(dag.get_edge(nodes[path[(i - 1)]], nodes[path[i]]), nodes[path[i]], Endpoint.ARROW) and is_endpoint(dag.get_edge(nodes[path[(i + 1)]], nodes[path[i]]), nodes[path[i]], Endpoint.ARROW))
                if has_collider:
                    path_sep = False
            if (not path_sep):
                continue
            if has_nonlatent:
                # Collider-free path through >=1 observed node: those
                # observed intermediates can separate the pair.
                noncolider_path.append(path)
            else:
                # Collider-free path through latents only: inseparable.
                is_connected = True
                break
        if (not is_connected):
            edge = PAG.get_edge(nodex, nodey)
            if edge:
                PAG.remove_edge(edge)
            # Every observed intermediate on a collider-free path joins
            # the separating set of (nodex, nodey), symmetrically.
            for path in noncolider_path:
                for i in range(1, (len(path) - 1)):
                    if (nodes[path[i]] in islatent):
                        continue
                    sepset[(nodex, nodey)] |= {nodes[path[i]]}
                    sepset[(nodey, nodex)] |= {nodes[path[i]]}
    # Orient unshielded colliders: x *-> z <-* y whenever z separates no
    # nonadjacent pair (z not in sepset(x, y)).
    for (nodex, nodey) in combinations(observed_nodes, 2):
        if PAG.get_edge(nodex, nodey):
            continue
        for nodez in observed_nodes:
            if (nodez == nodex):
                continue
            if (nodez == nodey):
                continue
            if (nodez not in sepset[(nodex, nodey)]):
                edge_xz = PAG.get_edge(nodex, nodez)
                edge_yz = PAG.get_edge(nodey, nodez)
                if (edge_xz and edge_yz):
                    # Remove, re-point the z endpoint to an arrow, re-insert.
                    PAG.remove_edge(edge_xz)
                    mod_endpoint(edge_xz, nodez, Endpoint.ARROW)
                    PAG.add_edge(edge_xz)
                    PAG.remove_edge(edge_yz)
                    mod_endpoint(edge_yz, nodez, Endpoint.ARROW)
                    PAG.add_edge(edge_yz)
    # Propagate orientations with FCI rules R1/R2 (cycle) and R3 to fixpoint.
    changeFlag = True
    while changeFlag:
        changeFlag = False
        changeFlag = rulesR1R2cycle(PAG, None, changeFlag, False)
        changeFlag = ruleR3(PAG, sepset, None, changeFlag, False)
    return PAG
def list_s3_objects(bucket, name):
    """Return the S3 list_objects response for at most one key under prefix `name`.

    Credentials come from the module-level `access_key` / `secret_key`.
    """
    client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
    return client.list_objects(Bucket=bucket, Prefix=name, MaxKeys=1)
def make_beta_schedule(schedule, n_timestep, linear_start=0.0001, linear_end=0.02, cosine_s=0.008):
    """Build a diffusion beta schedule of length `n_timestep`.

    Supported schedules:
        'linear'      - interpolate in sqrt-space, then square.
        'cosine'      - Nichol/Dhariwal cosine alphas, betas clipped to 0.999.
        'sqrt_linear' - plain linear interpolation.
        'sqrt'        - square root of the linear interpolation.

    Returns:
        A float64 numpy array of betas.

    Raises:
        ValueError: for an unknown schedule name.
    """
    if schedule == 'linear':
        sqrt_betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64)
        betas = sqrt_betas ** 2
    elif schedule == 'cosine':
        steps = torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        alphas = torch.cos(steps / (1 + cosine_s) * np.pi / 2).pow(2)
        alphas = alphas / alphas[0]
        # Betas are one minus the ratio of consecutive (normalized) alphas.
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = np.clip(betas, a_min=0, a_max=0.999)
    elif schedule == 'sqrt_linear':
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == 'sqrt':
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
def __getattr__(name):
    """Module-level attribute hook (PEP 562).

    Raises a detailed ImportError for the removed `load_boston` dataset
    loader; any other unknown name falls through to a plain AttributeError.
    """
    if (name == 'load_boston'):
        # Long explanatory message (with fetch instructions) kept verbatim.
        msg = textwrap.dedent('\n `load_boston` has been removed from scikit-learn since version 1.2.\n\n The Boston housing prices dataset has an ethical problem: as\n investigated in [1], the authors of this dataset engineered a\n non-invertible variable "B" assuming that racial self-segregation had a\n positive impact on house prices [2]. Furthermore the goal of the\n research that led to the creation of this dataset was to study the\n impact of air quality but it did not give adequate demonstration of the\n validity of this assumption.\n\n The scikit-learn maintainers therefore strongly discourage the use of\n this dataset unless the purpose of the code is to study and educate\n about ethical issues in data science and machine learning.\n\n In this special case, you can fetch the dataset from the original\n source::\n\n import pandas as pd\n import numpy as np\n\n data_url = " raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)\n data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])\n target = raw_df.values[1::2, 2]\n\n Alternative datasets include the California housing dataset and the\n Ames housing dataset. You can load the datasets as follows::\n\n from sklearn.datasets import fetch_california_housing\n housing = fetch_california_housing()\n\n for the California housing dataset and::\n\n from sklearn.datasets import fetch_openml\n housing = fetch_openml(name="house_prices", as_frame=True)\n\n for the Ames housing dataset.\n\n [1] M Carlisle.\n "Racist data destruction?"\n < [2] Harrison Jr, David, and Daniel L. Rubinfeld.\n "Hedonic housing prices and the demand for clean air."\n Journal of environmental economics and management 5.1 (1978): 81-102.\n < ')
        raise ImportError(msg)
    try:
        return globals()[name]
    except KeyError:
        # NOTE(review): raised without a message; callers only see the type.
        raise AttributeError
class model():
def __init__(self, curr_param):
self.sess = tf.Session()
self.training_step = 0
self.par = curr_param
    def load_data(self, mode):
        """Load the scans listed in the train or test split file.

        Args:
            mode: 'train' reads self.par.train_file into self.training_data
                (one random rotation per scan); anything else reads
                self.par.test_file into self.test_data (rotation 0).
        """
        if (mode == 'train'):
            file_name = self.par.train_file
            self.training_data = []
        else:
            file_name = self.par.test_file
            self.test_data = []
        with open(file_name) as f:
            scans = f.readlines()
            scans = [s.rstrip() for s in scans]
        if (mode == 'train'):
            self.training_scans = scans
        else:
            self.test_scans = scans
        # Scan names become directories under the preprocessing output dir.
        scans = [os.path.join(self.par.pre_output_dir, s.rstrip()) for s in scans]
        cnt = 0
        for s_path in scans:
            if (mode == 'train'):
                # Rotation augmentation: a random precomputed rotation per scan.
                rot = random.randint(0, (self.par.num_rotations - 1))
            else:
                rot = 0
            s = ScanData()
            s.load(os.path.join(s_path, str(rot)), self.par.num_scales)
            # Depth is remapped into the first convolution radius range.
            s.remap_depth(vmin=(- self.par.conv_rad[0]), vmax=self.par.conv_rad[0])
            s.remap_normals()
            if (mode == 'train'):
                self.training_data.append(s)
            else:
                self.test_data.append(s)
            cnt += 1
def precompute_validation_batches(self):
self.validation_batches = []
for test_scan in self.test_data:
if (self.par.data_sampling_type == 'part'):
batch_array = get_batch_array(test_scan, self.par)
for b in batch_array:
if (np.shape(b.colors[0])[0] <= self.par.batch_size):
self.validation_batches.append(b)
else:
b = get_batch_from_full_scan(test_scan, self.par.num_scales, self.par.d_par.class_weights)
if (np.shape(b.colors[0])[0] <= self.par.batch_size):
self.validation_batches.append(b)
def get_training_batch(self, iter_num):
if (self.par.data_sampling_type == 'full'):
num_train_scans = len(self.training_data)
scan_num = (iter_num % num_train_scans)
return get_batch_from_full_scan(self.training_data[scan_num], self.par.num_scales, self.par.d_par.class_weights)
else:
scan_num = (iter_num % self.par.batch_array_size)
if (scan_num == 0):
random_scan = random.randint(0, (len(self.training_data) - 1))
self.tr_batch_array = get_batch_array(self.training_data[random_scan], self.par)
return self.tr_batch_array[scan_num]
    def get_feed_dict(self, b):
        """Build the TF feed dict for batch `b`, padding everything to batch_size.

        Convolution/pooling indices and masks for all three scales plus
        labels and loss weights are always fed; input modalities are added
        according to the self.par.input_type flags ('d', 'n', 'h', 'c').
        """
        bs = self.par.batch_size
        mask1 = get_pooling_mask(b.pool_ind[1])
        mask2 = get_pooling_mask(b.pool_ind[2])
        ret_dict = {self.c1_ind: expand_dim_to_batch2(b.conv_ind[0], bs), self.c2_ind: expand_dim_to_batch2(b.conv_ind[1], (bs // 2)), self.c3_ind: expand_dim_to_batch2(b.conv_ind[2], (bs // 4)), self.p12_ind: expand_dim_to_batch2(b.pool_ind[1], (bs // 2)), self.p12_mask: expand_dim_to_batch2(mask1, (bs // 2), dummy_val=0), self.p23_ind: expand_dim_to_batch2(b.pool_ind[2], (bs // 4)), self.p23_mask: expand_dim_to_batch2(mask2, (bs // 4), dummy_val=0), self.label: expand_dim_to_batch1(b.labels[0], bs), self.loss_weight: expand_dim_to_batch1(b.loss_weights[0], bs)}
        if ('d' in self.par.input_type):
            # Depth is transposed and fed per scale (see build_model's
            # per-scale depth placeholders).
            ret_dict.update({self.input_depth1: expand_dim_to_batch2(b.depth[0].T, bs)})
            ret_dict.update({self.input_depth2: expand_dim_to_batch2(b.depth[1].T, (bs // 2))})
            ret_dict.update({self.input_depth3: expand_dim_to_batch2(b.depth[2].T, (bs // 4))})
        if ('n' in self.par.input_type):
            ret_dict.update({self.input_normals: expand_dim_to_batch2(b.normals[0], bs)})
        if ('h' in self.par.input_type):
            ret_dict.update({self.input_h: expand_dim_to_batch2(b.height[0], bs)})
        if ('c' in self.par.input_type):
            ret_dict.update({self.input_colors: expand_dim_to_batch2(b.colors[0], bs)})
        return ret_dict
    def build_model(self, batch_size):
        """Construct the three-scale point-convolution U-Net graph.

        Creates input placeholders for the modalities enabled in
        self.par.input_type ('d' depth, 'n' normals, 'h' height,
        'c' colors), an encoder of three point-conv scales with pooling, a
        mirrored decoder with unpooling and skip connections, the weighted
        softmax loss masked to labeled points, the Adam train op, accuracy,
        TensorBoard summaries, a summary writer and a checkpoint saver.

        Args:
            batch_size: number of points at the finest scale; coarser
                scales use batch_size // 2 and // 4.
        """
        self.best_accuracy = 0.0
        fs = self.par.filter_size
        bs = batch_size
        num_input_ch = 0
        input_list = []
        # --- input placeholders, one block per enabled modality ---
        if ('d' in self.par.input_type):
            num_input_ch += 1
            # Depth has its own placeholder per scale; it is injected into
            # the convolutions as an extra channel, not via input_list.
            self.input_depth1 = tf.placeholder(tf.float32, [bs, (fs * fs)])
            self.input_depth2 = tf.placeholder(tf.float32, [(bs // 2), (fs * fs)])
            self.input_depth3 = tf.placeholder(tf.float32, [(bs // 4), (fs * fs)])
        if ('n' in self.par.input_type):
            num_input_ch += 3
            self.input_normals = tf.placeholder(tf.float32, [bs, 3])
            input_list.append(self.input_normals)
        if ('h' in self.par.input_type):
            num_input_ch += 1
            self.input_h = tf.placeholder(tf.float32, [bs, 1])
            input_list.append(self.input_h)
        if ('c' in self.par.input_type):
            num_input_ch += 3
            self.input_colors = tf.placeholder(tf.float32, [bs, 3])
            input_list.append(self.input_colors)
        # Convolution / pooling index and mask placeholders per scale.
        self.c1_ind = tf.placeholder(tf.int32, [bs, (fs * fs)])
        self.p12_ind = tf.placeholder(tf.int32, [(bs // 2), 8])
        self.p12_mask = tf.placeholder(tf.float32, [(bs // 2), 8])
        self.c2_ind = tf.placeholder(tf.int32, [(bs // 2), (fs * fs)])
        self.p23_ind = tf.placeholder(tf.int32, [(bs // 4), 8])
        self.p23_mask = tf.placeholder(tf.float32, [(bs // 4), 8])
        self.c3_ind = tf.placeholder(tf.int32, [(bs // 4), (fs * fs)])
        self.label = tf.placeholder(tf.int32, [bs])
        self.loss_weight = tf.placeholder(tf.float32, [bs])
        # Label 0 means "unlabeled": masked out of loss and accuracy.
        label_mask = tf.cast(self.label, tf.bool)
        shape_unpool2 = tf.constant([(bs // 2), 64])
        shape_unpool1 = tf.constant([bs, 32])
        # --- encoder, scale 1 ---
        if ('d' in self.par.input_type):
            if (num_input_ch > 1):
                signal_input = tf.concat(input_list, axis=1)
                h_conv1 = lrelu(point_conv('conv1', signal_input, self.c1_ind, (fs * fs), num_input_ch, 32, extra_chan=self.input_depth1))
            else:
                # Depth-only input: plain 2D convolution over the depth patch.
                signal_input = tf.expand_dims(tf.expand_dims(self.input_depth1, axis=2), axis=0)
                h_conv1 = lrelu(conv_2d_layer('conv1', signal_input, 1, 32, 1, (fs * fs), 1, 1, padding='VALID'))
        else:
            signal_input = tf.concat(input_list, axis=1)
            h_conv1 = lrelu(point_conv('conv1', signal_input, self.c1_ind, (fs * fs), num_input_ch, 32))
        h_conv1 = tf.squeeze(h_conv1)
        h_conv11 = lrelu(point_conv('conv11', h_conv1, self.c1_ind, (fs * fs), 32, 32))
        h_pool1 = point_pool(h_conv11, self.p12_ind, self.p12_mask)
        # --- encoder, scale 2 (channel count +1 when depth is appended) ---
        if ('d' in self.par.input_type):
            h_conv2 = lrelu(point_conv('conv2', h_pool1, self.c2_ind, (fs * fs), 33, 64, extra_chan=self.input_depth2))
        else:
            h_conv2 = lrelu(point_conv('conv2', h_pool1, self.c2_ind, (fs * fs), 32, 64))
        h_conv22 = lrelu(point_conv('conv22', h_conv2, self.c2_ind, (fs * fs), 64, 64))
        h_pool2 = point_pool(h_conv22, self.p23_ind, self.p23_mask)
        # --- encoder, scale 3 (bottleneck) ---
        if ('d' in self.par.input_type):
            h_conv3 = lrelu(point_conv('conv3', h_pool2, self.c3_ind, (fs * fs), 65, 128, extra_chan=self.input_depth3))
        else:
            h_conv3 = lrelu(point_conv('conv3', h_pool2, self.c3_ind, (fs * fs), 64, 128))
        h_conv33 = lrelu(point_conv('conv33', h_conv3, self.c3_ind, (fs * fs), 128, 64))
        # --- decoder with skip connections (concat along channels) ---
        h_unpool2 = point_unpool(h_conv33, self.p23_ind, shape_unpool2)
        uconv2_in = tf.concat([h_conv22, h_unpool2], axis=1)
        h_uconv2 = lrelu(point_conv('uconv2', uconv2_in, self.c2_ind, (fs * fs), 128, 64))
        h_uconv22 = lrelu(point_conv('uconv22', h_uconv2, self.c2_ind, (fs * fs), 64, 32))
        h_unpool1 = point_unpool(h_uconv22, self.p12_ind, shape_unpool1)
        uconv1_in = tf.concat([h_conv11, h_unpool1], axis=1)
        h_uconv1 = lrelu(point_conv('uconv1', uconv1_in, self.c1_ind, (fs * fs), 64, 32))
        h_uconv11 = tf.squeeze(point_conv('uconv11', h_uconv1, self.c1_ind, (fs * fs), 32, 32))
        # Per-point 1x1 prediction head over the decoder features.
        pred_input = tf.expand_dims(tf.expand_dims(h_uconv11, axis=1), axis=0)
        h_pred = tf.squeeze(conv_2d_layer('pred1', pred_input, 32, self.par.d_par.num_classes, 1, 1, 1, 1))
        self.output = tf.argmax(h_pred, axis=1, output_type=tf.int32)
        # Loss/accuracy are computed only over labeled points.
        masked_output = tf.boolean_mask(h_pred, label_mask)
        masked_label = tf.boolean_mask(self.label, label_mask)
        masked_weights = tf.boolean_mask(self.loss_weight, label_mask)
        tr_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, '')
        self.loss = tf.reduce_mean(tf.multiply(masked_weights, tf.nn.sparse_softmax_cross_entropy_with_logits(labels=masked_label, logits=masked_output)))
        self.train_step = tf.train.AdamOptimizer(0.0001).minimize(self.loss)
        correct_prediction = tf.equal(tf.argmax(masked_output, axis=1, output_type=tf.int32), masked_label)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Scalar summaries fed by placeholders from train()/validate().
        self.test_loss_placeholder = tf.placeholder(dtype=tf.float32, shape=[])
        self.test_loss_summary = tf.summary.scalar('accuracy', self.test_loss_placeholder)
        self.train_loss_placeholder = tf.placeholder(dtype=tf.float32, shape=[])
        self.train_loss_summary = tf.summary.scalar('train_loss', self.train_loss_placeholder)
        curr_time = strftime('%Y-%m-%d %H:%M:%S', gmtime())
        self.writer = tf.summary.FileWriter(os.path.join(self.par.log_dir, curr_time))
        self.saver = tf.train.Saver(tr_var, max_to_keep=self.par.max_snapshots)
def initialize_model(self):
    """Initialise all TF variables in the session (fresh, untrained model)."""
    init_op = tf.global_variables_initializer()
    self.sess.run(init_op)
def save_snapshot(self):
    """Write a checkpoint named 'model-<step>' into the snapshot directory."""
    checkpoint_prefix = os.path.join(self.par.snapshot_dir, 'model')
    self.saver.save(self.sess, checkpoint_prefix, global_step=self.training_step)
def load_snapshot(self):
    """Restore the latest checkpoint, if any, and resume its training step.

    The step counter is parsed from the checkpoint file name ('model-<step>').
    No-op when the snapshot directory holds no checkpoint.
    """
    latest = tf.train.latest_checkpoint(self.par.snapshot_dir)
    if latest is None:
        return
    base_name = os.path.basename(latest)
    print('Loading snapshot ' + base_name)
    self.training_step = int(base_name.split('-')[1])
    self.saver.restore(self.sess, latest)
def train(self):
    """Main optimisation loop.

    Periodically reloads training data and runs validation; batches whose
    point count exceeds the configured batch size are skipped. Each step's
    loss is printed and logged to the summary writer.
    """
    point_budget = self.par.batch_size
    for step in range(self.training_step, self.par.max_iter_count):
        if step > 0 and step % self.par.reload_iter == 0:
            self.load_data('train')
        if step % self.par.test_iter == 0:
            self.validate(step)
        batch = self.get_training_batch(step)
        if batch.num_points() > point_budget:
            # Over-sized batch: skip rather than overflow the fixed-size input.
            continue
        fetches = [self.train_step, self.loss, self.output]
        out = self.sess.run(fetches, feed_dict=self.get_feed_dict(batch))
        print(str(step) + ' : ' + str(out[1]))
        summary = self.sess.run(self.train_loss_summary, feed_dict={self.train_loss_placeholder: out[1]})
        self.writer.add_summary(summary, step)
def validate(self, step):
    """Evaluate on the cached validation batches and log the accuracy.

    Computes a label-count-weighted average of per-batch accuracies, saves
    a snapshot when it beats the best seen so far, and writes the value to
    the summary writer under `step`.
    """
    pixel_count = 0
    acc = []
    pix = []
    for b in self.validation_batches:
        # Fix: the original also computed a masked copy of the prediction
        # (`valid_out`) that was never used — dead code removed.
        # NOTE(review): expand_dim_to_batch1 looked like a pure reshape
        # helper; confirm removing its call has no side effects.
        out = self.sess.run([self.accuracy, self.output], feed_dict=self.get_feed_dict(b))
        # Count labelled points once (was computed twice per batch).
        labelled = np.count_nonzero(b.labels[0])
        acc.append(out[0])
        pix.append(labelled)
        pixel_count += labelled
    avg_acc = 0.0
    for batch_acc, batch_pix in zip(acc, pix):
        avg_acc += (batch_acc * batch_pix) / pixel_count
    print('Accuracy: ' + str(avg_acc))
    if avg_acc > self.best_accuracy:
        self.best_accuracy = avg_acc
        self.save_snapshot()
    summary = self.sess.run(self.test_loss_summary, feed_dict={self.test_loss_placeholder: avg_acc})
    self.writer.add_summary(summary, step)
def test(self):
    """Run inference on every test scan and save the predicted labels.

    Two sampling modes: 'full' feeds the whole scan as one batch; otherwise
    the scan's bounding box is swept with a regular 3-D grid of sampling
    centres spaced `valid_rad` apart and a batch is built per centre.
    """
    scan_id = 0
    cs = self.par.cube_size
    print('Testing...')
    for val_scan in self.test_data:
        if (self.par.data_sampling_type == 'full'):
            scan_batches = [get_batch_from_full_scan(val_scan, self.par.num_scales, self.par.d_par.class_weights)]
        else:
            # NOTE(review): `scan` is published as a module-level global here —
            # presumably read by get_batch_array or a worker; confirm.
            global scan
            # Pad the bounding box by half a cube so边 edge centres are covered.
            min_bound = (val_scan.clouds[0].get_min_bound() - (cs[0] * 0.5))
            max_bound = (val_scan.clouds[0].get_max_bound() + (cs[0] * 0.5))
            scan = val_scan
            rad = self.par.valid_rad
            points = []
            # Regular grid sweep over x/y/z; keep only centres that contain points.
            x_s = (min_bound[0] + (rad / 2.0))
            while (x_s < (max_bound[0] - rad)):
                y_s = (min_bound[1] + (rad / 2.0))
                while (y_s < (max_bound[1] - rad)):
                    z_s = (min_bound[2] + (rad / 2.0))
                    while (z_s < (max_bound[2] - rad)):
                        if val_scan.has_points([x_s, y_s, z_s], rad):
                            points.append([x_s, y_s, z_s])
                        z_s += rad
                    y_s += rad
                x_s += rad
            arr_size = len(points)
            print(('Number of test batches: ' + str(arr_size)))
            print('Loading batches...')
            scan_batches = get_batch_array(val_scan, self.par, points)
            print('Done.')
        for b in scan_batches:
            out = self.sess.run(self.output, feed_dict=self.get_feed_dict(b))
            # Zero out predictions at unlabeled positions before assignment.
            valid_out = np.multiply(out, np.asarray(expand_dim_to_batch1(b.labels[0], self.par.batch_size), dtype=bool))
            if (self.par.data_sampling_type == 'full'):
                val_scan.assign_labels(valid_out)
            else:
                val_scan.assign_labels_part(valid_out, b.index_maps[0])
        make_dir(os.path.join(self.par.output_dir, self.test_scans[scan_id]))
        val_scan.save(os.path.join(self.par.output_dir, self.test_scans[scan_id]))
        print(self.test_scans[scan_id])
        scan_id += 1
def main():
    """Fine-tune a RoBERTa masked-LM for column prediction.

    Parses CLI args, sets up (optionally distributed / fp16) training,
    optionally evaluates each epoch on the train-eval and dev corpora,
    saving the best-accuracy model and periodic epoch checkpoints.
    """
    # ---- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_corpus', default=None, type=str, required=True, help='The input train corpus, each line contains numbers that are the roberta tokenized indices.')
    parser.add_argument('--train_eval_corpus', default=None, type=str, required=False, help='The input train eval corpus, each line contains numbers that are the roberta tokenized indices.')
    parser.add_argument('--eval_corpus', default=None, type=str, required=False, help='The input eval corpus, each line contains numbers that are the roberta tokenized indices.')
    parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: roberta-base, roberta-base, roberta-large')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints will be written.')
    parser.add_argument('--max_seq_length', default=208, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run evaluation.')
    parser.add_argument('--mlm_loss', action='store_true', help='Whether to add mlm loss.')
    parser.add_argument('--concat_tabcol', action='store_true', help='Whether to concatenate table and column representations.')
    parser.add_argument('--train_batch_size', default=48, type=int, help='Total batch size for training.')
    parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--num_train_epochs', default=1000.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--on_memory', action='store_true', help='Whether to load train samples into memory or use disk')
    parser.add_argument('--do_lower_case', action='store_true', help='Whether to lower case the input text. True for uncased models, False for cased models.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumualte before performing a backward/update pass.')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
    parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
    args = parser.parse_args()
    wandb.init(project='column_roberta', name=args.output_dir)
    # ---- Device / distributed setup -----------------------------------
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(init_method='env://', backend='nccl')
    print('\n=====Using device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # Effective per-step batch size after accumulation.
    args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
    # ---- Seeding -------------------------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    if (not args.do_train):
        raise ValueError('Training is currently the only implemented execution option. Please set `do_train`.')
    # ---- Data ----------------------------------------------------------
    tokenizer = RobertaTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    num_train_optimization_steps = None
    if args.do_train:
        print('Loading Train Dataset', args.train_corpus)
        train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory, mlm_loss=args.mlm_loss)
        num_train_optimization_steps = (int(((len(train_dataset) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
        if (args.local_rank != (- 1)):
            num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
    if args.do_eval:
        print('Loading Eval Dataset', args.eval_corpus)
        eval_dataset = BERTDataset(args.eval_corpus, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory)
        train_eval_dataset = BERTDataset(args.train_eval_corpus, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory)
    # ---- Model ---------------------------------------------------------
    model = RobertaForMaskedLM.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=True, find_unused_parameters=True)
    elif (n_gpu > 1):
        model = torch.nn.DataParallel(model)
    wandb.watch(model)
    # ---- Optimizer / scheduler -----------------------------------------
    if args.do_train:
        param_optimizer = list(model.named_parameters())
        # No weight decay for bias and LayerNorm parameters.
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError('Please install apex from to use distributed and fp16 training.')
            optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
            if (args.loss_scale == 0):
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        else:
            optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_optimization_steps)
    global_step = 0
    # ---- Training loop -------------------------------------------------
    if args.do_train:
        print('***** Running training *****')
        print('  Num examples:', len(train_dataset))
        print('  Batch size:', args.train_batch_size)
        print('  Num steps:', num_train_optimization_steps)
        if (args.local_rank == (- 1)):
            train_sampler = RandomSampler(train_dataset)
        else:
            train_sampler = DistributedSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
        if args.do_eval:
            eval_sampler = SequentialSampler(eval_dataset)
            eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.train_batch_size)
            train_eval_sampler = SequentialSampler(train_eval_dataset)
            train_eval_dataloader = DataLoader(train_eval_dataset, sampler=train_eval_sampler, batch_size=args.train_batch_size)
        best_acc = 0
        for epoch in range(int(args.num_train_epochs)):
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            total_steps = len(train_dataloader)
            # Progress print every ~10% of steps; guard against 0.
            step_check = (int((total_steps * 0.1)) if (int((total_steps * 0.1)) != 0) else 1)
            save_check = (int((total_steps * 0.5)) if (int((total_steps * 0.5)) != 0) else 1)
            print('\nEpoch: ', epoch)
            for (step, batch) in enumerate(train_dataloader):
                model.train()
                (input_ids, input_mask, col_label_ids, lm_label_ids, q_tab_inds) = batch
                (input_ids, input_mask, col_label_ids, lm_label_ids, q_tab_inds) = to_device(device, input_ids, input_mask, col_label_ids, lm_label_ids, q_tab_inds)
                if (not args.concat_tabcol):
                    q_tab_inds = None
                # Optional auxiliary MLM loss in addition to the column loss.
                if args.mlm_loss:
                    loss = model(input_ids=input_ids, attention_mask=input_mask, masked_lm_labels=lm_label_ids, masked_col_labels=col_label_ids, q_tab_inds=q_tab_inds)
                else:
                    loss = model(input_ids=input_ids, attention_mask=input_mask, masked_col_labels=col_label_ids, q_tab_inds=q_tab_inds)
                if (n_gpu > 1):
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                wandb.log({'batch_training_loss': loss.item()})
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if ((step % step_check) == 0):
                    print(('Finishing training for current epoch:\t' + str(round((step / total_steps), 3))))
                # Optimizer step only every `gradient_accumulation_steps` batches.
                if (((step + 1) % args.gradient_accumulation_steps) == 0):
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()
                    global_step += 1
            epoch_loss = (tr_loss / total_steps)
            print(('Train epoch loss:\t' + str(epoch_loss)))
            wandb.log({'epoch_training_loss': epoch_loss})
            # ---- Per-epoch evaluation (rank 0 / single process only) ----
            if (args.do_eval and (args.local_rank in [(- 1), 0])):
                print('***** Running evaluation on train *****')
                train_eval_loss = 0.0
                train_preds = None
                train_nb_eval_steps = 0
                train_out_label_ids = None
                for (step, batch) in enumerate(train_eval_dataloader):
                    model.eval()
                    (input_ids, input_mask, col_label_ids, _, q_tab_inds) = batch
                    (input_ids, input_mask, col_label_ids, q_tab_inds) = to_device(device, input_ids, input_mask, col_label_ids, q_tab_inds)
                    if (not args.concat_tabcol):
                        q_tab_inds = None
                    with torch.no_grad():
                        (tmp_eval_loss, logits) = model(input_ids=input_ids, attention_mask=input_mask, masked_col_labels=col_label_ids, q_tab_inds=q_tab_inds, is_train=False)
                    col_logits = logits[1]
                    train_eval_loss += tmp_eval_loss.mean().item()
                    train_nb_eval_steps += 1
                    # Accumulate logits / labels across batches for accuracy.
                    if (train_preds is None):
                        train_preds = col_logits.detach().cpu().numpy()
                        train_out_label_ids = col_label_ids.detach().cpu().numpy()
                    else:
                        train_preds = np.append(train_preds, col_logits.detach().cpu().numpy(), axis=0)
                        train_out_label_ids = np.append(train_out_label_ids, col_label_ids.detach().cpu().numpy(), axis=0)
                train_eval_loss = (train_eval_loss / train_nb_eval_steps)
                train_preds = np.argmax(train_preds, axis=2)
                train_cur_acc = simple_accuracy(train_preds, train_out_label_ids)
                print(('Train eval epoch loss:\t' + str(train_eval_loss)))
                print(('Train eval accuracy:\t' + str(train_cur_acc)))
                wandb.log({'epoch_train_eval_loss': train_eval_loss})
                wandb.log({'train_eval_accuracy': train_cur_acc})
                print('***** Running evaluation on dev *****')
                eval_loss = 0.0
                preds = None
                nb_eval_steps = 0
                out_label_ids = None
                for (step, batch) in enumerate(eval_dataloader):
                    model.eval()
                    (input_ids, input_mask, col_label_ids, _, q_tab_inds) = batch
                    (input_ids, input_mask, col_label_ids, q_tab_inds) = to_device(device, input_ids, input_mask, col_label_ids, q_tab_inds)
                    if (not args.concat_tabcol):
                        q_tab_inds = None
                    with torch.no_grad():
                        (tmp_eval_loss, logits) = model(input_ids=input_ids, attention_mask=input_mask, masked_col_labels=col_label_ids, q_tab_inds=q_tab_inds, is_train=False)
                    col_logits = logits[1]
                    eval_loss += tmp_eval_loss.mean().item()
                    nb_eval_steps += 1
                    if (preds is None):
                        preds = col_logits.detach().cpu().numpy()
                        out_label_ids = col_label_ids.detach().cpu().numpy()
                    else:
                        preds = np.append(preds, col_logits.detach().cpu().numpy(), axis=0)
                        out_label_ids = np.append(out_label_ids, col_label_ids.detach().cpu().numpy(), axis=0)
                eval_loss = (eval_loss / nb_eval_steps)
                preds = np.argmax(preds, axis=2)
                cur_acc = simple_accuracy(preds, out_label_ids)
                print(('Eval epoch loss:\t' + str(eval_loss)))
                print(('Eval accuracy:\t' + str(cur_acc)))
                wandb.log({'dev_eval_accuracy': cur_acc})
                wandb.log({'epoch_dev_eval_loss': eval_loss})
                # Save the best-dev-accuracy model.
                if (cur_acc > best_acc):
                    best_acc = cur_acc
                    print(('** ** * Saving fine-tuned model for epoch ' + str(epoch)))
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
                    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                    if args.do_train:
                        torch.save(model_to_save.state_dict(), output_model_file)
                        model_to_save.config.to_json_file(output_config_file)
                        tokenizer.save_vocabulary(args.output_dir)
            # Periodic epoch checkpoints: every epoch for the first 10, then
            # every 10th. NOTE(review): placement at epoch level (outside the
            # do_eval branch) reconstructed from flattened source — confirm.
            if ((epoch < 10) or ((epoch % 10) == 0)):
                print(('** ** * Saving fine-tuned model for epoch ' + str(epoch)))
                model_to_save = (model.module if hasattr(model, 'module') else model)
                output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME.replace('.bin', (('_' + str(epoch)) + '.bin')))
                output_config_file = os.path.join(args.output_dir, CONFIG_NAME.replace('.json', (('_' + str(epoch)) + '.json')))
                if args.do_train:
                    torch.save(model_to_save.state_dict(), output_model_file)
                    model_to_save.config.to_json_file(output_config_file)
                    tokenizer.save_vocabulary(args.output_dir)
def weights_init_kaiming(m):
    """Initialise `m`'s parameters by layer kind (for use with `Module.apply`).

    Linear: N(0, 0.01) weights; Conv: Kaiming-normal (fan_out) weights;
    BatchNorm (affine only): weight=1, bias=0. Biases are zeroed where present.
    Other module types are left untouched.
    """
    kind = m.__class__.__name__
    if 'Linear' in kind:
        nn.init.normal_(m.weight, 0, 0.01)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in kind:
        nn.init.kaiming_normal_(m.weight, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in kind:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
class Mlp3Layer256UnitLongerTrainingDecreaseBatchSize(NeuralNetworkTrainingDecreaseBatchSize, Mlp3Layer256Unit):
    # Combines the decreasing-batch-size training regime with the 3-layer,
    # 256-unit MLP architecture; all behaviour comes from the two bases
    # (no overrides).
    pass
class TestGradients(TestCase):
    """gradcheck/gradgradcheck coverage for ops in `op_db` (function, method
    and in-place variants)."""
    # Require gradcheck comparisons at exact dtype.
    exact_dtype = True

    def _get_safe_inplace(self, inplace_variant):
        """Wrap an in-place variant so it operates on a clone (gradcheck-safe)."""
        # NOTE(review): bare expression below looks like a stripped decorator
        # (likely `@wraps(inplace_variant)` on `_fn`) — confirm against upstream.
        (inplace_variant)
        def _fn(t, *args, **kwargs):
            return inplace_variant(t.clone(), *args, **kwargs)
        return _fn

    def _check_helper(self, device, dtype, op, variant, check):
        """Run gradcheck or gradgradcheck over all of `op`'s sample inputs."""
        if (variant is None):
            self.skipTest('Skipped! Variant not implemented.')
        if (not op.supports_dtype(dtype, torch.device(device).type)):
            self.skipTest(f'Skipped! {op.name} does not support dtype {str(dtype)}')
        samples = op.sample_inputs(device, dtype, requires_grad=True)
        for sample in samples:
            # Bind sample kwargs so the checker only sees positional inputs.
            partial_fn = partial(variant, **sample.kwargs)
            if (check == 'gradcheck'):
                self.assertTrue(gradcheck(partial_fn, ((sample.input,) + sample.args), check_grad_dtypes=True))
            elif (check == 'gradgradcheck'):
                # Second-order check with both contiguous and non-contiguous
                # incoming grad outputs.
                self.assertTrue(gradgradcheck(partial_fn, ((sample.input,) + sample.args), gen_non_contig_grad_outputs=False, check_grad_dtypes=True))
                self.assertTrue(gradgradcheck(partial_fn, ((sample.input,) + sample.args), gen_non_contig_grad_outputs=True, check_grad_dtypes=True))
            else:
                self.assertTrue(False, msg='Unknown check requested!')

    def _grad_test_helper(self, device, dtype, op, variant):
        return self._check_helper(device, dtype, op, variant, 'gradcheck')

    def _gradgrad_test_helper(self, device, dtype, op, variant):
        return self._check_helper(device, dtype, op, variant, 'gradgradcheck')

    # NOTE(review): the bare `(torch.double)` / `(op_db)` expressions before
    # each test look like stripped parameterization decorators (e.g.
    # `@dtypes(torch.double)` / `@ops(op_db)`) — confirm against upstream.
    (torch.double)
    (op_db)
    def test_fn_grad(self, device, dtype, op):
        self._grad_test_helper(device, dtype, op, op.get_op())
    (torch.double)
    (op_db)
    def test_method_grad(self, device, dtype, op):
        self._grad_test_helper(device, dtype, op, op.get_method())
    (torch.double)
    (op_db)
    def test_inplace_grad(self, device, dtype, op):
        if (not op.test_inplace_grad):
            self.skipTest('Skipped! Inplace gradcheck marked to skip.')
        self._grad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))
    (torch.double)
    (op_db)
    def test_fn_gradgrad(self, device, dtype, op):
        self._gradgrad_test_helper(device, dtype, op, op.get_op())
    (torch.double)
    (op_db)
    def test_method_gradgrad(self, device, dtype, op):
        self._gradgrad_test_helper(device, dtype, op, op.get_method())
    (torch.double)
    (op_db)
    def test_inplace_gradgrad(self, device, dtype, op):
        if (not op.test_inplace_grad):
            self.skipTest('Skipped! Inplace gradgradcheck marked to skip.')
        self._gradgrad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))
def quantize_linear_modules(module, dtype=torch.int8):
    """Recursively replace ``torch.nn.Linear`` submodules with their quantized
    counterparts (int8 or fp16). Deprecated in favour of
    ``torch.quantization.quantize_dynamic``.

    Returns the (possibly replaced) module.
    """
    warnings.warn('quantize_linear_modules function has been deprecated. Please use torch.quantization.quantize_dynamic API instead.')
    replacements = {}
    for (child_name, child) in module.named_modules():
        if (child is module):
            # named_modules yields the module itself first; skip it.
            continue
        converted = quantize_linear_modules(child, dtype)
        if (converted is not child):
            replacements[child_name] = converted
    for (child_name, converted) in replacements.items():
        setattr(module, child_name, converted)
    if not isinstance(module, torch.nn.Linear):
        return module
    if (dtype == torch.int8):
        return QuantizedLinear(module)
    if (dtype == torch.float16):
        return QuantizedLinearFP16(module)
    raise RuntimeError('Unsupported dtype: {}'.format(dtype))
def env_desc_gen(**config):
    """Build a description dict for an MDPEnvironment created from `config`.

    A throwaway instance is created just to read its agent/space metadata,
    then closed before returning.
    """
    instance = MDPEnvironment(**config)
    description = {
        'creator': MDPEnvironment,
        'possible_agents': instance.possible_agents,
        'action_spaces': instance.action_spaces,
        'observation_spaces': instance.observation_spaces,
        'config': config,
    }
    instance.close()
    return description
class SingleTaskSVGP(BaseGPSurrogate, SingleTaskVariationalGP):
    """Sparse variational GP surrogate over encoder features (single- or
    multi-output), trained with a variational ELBO."""

    def __init__(self, feature_dim, out_dim, num_inducing_points, encoder, noise_constraint=None, lengthscale_prior=None, outcome_transform=None, input_transform=None, learn_inducing_points=True, mll_beta=1.0, *args, **kwargs):
        BaseGPSurrogate.__init__(self, *args, encoder=encoder, **kwargs)
        self.num_inducing_points = num_inducing_points
        # Single-output: plain Matern kernel + Gaussian likelihood;
        # multi-output: batched kernel + multitask likelihood (per-task noise).
        if (out_dim == 1):
            covar_module = kernels.MaternKernel(ard_num_dims=feature_dim, lengthscale_prior=lengthscale_prior)
            covar_module.initialize(lengthscale=self.lengthscale_init)
            likelihood = likelihoods.GaussianLikelihood(noise_constraint=noise_constraint)
            likelihood.initialize(noise=self.task_noise_init)
        else:
            covar_module = kernels.MaternKernel(batch_shape=(out_dim,), ard_num_dims=feature_dim, lengthscale_prior=lengthscale_prior)
            covar_module.initialize(lengthscale=self.lengthscale_init)
            likelihood = likelihoods.MultitaskGaussianLikelihood(num_tasks=out_dim, has_global_noise=False, noise_constraint=noise_constraint)
            likelihood.initialize(task_noises=self.task_noise_init)
        # Random placeholder data in [-1, 1); also seeds the inducing points.
        dummy_X = (2 * (torch.rand(num_inducing_points, feature_dim).to(self.device, self.dtype) - 0.5))
        dummy_Y = torch.randn(num_inducing_points, out_dim).to(self.device, self.dtype)
        # NOTE(review): `covar_module` is always assigned above, so the
        # `is None` guard here is effectively dead — the .to(...) branch
        # always runs; confirm intent.
        covar_module = (covar_module if (covar_module is None) else covar_module.to(self.device, self.dtype))
        self.base_cls = SingleTaskVariationalGP
        self.base_cls.__init__(self, dummy_X, dummy_Y, likelihood, out_dim, learn_inducing_points, covar_module=covar_module, inducing_points=dummy_X, outcome_transform=outcome_transform, input_transform=input_transform)
        self.encoder = encoder.to(self.device, self.dtype)
        self.mll_beta = mll_beta

    def clear_cache(self):
        # Drop memoized kernel/variational caches so stale tensors aren't reused.
        clear_cache_hook(self)
        clear_cache_hook(self.model)
        clear_cache_hook(self.model.variational_strategy)
        if hasattr(self.model.variational_strategy, 'base_variational_strategy'):
            clear_cache_hook(self.model.variational_strategy.base_variational_strategy)

    def forward(self, inputs):
        # Raw (numpy) inputs are pushed through the encoder first; tensors
        # are assumed to already be features.
        features = (self.get_features(inputs, self.bs) if isinstance(inputs, np.ndarray) else inputs)
        res = self.base_cls.forward(self, features)
        return res

    def posterior(self, inputs, output_indices=None, observation_noise=False, **kwargs):
        self.clear_cache()
        features = (self.get_features(inputs, self.bs) if isinstance(inputs, np.ndarray) else inputs)
        return self.base_cls.posterior(self, features, output_indices, observation_noise, **kwargs)

    def set_train_data(self, inputs=None, targets=None, strict=True):
        # SVGP keeps no exact training data; only the caches need clearing.
        self.clear_cache()

    def fit(self, X_train, Y_train, X_val, Y_val, X_test, Y_test, reset=False, log_prefix='single_task_svgp', **kwargs):
        if reset:
            raise NotImplementedError
        fit_kwargs = dict(surrogate=self, mll=VariationalELBO(self.likelihood, self.model, num_data=X_train.shape[0]), X_train=X_train, Y_train=Y_train, X_val=X_val, Y_val=Y_val, X_test=X_test, Y_test=Y_test, train_bs=self.bs, eval_bs=self.bs, shuffle_train=True, log_prefix=log_prefix)
        fit_kwargs.update(kwargs)
        return fit_gp_surrogate(**fit_kwargs)

    def reshape_targets(self, targets):
        # Squeeze a trailing singleton target dimension for single-output GPs.
        if (targets.shape[(- 1)] > 1):
            return targets
        else:
            return targets.squeeze((- 1))
def clip_grad_by_value_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward stub for clip-by-value; never invoked because the forward is
    expressed as a composite of other functions (which supply the gradient).
    Always fails if reached."""
    incoming_grad = grad_inputs[0]
    forward_input = inputs[0]
    assert False, 'This function is not called since the function is the composite of other functions.'
class StepOff(BaseVRMWaveform):
    """Step-off excitation waveform.

    Fix: the `t0` accessor pair had lost its decorators — a bare `.setter`
    line is a SyntaxError and the getter shadowed the attribute. Restored
    `@property` / `@t0.setter`, so assigning `t0` goes through
    `validate_float` (including the assignment in `__init__`).
    """

    def __init__(self, t0=0.0):
        # Routed through the property setter below (validates the value).
        self.t0 = t0

    @property
    def t0(self):
        """Beginning of the off-time (seconds)."""
        return self._t0

    @t0.setter
    def t0(self, value):
        self._t0 = validate_float('t0', value)

    def getCharDecay(self, fieldType, times):
        """Characteristic decay ('dhdt' or 'dbdt') at `times` (after t0).

        Raises ValueError if any time channel is not strictly after t0.
        """
        fieldType = validate_string('fieldType', fieldType, ['dhdt', 'dbdt'])
        if (self.t0 >= np.min(times)):
            raise ValueError(('Earliest time channel must be after beginning of off-time (t0 = %.2e s)' % self.t0))
        t0 = self.t0
        if (fieldType == 'dbdt'):
            mu0 = ((4 * np.pi) * 1e-07)
            eta = ((- mu0) / (times - t0))
        elif (fieldType == 'dhdt'):
            eta = ((- 1) / (times - t0))
        return eta

    def getLogUniformDecay(self, fieldType, times, chi0, dchi, tau1, tau2):
        """Decay for a log-uniform distribution of time constants.

        chi0/dchi/tau1/tau2 are per-cell arrays (length nC); returns an
        (nC, nT) array.
        """
        # NOTE(review): validation restricts fieldType to 'dhdt'/'dbdt', so the
        # 'h' and 'b' branches below are unreachable as written — confirm.
        fieldType = validate_string('fieldType', fieldType, ['dhdt', 'dbdt'])
        nT = len(times)
        nC = len(dchi)
        t0 = self.t0
        # Broadcast everything to (nC, nT) grids via Kronecker products.
        times = np.kron(np.ones((nC, 1)), times)
        chi0 = np.kron(np.reshape(chi0, newshape=(nC, 1)), np.ones((1, nT)))
        dchi = np.kron(np.reshape(dchi, newshape=(nC, 1)), np.ones((1, nT)))
        tau1 = np.kron(np.reshape(tau1, newshape=(nC, 1)), np.ones((1, nT)))
        tau2 = np.kron(np.reshape(tau2, newshape=(nC, 1)), np.ones((1, nT)))
        if (fieldType == 'h'):
            eta = (((0.5 * (1 - np.sign((times - t0)))) * chi0) + (((0.5 * (1 + np.sign((times - t0)))) * (dchi / np.log((tau2 / tau1)))) * (spec.expi(((- (times - t0)) / tau2)) - spec.expi(((- (times - t0)) / tau1)))))
        elif (fieldType == 'b'):
            mu0 = ((4 * np.pi) * 1e-07)
            eta = (((0.5 * (1 - np.sign((times - t0)))) * chi0) + (((0.5 * (1 + np.sign((times - t0)))) * (dchi / np.log((tau2 / tau1)))) * (spec.expi(((- (times - t0)) / tau2)) - spec.expi(((- (times - t0)) / tau1)))))
            eta = (mu0 * eta)
        elif (fieldType == 'dhdt'):
            eta = (0.0 + ((((0.5 * (1 + np.sign((times - t0)))) * (dchi / np.log((tau2 / tau1)))) * (np.exp(((- (times - t0)) / tau1)) - np.exp(((- (times - t0)) / tau2)))) / (times - t0)))
        elif (fieldType == 'dbdt'):
            mu0 = ((4 * np.pi) * 1e-07)
            eta = (0.0 + ((((0.5 * (1 + np.sign((times - t0)))) * (dchi / np.log((tau2 / tau1)))) * (np.exp(((- (times - t0)) / tau1)) - np.exp(((- (times - t0)) / tau2)))) / (times - t0)))
            eta = (mu0 * eta)
        return eta
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather `tensor` from every distributed rank and concatenate along dim 0.

    Fix: the original had a bare `_grad()` line (NameError at import) — the
    residue of the `@torch.no_grad()` decorator, now restored. Note that
    `torch.distributed.all_gather` has no gradient anyway.
    """
    tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    output = torch.cat(tensors_gather, dim=0)
    return output
def register(target: Target) -> Target:
    """Class decorator: append `target` to the global registry and expose its
    name as a CLI choice. Returns `target` unchanged."""
    # Imported lazily to avoid a circular import with the cli module.
    from . import cli
    global ALL_TARGETS
    ALL_TARGETS += (target,)
    # NOTE(review): `+=` assumes cli.TARGETS_TYPE.choices supports
    # concatenation with a tuple — confirm its concrete type.
    cli.TARGETS_TYPE.choices += (target.__name__,)
    return target
def _frobenius_shift(K, generators, check_only=False):
    """Adjust pseudo-Conway `generators` by Frobenius powers so they are
    pairwise compatible under the norm maps between subfields of `K`.

    `generators` maps prime powers q to elements; entries are modified in
    place. With `check_only=True` only the compatibility assertions run.

    Fix: the levelling loop used an undefined name `path` where the rest of
    the code uses `levelling_path` (NameError whenever an indirect levelling
    path was found); renamed consistently.
    """
    if (len(generators) == 1):
        return generators
    p = K.characteristic()
    n = K.degree()
    compatible = {}
    from .integer_mod import mod
    # compatible[m][q]: image of generators[q] in the degree-m subfield.
    for m in n.divisors():
        compatible[m] = {}
    for (q, x) in generators.items():
        for m in (n // q).divisors():
            compatible[m][q] = (x ** (((p ** (n // q)) - 1) // ((p ** m) - 1)))
    if check_only:
        # Every pair of images in the same subfield must agree.
        for m in n.divisors():
            try:
                (q, x) = compatible[m].popitem()
            except KeyError:
                break
            for (qq, xx) in compatible[m].items():
                assert (x == xx)
        return
    # crt[(i, j)]: constraints on the difference of Frobenius powers to apply
    # to generators qlist[i] and qlist[j], accumulated per common subfield.
    crt = {}
    qlist = sorted(generators.keys())
    for j in range(1, len(qlist)):
        for i in range(j):
            crt[(i, j)] = []
    for m in n.divisors():
        mqlist = sorted(compatible[m].keys())
        for k in range(1, len(mqlist)):
            j = qlist.index(mqlist[k])
            i = qlist.index(mqlist[(k - 1)])
            crt[(i, j)].append(_find_pow_of_frobenius(p, m, compatible[m][qlist[j]], compatible[m][qlist[i]]))
    for (i, j) in list(crt):
        L = crt[(i, j)]
        running = mod(0, 1)
        for a in L:
            running = _crt_non_coprime(running, a)
        # Split the combined congruence into its q-primary components
        # (residue, level) for each q in qlist.
        crt[(i, j)] = [(mod(running, (qq ** running.modulus().valuation(qq))), running.modulus().valuation(qq)) for qq in qlist]
        crt[(j, i)] = [((- a), level) for (a, level) in crt[(i, j)]]
    import bisect
    frob_powers = [mod(0, 1) for _ in qlist]

    def find_leveller(qindex, level, x, xleveled, searched, i):
        # DFS for a chain of constraints that can lift x[i] to `level`;
        # returns the path (reversed) or None.
        searched[i] = True
        crt_possibles = []
        for j in range(1, len(qlist)):
            if (i == j):
                continue
            if (crt[(i, j)][qindex][1] >= level):
                if xleveled[j]:
                    return [j]
                elif (j not in searched):
                    crt_possibles.append(j)
        for j in crt_possibles:
            path = find_leveller(qindex, level, x, xleveled, searched, j)
            if (path is not None):
                path.append(j)
                return path
        return None

    def propagate_levelling(qindex, level, x, xleveled, i):
        # Push a newly fixed x[i] through all constraints strong enough
        # to determine neighbours at this level.
        for j in range(1, len(qlist)):
            if (i == j):
                continue
            if ((not xleveled[j]) and (crt[(i, j)][qindex][1] >= level)):
                newxj = (x[i][0] + crt[(i, j)][qindex][0])
                x[j] = (newxj, min(x[i][1], crt[(i, j)][qindex][1]))
                xleveled[j] = True
                propagate_levelling(qindex, level, x, xleveled, j)

    for qindex in range(len(qlist)):
        q = qlist[qindex]
        # x[j]: (residue mod q^level, level) of the Frobenius power for qlist[j],
        # anchored at qlist[0].
        x = ([0] + [crt[(0, j)][qindex] for j in range(1, len(qlist))])
        levels = []
        for j in range(2, len(qlist)):
            for i in range(j):
                if (i != 0):
                    assert (x[j][0] == (x[i][0] + crt[(i, j)][qindex][0]))
                level = crt[(i, j)][qindex][1]
                if (level > 0):
                    # Keep `levels` sorted and duplicate-free.
                    ins = bisect.bisect_left(levels, level)
                    if (ins == len(levels)):
                        levels.append(level)
                    elif (levels[ins] != level):
                        levels.insert(ins, level)
        for level in levels:
            xleveled = ([0] + [(x[i][1] >= level) for i in range(1, len(qlist))])
            while True:
                try:
                    i = xleveled.index(False, 1)
                    searched = {}
                    levelling_path = find_leveller(qindex, level, x, xleveled, searched, i)
                    if (levelling_path is None):
                        # No constraint chain reaches i: any lift works.
                        x[i] = (mod(x[i][0].lift(), (q ** level)), level)
                        xleveled[i] = True
                        propagate_levelling(qindex, level, x, xleveled, i)
                    else:
                        levelling_path.append(i)
                        # Walk the chain, lifting each node from its predecessor.
                        # (was `path`, an undefined name — NameError)
                        for m in range(1, len(levelling_path)):
                            if (not xleveled[levelling_path[m]]):
                                newx = (x[levelling_path[(m - 1)]][0] + crt[(levelling_path[(m - 1)], levelling_path[m])][qindex][0])
                                x[levelling_path[m]] = (newx, min(x[levelling_path[(m - 1)]][1], crt[(levelling_path[(m - 1)], levelling_path[m])][qindex][1]))
                                xleveled[levelling_path[m]] = True
                                propagate_levelling(qindex, level, x, xleveled, levelling_path[m])
                except ValueError:
                    break
        for j in range(1, len(qlist)):
            frob_powers[j] = frob_powers[j].crt(x[j][0])
    for j in range(1, len(qlist)):
        generators[qlist[j]] = (generators[qlist[j]] ** (p ** (- frob_powers[j]).lift()))
    # Verify the adjusted generators are now compatible.
    _frobenius_shift(K, generators, check_only=True)
def changeContagion(G, A, i):
    """Return the number of neighbours of node `i` in `G` whose state in `A`
    equals 1 (i.e. the count of 'active' neighbours)."""
    return sum(1 for u in G.neighbourIterator(i) if A[u] == 1)
def download_a_url(dl_folder, url):
    """Download `url` into `dl_folder` (skipping if already present) and
    return the local file path. Multi-part URLs (list/tuple) are downloaded
    piecewise and combined.

    Fix: the progress prints were f-strings with no interpolation fields
    (garbled '(unknown)' placeholders) — restored the intended {filename}
    substitutions.
    """
    (url, filename) = get_downloaded_file(dl_folder, url)
    if os.path.exists(filename):
        print(f'{filename} has already been downloaded so skip')
        return filename
    print(f'downloading {url} to {filename}')
    if (isinstance(url, list) or isinstance(url, tuple)):
        download_parts_and_combine(dl_folder, url, filename)
    else:
        wget.download(url, filename, bar=bar_custom)
    print(f'dowloaded: {filename}')
    return filename
def test_queryrequest4():
    """POST an NGSI-10 queryContext request to the broker; expect HTTP 200."""
    url = (brokerIp + '/ngsi10/queryContext')
    # Fix: header value was misspelled 'appliction/json'.
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata45), headers=headers)
    assert (r.status_code == 200)
class AggregatorGRPCClient():
    """Collaborator-side gRPC client for the federation aggregator."""

    def __init__(self, agg_addr, agg_port, tls, disable_client_auth, root_certificate, certificate, private_key, aggregator_uuid=None, federation_uuid=None, single_col_cert_common_name=None, **kwargs):
        """Open a (TLS or insecure) channel to `agg_addr:agg_port` and build
        the stub with a retry-on-UNAVAILABLE interceptor."""
        self.uri = f'{agg_addr}:{agg_port}'
        self.tls = tls
        self.disable_client_auth = disable_client_auth
        self.root_certificate = root_certificate
        self.certificate = certificate
        self.private_key = private_key
        self.logger = getLogger(__name__)
        if (not self.tls):
            self.logger.warn('gRPC is running on insecure channel with TLS disabled.')
            self.channel = self.create_insecure_channel(self.uri)
        else:
            self.channel = self.create_tls_channel(self.uri, self.root_certificate, self.disable_client_auth, self.certificate, self.private_key)
        # Message header is built per-request by _set_header().
        self.header = None
        self.aggregator_uuid = aggregator_uuid
        self.federation_uuid = federation_uuid
        self.single_col_cert_common_name = single_col_cert_common_name
        # Retry with constant backoff whenever the server is UNAVAILABLE.
        self.interceptors = (RetryOnRpcErrorClientInterceptor(sleeping_policy=ConstantBackoff(logger=self.logger, reconnect_interval=int(kwargs.get('client_reconnect_interval', 1)), uri=self.uri), status_for_retry=(grpc.StatusCode.UNAVAILABLE,)),)
        self.stub = aggregator_pb2_grpc.AggregatorStub(grpc.intercept_channel(self.channel, *self.interceptors))
def create_insecure_channel(self, uri):
    """Open a plaintext gRPC channel to `uri` (used when TLS is disabled)."""
    channel = grpc.insecure_channel(uri, options=channel_options)
    return channel
def create_tls_channel(self, uri, root_certificate, disable_client_auth, certificate, private_key):
    """Open a TLS gRPC channel to `uri`.

    Loads the CA certificate always; the client key/cert pair is loaded only
    when client-side authentication is enabled.
    """
    with open(root_certificate, 'rb') as ca_file:
        ca_bytes = ca_file.read()
    if disable_client_auth:
        self.logger.warn('Client-side authentication is disabled.')
        key_bytes = None
        cert_bytes = None
    else:
        with open(private_key, 'rb') as key_file:
            key_bytes = key_file.read()
        with open(certificate, 'rb') as cert_file:
            cert_bytes = cert_file.read()
    credentials = grpc.ssl_channel_credentials(root_certificates=ca_bytes, private_key=key_bytes, certificate_chain=cert_bytes)
    return grpc.secure_channel(uri, credentials, options=channel_options)
def _set_header(self, collaborator_name):
self.header = aggregator_pb2.MessageHeader(sender=collaborator_name, receiver=self.aggregator_uuid, federation_uuid=self.federation_uuid, single_col_cert_common_name=(self.single_col_cert_common_name or ''))
def validate_response(self, reply, collaborator_name):
check_equal(reply.header.receiver, collaborator_name, self.logger)
check_equal(reply.header.sender, self.aggregator_uuid, self.logger)
check_equal(reply.header.federation_uuid, self.federation_uuid, self.logger)
check_equal(reply.header.single_col_cert_common_name, (self.single_col_cert_common_name or ''), self.logger)
def disconnect(self):
self.logger.debug(f'Disconnecting from gRPC server at {self.uri}')
self.channel.close()
def reconnect(self):
self.disconnect()
if (not self.tls):
self.channel = self.create_insecure_channel(self.uri)
else:
self.channel = self.create_tls_channel(self.uri, self.root_certificate, self.disable_client_auth, self.certificate, self.private_key)
self.logger.debug(f'Connecting to gRPC at {self.uri}')
self.stub = aggregator_pb2_grpc.AggregatorStub(grpc.intercept_channel(self.channel, *self.interceptors))
_atomic_connection
_resend_data_on_reconnection
def get_tasks(self, collaborator_name):
self._set_header(collaborator_name)
request = aggregator_pb2.GetTasksRequest(header=self.header)
response = self.stub.GetTasks(request)
self.validate_response(response, collaborator_name)
return (response.tasks, response.round_number, response.sleep_time, response.quit)
_atomic_connection
_resend_data_on_reconnection
def get_aggregated_tensor(self, collaborator_name, tensor_name, round_number, report, tags, require_lossless):
self._set_header(collaborator_name)
request = aggregator_pb2.GetAggregatedTensorRequest(header=self.header, tensor_name=tensor_name, round_number=round_number, report=report, tags=tags, require_lossless=require_lossless)
response = self.stub.GetAggregatedTensor(request)
self.validate_response(response, collaborator_name)
return response.tensor
_atomic_connection
_resend_data_on_reconnection
def send_local_task_results(self, collaborator_name, round_number, task_name, data_size, named_tensors):
self._set_header(collaborator_name)
request = aggregator_pb2.TaskResults(header=self.header, round_number=round_number, task_name=task_name, data_size=data_size, tensors=named_tensors)
stream = []
stream += utils.proto_to_datastream(request, self.logger)
response = self.stub.SendLocalTaskResults(iter(stream))
self.validate_response(response, collaborator_name)
def _get_trained_model(self, experiment_name, model_type):
get_model_request = self.stub.GetTrainedModelRequest(experiment_name=experiment_name, model_type=model_type)
model_proto_response = self.stub.GetTrainedModel(get_model_request)
(tensor_dict, _) = utils.deconstruct_model_proto(model_proto_response.model_proto, NoCompressionPipeline())
return tensor_dict |
# NOTE(review): the three bare lines below look like decorators that lost
# their '@ignore' / '@pytest.mark' prefixes in this copy — confirm upstream.
_warnings(category=sklearn.exceptions.ConvergenceWarning)
.filterwarnings('ignore:The SAMME.R algorithm')
.parametrize('name, Estimator', all_estimators())
def test_fit_docstring_attributes(name, Estimator):
    """Cross-check an estimator's fitted attributes against its docstring.

    Builds a minimal instance of ``Estimator``, fits it on tiny synthetic
    data, then asserts that (a) every attribute documented in the class
    docstring's 'Attributes' section exists on the fitted estimator and
    (b) every fitted attribute is documented.
    """
    pytest.importorskip('numpydoc')
    from numpydoc import docscrape
    # Documented attributes come from the numpydoc-parsed class docstring.
    doc = docscrape.ClassDoc(Estimator)
    attributes = doc['Attributes']
    # Meta-estimators and a few special cases need bespoke construction.
    if (Estimator.__name__ in ('HalvingRandomSearchCV', 'RandomizedSearchCV', 'HalvingGridSearchCV', 'GridSearchCV')):
        est = _construct_searchcv_instance(Estimator)
    elif (Estimator.__name__ in ('ColumnTransformer', 'Pipeline', 'FeatureUnion')):
        est = _construct_compose_pipeline_instance(Estimator)
    elif (Estimator.__name__ == 'SparseCoder'):
        est = _construct_sparse_coder(Estimator)
    else:
        est = _construct_instance(Estimator)
    # Per-estimator parameter tweaks so fitting the tiny dataset succeeds.
    if (Estimator.__name__ == 'SelectKBest'):
        est.set_params(k=2)
    elif (Estimator.__name__ == 'DummyClassifier'):
        est.set_params(strategy='stratified')
    elif ((Estimator.__name__ == 'CCA') or Estimator.__name__.startswith('PLS')):
        # small n_components to fit the 3-feature synthetic data
        est.set_params(n_components=1)
    elif (Estimator.__name__ in ('GaussianRandomProjection', 'SparseRandomProjection')):
        est.set_params(n_components=2)
    elif (Estimator.__name__ == 'TSNE'):
        # perplexity must be smaller than the sample count
        est.set_params(perplexity=2)
    if (Estimator.__name__ in ('LinearSVC', 'LinearSVR')):
        est.set_params(dual='auto')
    if (Estimator.__name__ in ('NMF', 'MiniBatchNMF')):
        est.set_params(n_components='auto')
    if (Estimator.__name__ == 'QuantileRegressor'):
        # solver choice depends on the installed scipy version
        solver = ('highs' if (sp_version >= parse_version('1.6.0')) else 'interior-point')
        est.set_params(solver=solver)
    # Keep the fit cheap and deterministic.
    if ('max_iter' in est.get_params()):
        est.set_params(max_iter=2)
    if ('random_state' in est.get_params()):
        est.set_params(random_state=0)
    skipped_attributes = {}
    # Vectorizers consume raw text / dicts rather than numeric arrays.
    if Estimator.__name__.endswith('Vectorizer'):
        if (Estimator.__name__ in ('CountVectorizer', 'HashingVectorizer', 'TfidfVectorizer')):
            X = ['This is the first document.', 'This document is the second document.', 'And this is the third one.', 'Is this the first document?']
        elif (Estimator.__name__ == 'DictVectorizer'):
            X = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        y = None
    else:
        (X, y) = make_classification(n_samples=20, n_features=3, n_redundant=0, n_classes=2, random_state=2)
        y = _enforce_estimator_tags_y(est, y)
        X = _enforce_estimator_tags_X(est, X)
    # Fit according to the estimator's declared input types.
    if ('1dlabels' in est._get_tags()['X_types']):
        est.fit(y)
    elif ('2dlabels' in est._get_tags()['X_types']):
        est.fit(np.c_[(y, y)])
    elif ('3darray' in est._get_tags()['X_types']):
        est.fit(X[(np.newaxis, ...)], y)
    else:
        est.fit(X, y)
    # (a) every documented attribute must exist on the fitted estimator.
    for attr in attributes:
        if (attr.name in skipped_attributes):
            continue
        desc = ' '.join(attr.desc).lower()
        # Attributes documented as conditional ('only ...') may be absent.
        if ('only ' in desc):
            continue
        with ignore_warnings(category=FutureWarning):
            assert hasattr(est, attr.name)
    # (b) every fitted attribute must appear in the docstring.
    fit_attr = _get_all_fitted_attributes(est)
    fit_attr_names = [attr.name for attr in attributes]
    undocumented_attrs = set(fit_attr).difference(fit_attr_names)
    undocumented_attrs = set(undocumented_attrs).difference(skipped_attributes)
    if undocumented_attrs:
        raise AssertionError(f'Undocumented attributes for {Estimator.__name__}: {undocumented_attrs}')
class Decoder(nn.Module):
    """Two-layer MLP decoder mapping latent vectors back to data space.

    Architecture: Linear(z_dim -> 512) -> ReLU -> Linear(512 -> x_dim).
    """

    def __init__(self, x_dim, z_dim):
        super(Decoder, self).__init__()
        layers = [
            nn.Linear(z_dim, 512),
            nn.ReLU(),
            nn.Linear(512, x_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Decode a batch of latent codes ``z`` into outputs of size x_dim."""
        return self.model(z)
class ImageGPTConfig(PretrainedConfig):
    """Configuration for an ImageGPT model.

    Stores GPT-2-style hyper-parameters; ``attribute_map`` exposes the
    conventional ``hidden_size`` / ``num_attention_heads`` / ... aliases
    for the GPT-style ``n_embd`` / ``n_head`` / ... names.
    """

    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=(512 + 1), n_positions=(32 * 32), n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function='quick_gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        """Store all hyper-parameters and forward ``kwargs`` to the base config."""
        # Vocabulary / sequence geometry (512 clusters + 1 SOS token; 32x32 pixels).
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        # Transformer size and feed-forward settings.
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        # Dropout probabilities.
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        # Numerics and initialization.
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Attention behaviour flags.
        self.scale_attn_weights = scale_attn_weights
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        # Caching / weight-tying.
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class _fasterRCNN(nn.Module):
    """Faster R-CNN base module: backbone features -> RPN -> RoI heads.

    Subclasses are expected to provide the backbone (``RCNN_base``), the
    head (``_head_to_tail``), the classifier/regressor layers
    (``RCNN_cls_score`` / ``RCNN_bbox_pred``), ``dout_base_model`` and
    ``_init_modules`` — none of these are defined here.
    """

    def __init__(self, classes, class_agnostic):
        # classes: ordered collection of class names.
        # class_agnostic: if True the bbox regressor output is shared across classes.
        super(_fasterRCNN, self).__init__()
        self.classes = classes
        self.n_classes = len(classes)
        self.class_agnostic = class_agnostic
        self.RCNN_loss_cls = 0
        self.RCNN_loss_bbox = 0
        # Region proposal network over the backbone feature map.
        self.RCNN_rpn = _RPN(self.dout_base_model)
        self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
        # 1/16 spatial scale — assumes a backbone feature stride of 16 (TODO confirm).
        self.RCNN_roi_pool = ROIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), (1.0 / 16.0))
        self.RCNN_roi_align = ROIAlign((cfg.POOLING_SIZE, cfg.POOLING_SIZE), (1.0 / 16.0), 0)

    def forward(self, im_data, im_info, gt_boxes, num_boxes):
        """Run detection on a batch.

        Returns:
            (rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox,
             RCNN_loss_cls, RCNN_loss_bbox, rois_label); the losses are 0
            outside training.
        """
        batch_size = im_data.size(0)
        im_info = im_info.data
        gt_boxes = gt_boxes.data
        num_boxes = num_boxes.data
        # Backbone feature map feeding both the RPN and the RoI heads.
        base_feat = self.RCNN_base(im_data)
        (rois, rpn_loss_cls, rpn_loss_bbox) = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
        if self.training:
            # Sample proposals and build their regression/classification targets.
            roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)
            (rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws) = roi_data
            rois_label = Variable(rois_label.view((- 1)).long())
            rois_target = Variable(rois_target.view((- 1), rois_target.size(2)))
            rois_inside_ws = Variable(rois_inside_ws.view((- 1), rois_inside_ws.size(2)))
            rois_outside_ws = Variable(rois_outside_ws.view((- 1), rois_outside_ws.size(2)))
        else:
            rois_label = None
            rois_target = None
            rois_inside_ws = None
            rois_outside_ws = None
            rpn_loss_cls = 0
            rpn_loss_bbox = 0
        rois = Variable(rois)
        # Pool a fixed-size feature per RoI; cfg selects align vs. pool.
        if (cfg.POOLING_MODE == 'align'):
            pooled_feat = self.RCNN_roi_align(base_feat, rois.view((- 1), 5))
        elif (cfg.POOLING_MODE == 'pool'):
            pooled_feat = self.RCNN_roi_pool(base_feat, rois.view((- 1), 5))
        pooled_feat = self._head_to_tail(pooled_feat)
        bbox_pred = self.RCNN_bbox_pred(pooled_feat)
        if (self.training and (not self.class_agnostic)):
            # Keep only the 4 regression outputs belonging to each RoI's GT class.
            bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int((bbox_pred.size(1) / 4)), 4)
            bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
            bbox_pred = bbox_pred_select.squeeze(1)
        cls_score = self.RCNN_cls_score(pooled_feat)
        cls_prob = F.softmax(cls_score, 1)
        RCNN_loss_cls = 0
        RCNN_loss_bbox = 0
        if self.training:
            RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)
            RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)
        # Restore the (batch, num_rois, ...) layout for the outputs.
        cls_prob = cls_prob.view(batch_size, rois.size(1), (- 1))
        bbox_pred = bbox_pred.view(batch_size, rois.size(1), (- 1))
        return (rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label)

    def _init_weights(self):
        """Initialize RPN and head layers with (optionally truncated) normals."""
        def normal_init(m, mean, stddev, truncated=False):
            if truncated:
                # fmod(2) clips samples to two standard deviations.
                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
            else:
                m.weight.data.normal_(mean, stddev)
                m.bias.data.zero_()
        normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)

    def create_architecture(self):
        """Build subclass-specific modules, then initialize all weights."""
        self._init_modules()
        self._init_weights()
class SawyerDoorEnvV2(SawyerXYZEnv):
    """Meta-World v2 Sawyer task: pull a door open by its handle."""

    def __init__(self):
        # Workspace limits for the hand, and randomization ranges for the
        # door body and the (derived) goal site.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (0.0, 0.85, 0.15)
        obj_high = (0.1, 0.95, 0.15)
        goal_low = ((- 0.3), 0.4, 0.1499)
        goal_high = ((- 0.2), 0.5, 0.1501)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': np.array([0.3]), 'obj_init_pos': np.array([0.1, 0.95, 0.15]), 'hand_init_pos': np.array([0, 0.6, 0.2])}
        self.goal = np.array([(- 0.2), 0.7, 0.15])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.max_path_length = 150
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        # qpos address of the door hinge, used by _set_obj_xyz.
        self.door_angle_idx = self.model.get_joint_qpos_addr('doorjoint')

    # NOTE(review): used as an attribute in __init__ (``self.model_name``);
    # upstream declares this as a @property — the decorator appears to have
    # been stripped in this copy.
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_door_pull.xml')

    # NOTE(review): the bare name below looks like a decorator missing its
    # '@' prefix — confirm against upstream.
    _assert_task_is_set
    def step(self, action):
        """Advance one step; success is handle within 8 cm of the goal."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        """Position of the door handle geom."""
        return self.data.get_geom_xpos('handle').copy()

    def _set_obj_xyz(self, pos):
        # Despite the name, ``pos`` is the scalar door hinge angle.
        qpos = self.data.qpos.copy()
        qvel = self.data.qvel.copy()
        qpos[self.door_angle_idx] = pos
        qvel[self.door_angle_idx] = 0
        self.set_state(qpos.flatten(), qvel.flatten())

    def reset_model(self):
        """Reset hand and door; place the goal relative to the door body."""
        self._reset_hand()
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        self.obj_init_pos = (self._get_state_rand_vec() if self.random_init else self.init_config['obj_init_pos'])
        # Goal sits at a fixed offset from the door body.
        self._target_pos = (self.obj_init_pos + np.array([(- 0.3), (- 0.45), 0.0]))
        self.sim.model.body_pos[self.model.body_name2id('door')] = self.obj_init_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        self._set_obj_xyz(0)
        # Max pull distance (x-y only) used to normalize the pull reward.
        self.maxPullDist = np.linalg.norm((self.data.get_geom_xpos('handle')[:(- 1)] - self._target_pos[:(- 1)]))
        self.target_reward = ((1000 * self.maxPullDist) + (1000 * 2))
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand()
        # Gripper center-of-mass, recomputed each reset.
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.reachCompleted = False

    def compute_reward(self, actions, obs):
        """Reward = -reach distance + pull bonus once the handle is grasped.

        Returns:
            [reward, reachDist, pullDist] where distances are gripper-to-handle
            and handle-to-goal (x-y only) respectively.
        """
        del actions
        # obs[3:6] is the object (handle) position in the observation vector.
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        pullDist = np.linalg.norm((objPos[:(- 1)] - pullGoal[:(- 1)]))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)

        def pullReward():
            # c2/c3 shape two exponential bumps that sharpen near the goal.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
def hide_rename_eval_setting(eval_setting_name):
    """Hide all evaluations under a setting, then rename and hide the setting.

    The setting is renamed with a random ``_hidden_<n>`` suffix so the
    original name becomes available again.
    """
    setting = m_repo.get_evaluation_setting(name=eval_setting_name, load_evaluations=True)
    evaluation_uuids = [ev.uuid for ev in setting.evaluations]
    for evaluation in m_repo.get_evaluations(evaluation_uuids):
        m_repo.hide_evaluation(evaluation.uuid)
    suffix = f'_hidden_{random.randint(0, 10000)}'
    m_repo.rename_evaluation_setting(setting.uuid, eval_setting_name + suffix)
    m_repo.hide_evaluation_setting(setting.uuid)
def find_all(a_str, sub):
    """Yield start indices of non-overlapping occurrences of *sub* in *a_str*.

    Matches are found left to right; after each hit the scan resumes at the
    end of the match, so overlapping occurrences are not reported.

    An empty *sub* yields nothing. (The previous implementation looped
    forever on an empty needle: ``str.find('', i)`` returns ``i`` and the
    cursor advanced by ``len(sub) == 0``.)
    """
    if not sub:
        return
    start = a_str.find(sub)
    while start != -1:
        yield start
        # Skip past the current match before searching again.
        start = a_str.find(sub, start + len(sub))
def build_voxel_generator(voxel_config, max_voxels=20000):
    """Create a VoxelGenerator from a voxelization config.

    Args:
        voxel_config: config exposing ``VOXEL_SIZE``, ``RANGE`` and
            ``MAX_POINTS_NUM_PER_VOXEL`` attributes.
        max_voxels: cap on voxels produced per point cloud. Previously
            hard-coded; defaults to 20000 for backward compatibility.

    Returns:
        A configured VoxelGenerator instance.
    """
    return VoxelGenerator(
        voxel_size=voxel_config.VOXEL_SIZE,
        point_cloud_range=voxel_config.RANGE,
        max_num_points=voxel_config.MAX_POINTS_NUM_PER_VOXEL,
        max_voxels=max_voxels,
    )
class TestFromCTypes(object):
    """Tests that ``np.dtype`` understands ctypes types and instances.

    Fix vs. previous revision: ``check`` was defined without ``self`` but
    always invoked as ``self.check(...)``, so every call raised TypeError.
    The stripped ``@pytest.mark.parametrize`` decorator on ``test_pairs``
    is also restored (the file already depends on pytest).
    """

    def check(self, ctype, dtype):
        """Assert both the ctypes type and an instance of it map to *dtype*."""
        dtype = np.dtype(dtype)
        assert_equal(np.dtype(ctype), dtype)
        assert_equal(np.dtype(ctype()), dtype)

    def test_array(self):
        # Multiplying a ctypes type makes a fixed-size array, including 0/1 sizes.
        c8 = ctypes.c_uint8
        self.check((3 * c8), (np.uint8, (3,)))
        self.check((1 * c8), (np.uint8, (1,)))
        self.check((0 * c8), (np.uint8, (0,)))
        self.check((1 * (3 * c8)), ((np.uint8, (3,)), (1,)))
        self.check((3 * (1 * c8)), ((np.uint8, (1,)), (3,)))

    def test_padded_structure(self):
        # Natural alignment inserts padding between uint8 and uint16.
        class PaddedStruct(ctypes.Structure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]
        expected = np.dtype([('a', np.uint8), ('b', np.uint16)], align=True)
        self.check(PaddedStruct, expected)

    def test_bit_fields(self):
        # Bitfields have no dtype equivalent and must be rejected.
        class BitfieldStruct(ctypes.Structure):
            _fields_ = [('a', ctypes.c_uint8, 7), ('b', ctypes.c_uint8, 1)]
        assert_raises(TypeError, np.dtype, BitfieldStruct)
        assert_raises(TypeError, np.dtype, BitfieldStruct())

    def test_pointer(self):
        # Typed pointers are rejected.
        p_uint8 = ctypes.POINTER(ctypes.c_uint8)
        assert_raises(TypeError, np.dtype, p_uint8)

    def test_void_pointer(self):
        self.check(ctypes.c_void_p, np.uintp)

    def test_union(self):
        class Union(ctypes.Union):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]
        # Union members all live at offset 0.
        expected = np.dtype(dict(names=['a', 'b'], formats=[np.uint8, np.uint16], offsets=[0, 0], itemsize=2))
        self.check(Union, expected)

    def test_union_with_struct_packed(self):
        class Struct(ctypes.Structure):
            _pack_ = 1
            _fields_ = [('one', ctypes.c_uint8), ('two', ctypes.c_uint32)]

        class Union(ctypes.Union):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16), ('c', ctypes.c_uint32), ('d', Struct)]
        expected = np.dtype(dict(names=['a', 'b', 'c', 'd'], formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], offsets=[0, 0, 0, 0], itemsize=ctypes.sizeof(Union)))
        self.check(Union, expected)

    def test_union_packed(self):
        class Struct(ctypes.Structure):
            _fields_ = [('one', ctypes.c_uint8), ('two', ctypes.c_uint32)]
            _pack_ = 1

        class Union(ctypes.Union):
            _pack_ = 1
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16), ('c', ctypes.c_uint32), ('d', Struct)]
        expected = np.dtype(dict(names=['a', 'b', 'c', 'd'], formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]], offsets=[0, 0, 0, 0], itemsize=ctypes.sizeof(Union)))
        self.check(Union, expected)

    def test_packed_structure(self):
        # _pack_ = 1 removes padding, so the dtype is unaligned.
        class PackedStructure(ctypes.Structure):
            _pack_ = 1
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]
        expected = np.dtype([('a', np.uint8), ('b', np.uint16)])
        self.check(PackedStructure, expected)

    def test_large_packed_structure(self):
        class PackedStructure(ctypes.Structure):
            _pack_ = 2
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16), ('c', ctypes.c_uint8), ('d', ctypes.c_uint16), ('e', ctypes.c_uint32), ('f', ctypes.c_uint32), ('g', ctypes.c_uint8)]
        expected = np.dtype(dict(formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8], offsets=[0, 2, 4, 6, 8, 12, 16], names=['a', 'b', 'c', 'd', 'e', 'f', 'g'], itemsize=18))
        self.check(PackedStructure, expected)

    def test_big_endian_structure_packed(self):
        class BigEndStruct(ctypes.BigEndianStructure):
            _fields_ = [('one', ctypes.c_uint8), ('two', ctypes.c_uint32)]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '>u4')])
        self.check(BigEndStruct, expected)

    def test_little_endian_structure_packed(self):
        class LittleEndStruct(ctypes.LittleEndianStructure):
            _fields_ = [('one', ctypes.c_uint8), ('two', ctypes.c_uint32)]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '<u4')])
        self.check(LittleEndStruct, expected)

    def test_little_endian_structure(self):
        class PaddedStruct(ctypes.LittleEndianStructure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]
        expected = np.dtype([('a', '<B'), ('b', '<H')], align=True)
        self.check(PaddedStruct, expected)

    def test_big_endian_structure(self):
        class PaddedStruct(ctypes.BigEndianStructure):
            _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]
        expected = np.dtype([('a', '>B'), ('b', '>H')], align=True)
        self.check(PaddedStruct, expected)

    def test_simple_endian_types(self):
        self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
        self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
        # Single-byte types carry no byte order.
        self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
        self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))

    all_types = set(np.typecodes['All'])
    all_pairs = permutations(all_types, 2)

    @pytest.mark.parametrize('pair', all_pairs)
    def test_pairs(self, pair):
        """A comma-separated pair of typecodes parses to a two-field struct dtype."""
        pair_type = np.dtype('{},{}'.format(*pair))
        expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
        assert_equal(pair_type, expected)
def _named_idx(idx):
if ((idx < 0) or (idx > 2)):
raise ValueError(('idx must be between 0 and 2, got %d' % idx))
return ('x', 'y', 'z')[idx] |
def build_transform(is_train, args):
    """Build the train or eval image transform pipeline.

    Training uses timm's ``create_transform`` (with random-crop substituted
    when images are too small to resize); evaluation resizes/center-crops
    then normalizes with the ImageNet statistics.
    """
    resize_im = args.input_size > 32

    if is_train:
        train_tf = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # Small images: replace RandomResizedCrop with a padded RandomCrop.
            train_tf.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return train_tf

    eval_tf = []
    if resize_im:
        # Standard crop ratio: resize to input_size * 256/224, then center-crop.
        resize_size = int((256 / 224) * args.input_size)
        eval_tf.append(transforms.Resize(resize_size, interpolation=3))
        eval_tf.append(transforms.CenterCrop(args.input_size))
    eval_tf.append(transforms.ToTensor())
    eval_tf.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(eval_tf)
class LinformerEncoder(RobertaEncoder):
    """RoBERTa encoder variant whose transformer is a Linformer encoder."""

    def __init__(self, args, dictionary):
        super().__init__(args, dictionary)
        # Checkpoint-format version marker; consumed by upgrade_state_dict_named.
        self.register_buffer('version', torch.tensor(2))

    def build_encoder(self, args, dictionary, embed_tokens):
        """Build the Linformer transformer encoder with BERT-style init."""
        encoder = LinformerTransformerEncoder(args, dictionary, embed_tokens)
        encoder.apply(init_bert_params)
        return encoder

    def upgrade_state_dict_named(self, state_dict, name):
        """Migrate checkpoints saved before version 2.

        Older checkpoints may have untied embedding / LM-head weights; when
        the two tensors differ, an untied lm_head is rebuilt so the stored
        weights load without aliasing.
        """
        super().upgrade_state_dict_named(state_dict, name)
        prefix = ((name + '.') if (name != '') else '')
        # Missing version key defaults to 1 (pre-versioned checkpoints).
        if (utils.item(state_dict.get(f'{prefix}version', torch.tensor(1))) < 2):
            # NOTE(review): writes version 1 (not 2) back into the state dict —
            # presumably marking the dict as legacy on purpose; confirm upstream.
            state_dict[f'{prefix}version'] = torch.tensor(1)
            if (not torch.allclose(state_dict[f'{prefix}sentence_encoder.embed_tokens.weight'], state_dict[f'{prefix}lm_head.weight'])):
                self.lm_head = self.build_lm_head(embed_dim=self.args.encoder_embed_dim, output_dim=len(self.dictionary), activation_fn=self.args.activation_fn, weight=None)
def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging):
    """Write final SQuAD predictions from top-k start/end log-probabilities.

    For each example, combines the per-feature ``start_n_top`` x ``end_n_top``
    span candidates into an n-best list, writes prediction / n-best /
    null-odds JSON files, and returns the best-threshold evaluation computed
    against ``orig_data_file``.
    """
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_log_prob', 'end_log_prob'])
    _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_log_prob', 'end_log_prob'])
    logger.info('Writing predictions to: %s', output_prediction_file)
    # Group the sliding-window features back under their source example.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # Track the minimum no-answer score across this example's features.
        score_null = 1000000
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            cur_null_score = result.cls_logits
            score_null = min(score_null, cur_null_score)
            # Enumerate all start_n_top x end_n_top span candidates; end
            # candidates are stored flattened per start index (j_index).
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_top_log_probs[i]
                    start_index = result.start_top_index[i]
                    j_index = ((i * end_n_top) + j)
                    end_log_prob = result.end_top_log_probs[j_index]
                    end_index = result.end_top_index[j_index]
                    # Discard spans outside the paragraph, without max
                    # context, inverted, or longer than allowed.
                    if (start_index >= (feature.paragraph_len - 1)):
                        continue
                    if (end_index >= (feature.paragraph_len - 1)):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob))
        # Rank candidates by total (start + end) log-probability.
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_log_prob + x.end_log_prob)), reverse=True)
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            # Map wordpiece indices back to the original document text.
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
            tok_text = tok_text.strip()
            tok_text = ' '.join(tok_text.split())
            orig_text = ' '.join(orig_tokens)
            final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging)
            # De-duplicate answers that normalize to the same text.
            if (final_text in seen_predictions):
                continue
            seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob))
        # Guarantee at least one (dummy) entry so the JSON is well-formed.
        if (not nbest):
            nbest.append(_NbestPrediction(text='', start_log_prob=(- 1000000.0), end_log_prob=(- 1000000.0)))
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_log_prob + entry.end_log_prob))
            if (not best_non_null_entry):
                best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_log_prob'] = entry.start_log_prob
            output['end_log_prob'] = entry.end_log_prob
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        assert (best_non_null_entry is not None)
        # The null score itself is the answer-vs-no-answer threshold input.
        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4) + '\n'))
    with open(output_nbest_file, 'w') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
    if version_2_with_negative:
        with open(output_null_log_odds_file, 'w') as writer:
            writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
    # Evaluate against the original data and tune the no-answer threshold.
    with open(orig_data_file, 'r', encoding='utf-8') as reader:
        orig_data = json.load(reader)['data']
    qid_to_has_ans = make_qid_to_has_ans(orig_data)
    has_ans_qids = [k for (k, v) in qid_to_has_ans.items() if v]
    no_ans_qids = [k for (k, v) in qid_to_has_ans.items() if (not v)]
    (exact_raw, f1_raw) = get_raw_scores(orig_data, all_predictions)
    out_eval = {}
    find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
    return out_eval
def get_statistics(args, datasource):
    """Compute per-frequency mean and scale over a dataset's spectrograms.

    Incrementally fits a StandardScaler on the mono spectrogram of every
    track, then floors the scale at 0.01% of its maximum to avoid dividing
    by near-zero bins.

    Returns:
        (mean, std) arrays from the fitted scaler.
    """
    scaler = sklearn.preprocessing.StandardScaler()
    progress = tqdm.tqdm(range(len(datasource.mus.tracks)))
    for track_idx in progress:
        progress.set_description('Compute dataset statistics')
        waveform = datasource.mus.tracks[track_idx].audio.T
        audio = nn.NdArray.from_numpy_array(waveform[None, ...])
        spec = get_spectogram(*get_stft(audio, n_fft=args.nfft, n_hop=args.nhop), mono=True)
        scaler.partial_fit(np.squeeze(spec.data))
    scale_floor = 0.0001 * np.max(scaler.scale_)
    return (scaler.mean_, np.maximum(scaler.scale_, scale_floor))
def main(args, config):
    """Train (or evaluate) the DaVinci VQA model.

    Sets up distributed training, builds the VQA datasets/loaders, the
    model, optimizer and scheduler, optionally restores a checkpoint, then
    runs the train/test loop, saving results and checkpoints per epoch.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # Seed everything (offset by rank so workers differ deterministically).
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_epochs = config['schedular']['warmup_epochs']
    print('Creating vqa datasets')
    datasets = create_dataset('vqa', config)
    # Distributed: shard only the train split; val/test are unsampled.
    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]
    (train_loader, val_loader, test_loader) = create_loader(datasets, samplers, batch_size=[config['batch_size_train'], config['batch_size_test'], config['batch_size_test']], num_workers=[4, 4, 4], is_trains=[True, False, False], collate_fns=[vqa_collate_fn, vqa_collate_fn, None])
    tokenizer = BertTokenizer.from_pretrained(args.encoder, bos_token='[CLS]', eos_token='[SEP]', add_single_sep=False)
    print('Creating model')
    model = DaVinciVQA(config=config, encoder=args.encoder, text_decoder=args.text_decoder, tokenizer=tokenizer)
    model = model.to(device)
    # Optimizer + scheduler; warmup/total steps are expressed in batches.
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    step_per_epoch = len(train_loader)
    arg_sche['num_warmup_steps'] = (arg_sche['warmup_epochs'] * step_per_epoch)
    arg_sche['num_training_steps'] = (arg_sche['epochs'] * step_per_epoch)
    (lr_scheduler, _) = create_scheduler(arg_sche, optimizer)
    if args.checkpoint:
        if args.evaluate:
            # Evaluation: load the fine-tuned VQA checkpoint as-is.
            checkpoint = torch.load(args.checkpoint, map_location='cpu')
            state_dict = checkpoint['model']
            msg = model.load_state_dict(state_dict, strict=False)
            print(('load checkpoint from %s' % args.checkpoint))
            print(msg)
        else:
            # Training: load a pretraining checkpoint; keys are re-prefixed
            # with 'davinci.' and the positional embedding is resized.
            checkpoint = torch.load(args.checkpoint, map_location='cpu')
            state_dict = checkpoint['model']
            for key in list(state_dict.keys())[:]:
                new_key = ('davinci.' + key)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
            pos_embed_reshaped = interpolate_pos_embed(state_dict['davinci.visual_encoder.pos_embed'], model.davinci.visual_encoder)
            state_dict['davinci.visual_encoder.pos_embed'] = pos_embed_reshaped
            msg = model.load_state_dict(state_dict, strict=False)
            print(('load checkpoint from %s' % args.checkpoint))
            print(msg)
    model_without_ddp = model
    # Mixed precision (apex AMP, O1) and optional DDP wrapping.
    (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1')
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    print('Start training')
    start_time = time.time()
    for epoch in range(start_epoch, max_epoch):
        if (not args.evaluate):
            if args.distributed:
                # Reshuffle shards each epoch.
                train_loader.sampler.set_epoch(epoch)
            train_stats = train(model, train_loader, optimizer, tokenizer, epoch, warmup_epochs, device, lr_scheduler, config)
        # Run test inference every epoch and dump the answers to disk.
        vqa_test_result = test(model, test_loader, tokenizer, device, config)
        result_file = save_result(vqa_test_result, args.result_dir, ('vqa_test_result_epoch%d' % epoch))
        if args.evaluate:
            # Single evaluation pass only; skip logging/checkpointing.
            break
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
            # Save the unwrapped model so checkpoints load without DDP.
            save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(save_obj, os.path.join(args.output_dir, ('checkpoint_%02d.pth' % epoch)))
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
# NOTE(review): the bare '()' below appears to be a decorator (likely
# '@pytest.fixture()') whose name was lost in this copy — confirm upstream.
()
def dataset(test_dataset):
    """Fixture: initialize the soundata dataset named by ``test_dataset``.

    Returns None for an empty name and raises ValueError for names soundata
    does not know; data is expected under
    tests/resources/sound_datasets_full/<name>.
    """
    if (test_dataset == ''):
        return None
    elif (test_dataset not in soundata.DATASETS):
        raise ValueError('{} is not a dataset in soundata'.format(test_dataset))
    data_home = os.path.join('tests/resources/sound_datasets_full', test_dataset)
    return soundata.initialize(test_dataset, data_home)
class BandGapsConf(Struct):
    """Builder of a declarative sfepy problem configuration for acoustic
    band-gap homogenization of a two-phase (matrix + inclusion) periodic
    medium.

    The constructor assembles every piece of the problem description
    (solvers, regions, materials, fields, variables, boundary conditions,
    equations, homogenized coefficients and their requirements) into
    ``self.conf``; calling the instance converts that into a
    :class:`ProblemConf`.
    """

    def __init__(self, filename, approx, region_selects, mat_pars, options,
                 evp_options, eigenmomenta_options, band_gaps_options,
                 coefs_save_name='coefs', corrs_save_names=None, incwd=None,
                 output_dir=None, **kwargs):
        """Build the full problem configuration.

        Parameters
        ----------
        filename : str
            Mesh file name (resolved through `incwd`).
        approx : str
            Field approximation order specification.
        region_selects : Struct
            Selectors for the matrix (`.matrix`) and inclusion
            (`.inclusion`) regions.
        mat_pars : Struct
            Material parameters (D_m, density_m, D_c, density_c).
        options, evp_options, eigenmomenta_options, band_gaps_options : dict
            Option dicts passed through to the corresponding solver stages.
        coefs_save_name : str, optional
            Base name for saving coefficients.
        corrs_save_names : dict, optional
            Save names of corrector problems; defaults to
            ``{'evp': 'evp', 'corrs_rs': 'corrs_rs'}``.
        incwd : callable, optional
            Maps a relative name into the current working directory;
            defaults to the identity.
        output_dir : str, optional
            Output directory; defaults to ``incwd('output')``.
        """
        Struct.__init__(self, approx=approx, region_selects=region_selects,
                        mat_pars=mat_pars, options=options,
                        evp_options=evp_options,
                        eigenmomenta_options=eigenmomenta_options,
                        band_gaps_options=band_gaps_options, **kwargs)
        self.incwd = get_default(incwd, (lambda x: x))
        self.conf = Struct()
        self.conf.filename_mesh = self.incwd(filename)
        output_dir = get_default(output_dir, self.incwd('output'))
        default = {'evp': 'evp', 'corrs_rs': 'corrs_rs'}
        self.corrs_save_names = get_default(corrs_save_names, default)
        # Read the mesh bounding box to derive the periodic-cell axes and
        # the spatial dimension.
        io = MeshIO.any_from_filename(self.conf.filename_mesh)
        (self.bbox, self.dim) = io.read_bounding_box(ret_dim=True)
        rpc_axes = (nm.eye(self.dim, dtype=nm.float64)
                    * (self.bbox[1] - self.bbox[0]))
        self.conf.options = options
        self.conf.options.update({
            'output_dir': output_dir,
            'volume': {'value': get_lattice_volume(rpc_axes)},
            'coefs': 'coefs',
            'requirements': 'requirements',
            'coefs_filename': coefs_save_name,
        })
        self.conf.mat_pars = mat_pars
        self.conf.solvers = self.define_solvers()
        self.conf.regions = self.define_regions()
        self.conf.materials = self.define_materials()
        self.conf.fields = self.define_fields()
        self.conf.variables = self.define_variables()
        (self.conf.ebcs, self.conf.epbcs, self.conf.lcbcs,
         self.all_periodic) = self.define_bcs()
        self.conf.functions = self.define_functions()
        self.conf.integrals = self.define_integrals()
        (self.equations, self.expr_coefs) = self.define_equations()
        self.conf.coefs = self.define_coefs()
        self.conf.requirements = self.define_requirements()

    def __call__(self):
        """Return the assembled :class:`ProblemConf` instance."""
        return ProblemConf.from_dict(self.conf.__dict__, import_file(__file__))

    def define_solvers(self):
        """Return linear/nonlinear solver configurations."""
        solvers = {
            'ls_d': ('ls.auto_direct', {'use_presolve': True}),
            'ls_i': ('ls.scipy_iterative',
                     {'method': 'cg', 'i_max': 1000, 'eps_a': 1e-12}),
            'newton': ('nls.newton', {'i_max': 1, 'eps_a': 0.0001}),
        }
        return solvers

    def define_regions(self):
        """Return region definitions: whole cell, matrix, inclusion, their
        interface, and the box-boundary regions used for periodic BCs."""
        regions = {
            'Y': 'all',
            'Y_m': self.region_selects.matrix,
            'Y_c': self.region_selects.inclusion,
            'Gamma_mc': ('r.Y_m *v r.Y_c', 'facet'),
        }
        regions.update(define_box_regions(self.dim, self.bbox[0],
                                          self.bbox[1], 1e-05))
        return regions

    def define_materials(self):
        """Return the single material holding both phases' parameters."""
        materials = {
            'm': ({'D_m': self.mat_pars.D_m,
                   'density_m': self.mat_pars.density_m,
                   'D_c': self.mat_pars.D_c,
                   'density_c': self.mat_pars.density_c},
                  None, None, {'special_constant': True}),
        }
        return materials

    def define_fields(self):
        """Return displacement fields on each phase and a scalar helper field."""
        fields = {
            'vector_Y_m': ('real', self.dim, 'Y_m', self.approx),
            'vector_Y_c': ('real', self.dim, 'Y_c', self.approx),
            'scalar_Y': ('real', 1, 'Y', 1),
        }
        return fields

    def define_variables(self):
        """Return unknown/test/parameter variables for both phases."""
        variables = {
            'u_m': ('unknown field', 'vector_Y_m'),
            'v_m': ('test field', 'vector_Y_m', 'u_m'),
            'Pi': ('parameter field', 'vector_Y_m', '(set-to-None)'),
            'u1_m': ('parameter field', 'vector_Y_m', '(set-to-None)'),
            'u2_m': ('parameter field', 'vector_Y_m', '(set-to-None)'),
            'u_c': ('unknown field', 'vector_Y_c'),
            'v_c': ('test field', 'vector_Y_c', 'u_c'),
            'aux': ('parameter field', 'scalar_Y', '(set-to-None)'),
        }
        return variables

    def define_bcs(self):
        """Return (ebcs, epbcs, lcbcs, all_periodic): essential BCs, the
        periodic BCs on the matrix displacement, and their names."""
        ebcs = {
            'fixed_corners': ('Corners', {'u_m.all': 0.0}),
            'fixed_gamma_mc': ('Gamma_mc', {'u_c.all': 0.0}),
        }
        epbcs = {}
        all_periodic = []
        for vn in ['u_m']:
            val = {('%s.all' % vn): ('%s.all' % vn)}
            epbcs.update({
                ('periodic_%s_x' % vn): (['Left', 'Right'], val, 'match_y_line'),
                ('periodic_%s_y' % vn): (['Top', 'Bottom'], val, 'match_x_line'),
            })
            all_periodic.extend([('periodic_%s_x' % vn),
                                 ('periodic_%s_y' % vn)])
        lcbcs = {}
        return (ebcs, epbcs, lcbcs, all_periodic)

    def define_functions(self):
        """Return the coordinate-matching functions for the periodic BCs."""
        functions = {
            'match_x_line': (per.match_x_line,),
            'match_y_line': (per.match_y_line,),
        }
        return functions

    def define_integrals(self):
        """Return quadrature order definitions."""
        integrals = {'i': 2}
        return integrals

    def define_equations(self):
        """Return (equations, expr_coefs): corrector/eigenvalue equations and
        the expressions evaluated for homogenized coefficients."""
        equations = {}
        equations['corrs_rs'] = {
            'balance_of_forces':
                'dw_lin_elastic.i.Y_m( m.D_m, v_m, u_m )\n'
                ' = - dw_lin_elastic.i.Y_m( m.D_m, v_m, Pi )',
        }
        equations['evp'] = {
            'lhs': 'dw_lin_elastic.i.Y_c( m.D_c, v_c, u_c )',
            'rhs': 'dw_dot.i.Y_c( m.density_c, v_c, u_c )',
        }
        expr_coefs = {
            'D': 'dw_lin_elastic.i.Y_m( m.D_m, u1_m, u2_m )',
            'VF': 'ev_volume.i.%s(aux)',
            'ema': 'ev_integrate.i.Y_c( m.density_c, u_c )',
        }
        return (equations, expr_coefs)

    def define_coefs(self):
        """Return the homogenized-coefficient ("mini-application") definitions."""
        from copy import copy
        ema_options = copy(self.eigenmomenta_options)
        ema_options.update({'var_name': 'u_c'})
        # Dispersion shares the band-gaps options but logs separately.
        dispersion_options = copy(self.band_gaps_options)
        dispersion_options.update({'log_save_name': 'dispersion.log'})
        coefs = {
            'VF': {'regions': ['Y_m', 'Y_c'],
                   'expression': self.expr_coefs['VF'],
                   'class': cb.VolumeFractions},
            'dv_info': {'requires': ['c.VF'],
                        'region_to_material': {'Y_m': ('m', 'density_m'),
                                               'Y_c': ('m', 'density_c')},
                        'class': cp.DensityVolumeInfo},
            'eigenmomenta': {'requires': ['evp', 'c.dv_info'],
                             'expression': self.expr_coefs['ema'],
                             'options': ema_options,
                             'class': cp.Eigenmomenta},
            'M': {'requires': ['evp', 'c.dv_info', 'c.eigenmomenta'],
                  'class': cp.AcousticMassTensor},
            'band_gaps': {'requires': ['evp', 'c.eigenmomenta', 'c.M'],
                          'options': self.band_gaps_options,
                          'class': cp.BandGaps},
            'D': {'requires': ['pis', 'corrs_rs'],
                  'expression': self.expr_coefs['D'],
                  'set_variables': set_coef_d,
                  'class': cb.CoefSymSym},
            'Gamma': {'requires': ['c.D'],
                      'options': {'mode': 'simple',
                                  'incident_wave_dir': None},
                      'class': cp.ChristoffelAcousticTensor},
            'dispersion': {'requires': ['evp', 'c.eigenmomenta', 'c.M',
                                        'c.Gamma'],
                           'options': dispersion_options,
                           'class': cp.BandGaps},
            'polarization_angles': {'requires': ['c.dispersion'],
                                    'options': {'incident_wave_dir': None},
                                    'class': cp.PolarizationAngles},
            'phase_velocity': {'requires': ['c.dv_info', 'c.Gamma'],
                               'options': {'eigensolver': 'eig.sgscipy'},
                               'class': cp.PhaseVelocity},
            'filenames': {},
        }
        return coefs

    def define_requirements(self):
        """Return the corrector-problem requirements.

        Note: the original definition of 'corrs_rs' contained the key
        'is_linear' twice with the same value; the duplicate was removed.
        """
        requirements = {
            'evp': {'ebcs': ['fixed_gamma_mc'],
                    'epbcs': None,
                    'equations': self.equations['evp'],
                    'save_name': self.corrs_save_names['evp'],
                    'options': self.evp_options,
                    'class': cp.SimpleEVP},
            'pis': {'variables': ['u_m'],
                    'class': cb.ShapeDimDim},
            'corrs_rs': {'requires': ['pis'],
                         'ebcs': ['fixed_corners'],
                         'epbcs': self.all_periodic,
                         'equations': self.equations['corrs_rs'],
                         'set_variables': [('Pi', 'pis', 'u_m')],
                         'save_name': self.corrs_save_names['corrs_rs'],
                         'is_linear': True,
                         'class': cb.CorrDimDim},
        }
        return requirements
class GAN(object):
    """DCGAN-style generative adversarial network trained on CelebA faces.

    NOTE(review): this class targets a pre-1.0 TensorFlow API
    (``tf.pack``, queue runners, ``tf.train.SummaryWriter``,
    ``tf.initialize_all_variables``) and Python 2 (``xrange``); it will
    not run unmodified on modern TensorFlow / Python 3.
    """

    def __init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir):
        # Build the input pipeline eagerly: a filename queue over the CelebA
        # training images, read/cropped/resized and batched into self.images.
        celebA_dataset = celebA.read_dataset(data_dir)
        self.z_dim = z_dim
        self.crop_image_size = crop_image_size
        self.resized_image_size = resized_image_size
        self.batch_size = batch_size
        filename_queue = tf.train.string_input_producer(celebA_dataset.train_images)
        self.images = self._read_input_queue(filename_queue)

    def _read_input(self, filename_queue):
        """Read one JPEG from the queue; return a record whose
        ``input_image`` is the cropped, bilinearly resized float32 image."""

        class DataRecord(object):
            # Plain holder for the decoded image tensor.
            pass
        reader = tf.WholeFileReader()
        (key, value) = reader.read(filename_queue)
        record = DataRecord()
        decoded_image = tf.image.decode_jpeg(value, channels=3)
        # Fixed crop offset (55, 35) — presumably targets the face region of
        # the 178x218 CelebA images; TODO confirm input image size.
        cropped_image = tf.cast(tf.image.crop_to_bounding_box(decoded_image, 55, 35, self.crop_image_size, self.crop_image_size), tf.float32)
        # resize_bilinear needs a 4-D batch; add and remove a batch dim.
        decoded_image_4d = tf.expand_dims(cropped_image, 0)
        resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.resized_image_size, self.resized_image_size])
        record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
        return record

    def _read_input_queue(self, filename_queue):
        """Return a batched image tensor drawn from the filename queue,
        normalized (via utils.process_image) with mean/stddev 127.5."""
        print('Setting up image reader...')
        read_input = self._read_input(filename_queue)
        num_preprocess_threads = 4
        num_examples_per_epoch = 800
        min_queue_examples = int((0.1 * num_examples_per_epoch))
        print('Shuffling')
        input_image = tf.train.batch([read_input.input_image], batch_size=self.batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (2 * self.batch_size)))
        # Scale pixel values from [0, 255] to [-1, 1] to match tanh output.
        input_image = utils.process_image(input_image, 127.5, 127.5)
        return input_image

    def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name='generator'):
        """Map latent vectors ``z`` to images via a stack of strided
        transposed convolutions; ``dims`` lists per-layer channel counts,
        the last entry being the output channels. Returns a tanh image."""
        N = len(dims)
        # Spatial size of the first feature map; each deconv layer doubles it.
        image_size = (self.resized_image_size // (2 ** (N - 1)))
        with tf.variable_scope(scope_name) as scope:
            # Project z to the first conv volume with a fully connected layer.
            W_z = utils.weight_variable([self.z_dim, ((dims[0] * image_size) * image_size)], name='W_z')
            b_z = utils.bias_variable([((dims[0] * image_size) * image_size)], name='b_z')
            h_z = (tf.matmul(z, W_z) + b_z)
            h_z = tf.reshape(h_z, [(- 1), image_size, image_size, dims[0]])
            h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope='gen_bnz')
            h = activation(h_bnz, name='h_z')
            utils.add_activation_summary(h)
            # Intermediate deconv + batch-norm + activation layers.
            for index in range((N - 2)):
                image_size *= 2
                W = utils.weight_variable([5, 5, dims[(index + 1)], dims[index]], name=('W_%d' % index))
                b = utils.bias_variable([dims[(index + 1)]], name=('b_%d' % index))
                deconv_shape = tf.pack([tf.shape(h)[0], image_size, image_size, dims[(index + 1)]])
                h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
                h_bn = utils.batch_norm(h_conv_t, dims[(index + 1)], train_phase, scope=('gen_bn%d' % index))
                h = activation(h_bn, name=('h_%d' % index))
                utils.add_activation_summary(h)
            # Final deconv to image channels; no batch norm, tanh output.
            image_size *= 2
            W_pred = utils.weight_variable([5, 5, dims[(- 1)], dims[(- 2)]], name='W_pred')
            b_pred = utils.bias_variable([dims[(- 1)]], name='b_pred')
            deconv_shape = tf.pack([tf.shape(h)[0], image_size, image_size, dims[(- 1)]])
            h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b_pred, output_shape=deconv_shape)
            pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
            utils.add_activation_summary(pred_image)
        return pred_image

    def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name='discriminator', scope_reuse=False):
        """Classify images as real/fake with strided convolutions; returns
        (sigmoid probability, pre-sigmoid logits, last conv features).
        ``scope_reuse`` shares variables between the real and fake passes."""
        N = len(dims)
        with tf.variable_scope(scope_name) as scope:
            if scope_reuse:
                scope.reuse_variables()
            h = input_images
            # DCGAN convention: no batch norm on the first conv layer.
            skip_bn = True
            for index in range((N - 2)):
                W = utils.weight_variable([5, 5, dims[index], dims[(index + 1)]], name=('W_%d' % index))
                b = utils.bias_variable([dims[(index + 1)]], name=('b_%d' % index))
                h_conv = utils.conv2d_strided(h, W, b)
                if skip_bn:
                    h_bn = h_conv
                    skip_bn = False
                else:
                    h_bn = utils.batch_norm(h_conv, dims[(index + 1)], train_phase, scope=('disc_bn%d' % index))
                h = activation(h_bn, name=('h_%d' % index))
                utils.add_activation_summary(h)
            # Flatten and apply a single linear layer to produce the logit.
            shape = h.get_shape().as_list()
            image_size = (self.resized_image_size // (2 ** (N - 2)))
            h_reshaped = tf.reshape(h, [self.batch_size, ((image_size * image_size) * shape[3])])
            W_pred = utils.weight_variable([((image_size * image_size) * shape[3]), dims[(- 1)]], name='W_pred')
            b_pred = utils.bias_variable([dims[(- 1)]], name='b_pred')
            h_pred = (tf.matmul(h_reshaped, W_pred) + b_pred)
        return (tf.nn.sigmoid(h_pred), h_pred, h)

    def _cross_entropy_loss(self, logits, labels, name='x_entropy'):
        """Mean sigmoid cross-entropy between logits and labels; also
        records a scalar summary under ``name``."""
        xentropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits, labels))
        tf.scalar_summary(name, xentropy)
        return xentropy

    def _get_optimizer(self, optimizer_name, learning_rate, optimizer_param):
        """Return an Adam or RMSProp optimizer; ``optimizer_param`` is
        beta1 for Adam and decay for RMSProp. Raises ValueError otherwise."""
        self.learning_rate = learning_rate
        if (optimizer_name == 'Adam'):
            return tf.train.AdamOptimizer(learning_rate, beta1=optimizer_param)
        elif (optimizer_name == 'RMSProp'):
            return tf.train.RMSPropOptimizer(learning_rate, decay=optimizer_param)
        else:
            raise ValueError(('Unknown optimizer %s' % optimizer_name))

    def _train(self, loss_val, var_list, optimizer):
        """Return a training op for ``loss_val`` over ``var_list``,
        recording gradient summaries along the way."""
        grads = optimizer.compute_gradients(loss_val, var_list=var_list)
        for (grad, var) in grads:
            utils.add_gradient_summary(grad, var)
        return optimizer.apply_gradients(grads)

    def _setup_placeholder(self):
        """Create the train-phase flag and latent-vector placeholders."""
        self.train_phase = tf.placeholder(tf.bool)
        self.z_vec = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')

    def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        """Define discriminator and generator losses; with ``use_features``
        the generator also matches discriminator features (weighted 0.1)."""
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real), name='disc_real_loss')
        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake), name='disc_fake_loss')
        self.discriminator_loss = (discriminator_loss_fake + discriminator_loss_real)
        # Non-saturating generator loss: fool the discriminator into "real".
        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name='gen_disc_loss')
        if use_features:
            gen_loss_features = (tf.reduce_mean(tf.nn.l2_loss((feature_real - feature_fake))) / (self.crop_image_size ** 2))
        else:
            gen_loss_features = 0
        self.gen_loss = (gen_loss_disc + (0.1 * gen_loss_features))
        tf.scalar_summary('Discriminator_loss', self.discriminator_loss)
        tf.scalar_summary('Generator_loss', self.gen_loss)

    def create_network(self, generator_dims, discriminator_dims, optimizer='Adam', learning_rate=0.0002, optimizer_param=0.9, improved_gan_loss=True):
        """Build the full GAN graph: generator, shared-weight discriminator
        passes on real and generated images, losses, and train ops."""
        print('Setting up model...')
        self._setup_placeholder()
        tf.histogram_summary('z', self.z_vec)
        self.gen_images = self._generator(self.z_vec, generator_dims, self.train_phase, scope_name='generator')
        tf.image_summary('image_real', self.images, max_images=2)
        tf.image_summary('image_generated', self.gen_images, max_images=2)

        def leaky_relu(x, name='leaky_relu'):
            # Standard DCGAN discriminator activation (slope 0.2).
            return utils.leaky_relu(x, alpha=0.2, name=name)
        (discriminator_real_prob, logits_real, feature_real) = self._discriminator(self.images, discriminator_dims, self.train_phase, activation=leaky_relu, scope_name='discriminator', scope_reuse=False)
        (discriminator_fake_prob, logits_fake, feature_fake) = self._discriminator(self.gen_images, discriminator_dims, self.train_phase, activation=leaky_relu, scope_name='discriminator', scope_reuse=True)
        self._gan_loss(logits_real, logits_fake, feature_real, feature_fake, use_features=improved_gan_loss)
        train_variables = tf.trainable_variables()
        for v in train_variables:
            utils.add_to_regularization_and_summary(var=v)
        # Split variables by scope name so each train op updates one network.
        self.generator_variables = [v for v in train_variables if v.name.startswith('generator')]
        self.discriminator_variables = [v for v in train_variables if v.name.startswith('discriminator')]
        optim = self._get_optimizer(optimizer, learning_rate, optimizer_param)
        self.generator_train_op = self._train(self.gen_loss, self.generator_variables, optim)
        self.discriminator_train_op = self._train(self.discriminator_loss, self.discriminator_variables, optim)

    def initialize_network(self, logs_dir):
        """Create the session, summaries, saver and queue runners; restore
        from the latest checkpoint in ``logs_dir`` if one exists."""
        print('Initializing network...')
        self.logs_dir = logs_dir
        self.sess = tf.Session()
        self.summary_op = tf.merge_all_summaries()
        self.saver = tf.train.Saver()
        self.summary_writer = tf.train.SummaryWriter(self.logs_dir, self.sess.graph)
        self.sess.run(tf.initialize_all_variables())
        ckpt = tf.train.get_checkpoint_state(self.logs_dir)
        if (ckpt and ckpt.model_checkpoint_path):
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print('Model restored...')
        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(self.sess, self.coord)

    def train_model(self, max_iterations):
        """Alternate one discriminator and one generator step per iteration;
        log losses every 10 steps and checkpoint every 2000. Queue threads
        are always stopped in the ``finally`` block."""
        try:
            print('Training model...')
            for itr in xrange(1, max_iterations):
                batch_z = np.random.uniform((- 1.0), 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
                feed_dict = {self.z_vec: batch_z, self.train_phase: True}
                self.sess.run(self.discriminator_train_op, feed_dict=feed_dict)
                self.sess.run(self.generator_train_op, feed_dict=feed_dict)
                if ((itr % 10) == 0):
                    (g_loss_val, d_loss_val, summary_str) = self.sess.run([self.gen_loss, self.discriminator_loss, self.summary_op], feed_dict=feed_dict)
                    print(('Step: %d, generator loss: %g, discriminator_loss: %g' % (itr, g_loss_val, d_loss_val)))
                    self.summary_writer.add_summary(summary_str, itr)
                if ((itr % 2000) == 0):
                    self.saver.save(self.sess, (self.logs_dir + 'model.ckpt'), global_step=itr)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print('Ending Training...')
        finally:
            self.coord.request_stop()
            self.coord.join(self.threads)

    def visualize_model(self):
        """Sample one batch of latent vectors, generate images, and save a
        4 x (batch_size/4) grid to ``generated.png`` in the logs dir."""
        print('Sampling images from model...')
        batch_z = np.random.uniform((- 1.0), 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
        feed_dict = {self.z_vec: batch_z, self.train_phase: False}
        images = self.sess.run(self.gen_images, feed_dict=feed_dict)
        # Undo the [-1, 1] normalization back to displayable uint8 pixels.
        images = utils.unprocess_image(images, 127.5, 127.5).astype(np.uint8)
        shape = [4, (self.batch_size // 4)]
        utils.save_imshow_grid(images, self.logs_dir, 'generated.png', shape=shape)
def RatVal(a, b, ctx=None):
    """Return a Z3 rational value equal to a/b.

    Both `a` and `b` must be integers or strings convertible to reals;
    the quotient is simplified into a single rational numeral.
    """
    if z3_debug():
        # Validate both arguments in one pass rather than twice inline.
        checks = ((a, 'First argument cannot be converted into an integer'),
                  (b, 'Second argument cannot be converted into an integer'))
        for value, message in checks:
            _z3_assert(_is_int(value) or isinstance(value, str), message)
    numerator = RealVal(a, ctx)
    denominator = RealVal(b, ctx)
    return simplify(numerator / denominator)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convolutions with batch norm,
    an optional downsampling branch, and an optional residual connection.

    `dilation` gives the dilation (and padding) of the two convolutions;
    `residual=False` turns the block into a plain conv stack.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 dilation=(1, 1), residual=True):
        super(BasicBlock, self).__init__()
        first_dil, second_dil = dilation
        self.conv1 = conv3x3(inplanes, planes, stride,
                             padding=first_dil, dilation=first_dil)
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes,
                             padding=second_dil, dilation=second_dil)
        self.bn2 = BatchNorm(planes)
        self.downsample = downsample
        self.stride = stride
        self.residual = residual

    def forward(self, x):
        """Run conv-bn-relu-conv-bn, add the (possibly downsampled)
        shortcut when enabled, and apply the final ReLU."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut branch: identity unless a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)
        if self.residual:
            out += shortcut
        return self.relu(out)
def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-05):
    """Apply batch normalization folded into one scale-and-shift.

    Computes ``(x - mean) / sqrt(var + eps) * gain + bias`` using a single
    multiply and subtract: scale = gain / sqrt(var + eps) and
    shift = mean * scale - bias, returning ``x * scale - shift``.
    """
    inv_std = torch.rsqrt(var + eps)
    if gain is not None:
        inv_std = inv_std * gain
    offset = mean * inv_std
    if bias is not None:
        offset = offset - bias
    return x * inv_std - offset
class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel, TrainableProbabilisticModel):
    """Test model whose predictive variance shrinks with dataset size.

    Wraps the quadratic-mean/RBF-kernel base model and divides its
    predictive variance by the number of observed points, so acquiring
    more data monotonically decreases uncertainty.
    """

    def __init__(self, data: Dataset):
        super().__init__()
        self._data = data
        # NOTE(review): the original body ended with a bare `_check_shapes`
        # expression that evaluated the name and discarded the result — a
        # no-op (or NameError) left over from an intended call. Removed;
        # restore as an actual call if shape validation is wanted here.

    def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
        """Return the base model's mean and its variance scaled by 1/len(data)."""
        (mean, var) = super().predict(query_points)
        return (mean, (var / len(self._data)))

    def update(self, dataset: Dataset) -> None:
        """Replace the stored dataset (changes the variance scaling)."""
        self._data = dataset

    def optimize(self, dataset: Dataset) -> None:
        """No-op: this test model has nothing to fit."""
        pass
class UniqueSinglingOutQueries():
    """Collects singling-out queries, deduplicated up to character reordering.

    A query is kept only if it matches exactly one row of the dataframe and
    no previously accepted query has the same characters (the canonical key
    is the sorted character sequence of the query string).
    """

    def __init__(self):
        # Canonical forms already accepted, for O(1) duplicate checks.
        self._set: Set[str] = set()
        # Accepted queries, in insertion order.
        self._list: List[str] = []

    def check_and_append(self, query: str, df: pd.DataFrame):
        """Append `query` iff it is new (up to reordering) and singles out
        exactly one row of `df`."""
        canonical = ''.join(sorted(query))
        if canonical in self._set:
            return
        count = safe_query_counts(query=query, df=df)
        if count is not None and count == 1:
            self._set.add(canonical)
            self._list.append(query)

    def __len__(self):
        """Number of accepted queries."""
        return len(self._list)

    def queries(self) -> List[str]:
        """Return the accepted queries in insertion order."""
        return self._list
def import_fsspec(name: str) -> ModuleType:
    """Import and return the ``fsspec`` module.

    Raises an ImportError with install instructions, mentioning *name*
    (the feature that requires fsspec), when the package is missing.
    Also verifies pyarrow/parquet availability via import_pyarrow_parquet.
    """
    try:
        import fsspec
    except ModuleNotFoundError as err:
        raise ImportError(f'''to use {name}, you must install fsspec:
pip install fsspec
or
conda install -c conda-forge fsspec
''') from err
    # The fsspec-backed reader also needs pyarrow's parquet support.
    import_pyarrow_parquet(name)
    return fsspec
def transform_params(params, params_tf, num_classes):
    """Copy weights from a TF BiT-ResNet checkpoint dict into ``params``.

    Mutates `params` (a nested dict of arrays) in place: root/unit conv
    kernels and group-norm scale/bias are copied from the flat
    `params_tf` checkpoint mapping, and the classification head is
    re-initialized to zeros for `num_classes` outputs. Returns None.
    """
    params['root_block']['conv_root']['kernel'] = params_tf['resnet/root_block/standardized_conv2d/kernel']
    for block in ['block1', 'block2', 'block3', 'block4']:
        # Every checkpoint key mentioning this block contains a 'unitNN'
        # component — assumed checkpoint naming; TODO confirm.
        units = {re.findall('unit\\d+', key)[0]
                 for key in params_tf if block in key}
        for unit in units:
            for idx, group in enumerate(['a', 'b', 'c'], start=1):
                params[block][unit][f'conv{idx}']['kernel'] = params_tf[f'resnet/{block}/{unit}/{group}/standardized_conv2d/kernel']
                # Group-norm params get three leading singleton axes.
                params[block][unit][f'gn{idx}']['bias'] = params_tf[f'resnet/{block}/{unit}/{group}/group_norm/beta'][(None, None, None)]
                params[block][unit][f'gn{idx}']['scale'] = params_tf[f'resnet/{block}/{unit}/{group}/group_norm/gamma'][(None, None, None)]
            # Optional projection shortcut (at most one per unit).
            proj_keys = [key for key in params_tf
                         if f'{block}/{unit}/a/proj' in key]
            assert (len(proj_keys) <= 1)
            if proj_keys:
                params[block][unit]['conv_proj']['kernel'] = params_tf[proj_keys[0]]
    params['norm-pre-head']['bias'] = params_tf['resnet/group_norm/beta'][(None, None, None)]
    params['norm-pre-head']['scale'] = params_tf['resnet/group_norm/gamma'][(None, None, None)]
    # Fresh zero-initialized head for the new number of classes.
    head_width = params['conv_head']['kernel'].shape[0]
    params['conv_head']['kernel'] = np.zeros((head_width, num_classes), dtype=np.float32)
    params['conv_head']['bias'] = np.zeros(num_classes, dtype=np.float32)
class ArgMaxParameter(message.Message):
    """Generated protocol buffer message for Caffe's ArgMax layer parameters.

    NOTE(review): protoc-generated-style code — do not edit by hand;
    regenerate from the .proto definition instead.
    """
    # Python 2-style metaclass hook: reflection builds fields and
    # (de)serialization from the descriptor at class-creation time.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    # Message descriptor generated elsewhere in this module.
    DESCRIPTOR = _ARGMAXPARAMETER
def _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max):
Xq = torch.round(((X * (1.0 / scale)) + zero_point))
mask = ((Xq >= quant_min) * (Xq <= quant_max))
res = torch.zeros_like(dY)
res[mask] = dY[mask]
return res |
def make_square(img_size=(64, 64), num_points_per_cluster=8, cluster_radius=1):
    """Render an image with four point clusters on the corners of a square.

    Two corner centers are sampled at random; the other two are derived by
    ``get_point_square``. Sampling is retried until all four clusters fit
    within the image. Returns an (H, W, 1) array with points drawn as
    radius-2 filled circles of value 255 on a zero background.
    """
    width, height = img_size

    def fits(x, y):
        # A cluster center is valid when the whole cluster stays in bounds.
        return (cluster_radius <= x <= width - cluster_radius
                and cluster_radius <= y <= height - cluster_radius)

    while True:
        x1 = random.randint(cluster_radius, width - cluster_radius)
        y1 = random.randint(cluster_radius, height - cluster_radius)
        x2 = random.randint(cluster_radius, width - cluster_radius)
        y2 = random.randint(cluster_radius, height - cluster_radius)
        (x3, y3, x4, y4) = get_point_square(x1, y1, x2, y2)
        # Reject the sample if either derived corner's cluster would spill
        # outside the image, and try again.
        if not (fits(x3, y3) and fits(x4, y4)):
            continue
        points = []
        for cx, cy in ((x1, y1), (x2, y2), (x3, y3), (x4, y4)):
            points = get_cluster_points(num_points_per_cluster, cx, cy,
                                        points, cluster_radius)
        image = np.zeros((width, height, 1))
        for p in points:
            image = cv2.circle(image, p, radius=2, color=255, thickness=(- 1))
        return image
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.