code stringlengths 101 5.91M |
|---|
def test_gh_9608_preserve_array_shape():
    """Regression test for gh-9608: newton/halley must preserve array shapes.

    A scalar-valued fprime2 works for a length-1 x0, raises IndexError for a
    longer x0, and an array-valued fprime2 converges element-wise.
    """
    def func(x):
        return x ** 2

    def deriv(x):
        return 2 * x

    def deriv2_scalar(x):
        # Deliberately scalar: does not broadcast to x's shape.
        return 2

    x0_single = np.array([-2], dtype=np.float32)
    root, info = newton(func, x0_single, fprime=deriv, fprime2=deriv2_scalar,
                        full_output=True)
    assert info.converged

    x0_pair = np.array([-2, -3], dtype=np.float32)
    with pytest.raises(IndexError):
        zeros.newton(func, x0_pair, fprime=deriv, fprime2=deriv2_scalar,
                     full_output=True)

    def deriv2_array(x):
        # Second derivative broadcast to the shape of x.
        return np.full(np.shape(x), 2, dtype=np.float32)

    result = zeros.newton(func, x0_pair, fprime=deriv, fprime2=deriv2_array,
                          full_output=True)
    assert result.converged.all()
class SubsetRandomSampler(Sampler[int]):
    r"""Sample elements randomly from a given list of indices, without
    replacement.

    Args:
        indices: sequence of indices to draw from.
        generator: optional ``torch.Generator`` controlling the shuffle.
    """

    indices: Sequence[int]

    def __init__(self, indices: Sequence[int], generator=None) -> None:
        self.indices = indices
        self.generator = generator

    def __iter__(self) -> Iterator[int]:
        # Permute the positions 0..n-1, then map each position back to the
        # caller-supplied index it refers to.
        permutation = torch.randperm(len(self.indices), generator=self.generator)
        for position in permutation:
            yield self.indices[position]

    def __len__(self) -> int:
        return len(self.indices)
def _compute_support_files(db_dir, tile_id_column, tile_geometry, oa_id_column, oa_geometry, flow_origin_column, flow_destination_column, flow_flows_column):
    """Generate the processed support files for a flow database directory.

    Reads tessellation/output-area shapefiles (falling back to geojson),
    flows.csv and (optionally) features.csv from ``db_dir``, and writes:
    ``processed/oa_gdf.csv.gz``, ``processed/oa2centroid.pkl``,
    ``processed/flows_oa.csv.zip``, ``processed/od2flow.pkl`` and
    ``tileid2oa2handmade_features.json``.

    Raises:
        ValueError: if features.csv exists but lacks ``oa_id_column``.
    """
    _check_base_files(db_dir)
    print('Generating the processed files - it may take a while....')

    print('Reading tessellation....')
    try:
        tessellation = geopandas.read_file(db_dir + '/tessellation.shp', dtype={tile_id_column: str})
    except Exception:  # .shp missing/unreadable -> fall back to geojson
        tessellation = geopandas.read_file(db_dir + '/tessellation.geojson', dtype={tile_id_column: str})
    tessellation = tessellation[[tile_id_column, tile_geometry]]

    print('Reading output areas....')
    try:
        output_areas = geopandas.read_file(db_dir + '/output_areas.shp', dtype={oa_id_column: str})
    except Exception:  # .shp missing/unreadable -> fall back to geojson
        output_areas = geopandas.read_file(db_dir + '/output_areas.geojson', dtype={oa_id_column: str})
    output_areas = output_areas[[oa_id_column, oa_geometry]]

    print('Reading features....')
    try:
        features = pd.read_csv(db_dir + '/features.csv', dtype={oa_id_column: str})
    except FileNotFoundError:
        features = None
        print('Running without features. features.csv not found....')
    else:
        if oa_id_column not in features.columns:
            # Bug fix: the original message concatenated a unary-plus string
            # (TypeError) and the error was swallowed by a bare except.
            raise ValueError('Features must be associated with an output area. '
                             'Please add a column ' + oa_id_column + ' to features.csv')

    print('Mapping output areas with tessellation....')
    output_areas['centroid'] = output_areas[oa_geometry].centroid
    output_areas['area_km2'] = output_areas[oa_geometry].area / (10 ** 6)
    output_areas['x'] = output_areas['centroid'].x
    output_areas['y'] = output_areas['centroid'].y
    # Centroid serialized as a "[x,y]" string for the csv output.
    output_areas['ctrs'] = '[' + output_areas['x'].astype(str) + ',' + output_areas['y'].astype(str) + ']'
    temp_out = output_areas[[oa_id_column, 'ctrs', 'area_km2']]
    temp_out.rename(columns={oa_id_column: 'geo_code', 'ctrs': 'centroid'}, inplace=True)
    temp_out.to_csv(db_dir + '/processed/oa_gdf.csv.gz')

    oa2centroid = {}
    for i, row in temp_out.iterrows():
        oa2centroid[row['geo_code']] = row['centroid']
    with open(db_dir + '/processed/oa2centroid.pkl', 'wb') as handle:
        pickle.dump(oa2centroid, handle)

    # Replace the polygon geometry with the centroid so the spatial join
    # assigns each output area to the tile containing its centroid.
    output_areas.drop(columns=[oa_geometry], inplace=True)
    output_areas.rename(columns={'centroid': oa_geometry}, inplace=True)
    mapping = geopandas.sjoin(output_areas, tessellation, how='inner', op='within')
    try:
        mapping.drop(columns=['index_right'], inplace=True)
    except Exception:
        pass  # older/newer geopandas may not add this helper column

    flows = pd.read_csv(db_dir + '/flows.csv',
                        dtype={flow_origin_column: str, flow_destination_column: str, flow_flows_column: int})
    flows = flows[[flow_origin_column, flow_destination_column, flow_flows_column]]
    flows.rename(columns={flow_origin_column: 'residence',
                          flow_destination_column: 'workplace',
                          flow_flows_column: 'commuters'}, inplace=True)
    flows.to_csv(db_dir + '/processed/flows_oa.csv.zip')

    od2flow = {}
    for i, row in flows.iterrows():
        od2flow[(row['residence'], row['workplace'])] = row['commuters']
    with open(db_dir + '/processed/od2flow.pkl', 'wb') as handle:
        # Bug fix: the original pickled oa2centroid into od2flow.pkl.
        pickle.dump(od2flow, handle)

    # Feature maps are only produced when features.csv exists (the original
    # re-read the file here -- with a missing '/' in the path -- and crashed
    # when it was absent, contradicting the "running without features" path).
    if features is not None:
        oa2features = {}
        for i, row in features.iterrows():
            oa2features[row[0]] = row[1:].values
        tileid2oa2handmade_features = dict()
        for i, row in mapping.iterrows():
            if row[tile_id_column] not in tileid2oa2handmade_features:
                tileid2oa2handmade_features[row[tile_id_column]] = dict()
            tileid2oa2handmade_features[row[tile_id_column]][row[oa_id_column]] = dict()
        for i, row in features.iterrows():
            # NOTE(review): this indexes the per-tile dict by row[tile_id_column]
            # taken from a *features* row -- features.csv is not shown to carry
            # that column; confirm the intended join against `mapping`.
            for item in zip(list(row.keys()), row.values):
                tileid2oa2handmade_features[row[tile_id_column]][item[0]] = [item[1]]
        with open('tileid2oa2handmade_features.json', 'w') as f:
            json.dump(tileid2oa2handmade_features, f)
class DataLoader(object):
    """Iterates (positive, negative) sentence-pair batches from a TSV file.

    Each input line is tab-separated; columns 0, 2 and 4 are used as
    (claim, positive evidence, negative evidence).  Iteration yields six
    LongTensors: (inp, mask, segment) for the positive pairs followed by the
    same triple for the negative pairs.
    """

    def __init__(self, data_path, tokenizer, args, test=False, cuda=True, batch_size=64):
        self.cuda = cuda
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.max_len = args.max_len
        self.evi_num = args.evi_num
        self.threshold = args.threshold
        self.data_path = data_path
        self.test = test
        examples = self.read_file(data_path)
        self.examples = examples
        self.total_num = len(examples)
        if self.test:
            # NOTE(review): test mode caps the example count at a fixed
            # 100000 regardless of the actual file size -- confirm intent.
            self.total_num = 100000
            self.total_step = np.ceil(self.total_num * 1.0 / batch_size)
            self.shuffle()
        else:
            self.total_step = self.total_num / batch_size
            self.shuffle()
        self.step = 0

    def process_sent(self, sentence):
        """Strip FEVER/PTB bracket tokens and normalize quotes in a sentence."""
        sentence = re.sub(' \\-LSB\\-.*?\\-RSB\\-', '', sentence)
        sentence = re.sub('\\-LRB\\- \\-RRB\\- ', '', sentence)
        sentence = re.sub(' -LRB-', ' ( ', sentence)
        sentence = re.sub('-RRB-', ' )', sentence)
        sentence = re.sub('--', '-', sentence)
        sentence = re.sub('``', '"', sentence)
        sentence = re.sub("''", '"', sentence)
        return sentence

    def process_wiki_title(self, title):
        """Convert a wiki title to plain text (underscores, PTB tokens)."""
        title = re.sub('_', ' ', title)
        title = re.sub(' -LRB-', ' ( ', title)
        title = re.sub('-RRB-', ' )', title)
        title = re.sub('-COLON-', ':', title)
        return title

    def read_file(self, data_path):
        """Read the TSV file; keep columns 0, 2 and 4 of every line."""
        examples = list()
        with open(data_path) as fin:
            for step, line in enumerate(fin):
                sublines = line.strip().split('\t')
                examples.append([self.process_sent(sublines[0]),
                                 self.process_sent(sublines[2]),
                                 self.process_sent(sublines[4])])
        return examples

    def shuffle(self):
        np.random.shuffle(self.examples)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def __len__(self):
        # Bug fix: the original returned self._n_batch, which is never set
        # (AttributeError).  Report the number of batches next() will yield:
        # step advances while step < total_step, i.e. ceil(total_step) times.
        return int(np.ceil(self.total_step))

    def next(self):
        """Return the next batch of tensors, or StopIteration at epoch end."""
        if self.step < self.total_step:
            examples = self.examples[self.step * self.batch_size:(self.step + 1) * self.batch_size]
            pos_inputs = list()
            neg_inputs = list()
            for example in examples:
                pos_inputs.append([example[0], example[1]])
                neg_inputs.append([example[0], example[2]])
            inp_pos, msk_pos, seg_pos = tok2int_list(pos_inputs, self.tokenizer, self.max_len)
            inp_neg, msk_neg, seg_neg = tok2int_list(neg_inputs, self.tokenizer, self.max_len)
            inp_tensor_pos = Variable(torch.LongTensor(inp_pos))
            msk_tensor_pos = Variable(torch.LongTensor(msk_pos))
            seg_tensor_pos = Variable(torch.LongTensor(seg_pos))
            inp_tensor_neg = Variable(torch.LongTensor(inp_neg))
            msk_tensor_neg = Variable(torch.LongTensor(msk_neg))
            seg_tensor_neg = Variable(torch.LongTensor(seg_neg))
            if self.cuda:
                inp_tensor_pos = inp_tensor_pos.cuda()
                msk_tensor_pos = msk_tensor_pos.cuda()
                seg_tensor_pos = seg_tensor_pos.cuda()
                inp_tensor_neg = inp_tensor_neg.cuda()
                msk_tensor_neg = msk_tensor_neg.cuda()
                seg_tensor_neg = seg_tensor_neg.cuda()
            self.step += 1
            return (inp_tensor_pos, msk_tensor_pos, seg_tensor_pos,
                    inp_tensor_neg, msk_tensor_neg, seg_tensor_neg)
        else:
            self.step = 0
            if not self.test:
                self.shuffle()
            raise StopIteration()
class LisaCNNModel():
    # Factory-style class: __new__ returns a compiled Keras Model rather than
    # a LisaCNNModel instance, so `LisaCNNModel(**kwargs)` acts like a call
    # to build(**kwargs).

    def __new__(self, **kwargs):
        # NOTE: `self` here is actually the class object (cls); self.build
        # resolves to the plain function below, so kwargs map directly onto
        # build()'s parameters.
        return self.build(**kwargs)

    def build(img_rows=32, img_cols=32, num_channels=3, n_classes=18, nb_filters=64, input_layer_name=None, custom_input=None):
        # Build the LISA-CNN traffic-sign classifier: three conv+ReLU stages,
        # then flatten -> dense -> softmax.  If custom_input is given, the
        # input layer wraps that tensor; otherwise a fresh named Input is made.
        if (custom_input is not None):
            inputs = tf.keras.layers.Input(shape=(img_rows, img_cols, num_channels), tensor=custom_input)
        else:
            inputs = tf.keras.layers.Input(shape=(img_rows, img_cols, num_channels), name=input_layer_name)
        x = tf.keras.layers.Conv2D(filters=nb_filters, kernel_size=(8, 8), strides=(2, 2), padding='same', input_shape=(img_rows, img_cols, num_channels))(inputs)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Conv2D(filters=(nb_filters * 2), kernel_size=(6, 6), strides=(2, 2), padding='valid')(x)
        x = tf.keras.layers.Activation('relu')(x)
        x = tf.keras.layers.Conv2D(filters=(nb_filters * 2), kernel_size=(5, 5), strides=(1, 1), padding='valid')(x)
        # 'last_conv' / 'last_fc' names are presumably referenced elsewhere
        # (e.g. for feature extraction) -- keep them stable.
        x = tf.keras.layers.Activation('relu', name='last_conv')(x)
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(n_classes, name='last_fc')(x)
        x = tf.keras.layers.Activation('softmax', name='softmax')(x)
        model = tf.keras.models.Model(inputs, x, name='lisacnn')
        return model
def handler(event):
    """UDP round-trip latency benchmark for a serverless function.

    Sends `request-id` to (`server-address`, `server-port`) `repetitions`+1
    times (the first exchange is discarded as warm-up), records send/receive
    timestamps, writes them to /tmp/data.csv and uploads the csv to
    `output-bucket`.  Aborts after 5 consecutive timeouts.
    """
    request_id = event['request-id']
    address = event['server-address']
    port = event['server-port']
    repetitions = event['repetitions']
    output_bucket = event.get('output-bucket')
    times = []
    i = 0
    # 3 s timeout for the initial exchanges; tightened to 2 s after the
    # first successful round trip (set inside the loop below).
    socket.setdefaulttimeout(3)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('', 0))
    message = request_id.encode('utf-8')
    adr = (address, port)
    consecutive_failures = 0
    while (i < (repetitions + 1)):
        try:
            send_begin = datetime.now().timestamp()
            server_socket.sendto(message, adr)
            (msg, addr) = server_socket.recvfrom(1024)
            recv_end = datetime.now().timestamp()
        except socket.timeout:
            i += 1
            consecutive_failures += 1
            if (consecutive_failures == 5):
                print("Can't setup the connection")
                break
            continue
        if (i > 0):
            # Skip i == 0: the first round trip is treated as warm-up.
            times.append([i, send_begin, recv_end])
        i += 1
        consecutive_failures = 0
        server_socket.settimeout(2)
    server_socket.close()
    if (consecutive_failures != 5):
        with open('/tmp/data.csv', 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(['id', 'client_send', 'client_rcv'])
            for row in times:
                writer.writerow(row)
    # NOTE(review): the upload runs even on the 5-failure path, where
    # /tmp/data.csv was not (re)written -- confirm whether a stale or missing
    # file upload is intended here.
    client = storage.storage.get_instance()
    key = client.upload(output_bucket, 'results-{}.csv'.format(request_id), '/tmp/data.csv')
    return {'result': key}
class PreActivationBasicBlock(nn.Module):
    """3D ResNet basic block with pre-activation ordering (BN -> ReLU -> conv),
    following He et al., "Identity Mappings in Deep Residual Networks".
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(PreActivationBasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm3d(inplanes)
        self.conv1 = conv3x3x3(inplanes, planes, stride)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv2 = conv3x3x3(planes, planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: project the input when a downsample module exists.
        shortcut = self.downsample(x) if self.downsample is not None else x
        # Main branch: two pre-activated convolutions.
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        return out + shortcut
def p_arg2(p):
    """Parser action: wrap the first production symbol in an AST_Constant.

    `p` is a ply YaccProduction; line/column spans of symbol 1 are recorded
    as DebugInfo so the AST node can be traced back to the source.  p[1] is
    presumably an already-parsed literal value -- confirm against the grammar
    rule docstring this action is bound to.
    """
    (startl, endl) = p.linespan(1)
    (startc, endc) = p.lexspan(1)
    di = dace.dtypes.DebugInfo(startl, startc, endl, endc)
    p[0] = AST_Constant(di, p[1])
class WithTransform(CythonTransform, SkipDeclarations):
    """Rewrites `with` (and `async with`) statements into explicit
    __enter__/__exit__ (or __aenter__/__aexit__) call structure:
    a try/except that calls __exit__ with exc info on error and re-raises if
    __exit__ returns false, wrapped in a try/finally that calls __exit__ with
    (None, None, None) on normal completion.
    """

    def visit_WithStatNode(self, node):
        # Transform children first (inner `with` blocks before outer ones).
        self.visitchildren(node, 'body')
        pos = node.pos
        is_async = node.is_async
        (body, target, manager) = (node.body, node.target, node.manager)
        # manager.__enter__() / await manager.__aenter__()
        node.enter_call = ExprNodes.SimpleCallNode(pos, function=ExprNodes.AttributeNode(pos, obj=ExprNodes.CloneNode(manager), attribute=EncodedString(('__aenter__' if is_async else '__enter__')), is_special_lookup=True), args=[], is_temp=True)
        if is_async:
            node.enter_call = ExprNodes.AwaitExprNode(pos, arg=node.enter_call)
        if (target is not None):
            # `with m as t:` -- prepend the assignment of the enter result.
            body = Nodes.StatListNode(pos, stats=[Nodes.WithTargetAssignmentStatNode(pos, lhs=target, with_node=node), body])
        # Three-slot tuple receiving (exc_type, exc_value, traceback).
        excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[ExprNodes.ExcValueNode(pos) for _ in range(3)])
        # On exception: if not __exit__(*exc_info): re-raise.
        except_clause = Nodes.ExceptClauseNode(pos, body=Nodes.IfStatNode(pos, if_clauses=[Nodes.IfClauseNode(pos, condition=ExprNodes.NotNode(pos, operand=ExprNodes.WithExitCallNode(pos, with_stat=node, test_if_run=False, args=excinfo_target, await_expr=(ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None))), body=Nodes.ReraiseStatNode(pos))], else_clause=None), pattern=None, target=None, excinfo_target=excinfo_target)
        # finally: __exit__(None, None, None) -- skipped if the except branch
        # already ran __exit__ (test_if_run=True).
        node.body = Nodes.TryFinallyStatNode(pos, body=Nodes.TryExceptStatNode(pos, body=body, except_clauses=[except_clause], else_clause=None), finally_clause=Nodes.ExprStatNode(pos, expr=ExprNodes.WithExitCallNode(pos, with_stat=node, test_if_run=True, args=ExprNodes.TupleNode(pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]), await_expr=(ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None))), handle_error_case=False)
        return node

    def visit_ExprNode(self, node):
        # Expressions cannot contain statements; no need to recurse.
        return node
def align(input_file, output_file):
    """Copy a TSV stream, rewriting column 2 as space-separated characters.

    Each input line carries 2 or 3 tab-separated fields; field 1 and the
    optional field 3 pass through unchanged, field 2 is split into single
    characters joined by spaces (character-level target for alignment).
    """
    for raw_line in input_file:
        columns = raw_line.rstrip().split('\t')
        spaced_target = ' '.join(str(ch) for ch in columns[1])
        output_file.write(columns[0] + '\t' + spaced_target)
        if len(columns) == 3:
            output_file.write('\t' + columns[2])
        output_file.write('\n')
def Vamos():
    """Return the Vamos matroid as a CircuitClosuresMatroid on 8 elements.

    The Vamos matroid V_8 is the classic rank-4 example of a matroid that is
    not representable over any field; it is defined here by its circuit
    closures of ranks 3 and 4.
    """
    groundset = 'abcdefgh'
    closures = {
        3: ['abcd', 'abef', 'cdef', 'abgh', 'efgh'],
        4: [groundset],
    }
    matroid = CircuitClosuresMatroid(groundset=groundset, circuit_closures=closures)
    matroid.rename('Vamos: ' + repr(matroid))
    return matroid
class CTRLPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when torch is not installed.

    Instantiating (or, via the DummyObject metaclass, touching) this class
    raises an informative ImportError through requires_backends.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def create_arguments(callable: Callable, parser: ArgumentParser, exclude: list = None, prefix: str = ''):
    """Add CLI arguments to ``parser`` derived from ``callable``'s signature.

    Only parameters annotated as ``Annotated[type, metadata_dict]`` are
    turned into arguments; plain annotations are ignored.  ``metadata_dict``
    holds argparse keywords plus the extensions: ``type`` (override),
    ``bases``/``prefixes``/``exclude`` (recurse into base callables) and
    ``option`` (extra option strings, prefix-injected after the dashes).

    Args:
        callable: object whose ``inspect.signature`` is scanned.
        parser: target ArgumentParser, mutated in place.
        exclude: parameter names to skip (default: none).
        prefix: string prepended to every generated argument name.
    """
    # Bug fix: mutable default argument replaced with a None sentinel.
    exclude = [] if exclude is None else exclude
    arguments_added = [action.dest for action in parser._actions]
    parameters = inspect.signature(callable).parameters
    for param_name, param_obj in parameters.items():
        arg_name = prefix + param_name
        if param_name in exclude or arg_name in arguments_added:
            continue
        annot_obj = param_obj.annotation
        if get_origin(annot_obj) is not Annotated:
            continue
        annotation = get_args(annot_obj)
        # Bug fix: copy the metadata so repeated calls don't see the keys
        # injected below (the original mutated the shared Annotated dict).
        metadata: dict = dict(annotation[1])
        param_type = metadata.get('type', annotation[0])
        bases = metadata.get('bases', False)
        if bases:
            # Recurse into base callables instead of adding an argument.
            prefixes = metadata.get('prefixes', [''] * len(bases))
            for base_callable, pr in zip(bases, prefixes):
                create_arguments(callable=base_callable, parser=parser,
                                 exclude=metadata.get('exclude', []) + exclude,
                                 prefix=prefix + pr)
            continue
        metadata['type'] = param_type
        metadata['dest'] = arg_name
        if param_obj.default is not inspect.Parameter.empty:
            metadata['default'] = param_obj.default
        else:
            metadata['required'] = True
            metadata['default'] = SUPPRESS
            # Bug fix: tolerate a missing 'help' entry (was a KeyError).
            metadata['help'] = metadata.get('help', '') + ' (required)'
        if param_type is bool:
            # Flags accept an optional value; bare --flag means True.
            metadata['type'] = boolean
            metadata['nargs'] = '?'
            metadata['const'] = True
        elif get_origin(param_type) is Union:
            sub_types = get_args(param_type)
            if len(sub_types) == 2 and get_origin(sub_types[0]) is Literal:
                # Literal[...] | T: accept either the literals or a T value.
                metadata['type'] = ArgWithLiteral(main_type=sub_types[1], literals=get_args(sub_types[0]))
                metadata['metavar'] = (f'<{sub_types[1].__name__}>' + '|{'
                                       + ','.join(map(str, get_args(sub_types[0]))) + '}')
        if 'choices' not in metadata:
            # Bug fix: narrowed the bare except -- only AttributeError is
            # expected (types such as generics without __name__).
            try:
                metadata['metavar'] = metadata.get('metavar', f'<{param_type.__name__}>')
            except AttributeError:
                pass
        options = {f'--{arg_name}', f"--{arg_name.replace('_', '-')}"}
        custom_options = metadata.pop('option', [])
        custom_options = [custom_options] if isinstance(custom_options, str) else custom_options
        for option in custom_options:
            # Inject the prefix right after the last leading dash.
            idx = max(i for i, c in enumerate(option) if c == '-')
            option = option[:idx + 1] + prefix + option[idx + 1:]
            options.add(option)
        options = sorted(sorted(options), key=len)
        parser.add_argument(*options, **metadata)
def retrieve_performance(conn):
    """Aggregate per-bot success/error statistics for chart rendering.

    Runs one SQL aggregation over `results` joined with `bots`, grouped by
    (bot_id, mode), then splits the rows into 'dev' and eval series and
    packs them into Chart.js-style dataset dicts.

    Args:
        conn: an open psycopg2 connection.

    Returns:
        (total, rate, labels): absolute-count datasets, percentage datasets,
        and the bot labels taken from the 'dev' rows.
    """
    cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cursor.execute(' select *, \n 100 * success / total as success_rate,\n 100 * intent / total as intent_rate,\n 100 * ner / total as ner_rate,\n 100 * other / total as other_rate,\n turns / total as turns_avg\n from ( SELECT b.name, mode, \n sum(total) total, sum(success) success, sum(intent_error) intent, \n sum(ner_error) ner, sum(other_error) other, sum(turns) turns, bot_id\n FROM results r, bots b\n where r.bot_id = b.id\n group by bot_id, mode) t ')
    success_dev = []
    intent_dev = []
    ner_dev = []
    other_dev = []
    turns_dev = []
    success_rate_dev = []
    intent_rate_dev = []
    ner_rate_dev = []
    other_rate_dev = []
    avg_turns_dev = []
    success_eval = []
    intent_eval = []
    ner_eval = []
    other_eval = []
    turns_eval = []
    success_rate_eval = []
    intent_rate_eval = []
    ner_rate_eval = []
    other_rate_eval = []
    avg_turns_eval = []
    labels = []
    rows = cursor.fetchall()
    for row in rows:
        l = list(row)
        # Positional layout of the row (presumed from the SELECT above --
        # confirm if columns change): l[1]=mode, l[3..7]=summed counts,
        # l[8]=bot_id, l[9..13]=derived rates/averages.
        if (l[1] == 'dev'):
            # Labels are only collected from 'dev' rows; the eval series are
            # assumed to come back in the same bot order.
            labels.append(l[8])
            success_dev.append(l[3])
            intent_dev.append(l[4])
            ner_dev.append(l[5])
            other_dev.append(l[6])
            turns_dev.append(l[7])
            success_rate_dev.append(l[9])
            intent_rate_dev.append(l[10])
            ner_rate_dev.append(l[11])
            other_rate_dev.append(l[12])
            avg_turns_dev.append(l[13])
        else:
            success_eval.append(l[3])
            intent_eval.append(l[4])
            ner_eval.append(l[5])
            other_eval.append(l[6])
            turns_eval.append(l[7])
            success_rate_eval.append(l[9])
            intent_rate_eval.append(l[10])
            ner_rate_eval.append(l[11])
            other_rate_eval.append(l[12])
            avg_turns_eval.append(l[13])
    # Chart.js dataset dicts: Stack 0 = dev, Stack 1 = eval.
    total = [{'label': 'Success Dev', 'data': success_dev, 'backgroundColor': 'rgb(75, 192, 5)', 'stack': 'Stack 0'}, {'label': 'Intent Dev', 'data': intent_dev, 'backgroundColor': 'rgb(75, 192, 192)', 'stack': 'Stack 0'}, {'label': 'NER Dev', 'data': ner_dev, 'backgroundColor': 'rgb(75, 12, 192)', 'stack': 'Stack 0'}, {'label': 'Other Dev', 'data': other_dev, 'backgroundColor': 'rgb(175, 42, 50)', 'stack': 'Stack 0'}, {'label': 'Success Eval', 'data': success_eval, 'backgroundColor': 'rgb(75, 192, 5)', 'stack': 'Stack 1'}, {'label': 'Intent Eval', 'data': intent_eval, 'backgroundColor': 'rgb(75, 192, 192)', 'stack': 'Stack 1'}, {'label': 'NER Eval', 'data': ner_eval, 'backgroundColor': 'rgb(75, 12, 192)', 'stack': 'Stack 1'}, {'label': 'Other Eval', 'data': other_eval, 'backgroundColor': 'rgb(175, 42, 50)', 'stack': 'Stack 1'}]
    rate = [{'label': 'Success Dev', 'data': success_rate_dev, 'backgroundColor': 'rgb(75, 192, 5)', 'stack': 'Stack 0'}, {'label': 'Intent Dev', 'data': intent_rate_dev, 'backgroundColor': 'rgb(75, 192, 192)', 'stack': 'Stack 0'}, {'label': 'NER Dev', 'data': ner_rate_dev, 'backgroundColor': 'rgb(75, 12, 192)', 'stack': 'Stack 0'}, {'label': 'Other Dev', 'data': other_rate_dev, 'backgroundColor': 'rgb(175, 42, 50)', 'stack': 'Stack 0'}, {'label': 'Success Eval', 'data': success_rate_eval, 'backgroundColor': 'rgb(75, 192, 5)', 'stack': 'Stack 1'}, {'label': 'Intent Eval', 'data': intent_rate_eval, 'backgroundColor': 'rgb(75, 192, 192)', 'stack': 'Stack 1'}, {'label': 'NER Eval', 'data': ner_rate_eval, 'backgroundColor': 'rgb(75, 12, 192)', 'stack': 'Stack 1'}, {'label': 'Other Eval', 'data': other_rate_eval, 'backgroundColor': 'rgb(175, 42, 50)', 'stack': 'Stack 1'}]
    cursor.close()
    return (total, rate, labels)
class TestGenerator(TestCase):
    """Tests for the meta-learning batch Generator and its DataSequencer
    frame-embedding strategies (TF1 session API)."""

    def _fake_dataset_load(self, tasks, examples):
        # Build a tasks x examples grid of identical fake trajectories; the
        # image path points at a shared test_task folder of fixture frames.
        fake_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../test_data', 'test_task')
        data = [[{'image_files': fake_folder, 'states': np.ones((TIME_HORIZON, STATE_SIZE)), 'actions': np.ones((TIME_HORIZON, OUTPUT_SIZE))} for _ in range(examples)] for _ in range(tasks)]
        # Same structure is reused for both train and validation splits.
        return (data, data)

    def _fake_dataset(self, tasks, examples):
        # Mock dataset exposing only what Generator consumes.
        dataset = MagicMock()
        dataset.time_horizon = TIME_HORIZON
        dataset.training_set = MagicMock(return_value=self._fake_dataset_load(tasks, examples))
        return dataset

    def _embedding_strategy(self, scope, strategy, frames, batch_size=BATCH_SIZE, support_size=SUPPORT_SIZE, query_size=QUERY_SIZE):
        # Shared driver: run one batch through the generator and check every
        # output tensor shape for the given embedding `strategy`, where
        # `frames` is the number of frames that strategy embeds per example.
        dataset = self._fake_dataset(TASKS, EXAMPLES)
        data_seq = DataSequencer(strategy, TIME_HORIZON)
        gen = Generator(dataset, batch_size, support_size, query_size, data_sequencer=data_seq)
        with tf.variable_scope(scope):
            sess = tf.InteractiveSession()
            (train_handle, val_handle) = gen.get_handles(sess)
            (embed_images, embnet_states, embnet_outputs, ctrnet_images, ctrnet_states, ctrnet_outputs) = sess.run(gen.next_element, feed_dict={gen.handle: train_handle})
            self.assertEqual(embed_images.shape, ((batch_size, (support_size + query_size), frames) + IMG_SHAPE))
            self.assertEqual(embnet_states.shape, (batch_size, (support_size + query_size), frames, STATE_SIZE))
            self.assertEqual(embnet_outputs.shape, (batch_size, (support_size + query_size), frames, OUTPUT_SIZE))
            self.assertEqual(ctrnet_images.shape, ((batch_size, 2) + IMG_SHAPE))
            self.assertEqual(ctrnet_states.shape, (batch_size, 2, STATE_SIZE))
            self.assertEqual(ctrnet_outputs.shape, (batch_size, 2, OUTPUT_SIZE))

    def test_first_frame_embedding(self):
        self._embedding_strategy('test_first_frame_embedding', 'first', 1)

    def test_last_frame_embedding(self):
        self._embedding_strategy('test_last_frame_embedding', 'last', 1)

    def test_first_last_frame_embedding(self):
        self._embedding_strategy('test_first_last_frame_embedding', 'first_last', 2)

    def test_all_frame_embedding(self):
        self._embedding_strategy('test_all_frame_embedding', 'all', TIME_HORIZON)

    def test_invalid_frame_embedding_throws_error(self):
        # Unknown strategy names must be rejected by DataSequencer.
        with self.assertRaises(ValueError):
            self._embedding_strategy('test_invalid_frame_embedding_throws_error', 'invalid', 1)

    def test_support_and_query_more_than_samples(self):
        # Requesting more support examples than exist per task must fail.
        with self.assertRaises(Exception):
            self._embedding_strategy('test_support_and_query_more_than_samples', 'first', 1, support_size=(TIME_HORIZON + 1))
class OpenAIGPTPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when torch is not installed.

    Instantiating (or, via the DummyObject metaclass, touching) this class
    raises an informative ImportError through requires_backends.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class BitMasked(LayoutBuilder):
    """LayoutBuilder for a bit-packed option-type array (BitMaskedArray).

    Validity flags are packed 8 per byte into a growable mask buffer.
    ``lsb_order`` selects whether bit positions run from the least- or
    most-significant bit; ``valid_when`` selects which bit value means
    "valid" (the stored byte is inverted on flush when valid_when is False).

    Bug fix: ``content``/``valid_when``/``lsb_order``/``form`` are restored
    as properties -- the code below (and ``numbatype``) reads them without
    calling, e.g. ``self.content.form``, which only works when they are
    decorated with @property.
    """

    def __init__(self, dtype, content, valid_when, lsb_order, *, parameters=None, initial=1024, resize=8.0):
        self._mask = ak.numba.GrowableBuffer(dtype=dtype, initial=initial, resize=resize)
        self._content = content
        self._valid_when = valid_when
        self._lsb_order = lsb_order
        # [0] = the byte currently being filled, [1] = bits used in it.
        self._current_byte_index = np.zeros((2,), dtype=np.uint8)
        self._mask.append(self._current_byte_index[0])
        if self._lsb_order:
            # Bit n of a byte is (1 << n): LSB first.
            self._cast = np.array([np.uint8(1 << n) for n in range(8)])
        else:
            # Bit n of a byte is (128 >> n): MSB first.
            self._cast = np.array([np.uint8(128 >> n) for n in range(8)])
        self._init(parameters)

    def __repr__(self):
        return f'ak.numba.lb.BitMasked({self._mask.dtype}, {self._content}, {self._valid_when}, {self._lsb_order}, parameters={self._parameters})'

    def numbatype(self):
        """Return the numba type wrapper for this builder."""
        import numba

        return ak._connect.numba.layoutbuilder.BitMaskedType(numba.from_dtype(self._mask.dtype), self.content, self.valid_when, self.lsb_order, numba.types.StringLiteral(self._parameters))

    @property
    def content(self):
        """The builder for the wrapped (non-null) values."""
        return self._content

    @property
    def valid_when(self):
        """Bit value that marks an entry as valid."""
        return self._valid_when

    @property
    def lsb_order(self):
        """True if bit 0 is the least-significant bit of each mask byte."""
        return self._lsb_order

    @property
    def form(self):
        """The ak.forms form describing the built layout."""
        return ak.forms.BitMaskedForm(ak.index._dtype_to_form[self._mask.dtype], self.content.form, self.valid_when, self.lsb_order, parameters=self._parameters)

    def _append_begin(self):
        # Start a new mask byte once the current one holds 8 bits.
        if self._current_byte_index[1] == 8:
            self._current_byte_index[0] = np.uint8(0)
            self._mask.append(self._current_byte_index[0])
            self._current_byte_index[1] = 0

    def _append_end(self):
        # Advance the bit cursor and flush the working byte into the buffer,
        # inverting it when a 0-bit (not a 1-bit) means "valid".
        self._current_byte_index[1] += 1
        if self._valid_when:
            self._mask._panels[-1][self._mask._length_pos[1] - 1] = self._current_byte_index[0]
        else:
            self._mask._panels[-1][self._mask._length_pos[1] - 1] = ~self._current_byte_index[0]

    def append_valid(self):
        """Mark the next entry valid; returns the content builder to fill."""
        self._append_begin()
        # Set the bit for the current position within the working byte.
        self._current_byte_index[0] |= self._cast[self._current_byte_index[1]]
        self._append_end()
        return self._content

    def extend_valid(self, size):
        """Mark the next `size` entries valid."""
        for _ in range(size):
            self.append_valid()
        return self._content

    def append_invalid(self):
        """Mark the next entry missing (its bit stays cleared)."""
        self._append_begin()
        self._append_end()
        return self._content

    def extend_invalid(self, size):
        """Mark the next `size` entries missing."""
        for _ in range(size):
            self.append_invalid()
        return self._content

    def clear(self):
        self._mask.clear()
        self._content.clear()

    def __len__(self):
        # Full bytes contribute 8 entries each; the last byte contributes
        # only the bits written so far.
        return (len(self._mask) if (len(self._mask) == 0) else (((len(self._mask) - 1) * 8) + self._current_byte_index[1]))

    def is_valid(self, error: str):
        """Check mask/content length agreement; delegate to content."""
        if len(self._content) != len(self):
            # NOTE(review): rebinding the `error` parameter cannot report the
            # message back to the caller (Python strings are immutable) --
            # mirrors the C++ out-param idiom; confirm intended reporting.
            error = f'BitMasked has content length {len(self._content)} but bit mask length {len(self)}'
            return False
        else:
            return self._content.is_valid(error)

    def snapshot(self) -> ak.contents.Content:
        """Materialize the current state as an ak.contents.BitMaskedArray."""
        return ak.contents.BitMaskedArray(ak.index.Index(self._mask.snapshot()), self._content.snapshot(), valid_when=self._valid_when, length=len(self), lsb_order=self._lsb_order, parameters=self._parameters)
class _SimpleDistributionMixin():
def log_prob(self, value):
return self._pdf.log_prob(value)
def expected_data(self):
return self._pdf.expected_data()
def sample(self, sample_shape=()):
return self._pdf.sample(sample_shape) |
def merge_hparams(p1, p2):
    """Combine two HParams objects into a new one.

    Every hyperparameter of ``p1`` is added first, then every one of ``p2``
    -- so duplicate names are handled however HParams.add_hparam handles a
    re-added key (presumably an error; confirm upstream).
    """
    merged = HParams()
    for source in (p1.values(), p2.values()):
        for name, value in source.items():
            merged.add_hparam(name, value)
    return merged
def element2Object(element):
    """Shallow-copy a mapping-like element into a plain dict.

    Iterates the element's keys and looks each one up, so any object
    supporting iteration and __getitem__ works, not just dict.
    """
    return {key: element[key] for key in element}
def test_load_annotation():
    """Template test: load an annotation fixture and check its type/values."""
    annotation_path = 'tests/resources/sound_datasets/dataset/annotation/some_id.pv'
    annotation_data = example.load_annotation(annotation_path)
    # NOTE(review): comparing a type object to the string
    # 'some_annotation_type' is always False -- this looks like unfilled
    # cookiecutter boilerplate; replace with the real annotation class.
    assert (type(annotation_data) == 'some_annotation_type')
    assert (type(annotation_data.times) is np.ndarray)
    assert np.array_equal(annotation_data.times, np.array([0.016, 0.048]))
def load_dataset(args, **kwargs):
    """Dispatch to the loader matching ``args.dataset``.

    Supported names: 'mnist', 'caltech', 'freyfaces', 'omniglot'.

    Returns:
        (train_loader, val_loader, test_loader, args) as produced by the
        selected loader.

    Raises:
        Exception: if ``args.dataset`` is not a supported name.
    """
    name = args.dataset
    if name == 'mnist':
        loader = load_static_mnist
    elif name == 'caltech':
        loader = load_caltech101silhouettes
    elif name == 'freyfaces':
        loader = load_freyfaces
    elif name == 'omniglot':
        loader = load_omniglot
    else:
        raise Exception('Wrong name of the dataset!')
    train_loader, val_loader, test_loader, args = loader(args, **kwargs)
    return (train_loader, val_loader, test_loader, args)
class AutoModelForTokenClassification():
    """Placeholder raising an informative error when PyTorch is missing."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method here; the transformers
    # dummy-object convention decorates from_pretrained with @classmethod --
    # confirm whether the decorator was lost.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def train_step():
    """Run one optimization step and return the batch loss.

    Relies on module-level globals: `model`, `criterion`, `optimizer`,
    `data_call`, `args`, and `device` -- presumably set up by the training
    script before this is called.
    """
    model.train()
    model.zero_grad()
    # Sample a fresh batch; data_call presumably generates synthetic data
    # from args.gt_rules seeded by args.data_seed -- confirm at call site.
    (data, label) = data_call(args.batch_size, args.gt_rules, args.data_seed)
    data = torch.Tensor(data).to(device)
    label = torch.Tensor(label).to(device)
    # `score` (auxiliary model output) is intentionally unused here.
    (out, score) = model(data)
    loss = criterion(out, label)
    loss.backward()
    optimizer.step()
    return loss
def _leading_trailing(a, edgeitems, index=()):
axis = len(index)
if (axis == a.ndim):
return a[index]
if (a.shape[axis] > (2 * edgeitems)):
return concatenate((_leading_trailing(a, edgeitems, (index + np.index_exp[:edgeitems])), _leading_trailing(a, edgeitems, (index + np.index_exp[(- edgeitems):]))), axis=axis)
else:
return _leading_trailing(a, edgeitems, (index + np.index_exp[:])) |
class LeanDescContext():
    # Context carried while generating Lean descriptions of Cairo code.
    # NOTE(review): the dataclasses.field defaults below only take effect
    # under a @dataclass decorator, which is not visible here -- presumably
    # stripped; confirm at the declaration site.

    # Optional expression simplifier.
    simplifier: Optional[LeanExprSimplifier]
    # Cairo type of the expression being described, if known.
    cairo_type: Optional[CairoType]
    # Struct definitions available for member lookup.
    struct_defs: LeanStructDefs
    # Identifier resolution for the program.
    identifiers: IdentifierManager
    # Scope of the enclosing function, if any.
    func_scope: Optional[ScopedName]
    # Namespaces opened for name resolution.
    open_namespaces: List[ScopedName]
    # Base name and next number for generated division variables.
    div_var_basename: str = '0_'
    div_var_startnum: int = 0
    # Maps an offset/id to a generated local variable name.
    local_vars: Dict[(int, str)] = dataclasses.field(default_factory=(lambda : {}))
    # Per-name counter used to disambiguate repeated names.
    name_sub: Dict[(str, int)] = dataclasses.field(default_factory=(lambda : {}))
    # Optional prefix applied to generated names.
    prefix: Optional[str] = None
    # Temp-var allocations originating from hints.
    hint_vars: List[LeanPreprocessedTempVarAlloc] = dataclasses.field(default_factory=(lambda : []))
class TestIf(test_util.TestCase):
    """End-to-end test of caffe2 If/Else control ops inside NetBuilder:
    trains a tiny linear model whose ground-truth weights are switched per
    iteration by a random condition, and checks the learned weights."""

    def testIf(self):
        # Ground-truth parameters for the two branches.
        W_a_values = [2.0, 1.5]
        B_a_values = [0.5]
        W_b_values = [7.0, 3.5]
        B_b_values = [1.5]
        with NetBuilder(_use_control_ops=True) as init_nb:
            # Trainable parameters (random init) ...
            W_a = ops.UniformFill([], 'W_a', shape=[1, 2], min=(- 1.0), max=1.0)
            B_a = ops.ConstantFill([], 'B_a', shape=[1], value=0.0)
            W_b = ops.UniformFill([], 'W_b', shape=[1, 2], min=(- 1.0), max=1.0)
            B_b = ops.ConstantFill([], 'B_b', shape=[1], value=0.0)
            # ... and the fixed ground-truth blobs used to generate labels.
            W_gt_a = ops.GivenTensorFill([], 'W_gt_a', shape=[1, 2], values=W_a_values)
            B_gt_a = ops.GivenTensorFill([], 'B_gt_a', shape=[1], values=B_a_values)
            W_gt_b = ops.GivenTensorFill([], 'W_gt_b', shape=[1, 2], values=W_b_values)
            B_gt_b = ops.GivenTensorFill([], 'B_gt_b', shape=[1], values=B_b_values)
        params = [W_gt_a, B_gt_a, W_a, B_a, W_gt_b, B_gt_b, W_b, B_b]
        with NetBuilder(_use_control_ops=True, initial_scope=params) as train_nb:
            Y_pred = ops.ConstantFill([], 'Y_pred', shape=[1], value=0.0)
            Y_noise = ops.ConstantFill([], 'Y_noise', shape=[1], value=0.0)
            # `switch` is resampled every run (run_once=0): ~50/50 branch pick.
            switch = ops.UniformFill([], 'switch', shape=[1], min=(- 1.0), max=1.0, run_once=0)
            zero = ops.ConstantFill([], 'zero', shape=[1], value=0.0)
            X = ops.GaussianFill([], 'X', shape=[4096, 2], mean=0.0, std=1.0, run_once=0)
            noise = ops.GaussianFill([], 'noise', shape=[4096, 1], mean=0.0, std=1.0, run_once=0)
            with ops.IfNet(ops.LT([switch, zero])):
                # Branch A: labels from (W_gt_a, B_gt_a); predict with (W_a, B_a).
                Y_gt = ops.FC([X, W_gt_a, B_gt_a], 'Y_gt')
                ops.Add([Y_gt, noise], Y_noise)
                ops.FC([X, W_a, B_a], Y_pred)
            with ops.Else():
                # Branch B: labels from (W_gt_b, B_gt_b); predict with (W_b, B_b).
                Y_gt = ops.FC([X, W_gt_b, B_gt_b], 'Y_gt')
                ops.Add([Y_gt, noise], Y_noise)
                ops.FC([X, W_b, B_b], Y_pred)
            dist = ops.SquaredL2Distance([Y_noise, Y_pred], 'dist')
            loss = dist.AveragedLoss([], ['loss'])
        assert (len(init_nb.get()) == 1), 'Expected a single init net produced'
        assert (len(train_nb.get()) == 1), 'Expected a single train net produced'
        train_net = train_nb.get()[0]
        gradient_map = train_net.AddGradientOperators([loss])
        init_net = init_nb.get()[0]
        # SGD bookkeeping: iteration counter, step-decayed LR, and manual
        # weighted-sum updates (param += LR * grad, LR is negative).
        ITER = init_net.ConstantFill([], 'ITER', shape=[1], value=0, dtype=core.DataType.INT64)
        train_net.Iter(ITER, ITER)
        LR = train_net.LearningRate(ITER, 'LR', base_lr=(- 0.1), policy='step', stepsize=20, gamma=0.9)
        ONE = init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
        train_net.WeightedSum([W_a, ONE, gradient_map[W_a], LR], W_a)
        train_net.WeightedSum([B_a, ONE, gradient_map[B_a], LR], B_a)
        train_net.WeightedSum([W_b, ONE, gradient_map[W_b], LR], W_b)
        train_net.WeightedSum([B_b, ONE, gradient_map[B_b], LR], B_b)
        workspace.RunNetOnce(init_net)
        workspace.CreateNet(train_net)
        for _epoch in range(1000):
            workspace.RunNet(train_net.Proto().name)
        # Both branches' parameters should converge to their ground truth.
        values_map = {'W_a': W_a_values, 'B_a': B_a_values, 'W_b': W_b_values, 'B_b': B_b_values}
        train_eps = 0.01
        for (blob_name, values) in values_map.items():
            trained_values = workspace.FetchBlob(blob_name)
            if (trained_values.ndim == 2):
                self.assertEqual(trained_values.shape[0], 1)
                trained_values = trained_values[0][:]
            else:
                self.assertEqual(trained_values.ndim, 1)
            self.assertEqual(trained_values.size, len(values))
            for idx in range(len(trained_values)):
                self.assertTrue((abs((trained_values[idx] - values[idx])) < train_eps))
# Bug fix: the decorator head was lost -- a bare `.parametrize(...)` line is
# a syntax error; restore the standard pytest marker.
@pytest.mark.parametrize('tdf', [tdf_test])
def test_plot_stops_tdf(tdf):
    """Stops detected from a trajectory can be drawn onto its folium map."""
    map_f = tdf.plot_trajectory()
    stdf = detection.stay_locations(tdf)
    map_f = stdf.plot_stops(map_f=map_f)
    assert isinstance(map_f, folium.folium.Map)
def calc_scoot(real, fake, level=6, N_blocks=4):
    """Patch-wise co-occurrence texture similarity between two PIL images.

    Both images are tiled into an N_blocks x N_blocks grid; each patch pair
    is quantized to `level` gray levels and compared via the L2 norm of the
    difference of their co-occurrence statistics, mapped into (0, 1].

    Args:
        real, fake: PIL images of identical size.
        level: quantization level passed to quant()/calc_comat().
        N_blocks: grid size per image dimension.

    Returns:
        (scores, mean_score): per-patch similarity scores and their mean.
    """
    assert real.size == fake.size
    # Bug fix: patch dimensions previously hard-coded a divisor of 4,
    # silently ignoring the N_blocks parameter for any other grid size.
    x_dim = np.floor(real.size[0] / N_blocks).astype(int)
    y_dim = np.floor(real.size[1] / N_blocks).astype(int)
    real_patches = []
    fake_patches = []
    for i in range(N_blocks):
        for j in range(N_blocks):
            box = (x_dim * i, y_dim * j, x_dim * (i + 1), y_dim * (j + 1))
            real_patches.append(quant(real.crop(box), level=level))
            fake_patches.append(quant(fake.crop(box), level=level))
    scores = []
    for real_patch, fake_patch in zip(real_patches, fake_patches):
        real_stat = calc_comat(real_patch, level=level)
        fake_stat = calc_comat(fake_patch, level=level)
        # Identical statistics give a score of 1; larger distances shrink it.
        scores.append(1 / (1 + np.linalg.norm(real_stat - fake_stat, ord=2)))
    return (scores, np.mean(scores))
def load_reuters():
    """Load the Reuters money-fx train/test split from the local data home."""
    reuters_dir = os.path.join(get_data_home(), 'reuters')
    train_path = os.path.join(reuters_dir, 'money-fx.trn')
    test_path = os.path.join(reuters_dir, 'money-fx.tst')
    return _load(train_path, test_path, 'reuters')
class WasserstienGAN(GAN):
    """Wasserstein GAN (WGAN) variant of the base GAN.

    Differences from the vanilla GAN: the discriminator acts as a critic whose
    weights are clipped after every update (Lipschitz constraint), and the
    critic is trained for several iterations per generator step.

    NOTE(review): the class name keeps the original 'Wasserstien' spelling;
    renaming would break existing callers.
    """

    def __init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir, clip_values=((- 0.01), 0.01), critic_iterations=5):
        # clip_values: (min, max) interval the critic weights are clamped into
        # after each critic update.
        # critic_iterations: number of critic updates per generator update.
        self.critic_iterations = critic_iterations
        self.clip_values = clip_values
        GAN.__init__(self, z_dim, crop_image_size, resized_image_size, batch_size, data_dir)

    def _generator(self, z, dims, train_phase, activation=tf.nn.relu, scope_name='generator'):
        """Map latent vectors z to images via a stack of strided transposed convs.

        dims[0] is the channel count of the initial (smallest) feature map and
        dims[-1] the output image channel count; each stage doubles the spatial
        size. Returns the tanh-activated image tensor in [-1, 1].
        """
        N = len(dims)
        # Spatial size of the first feature map so that (N-1) x2 upsamplings
        # reach resized_image_size.
        image_size = (self.resized_image_size // (2 ** (N - 1)))
        with tf.variable_scope(scope_name) as scope:
            # Project z to the initial feature volume with a dense layer.
            W_z = utils.weight_variable([self.z_dim, ((dims[0] * image_size) * image_size)], name='W_z')
            h_z = tf.matmul(z, W_z)
            h_z = tf.reshape(h_z, [(- 1), image_size, image_size, dims[0]])
            h_bnz = utils.batch_norm(h_z, dims[0], train_phase, scope='gen_bnz')
            h = activation(h_bnz, name='h_z')
            utils.add_activation_summary(h)
            # Intermediate upsampling stages (deconv + batch norm + activation).
            for index in range((N - 2)):
                image_size *= 2
                W = utils.weight_variable([4, 4, dims[(index + 1)], dims[index]], name=('W_%d' % index))
                b = tf.zeros([dims[(index + 1)]])
                deconv_shape = tf.pack([tf.shape(h)[0], image_size, image_size, dims[(index + 1)]])
                h_conv_t = utils.conv2d_transpose_strided(h, W, b, output_shape=deconv_shape)
                h_bn = utils.batch_norm(h_conv_t, dims[(index + 1)], train_phase, scope=('gen_bn%d' % index))
                h = activation(h_bn, name=('h_%d' % index))
                utils.add_activation_summary(h)
            # Final upsampling to the output resolution; tanh bounds pixels.
            image_size *= 2
            W_pred = utils.weight_variable([4, 4, dims[(- 1)], dims[(- 2)]], name='W_pred')
            b = tf.zeros([dims[(- 1)]])
            deconv_shape = tf.pack([tf.shape(h)[0], image_size, image_size, dims[(- 1)]])
            h_conv_t = utils.conv2d_transpose_strided(h, W_pred, b, output_shape=deconv_shape)
            pred_image = tf.nn.tanh(h_conv_t, name='pred_image')
            utils.add_activation_summary(pred_image)
            return pred_image

    def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name='discriminator', scope_reuse=False):
        """Critic network: strided convolutions ending in an unbounded score.

        Returns a (None, score, None) triple to keep the base-class interface
        (the probability and feature slots are unused by WGAN).
        """
        N = len(dims)
        with tf.variable_scope(scope_name) as scope:
            if scope_reuse:
                scope.reuse_variables()
            h = input_images
            skip_bn = True  # no batch norm on the first conv layer
            for index in range((N - 2)):
                W = utils.weight_variable([4, 4, dims[index], dims[(index + 1)]], name=('W_%d' % index))
                b = tf.zeros([dims[(index + 1)]])
                h_conv = utils.conv2d_strided(h, W, b)
                if skip_bn:
                    h_bn = h_conv
                    skip_bn = False
                else:
                    h_bn = utils.batch_norm(h_conv, dims[(index + 1)], train_phase, scope=('disc_bn%d' % index))
                h = activation(h_bn, name=('h_%d' % index))
                utils.add_activation_summary(h)
            # Final conv produces the raw critic score (no sigmoid in WGAN).
            W_pred = utils.weight_variable([4, 4, dims[(- 2)], dims[(- 1)]], name='W_pred')
            b = tf.zeros([dims[(- 1)]])
            h_pred = utils.conv2d_strided(h, W_pred, b)
        return (None, h_pred, None)

    def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        """Define WGAN losses from raw critic scores and attach summaries.

        NOTE(review): the sign convention (real - fake for the critic, fake for
        the generator) depends on how the base class applies the optimizers —
        confirm against the GAN base class before changing.
        """
        self.discriminator_loss = tf.reduce_mean((logits_real - logits_fake))
        self.gen_loss = tf.reduce_mean(logits_fake)
        tf.scalar_summary('Discriminator_loss', self.discriminator_loss)
        tf.scalar_summary('Generator_loss', self.gen_loss)

    def train_model(self, max_iterations):
        """Run the WGAN training loop.

        Per generator step the critic is updated `critic_itrs` times, with its
        weights clipped after each update. Early iterations (and every 500th)
        use 25 critic updates, as in the original WGAN training recipe.
        """
        try:
            print('Training Wasserstein GAN model...')
            # Ops that clamp every critic variable into [clip_min, clip_max].
            clip_discriminator_var_op = [var.assign(tf.clip_by_value(var, self.clip_values[0], self.clip_values[1])) for var in self.discriminator_variables]
            start_time = time.time()

            def get_feed_dict(train_phase=True):
                # Fresh uniform latent batch for each step.
                batch_z = np.random.uniform((- 1.0), 1.0, size=[self.batch_size, self.z_dim]).astype(np.float32)
                feed_dict = {self.z_vec: batch_z, self.train_phase: train_phase}
                return feed_dict
            for itr in xrange(1, max_iterations):
                # Warm-up / periodic phases train the critic harder.
                if ((itr < 25) or ((itr % 500) == 0)):
                    critic_itrs = 25
                else:
                    critic_itrs = self.critic_iterations
                for critic_itr in range(critic_itrs):
                    self.sess.run(self.discriminator_train_op, feed_dict=get_feed_dict(True))
                    self.sess.run(clip_discriminator_var_op)
                feed_dict = get_feed_dict(True)
                self.sess.run(self.generator_train_op, feed_dict=feed_dict)
                # Periodic logging / checkpointing.
                if ((itr % 100) == 0):
                    summary_str = self.sess.run(self.summary_op, feed_dict=feed_dict)
                    self.summary_writer.add_summary(summary_str, itr)
                if ((itr % 200) == 0):
                    stop_time = time.time()
                    duration = ((stop_time - start_time) / 200.0)
                    start_time = stop_time
                    (g_loss_val, d_loss_val) = self.sess.run([self.gen_loss, self.discriminator_loss], feed_dict=feed_dict)
                    print(('Time: %g/itr, Step: %d, generator loss: %g, discriminator_loss: %g' % (duration, itr, g_loss_val, d_loss_val)))
                if ((itr % 5000) == 0):
                    self.saver.save(self.sess, (self.logs_dir + 'model.ckpt'), global_step=itr)
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        except KeyboardInterrupt:
            print('Ending Training...')
        finally:
            # Always stop the input-queue threads, even on error/interrupt.
            self.coord.request_stop()
            self.coord.join(self.threads)
class TestSequenceBatch(object):
    """Unit tests for SequenceBatch: construction, mask validation, splitting,
    concatenation, reductions and embedding lookup.

    NOTE(review): sequences/vocab/some_seq_batch are consumed as injected test
    arguments below, so they were presumably decorated with @pytest.fixture;
    the decorators do not appear in this copy of the file — confirm upstream.
    """

    def sequences(self):
        # Three sequences of decreasing length, including an empty one.
        return [['a', 'b', 'b', 'c'], ['c'], []]

    def vocab(self):
        # '<unk>' sits at index 0, so padding and unknown tokens both map to 0.
        return SimpleVocab(['<unk>', 'a', 'b', 'c', '<start>', '<stop>'])

    def test_from_sequences(self, sequences, vocab):
        # Values are vocab indices right-padded with 0; mask marks real tokens.
        seq_batch = SequenceBatch.from_sequences(sequences, vocab)
        assert_tensor_equal(seq_batch.values, np.array([[1, 2, 2, 3], [3, 0, 0, 0], [0, 0, 0, 0]], dtype=np.int32))
        assert_tensor_equal(seq_batch.mask, np.array([[1, 1, 1, 1], [1, 0, 0, 0], [0, 0, 0, 0]], dtype=np.float32))

    def test_min_seq_length(self, vocab):
        # Even all-empty sequences are padded out to min_seq_length columns.
        seq_batch = SequenceBatch.from_sequences([[], [], []], vocab, min_seq_length=2)
        assert_tensor_equal(seq_batch.values, np.zeros((3, 2)))
        assert_tensor_equal(seq_batch.mask, np.zeros((3, 2)))

    def test_mask_validation(self):
        # A valid mask is binary and left-justified (1s before 0s per row).
        mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]))
        values = mask
        SequenceBatch(values, mask)
        # Non-binary entries must be rejected.
        non_binary_mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 0], [1, 1.2, 0, 0], [1, 1, 1, 0]]))
        with pytest.raises(ValueError):
            SequenceBatch(mask, non_binary_mask)
        # A 1 appearing after a 0 (not left-justified) must be rejected.
        non_left_justified_mask = GPUVariable(torch.FloatTensor([[1, 0, 0, 1], [1, 1, 0, 0], [1, 1, 1, 0]]))
        with pytest.raises(ValueError):
            SequenceBatch(mask, non_left_justified_mask)

    def test_split(self):
        # split() yields one SequenceBatch element per time step.
        input_embeds = GPUVariable(torch.LongTensor([[[1, 2], [2, 3], [5, 6]], [[4, 8], [3, 5], [0, 0]]]))
        input_mask = GPUVariable(torch.FloatTensor([[1, 1, 1], [1, 1, 0]]))
        sb = SequenceBatch(input_embeds, input_mask)
        elements = sb.split()
        input_list = [e.values for e in elements]
        mask_list = [e.mask for e in elements]
        assert (len(input_list) == 3)
        assert_tensor_equal(input_list[0], [[1, 2], [4, 8]])
        assert_tensor_equal(input_list[1], [[2, 3], [3, 5]])
        assert_tensor_equal(input_list[2], [[5, 6], [0, 0]])
        assert (len(mask_list) == 3)
        assert_tensor_equal(mask_list[0], [[1], [1]])
        assert_tensor_equal(mask_list[1], [[1], [1]])
        assert_tensor_equal(mask_list[2], [[1], [0]])

    def test_cat(self):
        # cat() stacks per-time-step elements back into one batch along time.
        x1 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[1, 2], [3, 4]], [[8, 2], [9, 0]]])), GPUVariable(torch.FloatTensor([[1], [1]])))
        x2 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[(- 1), 20], [3, 40]], [[(- 8), 2], [9, 10]]])), GPUVariable(torch.FloatTensor([[1], [0]])))
        x3 = SequenceBatchElement(GPUVariable(torch.FloatTensor([[[(- 1), 20], [3, 40]], [[(- 8), 2], [9, 10]]])), GPUVariable(torch.FloatTensor([[0], [0]])))
        result = SequenceBatch.cat([x1, x2, x3])
        assert_tensor_equal(result.values, [[[[1, 2], [3, 4]], [[(- 1), 20], [3, 40]], [[(- 1), 20], [3, 40]]], [[[8, 2], [9, 0]], [[(- 8), 2], [9, 10]], [[(- 8), 2], [9, 10]]]])
        assert_tensor_equal(result.mask, [[1, 1, 0], [1, 0, 0]])

    def some_seq_batch(self):
        # Batch of 3 sequences with 2, 1 and 0 valid (unmasked) steps.
        values = GPUVariable(torch.FloatTensor([[[1, 2], [4, 5], [4, 4]], [[0, 4], [43, 5], [(- 1), 20]], [[(- 1), 20], [43, 5], [0, 0]]]))
        mask = GPUVariable(torch.FloatTensor([[1, 1, 0], [1, 0, 0], [0, 0, 0]]))
        return SequenceBatch(values, mask)

    def test_weighted_sum(self, some_seq_batch):
        # Masked positions contribute nothing even with nonzero weights.
        weights = GPUVariable(torch.FloatTensor([[0.5, 0.3, 0], [0.8, 0.2, 0], [0, 0, 0]]))
        result = SequenceBatch.weighted_sum(some_seq_batch, weights)
        assert_tensor_equal(result, [[1.7, 2.5], [0, 3.2], [0, 0]])

    def test_reduce_sum(self, some_seq_batch):
        result = SequenceBatch.reduce_sum(some_seq_batch)
        assert_tensor_equal(result, [[5, 7], [0, 4], [0, 0]])

    def test_reduce_mean(self, some_seq_batch):
        # allow_empty=True yields zeros for fully-masked rows; False raises.
        result = SequenceBatch.reduce_mean(some_seq_batch, allow_empty=True)
        assert_tensor_equal(result, [[2.5, 3.5], [0, 4], [0, 0]])
        with pytest.raises(ValueError):
            SequenceBatch.reduce_mean(some_seq_batch, allow_empty=False)

    def test_reduce_prod(self, some_seq_batch):
        # Product over valid steps; an empty row yields the identity (ones).
        result = SequenceBatch.reduce_prod(some_seq_batch)
        assert_tensor_equal(result, [[4, 10], [0, 4], [1, 1]])

    def test_reduce_max(self, some_seq_batch):
        # Max over an entirely-masked row is undefined -> ValueError.
        with pytest.raises(ValueError):
            SequenceBatch.reduce_max(some_seq_batch)
        values = GPUVariable(torch.FloatTensor([[[1, 2], [4, 5], [4, 4]], [[0, (- 4)], [43, (- 5)], [(- 1), (- 20)]]]))
        mask = GPUVariable(torch.FloatTensor([[1, 0, 0], [1, 1, 0]]))
        seq_batch = SequenceBatch(values, mask)
        result = SequenceBatch.reduce_max(seq_batch)
        assert_tensor_equal(result, [[1, 2], [43, (- 4)]])

    def test_embed(self):
        # Lookup of index batches into an embedding matrix; padding embeds to 0.
        sequences = [[], [1, 2, 3], [3, 3], [2]]
        vocab = SimpleVocab([0, 1, 2, 3, 4])
        indices = SequenceBatch.from_sequences(sequences, vocab)
        embeds = GPUVariable(torch.FloatTensor([[0, 0], [2, 2], [3, 4], [(- 10), 1], [11, (- 1)]]))
        embedded = SequenceBatch.embed(indices, embeds)
        correct = np.array([[[0, 0], [0, 0], [0, 0]], [[2, 2], [3, 4], [(- 10), 1]], [[(- 10), 1], [(- 10), 1], [0, 0]], [[3, 4], [0, 0], [0, 0]]], dtype=np.float32)
        assert_tensor_equal(embedded.values, correct)
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', (EvalHook, DistEvalHook))
def test_evaluation_hook(EvalHookParam):
    """Exercise EvalHook/DistEvalHook scheduling: interval, start epoch, dynamic
    intervals, and resumed runners.

    Note: the three decorator lines were mangled in this copy (the '@patch' /
    '@pytest.mark' heads were stripped, leaving bare tuples and a SyntaxError);
    they are restored here so the test runs as intended.
    """
    dataloader = DataLoader(torch.ones((5, 2)))
    # A non-DataLoader argument must be rejected.
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=(- 1))
    # A negative interval is invalid.
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=(- 1))
    # 1. interval=1: evaluate after every epoch -> 2 evals in 2 epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 2. start=1 with interval=1 behaves the same over 2 epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 3. interval=2: only the 2nd epoch triggers an evaluation.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 1)
    # 4. start=1, interval=2 over 3 epochs -> evals at epochs 1 and 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 5. start=0 adds an evaluation before training -> 3 evals in 2 epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # 6. dynamic_intervals switches interval 2 -> 1 from epoch 3 onwards.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0, interval=2, dynamic_intervals=[(3, 1)])
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 4)
    assert (evalhook.evaluate.call_count == 3)
    # 7. A negative start epoch is invalid; start=0 still works as in case 5.
    runner = _build_demo_runner()
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, start=(- 2))
    evalhook = EvalHookParam(dataloader, start=0)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # 8. Resuming from epoch 2 with start=1 -> evaluate on remaining epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner._epoch = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 9. Resuming from epoch 1 with start=2 -> evals at epochs 2 and 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner._epoch = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
class DataType(Enum, metaclass=DataTypeMeta):
    """Enumeration of datatypes, with helpers to pick a minimal accumulator type."""

    def get_accumulator_dt_cands():
        """Return candidate datatype names, narrowest-first within each family."""
        unsigned = [('UINT%d' % width) for width in range(1, 65)]
        signed = [('INT%d' % width) for width in range(1, 65)]
        # Order matters: BINARY, all unsigned widths, the symmetric specials,
        # then all signed widths.
        return ((['BINARY'] + unsigned) + (['BIPOLAR', 'TERNARY'] + signed))

    def get_smallest_possible(value):
        """Return the narrowest candidate datatype whose range covers `value`.

        Non-integer values fall back to FLOAT32; an integer outside every
        candidate's range raises an Exception.
        """
        if (int(value) != value):
            return DataType['FLOAT32']
        for cand_name in DataType.get_accumulator_dt_cands():
            cand = DataType[cand_name]
            if (cand.min() <= value <= cand.max()):
                return cand
        raise Exception(('Could not find a suitable int datatype for ' + str(value)))
def handle_args() -> argparse.Namespace:
    """Parse command-line arguments for the experiment.

    Raises:
        ValueError: when --config_file is not supplied (it is mandatory).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', type=str, default=None, help='Configuration file for the experiment.')
    parsed = parser.parse_args()
    # The config file is effectively required; fail loudly if absent.
    if parsed.config_file is None:
        raise ValueError('Configuration file must be provided.')
    return parsed
def get_barren_plot_from_model(model, plt):
    """Draw predicted and reference curves (exponentiated) on a semilog-y axis.

    Returns the list of the two plot handles: [prediction, validation target].
    """
    predicted = np.exp(model.predict(model.x_val))
    reference = np.exp(model.y_val)
    return [plt.semilogy(model.x_val, predicted),
            plt.semilogy(model.x_val, reference)]
class CaffeVendor(object):
    """Parse a Caffe prototxt (and optionally a caffemodel) into a dict-based
    net description plus a PyTorch-compatible weight dictionary.

    Fixes over the original:
      * files for the prototxt / weights / dump are now closed (with-blocks);
      * _parse_blob is a @staticmethod, so calling it as ``self._parse_blob(x)``
        no longer raises TypeError (it declared a single ``blob`` parameter but
        was invoked through ``self``);
      * the "testing data layer" early return now yields a 3-tuple matching the
        ``(info, blob, is_data)`` unpacking in _parse_net (it previously
        returned a 4-tuple, which raised ValueError);
      * ``raise NotImplemented`` (itself a TypeError at raise time) is now
        ``raise NotImplementedError``.
    """

    def __init__(self, net_name, weight_name, version=2):
        """net_name: prototxt path; weight_name: optional caffemodel path;
        version=2 selects the `layer` field, otherwise the legacy `layers`."""
        print('loading model spec...')
        self._net_pb = caffe_pb2.NetParameter()
        with open(net_name) as spec_file:
            text_format.Merge(spec_file.read(), self._net_pb)
        self._weight_dict = {}
        self._init_dict = []
        if (weight_name is not None):
            print('loading weights...')
            self._weight_pb = caffe_pb2.NetParameter()
            with open(weight_name, 'rb') as weight_file:
                self._weight_pb.ParseFromString(weight_file.read())
            for l in self._weight_pb.layer:
                self._weight_dict[l.name] = l
        print('parsing...')
        self._parse_net(version)

    def _parse_net(self, version):
        """Convert each layer to (info, blob_dict, is_data) and accumulate the
        net description and the flat weight dict."""
        self._name = str(self._net_pb.name)
        self._layers = (self._net_pb.layer if (version == 2) else self._net_pb.layers)
        self._parsed_layers = [self._layer2dict(x, version) for x in self._layers]
        self._net_dict = {'name': self._name, 'inputs': [], 'layers': []}
        self._weight_array_dict = {}
        for (info, blob, is_data) in self._parsed_layers:
            # Data layers and skipped layers contribute no net entry, but any
            # blobs they carry are still merged into the weight dict.
            if ((not is_data) and (info is not None)):
                self._net_dict['layers'].append(info)
            self._weight_array_dict.update(blob)

    @staticmethod
    def _parse_blob(blob):
        # Reshape the flat proto data to the blob's declared dimensions.
        flat_data = np.array(blob.data)
        shaped_data = flat_data.reshape(list(blob.shape.dim))
        return shaped_data

    def _layer2dict(self, layer, version):
        """Translate one layer proto into (info_dict, blob_dict, is_data).

        Returns (None, {}, True) for a test-phase data layer, which _parse_net
        then skips.
        """
        attr_dict = {}
        params = []
        weight_params = []
        fillers = []
        for (field, value) in layer.ListFields():
            if (field.name == 'top'):
                tops = [v.replace('-', '_').replace('/', '_') for v in value]
            elif (field.name == 'name'):
                layer_name = str(value).replace('-', '_').replace('/', '_')
            elif (field.name == 'bottom'):
                bottoms = [v.replace('-', '_').replace('/', '_') for v in value]
            elif (field.name == 'include'):
                # NOTE(review): relies on the 'type' field having been visited
                # before 'include' so that `op` is bound — ListFields() order;
                # confirm for unusual prototxts.
                if ((value[0].phase == 1) and (op == 'Data')):
                    print('found 1 testing data layer')
                    # Bug fix: arity must match the 3-way unpacking in
                    # _parse_net (was a 4-tuple).
                    return (None, dict(), True)
            elif (field.name == 'type'):
                if (version == 2):
                    op = value
                else:
                    # Legacy (V1) enum layer types are not supported.
                    raise NotImplementedError('only version-2 layer protos are supported')
            elif (field.name == 'loss_weight'):
                pass
            elif (field.name == 'param'):
                pass
            else:
                # Generic sub-message: copy scalar attributes, decode pooling
                # mode, and ignore weight-filler settings.
                try:
                    for (f, v) in value.ListFields():
                        if ('filler' in f.name):
                            pass
                        elif (f.name == 'pool'):
                            attr_dict['mode'] = ('max' if (v == 0) else 'ave')
                        else:
                            attr_dict[f.name] = v
                except:
                    print(field.name, value)
                    raise
        expr_temp = '{top}<={op}<={input}'
        if (layer.name in self._weight_dict):
            blobs = [self._parse_blob(x) for x in self._weight_dict[layer.name].blobs]
        else:
            blobs = []
        blob_dict = dict()
        if (len(blobs) > 0):
            # NOTE(review): assumes blobs[0]/blobs[1] are weight/bias — a layer
            # without a bias blob would raise IndexError here.
            blob_dict['{}.weight'.format(layer_name)] = torch.from_numpy(blobs[0])
            blob_dict['{}.bias'.format(layer_name)] = torch.from_numpy(blobs[1])
            if (op == 'BN'):
                blob_dict['{}.running_mean'.format(layer_name)] = torch.from_numpy(blobs[2])
                blob_dict['{}.running_var'.format(layer_name)] = torch.from_numpy(blobs[3])
        expr = expr_temp.format(top=','.join(tops), input=','.join(bottoms), op=op)
        out_dict = {'id': layer_name, 'expr': expr}
        if (len(attr_dict) > 0):
            out_dict['attrs'] = attr_dict
        return (out_dict, blob_dict, False)

    def text_form(self):
        """Return the prototxt as a text proto string."""
        return str(self._net_pb)

    def info(self):
        """Return a short summary: net name and the list of layer names."""
        return {'name': self._name, 'layers': [x.name for x in self._layers]}

    def yaml(self):
        """Serialize the parsed net description to YAML."""
        return yaml.dump(self._net_dict)

    def dump_weights(self, filename):
        """Persist the torch weight dict to `filename` (file handle now closed)."""
        with open(filename, 'wb') as weight_file:
            torch.save(self._weight_array_dict, weight_file)
def BHfilter(pval, q=0.2):
    """Benjamini-Hochberg selection at FDR level q.

    Returns the indices of p-values at or below the BH threshold, or an empty
    list when no sorted p-value falls under its step-up cutoff.
    """
    pval = np.asarray(pval)
    m = pval.shape[0]
    # Step-up cutoffs: q * k / m for k = 1..m.
    cutoffs = (q * np.arange(1, (m + 1.0))) / m
    below = np.sort(pval) < cutoffs
    if below.sum():
        # Threshold is the cutoff at the largest passing rank.
        largest_rank = np.nonzero(below)[0].max()
        return np.nonzero(pval <= cutoffs[largest_rank])[0]
    return []
class ResNet(nn.Module):
    """ResNet backbone returning class logits plus the four stage feature maps.

    Args:
        block: residual block class exposing an `expansion` class attribute and
            a (in_planes, planes, stride) constructor.
        num_blocks: list of four ints — blocks per stage.
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Bug fix: the classifier previously hard-coded nn.Linear(512, 10),
        # silently ignoring num_classes (and block.expansion, which determines
        # the true channel count after layer4).
        self.linear = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample (stride)."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (logits, [stage1, stage2, stage3, stage4] feature maps)."""
        out = F.relu(self.bn1(self.conv1(x)))
        out1 = self.layer1(out)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        out4 = self.layer4(out3)
        out = self.avgpool(out4)
        out = torch.flatten(out, 1)
        out = self.linear(out)
        return (out, [out1, out2, out3, out4])
class CAddTable(Module):
    """Element-wise sum of a list of tensors (port of Lua torch's nn.CAddTable).

    With inplace=True the first input's storage is reused for the output
    (mutating input[0]), and all gradInput slots share the gradOutput storage.
    """

    def __init__(self, inplace=False):
        super(CAddTable, self).__init__()
        self.inplace = inplace
        # One gradient tensor per input; grown lazily in updateGradInput.
        self.gradInput = []

    def updateOutput(self, input):
        """Return the element-wise sum of all tensors in `input`."""
        if self.inplace:
            # Reuse the first input's storage (mutates input[0]).
            self.output.set_(input[0])
        else:
            self.output.resize_as_(input[0]).copy_(input[0])
        for i in range(1, len(input)):
            self.output.add_(input[i])
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Gradient of a sum w.r.t. each addend is gradOutput itself.

        Grows/shrinks self.gradInput so it has exactly len(input) entries,
        each a copy of (or, when inplace, a view sharing) gradOutput.
        """
        for i in range(len(input)):
            if (i >= len(self.gradInput)):
                # Allocate a new tensor of the same type as the inputs.
                assert (i == len(self.gradInput))
                self.gradInput.append(input[0].new())
            if self.inplace:
                # Share gradOutput's storage instead of copying.
                self.gradInput[i].set_(gradOutput)
            else:
                self.gradInput[i].resize_as_(input[i]).copy_(gradOutput)
        # Drop stale entries from a previous call with more inputs.
        del self.gradInput[len(input):]
        return self.gradInput
class TestTarOperator(unittest.TestCase):
    """Round-trip test for zip_dir/unzip_dir on a small directory tree."""

    def test_tar(self):
        with temp_file.TemporaryDirectory(as_cwd=True):
            test_dir = 'sqlflow_tar'
            test_sub_dir = 'sqlflow_sub_dir'
            test_py_file = 'hello.py'
            test_py_content = "print('hello SQLFlow!')"
            fullpath = os.path.join(test_dir, test_sub_dir)
            os.makedirs(fullpath)
            with open(os.path.join(fullpath, test_py_file), 'w') as f:
                f.write(test_py_content)
            # Archive the tree and extract it into a separate directory.
            zip_dir(fullpath, 'sqlflow.tar.gz')
            unzip_dir('sqlflow.tar.gz', 'output')
            self.assertTrue(os.path.isdir('output/sqlflow_tar/sqlflow_sub_dir'))
            self.assertTrue(os.path.isfile('output/sqlflow_tar/sqlflow_sub_dir/hello.py'))
            # Bug fix: the content check previously re-opened the ORIGINAL file
            # (fullpath), so it could never detect round-trip corruption; read
            # the extracted copy instead.
            extracted = os.path.join('output', test_dir, test_sub_dir, test_py_file)
            with open(extracted, 'r') as f:
                self.assertEqual(f.read(), test_py_content)
class ResidualBlockWithCustomJacobian(ResidualBlock):
    """Residual block whose jacobians w.r.t. selected inputs are user-supplied.

    custom_jacobians maps an input element to the jacobian matrix to use in
    place of the automatically derived one.
    """

    # Mapping from input element -> user-provided jacobian of the residual.
    custom_jacobians: T.Dict[(T.Element, sf.Matrix)] = field(default_factory=dict)

    def compute_jacobians(self, inputs: T.Sequence[T.Element], residual_name: str=None, key_names: T.Sequence[str]=None) -> T.Sequence[sf.Matrix]:
        """Return one jacobian per input, preferring entries in custom_jacobians.

        Inputs without a custom jacobian fall back to the auto-computed one,
        but only if that jacobian is identically zero — a nonzero automatic
        jacobian mixed with custom ones is treated as a user error.

        Raises:
            ValueError: if an input lacking a custom jacobian has a nonzero
                automatically computed jacobian.
        """
        residual_jacobians = []
        for (i, input_element) in enumerate(inputs):
            if (input_element in self.custom_jacobians):
                residual_jacobians.append(self.custom_jacobians[input_element])
            else:
                residual_input_jacobian = self.residual.jacobian(input_element)
                # Only a zero jacobian may silently fall through to the
                # automatic path.
                if (residual_input_jacobian != sf.matrix_type_from_shape(residual_input_jacobian.shape).zero()):
                    residual_name = (residual_name or str(self))
                    if (key_names is not None):
                        key_name = key_names[i]
                    else:
                        key_name = str(input_element)
                    raise ValueError(f'The residual `{residual_name}` has a nonzero jacobian with respect to input `{key_name}`. Custom jacobians were provided for this residual, but not for this input variable. If you wish to use the automatically computed jacobian for this input, please compute it using `jacobian_helpers.tangent_jacobians(residual, [input])[0]` and add it to the custom_jacobians dictionary')
                residual_jacobians.append(residual_input_jacobian)
        return residual_jacobians
class KLSchedule(Callback):
    """Base callback annealing the KL weight (beta) over training epochs.

    Subclasses implement _anneal_fn to define the schedule shape between
    start_epoch and end_epoch, capped at max_kl_beta.
    """

    def __init__(self, start_epoch: int, end_epoch: int, max_kl_beta: float):
        self.start_epoch = start_epoch
        self.end_epoch = end_epoch
        self.max_kl_beta = max_kl_beta

    def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
        # Compute the scheduled beta for the current epoch and push it into
        # the model before the epoch begins.
        pl_module.set_kl_beta(self._anneal_fn(pl_module.current_epoch))

    def _anneal_fn(self, epoch):
        # Subclasses define the annealing curve.
        raise NotImplementedError
def mk_parser():
    """Build the shared experiment argument parser.

    add_help=False so this parser can be composed as a parent parser without
    a duplicate -h/--help option.
    """
    parser = argparse.ArgumentParser(add_help=False)
    arg = parser.add_argument
    # Reproducibility and prompt selection.
    arg('--seed', type=int, default=42)
    arg('--prompt_version', type=str, default='v1')
    # Dataset selection.
    arg('--dataset', type=str, choices=task_mapper.keys())
    arg('--data_file', type=str)
    # Model family / size and execution environment.
    arg('--model_type', type=str, choices=['opt', 'gpt2', 'e-gpt', 'bloom', 'falcon', 'llama'])
    arg('--model_size', type=str)
    arg('--gpus', type=str, default='0')
    arg('--batch_size', type=int, default=0)
    arg('--in_8bit', type=str2bool, default=False)
    arg('--no_console', action='store_true', default=False)
    # Few-shot exemplar configuration.
    arg('--exemplar_method', type=str, default='random', choices=['random', 'written', 'stratified'])
    arg('--num_k_shots', type=int, default=1)
    # Method hyper-parameters.
    arg('--alpha', type=float, default=0.8)
    arg('--rank', type=int, default=1)
    return parser
def get_layers_from_model_by_type(model: keras.Model, layer_type: type, include_wrapped_layers: bool=True):
    """Collect layers whose exact type is `layer_type`.

    When include_wrapped_layers is True, a KerasQuantizationWrapper whose inner
    `.layer` has that exact type also matches. Exact type comparison (not
    isinstance) is intentional, so subclasses do not match.
    """
    def _matches(layer):
        if type(layer) == layer_type:
            return True
        return (include_wrapped_layers
                and isinstance(layer, KerasQuantizationWrapper)
                and (type(layer.layer) == layer_type))
    return [layer for layer in model.layers if _matches(layer)]
def _collect_existing_files(source_dir, data_type):
    """Read {source_dir}/{data_type}.csv and return the paths that exist.

    Column 0 holds the file name and column 3 its class folder; each entry
    resolves to {source_dir}/{data_type}/{class}/{file}. Missing files are
    reported on stdout and skipped.
    """
    listing = pd.read_csv(f'{source_dir}/{data_type}.csv', header=None)
    found = []
    for (_, item) in listing.iterrows():
        file_path = os.path.join(source_dir, data_type, item[3], item[0])
        if (not os.path.exists(file_path)):
            # Bug fix: the original message had no separator ("<path>not exists").
            print((file_path + ' not exists'))
        else:
            found.append(file_path)
    return found

def evaluation(source_dir):
    """Print every test file path that also appears in the training list.

    The duplicated train/test scan loops are unified in _collect_existing_files,
    and the membership test uses a set (was O(n^2) list scans).

    NOTE(review): since each path embeds its split directory ('train' vs
    'test'), full paths can only collide if the CSVs reference each other's
    directories — confirm whether the overlap check was meant to compare
    basenames instead.
    """
    train_file_list = _collect_existing_files(source_dir, 'train')
    test_file_list = _collect_existing_files(source_dir, 'test')
    train_files = set(train_file_list)
    for test_file in test_file_list:
        if (test_file in train_files):
            print(test_file)
class TestDeprecatedJitQuantized(JitTestCase):
    """Tests for the deprecated torch.jit.quantized API: quantized RNN cells,
    full RNN modules, linear modules, and (de)serialization of quantized
    tensors and packed weights.

    NOTE(review): several decorator lines in this copy look mangled — the
    '.script_method' / '.export' lines appear to be '@torch.jit.script_method'
    / '@torch.jit.export' with their heads stripped, and the bare '_warnings'
    statement under the fbgemm check resembles a '@suppress_warnings' remnant.
    They are reproduced verbatim here; confirm against the upstream file.
    """

    def test_rnn_cell_quantized(self):
        """Quantized LSTM/GRU/RNN cells must match their fp32 references."""
        (d_in, d_hid) = (2, 2)
        for cell in [torch.nn.LSTMCell(d_in, d_hid).float(), torch.nn.GRUCell(d_in, d_hid).float(), torch.nn.RNNCell(d_in, d_hid).float()]:
            # Gate ("chunk") count differs per cell type; weights stack per gate.
            if isinstance(cell, torch.nn.LSTMCell):
                num_chunks = 4
            elif isinstance(cell, torch.nn.GRUCell):
                num_chunks = 3
            elif isinstance(cell, torch.nn.RNNCell):
                num_chunks = 1
            # Large-magnitude weights so quantization effects are deterministic.
            vals = [[100, (- 155)], [100, (- 155)], [(- 155), 100], [(- 155), 100], [100, (- 155)], [(- 155), 100], [(- 155), 100], [100, (- 155)]]
            vals = vals[:(d_hid * num_chunks)]
            cell.weight_ih = torch.nn.Parameter(torch.tensor(vals, dtype=torch.float), requires_grad=False)
            cell.weight_hh = torch.nn.Parameter(torch.tensor(vals, dtype=torch.float), requires_grad=False)
            ref = copy.deepcopy(cell)
            cell = torch.jit.quantized.quantize_rnn_cell_modules(cell)
            x = torch.tensor([[100, (- 155)], [(- 155), 100], [100, (- 155)]], dtype=torch.float)
            h0_vals = [[(- 155), 100], [(- 155), 155], [100, (- 155)]]
            hx = torch.tensor(h0_vals, dtype=torch.float)
            # LSTM cells take a (h, c) pair; the others take h alone.
            if isinstance(cell, torch.jit.quantized.QuantizedLSTMCell):
                cx = torch.tensor(h0_vals, dtype=torch.float)
                hiddens = (hx, cx)
            else:
                hiddens = hx
            if isinstance(cell, torch.jit.quantized.QuantizedLSTMCell):
                class ScriptWrapper(torch.jit.ScriptModule):
                    def __init__(self, cell):
                        super(ScriptWrapper, self).__init__()
                        self.cell = cell
                    .script_method
                    def forward(self, x, hiddens):
                        return self.cell(x, hiddens)
            else:
                class ScriptWrapper(torch.jit.ScriptModule):
                    def __init__(self, cell):
                        super(ScriptWrapper, self).__init__()
                        self.cell = cell
                    .script_method
                    def forward(self, x, hiddens):
                        return self.cell(x, hiddens)
            cell = ScriptWrapper(cell)
            outs = cell(x, hiddens)
            # Round-trip through export/import (with packed weights) and rerun.
            cell = self.getExportImportCopyWithPacking(cell)
            outs = cell(x, hiddens)
            ref_outs = ref(x, hiddens)
            self.assertEqual(len(outs), len(ref_outs))
            for (out, ref_out) in zip(outs, ref_outs):
                torch.testing.assert_allclose(out, ref_out)

    def test_rnn_quantized(self):
        """Quantized (int8 and fp16) LSTM/GRU modules must match fp32 refs."""
        (d_in, d_hid) = (2, 2)
        for cell in [torch.nn.LSTM(d_in, d_hid).float(), torch.nn.GRU(d_in, d_hid).float()]:
            vals = [[100, (- 155)], [100, (- 155)], [(- 155), 100], [(- 155), 100], [100, (- 155)], [(- 155), 100], [(- 155), 100], [100, (- 155)]]
            if isinstance(cell, torch.nn.LSTM):
                num_chunks = 4
            elif isinstance(cell, torch.nn.GRU):
                num_chunks = 3
            vals = vals[:(d_hid * num_chunks)]
            cell.weight_ih_l0 = torch.nn.Parameter(torch.tensor(vals, dtype=torch.float), requires_grad=False)
            cell.weight_hh_l0 = torch.nn.Parameter(torch.tensor(vals, dtype=torch.float), requires_grad=False)
            ref = copy.deepcopy(cell)
            cell_int8 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.int8)
            cell_fp16 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.float16)
            niter = 10
            # Repeat the sequence so the input is (niter, batch, features).
            x = torch.tensor([[100, (- 155)], [(- 155), 100], [100, (- 155)]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1)
            h0_vals = [[(- 155), 100], [(- 155), 155], [100, (- 155)]]
            hx = torch.tensor(h0_vals, dtype=torch.float).unsqueeze(0)
            cx = torch.tensor(h0_vals, dtype=torch.float).unsqueeze(0)
            if isinstance(ref, torch.nn.LSTM):
                hiddens = (hx, cx)
            elif isinstance(ref, torch.nn.GRU):
                hiddens = hx
            (ref_out, ref_hid) = ref(x, hiddens)
            # int8 path: outputs and final hidden states must match reference.
            (output_int8, final_hiddens_int8) = cell_int8(x, hiddens)
            torch.testing.assert_allclose(output_int8, ref_out)
            for (out, ref) in zip(final_hiddens_int8, ref_hid):
                torch.testing.assert_allclose(out, ref)
            # fp16 path: same checks.
            (output_fp16, final_hiddens_fp16) = cell_fp16(x, hiddens)
            torch.testing.assert_allclose(output_fp16, ref_out)
            for (out, ref) in zip(final_hiddens_fp16, ref_hid):
                torch.testing.assert_allclose(out, ref)

            def compare_quantized_unquantized(ScriptWrapper, cell):
                # Script the wrapper, run it, then round-trip through
                # export/import with packing and check outputs again.
                wrapper = ScriptWrapper(cell)
                (script_out, script_hid) = wrapper(x, hiddens)
                torch.testing.assert_allclose(script_out, ref_out)
                for (out, ref) in zip(script_hid, ref_hid):
                    torch.testing.assert_allclose(out, ref)
                export_import_wrapper = self.getExportImportCopyWithPacking(wrapper)
                (ei_out, ei_hid) = export_import_wrapper(x, hiddens)
                torch.testing.assert_allclose(ei_out, ref_out)
                for (out, ref) in zip(ei_hid, ref_hid):
                    torch.testing.assert_allclose(out, ref)
            if isinstance(cell, torch.jit.quantized.QuantizedGRU):
                class ScriptWrapper(torch.jit.ScriptModule):
                    def __init__(self, cell):
                        super(ScriptWrapper, self).__init__()
                        self.cell = cell
                    .script_method
                    def forward(self, x, hiddens):
                        return self.cell(x, hiddens)
                compare_quantized_unquantized(ScriptWrapper, cell)
            elif isinstance(cell, torch.jit.quantized.QuantizedLSTM):
                for cell in [cell_int8, cell_fp16]:
                    class ScriptWrapper(torch.jit.ScriptModule):
                        def __init__(self, cell):
                            super(ScriptWrapper, self).__init__()
                            self.cell = cell
                        .script_method
                        def forward(self, x, hiddens):
                            return self.cell(x, hiddens)
                    compare_quantized_unquantized(ScriptWrapper, cell)

    # NOTE(review): this class-level guard with the bare '_warnings' statement
    # looks like a mangled '@suppress_warnings'-style decorator under an
    # fbgemm availability check — confirm against the upstream file.
    if ('fbgemm' in torch.backends.quantized.supported_engines):
        _warnings

    def test_quantization_modules(self):
        """Quantized (int8/fp16) Linear modules must approximate fp32 output."""
        (K1, N1) = (2, 2)

        class FooBar(torch.nn.Module):
            def __init__(self):
                super(FooBar, self).__init__()
                self.linear1 = torch.nn.Linear(K1, N1).float()

            def forward(self, x):
                x = self.linear1(x)
                return x
        fb = FooBar()
        fb.linear1.weight = torch.nn.Parameter(torch.tensor([[(- 150), 100], [100, (- 150)]], dtype=torch.float), requires_grad=False)
        fb.linear1.bias = torch.nn.Parameter(torch.zeros_like(fb.linear1.bias), requires_grad=False)
        # x is only used as the tracing example input; value is the probe.
        x = ((torch.rand(1, K1).float() - 0.5) / 10.0)
        value = torch.tensor([[100, (- 150)]], dtype=torch.float)
        y_ref = fb(value)
        fb_int8 = torch.jit.quantized.quantize_linear_modules(fb)
        traced_int8 = torch.jit.trace(fb_int8, (x,))
        fb_int8 = self.getExportImportCopyWithPacking(traced_int8)
        y_int8 = fb_int8(value)
        fb_fp16 = torch.jit.quantized.quantize_linear_modules(fb, torch.float16)
        traced_fp16 = torch.jit.trace(fb_fp16, (x,))
        fb_fp16 = self.getExportImportCopyWithPacking(traced_fp16)
        y_fp16 = fb_fp16(value)
        torch.testing.assert_allclose(y_int8, y_ref, rtol=0.0001, atol=0.001)
        torch.testing.assert_allclose(y_fp16, y_ref, rtol=0.0001, atol=0.001)

    def _test_pickle_checkpoint_qtensor(self, device):
        """torch.save/load of quantized tensors from inside a ScriptModule."""
        with TemporaryFileName() as fname:

            class M(torch.jit.ScriptModule):
                __constants__ = ['fname']

                def __init__(self):
                    super(M, self).__init__()
                    self.fname = fname
                .script_method
                def forward(self, x, y):
                    torch.save((x, y), self.fname)
                    return y
            # One per-tensor and one per-channel quantized tensor.
            q = torch.quantize_per_tensor(torch.rand(2, 3, dtype=torch.float), scale=0.1, zero_point=10, dtype=torch.quint8).to(device)
            qc = torch.quantize_per_channel(torch.rand(2, 3, dtype=torch.float), scales=torch.tensor([0.1, 0.5, 0.01]), zero_points=torch.tensor([10, 0, 20]), axis=1, dtype=torch.quint8).to(device)
            m = M()
            m(q, qc)
            with open(fname, 'rb') as handle:
                (loaded_q, loaded_qc) = torch.load(fname)
                self.assertEqual(loaded_q, q)
                self.assertEqual(loaded_qc, qc)

    def test_pickle_checkpoint_qtensor(self):
        self._test_pickle_checkpoint_qtensor('cpu')

    def test_serialize_qtensor(self):
        """Quantized buffers must survive torch.jit.save/load round-trips."""

        class SimpleQTensor(torch.jit.ScriptModule):
            def __init__(self, per_channel):
                super(SimpleQTensor, self).__init__()
                x = torch.rand(5, 5).float()
                if (not per_channel):
                    x_q = torch.quantize_per_tensor(x, 0.2, 10, torch.quint8)
                else:
                    s = (torch.rand(5, dtype=torch.float64) + 0.1)
                    zp = torch.randint(5, 15, (5,))
                    x_q = torch.quantize_per_channel(x, s, zp, 1, torch.quint8)
                self.register_buffer('x', x_q)
            .script_method
            def forward(self):
                return self.x
        for per_channel in [False, True]:
            model = SimpleQTensor(per_channel)
            buffer = io.BytesIO()
            torch.jit.save(model, buffer)
            buffer.seek(0)
            model_loaded = torch.jit.load(buffer)
            self.assertEqual(model_loaded(), model())

    def test_erase_class_tensor_shapes(self):
        """Shape info on class-held packed tensors can be erased without error."""

        class Linear(torch.nn.Module):
            def __init__(self, in_features, out_features):
                super(Linear, self).__init__()
                qweight = torch._empty_affine_quantized([out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8)
                self._packed_weight = torch.ops.quantized.linear_prepack(qweight)
            .export
            def __getstate__(self):
                # Serialize the unpacked weight plus training flag.
                return (torch.ops.quantized.linear_unpack(self._packed_weight)[0], self.training)

            def forward(self):
                return self._packed_weight
            .export
            def __setstate__(self, state):
                self._packed_weight = torch.ops.quantized.linear_prepack(state[0])
                self.training = state[1]

            # NOTE(review): the two same-named `weight` defs below were
            # presumably a @property/@weight.setter pair whose decorators were
            # stripped; as written the second silently shadows the first.
            def weight(self):
                return torch.ops.quantized.linear_unpack(self._packed_weight)[0]

            def weight(self, w):
                self._packed_weight = torch.ops.quantized.linear_prepack(w)
        with torch._jit_internal._disable_emit_hooks():
            x = torch.jit.script(Linear(10, 10))
            torch._C._jit_pass_erase_shape_information(x.graph)
class TestSubtract(object):
    """Subtraction behavior of zero-dimensional numpy scalars."""

    def test_exceptions(self):
        # Boolean scalars do not support '-'; numpy raises TypeError.
        scalar = np.ones((), dtype=np.bool_)[()]
        assert_raises(TypeError, operator.sub, scalar, scalar)

    def test_result(self):
        # x - x == 0 for every integer and float scalar type.
        codes = (np.typecodes['AllInteger'] + np.typecodes['AllFloat'])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for code in codes:
                scalar = np.ones((), dtype=code)[()]
                assert_equal(operator.sub(scalar, scalar), 0)
def hash_sketch(sketch, ext):
    """Return a deterministic id: '<sha256(sketch)>_<sha256(ext)>'.

    Each array is made contiguous and flattened before hashing so the digest
    depends only on the element bytes, not on the original memory layout.
    """
    def _digest(arr):
        return sha256(np.ascontiguousarray(arr).flatten()).hexdigest()
    return (_digest(sketch) + '_') + _digest(ext)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
    """Copy TF 2.0 variable values into a PyTorch model, in place.

    pt_model: target PyTorch model; must expose ``base_model_prefix`` and
        ``named_parameters()``.
    tf_weights: iterable of TF variables (each with ``.name`` and ``.numpy()``).
    allow_missing_keys: NOTE(review): accepted but not consulted in this body;
        missing/unexpected keys are only logged.

    Returns ``pt_model``. Raises ValueError when a PyTorch parameter has no
    TF counterpart; an AssertionError (with both shapes attached) when shapes
    cannot be reconciled.
    """
    try:
        import tensorflow as tf
        import torch
    except ImportError as e:
        logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise e
    new_pt_params_dict = {}
    current_pt_params_dict = dict(pt_model.named_parameters())
    # If no PyTorch parameter carries the base-model prefix, strip it from the
    # converted TF names so the two naming schemes line up.
    start_prefix_to_remove = ''
    if (not any((s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()))):
        start_prefix_to_remove = (pt_model.base_model_prefix + '.')
    # PyTorch-style name -> (numpy value, needs-transpose flag).
    tf_weights_map = {}
    for tf_weight in tf_weights:
        (pt_name, transpose) = convert_tf_weight_name_to_pt_weight_name(tf_weight.name, start_prefix_to_remove=start_prefix_to_remove)
        tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
    all_tf_weights = set(list(tf_weights_map.keys()))
    loaded_pt_weights_data_ptr = {}
    for (pt_weight_name, pt_weight) in current_pt_params_dict.items():
        # Parameters sharing the same storage (tied weights) are loaded once
        # and the same tensor is reused.
        if (pt_weight.data_ptr() in loaded_pt_weights_data_ptr):
            new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
            continue
        if (pt_weight_name not in tf_weights_map):
            raise ValueError('{} not found in TF 2.0 model'.format(pt_weight_name))
        (array, transpose) = tf_weights_map[pt_weight_name]
        if transpose:
            array = numpy.transpose(array)
        # Reconcile rank differences between the two layouts.
        if (len(pt_weight.shape) < len(array.shape)):
            array = numpy.squeeze(array)
        elif (len(pt_weight.shape) > len(array.shape)):
            array = numpy.expand_dims(array, axis=0)
        try:
            assert (list(pt_weight.shape) == list(array.shape))
        except AssertionError as e:
            # Attach both shapes to the error before re-raising, for debugging.
            e.args += (pt_weight.shape, array.shape)
            raise e
        logger.info('Initialize PyTorch weight {}'.format(pt_weight_name))
        new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
        loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
        all_tf_weights.discard(pt_weight_name)
    (missing_keys, unexpected_keys) = pt_model.load_state_dict(new_pt_params_dict, strict=False)
    if (len(missing_keys) > 0):
        logger.info('Weights of {} not initialized from TF 2.0 model: {}'.format(pt_model.__class__.__name__, missing_keys))
    if (len(unexpected_keys) > 0):
        logger.info('Weights from TF 2.0 model not used in {}: {}'.format(pt_model.__class__.__name__, unexpected_keys))
    logger.info('Weights or buffers not loaded from TF 2.0 model: {}'.format(all_tf_weights))
    return pt_model
@pytest.fixture(scope='module')
def functional_gxy(variable_x, variable_y):
    """Module-scoped fixture: tanh Functional g(x, y) with two 10-unit hidden layers."""
    # NOTE(review): the bare "(scope='module')" line was a mangled decorator;
    # restored as a pytest fixture -- confirm against the original test module.
    return sn.Functional('gxy', [variable_x, variable_y], (2 * [10]), 'tanh')
class MinMaxScaleTransformer(BaseEstimator, TransformerMixin):
    """Min-max scale a single named column of a DataFrame.

    NOTE(review): ``transform`` writes the scaled values back into the
    caller's frame (mutates ``X`` in place) -- confirm callers expect that.
    """

    def __init__(self, column):
        self.column = column
        self.mm = None  # fitted MinMaxScaler, populated by fit()

    def fit(self, X, *args):
        # Fit the scaler on the target column as a one-column frame.
        self.mm = MinMaxScaler().fit(X[[self.column]])
        return self

    def transform(self, X):
        scaled_values = self.mm.transform(X[[self.column]])
        X[self.column] = scaled_values
        return X
def stringify_throughputs(throughputs):
    """Return a copy of the 3-level throughput map with both inner key levels stringified.

    The outermost worker-type keys are kept as-is; second- and third-level keys
    are passed through ``str`` (e.g. tuples become their repr).
    """
    return {
        worker_type: {
            str(mid_key): {
                str(inner_key): value
                for (inner_key, value) in inner_map.items()
            }
            for (mid_key, inner_map) in per_worker.items()
        }
        for (worker_type, per_worker) in throughputs.items()
    }
class CityscapesData(Data):
    """Cityscapes video-frame dataset rooted at <data_dir>/cs."""

    dirs = ['cs']

    def __init__(self, data_dir, stat_log_dir=None, development=True, fast_dir=None):
        super().__init__(data_dir, stat_log_dir, development=development, fast_dir=fast_dir)

    def _fetch_if_missing(self):
        # The archive is too large to auto-download; get_raw_dirs raises instead.
        pass

    def get_raw_dirs(self):
        """Return one directory per (split, city) under the sequence archive."""
        top_dir = os.path.join(self.current_dir, 'cs', 'leftImg8bit_sequence_trainvaltest')
        if not os.path.isdir(top_dir):
            raise RuntimeError("Cityscapes data missing.\nDownload 'leftImg8bit_sequence_trainvaltest.zip (324GB)' from and store in <data_dir>/cs.")
        city_dirs = []
        for split_name in os.listdir(top_dir):
            split_dir = os.path.join(top_dir, split_name)
            for city_name in os.listdir(split_dir):
                city_dirs.append(os.path.join(split_dir, city_name))
        return city_dirs
def test_assign_dev_data():
    """Batches can be generated from a dummy ExternSprintDataset."""
    config = Config()
    config.update(dummyconfig_dict)
    print('Create ExternSprintDataset')
    sprint_dataset = ExternSprintDataset(
        [sys.executable, sprintExecPath],
        '--*.feature-dimension=2 --*.trainer-output-dimension=3 --*.crnn-dataset=DummyDataset(2,3,num_seqs=4,seq_len=10)')
    sprint_dataset.init_seq_order(epoch=1)
    assert_true(sprint_dataset.is_less_than_num_seqs(0))
    use_recurrent_net = False
    batch_gen = sprint_dataset.generate_batches(recurrent_net=use_recurrent_net, batch_size=5)
    peeked_batches = batch_gen.peek_next_n(2)
    assert_equal(len(peeked_batches), 2)
def interactions_pandas():
    """A small fixed user/item interaction table as a pandas-style frame."""
    interaction_rows = [
        (1, 1),
        (2, 1), (2, 2),
        (3, 1), (3, 3), (3, 4),
        (4, 1), (4, 3), (4, 4),
    ]
    return PandasDataFrame(interaction_rows, columns=['user_id', 'item_id'])
def _check_pydot():
try:
pydot.Dot.create(pydot.Dot())
except Exception:
raise ImportError('Failed to import pydot. You must install pydot and graphviz for `pydotprint` to work.') |
def process_mathtt(s):
    """Strip ``\\mathtt{...}`` wrappers from *s*, keeping their contents.

    Each pass removes one ``\\mathtt{`` marker together with the first ``}``
    that follows it; scanning restarts from the beginning of the string, so
    nested wrappers are unwrapped too. Stops when no complete wrapper remains.
    """
    marker = '\\mathtt{'
    while True:
        begin = s.find(marker)
        close = s.find('}', begin)
        if begin == -1 or close == -1:
            return s
        s = s[:begin] + s[begin + len(marker):close] + s[close + 1:]
def test_join_items_right_outer_deep(join_items):
    """Right-outer join on 'name' deep-merges the 'deep' lists of the shared item."""
    (left_items, right_items) = join_items
    joined = pyhf.workspace._join_items('right outer', left_items, right_items, key='name', deep_merge_key='deep')
    # The item present on both sides ('common') must end up with both deep
    # entries, in this exact order.
    assert (next((k['deep'] for k in joined if (k['name'] == 'common'))) == [{'name': 2}, {'name': 1}])
class FixLTUNet(nn.Module):
    """Fixed two-layer network with random +/-1 weights and LTU hidden units.

    Weights are sampled once at construction and never trained; the hidden
    layer uses a per-unit linear-threshold activation whose thresholds tau are
    derived from the sampled weights.
    """

    def __init__(self, num_inputs=20, num_features=80, beta=0.75):
        super(FixLTUNet, self).__init__()
        self.num_inputs = num_inputs
        self.num_features = num_features
        self.num_outputs = 1
        self.beta = beta  # scales the LTU thresholds relative to num_inputs + 1
        self.layers = nn.ModuleList()
        self.layers.append(nn.Linear(self.num_inputs, self.num_features, bias=True))
        self.layers.append(nn.Linear(self.num_features, self.num_outputs, bias=True))
        # Overwrite both layers with weights/biases drawn uniformly from {-1, +1}
        # (randint in {0, 1}, mapped by *2 - 1).
        self.layers[0].weight.data = ((torch.randint(0, 2, (self.num_features, self.num_inputs), dtype=torch.float) * 2) - 1)
        self.layers[0].bias.data = ((torch.randint(0, 2, (self.num_features,), dtype=torch.float) * 2) - 1)
        self.layers[1].weight.data = ((torch.randint(0, 2, (self.num_outputs, self.num_features), dtype=torch.float) * 2) - 1)
        self.layers[1].bias.data = ((torch.randint(0, 2, (self.num_outputs,), dtype=torch.float) * 2) - 1)
        with torch.no_grad():
            # S is computed per hidden unit from its weight-row sum and bias;
            # the threshold is beta*(num_inputs+1) shifted down by S.
            S = (((self.num_inputs - torch.sum(self.layers[0].weight.data, dim=1)) + self.layers[0].bias.data) / 2)
            self.tau = ((self.beta * (self.num_inputs + 1)) - S)
        self.hidden_activation = LTU(tau=self.tau)

    def predict(self, x=None):
        """Return (output, hidden feature activations) for input batch x."""
        features = self.hidden_activation(self.layers[0](x))
        out = self.layers[1](features)
        return (out, features)
class GPT2Model(torch.nn.Module):
    """GPT-2 language model built on model-parallel (mpu) primitives.

    Logits are produced by projecting transformer output onto the (vocab-
    sharded) word-embedding matrix, i.e. input and output embeddings are tied.
    """

    def __init__(self, num_layers, vocab_size, hidden_size, num_attention_heads, embedding_dropout_prob, attention_dropout_prob, output_dropout_prob, max_sequence_length, checkpoint_activations, checkpoint_num_layers=1, parallel_output=True):
        super(GPT2Model, self).__init__()
        # When True, logits stay split across model-parallel ranks.
        self.parallel_output = parallel_output
        init_method = init_method_normal(std=0.02)
        # Word embeddings are sharded over the vocabulary dimension.
        self.word_embeddings = mpu.VocabParallelEmbedding(vocab_size, hidden_size, init_method=init_method)
        self.position_embeddings = torch.nn.Embedding(max_sequence_length, hidden_size)
        # Created lazily via add_tokentype_embeddings().
        self.tokentype_embeddings = None
        self.hidden_size = hidden_size
        init_method(self.position_embeddings.weight)
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
        self.transformer = mpu.GPT2ParallelTransformer(num_layers, hidden_size, num_attention_heads, attention_dropout_prob, output_dropout_prob, checkpoint_activations, checkpoint_num_layers)

    def add_tokentype_embeddings(self, num_tokentypes):
        """Attach token-type embeddings after construction (at most once)."""
        if (self.tokentype_embeddings is not None):
            raise Exception('tokentype embeddings is already initialized')
        if (torch.distributed.get_rank() == 0):
            print('adding embedding for {} tokentypes'.format(num_tokentypes), flush=True)
        self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)

    def forward(self, input_ids, position_ids, attention_mask, layer_past=None, get_present=False, tokentype_ids=None):
        """Return logits; with get_present=True, return [logits, presents]."""
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = (words_embeddings + position_embeddings)
        if (tokentype_ids is not None):
            # Token-type ids are only valid once the embedding table exists.
            assert (self.tokentype_embeddings is not None)
            embeddings = (embeddings + self.tokentype_embeddings(tokentype_ids))
        else:
            assert (self.tokentype_embeddings is None)
        embeddings = self.embedding_dropout(embeddings)
        transformer_output = self.transformer(embeddings, attention_mask, layer_past=layer_past, get_present=get_present)
        if get_present:
            (transformer_output, presents) = transformer_output
        # Project onto the sharded vocabulary using the tied embedding weights.
        transformer_output_parallel = mpu.copy_to_model_parallel_region(transformer_output)
        logits_parallel = F.linear(transformer_output_parallel, self.word_embeddings.weight)
        if self.parallel_output:
            output = logits_parallel
        else:
            output = mpu.gather_from_model_parallel_region(logits_parallel)
        if get_present:
            output = [output, presents]
        return output
class ETD(ETDLB):
    """Emphatic TD agent; inherits ETDLB behavior with beta pinned to the task's gamma."""

    def __init__(self, task, **kwargs):
        super().__init__(task, **kwargs)
        # beta tracks the task's discount factor.
        self.beta = self.task.GAMMA

    @staticmethod
    def related_parameters():
        """Names of the tunable hyper-parameters for this agent."""
        # Declared static so it works both on the class and on instances
        # (the original definition lacked ``self`` and failed when called
        # on an instance).
        return ['alpha', 'lmbda']
def top_Augmentation(d, nums=1):
    """Compute 1-, 2-, and 3-hop neighbour lists for ogbn-arxiv.

    d: dataset descriptor exposing ``raw_data_path`` and ``n_nodes``.
    nums: currently unused; kept for interface compatibility.
    Returns (neighbours_1, neighbours_2, neighbours_3); hop-k lists exclude
    nodes already reachable in fewer hops.
    """
    from scipy.sparse import coo_matrix
    from ogb.nodeproppred import DglNodePropPredDataset
    import dgl
    import time
    dataset = DglNodePropPredDataset('ogbn-arxiv', root=d.raw_data_path)
    (g, _) = dataset[0]
    g = dgl.to_bidirected(g)
    sampler = dgl.dataloading.MultiLayerNeighborSampler([999])
    collator = dgl.dataloading.NodeCollator(g, np.arange(g.num_nodes()), sampler)
    (_, _, blocks) = collator.collate(np.arange(g.num_nodes()))
    edge0 = np.array(blocks[0].edges()[1])
    edge1 = np.array(blocks[0].edges()[0])
    assert (len(edge0) == len(edge1))
    adj1 = coo_matrix((np.ones(edge0.shape), (edge0, edge1)), shape=(d.n_nodes, d.n_nodes))
    # Sparse matrix powers give multi-hop reachability; the '@' operators were
    # stripped in transit and are restored here.
    adj2 = adj1 @ adj1
    print('Start adj3')
    a = time.time()
    adj3 = adj1 @ adj2
    print('waste time in adj3:', (time.time() - a))
    print('Start adj4')
    print('waste time:', (time.time() - a))
    a = time.time()
    neighbours_1 = adj1.tolil().rows
    print('waste time:', (time.time() - a))
    neighbours_2 = adj2.tolil().rows
    print('waste time in neighbours2:', (time.time() - a))
    neighbours_3 = adj3.tolil().rows
    print('waste time in neighbours3:', (time.time() - a))
    # Make hop sets disjoint: drop closer-hop nodes from farther-hop lists.
    neighbours_2 = [list(set(neighbours_2[i]) - set(neighbours_1[i])) for i in range(len(neighbours_2))]
    neighbours_3 = [list((set(neighbours_3[i]) - set(neighbours_1[i])) - set(neighbours_2[i])) for i in range(len(neighbours_3))]
    return (neighbours_1, neighbours_2, neighbours_3)
def find_reachable_nodes(nodes, output_id, keep_tensors=False):
    """Return the set of graph nodes reachable from ``nodes[output_id]``.

    In-edges are always followed; an out-edge is followed only when the
    successor's scope contains '__i', or (with ``keep_tensors``) when its
    ``value_type`` is ``torch.Tensor``.
    """
    # Renamed the worklist from 'open' -- it shadowed the builtin open().
    frontier = {nodes[output_id]}
    reachable = set()
    while frontier:
        node = frontier.pop()
        if node in reachable:
            continue
        frontier.update(node.in_edges)
        for succ in node.out_edges:
            if ('__i' in succ.scope) or ((succ.value_type is torch.Tensor) and keep_tensors):
                frontier.add(succ)
        reachable.add(node)
    return reachable
class Correlation():
    """Builds 6-D cross-scale cosine correlation tensors between query and key features.

    NOTE(review): both methods take ``cls``; their ``@classmethod`` decorators
    were clearly stripped in transit and are restored below.
    """

    @classmethod
    def compute_crossscale_correlation(cls, _src_feats, _trg_feats, origin_resolution):
        """Correlate every (src scale, trg scale) pair; returns (bsz, S*S, ha, wa, hb, wb), clamped >= 0."""
        eps = 1e-08
        (bsz, ha, wa, hb, wb) = origin_resolution
        corr6d = []
        for src_feat in _src_feats:
            ch = src_feat.size(1)
            (sha, swa) = (src_feat.size(-2), src_feat.size(-1))
            src_feat = src_feat.view(bsz, ch, -1).transpose(1, 2)
            src_norm = src_feat.norm(p=2, dim=2, keepdim=True)
            for trg_feat in _trg_feats:
                (shb, swb) = (trg_feat.size(-2), trg_feat.size(-1))
                trg_feat = trg_feat.view(bsz, ch, -1)
                trg_norm = trg_feat.norm(p=2, dim=1, keepdim=True)
                # Cosine similarity: dot products normalized by both norms.
                corr = torch.bmm(src_feat, trg_feat)
                corr_norm = (torch.bmm(src_norm, trg_norm) + eps)
                corr = (corr / corr_norm)
                correlation = corr.view(bsz, sha, swa, shb, swb).contiguous()
                corr6d.append(correlation)
        # Resample every scale pair to the common origin resolution.
        for (idx, correlation) in enumerate(corr6d):
            corr6d[idx] = Geometry.interpolate4d(correlation, [ha, wa, hb, wb])
        corr6d = torch.stack(corr6d).view((len(_src_feats) * len(_trg_feats)), bsz, ha, wa, hb, wb).transpose(0, 1)
        return corr6d.clamp(min=0)

    @classmethod
    def build_crossscale_correlation(cls, query_feats, key_feats, scales, conv2ds):
        """Resize features per scale, apply the paired convs, and correlate across scales."""
        eps = 1e-08
        (bsz, _, hq, wq) = query_feats.size()
        (bsz, _, hk, wk) = key_feats.size()
        _query_feats_scalewise = []
        _key_feats_scalewise = []
        # Each scale rescales spatial dims by sqrt(scale) (area scales by `scale`).
        for (scale, conv) in zip(scales, conv2ds):
            shq = round(hq * math.sqrt(scale))
            swq = round(wq * math.sqrt(scale))
            shk = round(hk * math.sqrt(scale))
            swk = round(wk * math.sqrt(scale))
            _query_feats = conv(resize(query_feats, (shq, swq), mode='bilinear', align_corners=True))
            _key_feats = conv(resize(key_feats, (shk, swk), mode='bilinear', align_corners=True))
            _query_feats_scalewise.append(_query_feats)
            _key_feats_scalewise.append(_key_feats)
        corrs = cls.compute_crossscale_correlation(_query_feats_scalewise, _key_feats_scalewise, (bsz, hq, wq, hk, wk))
        return corrs.contiguous()
class ExtendedFrameSummary(FrameSummary):
    """FrameSummary that additionally keeps a reference to the live frame object."""

    def __init__(self, frame, **kwargs):
        super().__init__(**kwargs)
        # The stock FrameSummary drops the frame; retain it for later inspection.
        self.tb_frame = frame
class RoadLaneJunctionGraphPartition():
    """Deep-copied sub-graph of roads/lanes/junctions restricted to the planned route."""

    def __init__(self, graph):
        self.roads: Dict[(str, RoadNode)] = {}
        self.lanes: Dict[(str, LaneNode)] = {}
        self.junctions: Dict[(str, JunctionNode)] = {}
        # Copy every on-route road together with its lanes and junction.
        for (road_id, road) in graph.roads.items():
            if road.is_part_route:
                self.roads[road_id] = deepcopy(road)
                for lane in road.lanes:
                    self.lanes[lane.name] = deepcopy(lane)
                if (road.junction is not None):
                    self.junctions[road.junction.name] = deepcopy(road.junction)
        # Drop incoming/outgoing links that leave the route.
        for comp in [self.roads, self.lanes, self.junctions]:
            for (_, c) in comp.items():
                c.incoming = [x for x in c.incoming if x.is_part_route]
                c.outgoing = [x for x in c.outgoing if x.is_part_route]
        self.shape: RoadShape

    def get_nearest_lane(self, pos: np.ndarray):
        """Return the lane whose polyline is closest to ``pos`` (None when there are no lanes)."""
        from shapely.geometry import Point, LineString
        # Fix: the original line was truncated ('min_d ='); seed the running
        # minimum with +inf so the first measured lane always replaces it.
        min_d = float('inf')
        closest_lane = None
        ego_point = Point(pos)
        for (lane_id, lane) in self.lanes.items():
            d = LineString(lane.shape.shape).distance(ego_point)
            if (d < min_d):
                min_d = d
                closest_lane = lane
        return closest_lane
def _fill_missing_operator_names(ops):
    """Ensure every op has a unique name, generating one from its blobs when absent.

    Generated names take the form <common scope of blob names>/<op type>;
    pre-existing names are kept but still passed through the uniquifier so
    later ops cannot collide with them.
    """
    seen = set()
    for op in ops:
        seen.update(op.input)
        seen.update(op.output)
    for op in ops:
        if op.name:
            candidate = op.name
        elif op.output or op.input:
            blob_dirs = [os.path.dirname(blob) for blob in (op.output or op.input)]
            scope = os.path.commonprefix(blob_dirs)
            candidate = os.path.join(scope, op.type)
        else:
            candidate = op.type
        assert candidate
        op.name = _make_unique_name(seen, candidate)
@dataclass
class MosesTokenizerConfig(FairseqDataclass):
    """Options for the Moses tokenizer.

    NOTE(review): restored the stripped ``@dataclass`` decorator -- the
    ``field(...)`` defaults only take effect under it.
    """
    source_lang: str = field(default='en', metadata={'help': 'source language'})
    target_lang: str = field(default='en', metadata={'help': 'target language'})
    moses_no_dash_splits: bool = field(default=False, metadata={'help': "don't apply dash split rules"})
    moses_no_escape: bool = field(default=False, metadata={'help': "don't perform HTML escaping on apostrophe, quotes, etc."})
class EnumeratedSetFromIterator_method_decorator():
    """Decorator turning a method into an EnumeratedSetFromIterator factory.

    Supports both direct use (``@decorator``) and parameterized use
    (``@decorator(**options)``): in the latter case ``__call__`` receives the
    actual function later and re-enters ``__init__`` with it.
    """

    def __init__(self, f=None, **options):
        # Copy identity metadata from the wrapped callable so introspection
        # (help, pickling) still points at the original.
        if (f is not None):
            self.f = f
            if hasattr(f, '__name__'):
                self.__name__ = f.__name__
                self.__module__ = f.__module__
            else:
                # Not a plain function (e.g. another wrapper or bound method):
                # fall back to __func__ for each attribute independently.
                if hasattr(f, '__module__'):
                    self.__module__ = f.__module__
                elif hasattr(f, '__func__'):
                    self.__module__ = f.__func__.__module__
                if hasattr(f, '__name__'):
                    self.__name__ = f.__name__
                elif hasattr(f, '__func__'):
                    self.__name__ = f.__func__.__name__
        self.options = options

    def __call__(self, f):
        # Parameterized decorator form: now wrap the real function.
        return EnumeratedSetFromIterator_method_decorator(f, **self.options)

    def __get__(self, inst, cls):
        # Descriptor protocol: produce a caller bound to the instance.
        return EnumeratedSetFromIterator_method_caller(inst, self.f, **self.options)
def compare_overlap(dpr_dict_rel, bm25_dict_rel):
    """Print the average fractional overlap between DPR and BM25 relevant-doc sets.

    Iterates over the DPR query ids; each query id is printed as it is
    processed (existing debug behavior, kept as-is).
    """
    bm25_fractions = []
    dpr_fractions = []
    for query_id in dpr_dict_rel.keys():
        dpr_docs = set(dpr_dict_rel.get(query_id).keys())
        bm25_docs = set(bm25_dict_rel.get(query_id).keys())
        print(query_id)
        shared = dpr_docs.intersection(bm25_docs)
        if bm25_docs:
            bm25_fractions.append(len(shared) / len(bm25_docs))
        if dpr_docs:
            dpr_fractions.append(len(shared) / len(dpr_docs))
    print('average percentual intersection of bm25 results which are also found in dpr {}'.format(np.mean(bm25_fractions)))
    print('average percentual intersection of dpr results which are also found in bm25 {}'.format(np.mean(dpr_fractions)))
class PermutationGroup_action(PermutationGroup_generic):
    """Permutation group defined by a group action on a finite domain.

    ``action(g, x)`` must return the image of domain element ``x`` under
    generator ``g``; the permutation generators and the orbits are derived
    from it at construction time.
    """

    def __init__(self, gens, action, domain, gap_group=None, category=None, canonicalize=None):
        from sage.combinat.cyclic_sieving_phenomenon import orbit_decomposition
        from sage.sets.disjoint_set import DisjointSet
        if (gap_group is not None):
            raise ValueError('gap_group is not supported with action')
        if (gens is None):
            # No generators given: the action itself acts as a single map on
            # the domain; its nontrivial orbits become the cycle generators.
            self._orbits = tuple((tuple(o) for o in orbit_decomposition(domain, action)))
            gens = [o for o in self._orbits if (len(o) > 1)]
        else:
            # One permutation per generator, represented by its nontrivial cycles.
            g_orbits = [orbit_decomposition(domain, (lambda x: action(g, x))) for g in gens]
            gens = []
            for g_orbit in g_orbits:
                g_gens = [tuple(o) for o in g_orbit if (len(o) > 1)]
                if g_gens:
                    gens.append(g_gens)
            # Group orbits are the connected components of all generator cycles.
            D = DisjointSet(domain)
            for g_orbit in g_orbits:
                for o in g_orbit:
                    for i in range(1, len(o)):
                        D.union(o[0], o[i])
            self._orbits = tuple((tuple(o) for o in D))
        PermutationGroup_generic.__init__(self, gens=gens, gap_group=gap_group, domain=domain, category=category, canonicalize=canonicalize)

    def orbits(self):
        """Return the orbits of the group on its domain (tuple of tuples)."""
        return self._orbits
def load_mp():
    """Load the pickled MP dictionary for the processed canVote dataset."""
    pickle_path = 'datasets/canVote_processed/mp_dict.pkl'
    return normal_util.load_object(pickle_path)
@pytest.mark.cpublas
def test_bert_full(gpu, default_implementation, sdfg_name):
    """End-to-end check: DaCe ONNX execution of bert-tiny matches the PyTorch reference."""
    # NOTE(review): the decorator and the artifact base URL were mangled in
    # transit (stray '.cpublas' token / unterminated string). The marker is
    # restored from the stray token and the URL left as an empty placeholder
    # -- TODO confirm both against the upstream test.
    bert_tiny_root = ''  # TODO(review): restore the bert-tiny download base URL
    get_data_file((bert_tiny_root + '/config.json'), directory_name='bert-tiny')
    vocab = get_data_file((bert_tiny_root + '/vocab.txt'), directory_name='bert-tiny')
    bert_path = get_data_file((bert_tiny_root + '/bert-tiny.onnx'), directory_name='bert-tiny')
    get_data_file((bert_tiny_root + '/pytorch_model.bin'), directory_name='bert-tiny')
    model_dir = os.path.dirname(vocab)
    tokenizer = BertTokenizer.from_pretrained(vocab)
    pt_model = copy_to_gpu(gpu, BertModel.from_pretrained(model_dir))
    text = '[CLS] how are you today [SEP] dude [SEP]'
    tokenized_text = tokenizer.tokenize(text)
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # First segment: 6 tokens (incl. specials); second segment: 2 tokens.
    segment_ids = (([0] * 6) + ([1] * 2))
    tokens_tensor = copy_to_gpu(gpu, torch.tensor([indexed_tokens]))
    segments_tensors = copy_to_gpu(gpu, torch.tensor([segment_ids]))
    attention_mask = copy_to_gpu(gpu, torch.ones(1, 8, dtype=torch.int64))
    model = onnx.load(bert_path)
    (model, check) = onnxsim.simplify(model, skip_fuse_bn=True, input_shapes=dict(input_ids=tokens_tensor.shape, token_type_ids=segments_tensors.shape, attention_mask=attention_mask.shape))
    dace_model = donnx.ONNXModel(sdfg_name, model, cuda=gpu, auto_merge=True)
    dace_output = dace_model(input_ids=tokens_tensor, token_type_ids=segments_tensors, attention_mask=attention_mask)
    output = pt_model(tokens_tensor, token_type_ids=segments_tensors, attention_mask=attention_mask)
    torch_tensors_close('output_0', output[0], dace_output[0])
    torch_tensors_close('output_1', output[1], dace_output[1])
class VGG19(torch.nn.Module):
    """Pretrained VGG-19 feature extractor split into five relu-bounded slices."""

    # Layer index ranges of torchvision's vgg19().features for slice1..slice5.
    _SLICE_BOUNDS = ((0, 2), (2, 7), (7, 12), (12, 21), (21, 30))

    def __init__(self, requires_grad=False):
        super().__init__()
        features = torchvision.models.vgg19(pretrained=True).features
        for slice_idx, (lo, hi) in enumerate(self._SLICE_BOUNDS, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(lo, hi):
                stage.add_module(str(layer_idx), features[layer_idx])
            setattr(self, 'slice{}'.format(slice_idx), stage)
        if not requires_grad:
            # Freeze all weights when used purely as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Return the five intermediate relu activations, shallowest first."""
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        return [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
def get_model(transform=None):
    """Build a Prophet model with auto seasonality.

    When a transform is supplied it is preceded by temporal resampling.
    """
    if transform is not None:
        transform = TransformSequence([TemporalResample(), transform])
    return Prophet(ProphetConfig(add_seasonality='auto', transform=transform))
def not_so_slow(response, case):
    """Check-style hook: fail when the response took 100ms or more."""
    latency_budget = timedelta(milliseconds=100)
    assert response.elapsed < latency_budget, 'Response is slow!'
def generate_value(type, dims, data_type, multiplier):
    """Generate serialized tensor bytes for a test input.

    type: 'Normal' | 'Uniform' | 'Constant' generator selector (parameter name
        kept for interface compatibility although it shadows the builtin).
    dims: output shape.
    data_type: tensor type id mapped through TENSOR_TYPE_TO_DTYPE.
    multiplier: std-dev scale / uniform bound / constant value respectively.
    Returns the raw bytes of the generated array; raises ValueError for an
    unknown generator type.
    """
    d = TENSOR_TYPE_TO_DTYPE[data_type]
    if type == 'Normal':
        ret = np.random.randn(*dims) * multiplier
    elif type == 'Uniform':
        ret = np.random.uniform(-multiplier, multiplier, size=dims)
    elif type == 'Constant':
        ret = np.ones(dims) * multiplier
    else:
        raise ValueError('Generator type "' + type + '" is not supported.')
    # .tostring() was a deprecated alias removed in NumPy 2.0; tobytes() is
    # byte-for-byte identical.
    return ret.astype(d).tobytes()
def _add_category_whitelists_to_metadata(cfg: CfgNode):
    """Record per-dataset whitelisted category ids on the metadata catalog."""
    for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items():
        metadata = MetadataCatalog.get(dataset_name)
        metadata.whitelisted_categories = whitelisted_cat_ids
        logger = logging.getLogger(__name__)
        logger.info('Whitelisted categories for dataset {}: {}'.format(dataset_name, metadata.whitelisted_categories))
def deprecated_version_of(f, oldname, newname=None):
    """Wrap *f* so every call emits a PendingDeprecationWarning naming the old API.

    The warning text doubles as the wrapper's docstring; the wrapped function
    is otherwise delegated to unchanged.
    """
    if newname is None:
        newname = f.__name__
    warning = ('The function ``%s`` is deprecated and is kept temporarily for backwards compatibility.\nPlease use the new name, ``%s``, instead.' % (oldname, newname))

    def deprecated_wrapper(*a, **kw):
        # Warn on every call, then delegate with all arguments intact.
        warnings.warn(('MoviePy: ' + warning), PendingDeprecationWarning)
        return f(*a, **kw)

    deprecated_wrapper.__doc__ = warning
    return deprecated_wrapper
def _get_rllib_config(path):
    """Load an RLlib experiment's config from params.json and params.pkl under *path*.

    path: a pathlib-style directory (must support the / operator).
    Returns (json_config, pickle_config).
    """
    jsonfile = path / 'params.json'
    # Context manager closes the handle deterministically; the original
    # leaked it via open(...).read().
    with open(jsonfile) as f:
        jsondata = json.loads(f.read())
    pklfile = path / 'params.pkl'
    with open(pklfile, 'rb') as file:
        pkldata = cloudpickle.load(file)
    return (jsondata, pkldata)
class Speech2TextConfig(PretrainedConfig):
    """Configuration for the Speech2Text encoder-decoder model.

    Stores the transformer architecture hyper-parameters plus the settings of
    the convolutional feature subsampler; construction validates that one
    kernel size is given per conv layer.
    """
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    # Map generic config attribute names onto this model's own names.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        # num_hidden_layers mirrors the encoder depth for generic tooling.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        # Exactly one kernel size is required per convolutional layer.
        if (len(self.conv_kernel_sizes) != self.num_conv_layers):
            raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, `config.num_conv_layers = {self.num_conv_layers}`.')
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
def setup_grad_values(backward_result: BackwardResult, sdfg: dace.SDFG, outputs: List[str]) -> str:
    """Emit C++ that declares/initializes gradient buffers for a backward pass.

    Input-parameter gradients get a tensor initializer (zeroed or not, per
    ``backward_result.zero_init``, defaulting to zeroed); each output gradient
    is aliased to the corresponding entry of ``grad_outputs``.
    """
    code = '// input grads'
    # Sorted iteration keeps the generated code deterministic.
    for (param_name, grad_name) in sorted(backward_result.required_grad_names.items()):
        zero_init = backward_result.zero_init.get(param_name, True)
        code += ('\n' + tensor_init_for_desc(grad_name, sdfg.arrays[grad_name], zeros=zero_init))
    code += '// output grads'
    for (i, o) in enumerate(outputs):
        grad_name = backward_result.given_grad_names[o]
        code += f'''
auto {grad_name}_ = grad_outputs[{i}];'''
    return code
class TestNormalizedEnv():
    """Behavioral tests for the NormalizedEnv wrapper."""

    def test_pickleable(self):
        """A pickle round trip preserves the reward scale and the inner env's goal."""
        inner_env = PointEnv(goal=(1.0, 2.0))
        env = NormalizedEnv(inner_env, scale_reward=10.0)
        round_trip = pickle.loads(pickle.dumps(env))
        assert round_trip
        assert (round_trip._scale_reward == env._scale_reward)
        assert np.array_equal(round_trip.env._goal, env.env._goal)
        step_env(round_trip)
        round_trip.close()
        env.close()

    def test_does_not_modify_action(self):
        """step() must leave the caller's action array untouched."""
        inner_env = PointEnv(goal=(1.0, 2.0))
        env = NormalizedEnv(inner_env, scale_reward=10.0)
        a = (env.action_space.high + 1.0)
        # Fix: take a real snapshot. The original ``a_copy = a`` aliased the
        # same array, so the assertion below could never fail.
        a_copy = a.copy()
        env.reset()
        env.step(a)
        assert np.array_equal(a, a_copy)
        env.close()
def scaled_gradient(source: Tensor, scale: Union[(float, Tensor)]) -> Tensor:
    """Scale gradients flowing through *source* by *scale*.

    A plain (non-Tensor) scale of exactly 0.0 short-circuits to a full
    gradient stop; otherwise the backend's scaled-gradient op is used.
    """
    is_plain_zero = (not isinstance(scale, Tensor)) and scale == 0.0
    if is_plain_zero:
        return stop_gradient(source)
    return source._raw_backend.scaled_gradient(source, scale)
class J2Grande(AI21TextGenerationAPI):
    """AI21 Jurassic-2 Grande text-generation backend."""

    config_name = 'ai21_j2_grande'

    def __init__(self, api_key):
        # Pin the engine id; everything else comes from the base API wrapper.
        super().__init__(engine='j2-grande', api_key=api_key)
def test_clean_input_format(df_countries: pd.DataFrame) -> None:
    """Each input_format only recognizes its own representation; everything else maps to NaN."""
    df_clean_name = clean_country(df_countries, 'messy_country', input_format='name')
    df_clean_official = clean_country(df_countries, 'messy_country', input_format='official')
    df_clean_alpha2 = clean_country(df_countries, 'messy_country', input_format='alpha-2')
    df_clean_alpha3 = clean_country(df_countries, 'messy_country', input_format='alpha-3')
    df_clean_numeric = clean_country(df_countries, 'messy_country', input_format='numeric')
    # 'name' and 'official' are expected to produce identical results on this fixture.
    df_check_name_and_official = df_countries.copy()
    df_check_name_and_official['messy_country_clean'] = ['Canada', 'Canada', np.nan, np.nan, 'Ireland', 'DR Congo', 'Congo Republic', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
    df_check_alpha2 = df_countries.copy()
    df_check_alpha2['messy_country_clean'] = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 'American Samoa', 'Turkey', 'Belize', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
    df_check_alpha3 = df_countries.copy()
    df_check_alpha3['messy_country_clean'] = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 'Argentina', 'Bouvet Island', 'New Zealand', np.nan, np.nan, np.nan]
    df_check_numeric = df_countries.copy()
    df_check_numeric['messy_country_clean'] = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 'Greenland', 'Estonia', 'Yemen', np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
    assert df_clean_name.equals(df_check_name_and_official)
    assert df_clean_official.equals(df_check_name_and_official)
    assert df_clean_alpha2.equals(df_check_alpha2)
    assert df_clean_alpha3.equals(df_check_alpha3)
    assert df_clean_numeric.equals(df_check_numeric)
class PreBasicBlock(nn.Module):
    """Pre-activation ResNet basic block.

    ptype controls where the BN+ReLU pre-activation sits:
      - 'preact' (default): applied on the residual branch only.
      - 'both_preact': applied before the split, so the shortcut sees it too.
      - 'no_preact': no pre-activation module is created at all.
    Attribute names are load-bearing for state_dict compatibility.
    """
    # Standard basic-block channel expansion factor.
    expansion = 1
    # Convolutions are bias-free (BN follows).
    bias = False

    def __init__(self, inplanes, planes, stride=1, ptype='preact'):
        super(PreBasicBlock, self).__init__()
        if (ptype != 'no_preact'):
            self.preact = nn.Sequential(nn.BatchNorm2d(inplanes), nn.ReLU(inplace=True))
        self.conv1 = conv3x3(inplanes, planes, stride, bias=self.bias)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, (planes * self.expansion), 1, bias=self.bias)
        # 1x1 projection shortcut only when spatial size or channels change.
        if ((stride != 1) or (inplanes != (planes * self.expansion))):
            self.downsample = nn.Conv2d(inplanes, (planes * self.expansion), kernel_size=1, stride=stride, bias=self.bias)
        else:
            self.downsample = nn.Sequential()
        self.ptype = ptype

    def forward(self, x):
        # 'both_preact': pre-activate before taking the shortcut snapshot.
        if (self.ptype == 'both_preact'):
            x = self.preact(x)
        residual = x
        # 'preact': pre-activate the residual branch only (shortcut keeps raw x).
        if ((self.ptype != 'no_preact') and (self.ptype != 'both_preact')):
            x = self.preact(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        residual = self.downsample(residual)
        out += residual
        return out
class Vertex(object):
    """A network vertex with addressing info and a CPU allocation share."""

    def __init__(self, label, ip='', netmask='', mac='', cpu_alloc=0.0):
        self.__dict__.update(
            label=label, ip=ip, netmask=netmask, mac=mac, cpu_alloc=cpu_alloc)

    def get_params(self):
        # Exposes the live attribute dict (callers can mutate the vertex via it).
        return self.__dict__
def _set_socket_options(sock, options):
if (options is None):
return
for opt in options:
sock.setsockopt(*opt) |
(frozen=True)
class Prediction(HalfFrozenObject):
soft: torch.Tensor = attr.ib(default=None)
log_soft: torch.Tensor = attr.ib(default=None)
aux_soft: torch.Tensor = attr.ib(default=None)
aux_log_soft: torch.Tensor = attr.ib(default=None)
hard: torch.Tensor = attr.ib(default=None)
alpha: torch.Tensor = attr.ib(default=None)
alpha_features: torch.Tensor = attr.ib(default=None)
x_hat: torch.Tensor = attr.ib(default=None)
logits: torch.Tensor = attr.ib(default=None)
logits_features: torch.Tensor = attr.ib(default=None)
latent: torch.Tensor = attr.ib(default=None)
latent_node: torch.Tensor = attr.ib(default=None)
latent_features: torch.Tensor = attr.ib(default=None)
hidden: torch.Tensor = attr.ib(default=None)
hidden_features: torch.Tensor = attr.ib(default=None)
var_predicted: torch.Tensor = attr.ib(default=None)
var: torch.Tensor = attr.ib(default=None)
softs: torch.Tensor = attr.ib(default=None)
energy: torch.Tensor = attr.ib(default=None)
energy_feaures: torch.Tensor = attr.ib(default=None)
p_c: torch.Tensor = attr.ib(default=None)
p_u: torch.Tensor = attr.ib(default=None)
p_uc: torch.Tensor = attr.ib(default=None)
chi: torch.Tensor = attr.ib(default=None)
evidence: torch.Tensor = attr.ib(default=None)
evidence_ft: torch.Tensor = attr.ib(default=None)
evidence_nn: torch.Tensor = attr.ib(default=None)
evidence_node: torch.Tensor = attr.ib(default=None)
evidence_per_class: torch.Tensor = attr.ib(default=None)
evidence_ft_per_class: torch.Tensor = attr.ib(default=None)
ft_weight: torch.Tensor = attr.ib(default=None)
nn_weight: torch.Tensor = attr.ib(default=None)
log_ft: torch.Tensor = attr.ib(default=None)
log_ft_per_class: torch.Tensor = attr.ib(default=None)
log_nn: torch.Tensor = attr.ib(default=None)
log_nn_per_class: torch.Tensor = attr.ib(default=None)
log_node: torch.Tensor = attr.ib(default=None)
prediction_confidence_aleatoric: torch.Tensor = attr.ib(default=None)
prediction_confidence_epistemic: torch.Tensor = attr.ib(default=None)
prediction_confidence_structure: torch.Tensor = attr.ib(default=None)
sample_confidence_aleatoric: torch.Tensor = attr.ib(default=None)
sample_confidence_epistemic: torch.Tensor = attr.ib(default=None)
sample_confidence_structure: torch.Tensor = attr.ib(default=None)
sample_confidence_features: torch.Tensor = attr.ib(default=None)
sample_confidence_neighborhood: torch.Tensor = attr.ib(default=None)
mu_1: torch.Tensor = attr.ib(default=None)
mu_1p: torch.Tensor = attr.ib(default=None)
mu_2: torch.Tensor = attr.ib(default=None)
mu_2p: torch.Tensor = attr.ib(default=None)
var_1: torch.Tensor = attr.ib(default=None)
var_1p: torch.Tensor = attr.ib(default=None)
var_2: torch.Tensor = attr.ib(default=None)
var_2p: torch.Tensor = attr.ib(default=None)
log_q: torch.Tensor = attr.ib(default=None)
log_prior: torch.Tensor = attr.ib(default=None)
act_vec: torch.Tensor = attr.ib(default=None)
q: torch.Tensor = attr.ib(default=None)
def collate(self, p_to_collate: Prediction) -> Prediction:
for (var_name, var_val) in vars(self).items():
if (var_val is None):
continue
if (not isinstance(p_to_collate, (list, tuple))):
p_to_collate = [p_to_collate]
p_val = [getattr(p, var_name) for p in p_to_collate]
self.set_value(var_name, torch.cat([var_val, *p_val]))
return self
def to(self, device, **kwargs) -> Prediction:
    """Move every populated tensor field to ``device`` in place.

    Extra ``kwargs`` are forwarded to ``Tensor.to``. Returns ``self``
    for chaining.
    """
    for field_name, tensor in vars(self).items():
        if tensor is not None:
            self.set_value(field_name, tensor.to(device, **kwargs))
    return self
def collect_class_methods(cls, methods):
    """Resolve or discover the public routines of ``cls``.

    If ``methods`` is a list/tuple, each entry is resolved: strings are
    looked up on ``cls`` via ``getattr``, anything else is passed through
    unchanged. Otherwise all routines of ``cls`` are collected, skipping
    underscore-prefixed names and names listed in the module-level EXCLUDE.
    """
    if isinstance(methods, (list, tuple)):
        return [getattr(cls, entry) if isinstance(entry, str) else entry for entry in methods]
    collected = []
    for _name, routine in inspect.getmembers(cls, predicate=inspect.isroutine):
        if routine.__name__.startswith('_') or routine.__name__ in EXCLUDE:
            continue
        collected.append(routine)
    return collected
class ExpansionPerturbation(TextPerturbation):
    """Robustness perturbation that expands English contractions.

    Matches any key of CONTRACTION_MAP as a whole word (case-insensitively)
    and replaces it with its expansion, preserving the original casing via
    ``match_case``.
    """

    name: str = 'expansion'

    def __init__(self):
        self.contraction_map: Dict[str, str] = CONTRACTION_MAP
        # One alternation over all known contractions, anchored at word
        # boundaries so partial matches inside longer words are ignored.
        pattern = '\\b({})\\b'.format('|'.join(self.contraction_map.keys()))
        self.contraction_pattern = re.compile(pattern, flags=(re.IGNORECASE | re.DOTALL))

    def description(self) -> PerturbationDescription:
        return PerturbationDescription(name=self.name, robustness=True)

    def perturb(self, text: str, rng: Random) -> str:
        def _expand(match_obj):
            found = match_obj.group(0)
            # Fall back to a lowercase lookup when the matched casing is
            # not a key (e.g. "Don't" matched case-insensitively).
            replacement = self.contraction_map.get(found, self.contraction_map.get(found.lower()))
            return match_case(found, replacement)

        return self.contraction_pattern.sub(_expand, text)
class TokenVocab(BaseVocab):
    """Vocabulary over surface tokens, backed by a tab-separated count file.

    Counts are gathered from CoNLL training files (or loaded from a cached
    count file), tokens meeting the frequency/rank thresholds are indexed,
    and a dense embedding matrix is allocated for them.
    """

    def __init__(self, *args, **kwargs):
        # recount=True forces re-reading the CoNLL files even when a cached
        # count file exists; initialize_zero chooses zero vs. random-normal
        # initialization of the embedding matrix.
        recount = kwargs.pop('recount', False)
        initialize_zero = kwargs.pop('initialize_zero', True)
        super(TokenVocab, self).__init__(*args, **kwargs)
        if recount:
            self.count()
        elif os.path.isfile(self.filename):
            self.load()
        else:
            self.count()
            self.dump()
        self.index_vocab()
        embed_dims = [len(self), self.embed_size]
        if initialize_zero:
            self._embeddings_array = np.zeros(embed_dims)
        else:
            self._embeddings_array = np.random.randn(*embed_dims)

    def setup(self):
        """Create the trainable TF embedding variable from the numpy array."""
        self.placeholder = None
        del self._embeddings
        # Embedding tables are pinned to the CPU.
        with tf.device('/cpu:0'):
            with tf.variable_scope(self.name.title()):
                self._embeddings = tf.Variable(self._embeddings_array, name='Embeddings', dtype=tf.float32, trainable=True)

    def count(self, conll_files=None):
        """Accumulate token counts from CoNLL files (defaults to train files).

        Raises ValueError (with the offending file and line) on malformed rows.
        """
        if conll_files is None:
            conll_files = self.train_files
        for conll_file in conll_files:
            with codecs.open(conll_file, encoding='utf-8', errors='ignore') as f:
                for line_num, line in enumerate(f):
                    try:
                        line = line.strip()
                        if line and not line.startswith('#'):
                            fields = line.split('\t')
                            # CoNLL-U rows carry exactly 10 tab-separated fields.
                            assert len(fields) == 10
                            token = fields[self.conll_idx]
                            if not self.cased:
                                token = token.lower()
                            self.counts[token] += 1
                    except Exception as e:
                        # Narrowed from a bare `except:`; chain the cause.
                        raise ValueError('File %s is misformatted at line %d' % (conll_file, line_num + 1)) from e

    def load(self):
        """Load cached token counts from self.filename."""
        with codecs.open(self.filename, encoding='utf-8') as f:
            for line_num, line in enumerate(f):
                try:
                    line = line.strip()
                    if line:
                        token, count = line.split('\t')
                        self.counts[token] = int(count)
                except Exception as e:
                    # BUG FIX: the original referenced the undefined name
                    # `train_file` here, raising NameError instead of the
                    # intended ValueError; use the file actually being read.
                    raise ValueError('File %s is misformatted at line %d' % (self.filename, line_num + 1)) from e

    def dump(self):
        """Write token counts to self.filename, one tab-separated pair per line."""
        with codecs.open(self.filename, 'w', encoding='utf-8') as f:
            for word, count in self.sorted_counts(self.counts):
                f.write('%s\t%d\n' % (word, count))

    def index_vocab(self):
        """Assign contiguous ids to tokens passing the count/rank thresholds."""
        for token, count in self.sorted_counts(self.counts):
            if count >= self.min_occur_count and token not in self and (not self.max_rank or len(self) < self.max_rank):
                self[token] = len(self)

    def fit_to_zipf(self, plot=True):
        """Fit (and optionally plot) a Zipf distribution to the counts."""
        zipf = Zipf.from_configurable(self, self.counts, name='zipf-%s' % self.name)
        if plot:
            zipf.plot()
        return zipf

    @staticmethod
    def sorted_counts(counts):
        # BUG FIX: this is called as `self.sorted_counts(self.counts)` but
        # took a single `counts` parameter; without @staticmethod the bound
        # call passed `self` as `counts` and raised TypeError.
        # Sort by descending count, then token, for a deterministic order.
        return sorted(counts.most_common(), key=lambda item: (-item[1], item[0]))

    @property
    def conll_idx(self):
        # BUG FIX: count() uses `fields[self.conll_idx]`, which requires an
        # int; as a plain method the attribute access yielded a bound method
        # and indexing raised TypeError, so this must be a property.
        return self._conll_idx
class ActionConfig():
    """Configuration of the agent's action space.

    NOTE(review): MISSING suggests this is a structured config (omegaconf /
    hydra style) whose `space` must be supplied at runtime — TODO confirm.
    """
    # Which action space to use; must be provided by the config (MISSING).
    space: ActionSpace = MISSING
    # Whether to expose the extended set of road options.
    extended_road_options: bool = False
    # Continuous actions are emitted normalized (presumably to [-1, 1] — TODO confirm).
    cont_normalized_actions: bool = True
    # Grid resolution of the discretized action space.
    disc_dimensions: Tuple[(int, int)] = (5, 5)
    # Number of target speeds for the hierarchical action space.
    hie_num_target_speeds: int = 5
    # Hierarchical space controls acceleration instead of target speed.
    hie_accel: bool = False
    # Use the cross-product of discrete dimensions in the hierarchical space.
    disc_hie_cross_prod: bool = False
def polymorphic_model(type_list: Optional[Union[List, Tuple[List]]] = None):
    """Class decorator registering a polymorphic schema model.

    The decorated class is appended to ``type_list`` (to every list when a
    tuple of lists is given), and gains a ``_claim_polymorphic(data)``
    classmethod-like hook that claims payloads whose ``type`` equals the
    single choice declared on the class schema's ``type`` field.
    """
    def decorator(cls):
        if isinstance(type_list, tuple):
            for registry in type_list:
                registry.append(cls)
        elif type_list is not None:
            type_list.append(cls)
        # The schema must declare exactly one admissible `type` value.
        assert len(cls._schema.fields['type'].choices) == 1

        def _claim_polymorphic(data):
            # Looked up lazily so later schema changes are reflected,
            # matching the original behavior.
            return data['type'] == cls._schema.fields['type'].choices[0]

        cls._claim_polymorphic = _claim_polymorphic
        return cls

    return decorator
def process_video_mat(video_mat):
    """Flatten a nested (MATLAB-cell-style) video matrix into a 2-D array.

    Each entry of ``video_mat`` is doubly wrapped; the inner ``[0][0]``
    element is the actual per-shot vector. Returns the stacked vectors
    as a numpy array.
    """
    shot_vectors = [wrapped[0][0] for wrapped in video_mat]
    return np.array(shot_vectors)
def main(args):
    """Create the AIShell-1 metadata directory and populate it.

    Expects ``args.aishell1_dir`` to point at the corpus root; metadata is
    written to a ``metadata`` subdirectory (created if absent).
    """
    corpus_dir = args.aishell1_dir
    metadata_dir = os.path.join(corpus_dir, 'metadata')
    os.makedirs(metadata_dir, exist_ok=True)
    create_aishell1_metadata(corpus_dir, metadata_dir)
def build_resnet18(num_classes: int, norm_layer):
    """Build a ResNet-18 (BasicBlock, 2-2-2-2 stages) with the given head size
    and normalization layer."""
    stage_blocks = [2, 2, 2, 2]  # canonical ResNet-18 layout
    return ResNet(BasicBlock, layers=stage_blocks, norm_layer=norm_layer, num_classes=num_classes)
def segment_ids(size, is_sorted):
    """Hypothesis strategy generating an int32 array of segment ids.

    For size 0 the strategy yields an empty array. When ``is_sorted``, ids
    are a non-decreasing sequence starting at 0 (cumulative sum of booleans,
    shifted by the first flag); otherwise ids are arbitrary ints in
    ``[0, 2 * size]``.
    """
    if size == 0:
        return st.just(np.empty(shape=[0], dtype=np.int32))
    if is_sorted:
        def _to_sorted_ids(flags):
            return np.cumsum(flags, dtype=np.int32) - flags[0]
        return arrays([size], dtype=np.int32, elements=st.booleans()).map(_to_sorted_ids)
    return arrays([size], dtype=np.int32, elements=st.integers(min_value=0, max_value=2 * size))
# NOTE(review): the lines below are web-scrape residue (dataset-viewer page
# boilerplate), not Python source — commented out to keep the file parseable;
# remove entirely.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.