def remove_extra_space_around_variable(t):
var_names = extract_variable_names(t)
result = str(t)
for v in var_names:
result = result.replace((('" ' + v) + ' "'), v)
return result |
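A minimal illustration of the helper above. extract_variable_names is defined elsewhere in the source; the stub below is a hypothetical stand-in so the example runs:

import re

# Hypothetical stand-in for the real extract_variable_names helper.
def extract_variable_names(t):
    return re.findall(r'"\s*([A-Za-z_]\w*)\s*"', t)

remove_extra_space_around_variable('let " x " = " y " + 1')  # -> 'let x = y + 1'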
@pytest.mark.parametrize('n_unique_action, len_list, dim_context, reward_type, reward_structure, decay_function, click_model, eta, random_state, err, description', invalid_input_of_init)
def test_synthetic_slate_init_using_invalid_inputs(n_unique_action, len_list, dim_context, reward_type, reward_structure, decay_function, click_model, eta, random_state, err, description):
with pytest.raises(err, match=f'{description}*'):
_ = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, decay_function=decay_function, click_model=click_model, eta=eta, random_state=random_state) |
class DecodingBlocks(nn.Module):
def __init__(self, num_in, num_out, bilinear=False):
super(DecodingBlocks, self).__init__()
if bilinear:
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode='nearest'), nn.BatchNorm3d(num_in), nn.ReLU(inplace=True))
else:
self.up = nn.Sequential(nn.ConvTranspose3d(num_in, num_in, 2, stride=2), nn.BatchNorm3d(num_in), nn.ReLU(inplace=True))
self.DecodeConv = nn.Sequential(nn.Conv3d((2 * num_in), num_in, 3, padding=1), nn.BatchNorm3d(num_in), nn.ReLU(inplace=True), nn.Conv3d(num_in, num_out, 3, padding=1), nn.BatchNorm3d(num_out), nn.ReLU(inplace=True))
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x1, x2], dim=1)
x = self.DecodeConv(x)
return x |
class TestLevels(unittest.TestCase):
TEST_NET = '\nlayer {\n name: "data"\n type: "DummyData"\n top: "data"\n dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }\n}\nlayer {\n name: "NoLevel"\n type: "InnerProduct"\n bottom: "data"\n top: "NoLevel"\n inner_product_param { num_output: 1 }\n}\nlayer {\n name: "Level0Only"\n type: "InnerProduct"\n bottom: "data"\n top: "Level0Only"\n include { min_level: 0 max_level: 0 }\n inner_product_param { num_output: 1 }\n}\nlayer {\n name: "Level1Only"\n type: "InnerProduct"\n bottom: "data"\n top: "Level1Only"\n include { min_level: 1 max_level: 1 }\n inner_product_param { num_output: 1 }\n}\nlayer {\n name: "Level>=0"\n type: "InnerProduct"\n bottom: "data"\n top: "Level>=0"\n include { min_level: 0 }\n inner_product_param { num_output: 1 }\n}\nlayer {\n name: "Level>=1"\n type: "InnerProduct"\n bottom: "data"\n top: "Level>=1"\n include { min_level: 1 }\n inner_product_param { num_output: 1 }\n}\n'
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+')
self.f.write(self.TEST_NET)
self.f.flush()
def tearDown(self):
self.f.close()
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if ('data' not in b)]
self.assertEqual(net_blobs, blobs)
def test_0(self):
net = caffe.Net(self.f.name, caffe.TEST)
self.check_net(net, ['NoLevel', 'Level0Only', 'Level>=0'])
def test_1(self):
net = caffe.Net(self.f.name, caffe.TEST, level=1)
self.check_net(net, ['NoLevel', 'Level1Only', 'Level>=0', 'Level>=1']) |
def crop_video(sub_set, video, crop_path, instanc_size):
video_crop_base_path = join(crop_path, sub_set, video)
if (not isdir(video_crop_base_path)):
makedirs(video_crop_base_path)
sub_set_base_path = join(ann_base_path, sub_set)
xmls = sorted(glob.glob(join(sub_set_base_path, video, '*.xml')))
for xml in xmls:
xmltree = ET.parse(xml)
objects = xmltree.findall('object')
objs = []
filename = xmltree.findall('filename')[0].text
im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))
avg_chans = np.mean(im, axis=(0, 1))
for object_iter in objects:
trackid = int(object_iter.find('trackid').text)
bndbox = object_iter.find('bndbox')
bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text), int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
(z, x) = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(filename), trackid)), z)
cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x) |
def _prepare_worker(worker, driver_path, args, partitions, search):
create_mpi_script(driver_path, args, worker['hostname'], worker['gpus'], partitions, search) |
def register_Ns3LteRrcSapHandoverPreparationInfo_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRrcSap::HandoverPreparationInfo const &', 'arg0')])
cls.add_instance_attribute('asConfig', 'ns3::LteRrcSap::AsConfig', is_const=False)
return |
class Problem(IterableDataset):
name = NotImplemented
dependencies = {}
symbols = ['<PAD>', '<GO>', '<STOP>', '=']
def __init__(self, paradigm, vocab, config):
super().__init__()
assert (paradigm is not None)
self.paradigm = paradigm
self.vocab = vocab
self.config = config
def __iter__(self):
return self
def __next__(self):
(x, y, label) = self.solve(self.generate(), self.paradigm)
return (self.vocab(x), self.vocab(y), label)
def __repr__(self):
r = f'{self.__class__.__name__}('
r += ', '.join([f'{k}={v}' for (k, v) in self.config.items()])
r += ')'
return r
def generate(self):
pass
def question(args):
pass
def thought(args) -> list[T]:
pass
def answer(args):
pass
def max_config(config1, config2):
if ((config1 is None) or (config1['max_digits'] < config2['max_digits'])):
return config2
else:
return config1
def solve(cls, args, paradigm):
(x, y, label) = Problem._init_question_xyl(cls.question(args))
tail_recursion = False
if (paradigm == 'wt'):
pass
elif (paradigm == 'rot'):
for (sub_cls, sub_args, t_type) in cls.thought(args):
t_q = sub_cls.question(sub_args)
if (t_type == 'tail'):
tail_recursion = True
t_a = None
else:
assert (not tail_recursion), 'Tail thought is not at the end'
t_a = sub_cls.answer(sub_args)
Problem._add_thought_xyl(t_q, t_a, x, y, label)
elif (paradigm == 'cot'):
t = _flatten_thought(cls, args)
x.extend(t)
y.extend(t)
label.extend(([Label.T] * len(t)))
else:
raise ValueError(f'Unsupported paradigm {paradigm}')
if (not tail_recursion):
Problem._add_answer_xyl(cls.answer(args), x, y, label)
return (x, y, label)
def _init_question_xyl(question) -> tuple[(list[str], list[str], list[int])]:
x = tokenizer(question)
y = x[1:]
label = ([Label.Q] * len(y))
return (x, y, label)
def _add_answer_xyl(answer, x, y, label):
answer = tokenizer(answer)
x += answer[:(- 1)]
y += answer
label += ([Label.A] * len(answer))
def _add_thought_xyl(t_q, t_a, x, y, label):
t_q = tokenizer(t_q)
if (t_a is None):
t_q[0] = '<TAIL>'
x += t_q
y += (t_q + ['<THINK>'])
label += ([Label.T] * (len(t_q) + 1))
if (t_a is not None):
t_a = tokenizer(t_a)
x += t_a
y += (['<PAD>'] * (len(t_a) - 1))
label += ([Label.PAD] * (len(t_a) - 1))
def get_train_loader(self, batch_size, num_workers=1, collate_fn=collate_by_len):
return DataLoader(self, batch_size, collate_fn=collate_fn, pin_memory=True, num_workers=num_workers)
def enum_args(self):
max_num = (10 ** self.config['max_digits'])
return product(range(max_num), range(max_num))
def get_unique_args(self, size):
unique_args = set()
for _ in range((size * 1000)):
if (len(unique_args) == size):
break
unique_args.add(self.generate())
return unique_args
def split_qta(x: Union[(list, Tensor)], y: Union[(list, Tensor)], label: Union[(list, Tensor)]):
if (not isinstance(label, Tensor)):
label = torch.tensor(label)
len_q = ((label == Label.Q).sum() + 1)
len_a = (label == Label.A).sum()
question = x[:len_q]
thought = x[len_q:((- len_a) + 1)]
answer = y[(- len_a):]
return (question, thought, answer)
def log10_uniform(log10_a, log10_b):
return (10 ** ((random.random() * (log10_b - log10_a)) + log10_a))
def log_randrange(a, b, offset=3):
return int((Problem.log10_uniform(math.log10((a + offset)), math.log10((b + offset))) - offset))
def sample_positive_fraction(max_digit, reduce=False, zero=False):
if zero:
numer = Problem.log_randrange(0, max_digit)
else:
numer = Problem.log_randrange(1, max_digit)
denom = Problem.log_randrange(1, max_digit)
if reduce:
gcd = math.gcd(numer, denom)
numer = (numer // gcd)
denom = (denom // gcd)
return (numer, denom)
def sample_fraction(max_digit, reduce=False, zero=False):
(numer, denom) = Problem.sample_positive_fraction(max_digit, reduce, zero)
if (random.random() < 0.5):
numer = (- numer)
return (numer, denom)
def sample_linear_2d(max_digit, min_num=0):
max_coef = (10 ** max_digit)
x_coef = Problem.log_randrange(min_num, max_coef)
x_coef = Problem.assign_sign(x_coef)
y_coef = Problem.log_randrange(min_num, max_coef)
y_coef = Problem.assign_sign(y_coef)
if ((x_coef == 0) and (y_coef == 0)):
return Problem.sample_linear_2d(max_digit)
const = Problem.log_randrange(0, max_coef)
const = Problem.assign_sign(const)
return (x_coef, y_coef, const)
def assign_sign(arg):
if (random.random() < 0.5):
return arg
else:
return (- arg)
@classmethod
def required_symbols(cls, recurse=True):
dep_symbols = []
dep_symbols.extend(cls.symbols)
if recurse:
for dep in cls.dependencies:
dep_symbols.extend(dep.required_symbols(recurse=True))
return dep_symbols
@classmethod
def recursive_dependencies(cls):
dep = [dep.recursive_dependencies() for dep in cls.dependencies]
return list(dict.fromkeys(chain(*dep, cls.dependencies))) |
def _materialize_mask_slice(mask, i, j, QPos, KPos, block_size):
return materialize_mask(mask, QPos, KPos, q_slice=hax.ds.block(i, block_size), k_slice=hax.ds.block(j, block_size)) |
@pytest.mark.parametrize('content_type, expected', ((True, SCHEMA_LOADING_ERROR), (None, SCHEMA_LOADING_ERROR), ('application/json', SCHEMA_SYNTAX_ERROR), ('application/x-yaml', SCHEMA_SYNTAX_ERROR)))
def test_invalid_content_type(content_type, expected: str):
content = '\n<html>\n<style>\n html {\n margin: 0;\n background: #fafafa;\n }\n</style>\n<html>\n '
response = Response(response=content)
if (content_type is None):
del response.headers['Content-Type']
elif (content_type is not True):
response.headers['Content-Type'] = content_type
path = '/openapi/'
handler =
handler.respond_with_response(response)
schema_url =
with pytest.raises(SchemaError, match=expected):
schemathesis.from_uri(schema_url) |
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_preserve_output(dtype):
image = np.arange(9, dtype=dtype).reshape((3, 3))
output = np.zeros_like(image, dtype=dtype)
gaussian_image = gaussian(image, sigma=1, output=output, preserve_range=True)
assert (gaussian_image is output) |
def test_nokeepdims_mask1():
mask = ak.index.Index8(np.array([False, False, False, True, False, False, True, True, False, False, False, False, False, True, False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False]))
content = ak.contents.ByteMaskedArray(mask, ak.contents.NumpyArray(np.arange(((2 * 3) * 5), dtype=np.int64)), valid_when=False)
regular = ak.contents.RegularArray(content, 5, zeros_length=0)
listoffset = regular.to_ListOffsetArray64(False)
regular_regular = ak.contents.RegularArray(regular, 3, zeros_length=0)
listoffset_regular = regular_regular.to_ListOffsetArray64(False)
regular_listoffset = ak.contents.RegularArray(listoffset, 3, zeros_length=0)
listoffset_listoffset = regular_listoffset.to_ListOffsetArray64(False)
assert (str(ak.highlevel.Array(listoffset_listoffset).type) == '2 * var * var * ?int64')
axis1 = ak.operations.sum(listoffset_listoffset, axis=(- 1))
axis2 = ak.operations.sum(listoffset_listoffset, axis=(- 2))
axis3 = ak.operations.sum(listoffset_listoffset, axis=(- 3))
assert (str(ak.highlevel.Array(axis1).type) == '2 * var * int64')
assert (str(ak.highlevel.Array(axis2).type) == '2 * var * int64')
assert (str(ak.highlevel.Array(axis3).type) == '3 * var * int64')
assert (str(ak.highlevel.Array(listoffset_regular).type) == '2 * var * 5 * ?int64')
axis1 = ak.operations.sum(listoffset_regular, axis=(- 1))
axis2 = ak.operations.sum(listoffset_regular, axis=(- 2))
axis3 = ak.operations.sum(listoffset_regular, axis=(- 3))
assert (str(ak.highlevel.Array(axis1).type) == '2 * var * int64')
assert (str(ak.highlevel.Array(axis2).type) == '2 * 5 * int64')
assert (str(ak.highlevel.Array(axis3).type) == '3 * 5 * int64')
assert (str(ak.highlevel.Array(regular_listoffset).type) == '2 * 3 * var * ?int64')
axis1 = ak.operations.sum(regular_listoffset, axis=(- 1))
axis2 = ak.operations.sum(regular_listoffset, axis=(- 2))
axis3 = ak.operations.sum(regular_listoffset, axis=(- 3))
assert (str(ak.highlevel.Array(axis1).type) == '2 * 3 * int64')
assert (str(ak.highlevel.Array(axis2).type) == '2 * var * int64')
assert (str(ak.highlevel.Array(axis3).type) == '3 * var * int64')
assert (str(ak.highlevel.Array(regular_regular).type) == '2 * 3 * 5 * ?int64')
axis1 = ak.operations.sum(regular_regular, axis=(- 1))
axis2 = ak.operations.sum(regular_regular, axis=(- 2))
axis3 = ak.operations.sum(regular_regular, axis=(- 3))
assert (str(ak.highlevel.Array(axis1).type) == '2 * 3 * int64')
assert (str(ak.highlevel.Array(axis2).type) == '2 * 5 * int64')
assert (str(ak.highlevel.Array(axis3).type) == '3 * 5 * int64') |
def test_stl_pass_by_pointer(msg):
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer()
assert (msg(excinfo.value) == '\n stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:\n 1. (v: List[int] = None) -> List[int]\n\n Invoked with:\n ')
with pytest.raises(TypeError) as excinfo:
m.stl_pass_by_pointer(None)
assert (msg(excinfo.value) == '\n stl_pass_by_pointer(): incompatible function arguments. The following argument types are supported:\n 1. (v: List[int] = None) -> List[int]\n\n Invoked with: None\n ')
assert (m.stl_pass_by_pointer([1, 2, 3]) == [1, 2, 3]) |
def steepest_descent(Av, b, x0, num_iterations, debug=False):
Ax = Av(x0)
r = [(b[i] - Ax[i]) for i in range(len(x0))]
for i in range(num_iterations):
rTr = np.sum([np.sum((r[k] * r[k])) for k in range(len(x0))])
Ar = Av(r)
alpha = (rTr / np.sum([np.sum((r[k] * Ar[k])) for k in range(len(x0))]))
x0 = [(x0[k] + (alpha * r[k])) for k in range(len(x0))]
r = [(r[k] - (alpha * Ar[k])) for k in range(len(x0))]
if debug:
print(f1(x0, Av, b), f2(x0, Av, b), '\n\n')
return x0 |
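A hedged usage sketch for the solver above: Av applies a symmetric positive-definite matrix to a one-block list of vectors, and debug is left off because the f1/f2 helpers it prints are defined elsewhere:

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = [np.array([1.0, 2.0])]
x = steepest_descent(lambda xs: [A @ xs[0]], b, [np.zeros(2)], num_iterations=50)
# x[0] now approximates np.linalg.solve(A, b[0])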
def subsample_classes(dataset, include_classes=range(160)):
include_classes_cars = (np.array(include_classes) + 1)
cls_idxs = [x for (x, t) in enumerate(dataset.target) if (t in include_classes_cars)]
target_xform_dict = {}
for (i, k) in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
return dataset |
def test_dump_and_load():
plan = generate_wdm_2d()
serialized_plan = optplan.dumps(plan)
deserialized_plan = optplan.loads(serialized_plan) |
@test_utils.test()
def test_return_struct_field():
tp = ti.types.struct(a=ti.i32)
f = tp.field(shape=1)
@ti.func
def bar() -> tp:
return f[0]
@ti.kernel
def foo() -> tp:
return bar()
assert (foo().a == 0) |
def register_Ns3LteEnbCphySapUser_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteEnbCphySapUser const &', 'arg0')])
return |
class Accuracy(nn.Module):
def __init__(self, topk=(1,), thresh=None):
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
return accuracy(pred, target, self.topk, self.thresh) |
class CamVid(Dataset):
CLASSES = ['Sky', 'Building', 'Pole', 'Road', 'Pavement', 'Tree', 'SignSymbol', 'Fence', 'Car', 'Pedestrian', 'Bicyclist']
CLASSES_ALL = ['Wall', 'Animal', 'Archway', 'Bicyclist', 'Bridge', 'Building', 'Car', 'CarLuggage', 'Child', 'Pole', 'Fence', 'LaneDrive', 'LaneNonDrive', 'MiscText', 'Motorcycle/Scooter', 'OtherMoving', 'ParkingBlock', 'Pedestrian', 'Road', 'RoadShoulder', 'Sidewalk', 'SignSymbol', 'Sky', 'SUV/PickupTruck', 'TrafficCone', 'TrafficLight', 'Train', 'Tree', 'Truck/Bus', 'Tunnel', 'VegetationMisc']
PALETTE = torch.tensor([[128, 128, 128], [128, 0, 0], [192, 192, 128], [128, 64, 128], [0, 0, 192], [128, 128, 0], [192, 128, 128], [64, 64, 128], [64, 0, 128], [64, 64, 0], [0, 128, 192]])
PALETTE_ALL = torch.tensor([[64, 192, 0], [64, 128, 64], [192, 0, 128], [0, 128, 192], [0, 128, 64], [128, 0, 0], [64, 0, 128], [64, 0, 192], [192, 128, 64], [192, 192, 128], [64, 64, 128], [128, 0, 192], [192, 0, 64], [128, 128, 64], [192, 0, 192], [128, 64, 64], [64, 192, 128], [64, 64, 0], [128, 64, 128], [128, 128, 192], [0, 0, 192], [192, 128, 128], [128, 128, 128], [64, 128, 192], [0, 0, 64], [0, 64, 64], [192, 64, 128], [128, 128, 0], [192, 128, 192], [64, 0, 64], [192, 192, 0]])
def __init__(self, root: str, split: str='train', transform=None) -> None:
super().__init__()
assert (split in ['train', 'val', 'test'])
self.split = split
self.transform = transform
self.n_classes = len(self.CLASSES)
self.ignore_label = (- 1)
img_path = (Path(root) / split)
self.files = list(img_path.glob('*.png'))
if (not self.files):
raise Exception(f'No images found in {img_path}')
print(f'Found {len(self.files)} {split} images.')
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, index: int) -> Tuple[(Tensor, Tensor)]:
img_path = str(self.files[index])
lbl_path = str(self.files[index]).replace(self.split, (self.split + '_labels')).replace('.png', '_L.png')
image = io.read_image(img_path)
label = io.read_image(lbl_path)
if self.transform:
(image, label) = self.transform(image, label)
return (image, (self.encode(label).long() - 1))
def encode(self, label: Tensor) -> Tensor:
label = label.permute(1, 2, 0)
mask = torch.zeros(label.shape[:(- 1)])
for (index, color) in enumerate(self.PALETTE):
bool_mask = torch.eq(label, color)
class_map = torch.all(bool_mask, dim=(- 1))
mask[class_map] = (index + 1)
return mask |
def networkx_to_sparsegraph(nx_graph: Union[('nx.Graph', 'nx.DiGraph')], label_name: str=None, sparse_node_attrs: bool=True, sparse_edge_attrs: bool=True) -> 'SparseGraph':
import networkx as nx
int_names = True
for node in nx_graph.nodes:
int_names &= isinstance(node, int)
if int_names:
node_names = None
else:
node_names = np.array(nx_graph.nodes)
nx_graph = nx.convert_node_labels_to_integers(nx_graph)
adj_matrix = nx.adjacency_matrix(nx_graph)
attrs = set()
for (_, node_data) in nx_graph.nodes().data():
attrs.update(node_data.keys())
if (label_name is None):
labels = None
else:
if (label_name not in attrs):
raise ValueError("No attribute with label name '{}' found.".format(label_name))
attrs.remove(label_name)
labels = [0 for _ in range(nx_graph.number_of_nodes())]
if (len(attrs) > 0):
all_integer = all((isinstance(attr, int) for attr in attrs))
if all_integer:
attr_names = None
attr_mapping = None
else:
attr_names = np.array(list(attrs))
attr_mapping = {k: i for (i, k) in enumerate(attr_names)}
if sparse_node_attrs:
attr_matrix = sp.lil_matrix((nx_graph.number_of_nodes(), len(attr_names)), dtype=np.float32)
else:
attr_matrix = np.zeros((nx_graph.number_of_nodes(), len(attr_names)), dtype=np.float32)
else:
attr_matrix = None
attr_names = None
for (inode, node_attrs) in nx_graph.nodes.data():
for (key, val) in node_attrs.items():
if (key == label_name):
labels[inode] = val
else:
if (not isinstance(val, Number)):
if (node_names is None):
raise ValueError("Node {} has attribute '{}' with value '{}', which is not a number.".format(inode, key, val))
else:
raise ValueError("Node '{}' has attribute '{}' with value '{}', which is not a number.".format(node_names[inode], key, val))
if (attr_mapping is None):
attr_matrix[(inode, key)] = val
else:
attr_matrix[(inode, attr_mapping[key])] = val
if ((attr_matrix is not None) and sparse_node_attrs):
attr_matrix = attr_matrix.tocsr()
if (labels is None):
class_names = None
else:
try:
labels = np.array(labels, dtype=np.float32)
class_names = None
except ValueError:
class_names = np.unique(labels)
class_mapping = {k: i for (i, k) in enumerate(class_names)}
labels_int = np.empty(nx_graph.number_of_nodes(), dtype=np.float32)
for (inode, label) in enumerate(labels):
labels_int[inode] = class_mapping[label]
labels = labels_int
return SparseGraph(adj_matrix=adj_matrix, attr_matrix=attr_matrix, labels=labels, node_names=node_names, attr_names=attr_names, class_names=class_names, metadata=None) |
def is_lyndon(w):
i = 0
for let in w[1:]:
if (w[i] < let):
i = 0
elif (w[i] == let):
i += 1
else:
return False
return (i == 0) |
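Two quick checks of the predicate above (illustrative only): a word is Lyndon when it is strictly smaller than every proper rotation of itself.

assert is_lyndon('aab') is True   # its rotations 'aba' and 'baa' are both larger
assert is_lyndon('aba') is False  # the rotation 'aab' is smaller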
@high_level_function()
def with_field(array, what, where=None, *, highlevel=True, behavior=None, attrs=None):
(yield (array, what))
return _impl(array, what, where, highlevel, behavior, attrs) |
class CharacterTextSlotEncoder(_BaseTextEncoder):
def __init__(self, vocab_list, slots):
self._vocab_list = (['<pad>', '<eos>', '<unk>'] + vocab_list)
self._vocab2idx = {v: idx for (idx, v) in enumerate(self._vocab_list)}
self.slots = slots
self.slot2id = {self.slots[i]: (i + len(self._vocab_list)) for i in range(len(self.slots))}
self.id2slot = {(i + len(self._vocab_list)): self.slots[i] for i in range(len(self.slots))}
def encode(self, s):
(sent, iobs) = s.strip('\r\n ').split('\t')
sent = sent.split(' ')[1:(- 1)]
iobs = iobs.split(' ')[1:(- 1)]
tokens = []
for (i, (wrd, iob)) in enumerate(zip(sent, iobs)):
if (wrd in '?!.,;-'):
continue
if (wrd == '&'):
wrd = 'AND'
if ((iob != 'O') and ((i == 0) or (iobs[(i - 1)] != iob))):
tokens.append(self.slot2id[('B-' + iob)])
tokens += [self.vocab_to_idx(v) for v in wrd]
if ((iob != 'O') and ((i == (len(sent) - 1)) or (iobs[(i + 1)] != iob))):
tokens.append(self.slot2id[('E-' + iob)])
if (i == (len(sent) - 1)):
tokens.append(self.eos_idx)
else:
tokens.append(self.vocab_to_idx(' '))
return tokens
def decode(self, idxs, ignore_repeat=False):
vocabs = []
for (t, idx) in enumerate(idxs):
v = self.idx_to_vocab(idx)
if ((idx == self.pad_idx) or (ignore_repeat and (t > 0) and (idx == idxs[(t - 1)]))):
continue
elif (idx == self.eos_idx):
break
else:
vocabs.append(v)
return ''.join(vocabs)
@classmethod
def load_from_file(cls, vocab_file, slots_file):
with open(vocab_file, 'r') as f:
vocab_list = [line.strip('\r\n') for line in f]
org_slots = open(slots_file).read().split('\n')
slots = []
for slot in org_slots[1:]:
slots.append(('B-' + slot))
slots.append(('E-' + slot))
return cls(vocab_list, slots)
def vocab_size(self):
return (len(self._vocab_list) + len(self.slots))
def token_type(self):
return 'character-slot'
def vocab_to_idx(self, vocab):
return self._vocab2idx.get(vocab, self.unk_idx)
def idx_to_vocab(self, idx):
idx = int(idx)
if (idx < len(self._vocab_list)):
return self._vocab_list[idx]
else:
token = self.id2slot[idx]
if (token[0] == 'B'):
return (token + ' ')
elif (token[0] == 'E'):
return (' ' + token)
else:
raise ValueError('id2slot get:', token) |
def make_sail_logger(exp_name: str, label: str, save_data: bool=True, save_dir: str='./logs', use_tb: bool=False, tb_dir: Optional[str]=None, use_wb: bool=False, config: Optional[dict]=None, time_delta: float=1.0, asynchronous: bool=False, print_fn: Optional[Callable[([str], None)]]=None, serialize_fn: Optional[Callable[([Mapping[(str, Any)]], str)]]=base.to_numpy) -> base.Logger:
if (not print_fn):
print_fn = print
terminal_logger = terminal.TerminalLogger(label=label, print_fn=print_fn)
loggers = [terminal_logger]
if save_data:
os.makedirs(save_dir, exist_ok=True)
fd = open(os.path.join(save_dir, f'{exp_name}.csv'), 'a')
loggers.append(csv.CSVLogger(directory_or_file=fd, label=exp_name, add_uid=False, flush_every=2))
if use_wb:
wb_logger = WBLogger(scope=label)
wb_logger = filters.TimeFilter(wb_logger, time_delta)
loggers.append(wb_logger)
if use_tb:
if (tb_dir is None):
tb_dir = './tblogs'
loggers.append(TFSummaryLogger(tb_dir, label))
logger = aggregators.Dispatcher(loggers, serialize_fn)
logger = filters.NoneFilter(logger)
if config:
logger = ResultFilter(logger, game_name=config['game_name'])
if asynchronous:
logger = async_logger.AsyncLogger(logger)
logger = filters.TimeFilter(logger, 5.0)
return logger |
def pix2coord(x, y, cdim, imgdim, origin='upper'):
cx = (((x / imgdim[0]) * (cdim[1] - cdim[0])) + cdim[0])
if (origin == 'lower'):
cy = (((y / imgdim[1]) * (cdim[3] - cdim[2])) + cdim[2])
else:
cy = (cdim[3] - ((y / imgdim[1]) * (cdim[3] - cdim[2])))
return (cx, cy) |
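A worked example of the mapping above, assuming cdim = (xmin, xmax, ymin, ymax) of the target frame and imgdim = (width, height) in pixels:

cdim = (0.0, 10.0, 0.0, 5.0)
imgdim = (100, 50)
pix2coord(50, 0, cdim, imgdim)                  # -> (5.0, 5.0): the top row maps to ymax with the default origin='upper'
pix2coord(50, 0, cdim, imgdim, origin='lower')  # -> (5.0, 0.0)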
def index_fill(g, self, dim, index, value):
dim_value = sym_help._parse_arg(dim, 'i')
if (sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK):
return g.op('ATen', self, index, value, dim_i=dim_value, operator_s='index_fill')
(expanded_index_shape, expanded_index) = sym_help._index_fill_reshape_helper(g, self, dim, index)
value = sym_help._maybe_get_scalar(value)
value = sym_help._if_scalar_type_as(g, value, self)
expanded_value = expand(g, value, expanded_index_shape, None)
return scatter(g, self, dim, expanded_index, expanded_value) |
class set_detect_anomaly(object):
def __init__(self, mode: bool) -> None:
self.prev = torch.is_anomaly_enabled()
torch.set_anomaly_enabled(mode)
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> None:
torch.set_anomaly_enabled(self.prev) |
class LayerSlowFast(SlowFast):
args = {'slowfast_config': 'Kinetics/c2/SLOWFAST_8x8_R50', 'num_layers': 5}
output_dims = [88, 352, 704, 1408, 2304]
def __init__(self, args):
super().__init__(args)
self.num_layers = args.num_layers
def _forward(self, x):
model = self.model
xs = []
x = model.s1(x)
x = model.s1_fuse(x)
xs.append([v.clone().detach() for v in x])
x = model.s2(x)
x = model.s2_fuse(x)
for pathway in range(model.num_pathways):
pool = getattr(model, 'pathway{}_pool'.format(pathway))
x[pathway] = pool(x[pathway])
xs.append([v.clone().detach() for v in x])
x = model.s3(x)
x = model.s3_fuse(x)
xs.append([v.clone().detach() for v in x])
x = model.s4(x)
x = model.s4_fuse(x)
xs.append([v.clone().detach() for v in x])
x = model.s5(x)
xs.append([v.clone().detach() for v in x])
head = self.model.head
assert (len(x) == head.num_pathways), 'Input tensor does not contain {} pathway'.format(head.num_pathways)
def get_pool(x):
pool_out = []
for pathway in range(head.num_pathways):
m = getattr(head, 'pathway{}_avgpool'.format(pathway))
pool_out.append(m(x[pathway]))
x = torch.cat(pool_out, 1)
x = x.permute((0, 2, 3, 4, 1))
return x
xs = [get_pool(x) for x in xs]
return xs
def forward(self, data, no_grad=True):
x = data
if no_grad:
for (i, _) in enumerate(x):
x[i].requires_grad_(False)
xs = self._forward(x)
xs = [x.mean([1, 2, 3]) for x in xs]
return xs |
def save_checkpoint(state, is_best, filename='w_gt_checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'w_gt_model_best.pth.tar') |
class FieldAwareFactorizationMachineModel(keras.Model):
def __init__(self, num_users, num_items, embed_mf_size, lambda_weights, learning_rate=0.01, name='FFM', **kwargs):
super().__init__(name=name, **kwargs)
tf.random.set_seed(42)
self.num_users = num_users
self.num_items = num_items
self.embed_mf_size = embed_mf_size
self.lambda_weights = lambda_weights
self.initializer = tf.initializers.GlorotUniform()
self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_MF', embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), dtype=tf.float32)
self.item_mf_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), embeddings_initializer=self.initializer, name='I_MF', dtype=tf.float32)
self.u_bias = keras.layers.Embedding(input_dim=self.num_users, output_dim=1, embeddings_initializer=self.initializer, name='B_U_MF', dtype=tf.float32)
self.i_bias = keras.layers.Embedding(input_dim=self.num_items, output_dim=1, embeddings_initializer=self.initializer, name='B_I_MF', dtype=tf.float32)
self.bias_ = tf.Variable(0.0, name='GB')
self.user_mf_embedding(0)
self.item_mf_embedding(0)
self.u_bias(0)
self.i_bias(0)
self.loss = keras.losses.BinaryCrossentropy()
self.optimizer = tf.optimizers.Adam(learning_rate)
def call(self, inputs, training=None, mask=None):
(user, item) = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e = self.item_mf_embedding(item)
mf_output = tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1))
return (((mf_output + self.bias_) + self.u_bias(user)) + self.i_bias(item))
def train_step(self, batch):
(user, pos, label) = batch
with tf.GradientTape() as tape:
output = self(inputs=(user, pos), training=True)
loss = self.loss(label, output)
grads = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss
def predict(self, inputs, training=False, **kwargs):
output = self.call(inputs=inputs, training=training)
return output
def get_recs(self, inputs, training=False, **kwargs):
(user, item) = inputs
user_mf_e = self.user_mf_embedding(user)
item_mf_e = self.item_mf_embedding(item)
mf_output = tf.expand_dims(tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1)), (- 1))
return tf.squeeze((((mf_output + self.bias_) + self.u_bias(user)) + self.i_bias(item)))
def get_top_k(self, preds, train_mask, k=100):
return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True) |
def test_get_actions():
policy = FixedPolicy(None, np.array([1, 2, 3]))
assert (policy.get_actions(np.array([0]).reshape(1, 1))[0] == 1)
assert (policy.get_action(np.array([0]))[0] == 2)
assert (policy.get_action(np.array([0]))[0] == 3)
with pytest.raises(IndexError):
policy.get_action(np.ndarray([0])) |
def EmptyArray_pad(self, length, axis=0):
if (axis < 0):
raise NotImplementedError
else:
indxarray = []
for i in range(length):
indxarray.append((- 1))
return IndexedOptionArray(indxarray, self) |
class BasicScatter(EmObjective):
def __init__(self, sim, grid, FF_cond, E_background=None):
super().__init__(sim)
self.grid = grid
self.pf = 2
self.E_background = E_background
self._compute_objective(FF_cond)
def _compute_objective(self, FF_cond):
(self.points, self.triangles) = farfield.make_sphere_point(4)
if ('box_center' in FF_cond):
FF_arg = {'points': self.points, 'omegas': self.sim.omega, 'grid': self.grid, 'dxes': self.sim.dxes, 'box_center': FF_cond['box_center'], 'box_size': FF_cond['box_size'], 'eps_0': 1}
self.FF_projection_matrix = farfield.make_near2farfield_box_matrix(**FF_arg)
elif ('pos' in FF_cond):
FF_arg = {'points': self.points, 'omegas': self.sim.omega, 'grid': self.grid, 'dxes': self.sim.dxes, 'pos': FF_cond['pos'], 'width': FF_cond['width'], 'polarity': 1, 'eps_0': 1}
self.FF_projection_matrix = farfield.make_near2farfield_matrix(**FF_arg)
n_p = self.points.shape[0]
sum_matrix = scipy.sparse.hstack([scipy.sparse.csr_matrix((n_p, n_p)), scipy.sparse.eye(n_p), scipy.sparse.eye(n_p)])
points2triangles = farfield.points2triangles_averaging_matrix(self.points, self.triangles)
scatter_area_vector = farfield.area_selection_vector(self.points, self.triangles, [0, np.pi], [(- np.pi), np.pi])
directed_area_vector = farfield.area_selection_vector(self.points, self.triangles, FF_cond['th_bounds'], FF_cond['ph_bounds'])
self.scattered_power_vector = ((scatter_area_vector @ points2triangles) @ sum_matrix)
self.directed_power_vector = ((directed_area_vector @ points2triangles) @ sum_matrix)
self.objective_alpha = 1
self.directivity = 1
self.FarField = np.ones((3 * n_p))
def calculate_partial_df_dx(self, x, z):
if (self.E_background is not None):
E = (x - self.E_background)
else:
E = x
FF_mat = self.FF_projection_matrix
FarField = (FF_mat @ E)
dFarField_square_viol_dx = (scipy.sparse.diags(np.conj(FarField)) @ FF_mat)
n_points = self.points.size
sum_matrix = scipy.sparse.hstack([scipy.sparse.csr_matrix((n_points, n_points)), scipy.sparse.eye(n_points), scipy.sparse.eye(n_points)])
scattered_power = (self.scattered_power_vector @ (np.abs(FarField) ** 2))
directed_power = (self.directed_power_vector @ (np.abs(FarField) ** 2))
dscattered_power_viol_dx = (self.scattered_power_vector @ dFarField_square_viol_dx)
ddirected_power_viol_dx = (self.directed_power_vector @ dFarField_square_viol_dx)
directivity = (directed_power / scattered_power)
ddirectivity_viol_dx = (((ddirected_power_viol_dx * scattered_power) - (directed_power * dscattered_power_viol_dx)) / (scattered_power ** 2))
alpha = self.objective_alpha
f0 = ((directivity < alpha) * (alpha - directivity))
df0_viol_dx = (((directivity < alpha) * (- 1)) * ddirectivity_viol_dx)
df_viol_dx = ((self.pf * (f0 ** (self.pf - 1))) * df0_viol_dx)
return df_viol_dx
def calculate_f(self, x, z):
if (self.E_background is not None):
E = (x - self.E_background)
else:
E = x
self.FarField = (self.FF_projection_matrix @ E)
scattered_power = (self.scattered_power_vector @ (np.abs(self.FarField) ** 2))
directed_power = (self.directed_power_vector @ (np.abs(self.FarField) ** 2))
self.directivity = (directed_power / scattered_power)
alpha = self.objective_alpha
f0 = ((self.directivity < alpha) * (alpha - self.directivity))
f = np.sum((f0 ** self.pf))
return f
def get_electric_fields(self, param: Parametrization):
return fdfd_tools.unvec(self.sim.simulate(param.get_structure()), self.sim.get_dims()) |
class WingLoss(_Loss):
def __init__(self, width=5, curvature=0.5, reduction='mean'):
super(WingLoss, self).__init__(reduction=reduction)
self.width = width
self.curvature = curvature
def forward(self, prediction, target):
return F.wing_loss(prediction, target, self.width, self.curvature, self.reduction) |
class UploadCodeAsArtifact(Callback):
def __init__(self, code_dir: str, use_git: bool=True):
self.code_dir = code_dir
self.use_git = use_git
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact('project-source', type='code')
if self.use_git:
git_dir_path = Path(subprocess.check_output(['git', 'rev-parse', '--git-dir']).strip().decode('utf8')).resolve()
for path in Path(self.code_dir).resolve().rglob('*'):
if (path.is_file() and (not str(path).startswith(str(git_dir_path))) and (subprocess.run(['git', 'check-ignore', '-q', str(path)]).returncode == 1)):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
else:
for path in Path(self.code_dir).resolve().rglob('*.py'):
code.add_file(str(path), name=str(path.relative_to(self.code_dir)))
experiment.log_artifact(code) |
class CaptureStd():
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = 'error: CaptureStd context is unfinished yet, called too early'
else:
self.out_buf = None
self.out = 'not capturing stdout'
if err:
self.err_buf = StringIO()
self.err = 'error: CaptureStd context is unfinished yet, called too early'
else:
self.err_buf = None
self.err = 'not capturing stderr'
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ''
if self.out_buf:
msg += f'''stdout: {self.out}
'''
if self.err_buf:
msg += f'''stderr: {self.err}
'''
return msg |
def get_label_to_indices_map_2() -> Dict[(str, List[int])]:
contradiction_indices = []
entailment_indices = []
neutral_indices = []
train_inputs_collections = torch.load(constants.MNLI_TRAIN_INPUT_COLLECTIONS_PATH)
for (index, train_inputs) in enumerate(train_inputs_collections):
if (train_inputs['labels'].item() == 0):
contradiction_indices.append(index)
if (train_inputs['labels'].item() == 1):
entailment_indices.append(index)
if (train_inputs['labels'].item() == 2):
neutral_indices.append(index)
return {'contradiction': contradiction_indices, 'entailment': entailment_indices, 'neutral': neutral_indices} |
class MetricSpacesCategory(RegressiveCovariantConstructionCategory):
_functor_category = 'Metric'
@classmethod
def default_super_categories(cls, category):
return Category.join([category.Topological(), super().default_super_categories(category)])
def _repr_object_names(self):
return 'metric {}'.format(self.base_category()._repr_object_names()) |
def register_Ns3UanMacAloha_methods(root_module, cls):
cls.add_constructor([param('ns3::UanMacAloha const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
cls.add_method('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')], is_virtual=True)
cls.add_method('Clear', 'void', [], is_virtual=True)
cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
cls.add_method('GetAddress', 'ns3::Address', [], is_virtual=True)
cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], is_virtual=True)
cls.add_method('SetForwardUpCb', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
return |
def eval_nested(pred, label):
label_total = 0
pred_total = 0
cnt = 0
if (pred is not None):
pred_total += 1
if (label is not None):
label_total += 1
if ((pred is not None) and (label is not None)):
partial_scores = Evaluator.eval_partial_match(pred, label)
cnt += Evaluator.eval_exact_match(pred, label, partial_scores)
return (label_total, pred_total, cnt) |
def save_json_config(config: str, path: str):
with open(path, 'w') as f:
json.dump(config, f) |
def convert_model_to_int32(model_path: str, out_path: str):
print('ONNX INT64 --> INT32 Converter')
print(('Loading Model: ' + model_path))
model = onnx.load_model(model_path)
ch.check_model(model)
opset_version = model.opset_import[0].version
graph = model.graph
init = graph.initializer
params_dict = make_param_dictionary(init)
print('Converting INT64 model params to INT32...')
converted_params = convert_params_to_int32(params_dict)
print('Converting constant INT64 nodes to INT32...')
new_nodes = convert_constant_nodes_to_int32(graph.node)
graph_name = f'{graph.name}-int32'
print('Creating new graph...')
graph_int32 = h.make_graph(new_nodes, graph_name, graph.input, graph.output, initializer=converted_params)
print('Creating new int32 model...')
model_int32 = h.make_model(graph_int32, producer_name='onnx-typecast')
model_int32.opset_import[0].version = opset_version
ch.check_model(model_int32)
print(f'Saving converted model as: {out_path}')
onnx.save_model(model_int32, out_path)
print(f'Done.')
return |
class TestCategoricalGRUPolicy(TfGraphTestCase):
def test_invalid_env(self):
env = GarageEnv(DummyBoxEnv())
with pytest.raises(ValueError):
CategoricalGRUPolicy(env_spec=env.spec)
@pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
def test_get_action_state_include_action(self, obs_dim, action_dim, hidden_dim):
env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
policy = CategoricalGRUPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=True)
policy.reset()
obs = env.reset()
(action, _) = policy.get_action(obs.flatten())
assert env.action_space.contains(action)
(actions, _) = policy.get_actions([obs.flatten()])
for action in actions:
assert env.action_space.contains(action)
@pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
policy = CategoricalGRUPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=True)
policy.reset(do_resets=None)
obs = env.reset()
state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, policy.input_dim))
dist_sym = policy.build(state_input, name='dist_sym').dist
concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
output2 = self.sess.run([dist_sym.probs], feed_dict={state_input: [[concat_obs], [concat_obs]]})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
def test_build_state_not_include_action(self, obs_dim, action_dim, hidden_dim):
env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
policy = CategoricalGRUPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=False)
policy.reset(do_resets=None)
obs = env.reset()
state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, policy.input_dim))
dist_sym = policy.build(state_input, name='dist_sym').dist
output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
output2 = self.sess.run([dist_sym.probs], feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
assert np.array_equal(output1, output2)
@pytest.mark.parametrize('obs_dim, action_dim, hidden_dim, obs_type', [((1,), 1, 4, 'discrete'), ((2,), 2, 4, 'discrete'), ((1, 1), 1, 4, 'discrete'), ((2, 2), 2, 4, 'discrete'), ((1,), 1, 4, 'dict')])
def test_get_action(self, obs_dim, action_dim, hidden_dim, obs_type):
assert (obs_type in ['discrete', 'dict'])
if (obs_type == 'discrete'):
env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
else:
env = GarageEnv(DummyDictEnv(obs_space_type='box', act_space_type='discrete'))
policy = CategoricalGRUPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=False)
policy.reset(do_resets=None)
obs = env.reset()
if (obs_type == 'discrete'):
obs = obs.flatten()
(action, _) = policy.get_action(obs)
assert env.action_space.contains(action)
(actions, _) = policy.get_actions([obs])
for action in actions:
assert env.action_space.contains(action)
def test_is_pickleable(self):
env = GarageEnv(DummyDiscreteEnv(obs_dim=(1,), action_dim=1))
policy = CategoricalGRUPolicy(env_spec=env.spec, state_include_action=False)
obs = env.reset()
policy.model._gru_cell.weights[0].load(tf.ones_like(policy.model._gru_cell.weights[0]).eval())
output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
p = pickle.dumps(policy)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
policy_pickled = pickle.loads(p)
output2 = sess.run([policy_pickled.distribution.probs], feed_dict={policy_pickled.model.input: [[obs.flatten()], [obs.flatten()]]})
assert np.array_equal(output1, output2)
def test_state_info_specs(self):
env = GarageEnv(DummyDiscreteEnv(obs_dim=(10,), action_dim=4))
policy = CategoricalGRUPolicy(env_spec=env.spec, state_include_action=False)
assert (policy.state_info_specs == [])
def test_state_info_specs_with_state_include_action(self):
env = GarageEnv(DummyDiscreteEnv(obs_dim=(10,), action_dim=4))
policy = CategoricalGRUPolicy(env_spec=env.spec, state_include_action=True)
assert (policy.state_info_specs == [('prev_action', (4,))]) |
class TestBlackmanHarris(object):
def test_basic(self):
assert_allclose(windows.blackmanharris(6, False), [6e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])
assert_allclose(windows.blackmanharris(7, sym=False), [6e-05, 0., 0., 0., 0., 0., 0.])
assert_allclose(windows.blackmanharris(6), [6e-05, 0., 0., 0., 0., 6e-05])
assert_allclose(windows.blackmanharris(7, sym=True), [6e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645, 6e-05]) |
def get_command_registry(agent_test_config):
command_registry = CommandRegistry()
enabled_command_categories = [x for x in COMMAND_CATEGORIES if (x not in agent_test_config.disabled_command_categories)]
for command_category in enabled_command_categories:
command_registry.import_commands(command_category)
return command_registry |
@dataclass
class LeanBranchCond:
name: str
cond_var: str
exprs: Optional[Tuple[(str, str)]]
is_eq: bool
assert_rw: List[str] |
def CFiniteSequences(base_ring, names=None, category=None):
if isinstance(base_ring, PolynomialRing_general):
polynomial_ring = base_ring
base_ring = polynomial_ring.base_ring()
if (names is None):
names = ['x']
elif (len(names) > 1):
raise NotImplementedError('Multidimensional o.g.f. not implemented.')
if (category is None):
category = Rings().Commutative()
if (base_ring not in [QQ, ZZ]):
raise ValueError('O.g.f. base not rational.')
polynomial_ring = PolynomialRing(base_ring, names)
return CFiniteSequences_generic(polynomial_ring, category) |
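Typical calls to the factory above (a sketch; QQ and ZZ are the usual SageMath globals assumed to be imported in this module):

C1 = CFiniteSequences(QQ)        # rational o.g.f. in the default variable 'x'
C2 = CFiniteSequences(ZZ, 'y')   # integer base ring with a custom variable name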
def iter_seq(doc_it):
docs = tuple(doc_it)
return (text(docs[0]) if (len(docs) == 1) else _Seq(docs)) |
class Test_get_crops(unittest.TestCase):
def test(self):
(row_crop, col_crop) = utils.get_crops(40, 10, 25, 25, 0.1, 800)
self.assertEqual(row_crop, slice(0, 500))
self.assertEqual(col_crop, slice(150, 650)) |
def normalize_input(a):
if (isinstance(a, tuple) and (len(a) == 2) and isinstance(a[0], tuple) and isinstance(a[1], dict)):
return a
elif isinstance(a, tuple):
return (a, {})
elif isinstance(a, dict):
return (tuple(), a)
else:
return ((a,), {}) |
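The normaliser above maps any call specification onto an (args, kwargs) pair; a few illustrative inputs:

normalize_input(((1, 2), {'k': 3}))  # already normalised -> ((1, 2), {'k': 3})
normalize_input((1, 2))              # positional only    -> ((1, 2), {})
normalize_input({'k': 3})            # keyword only       -> ((), {'k': 3})
normalize_input(5)                   # single value       -> ((5,), {})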
class SSHWorker(Worker):
def __init__(self, name, job_queue, result_queue, host, options):
Worker.__init__(self, name, job_queue, result_queue, options)
self.host = host
self.cwd = os.getcwd()
def run_one(self, c, g):
cmdline = 'ssh -x -t -t {0} "cd {1}; {2}"'.format(self.host, self.cwd, self.get_cmd(c, g))
result = Popen(cmdline, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE).stdout
for line in result.readlines():
if (str(line).find('Cross') != (- 1)):
return float(line.split()[(- 1)][0:(- 1)]) |
class YoungRepresentations_Seminormal(SymmetricGroupRepresentations_class):
_default_ring = QQ
Element = YoungRepresentation_Seminormal
def _repr_(self):
return ('Seminormal representations of the symmetric group of order %s! over %s' % (self._n, self._ring)) |
_connect.numpy.implements('full_like')
def _nep_18_impl(a, fill_value, dtype=None, order=UNSUPPORTED, subok=UNSUPPORTED, shape=UNSUPPORTED):
return full_like(a, fill_value=fill_value, dtype=dtype) |
class FileNotFoundOnZenodo(ZenodoException):
def __init__(self, file_name):
super().__init__(f'File {file_name} not found on Zenodo.', level=None) |
def make_trainer(cfg, network):
network = _wrapper_factory(cfg, network)
return Trainer(network) |
def update_recursive(dict1, dict2):
for (k, v) in dict2.items():
if (k not in dict1):
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v |
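A short example of the in-place recursive merge above:

cfg = {'model': {'dim': 64}, 'lr': 0.1}
update_recursive(cfg, {'model': {'depth': 4}, 'lr': 0.01})
# cfg is now {'model': {'dim': 64, 'depth': 4}, 'lr': 0.01}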
def _unpack_list(list_value):
list_node = list_value.node()
assert (list_node.kind() == 'prim::ListConstruct')
return list(list_node.inputs()) |
def kaiming_uniform(tensor, fan, a):
bound = math.sqrt((6 / ((1 + (a ** 2)) * fan)))
if (tensor is not None):
tensor.data.uniform_((- bound), bound) |
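Illustrative use of the initialiser above (assumes torch and math are imported as elsewhere in these snippets); fan is the layer fan-in and a is the negative slope of the following leaky ReLU:

weight = torch.empty(128, 64)
kaiming_uniform(weight, fan=64, a=math.sqrt(5))  # fills weight in place from U(-bound, bound)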
def get_class_labels(info):
if ('label' not in info.features):
return {}
class_label = info.features['label']
class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
return class_to_idx |
def fixed_classes_uniform_labelings_scores(score_func, n_samples, n_clusters_range, n_classes, n_runs=5):
scores = np.zeros((len(n_clusters_range), n_runs))
labels_a = random_labels(n_samples=n_samples, n_classes=n_classes)
for (i, n_clusters) in enumerate(n_clusters_range):
for j in range(n_runs):
labels_b = random_labels(n_samples=n_samples, n_classes=n_clusters)
scores[(i, j)] = score_func(labels_a, labels_b)
return scores |
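A hedged usage sketch for the scoring loop above. random_labels is a helper the function expects to find in scope (a hypothetical stand-in is shown), and any metric with a (labels_true, labels_pred) signature works as score_func:

from sklearn.metrics import adjusted_rand_score

# Hypothetical stand-in for the random_labels helper used by the function.
rng = np.random.RandomState(0)
random_labels = lambda n_samples, n_classes: rng.randint(0, n_classes, size=n_samples)

scores = fixed_classes_uniform_labelings_scores(adjusted_rand_score, n_samples=200, n_clusters_range=range(2, 6), n_classes=3)
# scores has shape (len(n_clusters_range), n_runs); values stay near 0 for random labelings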
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
train_parser = parser.add_parser('convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')
train_parser.add_argument('--model_type', type=str, required=True, help="Model's type.")
train_parser.add_argument('--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
train_parser.add_argument('--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
train_parser.add_argument('--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
self._logger = logging.get_logger('transformers-cli/converting')
self._logger.info('Loading model {}'.format(model_type))
self._model_type = model_type
self._tf_checkpoint = tf_checkpoint
self._pytorch_dump_output = pytorch_dump_output
self._config = config
self._finetuning_task_name = finetuning_task_name
def run(self):
if (self._model_type == 'albert'):
try:
from transformers.convert_albert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif (self._model_type == 'bert'):
try:
from transformers.convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif (self._model_type == 'funnel'):
try:
from transformers.convert_funnel_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif (self._model_type == 'gpt'):
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif (self._model_type == 'transfo_xl'):
try:
from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
if ('ckpt' in self._tf_checkpoint.lower()):
TF_CHECKPOINT = self._tf_checkpoint
TF_DATASET_FILE = ''
else:
TF_DATASET_FILE = self._tf_checkpoint
TF_CHECKPOINT = ''
convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE)
elif (self._model_type == 'gpt2'):
try:
from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif (self._model_type == 'xlnet'):
try:
from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import convert_xlnet_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
elif (self._model_type == 'xlm'):
from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import convert_xlm_checkpoint_to_pytorch
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
elif (self._model_type == 'lxmert'):
from transformers.convert_lxmert_original_pytorch_checkpoint_to_pytorch import convert_lxmert_checkpoint_to_pytorch
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
else:
raise ValueError('--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm, lxmert]') |
class ParamDictCVMOdelHandler(CommonModelHandler):
def __init__(self, dict_params, model_class, *args, **kw):
super().__init__(*args, **kw)
self.dict_params = dict_params
self.model_class = model_class
def _get_normal_model_instance(self, *args, **kw):
return self.model_class(**self.dict_params)
def get_loader(self, *args, **kw):
raise NotImplementedError() |
class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
def build_fc1(self, input_dim, output_dim):
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim):
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(embed_dim=embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, self_attention=(not getattr(args, 'cross_self_attention', False)))
def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(embed_dim=embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True) |
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:
(overrides, deletes) = override_module_args(args)
config_path = os.path.join('..', 'config')
GlobalHydra.instance().clear()
with initialize(config_path=config_path):
try:
composed_cfg = compose('config', overrides=overrides, strict=False)
except:
logger.error(('Error when composing. Overrides: ' + str(overrides)))
raise
for k in deletes:
composed_cfg[k] = None
cfg = OmegaConf.create(OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True))
from omegaconf import _utils
old_primitive = _utils.is_primitive_type
_utils.is_primitive_type = (lambda _: True)
if ((cfg.task is None) and getattr(args, 'task', None)):
cfg.task = Namespace(**vars(args))
from fairseq.tasks import TASK_REGISTRY
_set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
cfg.task._name = args.task
if ((cfg.model is None) and getattr(args, 'arch', None)):
cfg.model = Namespace(**vars(args))
from fairseq.models import ARCH_MODEL_REGISTRY
_set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
cfg.model._name = args.arch
if ((cfg.optimizer is None) and getattr(args, 'optimizer', None)):
cfg.optimizer = Namespace(**vars(args))
from fairseq.optim import OPTIMIZER_REGISTRY
_set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
cfg.optimizer._name = args.optimizer
if ((cfg.lr_scheduler is None) and getattr(args, 'lr_scheduler', None)):
cfg.lr_scheduler = Namespace(**vars(args))
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
_set_legacy_defaults(cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler])
cfg.lr_scheduler._name = args.lr_scheduler
if ((cfg.criterion is None) and getattr(args, 'criterion', None)):
cfg.criterion = Namespace(**vars(args))
from fairseq.criterions import CRITERION_REGISTRY
_set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
cfg.criterion._name = args.criterion
_utils.is_primitive_type = old_primitive
OmegaConf.set_struct(cfg, True)
return cfg |
class betabinom_gen(rv_discrete):
def _rvs(self, n, a, b):
p = self._random_state.beta(a, b, self._size)
return self._random_state.binomial(n, p, self._size)
def _get_support(self, n, a, b):
return (0, n)
def _argcheck(self, n, a, b):
return (((n >= 0) & (a > 0)) & (b > 0))
def _logpmf(self, x, n, a, b):
k = floor(x)
combiln = ((- log((n + 1))) - betaln(((n - k) + 1), (k + 1)))
return ((combiln + betaln((k + a), ((n - k) + b))) - betaln(a, b))
def _pmf(self, x, n, a, b):
return exp(self._logpmf(x, n, a, b))
def _stats(self, n, a, b, moments='mv'):
e_p = (a / (a + b))
e_q = (1 - e_p)
mu = (n * e_p)
var = ((((n * ((a + b) + n)) * e_p) * e_q) / ((a + b) + 1))
(g1, g2) = (None, None)
if ('s' in moments):
g1 = (1.0 / sqrt(var))
g1 *= (((a + b) + (2 * n)) * (b - a))
g1 /= (((a + b) + 2) * (a + b))
if ('k' in moments):
g2 = (a + b)
g2 *= (((a + b) - 1) + (6 * n))
g2 += (((3 * a) * b) * (n - 2))
g2 += (6 * (n ** 2))
g2 -= ((((3 * e_p) * b) * n) * (6 - n))
g2 -= (((18 * e_p) * e_q) * (n ** 2))
g2 *= (((a + b) ** 2) * ((1 + a) + b))
g2 /= (((((n * a) * b) * ((a + b) + 2)) * ((a + b) + 3)) * ((a + b) + n))
g2 -= 3
return (mu, var, g1, g2) |
def create_wrapper(inp, out, top, vmap, worker):
tmp = os.path.join(worker.output, 'tmp.v')
yosys_command = (((('read_verilog ' + inp) + '; synth -flatten; opt; opt_clean; write_verilog ') + tmp) + ';\n')
subprocess.call([worker.path['yosys'], '-p', yosys_command], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
out_file = open(out, 'w')
tmp_file = open(tmp)
isVector = {}
line = tmp_file.readline()
while line:
tokens = line.strip().strip(';').strip().split()
if ((len(tokens) > 0) and (tokens[0] == 'module')):
out_file.write(line)
if ((len(tokens) > 0) and ((tokens[0] == 'input') or (tokens[0] == 'output'))):
out_file.write(line)
if (len(tokens) == 2):
isVector[tokens[1]] = False
else:
isVector[tokens[2]] = True
line = tmp_file.readline()
tmp_file.close()
arg_list = []
input_dict = {}
output_dict = {}
num_out = module_info(inp, worker.path['yosys'])[4]
pair_out = []
for n in num_out:
for i in range(num_out[n]):
pair_out.append((n, i))
map_file = open(vmap)
line = map_file.readline()
while line:
tokens = line.split()
if (tokens[0] == 'input'):
if (tokens[3] in isVector):
if (isVector[tokens[3]] is False):
input_dict[tokens[3]] = int(tokens[1])
else:
input_dict[(((tokens[3] + '[') + tokens[2]) + ']')] = int(tokens[1])
elif (('\\' + tokens[3]) in isVector):
if (isVector[('\\' + tokens[3])] is False):
input_dict[('\\' + tokens[3])] = int(tokens[1])
else:
input_dict[(((('\\' + tokens[3]) + '[') + tokens[2]) + ']')] = int(tokens[1])
if (tokens[0] == 'output'):
port_num = int(tokens[1])
curr_pair = (tokens[3], int(tokens[2]))
curr_pair_2 = (('\\' + tokens[3]), int(tokens[2]))
if (curr_pair in pair_out):
pair_out.remove(curr_pair)
if (curr_pair_2 in pair_out):
pair_out.remove(curr_pair_2)
while (port_num in list(output_dict.values())):
port_num -= 1
if (tokens[3] in isVector):
if (isVector[tokens[3]] is False):
output_dict[tokens[3]] = port_num
else:
output_dict[(((tokens[3] + '[') + tokens[2]) + ']')] = port_num
elif (('\\' + tokens[3]) in isVector):
if (isVector[('\\' + tokens[3])] is False):
output_dict[('\\' + tokens[3])] = port_num
else:
output_dict[(((('\\' + tokens[3]) + '[') + tokens[2]) + ']')] = port_num
line = map_file.readline()
inp_digit = len(str(len(input_dict)))
out_digit = len(str(len(output_dict)))
map_file.close()
out_file.write(' top U0 (')
first = True
for i in input_dict:
if (not first):
out_file.write(',')
first = False
out_file.write(' .pi{0:0{1}}( {2} ) '.format(input_dict[i], inp_digit, i))
for i in output_dict:
if (not first):
out_file.write(',')
first = False
out_file.write(' .po{0:0{1}}( {2} ) '.format(output_dict[i], out_digit, i))
out_file.write(');\n')
for i in pair_out:
if (not isVector[i[0]]):
out_file.write(' assign {} = 0;\n'.format(i[0]))
else:
out_file.write(' assign {0}[{1}] = 0;\n'.format(i[0], i[1]))
out_file.write('endmodule\n\n')
top_file = open(top)
line = top_file.readline()
replaced = False
while line:
tokens = line.split()
if ((len(tokens) > 0) and (tokens[0] == 'module') and (not replaced)):
line = line.replace(worker.modulename, 'top', 1)
replaced = True
out_file.write(line)
line = top_file.readline()
top_file.close()
out_file.close()
os.remove(top)
shutil.move(out, top)
os.remove(os.path.join(worker.output, 'tmp.v')) |
def test_coefficient_tracker_keeps_track_of_shifted_coefficient_based_on_configured_interval_between_batches():
effective_dim_context = 4
effective_dim_action_context = 3
with mock.patch('obp.simulator.coefficient_drifter.sample_random_uniform_coefficients', MockCoefSample().fake_sample):
drifter = CoefficientDrifter(drift_interval=2, effective_dim_context=effective_dim_context, effective_dim_action_context=effective_dim_action_context)
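# With drift_interval=2, the first two rounds share one sampled coefficient and the third round shifts to the next sample.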
(actual_context_coef, _, _) = drifter.get_coefficients(n_rounds=3)
expected_context_coef = np.asarray([[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]])
assert np.allclose(actual_context_coef, expected_context_coef)
(actual_context_coef, _, _) = drifter.get_coefficients(n_rounds=3)
expected_context_coef_2 = np.asarray([[2.0, 2.0, 2.0, 2.0], [3.0, 3.0, 3.0, 3.0], [3.0, 3.0, 3.0, 3.0]])
assert np.allclose(actual_context_coef, expected_context_coef_2) |
def test_string_primitive_statement_delta_all(default_test_case):
value = 'te'
statement = stmt.StringPrimitiveStatement(default_test_case, value)
with mock.patch('pynguin.utils.randomness.next_char') as char_mock:
char_mock.side_effect = ['a', 'b']
with mock.patch('pynguin.utils.randomness.next_int') as int_mock:
int_mock.return_value = 0
with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
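# Pre-seeded probabilities for the three delta phases, consumed in order: deletion, replacement, insertion.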
deletion = [0.0, 0.0, 1.0]
replacement = [0.0, 0.0]
insertion = [0.0, 0.0, 1.0]
float_mock.side_effect = ((deletion + replacement) + insertion)
statement.delta()
assert (statement.value == 'ba') |
class InactiveLearningNodeMean(LearningNodeMean, InactiveLeaf):
def __init__(self, initial_stats=None):
super().__init__(initial_stats) |
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
@pytest.mark.parametrize('M', [0, 1, 7, 8])
def test_compatibility(device, M, L=32, B=2):
lsp2lpc = diffsptk.LineSpectralPairsToLinearPredictiveCoefficients(M, log_gain=True)
U.check_compatibility(device, lsp2lpc, [], f'nrand -l {(B * L)} | lpc -l {L} -m {M} | lpc2lsp -m {M} -k 1', f'lsp2lpc -m {M} -k 1', [], dx=(M + 1), dy=(M + 1))
U.check_differentiable(device, lsp2lpc, [B, (M + 1)]) |
def accum_opt_update(params, grads, opt_state, opt, freeze_processor):
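# Average accumulated gradients, dividing by the number of micro-batches that contributed a non-zero gradient (the epsilon avoids division by zero).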
grads = jax.tree_util.tree_map((lambda *x: (sum(x) / (sum([jnp.any(k) for k in x]) + 1e-12))), *grads)
(updates, opt_state) = opt.update(grads, opt_state)
if freeze_processor:
params_subset = _filter_out_processor(params)
assert (len(params) > len(params_subset))
assert params_subset
updates_subset = _filter_out_processor(updates)
new_params = optax.apply_updates(params_subset, updates_subset)
new_params = hk.data_structures.merge(params, new_params)
else:
new_params = optax.apply_updates(params, updates)
return (new_params, opt_state) |
def get_unique_stat_by_name(stats: Iterable[Stat], name: str) -> Optional[Stat]:
matching_stats: List[Stat] = get_all_stats_by_name(stats, name)
if (len(matching_stats) == 0):
return None
return singleton(matching_stats) |
def point_of_order(E, n):
def ffext(poly):
rng = poly.parent()
fld = rng.base_ring()
if (fld in FiniteFields()):
return poly.splitting_field(rng.variable_name())
return fld.extension(poly, rng.variable_name())
n = ZZ(n)
if (n == 1):
return E(0)
(l, m) = n.is_prime_power(get_data=True)
if (not m):
raise NotImplementedError('only prime-power orders are currently supported')
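# The x-coordinates of points of exact order n = l^m are roots of the n-th division polynomial with the (n/l)-th one divided out; radical() strips repeated factors.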
xpoly = E.division_polynomial(n).radical()
xpoly //= E.division_polynomial((n // l)).radical()
if (xpoly.degree() < 1):
raise ValueError('curve does not have any points of the specified order')
mu = xpoly.factor()[0][0]
FF = ffext(mu)
xx = mu.any_root(ring=FF, assume_squarefree=True)
Y = polygen(FF, 'Y')
ypoly = E.defining_polynomial()(xx, Y, 1)
if ypoly.is_irreducible():
FF = ffext(ypoly)
xx = FF(xx)
EE = E.change_ring(FF)
pt = EE.lift_x(xx)
pt.set_order(n, check=False)
return pt |
class TransfoXLModelLanguageGenerationTest(unittest.TestCase):
special_tokens = prepare_generation_special_tokens()
def test_lm_generate_transfo_xl_wt103(self):
model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
input_ids = torch.Tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]]).long()
expected_output_ids = [33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0, 29546, 40, 1092, 18, 8, 5854, 7, 1143, 2, 7, 1, 159, 99, 16, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 28, 1110, 3, 57, 629, 38, 3493, 47, 1094, 7, 1297, 3, 0]
torch.manual_seed(0)
output_ids = model.generate(input_ids, eos_token_ids=self.special_tokens['eos_token_id'], max_length=200)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids) |
class Pattern(Serialize):
raw = None
type = None
def __init__(self, value, flags=(), raw=None):
self.value = value
self.flags = frozenset(flags)
self.raw = raw
def __repr__(self):
return repr(self.to_regexp())
def __hash__(self):
return hash((type(self), self.value, self.flags))
def __eq__(self, other):
return ((type(self) == type(other)) and (self.value == other.value) and (self.flags == other.flags))
def to_regexp(self):
raise NotImplementedError()
def min_width(self):
raise NotImplementedError()
def max_width(self):
raise NotImplementedError()
if Py36:
def _get_flags(self, value):
for f in self.flags:
value = f'(?{f}:{value})'
return value
else:
def _get_flags(self, value):
for f in self.flags:
value = (('(?%s)' % f) + value)
return value |
class TFLayoutLMMainLayer(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
def tf_idf_claim(line):
if ('predicted_pages' in line):
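# Keep only the top max_page predicted pages, ranked by retrieval score, then pull their sentences from the document database.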
sorted_p = list(sorted(line['predicted_pages'], reverse=True, key=(lambda elem: elem[1])))
pages = [p[0] for p in sorted_p[:args.max_page]]
p_lines = []
for page in pages:
lines = db.get_doc_lines(page)
lines = [(line.split('\t')[1] if (len(line.split('\t')[1]) > 1) else '') for line in lines.split('\n')]
p_lines.extend(zip(lines, ([page] * len(lines)), range(len(lines))))
lines = []
for p_line in p_lines:
lines.append({'sentence': p_line[0], 'page': p_line[1], 'line_on_page': p_line[2]})
scores = tf_idf_sim(line['claim'], lines, doc_freqs)
line['predicted_sentences'] = [(s['page'], s['line_on_page']) for s in scores]
return line |
def ResNet101Body(net, from_layer, use_pool5=True, use_dilation_conv5=False, **bn_param):
conv_prefix = ''
conv_postfix = ''
bn_prefix = 'bn_'
bn_postfix = ''
scale_prefix = 'scale_'
scale_postfix = ''
ConvBNLayer(net, from_layer, 'conv1', use_bn=True, use_relu=True, num_output=64, kernel_size=7, pad=3, stride=2, conv_prefix=conv_prefix, conv_postfix=conv_postfix, bn_prefix=bn_prefix, bn_postfix=bn_postfix, scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param)
net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
ResBody(net, 'pool1', '2a', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=True, **bn_param)
ResBody(net, 'res2a', '2b', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param)
ResBody(net, 'res2b', '2c', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param)
ResBody(net, 'res2c', '3a', out2a=128, out2b=128, out2c=512, stride=2, use_branch1=True, **bn_param)
from_layer = 'res3a'
for i in range(1, 4):
block_name = '3b{}'.format(i)
ResBody(net, from_layer, block_name, out2a=128, out2b=128, out2c=512, stride=1, use_branch1=False, **bn_param)
from_layer = 'res{}'.format(block_name)
ResBody(net, from_layer, '4a', out2a=256, out2b=256, out2c=1024, stride=2, use_branch1=True, **bn_param)
from_layer = 'res4a'
for i in range(1, 23):
block_name = '4b{}'.format(i)
ResBody(net, from_layer, block_name, out2a=256, out2b=256, out2c=1024, stride=1, use_branch1=False, **bn_param)
from_layer = 'res{}'.format(block_name)
stride = 2
dilation = 1
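# With use_dilation_conv5, the conv5 stage keeps stride 1 and uses dilation 2 to preserve spatial resolution.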
if use_dilation_conv5:
stride = 1
dilation = 2
ResBody(net, from_layer, '5a', out2a=512, out2b=512, out2c=2048, stride=stride, use_branch1=True, dilation=dilation, **bn_param)
ResBody(net, 'res5a', '5b', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param)
ResBody(net, 'res5b', '5c', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param)
if use_pool5:
net.pool5 = L.Pooling(net.res5c, pool=P.Pooling.AVE, global_pooling=True)
return net |
def register_Ns3BooleanChecker_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return |
class CacheEntry(object):
def __init__(self, link, persistent):
self.link = link
self.persistent = persistent |
class RobertaPreLayerNormConfig(PretrainedConfig):
model_type = 'roberta-prelayernorm'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout |
def create_problem(filename):
from sfepy.discrete import Problem
problem = Problem.from_conf_file(filename, init_equations=False, init_solvers=False)
return problem |
def kde_viz_panel(hist: List[Tuple[(np.ndarray, np.ndarray)]], kde: np.ndarray, col: str, plot_width: int, plot_height: int, cfg: Config) -> Panel:
fig = kde_viz_figure(hist, kde, col, plot_width, plot_height, cfg)
return Panel(child=row(fig), title='KDE Plot') |
class BinanceCancelOrder(VirtualFunctionTool):
name = 'BinanceCancelOrder'
summary = 'Cancel an existing order.'
parameters: List[ArgParameter] = [{'name': 'order_id', 'type': 'string', 'description': 'The unique identifier of the order to cancel.', 'required': True}]
returns: List[ArgReturn] = [{'name': 'status', 'type': 'string', 'description': 'The status of the canceled order.'}]
exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': 'The specified order_id does not exist.'}] |
def clean_all_dir(directory):
for subdir in os.listdir(directory):
keyword_path = os.path.join(directory, subdir)
clean_dir(keyword_path) |
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bn_norm, stride=1, padding=0, groups=1, IN=False):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, groups=groups)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = get_norm(bn_norm, out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x |
def is_tf_available():
candidates = ('tensorflow', 'tensorflow-cpu', 'tensorflow-gpu', 'tf-nightly', 'tf-nightly-cpu', 'tf-nightly-gpu', 'intel-tensorflow', 'intel-tensorflow-avx512', 'tensorflow-rocm', 'tensorflow-macos')
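# Probe each known TensorFlow distribution package until an installed one is found.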
_tf_version = None
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
if (_tf_version is not None):
if (version.parse(_tf_version) < version.parse('2')):
raise EnvironmentError(f'Tensorflow found but with version {_tf_version}. The minimum version is 2.0')
return True
else:
return False |
def get_session(config=None):
sess = tf.get_default_session()
if (sess is None):
sess = make_session(config=config, make_default=True)
return sess |
def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
if isinstance(s, (bytes, bytearray)):
s = s.decode('ascii')
if uts46:
s = uts46_remap(s, std3_rules, transitional)
trailing_dot = False
result = []
if strict:
labels = s.split('.')
else:
labels = _unicode_dots_re.split(s)
if ((not labels) or (labels == [''])):
raise IDNAError('Empty domain')
if (labels[(- 1)] == ''):
del labels[(- 1)]
trailing_dot = True
for label in labels:
s = alabel(label)
if s:
result.append(s)
else:
raise IDNAError('Empty label')
if trailing_dot:
result.append(b'')
s = b'.'.join(result)
if (not valid_string_length(s, trailing_dot)):
raise IDNAError('Domain too long')
return s |
class ReadInput(object):
def __init__(self, entries):
self.entries = entries
self.input_file = entries
self.options = {}
number_of_structures = 0
number_of_obstacles = 0
number_of_articulated = 0
comment_symbols = ['#']
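# Parse the input file: strip '#' comments, then store each 'option value' pair; repeated structure/obstacle/articulated entries get a numeric suffix so they stay distinct.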
with open(self.input_file, 'r') as f:
for line in f:
if (comment_symbols[0] in line):
(line, comment) = line.split(comment_symbols[0], 1)
line = line.strip()
if (line != ''):
(option, value) = line.split(None, 1)
if (option == 'structure'):
option += str(number_of_structures)
number_of_structures += 1
if (option == 'obstacle'):
option += str(number_of_obstacles)
number_of_obstacles += 1
if (option == 'articulated'):
option += str(number_of_articulated)
number_of_articulated += 1
self.options[option] = value
self.n_steps = int((self.options.get('n_steps') or 0))
self.initial_step = int((self.options.get('initial_step') or 0))
self.n_save = int((self.options.get('n_save') or 1))
self.n_relaxation = int((self.options.get('n_relaxation') or 0))
self.dt = float((self.options.get('dt') or 0.0))
self.eta = float((self.options.get('eta') or 1.0))
self.g = float((self.options.get('g') or 1.0))
self.theta = float((self.options.get('tilt_angle') or 0.0))
self.blob_radius = float((self.options.get('blob_radius') or 1.0))
self.tracer_radius = float((self.options.get('tracer_radius') or 0.0))
self.kT = float((self.options.get('kT') or 1.0))
self.scheme = str((self.options.get('scheme') or 'deterministic_forward_euler'))
self.output_name = str((self.options.get('output_name') or 'run'))
self.random_state = self.options.get('random_state')
self.seed = self.options.get('seed')
self.repulsion_strength_wall = float((self.options.get('repulsion_strength_wall') or 1.0))
self.debye_length_wall = float((self.options.get('debye_length_wall') or 1.0))
self.mobility_blobs_implementation = str((self.options.get('mobility_blobs_implementation') or 'python'))
self.mobility_vector_prod_implementation = str((self.options.get('mobility_vector_prod_implementation') or 'python'))
self.repulsion_strength = float((self.options.get('repulsion_strength') or 1.0))
self.debye_length = float((self.options.get('debye_length') or 1.0))
self.blob_blob_force_implementation = str((self.options.get('blob_blob_force_implementation') or 'None'))
self.body_body_force_torque_implementation = str((self.options.get('body_body_force_torque_implementation') or 'None'))
self.save_body_mobility = str((self.options.get('save_body_mobility') or 'False'))
self.save_blobs_mobility = str((self.options.get('save_blobs_mobility') or 'False'))
self.save_velocities = str((self.options.get('save_velocities') or 'False'))
self.slip_file = self.options.get('slip_file')
self.force_file = self.options.get('force_file')
self.velocity_file = self.options.get('velocity_file')
self.solver_tolerance = float((self.options.get('solver_tolerance') or 1e-08))
self.nonlinear_solver_tolerance = float((self.options.get('nonlinear_solver_tolerance') or 1e-08))
self.rf_delta = float((self.options.get('rf_delta') or 0.001))
self.save_clones = str((self.options.get('save_clones') or 'one_file_per_step'))
self.periodic_length = np.fromstring((self.options.get('periodic_length') or '0 0 0'), sep=' ')
self.omega_one_roller = np.fromstring((self.options.get('omega_one_roller') or '0 0 0'), sep=' ')
self.free_kinematics = str((self.options.get('free_kinematics') or 'True'))
self.plot_velocity_field = np.fromstring((self.options.get('plot_velocity_field') or 'None'), sep=' ')
self.green_particles = np.fromstring((self.options.get('green_particles') or '0 0'), sep=' ', dtype=int)
self.cells = np.fromstring((self.options.get('cells') or '1 1'), sep=' ', dtype=int)
self.sample_HydroGrid = int((self.options.get('sample_HydroGrid') or 1))
self.save_HydroGrid = int((self.options.get('save_HydroGrid') or 0))
self.hydro_interactions = int((self.options.get('hydro_interactions') or 1))
self.update_PC = int((self.options.get('update_PC') or 1))
self.domain = str((self.options.get('domain') or 'single_wall'))
self.call_HydroGrid = (str((self.options.get('call_HydroGrid') or 'False')) == 'True')
self.repulsion_strength_firm = float((self.options.get('repulsion_strength_firm') or 0.0))
self.firm_delta = float((self.options.get('firm_delta') or 0.01))
self.Lub_Cut = float((self.options.get('Lub_Cut') or 4.5))
self.zmin = float((self.options.get('zmin') or 0.0))
self.zmax = float((self.options.get('zmax') or 0.0))
self.domType = str((self.options.get('domType') or 'RPB'))
self.num_free_bodies = number_of_structures
self.structures = []
self.structures_ID = []
self.articulated = []
self.articulated_ID = []
for i in range(number_of_structures):
option = ('structure' + str(i))
structure_files = str.split(str(self.options.get(option)))
self.structures.append(structure_files)
for i in range(number_of_obstacles):
option = ('obstacle' + str(i))
structure_files = str.split(str(self.options.get(option)))
self.structures.append(structure_files)
for i in range(number_of_articulated):
option = ('articulated' + str(i))
structure_files = str.split(str(self.options.get(option)))
(head, tail) = ntpath.split(structure_files[1])
tail = tail[:(- 7)]
self.articulated_ID.append(tail)
self.articulated.append(structure_files)
for struct in self.structures:
(head, tail) = ntpath.split(struct[1])
tail = tail[:(- 7)]
self.structures_ID.append(tail)
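# When restarting from initial_step > 0, point each structure at the clones file saved at that step.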
if (self.initial_step > 0):
for (k, struct) in enumerate(self.structures):
recovery_file = (((((self.output_name + '.') + self.structures_ID[k]) + '.') + str(self.initial_step).zfill(8)) + '.clones')
struct[1] = recovery_file
if (number_of_obstacles > 0):
if ((self.scheme == 'deterministic_forward_euler_dense_algebra') or (self.scheme == 'stochastic_first_order_RFD') or (self.scheme == 'stochastic_adams_bashforth') or (self.scheme == 'stochastic_first_order_RFD_dense_algebra') or (self.scheme == 'stochastic_traction_EM') or (self.scheme == 'Fixman') or (self.scheme == 'stochastic_traction_AB') or (self.scheme == 'stochastic_Slip_Mid_DLA')):
print('Obstacles are not implemented for scheme: ', self.scheme)
sys.exit()
return |
class SimpleIsotypesWrapper(SpeciesWrapper):
def __init__(self, species, labels, structure_class):
SpeciesWrapper.__init__(self, species, labels, '_simple_isotypes_selector', 'isotype_generating_series', 'Simple isomorphism types', structure_class) |
class StarCrystal(UniqueRepresentation, Parent):
def __init__(self, Binf):
self._Binf = Binf
self._cartan_type = Binf.cartan_type()
Parent.__init__(self, category=HighestWeightCrystals().Infinite())
self.module_generators = (self(self._Binf.module_generators[0]),)
t0 = Binf.highest_weight_vector()
B = {i: ElementaryCrystal(Binf.cartan_type(), i) for i in self.index_set()}
self._tens = {i: B[i].tensor(Binf) for i in self.index_set()}
gens = {i: self._tens[i](B[i](0), t0) for i in self.index_set()}
self._embedding = {i: Binf.crystal_morphism({t0: gens[i]}) for i in self.index_set()}
self._pullback = {i: self._tens[i].crystal_morphism({gens[i]: t0}) for i in self.index_set()}
def _repr_(self):
return ('Star-crystal version of %s' % self._Binf)
class Element(ElementWrapper):
def e(self, i):
P = self.parent()
image = P._embedding[i](self.value)
if (image[0].e(i)._m > 0):
return None
return P(P._pullback[i](P._tens[i](image[0].e(i), image[1])))
def f(self, i):
P = self.parent()
image = P._embedding[i](self.value)
return P(P._pullback[i](P._tens[i](image[0].f(i), image[1])))
def weight(self):
return self.value.weight()
def epsilon(self, i):
ep = (- 1)
while (self is not None):
ep += 1
self = self.e(i)
return ep
def phi(self, i):
P = self.parent().weight_lattice_realization()
ac = P.simple_coroot(i)
return (P(self.weight()).scalar(ac) + self.epsilon(i))
def jump(self, i):
P = self.parent().weight_lattice_realization()
ac = P.simple_coroot(i)
return ((P(self.value.weight()).scalar(ac) + self.epsilon(i)) + self.value.epsilon(i)) |
def to_symbol(i):
if (i == 0):
return ''
if (i == 11):
return '+'
if (i == 12):
return '*'
return str((i - 1)) |
class OfflineMetrics():
_metrics_call_requirement_map: Dict[(str, List[str])] = {'HitRate': ['ground_truth'], 'MAP': ['ground_truth'], 'NDCG': ['ground_truth'], 'RocAuc': ['ground_truth'], 'Coverage': ['train'], 'Novelty': ['train'], 'Surprisal': ['train'], 'MRR': ['ground_truth'], 'Precision': ['ground_truth'], 'Recall': ['ground_truth']}
def __init__(self, metrics: List[Metric], query_column: str='query_id', item_column: str='item_id', rating_column: str='rating', category_column: str='category_id', allow_caching: bool=True):
self.unexpectedness_metric: List[Metric] = []
self.diversity_metric: List[Metric] = []
self.main_metrics: List[Metric] = []
self._allow_caching = allow_caching
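# Unexpectedness and CategoricalDiversity need extra inputs (base recommendations, category column), so they are split off from the main metrics.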
for metric in metrics:
metric.query_column = query_column
metric.item_column = item_column
metric.rating_column = rating_column
if (metric.__class__.__name__ in ['Unexpectedness']):
self.unexpectedness_metric.append(metric)
elif (metric.__class__.__name__ in ['CategoricalDiversity']):
metric.category_column = category_column
self.diversity_metric.append(metric)
else:
self.main_metrics.append(metric)
self.metrics = self.main_metrics
def _get_enriched_recommendations(self, recommendations: SparkDataFrame, ground_truth: SparkDataFrame, train: Optional[SparkDataFrame]) -> Tuple[(Dict[(str, SparkDataFrame)], Optional[SparkDataFrame])]:
if (len(self.main_metrics) == 0):
return ({}, train)
result_dict = {}
query_column = self.main_metrics[0].query_column
item_column = self.main_metrics[0].item_column
rating_column = self.main_metrics[0].rating_column
default_metric = Recall(topk=2, query_column=query_column, item_column=item_column, rating_column=rating_column)
default_metric._check_duplicates_spark(recommendations)
unchanged_recs = recommendations
result_dict['default'] = default_metric._get_enriched_recommendations(recommendations, ground_truth)
for metric in self.metrics:
if (metric.__class__.__name__ == 'Coverage'):
result_dict['Coverage'] = Coverage(topk=2, query_column=query_column, item_column=item_column, rating_column=rating_column)._get_enriched_recommendations(recommendations)
if ((metric.__class__.__name__ == 'Novelty') and (train is not None)):
novelty_metric = Novelty(topk=2, query_column=query_column, item_column=item_column, rating_column=rating_column)
cur_recs = novelty_metric._get_enriched_recommendations(unchanged_recs, train).withColumnRenamed('ground_truth', 'train')
cur_recs = metric._rearrange_columns(cur_recs)
result_dict['Novelty'] = cur_recs
if ((metric.__class__.__name__ == 'Surprisal') and (train is not None)):
result_dict['Surprisal'] = Surprisal(topk=2, query_column=query_column, item_column=item_column, rating_column=rating_column)._get_enriched_recommendations(unchanged_recs, train)
return (result_dict, train)
def _cache_dataframes(self, dataframes: Dict[(str, SparkDataFrame)]) -> None:
for data in dataframes.values():
data.cache()
def _unpersist_dataframes(self, dataframes: Dict[(str, SparkDataFrame)]) -> None:
for data in dataframes.values():
data.unpersist()
def _calculate_metrics(self, enriched_recs_dict: Dict[(str, SparkDataFrame)], train: Optional[SparkDataFrame]=None) -> MetricsReturnType:
result: Dict = {}
for metric in self.metrics:
metric_args = {}
if ((metric.__class__.__name__ == 'Coverage') and (train is not None)):
metric_args['recs'] = enriched_recs_dict['Coverage']
metric_args['train'] = train
elif (metric.__class__.__name__ == 'Surprisal'):
metric_args['recs'] = enriched_recs_dict['Surprisal']
elif (metric.__class__.__name__ == 'Novelty'):
metric_args['recs'] = enriched_recs_dict['Novelty']
else:
metric_args['recs'] = enriched_recs_dict['default']
result.update(metric._spark_compute(**metric_args))
return result
def _check_dataframes_types(self, recommendations: MetricsDataFrameLike, ground_truth: MetricsDataFrameLike, train: Optional[MetricsDataFrameLike], base_recommendations: Optional[Union[(MetricsDataFrameLike, Dict[(str, MetricsDataFrameLike)])]]) -> None:
types = set()
types.add(type(recommendations))
types.add(type(ground_truth))
if (train is not None):
types.add(type(train))
if isinstance(base_recommendations, dict):
for (_, df) in base_recommendations.items():
if (not isinstance(df, list)):
types.add(type(df))
elif (base_recommendations is not None):
types.add(type(base_recommendations))
if (len(types) != 1):
raise ValueError('All given data frames must have the same type')
def __call__(self, recommendations: MetricsDataFrameLike, ground_truth: MetricsDataFrameLike, train: Optional[MetricsDataFrameLike]=None, base_recommendations: Optional[Union[(MetricsDataFrameLike, Dict[(str, MetricsDataFrameLike)])]]=None) -> Dict[(str, float)]:
self._check_dataframes_types(recommendations, ground_truth, train, base_recommendations)
result = {}
if isinstance(recommendations, SparkDataFrame):
assert isinstance(ground_truth, SparkDataFrame)
assert ((train is None) or isinstance(train, SparkDataFrame))
(enriched_recs_dict, train) = self._get_enriched_recommendations(recommendations, ground_truth, train)
if self._allow_caching:
self._cache_dataframes(enriched_recs_dict)
result.update(self._calculate_metrics(enriched_recs_dict, train))
if self._allow_caching:
self._unpersist_dataframes(enriched_recs_dict)
else:
current_map: Dict[(str, Union[(PandasDataFrame, Dict)])] = {'ground_truth': ground_truth, 'train': train}
for metric in self.metrics:
args_to_call: Dict[(str, Optional[Dict])] = {'recommendations': recommendations}
for data_name in self._metrics_call_requirement_map[str(metric.__class__.__name__)]:
args_to_call[data_name] = current_map[data_name]
result.update(metric(**args_to_call))
unexpectedness_result = {}
diversity_result = {}
if (len(self.unexpectedness_metric) != 0):
if (base_recommendations is None):
raise ValueError('Can not calculate Unexpectedness because base_recommendations is None')
if (isinstance(base_recommendations, dict) and (not isinstance(list(base_recommendations.values())[0], list))):
for unexp in self.unexpectedness_metric:
for model_name in base_recommendations:
cur_result = unexp(recommendations, base_recommendations[model_name])
for metric_name in cur_result:
# Insert the base model name before the '@k' suffix of the metric name (the empty-string separator in the original would raise ValueError).
splitted = metric_name.split('@')
splitted[0] += ('_' + model_name)
unexpectedness_result['@'.join(splitted)] = cur_result[metric_name]
else:
for unexp in self.unexpectedness_metric:
unexpectedness_result.update(unexp(recommendations, base_recommendations))
if (len(self.diversity_metric) != 0):
for diversity in self.diversity_metric:
diversity_result.update(diversity(recommendations))
return {**result, **unexpectedness_result, **diversity_result} |
class Regressor(abc.ABC):
def __init__(self, input_shape, output_dim, name):
self._input_shape = input_shape
self._output_dim = output_dim
self._name = name
self._variable_scope = None
self._cached_params = None
self._cached_param_shapes = None
def fit(self, xs, ys):
"""Fit the regressor to inputs xs and targets ys."""
def predict(self, xs):
"""Predict outputs for inputs xs."""
def get_params_internal(self):
"""Return the list of internal parameters."""
def get_params(self):
if (self._cached_params is None):
self._cached_params = self.get_params_internal()
return self._cached_params
def get_param_shapes(self):
if (self._cached_param_shapes is None):
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
self._cached_param_shapes = [val.shape for val in param_values]
return self._cached_param_shapes
def get_param_values(self):
params = self.get_params()
param_values = tf.compat.v1.get_default_session().run(params)
return flatten_tensors(param_values)
def set_param_values(self, param_values):
param_values = unflatten_tensors(param_values, self.get_param_shapes())
for (param, value) in zip(self.get_params(), param_values):
param.load(value)
def flat_to_params(self, flattened_params):
return unflatten_tensors(flattened_params, self.get_param_shapes())
def __getstate__(self):
new_dict = self.__dict__.copy()
del new_dict['_cached_params']
return new_dict
def __setstate__(self, state):
self._cached_params = None
self.__dict__.update(state) |
def extract_resnet(name):
configs = ('18', '34', '50', '101', '152')
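# Default to ResNet-50 and switch to another depth if the model name mentions one of the supported configurations.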
resnet = models.resnet50
for config in configs:
if (config in name):
resnet = getattr(models, 'resnet{}'.format(config))
break
resnet = resnet(pretrained=True)
resnet.avgpool = nn.AdaptiveAvgPool2d(1)
resnet.fc = nn.Identity()
resnet.eval()
resnet_seq = nn.Sequential(MeanShift(), resnet)
return resnet_seq |
def main():
print('Prepare data')
transform = transforms.Compose([transforms.ToTensor()])
(train_data, [valid_sk_data, valid_im_data], [test_sk_data, test_im_data], dict_class) = load_data(args, transform)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.prefetch, pin_memory=True)
valid_sk_loader = DataLoader(valid_sk_data, batch_size=(3 * args.batch_size), num_workers=args.prefetch, pin_memory=True)
valid_im_loader = DataLoader(valid_im_data, batch_size=(3 * args.batch_size), num_workers=args.prefetch, pin_memory=True)
test_sk_loader = DataLoader(test_sk_data, batch_size=(3 * args.batch_size), num_workers=args.prefetch, pin_memory=True)
test_im_loader = DataLoader(test_im_data, batch_size=(3 * args.batch_size), num_workers=args.prefetch, pin_memory=True)
if args.log:
if (args.dataset == 'quickdraw_extend'):
pass
elif (not args.attn):
pass
else:
rand_samples_sk = np.random.randint(0, high=len(valid_sk_data), size=5)
rand_samples_im = np.random.randint(0, high=len(valid_im_data), size=5)
for i in range(len(rand_samples_sk)):
(sk, _, lbl_sk) = valid_sk_data[rand_samples_sk[i]]
(im, _, lbl_im) = valid_im_data[rand_samples_im[i]]
if args.cuda:
(sk, im) = (sk.cuda(), im.cuda())
if (i == 0):
sk_log = sk.unsqueeze(0)
im_log = im.unsqueeze(0)
sk_lbl_log = [lbl_sk]
im_lbl_log = [lbl_im]
else:
sk_log = torch.cat((sk_log, sk.unsqueeze(0)), dim=0)
im_log = torch.cat((im_log, im.unsqueeze(0)), dim=0)
sk_lbl_log.append(lbl_sk)
im_lbl_log.append(lbl_im)
print('Create trainable model')
if args.nopretrain:
print('\t* Loading a pretrained model')
im_net = EncoderCNN(out_size=args.emb_size, pretrained=args.nopretrain, attention=args.attn)
sk_net = EncoderCNN(out_size=args.emb_size, pretrained=args.nopretrain, attention=args.attn)
print('Loss, Optimizer & Evaluation')
criterion = DetangledJoinDomainLoss(emb_size=args.emb_size, w_sem=args.w_semantic, w_dom=args.w_domain, w_spa=args.w_triplet, lambd=args.grl_lambda)
criterion.train()
optimizer = torch.optim.SGD(((list(im_net.parameters()) + list(sk_net.parameters())) + list(criterion.parameters())), args.learning_rate, momentum=args.momentum, weight_decay=args.decay, nesterov=True)
print('Check CUDA')
if (args.cuda and (args.ngpu > 1)):
print('\t* Data Parallel')
im_net = nn.DataParallel(im_net, device_ids=list(range(args.ngpu)))
sk_net = nn.DataParallel(sk_net, device_ids=list(range(args.ngpu)))
criterion = nn.DataParallel(criterion, device_ids=list(range(args.ngpu)))
if args.cuda:
print('\t* CUDA')
(im_net, sk_net) = (im_net.cuda(), sk_net.cuda())
criterion = criterion.cuda()
start_epoch = 0
best_map = 0
early_stop_counter = 0
if (args.load is not None):
print('Loading model')
checkpoint = load_checkpoint(args.load)
im_net.load_state_dict(checkpoint['im_state'])
sk_net.load_state_dict(checkpoint['sk_state'])
criterion.load_state_dict(checkpoint['criterion'])
start_epoch = checkpoint['epoch']
best_map = checkpoint['best_map']
print('Loaded model at epoch {epoch} and mAP {mean_ap}%'.format(epoch=checkpoint['epoch'], mean_ap=checkpoint['best_map']))
print('***Train***')
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
(loss_train, loss_sem, loss_dom, loss_spa) = train(train_loader, [im_net, sk_net], optimizer, args.cuda, criterion, epoch, args.log_interval)
map_valid = test(valid_im_loader, valid_sk_loader, [im_net, sk_net], args)
if args.log:
im_net.eval()
sk_net.eval()
if (args.dataset == 'quickdraw_extend'):
pass
elif (not args.attn):
pass
else:
with torch.set_grad_enabled(False):
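# Upsample the attention maps to image resolution and rescale them to [0, 1] per sample (image maps are inverted) before logging.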
(_, attn_im) = im_net(im_log)
attn_im = nn.Upsample(size=(im_log[0].size(1), im_log[0].size(2)), mode='bilinear', align_corners=False)(attn_im)
attn_im = (attn_im - attn_im.view((attn_im.size(0), (- 1))).min((- 1))[0].unsqueeze((- 1)).unsqueeze((- 1)).unsqueeze((- 1)))
attn_im = (1 - (attn_im / attn_im.view((attn_im.size(0), (- 1))).max((- 1))[0].unsqueeze((- 1)).unsqueeze((- 1)).unsqueeze((- 1))))
(_, attn_sk) = sk_net(sk_log)
attn_sk = nn.Upsample(size=(sk_log[0].size(1), sk_log[0].size(2)), mode='bilinear', align_corners=False)(attn_sk)
attn_sk = (attn_sk - attn_sk.view((attn_sk.size(0), (- 1))).min((- 1))[0].unsqueeze((- 1)).unsqueeze((- 1)).unsqueeze((- 1)))
attn_sk = (attn_sk / attn_sk.view((attn_sk.size(0), (- 1))).max((- 1))[0].unsqueeze((- 1)).unsqueeze((- 1)).unsqueeze((- 1)))
for i in range(im_log.size(0)):
plt_im = torch.cat([im_log[i], attn_im[i]], dim=0)
nam = list(dict_class.keys())[list(dict_class.values()).index(im_lbl_log[i])]
logger.add_image('im{}_{}'.format(i, nam), plt_im)
nam = list(dict_class.keys())[list(dict_class.values()).index(sk_lbl_log[i])]
plt_im = (sk_log[i] * attn_sk[i])
logger.add_image('sk{}_{}'.format(i, nam), plt_im)
logger.add_scalar('loss_train', loss_train.avg)
logger.add_scalar('loss_sem', loss_sem.avg)
logger.add_scalar('loss_dom', loss_dom.avg)
logger.add_scalar('loss_spa', loss_spa.avg)
logger.add_scalar('map_valid', map_valid)
logger.add_scalar('learning_rate', args.learning_rate)
logger.step()
if (map_valid > best_map):
best_map = map_valid
best_epoch = (epoch + 1)
early_stop_counter = 0
if (args.save is not None):
save_checkpoint({'epoch': (epoch + 1), 'im_state': im_net.state_dict(), 'sk_state': sk_net.state_dict(), 'criterion': criterion.state_dict(), 'best_map': best_map}, directory=args.save, file_name='checkpoint')
else:
if (early_stop_counter == args.early_stop):
break
early_stop_counter += 1
if (args.save is not None):
print('Loading best model')
best_model_file = os.path.join(args.save, 'checkpoint.pth')
checkpoint = load_checkpoint(best_model_file)
im_net.load_state_dict(checkpoint['im_state'])
sk_net.load_state_dict(checkpoint['sk_state'])
best_map = checkpoint['best_map']
best_epoch = checkpoint['epoch']
print('Best model at epoch {epoch} and mAP {mean_ap}%'.format(epoch=checkpoint['epoch'], mean_ap=checkpoint['best_map']))
print('***Test***')
map_test = test(test_im_loader, test_sk_loader, [im_net, sk_net], args)
print('Test mAP {mean_ap}%'.format(mean_ap=map_test))
if (args.exp_idf is not None):
with open(os.path.join(args.log, 'results.txt'), 'w') as fp:
print('Epoch: {best_epoch:.3f}'.format(best_epoch=best_epoch), file=fp)
print('Valid: {mean_ap:.3f}'.format(mean_ap=best_map), file=fp)
print('Test mAP: {mean_ap:.3f}'.format(mean_ap=map_test), file=fp) |
def get_device(device: str='cuda'):
if (torch.cuda.is_available() and (device == 'cuda')):
mydevice = torch.device(device)
else:
mydevice = torch.device('cpu')
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
return mydevice |
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = (sess or get_session())
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname) |