code stringlengths 101 5.91M |
|---|
def MLP(channels, bias=False, nonlin=LeakyReLU(negative_slope=0.2)):
    """Build an MLP as a stack of (Linear -> BatchNorm1d -> nonlinearity) units.

    ``channels`` lists the layer widths, e.g. ``[in, hidden, out]``.
    NOTE: the default ``nonlin`` module is created once at definition time and
    shared by every layer of every MLP built with the default.
    """
    blocks = []
    for width_in, width_out in zip(channels[:-1], channels[1:]):
        blocks.append(Seq(Lin(width_in, width_out, bias=bias), BatchNorm1d(width_out), nonlin))
    return Seq(*blocks)
def parse_optional_tags(string, *, return_string_sans_tags=False):
    """Extract ``# optional`` / ``# needs`` style doctest tags from the first line.

    Returns a dict mapping lowercased tag names to their explanation string
    (or ``None``).  With ``return_string_sans_tags=True``, instead returns a
    triple ``(tags, string_sans_tags, is_persistent)`` where the tag comment
    has been removed from the first line and ``is_persistent`` is true when
    the line is a bare ``sage:`` prompt carrying only tags.

    Relies on module-level helpers not visible in this chunk:
    ``strip_string_literals``, ``optional_regex`` and
    ``tag_with_explanation_regex``.
    """
    # Replace string literals with placeholders so a '#' inside a literal is
    # not mistaken for a comment marker; `literals` maps placeholders back.
    (safe, literals, state) = strip_string_literals(string)
    # Only the first line may carry tags.
    split = safe.split('\n', 1)
    if (len(split) > 1):
        (first_line, rest) = split
    else:
        (first_line, rest) = (split[0], None)
    sharp_index = first_line.find('#')
    if (sharp_index < 0):
        # No comment at all, hence no tags.
        if return_string_sans_tags:
            return ({}, string, False)
        else:
            return {}
    # `% literals` re-substitutes the original string literals into the text.
    (first_line_sans_comments, comment) = ((first_line[:sharp_index] % literals), (first_line[sharp_index:] % literals))
    # Keep a single separating space after the code part, except after a
    # bare "sage:" prompt.
    if ((not first_line_sans_comments.endswith(' ')) and (not first_line_sans_comments.rstrip().endswith('sage:'))):
        first_line_sans_comments = (first_line_sans_comments.rstrip() + ' ')
    if return_string_sans_tags:
        # Locate the '#' that introduces the comment containing the first
        # tag; everything before it stays in the returned string.
        if (m := optional_regex.search(comment)):
            sharp_index = comment[:(m.start(0) + 1)].rfind('#')
            if (sharp_index >= 0):
                first_line = (first_line_sans_comments + comment[:sharp_index])
                comment = comment[sharp_index:]
        else:
            # A comment is present but holds no tags.
            return ({}, string, False)
    tags = {}
    for m in optional_regex.finditer(comment):
        cmd = m.group('cmd')
        if (cmd and (cmd.lower() == 'known bug')):
            # "known bug" is normalized to the 'bug' tag with no explanation.
            tags['bug'] = None
        elif cmd:
            tags[cmd.lower()] = (m.group('cmd_explanation') or None)
        else:
            # A bare tag list such as "optional: tag1 tag2 (reason)".
            tags.update({m.group(1).lower(): (m.group(2) or None) for m in tag_with_explanation_regex.finditer(m.group('tags'))})
    if return_string_sans_tags:
        # Persistent when the line is only "sage:" plus tags with nothing after.
        is_persistent = (tags and (first_line_sans_comments.strip() == 'sage:') and (not rest))
        return (tags, (((first_line + '\n') + (rest % literals)) if (rest is not None) else first_line), is_persistent)
    else:
        return tags
def siren_init_first(**kwargs):
    """Initialize a SIREN first layer in place.

    Expects ``module`` and ``n`` in ``kwargs``; fills the weights of a
    ``nn.Linear`` uniformly in ``[-1/n, 1/n]`` and leaves any other module
    untouched.
    """
    module = kwargs['module']
    n = kwargs['n']
    if not isinstance(module, nn.Linear):
        return
    bound = 1 / n
    module.weight.data.uniform_(-bound, bound)
# NOTE(review): the lines below appear to be the argument tuples of a stripped
# @click.command()/@click.pass_context/@click.option(...) decorator stack for
# ``main`` — the decorator calls themselves were lost during extraction, so as
# written these lines are not valid standalone Python.  TODO: restore the
# decorators (the option names match ``main``'s parameters one-to-one).
# NOTE(review): ``type=bool`` on --gen_video/--combine/--real is a known click
# pitfall (string "False" parses truthy) — worth confirming upstream.
()
_context
('--network', 'ckpt_path', help='Network pickle filename', required=True)
('--attr_name', help='choose one of the attr: upper_length or bottom_length', type=str, required=True)
('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True)
('--gen_video', type=bool, default=True, help='If want to generate video')
('--combine', type=bool, default=True, help='If want to combine different editing results in the same frame')
('--seeds', type=legacy.num_range, help='List of random seeds')
('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR')
('--real', type=bool, help='True for editing real image', default=False)
('--real_w_path', help='Path of latent code for real image')
('--real_img_path', help='Path of real image, this just concat real image with inverted and edited results together')
def main(ctx: click.Context, ckpt_path: str, attr_name: str, truncation: float, gen_video: bool, combine: bool, seeds: Optional[List[int]], outdir: str, real: bool, real_img_path: str, real_w_path: str = None, real_img_path_unused: str = None):
    """Generate attribute-editing results with three methods (ifg / ss / sefa)
    for sampled seeds, or for one inverted real image, saving a combined
    comparison image and (optionally) per-method or combined videos.

    NOTE(review): signature annotations only improved (``real`` is fed from a
    ``type=bool, default=False`` option above, so ``bool`` not ``str``).
    """
    # Convert the .pkl network pickle into a .pth state dict, then load it.
    legacy.convert(ckpt_path, ckpt_path.replace('.pkl', '.pth'), G_only=real)
    ckpt_path = ckpt_path.replace('.pkl', '.pth')
    print('start...', flush=True)
    config = {'latent': 512, 'n_mlp': 8, 'channel_multiplier': 2}
    generator = Generator(size=1024, style_dim=config['latent'], n_mlp=config['n_mlp'], channel_multiplier=config['channel_multiplier'])
    generator.load_state_dict(torch.load(ckpt_path)['g_ema'])
    generator.eval().cuda()
    with torch.no_grad():
        # Mean latent (for truncation) is cached on disk across runs.
        mean_path = os.path.join('edit', 'mean_latent.pkl')
        if (not os.path.exists(mean_path)):
            mean_n = 3000
            mean_latent = generator.mean_latent(mean_n).detach()
            legacy.save_obj(mean_latent, mean_path)
        else:
            mean_latent = legacy.load_pkl(mean_path).cuda()
        finals = []
        if real:
            # A real image has no seed; run the loop exactly once.
            seeds = [0]
        for t in seeds:
            if real:
                if real_img_path:
                    # The original photo is loaded only so it can be shown
                    # next to the inverted/edited outputs.
                    real_image = cv2.imread(real_img_path)
                    real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB)
                    import torchvision.transforms as transforms
                    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
                    real_image = transform(real_image).unsqueeze(0).cuda()
                # Inverted latent code of the real image.
                test_input = torch.load(real_w_path)
                (output, _) = generator(test_input, False, truncation=1, input_is_latent=True, real=True)
            else:
                # Deterministic z sample for this seed.
                test_input = torch.from_numpy(np.random.RandomState(t).randn(1, 512)).float().cuda()
                (output, _) = generator([test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real)
            # Static edited images from the three editing methods.
            (style_space, latent, noise) = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent, real=real)
            image1 = decoder(generator, style_space, latent, noise)
            (style_space, latent, noise) = encoder_ss(generator, test_input, attr_name, truncation, mean_latent, real=real)
            image2 = decoder(generator, style_space, latent, noise)
            (latent, noise) = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent, real=real)
            (image3, _) = generator([latent], noise=noise, input_is_latent=True)
            # Side-by-side strip (concatenated along width).
            if real_img_path:
                final = torch.cat((real_image, output, image1, image2, image3), 3)
            else:
                final = torch.cat((output, image1, image2, image3), 3)
            if gen_video:
                total_step = 90
                if real:
                    video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[(- 2)]}/"
                    video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[(- 2)]}/"
                    # NOTE(review): this path uses the 'ss_' prefix, making it
                    # identical to video_ss_path — probably meant 'sefa_'.
                    video_sefa_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[(- 2)]}/"
                else:
                    video_ifg_path = f'{outdir}/video/ifg_{attr_name}_{t:05d}/'
                    video_ss_path = f'{outdir}/video/ss_{attr_name}_{t:05d}/'
                    # NOTE(review): same 'ss_' duplication as above.
                    video_sefa_path = f'{outdir}/video/ss_{attr_name}_{t:05d}/'
                video_comb_path = f'{outdir}/video/tmp'
                if combine:
                    if (not os.path.exists(video_comb_path)):
                        os.makedirs(video_comb_path)
                else:
                    if (not os.path.exists(video_ifg_path)):
                        os.makedirs(video_ifg_path)
                    if (not os.path.exists(video_ss_path)):
                        os.makedirs(video_ss_path)
                    if (not os.path.exists(video_sefa_path)):
                        os.makedirs(video_sefa_path)
                for i in range(total_step):
                    # Re-run the three encoders at interpolation step i.
                    (style_space, latent, noise) = encoder_ifg(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
                    image1 = decoder(generator, style_space, latent, noise)
                    (style_space, latent, noise) = encoder_ss(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
                    image2 = decoder(generator, style_space, latent, noise)
                    (latent, noise) = encoder_sefa(generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
                    (image3, _) = generator([latent], noise=noise, input_is_latent=True)
                    if combine:
                        if real_img_path:
                            comb_img = torch.cat((real_image, output, image1, image2, image3), 3)
                        else:
                            comb_img = torch.cat((output, image1, image2, image3), 3)
                        legacy.visual(comb_img, os.path.join(video_comb_path, f'{i:05d}.jpg'))
                    else:
                        # NOTE(review): image3 (sefa) frames are never written
                        # even though video_sefa_path is created — confirm.
                        legacy.visual(image1, os.path.join(video_ifg_path, f'{i:05d}.jpg'))
                        legacy.visual(image2, os.path.join(video_ss_path, f'{i:05d}.jpg'))
                # Encode the frame folders into mp4 files via ffmpeg.
                if combine:
                    cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {(video_ifg_path.replace('ifg_', '')[:(- 1)] + '.mp4')}"
                    subprocess.call(cmd, shell=True)
                else:
                    cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {(video_ifg_path[:(- 1)] + '.mp4')}"
                    subprocess.call(cmd, shell=True)
                    cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {(video_ss_path[:(- 1)] + '.mp4')}"
                    subprocess.call(cmd, shell=True)
            finals.append(final)
        # Stack all per-seed strips along height and save one summary image.
        final = torch.cat(finals, 2)
        legacy.visual(final, os.path.join(outdir, 'final.jpg'))
class Initialized(ExecutionEvent):
    """Event emitted when test execution starts; carries schema metadata.

    NOTE(review): decorator lines appear stripped throughout this file — this
    class was presumably a ``@dataclass`` and ``from_schema`` a
    ``@classmethod`` (it takes ``cls``); confirm against the original source.
    """
    # Raw (resolved) API schema dictionary.
    schema: dict[(str, Any)]
    operations_count: (int | None)
    links_count: (int | None)
    location: (str | None)
    seed: (int | None)
    base_url: str
    specification_name: str
    # Monotonic clock for duration measurement; wall-clock string and thread
    # id are captured at construction time via default factories.
    start_time: float = field(default_factory=time.monotonic)
    started_at: str = field(default_factory=current_datetime)
    thread_id: int = field(default_factory=threading.get_ident)

    def from_schema(cls, *, schema: BaseSchema, count_operations: bool=True, count_links: bool=True, started_at: (str | None)=None, seed: (int | None)) -> Initialized:
        """Build the event from *schema*; counting operations/links can be
        skipped by the caller (the counts are then recorded as None)."""
        return cls(schema=schema.raw_schema, operations_count=(schema.operations_count if count_operations else None), links_count=(schema.links_count if count_links else None), location=schema.location, base_url=schema.get_base_url(), started_at=(started_at or current_datetime()), specification_name=schema.verbose_name, seed=seed)
def dataset_options(func):
    """Apply the shared dataset CLI options to *func*.

    The options are applied innermost-first (reversed) so that the first
    entry in the list is the outermost decorator and appears first in
    ``--help`` output.

    Fix: corrected the user-facing typo "valdiation" -> "validation" in the
    --validation help text.
    """
    decorators = [
        click.option('--datasets-dir', default='./data', show_default=True, help='Path to datasets.'),
        click.option('--dataset', '-d', type=click.Choice(DATASETS), default='imagenet', show_default=True, help='Specify dataset to use in experiment.'),
        click.option('--augmentation/--no-augmentation', default=True, show_default=True, help='Add data augmentation.'),
        click.option('--validation', '-v', default=0, show_default=True, help='Number of examples to use for validation'),
        click.option('--shuffle/--no-shuffle', default=True, show_default=True, help='Shuffle train and validation data before splitting.'),
    ]
    for decorator in reversed(decorators):
        func = decorator(func)
    return func
# NOTE(review): the line below looks like the tail of a stripped
# @pytest.mark.skipif(...) decorator (the test needs the 'fork' start method);
# restore the decorator — as written this line is not valid standalone Python.
.skipif((get_start_method() != 'fork'), reason='multiprocessing with spawn method is not compatible with pytest.')
def test_mapwrapper_parallel():
    """MapWrapper(2): owns its pool, maps correctly, refuses reuse after the
    context exits; wrapping an external map function does not take ownership."""
    in_arg = np.arange(10.0)
    out_arg = np.sin(in_arg)
    with MapWrapper(2) as p:
        out = p(np.sin, in_arg)
        assert_equal(list(out), out_arg)
        # The wrapper created the pool itself, so it owns (and will close) it.
        assert_((p._own_pool is True))
        assert_(isinstance(p.pool, PWL))
        assert_((p._mapfunc is not None))
    # After the context exits the pool is closed; reuse must raise.
    with assert_raises(Exception) as excinfo:
        p(np.sin, in_arg)
    assert_((excinfo.type is ValueError))
    try:
        # Wrapping an externally supplied map function: not owned, so closing
        # the wrapper must leave the underlying pool usable.
        p = Pool(2)
        q = MapWrapper(p.map)
        assert_((q._own_pool is False))
        q.close()
        out = p.map(np.sin, in_arg)
        assert_equal(list(out), out_arg)
    finally:
        p.close()
def reduce_tau(tau):
    """Reduce *tau* (upper half-plane) toward the SL(2,Z) fundamental domain,
    tracking the composed transformation coefficients.

    Returns ``(tau', [a, b, c, d])`` where the final assertions guarantee
    ``a*d - b*c == 1``, ``|tau'| >= 0.999`` and ``|Re(tau')| <= 1/2``.  The
    threshold 0.999 (not 1) means the result is only an approximate
    fundamental-domain representative.
    """
    assert (tau.imag() > 0)
    # Start from the identity matrix.
    (a, b) = (ZZ(1), ZZ(0))
    (c, d) = (b, a)
    # Translate tau into the strip |Re(tau)| <= 1/2, updating the coefficients.
    k = tau.real().round()
    tau -= k
    a -= (k * c)
    b -= (k * d)
    while (tau.abs() < 0.999):
        # Invert (tau -> -1/tau), composing the inversion into the tracked
        # coefficients ...
        tau = ((- 1) / tau)
        (a, b, c, d) = (c, d, (- a), (- b))
        # ... then translate back into the strip.
        k = tau.real().round()
        tau -= k
        a -= (k * c)
        b -= (k * d)
    # Determinant 1: the tracked matrix is in SL(2,Z).
    assert (((a * d) - (b * c)) == 1)
    assert ((tau.abs() >= 0.999) and (tau.real().abs() <= 0.5))
    return (tau, [a, b, c, d])
class SegformerImageProcessor(BaseImageProcessor):
    """Image processor for SegFormer: resizes, rescales and normalizes images,
    optionally reduces segmentation-map labels (ADE20k-style), and offers
    semantic-segmentation post-processing.

    NOTE(review): ``reduce_labels`` reads like a stripped ``@property`` and
    ``from_dict`` like a stripped ``@classmethod`` (it takes ``cls``) —
    decorators appear lost in extraction; confirm against upstream.
    """
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
        # Back-compat: accept the deprecated `reduce_labels` kwarg.
        if ('reduce_labels' in kwargs):
            warnings.warn('The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use `do_reduce_labels` instead.', FutureWarning)
            do_reduce_labels = kwargs.pop('reduce_labels')
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'height': 512, 'width': 512})
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else IMAGENET_DEFAULT_MEAN)
        self.image_std = (image_std if (image_std is not None) else IMAGENET_DEFAULT_STD)
        self.do_reduce_labels = do_reduce_labels

    def reduce_labels(self):
        """Deprecated alias for ``do_reduce_labels``."""
        warnings.warn('The `reduce_labels` property is deprecated and will be removed in a v4.27. Please use `do_reduce_labels` instead.', FutureWarning)
        return self.do_reduce_labels

    def from_dict(cls, image_processor_dict: Dict[(str, Any)], **kwargs):
        """Build from a serialized dict, honoring a `reduce_labels` override
        passed as a kwarg (kept for backward compatibility)."""
        image_processor_dict = image_processor_dict.copy()
        if ('reduce_labels' in kwargs):
            image_processor_dict['reduce_labels'] = kwargs.pop('reduce_labels')
        return super().from_dict(image_processor_dict, **kwargs)

    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize *image* to ``(size['height'], size['width'])``."""
        size = get_size_dict(size)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        return resize(image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize *image* with per-channel *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        """Shift labels down by one so background (0) becomes the ignore
        index 255 (ADE20k-style label reduction)."""
        label = to_numpy_array(label)
        # Mark background with the ignore value before the shift ...
        label[(label == 0)] = 255
        label = (label - 1)
        # ... and keep it at 255 after everything else moved down by one.
        label[(label == 254)] = 255
        return label

    def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_normalize: bool, size: Optional[Dict[(str, int)]]=None, resample: PILImageResampling=None, rescale_factor: Optional[float]=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None):
        """Core pipeline: reduce-labels -> resize -> rescale -> normalize,
        each step gated by its flag."""
        if do_reduce_labels:
            image = self.reduce_label(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        return image

    def _preprocess_image(self, image: ImageInput, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, data_format: Optional[Union[(str, ChannelDimension)]]=None) -> np.ndarray:
        """Preprocess one image (label reduction never applies to images)."""
        image = to_numpy_array(image)
        image = self._preprocess(image=image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std)
        if (data_format is not None):
            image = to_channel_dimension_format(image, data_format)
        return image

    def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: bool=None, do_resize: bool=None, size: Dict[(str, int)]=None) -> np.ndarray:
        """Preprocess one segmentation map: nearest-neighbor resize only, no
        rescale/normalize; output is int64."""
        segmentation_map = to_numpy_array(segmentation_map)
        added_channel_dim = False
        # Temporarily add a channel dim so the shared pipeline applies.
        if (segmentation_map.ndim == 2):
            added_channel_dim = True
            segmentation_map = segmentation_map[(None, ...)]
        segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False)
        if added_channel_dim:
            segmentation_map = segmentation_map.squeeze(0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        """Alias so segmentation maps can be passed positionally alongside images."""
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[Dict[(str, int)]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Preprocess a batch of images (and optional segmentation maps) into a
        ``BatchFeature``; per-call arguments override the instance defaults."""
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        do_reduce_labels = (do_reduce_labels if (do_reduce_labels is not None) else self.do_reduce_labels)
        resample = (resample if (resample is not None) else self.resample)
        size = (size if (size is not None) else self.size)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        images = make_list_of_images(images)
        if (segmentation_maps is not None):
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        if ((segmentation_maps is not None) and (not valid_images(segmentation_maps))):
            raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        if ((do_resize and (size is None)) or (resample is None)):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        images = [self._preprocess_image(image=img, do_resize=do_resize, resample=resample, size=size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format) for img in images]
        data = {'pixel_values': images}
        if (segmentation_maps is not None):
            segmentation_maps = [self._preprocess_mask(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size) for segmentation_map in segmentation_maps]
            data['labels'] = segmentation_maps
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]=None):
        """Turn model logits into per-image semantic maps; if *target_sizes*
        is given, logits are bilinearly resized per image before the argmax."""
        logits = outputs.logits
        if (target_sizes is not None):
            if (len(logits) != len(target_sizes)):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
class HFModelHandler(CommonModelHandler):
    """Model handler that lazily builds a HuggingFace model, tokenizer and
    config from a pipeline-transformer configuration (currently only the
    hard-coded config source is implemented)."""

    def __init__(self, method: GetConfigFrom=GetConfigFrom.HardCoded, *args, **kw):
        super().__init__(*args, **kw)
        self.pipeline_transformer_config = None
        self.method = method
        self.tokenizer = None
        self.config = None

    def _get_normal_model_instance(self, *args, **kw):
        # Instantiate model/tokenizer/config only on first request.
        if self.normal_model_instance is None:
            cfg = self.get_pipeline_transformer_config()
            model, tokenizer, config = pretrained_model_config_and_tokenizer(**cfg)
            self.tokenizer = tokenizer
            self.config = config
            self.normal_model_instance = model
        assert hasattr(self, 'tokenizer')
        assert hasattr(self, 'config')
        return self.normal_model_instance

    def get_pipeline_transformer_config(self):
        # Cached after the first build.
        if self.pipeline_transformer_config is not None:
            return self.pipeline_transformer_config
        if self.method != GetConfigFrom.HardCoded:
            # Generated / ParsedArgs (and any future source) not supported yet.
            raise NotImplementedError()
        assert not os.path.exists(self.generated_file_name_or_path)
        builder = MODEL_TOKENIZER_AND_CONFIG_FUNCTIONS.get(self.generated_file_name_or_path)
        self.pipeline_transformer_config = builder()
        return self.pipeline_transformer_config

    def get_extra(self, *args, **kw):
        return dict(config=self.config, tokenizer=self.tokenizer)

    def get_loader(self, *args, **kw):
        raise NotImplementedError()
def MakeUnDir(tspec, *args):
    """Dispatch to the MakeUnDir_* implementation matching the exact runtime
    type of *tspec*; raise TypeError for any unsupported graph type."""
    handlers = (
        (PUNGraph, MakeUnDir_PUNGraph),
        (PUndirNet, MakeUnDir_PUndirNet),
        (PDirNet, MakeUnDir_PDirNet),
        (PNGraph, MakeUnDir_PNGraph),
        (PNEANet, MakeUnDir_PNEANet),
        (PNGraphMP, MakeUnDir_PNGraphMP),
        (PNEANetMP, MakeUnDir_PNEANetMP),
    )
    for graph_type, handler in handlers:
        # Exact type comparison (not isinstance), matching the original dispatch.
        if type(tspec) == graph_type:
            return handler(tspec, *args)
    raise TypeError('First argument has invalid type')
def _sym_solve(Dinv, A, r1, r2, solve):
r = (r2 + A.dot((Dinv * r1)))
v = solve(r)
u = (Dinv * (A.T.dot(v) - r1))
return (u, v) |
def _get_const(value, desc, arg_name):
    """Require *value* to be a constant node, then parse it as *desc*.

    Raises RuntimeError when *value* is a graph value whose node is neither
    an ``onnx::Constant`` nor a ``prim::Constant``.
    """
    if _is_value(value):
        kind = value.node().kind()
        if kind not in ('onnx::Constant', 'prim::Constant'):
            raise RuntimeError('ONNX symbolic expected a constant value of the {} argument, got `{}`'.format(arg_name, value))
    return _parse_arg(value, desc)
class CutExecutor(ActionExecutor):
    """Executor for the "cut" action: the target must exist, be reachable,
    eatable and cuttable, and the character needs a free hand plus a knife."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
        current_line = script[0]
        info.set_current_line(current_line)
        target = state.get_state_node(current_line.object())
        if target is None:
            info.object_found_error()
            return
        if self.check_cuttable(state, target, info):
            yield state.change_state([])

    def check_cuttable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo):
        """Validate all preconditions for cutting *node*; report the first
        failure through *info* and return False, otherwise return True."""
        character = _get_character_node(state)
        if _find_free_hand(state) is None:
            info.error('{} does not have a free hand', character)
            return False
        if not _is_character_close_to(state, node):
            info.error('{} is not close to {}', character, node)
            return False
        if Property.EATABLE not in node.properties:
            info.error('{} is not eatable', node)
            return False
        if Property.CUTTABLE not in node.properties:
            info.error('{} is not cuttable', node)
            return False
        held = _find_nodes_from(state, character, [Relation.HOLDS_LH, Relation.HOLDS_RH])
        if not any(('knife' in item.class_name) for item in held):
            info.error('{} is not holding a knife', character)
            return False
        return True
def ComputeErrorRates(label_counts, word_counts, seq_errors, num_seqs):
    """Assemble an ErrorRates tuple from raw label, word and sequence counts."""
    # Label errors pool false negatives and false positives over the total
    # number of labels on both the truth and test sides.
    total_label_errors = label_counts.fn + label_counts.fp
    total_labels = label_counts.truth_count + label_counts.test_count
    label_rate = ComputeErrorRate(total_label_errors, total_labels)
    word_fn_rate = ComputeErrorRate(word_counts.fn, word_counts.truth_count)
    word_fp_rate = ComputeErrorRate(word_counts.fp, word_counts.test_count)
    sequence_rate = ComputeErrorRate(seq_errors, num_seqs)
    return ErrorRates(label_rate, word_fn_rate, word_fp_rate, sequence_rate)
def create_base_classifier(return_value, return_prob=None):
    """Build a MagicMock classifier stub.

    Its ``predict`` always returns *return_value* and ``predict_proba``
    returns *return_prob* (``None`` by default).
    """
    stub = MagicMock()
    stub.predict.return_value = return_value
    stub.predict_proba.return_value = return_prob
    return stub
def _generate_fantasized_model(model: FantasizerModelOrStack, fantasized_data: Dataset) -> (_fantasized_model | PredictJointPredictYModelStack):
    """Wrap *model* (or each sub-model of a ModelStack) with the fantasized data.

    For a stack, the observations are split per sub-model along the event
    axis and each sub-model is fantasized with its own slice.
    """
    if not isinstance(model, ModelStack):
        return _fantasized_model(model, fantasized_data)
    per_model_obs = tf.split(fantasized_data.observations, model._event_sizes, axis=(- 1))
    wrapped = [
        (_fantasized_model(sub_model, Dataset(fantasized_data.query_points, obs)), event_size)
        for sub_model, obs, event_size in zip(model._models, per_model_obs, model._event_sizes)
    ]
    return PredictJointPredictYModelStack(*wrapped)
class DigitalMonstersDataset(data.Dataset):
    """Dataset of monster sprites (pokemon / digimon / nexomon) collected from
    subdirectories of *root_path*, with translate/jitter/flip augmentation."""

    def __init__(self, root_path, input_height=None, input_width=None, output_height=128, output_width=None, is_gray=False, pokemon=True, digimon=True, nexomon=True):
        super(DigitalMonstersDataset, self).__init__()
        # Gather image paths from each enabled franchise subfolder.
        image_list = []
        if pokemon:
            print('collecting pokemon...')
            image_list.extend(list_images_in_dir(os.path.join(root_path, 'pokemon')))
        if digimon:
            print('collecting digimon...')
            image_list.extend(list_images_in_dir(os.path.join(root_path, 'digimon', '200')))
        if nexomon:
            print('collecting nexomon...')
            image_list.extend(list_images_in_dir(os.path.join(root_path, 'nexomon')))
        print(f'total images: {len(image_list)}')
        self.image_filenames = image_list
        self.input_height = input_height
        self.input_width = input_width
        self.output_height = output_height
        self.output_width = output_width
        self.root_path = root_path
        self.is_gray = is_gray
        # Augmentation: ~5px random translation (white fill), hue jitter,
        # random horizontal flip, then tensor conversion.
        # NOTE(review): both translate fractions use output_height — if
        # output_width can differ this is probably wrong for the x axis; also
        # `fillcolor` is the older torchvision spelling of `fill` — confirm
        # the pinned torchvision version.
        self.input_transform = transforms.Compose([transforms.RandomAffine(0, translate=((5 / output_height), (5 / output_height)), fillcolor=(255, 255, 255)), transforms.ColorJitter(hue=0.5), transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()])

    def __getitem__(self, index):
        # Load at the configured geometry, then apply the augmentation pipeline.
        img = load_image(self.image_filenames[index], input_height=self.input_height, input_width=self.input_width, output_height=self.output_height, output_width=self.output_width, crop_height=None, crop_width=None, is_random_crop=False, is_mirror=False, is_gray=False)
        img = self.input_transform(img)
        return img

    def __len__(self):
        return len(self.image_filenames)
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3Packet__gt___Ns3UanAddress_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Generated binding registration (PyBindGen-style) for the ns-3
    ``CallbackImpl<void, Ptr<const Packet>, UanAddress, empty...>`` class:
    registers its constructors, typeid helpers, and ``operator()`` exposed to
    Python as ``__call__``.  Do not edit by hand."""
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Packet const >, ns3::UanAddress, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # The C++ call operator is mapped to Python's __call__.
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::Packet const >', 'arg0'), param('ns3::UanAddress', 'arg1')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class Network():
    """TF1 graph/session wrapper that trains an uncertainty module (a log-
    variance head) on top of an embedding network using the mutual
    likelihood score loss; only the 'UncertaintyModule' scope is trained."""

    def __init__(self):
        # Own a private graph and session so multiple Networks can coexist.
        self.graph = tf.Graph()
        gpu_options = tf.GPUOptions(allow_growth=True)
        tf_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=False)
        self.sess = tf.Session(graph=self.graph, config=tf_config)

    def initialize(self, config, num_classes=None):
        """Build the full training graph from the embedding/uncertainty module
        source files referenced in *config*.  `num_classes` is unused here."""
        with self.graph.as_default():
            with self.sess.as_default():
                (h, w) = config.image_size
                channels = config.channels
                # Placeholders fed by train() / extract_feature().
                self.images = tf.placeholder(tf.float32, shape=[None, h, w, channels], name='images')
                self.labels = tf.placeholder(tf.int32, shape=[None], name='labels')
                self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
                self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
                self.phase_train = tf.placeholder(tf.bool, name='phase_train')
                self.global_step = tf.Variable(0, trainable=False, dtype=tf.int32, name='global_step')
                # Load the embedding network module from the file path in config.
                network = imp.load_source('embedding_network', config.embedding_network)
                (mu, conv_final) = network.inference(self.images, config.embedding_size)
                # Uncertainty head predicts log(sigma^2) from the final conv map.
                uncertainty_module = imp.load_source('uncertainty_module', config.uncertainty_module)
                log_sigma_sq = uncertainty_module.inference(conv_final, config.embedding_size, phase_train=self.phase_train, weight_decay=config.weight_decay, scope='UncertaintyModule')
                self.mu = tf.identity(mu, name='mu')
                self.sigma_sq = tf.identity(tf.exp(log_sigma_sq), name='sigma_sq')
                loss_list = []
                self.watch_list = {}
                MLS_loss = mutual_likelihood_score_loss(self.labels, mu, log_sigma_sq)
                loss_list.append(MLS_loss)
                self.watch_list['loss'] = MLS_loss
                reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='reg_loss')
                loss_list.append(reg_loss)
                self.watch_list['reg_loss'] = reg_loss
                total_loss = tf.add_n(loss_list, name='total_loss')
                # NOTE(review): `self.trainable_variables` is used here as an
                # attribute but defined below as a plain method — presumably a
                # stripped @property decorator; confirm upstream, otherwise
                # tf.gradients receives a bound method.
                grads = tf.gradients(total_loss, self.trainable_variables)
                train_ops = []
                opt = tf.train.MomentumOptimizer(self.learning_rate, momentum=0.9)
                apply_gradient_op = opt.apply_gradients(list(zip(grads, self.trainable_variables)))
                # Run batch-norm style update ops alongside the gradient step.
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                train_ops.extend(([apply_gradient_op] + update_ops))
                train_ops.append(tf.assign_add(self.global_step, 1))
                self.train_op = tf.group(*train_ops)
                for (k, v) in self.watch_list.items():
                    tf.summary.scalar(('losses/' + k), v)
                tf.summary.scalar('learning_rate', self.learning_rate)
                self.summary_op = tf.summary.merge_all()
                self.sess.run(tf.local_variables_initializer())
                self.sess.run(tf.global_variables_initializer())
                self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=99)
                return

    def trainable_variables(self):
        # Only the uncertainty head is trained; the embedding net stays frozen.
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='UncertaintyModule')

    def save_model(self, model_dir, global_step):
        """Save a checkpoint; export the metagraph once (first save only)."""
        with self.sess.graph.as_default():
            checkpoint_path = os.path.join(model_dir, 'ckpt')
            metagraph_path = os.path.join(model_dir, 'graph.meta')
            print('Saving variables...')
            self.saver.save(self.sess, checkpoint_path, global_step=global_step, write_meta_graph=False)
            if (not os.path.exists(metagraph_path)):
                print('Saving metagraph...')
                self.saver.export_meta_graph(metagraph_path)

    def restore_model(self, model_dir, restore_scopes=None):
        """Restore trainable variables (optionally filtered by scope names)
        from the latest checkpoint in *model_dir*."""
        var_list = self.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        with self.sess.graph.as_default():
            if (restore_scopes is not None):
                var_list = [var for var in var_list if any([(scope in var.name) for scope in restore_scopes])]
            model_dir = os.path.expanduser(model_dir)
            ckpt_file = tf.train.latest_checkpoint(model_dir)
            print('Restoring {} variables from {} ...'.format(len(var_list), ckpt_file))
            saver = tf.train.Saver(var_list)
            saver.restore(self.sess, ckpt_file)

    def load_model(self, model_path, scope=None):
        """Load a frozen model: import its metagraph + latest checkpoint and
        re-bind the well-known tensors by name."""
        with self.sess.graph.as_default():
            model_path = os.path.expanduser(model_path)
            meta_files = [file for file in os.listdir(model_path) if file.endswith('.meta')]
            assert (len(meta_files) == 1)
            meta_file = os.path.join(model_path, meta_files[0])
            ckpt_file = tf.train.latest_checkpoint(model_path)
            print(('Metagraph file: %s' % meta_file))
            print(('Checkpoint file: %s' % ckpt_file))
            saver = tf.train.import_meta_graph(meta_file, clear_devices=True, import_scope=scope)
            saver.restore(self.sess, ckpt_file)
            self.images = self.graph.get_tensor_by_name('images:0')
            self.phase_train = self.graph.get_tensor_by_name('phase_train:0')
            self.keep_prob = self.graph.get_tensor_by_name('keep_prob:0')
            self.mu = self.graph.get_tensor_by_name('mu:0')
            self.sigma_sq = self.graph.get_tensor_by_name('sigma_sq:0')
            self.config = imp.load_source('network_config', os.path.join(model_path, 'config.py'))

    def train(self, images_batch, labels_batch, learning_rate, keep_prob):
        """Run one optimization step; returns (watch values, summary, step)."""
        feed_dict = {self.images: images_batch, self.labels: labels_batch, self.learning_rate: learning_rate, self.keep_prob: keep_prob, self.phase_train: True}
        (_, wl, sm) = self.sess.run([self.train_op, self.watch_list, self.summary_op], feed_dict=feed_dict)
        step = self.sess.run(self.global_step)
        return (wl, sm, step)

    def extract_feature(self, images, batch_size, proc_func=None, verbose=False):
        """Run inference in batches; returns (mu, sigma_sq) arrays of shape
        (num_images, embedding_size)."""
        num_images = len(images)
        num_features = self.mu.shape[1]
        mu = np.ndarray((num_images, num_features), dtype=np.float32)
        sigma_sq = np.ndarray((num_images, num_features), dtype=np.float32)
        start_time = time.time()
        for start_idx in range(0, num_images, batch_size):
            if verbose:
                elapsed_time = time.strftime('%H:%M:%S', time.gmtime((time.time() - start_time)))
                sys.stdout.write(('# of images: %d Current image: %d Elapsed time: %s \t\r' % (num_images, start_idx, elapsed_time)))
            end_idx = min(num_images, (start_idx + batch_size))
            images_batch = images[start_idx:end_idx]
            # Optional preprocessing hook applied per batch.
            if proc_func:
                images_batch = proc_func(images_batch)
            feed_dict = {self.images: images_batch, self.phase_train: False, self.keep_prob: 1.0}
            (mu[start_idx:end_idx], sigma_sq[start_idx:end_idx]) = self.sess.run([self.mu, self.sigma_sq], feed_dict=feed_dict)
        if verbose:
            print('')
        return (mu, sigma_sq)
def keras_train_distributed(classifier, model_params, save, model_meta, FLAGS, train_dataset_fn, val_dataset_fn, is_pai=True):
    """Train a Keras model with ParameterServerStrategy and export a SavedModel.

    Converts `classifier` to a tf.estimator, trains it distributed, exports the
    trained model to `save` with a parsing serving-input receiver built from the
    model's feature columns, and writes the export path plus model metadata to
    disk for downstream steps.
    NOTE(review): `is_pai` is accepted but never used in this body — confirm callers.
    """
    # Describe this worker's role in the cluster and publish it via TF_CONFIG.
    (cluster, task_type, task_index) = make_distributed_info_without_evaluator(FLAGS)
    dump_into_tf_config(cluster, task_type, task_index)
    dist_strategy = tf.contrib.distribute.ParameterServerStrategy()
    run_config = tf.estimator.RunConfig(tf_random_seed=get_tf_random_seed(), save_checkpoints_steps=100, train_distribute=dist_strategy, session_config=tf.ConfigProto(log_device_placement=True, device_filters=None))
    model_dir = FLAGS.checkpointDir
    keras_estimator = tf.keras.estimator.model_to_estimator(classifier, model_dir=model_dir, config=run_config)
    # 60 and 120 are passed straight to estimator_train_compiled — presumably
    # eval timing knobs; confirm their meaning in that helper.
    estimator_train_compiled(keras_estimator, train_dataset_fn, val_dataset_fn, None, 60, 120)
    # Collect every feature column so the serving signature can parse Examples.
    if ('feature_columns' in model_params):
        all_feature_columns = model_params['feature_columns']
    elif (('linear_feature_columns' in model_params) and ('dnn_feature_columns' in model_params)):
        import copy
        # Shallow-copy so extend() below does not mutate model_params in place.
        all_feature_columns = copy.copy(model_params['linear_feature_columns'])
        all_feature_columns.extend(model_params['dnn_feature_columns'])
    else:
        raise Exception('No expected feature columns in model params')
    serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(tf.feature_column.make_parse_example_spec(all_feature_columns))
    export_path = keras_estimator.export_saved_model(save, serving_input_fn)
    # export_saved_model returns bytes; normalize to str for logging/recording.
    export_path_str = str(export_path.decode('utf-8'))
    with open('exported_path', 'w') as fn:
        fn.write(export_path_str)
    save_metadata('model_meta.json', model_meta)
    print(('Done training, model exported to: %s' % export_path_str))
def do_join(eval_ctx, value, d=u'', attribute=None):
    """Join the items of `value` with the separator `d`.

    When `attribute` is given, the named attribute of each item is joined
    instead of the item itself. Under autoescaping, items that provide
    __html__ are preserved and the rest (plus the separator) are escaped.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
    # Fast path: no autoescaping, plain text join.
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))
    if not hasattr(d, '__html__'):
        # Plain-text separator: escape it only if at least one item is markup;
        # non-markup items are stringified in place.
        value = list(value)
        any_markup = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                any_markup = True
            else:
                value[idx] = text_type(item)
        d = escape(d) if any_markup else text_type(d)
        return d.join(value)
    # Markup separator: soft_unicode keeps markup intact and escapes the rest.
    return soft_unicode(d).join(imap(soft_unicode, value))
def compute_normal(z, c):
    # Estimate the surface normal of the quaternion Julia set (iteration
    # z <- z*z + c) at point `z` by carrying three Jacobian rows J0..J2
    # along the orbit and projecting the final iterate onto them.
    # NOTE(review): relies on module-level `max_norm`, `iters`, `vec4`,
    # `quat_conj`, `quat_mul` and `tm` (taichi.math) — their semantics are
    # assumed from usage here; confirm against their definitions.
    J0 = vec4(1, 0, 0, 0)
    J1 = vec4(0, 1, 0, 0)
    J2 = vec4(0, 0, 1, 0)
    z_curr = z
    iterations = 0
    # Iterate until the orbit escapes (norm >= max_norm) or the budget runs out.
    while ((z_curr.norm() < max_norm) and (iterations < iters)):
        cz = quat_conj(z_curr)
        # Propagate each Jacobian row through the derivative of the
        # quaternion squaring step (component-wise dot products).
        J0 = vec4(tm.dot(J0, cz), tm.dot(J0.xy, z_curr.yx), tm.dot(J0.xz, z_curr.zx), tm.dot(J0.xw, z_curr.wx))
        J1 = vec4(tm.dot(J1, cz), tm.dot(J1.xy, z_curr.yx), tm.dot(J1.xz, z_curr.zx), tm.dot(J1.xw, z_curr.wx))
        J2 = vec4(tm.dot(J2, cz), tm.dot(J2.xy, z_curr.yx), tm.dot(J2.xz, z_curr.zx), tm.dot(J2.xw, z_curr.wx))
        z_curr = (quat_mul(z_curr, z_curr) + c)
        iterations += 1
    # Project the escaped iterate onto the Jacobian rows and normalize.
    return tm.normalize(tm.vec3(tm.dot(z_curr, J0), tm.dot(z_curr, J1), tm.dot(z_curr, J2)))
def get_adafactor_weight_predictor(pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Return a weight predictor wrapping an AdaFactor optimizer.

    Only ``pred_type == 'aggmsnag'`` is implemented; ``'msnag'`` and any other
    value raise ``NotImplementedError``. ``pred_mem`` is accepted for interface
    compatibility but is currently unused here.
    """
    # FIX: removed dead code — the original computed `has_weight_decay` over
    # optimizer.param_groups and then did nothing with it (`if ...: pass`).
    if (pred_type == 'msnag'):
        raise NotImplementedError()
    elif (pred_type == 'aggmsnag'):
        pred_cls = AdaFactorWClonedWeightPredictionForAggregation
    else:
        raise NotImplementedError()
    return pred_cls(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
def hfft(x, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """FFT of a signal with Hermitian symmetry (i.e. a real spectrum).

    Thin dispatcher: forwards all arguments unchanged to the pocketfft
    backend through the shared 1-D execution helper.
    """
    return _execute_1D('hfft', _pocketfft.hfft, x, n=n, axis=axis, norm=norm, overwrite_x=overwrite_x, workers=workers, plan=plan)
class Pix2pixDataset(BaseDataset):
    """Paired (label, image[, instance]) dataset base for pix2pix-style models.

    Subclasses must override :meth:`get_paths` to supply the three path lists;
    :meth:`postprocess` is an optional per-item hook.
    """

    def modify_commandline_options(parser, is_train):
        # NOTE: invoked on the class itself (no instance) in this codebase,
        # hence no self/cls parameter.
        parser.add_argument('--no_pairing_check', action='store_true', help='If specified, skip sanity check of correct label-image file pairing')
        return parser

    def initialize(self, opt):
        """Collect, sort, truncate and (optionally) sanity-check the path lists."""
        self.opt = opt
        (label_paths, image_paths, instance_paths) = self.get_paths(opt)
        util.natural_sort(label_paths)
        util.natural_sort(image_paths)
        if not opt.no_instance:
            util.natural_sort(instance_paths)
        label_paths = label_paths[:opt.max_dataset_size]
        image_paths = image_paths[:opt.max_dataset_size]
        instance_paths = instance_paths[:opt.max_dataset_size]
        if not opt.no_pairing_check:
            # Cheap filename-based check that labels and images line up pairwise.
            for (path1, path2) in zip(label_paths, image_paths):
                assert self.paths_match(path1, path2), ('The label-image pair (%s, %s) do not look like the right pair because the filenames are quite different. Are you sure about the pairing? Please see data/pix2pix_dataset.py to see what is going on, and use --no_pairing_check to bypass this.' % (path1, path2))
        self.label_paths = label_paths
        self.image_paths = image_paths
        self.instance_paths = instance_paths
        self.dataset_size = len(self.label_paths)

    def get_paths(self, opt):
        """Return (label_paths, image_paths, instance_paths). Must be overridden."""
        label_paths = []
        image_paths = []
        instance_paths = []
        assert False, 'A subclass of Pix2pixDataset must override self.get_paths(self, opt)'
        return (label_paths, image_paths, instance_paths)

    def paths_match(self, path1, path2):
        """True when both paths share the same basename without extension."""
        filename1_without_ext = os.path.splitext(os.path.basename(path1))[0]
        filename2_without_ext = os.path.splitext(os.path.basename(path2))[0]
        return (filename1_without_ext == filename2_without_ext)

    def __getitem__(self, index):
        """Return {'label', 'instance', 'image', 'path'} for the given index."""
        label_path = self.label_paths[index]
        label = Image.open(label_path)
        params = get_params(self.opt, label.size)
        transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
        if self.opt.no_one_hot:
            # Treat the label map as a plain RGB image.
            label = label.convert('RGB')
            transform_image = get_transform(self.opt, params)
            label_tensor = transform_image(label)
        else:
            # Recover integer class ids; 255 marks the "unknown" class and is
            # remapped to label_nc, so valid ids afterwards are 0..label_nc.
            label_tensor = (transform_label(label) * 255.0)
            label_tensor[(label_tensor == 255)] = self.opt.label_nc
            # BUG FIX: the original asserted a (condition, message) tuple, which
            # is always truthy and never fired. The bound is also corrected:
            # after the 255 -> label_nc remap, the maximum legal id is label_nc.
            assert torch.max(label_tensor) <= self.opt.label_nc, ('uncorrect number of labels (--label_nc=%f does not match the given labels=%f)' % (self.opt.label_nc, (torch.max(label_tensor) + 1)))
        image_path = self.image_paths[index]
        if not self.opt.no_pairing_check:
            assert self.paths_match(label_path, image_path), ("The label_path %s and image_path %s don't match." % (label_path, image_path))
        image = Image.open(image_path)
        image = image.convert('RGB')
        transform_image = get_transform(self.opt, params)
        image_tensor = transform_image(image)
        if self.opt.no_instance_edge and self.opt.no_instance_dist:
            # No instance information requested at all.
            instance_tensor = 0
        else:
            instance_path = self.instance_paths[index]
            instance = Image.open(instance_path)
            if instance.mode == 'L':
                # 8-bit instance ids: scale back to integers.
                instance_tensor = (transform_label(instance) * 255).long()
            else:
                instance_tensor = transform_label(instance)
        input_dict = {'label': label_tensor, 'instance': instance_tensor, 'image': image_tensor, 'path': image_path}
        self.postprocess(input_dict)
        return input_dict

    def postprocess(self, input_dict):
        """Subclass hook; the default is the identity."""
        return input_dict

    def __len__(self):
        return self.dataset_size
def get_type_information_cname(code, dtype, maxdepth=None):
    """Emit (once per module) a static __Pyx_TypeInfo describing `dtype` and
    return its C name.

    For struct dtypes a companion __Pyx_StructField array is generated and
    referenced. The utility code is registered in code.globalstate so repeated
    calls for the same dtype reuse the already-emitted definition; recursion
    handles nested struct fields up to `maxdepth`.
    """
    namesuffix = mangle_dtype_name(dtype)
    name = ('__Pyx_TypeInfo_%s' % namesuffix)
    structinfo_name = ('__Pyx_StructFields_%s' % namesuffix)
    if dtype.is_error:
        return '<error>'
    if (maxdepth is None):
        maxdepth = dtype.struct_nesting_depth()
    if (maxdepth <= 0):
        # Should be unreachable: nesting depth of a valid dtype is >= 1.
        assert False
    if (name not in code.globalstate.utility_codes):
        # First time we see this dtype in the module: emit its typeinfo.
        code.globalstate.utility_codes.add(name)
        typecode = code.globalstate['typeinfo']
        arraysizes = []
        if dtype.is_array:
            # Peel off array dimensions, recording each extent.
            while dtype.is_array:
                arraysizes.append(dtype.size)
                dtype = dtype.base_type
        complex_possible = (dtype.is_struct_or_union and dtype.can_be_complex())
        declcode = dtype.empty_declaration_code()
        if dtype.is_simple_buffer_dtype():
            # Scalars need no field table.
            structinfo_name = 'NULL'
        elif dtype.is_struct:
            struct_scope = dtype.scope
            if dtype.is_const:
                struct_scope = struct_scope.const_base_type_scope
            # Must be able to emit fields; only generated code
            # missing a do-while loop would fail this.
            fields = struct_scope.var_entries
            assert (len(fields) > 0)
            # Recurse to make sure every field's own typeinfo exists first.
            types = [get_type_information_cname(code, f.type, (maxdepth - 1)) for f in fields]
            typecode.putln(('static __Pyx_StructField %s[] = {' % structinfo_name), safe=True)
            for (f, typeinfo) in zip(fields, types):
                typecode.putln((' {&%s, "%s", offsetof(%s, %s)},' % (typeinfo, f.name, dtype.empty_declaration_code(), f.cname)), safe=True)
            # NULL-terminated field table.
            typecode.putln(' {NULL, NULL, 0}', safe=True)
            typecode.putln('};', safe=True)
        else:
            assert False
        rep = str(dtype)
        flags = '0'
        is_unsigned = '0'
        # Classify the dtype into a one-character buffer type group
        # ('H' char, 'I'/'U' int, 'C' complex, 'R' real, 'S' struct, 'O' object).
        if (dtype is PyrexTypes.c_char_type):
            is_unsigned = ('IS_UNSIGNED(%s)' % declcode)
            typegroup = "'H'"
        elif dtype.is_int:
            is_unsigned = ('IS_UNSIGNED(%s)' % declcode)
            typegroup = ("%s ? 'U' : 'I'" % is_unsigned)
        elif (complex_possible or dtype.is_complex):
            typegroup = "'C'"
        elif dtype.is_float:
            typegroup = "'R'"
        elif dtype.is_struct:
            typegroup = "'S'"
            if dtype.packed:
                flags = '__PYX_BUF_FLAGS_PACKED_STRUCT'
        elif dtype.is_pyobject:
            typegroup = "'O'"
        else:
            assert False, dtype
        typeinfo = 'static __Pyx_TypeInfo %s = { "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };'
        tup = (name, rep, structinfo_name, declcode, (', '.join([str(x) for x in arraysizes]) or '0'), len(arraysizes), typegroup, is_unsigned, flags)
        typecode.putln((typeinfo % tup), safe=True)
    return name
class RenameVar(NodeTransformer):
    """AST pass that rewrites every Name_Node equal to `oldname` into `newname`."""

    def __init__(self, oldname: str, newname: str):
        self.oldname = oldname
        self.newname = newname

    def visit_Name_Node(self, node: ast_internal_classes.Name_Node):
        # Leave unrelated names untouched; replace matches with a fresh node.
        if node.name != self.oldname:
            return node
        return ast_internal_classes.Name_Node(name=self.newname)
def clean_ie_pps(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean PPS codes in `column`, producing a `<column>_clean` column.

    The input frame is converted to dask, each value is formatted per
    `output_format`/`errors`, and the result is computed back into pandas.
    With `inplace=True` the original column is replaced by its cleaned form.
    """
    if output_format not in ('compact', 'standard'):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    df = to_dask(df)

    def _clean_partition(srs):
        # One (formatted_value, ...) tuple per element of the partition.
        return [_format(val, output_format, errors) for val in srs]

    clean_col = f'{column}_clean'
    df['clean_code_tup'] = df[column].map_partitions(_clean_partition, meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': clean_col})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column and keep only the "<column>_clean" name.
        df[column] = df[clean_col]
        df = df.drop(columns=clean_col)
        df = df.rename(columns={column: clean_col})
    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()
    return df
class SkipUpBlock2D(nn.Module):
    """UNet up-block whose resnets consume skip connections, with an optional
    FIR-upsampled image-space "skip_sample" branch.

    When `add_upsample` is set, a final up-sampling resnet doubles spatial
    size and a norm -> act -> conv branch folds the hidden state into the
    running skip_sample accumulator.
    """

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, output_scale_factor=np.sqrt(2.0), add_upsample=True, upsample_padding=1):
        super().__init__()
        self.resnets = nn.ModuleList([])
        for i in range(num_layers):
            # Each resnet sees [hidden_state, matching down-block residual]
            # concatenated on channels; the last residual has in_channels.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            self.resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(((resnet_in_channels + res_skip_channels) // 4), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        # FIR upsampler for the incoming skip_sample image branch.
        self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
        if add_upsample:
            # Resnet that also performs the 2x spatial upsampling (FIR kernel).
            self.resnet_up = ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((out_channels // 4), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_nin_shortcut=True, up=True, kernel='fir')
            # Projects the hidden state to 3 channels before adding it to
            # the skip_sample accumulator.
            self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.skip_norm = torch.nn.GroupNorm(num_groups=min((out_channels // 4), 32), num_channels=out_channels, eps=resnet_eps, affine=True)
            self.act = nn.SiLU()
        else:
            self.resnet_up = None
            self.skip_conv = None
            self.skip_norm = None
            self.act = None

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None):
        """Run the resnets over the residual stack, then update skip_sample.

        Returns (hidden_states, skip_sample).
        """
        for resnet in self.resnets:
            # Pop residuals from the end of the tuple, deepest first.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            hidden_states = resnet(hidden_states, temb)
        if (skip_sample is not None):
            skip_sample = self.upsampler(skip_sample)
        else:
            # First block in the chain: start the accumulator at zero.
            skip_sample = 0
        if (self.resnet_up is not None):
            skip_sample_states = self.skip_norm(hidden_states)
            skip_sample_states = self.act(skip_sample_states)
            skip_sample_states = self.skip_conv(skip_sample_states)
            skip_sample = (skip_sample + skip_sample_states)
            hidden_states = self.resnet_up(hidden_states, temb)
        return (hidden_states, skip_sample)
def mod_endpoint(edge, z, end):
    """Set the endpoint of `edge` adjacent to node `z` to `end`.

    Raises:
        ValueError: when `z` is neither endpoint of the edge.
    """
    if edge.get_node1() == z:
        edge.set_endpoint1(end)
        return
    if edge.get_node2() == z:
        edge.set_endpoint2(end)
        return
    raise ValueError('z not in edge')
def __add_publish_ex3_subprocess(available_detectors: List[str], available_datasets: List[str], subparsers) -> None:
    """Register the `ex3` publish sub-command and its argument groups."""
    experiment_parser = subparsers.add_parser('ex3', formatter_class=SortingHelpFormatter, help="Experiment 3: Publish potential hits for known misuses to assess a detector's recall when it uses its own specifications/patterns.", description="Experiment 3: Publish potential hits for known misuses, i.e., detector findings in the same file and method as a known misuse, to assess the detector's recall. Considers the detector's finding when run on individual project versions. This measures the recall of the entire detector (both mining and detection).")
    # Shared argument groups, registered in the order they should appear
    # in the generated --help output.
    __setup_filter_arguments(experiment_parser, available_datasets)
    __setup_checkout_arguments(experiment_parser)
    __setup_compile_arguments(experiment_parser)
    __setup_run_arguments(experiment_parser, available_detectors)
    __setup_publish_arguments(experiment_parser)
class DirectlyParameterizedNormalDiag(TrackableLayer):
    """Diagonal Gaussian over latents, parameterized directly per data point
    by trainable mean and (positive) std tensors of shape [num_data, latent_dim].
    """

    # Trainable per-point means, shape [num_data, latent_dim].
    means: Parameter
    # Trainable per-point standard deviations, constrained positive.
    stds: Parameter

    def __init__(self, num_data: int, latent_dim: int, means: Optional[np.ndarray]=None):
        """Create the parameters.

        `means` defaults to small random values (to break symmetry); stds
        start at 1e-5. Raises EncoderInitializationError when a provided
        `means` has the wrong shape.
        """
        super().__init__()
        if (means is None):
            means = (0.01 * np.random.randn(num_data, latent_dim))
        elif np.any((means.shape != (num_data, latent_dim))):
            raise EncoderInitializationError(f'means must have shape [num_data, latent_dim] = [{num_data}, {latent_dim}]; got {means.shape} instead.')
        stds = (1e-05 * np.ones_like(means))
        self.means = Parameter(means, dtype=default_float(), name='w_means')
        # positive() keeps the stds strictly positive during optimization.
        self.stds = Parameter(stds, transform=positive(), dtype=default_float(), name='w_stds')

    def call(self, inputs: Optional[TensorType]=None, *args: Any, **kwargs: Any) -> Tuple[(tf.Tensor, tf.Tensor)]:
        """Return (means, stds). `inputs`, when given, is only shape-checked
        against the parameters (same leading N dimension)."""
        if (inputs is not None):
            tf.debugging.assert_shapes([(self.means, ['N', 'W']), (self.stds, ['N', 'W']), (inputs, ['N', 'D'])])
        return (self.means, self.stds)
def load_from_json(file):
    """Parse the JSON document at path `file` and return the resulting object."""
    with open(file, 'r') as handle:
        return json.load(handle)
def isstringfunction(rout):
    """Return a truthy value (1) iff `rout` is a function returning a string.

    The result variable is taken from rout['result'] when present, otherwise
    from rout['name']; its declaration in rout['vars'] decides the answer.
    """
    if not isfunction(rout):
        return 0
    result_name = rout['result'] if 'result' in rout else rout['name']
    if result_name in rout['vars']:
        return isstring(rout['vars'][result_name])
    return 0
def write_examples(job_id, args):
    """Worker entry point: convert this job's share of the OpenWebText
    archives into pretraining tf-record examples.

    Each of `args.num_processes` jobs handles every num_processes-th archive
    (selected by file index modulo job_id) and writes through its own
    ExampleWriter shard.
    """
    job_tmp_dir = os.path.join(args.data_dir, 'tmp', ('job_' + str(job_id)))
    owt_dir = os.path.join(args.data_dir, 'openwebtext')
    def log(*args):
        # NOTE: intentionally shadows the outer `args`; only message parts here.
        msg = ' '.join(map(str, args))
        print('Job {}:'.format(job_id), msg)
    log('Creating example writer')
    example_writer = build_pretraining_dataset.ExampleWriter(job_id=job_id, vocab_file=os.path.join(args.data_dir, 'vocab.txt'), output_dir=os.path.join(args.data_dir, 'pretrain_tfrecords'), max_seq_length=args.max_seq_length, num_jobs=args.num_processes, blanks_separate_docs=False, do_lower_case=args.do_lower_case)
    log('Writing tf examples')
    fnames = sorted(tf.io.gfile.listdir(owt_dir))
    # Deterministic sharding: job j takes files whose index % num_processes == j.
    fnames = [f for (i, f) in enumerate(fnames) if ((i % args.num_processes) == job_id)]
    random.shuffle(fnames)
    start_time = time.time()
    for (file_no, fname) in enumerate(fnames):
        # Periodic progress report with a crude ETA from the running rate.
        if ((file_no > 0) and ((file_no % 10) == 0)):
            elapsed = (time.time() - start_time)
            log('processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, {:} examples written'.format(file_no, len(fnames), ((100.0 * file_no) / len(fnames)), int(elapsed), int(((len(fnames) - file_no) / (file_no / elapsed))), example_writer.n_written))
        utils.rmkdir(job_tmp_dir)
        # NOTE(review): extractall without member sanitization is unsafe for
        # untrusted tarballs (path traversal) — acceptable only for trusted
        # local data like this corpus.
        with tarfile.open(os.path.join(owt_dir, fname)) as f:
            f.extractall(job_tmp_dir)
        extracted_files = tf.io.gfile.listdir(job_tmp_dir)
        random.shuffle(extracted_files)
        for txt_fname in extracted_files:
            example_writer.write_examples(os.path.join(job_tmp_dir, txt_fname))
    example_writer.finish()
    log('Done!')
def is_PrimeFiniteField(x):
    """Deprecated: return whether `x` is a prime finite field.

    Kept for backward compatibility only; emits deprecation #32664 pointing
    callers at isinstance(...) + x.is_prime_field().
    """
    from sage.misc.superseded import deprecation
    deprecation(32664, 'the function is_PrimeFiniteField is deprecated; use isinstance(x, sage.rings.finite_rings.finite_field_base.FiniteField) and x.is_prime_field() instead')
    # Imported lazily — presumably to avoid import cycles at module load time.
    from .finite_field_prime_modn import FiniteField_prime_modn
    from sage.rings.finite_rings.finite_field_base import FiniteField as FiniteField_generic
    return (isinstance(x, FiniteField_prime_modn) or (isinstance(x, FiniteField_generic) and (x.degree() == 1)))
def v_5_1_BIBD(v, check=True):
    """Build a (v,5,1)-BIBD (balanced incomplete block design) by case analysis.

    Requires v == 1 or 5 (mod 20). Dispatches to a direct or recursive
    construction depending on v; when `check` is true the result is verified
    before being returned.
    """
    v = int(v)
    # Necessary admissibility conditions on v.
    assert (v > 1)
    assert (((v % 20) == 5) or ((v % 20) == 1))
    # v = 5q with q = 1 (mod 4) a prime power: direct construction.
    if (((v % 5) == 0) and (((v // 5) % 4) == 1) and is_prime_power((v // 5))):
        bibd = BIBD_5q_5_for_q_prime_power((v // 5))
    # Small sporadic orders solved via difference families.
    elif (v in [21, 41, 61, 81, 141, 161, 281]):
        from .difference_family import difference_family
        (G, D) = difference_family(v, 5)
        bibd = BIBD_from_difference_family(G, D, check=False)
    elif (v == 165):
        bibd = BIBD_from_PBD(v_5_1_BIBD(41, check=False), 165, 5, check=False)
    elif (v == 181):
        bibd = BIBD_from_PBD(v_5_1_BIBD(45, check=False), 181, 5, check=False)
    elif (v in (201, 285, 301, 401, 421, 425)):
        bibd = BIBD_from_TD(v, 5)
    # Ranges of r = (v-1)/4 covered by truncated transversal designs.
    elif (((v - 1) // 4) in [80, 81, 85, 86, 90, 91, 95, 96, 110, 111, 115, 116, 120, 121, 250, 251, 255, 256, 260, 261, 265, 266, 270, 271]):
        r = ((v - 1) // 4)
        if (r <= 96):
            (k, t, u) = (5, 16, (r - 80))
        elif (r <= 121):
            (k, t, u) = (10, 11, (r - 110))
        else:
            (k, t, u) = (10, 25, (r - 250))
        bibd = BIBD_from_PBD(PBD_from_TD(k, t, u), v, 5, check=False)
    else:
        # General case: t, u come from the helper.
        # NOTE(review): `r` and `s` are unpacked but unused here — confirm
        # against _get_r_s_t_u's contract.
        (r, s, t, u) = _get_r_s_t_u(v)
        bibd = BIBD_from_PBD(PBD_from_TD(5, t, u), v, 5, check=False)
    if check:
        assert is_pairwise_balanced_design(bibd, v, [5])
    return bibd
class TestFeatureOptimizer(unittest.TestCase):
    """Smoke test: FeatureMapExtractor produces feature maps from a small CNN."""

    def setUp(self) -> None:
        # Small conv net on 28x28x3 inputs; only the intermediate conv maps
        # matter for the extractor under test.
        self.model = tf.keras.Sequential([tf.keras.layers.Input((28, 28, 3)), tf.keras.layers.Conv2D(16, (3, 3)), tf.keras.layers.Conv2D(16, (3, 3)), tf.keras.layers.MaxPool2D((2, 2)), tf.keras.layers.Conv2D(16, (3, 3)), tf.keras.layers.Conv2D(16, (3, 3)), tf.keras.layers.MaxPool2D((2, 2)), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10)])
        self.model.compile()

    def test(self):
        # layers[-4] is the last Conv2D (before MaxPool2D/Flatten/Dense).
        extractor = FeatureMapExtractor(model=self.model, layer=self.model.layers[(- 4)])
        x = tf.convert_to_tensor(np.random.rand(1, 28, 28, 3), dtype=tf.float32)
        outputs = extractor.extract(x)
        print(outputs.shape)
class FlowNetS(nn.Module):
    """FlowNetSimple optical-flow CNN: a contracting encoder plus a refinement
    decoder that predicts flow at five scales.

    Each decoder stage concatenates the matching encoder features, the
    previous deconvolution output and the upsampled coarser flow, so the
    channel counts below are e.g. 1026 = 512 + 512 + 2. In training mode all
    five flow scales are returned; in eval mode only the finest.
    """

    def __init__(self, args, input_channels=12, batchNorm=True):
        super(FlowNetS, self).__init__()
        self.batchNorm = batchNorm
        # Contracting part: six stride-2 stages halving resolution.
        self.conv1 = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2)
        self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
        self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
        self.conv3_1 = conv(self.batchNorm, 256, 256)
        self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
        self.conv4_1 = conv(self.batchNorm, 512, 512)
        self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
        self.conv5_1 = conv(self.batchNorm, 512, 512)
        self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
        self.conv6_1 = conv(self.batchNorm, 1024, 1024)
        # Expanding part: deconvolutions over concatenated features.
        self.deconv5 = deconv(1024, 512)
        self.deconv4 = deconv(1026, 256)
        self.deconv3 = deconv(770, 128)
        self.deconv2 = deconv(386, 64)
        # Per-scale 2-channel flow predictors.
        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(1026)
        self.predict_flow4 = predict_flow(770)
        self.predict_flow3 = predict_flow(386)
        self.predict_flow2 = predict_flow(194)
        # Learned 2x upsamplers carrying each coarse flow to the next scale.
        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
        # Weight init. BUG FIX: the original called the deprecated
        # torch.nn.init.uniform / init.xavier_uniform, which have been removed
        # in modern PyTorch; the trailing-underscore in-place variants are the
        # direct, behavior-identical replacements.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    def forward(self, x):
        """Return five flow maps (training) or just the finest flow (eval)."""
        out_conv1 = self.conv1(x)
        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3_1(self.conv3(out_conv2))
        out_conv4 = self.conv4_1(self.conv4(out_conv3))
        out_conv5 = self.conv5_1(self.conv5(out_conv4))
        out_conv6 = self.conv6_1(self.conv6(out_conv5))
        # Coarsest flow, then iteratively refine while doubling resolution.
        flow6 = self.predict_flow6(out_conv6)
        flow6_up = self.upsampled_flow6_to_5(flow6)
        out_deconv5 = self.deconv5(out_conv6)
        concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
        flow5 = self.predict_flow5(concat5)
        flow5_up = self.upsampled_flow5_to_4(flow5)
        out_deconv4 = self.deconv4(concat5)
        concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
        flow4 = self.predict_flow4(concat4)
        flow4_up = self.upsampled_flow4_to_3(flow4)
        out_deconv3 = self.deconv3(concat4)
        concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
        flow3 = self.predict_flow3(concat3)
        flow3_up = self.upsampled_flow3_to_2(flow3)
        out_deconv2 = self.deconv2(concat3)
        concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)
        flow2 = self.predict_flow2(concat2)
        if self.training:
            return (flow2, flow3, flow4, flow5, flow6)
        else:
            return (flow2,)
def redirect_entity(ent, redirects_en):
    """Resolve a Wikipedia redirect for entity name `ent`.

    Keys in `redirects_en` use underscores; the result is returned with
    spaces. `None` and unmapped names pass through unchanged.
    """
    if ent is None:
        return ent
    key = ent.replace(' ', '_')
    if key in redirects_en:
        return redirects_en[key].replace('_', ' ')
    return ent
def read_keyframes(video_fpath: str, keyframes: FrameTsList, video_stream_idx: int=0) -> FrameList:
    """Decode the frames at the given keyframe timestamps from a video file.

    On any per-frame error, decoding stops and the frames gathered so far are
    returned; when the container cannot be opened at all, the result is [].
    """
    logger = logging.getLogger(__name__)
    frames = []
    try:
        with PathManager.open(video_fpath, 'rb') as io:
            container = av.open(io)
            # BUG FIX: the original's outer handlers referenced `pts` and
            # `frames` that could be unbound (NameError masking the real
            # error) and only closed the container on some paths; the
            # try/finally below guarantees the close, and each per-frame
            # failure now breaks out with the partial result.
            try:
                stream = container.streams.video[video_stream_idx]
                for pts in keyframes:
                    try:
                        container.seek(pts, any_frame=False, stream=stream)
                        frame = next(container.decode(video=0))
                        frames.append(frame)
                    except av.AVError as e:
                        logger.warning(f'Read keyframes: Error seeking video file {video_fpath}, video stream {video_stream_idx}, pts {pts}, AV error: {e}')
                        break
                    except OSError as e:
                        logger.warning(f'Read keyframes: Error seeking video file {video_fpath}, video stream {video_stream_idx}, pts {pts}, OS error: {e}')
                        break
                    except StopIteration:
                        logger.warning(f'Read keyframes: Error decoding frame from {video_fpath}, video stream {video_stream_idx}, pts {pts}')
                        break
            finally:
                container.close()
    except OSError as e:
        logger.warning(f'Read keyframes: Error opening video file container {video_fpath}, OS error: {e}')
    return frames
def evaluate(args, model, tokenizer, processor, prefix=''):
    """Run dialogue-state-tracking evaluation over the cached eval dataset.

    Carries a per-slot dialog state (`diag_state`) across turns within each
    dialogue, resetting it whenever a batch element starts a new dialogue
    (turn index '0'). Writes all formatted predictions to a JSON file and
    returns the metric tensors averaged over batches.
    """
    (dataset, features) = load_and_cache_examples(args, model, tokenizer, processor, evaluate=True)
    if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size
    # Sequential order matters: dialog state is threaded turn by turn.
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    all_results = []
    all_preds = []
    # ds: running per-slot prediction strings used by predict_and_format.
    ds = {slot: 'none' for slot in model.slot_list}
    with torch.no_grad():
        # One state slot per batch position, carried across batches.
        diag_state = {slot: torch.tensor([0 for _ in range(args.eval_batch_size)]).to(args.device) for slot in model.slot_list}
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            model.eval()
            batch = batch_to_device(batch, args.device)
            # batch[9] holds example indices into `features`; the guid's third
            # dash-separated component is the turn index within the dialogue.
            turn_itrs = [features[i.item()].guid.split('-')[2] for i in batch[9]]
            reset_diag_state = np.where((np.array(turn_itrs) == '0'))[0]
            # New dialogue at these batch positions: clear their carried state.
            for slot in model.slot_list:
                for i in reset_diag_state:
                    diag_state[slot][i] = 0
            with torch.no_grad():
                inputs = {'input_ids': batch[0], 'input_mask': batch[1], 'segment_ids': batch[2], 'start_pos': batch[3], 'end_pos': batch[4], 'inform_slot_id': batch[5], 'refer_id': batch[6], 'diag_state': diag_state, 'class_label_id': batch[8]}
                unique_ids = [features[i.item()].guid for i in batch[9]]
                values = [features[i.item()].values for i in batch[9]]
                input_ids_unmasked = [features[i.item()].input_ids_unmasked for i in batch[9]]
                inform = [features[i.item()].inform for i in batch[9]]
                outputs = model(**inputs)
                # outputs[2] holds per-slot class logits; a nonzero argmax
                # updates the carried dialog state for that slot/position.
                for slot in model.slot_list:
                    updates = outputs[2][slot].max(1)[1]
                    for (i, u) in enumerate(updates):
                        if (u != 0):
                            diag_state[slot][i] = u
            results = eval_metric(model, inputs, outputs[0], outputs[1], outputs[2], outputs[3], outputs[4], outputs[5])
            (preds, ds) = predict_and_format(model, tokenizer, inputs, outputs[2], outputs[3], outputs[4], outputs[5], unique_ids, input_ids_unmasked, values, inform, prefix, ds)
            all_results.append(results)
            all_preds.append(preds)
    # Flatten the per-batch prediction lists into one list of dicts.
    all_preds = [item for sublist in all_preds for item in sublist]
    final_results = {}
    for k in all_results[0].keys():
        # Average each metric across batches.
        final_results[k] = torch.stack([r[k] for r in all_results]).mean()
    output_prediction_file = os.path.join(args.output_dir, ('pred_res.%s.%s.json' % (args.predict_type, prefix)))
    with open(output_prediction_file, 'w') as f:
        json.dump(all_preds, f, indent=2)
    return final_results
class CFQ(TextDataset):
    """Compositional Freebase Questions (CFQ) dataset loader.

    Parses the (large) dataset.json with an mmap scan instead of one giant
    json.loads, normalizes questions and SPARQL targets, and builds a
    TextDatasetCache keyed by the official split files.
    """

    # NOTE(review): this URL literal appears truncated in this copy of the
    # file — restore the full download URL before use.
    URL = '

    def tokenize_punctuation(self, text):
        """Space-separate punctuation characters and re-normalize whitespace."""
        text = map((lambda c: ((' %s ' % c) if (c in string.punctuation) else c)), text)
        return ' '.join(''.join(text).split())

    def preprocess_sparql(self, query):
        """Normalize a SPARQL query: split count(*), strip ns: prefixes,
        rewrite m. entity ids to m_, and collapse literal newlines."""
        query = query.replace('count(*)', 'count ( * )')
        tokens = []
        for token in query.split():
            if token.startswith('ns:'):
                token = token[3:]
            if token.startswith('m.'):
                token = ('m_' + token[2:])
            tokens.append(token)
        return ' '.join(tokens).replace('\\n', ' ')

    def load_data(self, fname: str) -> Tuple[(List[str], List[str])]:
        """Stream (question, sparql) pairs out of dataset.json.

        Scans the memory-mapped file for the 'complexityMeasures' key to
        delimit successive JSON records, decoding one record at a time.
        NOTE(review): the offset constants (+6, -5, -4, len-2) encode the
        exact punctuation layout of the file — verify against a sample record.
        """
        pin = 'complexityMeasures'.encode()
        offset = 1
        cnt = 0
        inputs = []
        outputs = []
        with open(fname, 'r') as f:
            data = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
            pbar = tqdm(total=len(data))
            pbar.update(offset)
            while True:
                pos = data.find(pin, (offset + 6))
                if (pos < 0):
                    # Last record: runs to the end of the file.
                    this = data[offset:(len(data) - 2)]
                else:
                    this = data[offset:(pos - 5)]
                    new_offset = (pos - 4)
                    pbar.update((new_offset - offset))
                    offset = new_offset
                d = json.loads(this.decode())
                inputs.append(self.tokenize_punctuation(d['questionPatternModEntities']))
                outputs.append(self.preprocess_sparql(d['sparqlPatternModEntities']))
                cnt += 1
                if (pos < 0):
                    break
        return (inputs, outputs)

    def build_cache(self) -> TextDatasetCache:
        """Extract the archive if needed, read the split index files, and
        build the (inputs, outputs, splits) cache."""
        index_table = {}
        if (not os.path.isdir(os.path.join(self.cache_dir, 'cfq'))):
            gzfile = os.path.join(self.cache_dir, os.path.basename(self.URL))
            if (not os.path.isfile(gzfile)):
                # The archive cannot be fetched automatically (login wall).
                assert False, f'Please download {self.URL} and place it in the {os.path.abspath(self.cache_dir)} folder. Google login needed.'
            with tarfile.open(gzfile, 'r') as tf:
                tf.extractall(path=self.cache_dir)
        splitdir = os.path.join(self.cache_dir, 'cfq', 'splits')
        for f in os.listdir(splitdir):
            if (not f.endswith('.json')):
                continue
            # e.g. "mcd1_split.json" -> split name "mcd1".
            name = f[:(- 5)].replace('_split', '')
            with open(os.path.join(splitdir, f), 'r') as f:
                ind = json.loads(f.read())
                index_table[name] = {'train': ind['trainIdxs'], 'val': ind['devIdxs'], 'test': ind['testIdxs']}
        (in_sentences, out_sentences) = self.load_data(os.path.join(self.cache_dir, 'cfq/dataset.json'))
        assert (len(in_sentences) == len(out_sentences))
        return TextDatasetCache().build(index_table, in_sentences, out_sentences, split_punctuation=False)
def kldiv(x, xp, k=3, base=2):
    """k-nearest-neighbour estimator of the KL divergence D(x || xp).

    `x` and `xp` are lists/arrays of same-dimension sample vectors; the
    result is expressed in units of log `base`. Distances use the
    Chebyshev (L-infinity) metric.
    """
    assert k <= len(x) - 1, 'Set k smaller than num. samples - 1'
    assert k <= len(xp) - 1, 'Set k smaller than num. samples - 1'
    assert len(x[0]) == len(xp[0]), 'Two distributions must have same dim.'
    dim = len(x[0])
    n, m = len(x), len(xp)
    bias = log(m) - log(n - 1)
    tree_x = ss.cKDTree(x)
    tree_xp = ss.cKDTree(xp)
    inf = float('inf')
    # k+1 within x because the query point itself is its own 0th neighbour.
    nn = [tree_x.query(pt, (k + 1), p=inf)[0][k] for pt in x]
    nnp = [tree_xp.query(pt, k, p=inf)[0][(k - 1)] for pt in x]
    mean_log_nnp = np.mean(list(map(log, nnp)))
    mean_log_nn = np.mean(list(map(log, nn)))
    return (bias + dim * mean_log_nnp - dim * mean_log_nn) / log(base)
class NetG(nn.Module):
    """Encoder-decoder-encoder generator.

    Produces a reconstruction of the input plus the latent codes of both
    the input and its reconstruction.
    """

    def __init__(self, opt):
        super(NetG, self).__init__()
        self.encoder1 = Encoder(opt.isize, opt.nz, opt.nc, opt.ngf, opt.ngpu, opt.extralayers)
        self.decoder = Decoder(opt.isize, opt.nz, opt.nc, opt.ngf, opt.ngpu, opt.extralayers)
        self.encoder2 = Encoder(opt.isize, opt.nz, opt.nc, opt.ngf, opt.ngpu, opt.extralayers)

    def forward(self, x):
        """Return (reconstruction, latent of input, latent of reconstruction)."""
        z_in = self.encoder1(x)
        reconstruction = self.decoder(z_in)
        z_out = self.encoder2(reconstruction)
        return (reconstruction, z_in, z_out)
def _get_pipeline_hyperparameter(hyperparameters, dataset_name, pipeline_name):
hyperparameters_ = deepcopy(hyperparameters)
if hyperparameters:
hyperparameters_ = (hyperparameters_.get(dataset_name) or hyperparameters_)
hyperparameters_ = (hyperparameters_.get(pipeline_name) or hyperparameters_)
if ((hyperparameters_ is None) and dataset_name and pipeline_name):
file_path = os.path.join(PIPELINE_DIR, pipeline_name, (((pipeline_name + '_') + dataset_name.lower()) + '.json'))
if os.path.exists(file_path):
hyperparameters_ = file_path
if (isinstance(hyperparameters_, str) and os.path.exists(hyperparameters_)):
with open(hyperparameters_) as f:
hyperparameters_ = json.load(f)
return hyperparameters_ |
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and a bias term."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def DMSToDecimal(degrees, minutes, seconds):
    """Convert degrees/minutes/seconds to decimal degrees.

    The sign of `degrees` determines the sign of the result (note: a
    negative angle with degrees == 0 cannot be expressed this way).
    """
    magnitude = abs(degrees) + minutes / 60.0 + seconds / 3600.0
    return -magnitude if degrees < 0 else magnitude
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenizer."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split `text` (already whitespace/basic-tokenized) into wordpieces.

        Words longer than `max_input_chars_per_word`, and words containing
        any span that cannot be matched against the vocabulary, become a
        single `unk_token`.
        """
        pieces = []
        for word in whitespace_tokenize(text):
            chars = list(word)
            if len(chars) > self.max_input_chars_per_word:
                pieces.append(self.unk_token)
                continue
            sub_tokens = self._greedy_split(chars)
            if sub_tokens is None:
                pieces.append(self.unk_token)
            else:
                pieces.extend(sub_tokens)
        return pieces

    def _greedy_split(self, chars):
        # Longest-match-first scan. Non-initial pieces carry the '##'
        # continuation prefix. Returns None when some suffix has no match.
        sub_tokens = []
        start = 0
        while start < len(chars):
            end = len(chars)
            match = None
            while start < end:
                piece = ''.join(chars[start:end])
                if start > 0:
                    piece = ('##' + piece)
                if piece in self.vocab:
                    match = piece
                    break
                end -= 1
            if match is None:
                return None
            sub_tokens.append(match)
            start = end
        return sub_tokens
def compute_metrics(pred):
    """Accuracy of argmax predictions against `pred.label_ids`."""
    y_true = pred.label_ids
    y_pred = pred.predictions.argmax((- 1))
    return {'accuracy': accuracy_score(y_true, y_pred)}
class CoordinateDescentTuner(Tuner):
def line_search(self, config, cur_param, epsilon, budget, cur_score=0):
    """Step `cur_param` in the direction that improves the score, until the
    score stops improving or a budget is exhausted.

    Returns (best value found, its score); returns the original value and
    `cur_score` unchanged when neither direction helps or no step fits the
    parameter's range. `config` is mutated in place while probing.
    NOTE(review): on a worsening step inside the loop, `cur_val` is rolled
    back but config[cur_param] still holds the worse probed value — confirm
    the caller restores config from the returned value.
    """
    (minval, maxval) = self.search_space[cur_param]['range']
    # Step size is a fixed fraction (epsilon) of the parameter's range.
    Y = (maxval - minval)
    delta = (epsilon * Y)
    orig_val = config[cur_param]
    # No direction fits inside the range: nothing to try.
    if (((orig_val + delta) > maxval) and ((orig_val - delta) < minval)):
        return (orig_val, cur_score)
    # Probe the positive direction first.
    cur_val = (orig_val + delta)
    config[cur_param] = cur_val
    local_cost = 0
    score = self.evaluate_configs([config], show_loading=False)
    local_cost += 1
    # "Worse" depends on whether we are maximizing or minimizing.
    if ((self.maximize and (score < cur_score)) or ((not self.maximize) and (score > cur_score))):
        # Positive step hurt: flip direction and probe the negative side.
        delta = ((- 1) * delta)
        cur_val = (orig_val + delta)
        config[cur_param] = cur_val
        score = self.evaluate_configs([config], show_loading=False)
        local_cost += 1
        if ((self.maximize and (score < cur_score)) or ((not self.maximize) and (score > cur_score))):
            # Both directions hurt: keep the original value.
            return (orig_val, cur_score)
    prev_score = score
    # Keep stepping in the chosen direction while local and global budgets last.
    while ((local_cost < budget) and (self.cost < self.budget)):
        if (((cur_val + delta) > maxval) or ((cur_val + delta) < minval)):
            # Next step would leave the allowed range.
            break
        cur_val += delta
        config[cur_param] = cur_val
        score = self.evaluate_configs([config], show_loading=False)
        local_cost += 1
        # NOTE(review): comparison is against the initial cur_score, not
        # prev_score — steps are accepted as long as they beat the start.
        if ((self.maximize and (score < cur_score)) or ((not self.maximize) and (score > cur_score))):
            cur_val -= delta
            break
        prev_score = score
    return (cur_val, prev_score)
def tune_impl(self, **kwargs):
if (('alpha' not in kwargs) or ('decay_rate' not in kwargs)):
print('Coordinate descent requires alpha and decay_rate!')
return
alpha = kwargs['alpha']
decay_rate = kwargs['decay_rate']
if ('init_method' in kwargs):
init_method = kwargs['init_method']
else:
init_method = 'average'
if ('line_search_budget' in kwargs):
line_search_budget = kwargs['line_search_budget']
else:
line_search_budget = 10
if ('randomize_param_order' in kwargs):
randomize_param_order = bool(kwargs['randomize_param_order'])
else:
randomize_param_order = False
coordinates = sorted(list(self.search_space.keys()))
if (self.start_config is not None):
config = self.start_config
elif (init_method == 'average'):
config = {}
for coordinate in coordinates:
param = self.search_space[coordinate]
if isinstance(param, dict):
if ('range' in param):
(minval, maxval) = param['range']
config[coordinate] = ((maxval + minval) / 2)
elif isinstance(param, list):
config[k] = param[0]
elif (init_method == 'random'):
config = RandomTuner.generate_configs(self.search_space, 1)[0]
else:
print('{} is invalid init_method!'.format(init_method))
return
if ((self.start_config is not None) and (self.start_score is not None)):
score = self.start_score
self.best_score = score
self.log_msg('Starting score: {}'.format(score))
else:
score = self.evaluate_configs([config], show_loading=False)
def config_to_point(config):
coords = sorted(coordinates)
point = tuple([config[coord] for coord in coords])
return point
visited_points = set()
cur_score = score
rounds = 0
rounds_since_last_improvement = 0
last_best_score = cur_score
while (self.cost < self.budget):
self.log_msg('Round {}, current cost {}'.format(rounds, self.cost))
changed = False
new_configs = False
if randomize_param_order:
random.shuffle(coordinates)
for coordinate in coordinates:
if (self.cost >= self.budget):
break
self.log_msg('Coordinate {}, current cost {}'.format(coordinate, self.cost))
orig_val = config[coordinate]
param = self.search_space[coordinate]
if isinstance(param, list):
max_score = cur_score
best_choice = orig_val
for choice in param:
if (choice == orig_val):
continue
config[coordinate] = choice
score = self.evaluate_configs([config], show_loading=False)
if ((self.maximize and (score > max_score)) or ((not self.maximize) and (score < max_score))):
best_choice = choice
max_score = score
if (self.cost >= self.budget):
break
self.log_msg('Old: {}, new: {}'.format(orig_val, best_choice))
if (best_choice != orig_val):
changed = True
config[coordinate] = best_choice
cur_score = max_score
elif isinstance(param, dict):
if ('range' in param):
(best_choice, max_score) = self.line_search(config, coordinate, alpha, line_search_budget, cur_score=cur_score)
print(self.cost)
self.log_msg('Old: {}, New: {}'.format(orig_val, best_choice))
if (best_choice != orig_val):
changed = True
config[coordinate] = best_choice
cur_score = max_score
config_point = config_to_point(config)
if (config_point not in visited_points):
visited_points.add(config_point)
new_configs = True
if (cur_score == last_best_score):
rounds_since_last_improvement += 1
else:
rounds_since_last_improvement = 0
if ((not changed) or (rounds_since_last_improvement >= 5) or (not new_configs)):
alpha *= decay_rate
self.log_msg('New alpha: {}, current cost {}'.format(alpha, self.cost))
if (alpha < 1e-06):
break
rounds_since_last_improvement = 0
rounds += 1 |
def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
    """Parse an "x,y" string into a float pair; tuples pass through as-is.

    Raises:
        ValueError: if the string does not contain exactly two components.
    """
    if isinstance(s, tuple):
        return s
    pieces = s.split(',')
    if len(pieces) != 2:
        raise ValueError(f'cannot parse 2-vector {s}')
    return (float(pieces[0]), float(pieces[1]))
class PhysicallyOffsetPaddle125BreakoutWorld(OffsetPaddleBreakoutWorld):
    """Breakout world with the paddle offset by 125 units.

    Presumably the collision geometry moves while the paddle stays drawn at
    the un-offset position (VisuallyFixedOffsetPaddle) -- confirm against
    OffsetPaddleBreakoutWorld / the paddle class.
    """
    paddle_class = VisuallyFixedOffsetPaddle
    paddle_offset = 125
def MODEL(model_name, weight_decay, image, label, lr, epoch, is_training):
    """Build a slim network and its loss/accuracy ops.

    Returns (losses, accuracy): in training mode `losses` is a one-element
    list holding cross-entropy + regularization losses; in eval mode it is
    the bare cross-entropy tensor. NOTE(review): `epoch` is unused and the
    inner `scale_grad` helper is defined but never called here -- confirm
    whether callers/other variants rely on them.
    """
    network_fn = nets_factory.get_network_fn(model_name, weight_decay=weight_decay)
    end_points = network_fn(image, is_training=is_training, lr=lr, val=(not is_training))
    losses = []
    if is_training:
        def scale_grad(x, scale):
            # Identity in the forward pass; the stop_gradient term carries no
            # gradient, so backprop sees the gradient scaled by `scale`.
            return ((scale * x) + tf.stop_gradient(((1 - scale) * x)))
        with tf.variable_scope('Student_loss'):
            loss = tf.losses.softmax_cross_entropy(label, end_points['Logits'])
            accuracy = slim.metrics.accuracy(tf.to_int32(tf.argmax(end_points['Logits'], 1)), tf.to_int32(tf.argmax(label, 1)))
            tf.summary.scalar('loss', loss)
            tf.summary.scalar('accuracy', accuracy)
            losses.append((loss + tf.add_n(tf.losses.get_regularization_losses())))
    else:
        losses = tf.losses.softmax_cross_entropy(label, end_points['Logits'])
        accuracy = slim.metrics.accuracy(tf.to_int32(tf.argmax(end_points['Logits'], 1)), tf.to_int32(tf.argmax(label, 1)))
    return (losses, accuracy)
class STS16CLEval(STSEval):
    """SentEval-style STS16 transfer task over two datasets.

    Loads 'multisource' and 'news' pairs from STS.input.*.txt with gold
    scores from STS.gs.*.txt, drops pairs without a score, and sorts by
    sentence length for efficient batching.
    """
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS16CL *****\n\n')
        self.seed = seed
        self.datasets = ['multisource', 'news']
        self.loadFile(taskpath)
    def loadFile(self, fpath):
        """Populate self.data[dataset] = (sent1, sent2, gs_scores) and extend
        self.samples with every tokenized sentence."""
        self.data = {}
        self.samples = []
        for dataset in self.datasets:
            # 'multisource' rows carry two extra tab-separated columns that
            # are discarded; 'news' rows have exactly two.
            if (dataset == 'multisource'):
                (sent1, sent2, _, _) = zip(*[l.split('\t') for l in io.open((fpath + ('/STS.input.%s.txt' % dataset)), encoding='utf8').read().splitlines()])
            elif (dataset == 'news'):
                (sent1, sent2) = zip(*[l.split('\t') for l in io.open((fpath + ('/STS.input.%s.txt' % dataset)), encoding='utf8').read().splitlines()])
            raw_scores = np.array([x for x in io.open((fpath + ('/STS.gs.%s.txt' % dataset)), encoding='utf8').read().splitlines()])
            # Keep only pairs that actually have a gold similarity score.
            not_empty_idx = (raw_scores != '')
            gs_scores = [float(x) for x in raw_scores[not_empty_idx]]
            sent1 = np.array([s.split() for s in sent1])[not_empty_idx]
            sent2 = np.array([s.split() for s in sent2])[not_empty_idx]
            # Sort by (len(sent1), len(sent2), score) so batches group
            # similar-length sentences.
            sorted_data = sorted(zip(sent1, sent2, gs_scores), key=(lambda z: (len(z[0]), len(z[1]), z[2])))
            (sent1, sent2, gs_scores) = map(list, zip(*sorted_data))
            self.data[dataset] = (sent1, sent2, gs_scores)
            self.samples += (sent1 + sent2)
class OneFormerImageProcessor(metaclass=DummyObject):
    """Import-time placeholder for OneFormerImageProcessor.

    Instantiation defers to requires_backends, which raises an informative
    error stating that the 'vision' backend must be installed.
    """
    _backends = ['vision']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
class Pipeline(_ScikitCompat):
    """Base inference pipeline tying a model and tokenizer together.

    Calling the pipeline parses the raw inputs, tokenizes them, filters the
    encoded features down to what the model accepts, and runs a no-grad
    forward pass, returning numpy predictions. `framework` is 'tf' or 'pt';
    `device` < 0 means CPU.
    """
    default_input_names = None

    def __init__(self, model, tokenizer: PreTrainedTokenizer=None, modelcard: ModelCard=None, framework: Optional[str]=None, args_parser: ArgumentHandler=None, device: int=(- 1), binary_output: bool=False):
        if (framework is None):
            framework = get_framework()
        self.model = model
        self.tokenizer = tokenizer
        self.modelcard = modelcard
        self.framework = framework
        self.device = device
        self.binary_output = binary_output
        self._args_parser = (args_parser or DefaultArgumentHandler())
        # Eagerly move a PyTorch model to the requested GPU.
        if ((self.device >= 0) and (self.framework == 'pt')):
            self.model = self.model.to('cuda:{}'.format(self.device))

    def save_pretrained(self, save_directory):
        """Save model, tokenizer and modelcard under `save_directory`.

        Fix: `tokenizer` and `modelcard` default to None in __init__, so
        guard them rather than crashing with AttributeError.
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Provided path ({}) should be a directory'.format(save_directory))
            return
        self.model.save_pretrained(save_directory)
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(save_directory)
        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)

    def transform(self, X):
        """Scikit-learn compatibility alias for __call__."""
        return self(X=X)

    def predict(self, X):
        """Scikit-learn compatibility alias for __call__."""
        return self(X=X)

    def device_placement(self):
        """Return a context manager that pins work to the configured device.

        Fix: the body was a bare generator (the @contextmanager decorator
        was lost), so `with self.device_placement():` in __call__ raised
        TypeError; wrap the generator explicitly.
        """
        import contextlib

        @contextlib.contextmanager
        def _placement():
            if (self.framework == 'tf'):
                with tf.device(('/CPU:0' if (self.device == (- 1)) else '/device:GPU:{}'.format(self.device))):
                    yield
            else:
                if (self.device >= 0):
                    torch.cuda.set_device(self.device)
                yield

        return _placement()

    def inputs_for_model(self, features: Union[(dict, List[dict])]) -> Dict:
        """Keep only the encoded features this model architecture accepts.

        DistilBERT and XLM variants take no token_type_ids.
        """
        args = ['input_ids', 'attention_mask']
        model_type = type(self.model).__name__.lower()
        if (('distilbert' not in model_type) and ('xlm' not in model_type)):
            args += ['token_type_ids']
        if isinstance(features, dict):
            return {k: features[k] for k in args}
        else:
            return {k: [feature[k] for feature in features] for k in args}

    def __call__(self, *texts, **kwargs):
        inputs = self._args_parser(*texts, **kwargs)
        with self.device_placement():
            inputs = self.tokenizer.batch_encode_plus(inputs, add_special_tokens=True, return_tensors=self.framework, max_length=self.tokenizer.max_len)
            inputs = self.inputs_for_model(inputs)
            return self._forward(inputs)

    def _forward(self, inputs):
        """Run the model forward pass and return numpy predictions."""
        if (self.framework == 'tf'):
            predictions = self.model(inputs, training=False)[0]
        else:
            with torch.no_grad():
                predictions = self.model(**inputs)[0].cpu()
        return predictions.numpy()
def test_case83():
    """Upsert multiple Device entities, then query them back expecting 200.

    NOTE(review): the Link header value ('<{{link}}>; rel=" type=...') looks
    malformed -- presumably a templating placeholder escaped wrong; confirm
    against the broker's expected Link syntax.
    """
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"', 'fiware-service': 'openiot', 'fiware-servicepath': 'test'}
    r = requests.post(url, data=json.dumps(ld_data.upsertMultipleCommand), headers=headers)
    url = (brokerIp + '/ngsi-ld/v1/entities?type=Device')
    r = requests.get(url, headers=headers)
    assert (r.status_code == 200)
class Schema():
    """Spider-style schema wrapper mapping identifiers to integer ids.

    The id map keys are '*', lowercase "table.column" strings, and lowercase
    table names.
    """

    def __init__(self, schema, table):
        self._schema = schema
        self._table = table
        self._idMap = self._map(self._schema, self._table)

    def schema(self):
        """Return the raw schema mapping given to the constructor."""
        return self._schema

    def idMap(self):
        """Return the identifier -> index map built by _map()."""
        return self._idMap

    def _map(self, schema, table):
        """Build the id map from the table's original column/table names.

        Fix: idMap was only created inside the `tab_id == -1` branch, so a
        table whose first column is not '*' raised NameError, and a repeated
        '*' entry wiped all earlier mappings; initialize it up front.
        """
        column_names_original = table['column_names_original']
        table_names_original = table['table_names_original']
        idMap = {}
        for (i, (tab_id, col)) in enumerate(column_names_original):
            if (tab_id == (- 1)):
                # The wildcard column maps '*' to its own index.
                idMap['*'] = i
            else:
                key = table_names_original[tab_id].lower()
                val = col.lower()
                idMap[((key + '.') + val)] = i
        for (i, tab) in enumerate(table_names_original):
            key = tab.lower()
            idMap[key] = i
        return idMap
class SliceObjectAction(BaseAction):
    """Reward rule for slice/open/close interactions.

    Rewards: invalid_action when the agent's last action is not one of the
    allowed interactions, positive (and done) once the target object is
    sliced, negative while it is not, neutral when the target is missing.
    """
    valid_actions = {'SliceObject', 'OpenObject', 'CloseObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return (reward, done) for the current state against the subgoal."""
        if state.metadata['lastAction'] not in self.valid_actions:
            return (self.rewards['invalid_action'], False)
        subgoal = expert_plan[goal_idx]['planner_action']
        target = get_object(subgoal['objectId'], state.metadata)
        if target is None:
            return (self.rewards['neutral'], False)
        if target['isSliced']:
            return (self.rewards['positive'], True)
        return (self.rewards['negative'], False)
class IntegralProjectiveCurve_finite_field(IntegralProjectiveCurve):
    """Integral projective curve over a finite field.

    Provides places, closed points, the L-polynomial (corrected for singular
    points) and rational point counts via the zeta function.
    """
    _point = IntegralProjectiveCurvePoint_finite_field

    def places(self, degree=1):
        """Return all places of the given degree of the function field."""
        F = self.function_field()
        return F.places(degree)

    def closed_points(self, degree=1):
        """Return the closed points of the given degree on the curve."""
        F = self.function_field()
        places_above = F.places(degree)
        points = []
        # Singular closed points are handled separately; remove the places
        # lying over them so each remaining place gives one smooth point.
        for p in self.singular_closed_points():
            if (p.degree() == degree):
                points.append(p)
            for place in p.places():
                if (place.degree() == degree):
                    places_above.remove(place)
        for place in places_above:
            p = self.place_to_closed_point(place)
            assert (p.degree() == degree)
            points.append(p)
        return points

    # Fix: a stray bare `_method` statement stood here (a garbled decorator,
    # presumably `@cached_method`); it raised NameError at class creation
    # and has been removed. Re-adding caching is optional.
    def L_polynomial(self, name='t'):
        """Return the L-polynomial of the curve.

        The function-field L-polynomial is corrected by a factor accounting
        for the places above each singular closed point.
        """
        F = self.function_field()
        L = F.L_polynomial()
        R = L.parent()
        T = R.gen()
        f = R.one()
        for (p, places) in self._singularities:
            for place in places:
                f = (f * (1 - (T ** place.degree())))
            f = (f // (1 - (T ** p.degree())))
        return (L * f)

    def number_of_rational_points(self, r=1):
        """Return the number of rational points over the degree-`r` extension.

        Uses the logarithmic derivative of the L-polynomial: the coefficient
        of t^(r-1) in L'/L plus q^r + 1 counts points.
        """
        q = self.base_ring().order()
        L = self.L_polynomial()
        Lp = L.derivative()
        R = IntegerRing()[[L.parent().gen()]]
        L = R(L)
        Lp = R(Lp)
        f = R((Lp / L), prec=r)
        n = ((f[(r - 1)] + (q ** r)) + 1)
        return n
def red(x):
    """Red channel of a piecewise-linear colormap for x in [0, 1].

    Flat at 0 below 0.352, rises linearly to 255 by 0.662, stays saturated
    until 0.89, then falls linearly.
    """
    if x < 0.352:
        return 0
    if x < 0.662:
        return 822.58 * x - 289.55
    if x < 0.89:
        return 255
    return -1159 * x + 1286.5
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Base class for action-recognition datasets.

    Subclasses implement load_annotations() to produce `video_infos`, a list
    of dicts with a path key ('frame_dir' or 'filename') and a 'label'.
    Supports multi-label targets (`multi_class`) and class-balanced sampling
    (`sample_by_class`, with sampling probabilities raised to `power`).
    """

    def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False, multi_class=False, num_classes=None, start_index=1, modality='RGB', sample_by_class=False, power=0, dynamic_length=False):
        super().__init__()
        self.ann_file = ann_file
        # Resolve the prefix only when it is an existing local directory;
        # otherwise keep it verbatim (it may be a remote path).
        self.data_prefix = (osp.realpath(data_prefix) if ((data_prefix is not None) and osp.isdir(data_prefix)) else data_prefix)
        self.test_mode = test_mode
        self.multi_class = multi_class
        self.num_classes = num_classes
        self.start_index = start_index
        self.modality = modality
        self.sample_by_class = sample_by_class
        self.power = power
        self.dynamic_length = dynamic_length
        # Multi-label and class-balanced sampling are mutually exclusive.
        assert (not (self.multi_class and self.sample_by_class))
        self.pipeline = Compose(pipeline)
        self.video_infos = self.load_annotations()
        if self.sample_by_class:
            self.video_infos_by_class = self.parse_by_class()
            # Per-class sampling probability proportional to (freq ** power),
            # renormalized to sum to 1.
            class_prob = []
            for (_, samples) in self.video_infos_by_class.items():
                class_prob.append((len(samples) / len(self.video_infos)))
            class_prob = [(x ** self.power) for x in class_prob]
            summ = sum(class_prob)
            class_prob = [(x / summ) for x in class_prob]
            self.class_prob = dict(zip(self.video_infos_by_class, class_prob))

    def load_annotations(self):
        """Load the annotation file and return a list of video-info dicts.

        Fix: the original block lost this method's body (it is an abstract
        hook that subclasses must override), leaving a syntax error.
        """
        raise NotImplementedError

    def load_json_annotations(self):
        """Load annotations from a JSON file, joining the data prefix onto
        each path and flattening single-element label lists."""
        video_infos = mmcv.load(self.ann_file)
        num_videos = len(video_infos)
        path_key = ('frame_dir' if ('frame_dir' in video_infos[0]) else 'filename')
        for i in range(num_videos):
            path_value = video_infos[i][path_key]
            if (self.data_prefix is not None):
                path_value = osp.join(self.data_prefix, path_value)
            video_infos[i][path_key] = path_value
            if self.multi_class:
                assert (self.num_classes is not None)
            else:
                assert (len(video_infos[i]['label']) == 1)
                video_infos[i]['label'] = video_infos[i]['label'][0]
        return video_infos

    def parse_by_class(self):
        """Group self.video_infos into a dict keyed by label."""
        video_infos_by_class = defaultdict(list)
        for item in self.video_infos:
            label = item['label']
            video_infos_by_class[label].append(item)
        return video_infos_by_class

    @staticmethod
    def label2array(num, label):
        """One-hot encode `label` into a float32 vector of length `num`.

        Fix: restored @staticmethod -- evaluate() calls
        self.label2array(self.num_classes, label), which would otherwise
        pass `self` as `num`.
        """
        arr = np.zeros(num, dtype=np.float32)
        arr[label] = 1.0
        return arr

    def evaluate(self, results, metrics='top_k_accuracy', metric_options=dict(top_k_accuracy=dict(topk=(1, 5))), logger=None, **deprecated_kwargs):
        """Evaluate `results` against the dataset's ground-truth labels.

        Supported metrics: top_k_accuracy, mean_class_accuracy,
        mean_average_precision, mmit_mean_average_precision. Extra keyword
        arguments are deprecated and folded into metric_options.
        """
        metric_options = copy.deepcopy(metric_options)
        if (deprecated_kwargs != {}):
            warnings.warn("Option arguments for metrics has been changed to `metric_options`, See ' for more details")
            metric_options['top_k_accuracy'] = dict(metric_options['top_k_accuracy'], **deprecated_kwargs)
        if (not isinstance(results, list)):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        metrics = (metrics if isinstance(metrics, (list, tuple)) else [metrics])
        allowed_metrics = ['top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision', 'mmit_mean_average_precision']
        for metric in metrics:
            if (metric not in allowed_metrics):
                raise KeyError(f'metric {metric} is not supported')
        eval_results = OrderedDict()
        gt_labels = [ann['label'] for ann in self.video_infos]
        for metric in metrics:
            msg = f'Evaluating {metric} ...'
            if (logger is None):
                msg = ('\n' + msg)
            print_log(msg, logger=logger)
            if (metric == 'top_k_accuracy'):
                topk = metric_options.setdefault('top_k_accuracy', {}).setdefault('topk', (1, 5))
                if (not isinstance(topk, (int, tuple))):
                    raise TypeError(f'topk must be int or tuple of int, but got {type(topk)}')
                if isinstance(topk, int):
                    topk = (topk,)
                top_k_acc = top_k_accuracy(results, gt_labels, topk)
                log_msg = []
                for (k, acc) in zip(topk, top_k_acc):
                    eval_results[f'top{k}_acc'] = acc
                    log_msg.append(f'''
top{k}_acc {acc:.4f}''')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if (metric == 'mean_class_accuracy'):
                mean_acc = mean_class_accuracy(results, gt_labels)
                eval_results['mean_class_accuracy'] = mean_acc
                log_msg = f'''
mean_acc {mean_acc:.4f}'''
                print_log(log_msg, logger=logger)
                continue
            if (metric in ['mean_average_precision', 'mmit_mean_average_precision']):
                # mAP variants need one-hot label arrays.
                gt_labels = [self.label2array(self.num_classes, label) for label in gt_labels]
                if (metric == 'mean_average_precision'):
                    mAP = mean_average_precision(results, gt_labels)
                    eval_results['mean_average_precision'] = mAP
                    log_msg = f'''
mean_average_precision {mAP:.4f}'''
                elif (metric == 'mmit_mean_average_precision'):
                    mAP = mmit_mean_average_precision(results, gt_labels)
                    eval_results['mmit_mean_average_precision'] = mAP
                    log_msg = f'''
mmit_mean_average_precision {mAP:.4f}'''
                print_log(log_msg, logger=logger)
                continue
        return eval_results

    @staticmethod
    def dump_results(results, out):
        """Dump `results` to `out` in whatever format mmcv infers.

        Fix: restored @staticmethod (the method takes no `self`).
        """
        return mmcv.dump(results, out)

    def prepare_train_frames(self, idx):
        """Run the pipeline on a deep-copied record for training."""
        results = copy.deepcopy(self.video_infos[idx])
        results['modality'] = self.modality
        results['start_index'] = self.start_index
        if (self.multi_class and isinstance(results['label'], list)):
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.0
            results['label'] = onehot
        return self.pipeline(results)

    def prepare_test_frames(self, idx):
        """Run the pipeline on a deep-copied record for testing."""
        results = copy.deepcopy(self.video_infos[idx])
        results['modality'] = self.modality
        results['start_index'] = self.start_index
        if (self.multi_class and isinstance(results['label'], list)):
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.0
            results['label'] = onehot
        return self.pipeline(results)

    def __len__(self):
        return len(self.video_infos)

    def __getitem__(self, idx):
        """Fetch a sample, retrying with a random index on load failure.

        In test mode retries are counted and only logged after 5 failures;
        in train mode every failure is logged.
        """
        if self.test_mode:
            flag = True
            times = 0
            while flag:
                try:
                    flag = False
                    frames = self.prepare_test_frames(idx)
                except Exception as e:
                    flag = True
                    times += 1
                    if (times >= 5):
                        log_msg = 'Failed to load video from {} with error {}'.format(self.video_infos[idx], e)
                    idx = random.randint(0, (len(self.video_infos) - 1))
                    print(log_msg)
            return frames
        flag = True
        while flag:
            try:
                flag = False
                frames = self.prepare_train_frames(idx)
            except Exception as e:
                flag = True
                log_msg = 'Failed to load video from {} with error {}'.format(self.video_infos[idx], e)
                idx = random.randint(0, (len(self.video_infos) - 1))
                print(log_msg)
        return frames
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-07, power=1.0, last_epoch=(- 1)):
    """Create an LR schedule with linear warmup then polynomial decay.

    The learning rate rises linearly from 0 over `num_warmup_steps`, then
    decays with exponent `power` from the optimizer's initial lr down to
    `lr_end` at `num_training_steps`, staying at `lr_end` afterwards.

    Args:
        optimizer: optimizer whose defaults['lr'] provides the initial lr.
        num_warmup_steps: steps of linear warmup.
        num_training_steps: total steps of the schedule.
        lr_end: final learning rate (must be below the initial lr).
        power: polynomial decay exponent (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming.

    Returns:
        A torch LambdaLR scheduler.

    Raises:
        ValueError: if lr_end >= the optimizer's initial lr.
    """
    lr_init = optimizer.defaults['lr']
    if (not (lr_init > lr_end)):
        # Fix: message previously read "must be be smaller" (duplicated word).
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step: int):
        # LambdaLR multiplies lr_init by this factor, hence the /lr_init.
        if (current_step < num_warmup_steps):
            return (float(current_step) / float(max(1, num_warmup_steps)))
        elif (current_step > num_training_steps):
            return (lr_end / lr_init)
        else:
            lr_range = (lr_init - lr_end)
            decay_steps = (num_training_steps - num_warmup_steps)
            pct_remaining = (1 - ((current_step - num_warmup_steps) / decay_steps))
            decay = ((lr_range * (pct_remaining ** power)) + lr_end)
            return (decay / lr_init)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def dist_reduce_tensor(tensor):
    """Sum-reduce `tensor` onto rank 0 and average it there.

    Returns the input unchanged when not running distributed (world size
    < 2). Only rank 0 holds the averaged value; other ranks keep their
    partial sums. The division mutates the tensor in place.
    """
    world = get_world_size()
    if world < 2:
        return tensor
    with torch.no_grad():
        dist.reduce(tensor, dst=0)
        if get_rank() == 0:
            tensor /= world
    return tensor
def main():
    """SMPLpix evaluation entry point.

    Loads a trained U-Net renderer from a checkpoint (explicit path or the
    work directory's network.h5) and renders an evaluation video over the
    configured dataset.
    """
    print(((('\n' + ' SMPLpix Evaluation Loop \n') + '\n') + ' Copyright (c) 2021 - now, Sergey Prokudin (sergey.) '))
    args = get_smplpix_arguments()
    print('ARGUMENTS:')
    pprint.pprint(args)
    # Fall back to the checkpoint saved in the work dir when none is given.
    if (args.checkpoint_path is None):
        print('no model checkpoint was specified, looking in the log directory...')
        ckpt_path = os.path.join(args.workdir, 'network.h5')
    else:
        ckpt_path = args.checkpoint_path
    if (not os.path.exists(ckpt_path)):
        print(('checkpoint %s not found!' % ckpt_path))
        return
    print('defining the neural renderer model (U-Net)...')
    unet = UNet(in_channels=args.n_input_channels, out_channels=args.n_output_channels, n_blocks=args.n_unet_blocks, dim=2, up_mode='resizeconv_linear').to(args.device)
    print(('loading the model from checkpoint: %s' % ckpt_path))
    unet.load_state_dict(torch.load(ckpt_path))
    unet.eval()  # inference mode: freeze dropout / batch-norm statistics
    generate_eval_video(args, args.data_dir, unet, save_target=args.save_target)
    return
def test_initialize_unknown_binary_policy(digraph_with_unknown_policy):
    """_initialize_binary_policy must raise KeyError for an unknown policy
    name (the fixture supplies a digraph configured with one)."""
    with pytest.raises(KeyError):
        digraph_with_unknown_policy._initialize_binary_policy()
def _create_graph(structure_dict):
    """Build a pydot graph from {'nodes': [...], 'edges': [(a, b), ...]}."""
    dot = pydot.Dot()
    for node_name in structure_dict['nodes']:
        dot.add_node(pydot.Node(node_name))
    for src, dst in structure_dict['edges']:
        dot.add_edge(pydot.Edge(src, dst))
    return dot
def get_device(args):
    """Resolve the torch device and multi-GPU flag from parsed arguments.

    Mutates args.ngpu in place when it is None (filled with the detected GPU
    count). Uses 'cuda:<gpu_1st>' only when CUDA is available and at least
    one GPU is requested; otherwise falls back to CPU.

    Returns:
        (device, multi_gpu): the torch.device and whether ngpu > 1.
    """
    if args.ngpu is None:  # fix: was `== None`
        args.ngpu = torch.cuda.device_count()
    use_cuda = torch.cuda.is_available() and (args.ngpu > 0)
    device = torch.device(('cuda:' + str(args.gpu_1st)) if use_cuda else 'cpu')
    multi_gpu = args.ngpu > 1  # fix: was `True if ... else False`
    return (device, multi_gpu)
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Corpus-level BLEU: accumulate modified n-gram precision numerators and
    denominators over all hypothesis/reference pairs before combining them,
    instead of averaging per-sentence scores.

    NOTE(review): the smoothing call below passes only the *last* pair's
    references/hypothesis; this mirrors NLTK's historical implementation --
    confirm against the NLTK version this was derived from.
    """
    p_numerators = Counter()
    p_denominators = Counter()
    (hyp_lengths, ref_lengths) = (0, 0)
    assert (len(list_of_references) == len(hypotheses)), 'The number of hypotheses and their reference(s) should be the same '
    for (references, hypothesis) in zip(list_of_references, hypotheses):
        # Accumulate modified precision statistics for each n-gram order.
        for (i, _) in enumerate(weights, start=1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lengths += closest_ref_length(references, hyp_len)
    bp = brevity_penalty(ref_lengths, hyp_lengths)
    if auto_reweigh:
        # Very short corpora: fall back to uniform weights over the
        # attainable n-gram orders.
        if ((hyp_lengths < 4) and (weights == (0.25, 0.25, 0.25, 0.25))):
            weights = (((1 / hyp_lengths),) * hyp_lengths)
    # _normalize=False keeps exact numerator/denominator for smoothing.
    p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False) for (i, _) in enumerate(weights, start=1)]
    if (p_numerators[1] == 0):
        return 0
    if (not smoothing_function):
        smoothing_function = SmoothingFunction().method0
    p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths)
    s = ((w_i * math.log(p_i)) for (w_i, p_i) in zip(weights, p_n))
    s = (bp * math.exp(math.fsum(s)))
    return s
def get_available_gpus():
    """Return the names of all GPU devices visible to TensorFlow."""
    from tensorflow.python.client import device_lib
    devices = device_lib.list_local_devices()
    return [d.name for d in devices if d.device_type == 'GPU']
class DesAscPredictor():
    """Derives the ORDER BY direction/limit training label from a parsed SQL
    query (SyntaxSQLNet-style).

    Labels: 0 = asc+limit, 1 = asc, 2 = desc+limit, 3 = desc.
    """

    def __init__(self, question, sql, table, history):
        self.sql = sql
        self.question = question
        self.history = history
        self.table = table

    def generate_output(self):
        """Return (history + [column, aggregator], label) for the first
        orderBy clause, or None when the query has none.

        Fix: a failed column lookup previously fell through and used the
        undefined `col` (NameError after the broad `except BaseException`);
        now the malformed clause is logged and skipped.
        """
        for key in self.sql:
            if (key != 'orderBy') or (not self.sql[key]):
                continue
            try:
                # orderBy = (direction, [(agg_op, (unit_op, col_id, ...)), ...])
                col = self.sql[key][1][0][1][1]
            except (IndexError, KeyError, TypeError):
                print('question:{} sql:{}'.format(self.question, self.sql))
                continue
            if ((self.sql[key][0] == 'asc') and self.sql['limit']):
                label = 0
            elif ((self.sql[key][0] == 'asc') and (not self.sql['limit'])):
                label = 1
            elif ((self.sql[key][0] == 'desc') and self.sql['limit']):
                label = 2
            else:
                label = 3
            return ((self.history + [index_to_column_name(col, self.table), self.sql[key][1][0][1][0]]), label)
def post_process(out, pb, state, extend=False):
    """Append diffusion velocity and Cauchy stress (cell averages) to `out`.

    Both quantities are evaluated on the problem `pb` and wrapped in sfepy
    Structs keyed 'dvel' and 'cauchy_stress'.
    """
    from sfepy.base.base import Struct
    for key, expression in (
        ('dvel', 'ev_diffusion_velocity.i.Omega(m.K, p)'),
        ('cauchy_stress', 'ev_cauchy_stress.i.Omega(m.D, u)'),
    ):
        values = pb.evaluate(expression, mode='el_avg')
        out[key] = Struct(name='output_data', mode='cell', data=values, dofs=None)
    return out
def add_stats_to_debug_csv():
    """Snapshot download stats into the debug CSV.

    Column order: (all, is_flickr, not_flickr) x (tried, success,
    time_spent).
    """
    row = [multi_stats.get(category, field)
           for category in ('all', 'is_flickr', 'not_flickr')
           for field in ('tried', 'success', 'time_spent')]
    add_debug_csv_row(row)
class MaskedSoftmax(nn.Module):
    """Numerically-stabilized softmax over dim 1 that ignores positions where
    `mask` is 0."""

    def __init__(self):
        super(MaskedSoftmax, self).__init__()
        # Kept for API/state compatibility; forward() computes the softmax
        # manually and does not use this module.
        self.softmax = nn.Softmax(1)

    def forward(self, x, mask=None):
        """Softmax of `x` along dim 1, restricted to mask==1 positions."""
        if mask is None:
            x_masked = x
        else:
            mask = mask.float()
            # (1 - 1/mask) is 0 where mask==1 and -inf where mask==0, pushing
            # masked logits to -inf before the row max is taken.
            x_masked = x * mask + (1 - 1 / mask)
        x_max = x_masked.max(1)[0]
        x_exp = (x - x_max.unsqueeze(-1)).exp()
        if mask is not None:
            x_exp = x_exp * mask.float()
        return x_exp / x_exp.sum(1).unsqueeze(-1)
class MiniGridWrapper(gym.ObservationWrapper):
    """Observation wrapper exposing only the 'image' component of a MiniGrid
    dict observation."""
    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        # Narrow the declared space to match what observation() returns.
        self.observation_space = env.observation_space.spaces['image']
    def observation(self, observation):
        """Project the dict observation down to its image array."""
        return observation['image']
def rotmat_to_ee(matrix: Union[torch.Tensor, numpy.ndarray], convention: str='xyz') -> Union[torch.Tensor, numpy.ndarray]:
    """Convert rotation matrices of shape (..., 3, 3) to Euler angles.

    Args:
        matrix: batch of 3x3 rotation matrices.
        convention: axis order such as 'xyz' (upper-cased before use).

    Raises:
        ValueError: if the trailing dimensions are not 3x3.
    """
    if (matrix.shape[-1] != 3) or (matrix.shape[-2] != 3):
        # Fix: message previously read "shape f{...}" (stray 'f' left inside
        # the f-string).
        raise ValueError(f'Invalid rotation matrix shape {matrix.shape}.')
    t = Compose([matrix_to_euler_angles])
    return t(matrix, convention.upper())
class FunctionFieldDerivation(RingDerivationWithoutTwist):
    """Base class for derivations on function fields."""
    def __init__(self, parent):
        """Initialize from the derivation module `parent`, caching its domain
        (the function field being acted on)."""
        RingDerivationWithoutTwist.__init__(self, parent)
        self.__field = parent.domain()  # the function field acted upon
    def is_injective(self) -> bool:
        """Derivations annihilate constants, so they are never injective."""
        return False
    def _rmul_(self, factor):
        """The scalar action commutes: delegate to _lmul_."""
        return self._lmul_(factor)
def test_get_parameter_example_from_properties():
    """An object-typed query parameter builds its example from per-property
    'example' values; properties without one are omitted."""
    schema: dict[(str, Any)] = {'parameters': [{'name': 'param1', 'in': 'query', 'schema': {'type': 'object', 'properties': {'prop1': {'type': 'string', 'example': 'prop1 example string'}, 'prop2': {'type': 'string', 'example': 'prop2 example string'}, 'noExampleProp': {'type': 'string'}}}}]}
    example = examples.get_parameter_example_from_properties(schema)
    assert ('query' in example)
    assert (example['query'] == {'param1': {'prop1': 'prop1 example string', 'prop2': 'prop2 example string'}})
def test_initialize_mix():
    """Database.from_files with lazy_load keeps heterogeneous source types:
    pos stays a str path, facts a pathlib.Path, while neg can be replaced by
    an in-memory list."""
    _pos = 'datasets/ToyFather/train/pos.pl'
    _neg = 'datasets/ToyFather/train/neg.pl'
    _facts = pathlib.Path('datasets/ToyFather/train/facts.pl')
    _db = Database.from_files(pos=_pos, neg=_neg, facts=_facts, lazy_load=True)
    _db.neg = ['father(harrypotter,ronweasley).']
    assert isinstance(_db.pos, str)
    assert isinstance(_db.neg, list)
    assert isinstance(_db.facts, pathlib.Path)
def count_above(errors, epsilon):
    """Count values of `errors` strictly above `epsilon` and the number of
    contiguous runs of such values.

    Returns:
        (total_above, total_consecutive): the element count above epsilon
        and the number of maximal consecutive stretches above it.
    """
    exceeds = errors > epsilon
    total_above = len(errors[exceeds])
    exceeds = pd.Series(exceeds)
    # A run starts wherever the above/below state flips relative to the
    # previous element (the first element always counts as a flip).
    flipped = exceeds != exceeds.shift(1)
    total_consecutive = sum(exceeds & flipped)
    return (total_above, total_consecutive)
def test_suffix_perturbation():
    """SuffixPerturbation with include_original=True must yield the original
    instance plus one perturbed copy whose text gains ', pixel art'."""
    data_augmenter = DataAugmenter(perturbations=[SuffixPerturbation(suffix='pixel art')])
    instance: Instance = Instance(id='id0', input=Input(text='A blue dog'), references=[])
    instances: List[Instance] = data_augmenter.generate([instance], include_original=True)
    assert (len(instances) == 2)
    assert (instances[1].perturbation.suffix == 'pixel art')
    assert (instances[1].input.text == 'A blue dog, pixel art')
def _cardinality_subfield(self, jpol):
    """Cardinality of an elliptic curve whose j-invariant lies in a proper
    subfield GF(p^jdeg) of the base field GF(p^d).

    Counts points of a curve E0 with the same j-invariant over the subfield
    via extension degree d // jdeg, then corrects by the quadratic-twist
    identity when `self` is not isomorphic to the base-extended E0.

    Raises:
        ValueError: if jpol's degree is not strictly below the field degree.
    """
    k = self.base_ring()
    p = k.characteristic()
    d = k.degree()
    jdeg = jpol.degree()
    if (jdeg >= d):
        raise ValueError('j-invariant does not lie in a subfield')
    GFj = GF((p, jdeg), name='j', modulus=jpol)
    j = GFj.gen()
    # j = 1728 and j = 0 have extra automorphisms and dedicated formulas.
    if (j == 1728):
        return _cardinality_with_j_invariant_1728(self)
    elif (j == 0):
        return _cardinality_with_j_invariant_0(self)
    E0 = EllipticCurve_from_j(j)
    N = E0.cardinality(extension_degree=(d // jdeg))
    phi = GFj.hom([self.j_invariant()])
    if self.is_isomorphic(E0.base_extend(phi)):
        return N
    else:
        # Quadratic twist: #E + #E^twist = 2(q + 1).
        q = k.cardinality()
        return ((2 * (q + 1)) - N)
def make_dataset(dir):
    """Recursively collect the paths of all image files under `dir`.

    Directories are walked in sorted order; files are filtered through
    is_image_file.
    """
    assert os.path.isdir(dir), ('%s is not a valid directory' % dir)
    paths = []
    for root, _, filenames in sorted(os.walk(dir)):
        paths.extend(os.path.join(root, name)
                     for name in filenames if is_image_file(name))
    return paths
def crop_all_images(split_dict, global_product_pair_id_map, root_dir, image_root_dir_split, low_res_image_root, crop_images_save_root, target_image_size):
    """Crop (or thumbnail) every product image per split and emit COCO-style
    image/annotation JSON files.

    For each subset in `split_dict`, walks the per-product directories,
    rescales low-resolution bounding boxes to the high-resolution images,
    crops and saves the result under crop_images_save_root/<subset>, and
    writes a '<subset>_reid_cropped_<w>_<h>.json' file to root_dir.

    NOTE(review): `bbox_dict` is read as a module-level global here --
    confirm it is populated before this function runs.

    Returns:
        (all_image_infos, all_annotations): dicts keyed by subset name.
    """
    next_anno_id = 0
    next_img_id = 0
    all_annotations = {}
    all_image_infos = {}
    for subset_name in list(split_dict.keys()):
        CROP_IMAGES_SAVE_PATH = (crop_images_save_root / subset_name)
        CROP_IMAGES_SAVE_PATH.mkdir(exist_ok=True, parents=True)
        json_obj = {}
        images_info = []
        annos = []
        images_subset_root = (image_root_dir_split / subset_name)
        for dir_name in os.listdir(images_subset_root):
            if (not (images_subset_root / dir_name).is_dir()):
                continue
            pair_id = global_product_pair_id_map[dir_name]
            dir_path = os.path.join(images_subset_root, dir_name)
            for file in os.listdir(dir_path):
                # Prefix the product directory name to keep filenames unique.
                new_filename = ((dir_name + '_') + file)
                source_filepath = os.path.join(dir_path, file)
                im_id = next_img_id
                im_filename = new_filename
                image_open = Image.open(source_filepath)
                (w, h) = image_open.size
                # The bounding boxes were annotated on the low-res copies.
                low_res_source_filepath = os.path.join(str(dir_path).replace(str(image_root_dir_split), str(low_res_image_root)), file)
                low_res_image_open = Image.open(low_res_source_filepath)
                (low_res_w, low_res_h) = low_res_image_open.size
                single_image_info = create_image_info(image_id=im_id, width=w, height=h, file_name=im_filename, license=0, flickr_url='', coco_url='', data_captured='')
                images_info.append(single_image_info)
                next_img_id += 1
                img_anno_dict = bbox_dict[dir_name][file]
                anno_style = img_anno_dict['style']
                anno_source = img_anno_dict['source']
                anno_bbox = img_anno_dict['bbox']
                anno_id = next_anno_id
                # Only produce a crop when it has not been saved previously.
                if (not (CROP_IMAGES_SAVE_PATH / im_filename).is_file()):
                    if (anno_bbox != ''):
                        anno_bbox = np.asarray(anno_bbox).astype(np.int32)
                        high_res_bbox = resize_low_res_bbox_to_high_res(anno_bbox[:4], low_res_w, low_res_h, w, h)
                        # Degenerate (zero width/height) boxes are skipped,
                        # so no annotation is emitted for them either.
                        if ((high_res_bbox[3] != 0) and (high_res_bbox[2] != 0)):
                            cropped = crop_single_bbox(image_open, high_res_bbox, target_image_size)
                        else:
                            continue
                    else:
                        # No box annotated: fall back to a plain thumbnail.
                        cropped = _resize_thumbnail(image_open, target_image_size)
                single_crop_anno = create_annotations(anno_id=anno_id, image_id=im_id, category_id=anno_style, bbox='', pair_id=pair_id, style=anno_style, source=anno_source, segmentation='', area=0, iscrowd=0)
                annos.append(single_crop_anno)
                next_anno_id += 1
                if (CROP_IMAGES_SAVE_PATH / im_filename).is_file():
                    continue
                cropped.save((CROP_IMAGES_SAVE_PATH / im_filename))
        all_image_infos[subset_name] = images_info
        all_annotations[subset_name] = annos
        json_obj['images'] = images_info
        json_obj['annotations'] = annos
        with open((root_dir / f'{subset_name}_reid_cropped_{target_image_size[0]}_{target_image_size[1]}.json'), 'w') as f:
            json.dump(json_obj, f)
    return (all_image_infos, all_annotations)
def chamfer_distance(x, y, metric='l2', direction='bi'):
    """Chamfer distance between two point sets.

    direction 'y_to_x' averages each y-point's distance to its nearest
    neighbour in x; 'x_to_y' is the reverse; 'bi' sums both.

    Raises:
        ValueError: for an unknown direction.
    """
    def _mean_nn_dist(src, dst):
        # Mean distance from each point of dst to its nearest neighbour in src.
        nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(src)
        return np.mean(nn.kneighbors(dst)[0])

    if direction == 'y_to_x':
        return _mean_nn_dist(x, y)
    if direction == 'x_to_y':
        return _mean_nn_dist(y, x)
    if direction == 'bi':
        return _mean_nn_dist(x, y) + _mean_nn_dist(y, x)
    raise ValueError("Invalid direction type. Supported types: 'y_x', 'x_y', 'bi'")
def all_seld_eval(args, pred_directory, result_path=None):
    """Score SELD predictions against reference metadata and print (and, in
    eval mode, persist) the overall and class-wise results.

    The reference folder is derived from the wav list by swapping the
    'foa'/'mic' path component for 'metadata'.

    Returns:
        (er20, f20, le, lr, seld_err): the overall SELD metrics.
    """
    if args.eval:
        with open(args.eval_wav_txt) as f:
            wav_file_list = [s.strip() for s in f.readlines()]
        wav_dir = os.path.dirname(wav_file_list[0])
    elif args.val:
        with open(args.val_wav_txt) as f:
            wav_file_list = [s.strip() for s in f.readlines()]
        wav_dir = os.path.dirname(wav_file_list[0])
    ref_desc_files = wav_dir.replace('foa', 'metadata').replace('mic', 'metadata')
    pred_output_format_files = pred_directory
    params = parameters.get_params()
    score_obj = cls_compute_seld_results.ComputeSELDResults(params, ref_files_folder=os.path.dirname(ref_desc_files))
    (er20, f20, le, lr, seld_err, classwise_test_scr) = score_obj.get_SELD_Results(pred_output_format_files)
    print('SELD scores')
    print('All\tER\tF\tLE\tLR\tSELD_error')
    print('All\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}'.format(er20, f20, le, lr, seld_err))
    # Class-wise scores exist only under macro averaging.
    if (params['average'] == 'macro'):
        print('Class-wise results')
        print('Class\tER\tF\tLE\tLR\tSELD_error')
        for cls_cnt in range(params['unique_classes']):
            print('{}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}'.format(cls_cnt, classwise_test_scr[0][cls_cnt], classwise_test_scr[1][cls_cnt], classwise_test_scr[2][cls_cnt], classwise_test_scr[3][cls_cnt], classwise_test_scr[4][cls_cnt]))
    # Eval mode additionally writes the same report to result_path.
    if args.eval:
        print('SELD scores', file=codecs.open(result_path, 'w', 'utf-8'))
        print('All\tER\tF\tLE\tLR\tSELD_error', file=codecs.open(result_path, 'a', 'utf-8'))
        print('All\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}'.format(er20, f20, le, lr, seld_err), file=codecs.open(result_path, 'a', 'utf-8'))
        if (params['average'] == 'macro'):
            print('Class-wise results', file=codecs.open(result_path, 'a', 'utf-8'))
            print('Class\tER\tF\tLE\tLR\tSELD_error', file=codecs.open(result_path, 'a', 'utf-8'))
            for cls_cnt in range(params['unique_classes']):
                print('{}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}\t{:0.2f}'.format(cls_cnt, classwise_test_scr[0][cls_cnt], classwise_test_scr[1][cls_cnt], classwise_test_scr[2][cls_cnt], classwise_test_scr[3][cls_cnt], classwise_test_scr[4][cls_cnt]), file=codecs.open(result_path, 'a', 'utf-8'))
    return (er20, f20, le, lr, seld_err)
class LayerNormGeneral(nn.Module):
    """Generalized LayerNorm.

    Normalizes the input over the dimensions in ``normalized_dim`` and then
    optionally applies a learnable elementwise scale (``weight``) and shift
    (``bias``) of shape ``affine_shape``.

    :param affine_shape: shape of the affine parameters (only used when
        ``scale`` or ``bias`` is True)
    :param normalized_dim: tuple of dims to compute mean/variance over
    :param scale: allocate a learnable multiplicative weight if True
    :param bias: allocate a learnable additive bias if True
    :param eps: numerical-stability constant added to the variance
    """

    def __init__(self, affine_shape=None, normalized_dim=(-1,), scale=True, bias=True, eps=1e-05):
        super().__init__()
        self.normalized_dim = normalized_dim
        self.use_scale = scale
        self.use_bias = bias
        # Parameters are only allocated when the corresponding flag is set.
        self.weight = nn.Parameter(torch.ones(affine_shape)) if scale else None
        self.bias = nn.Parameter(torch.zeros(affine_shape)) if bias else None
        self.eps = eps

    def forward(self, x):
        mean = x.mean(self.normalized_dim, keepdim=True)
        centered = x - mean
        # Biased variance over the normalized dims.
        var = centered.pow(2).mean(self.normalized_dim, keepdim=True)
        out = centered / torch.sqrt(var + self.eps)
        if self.use_scale:
            out = out * self.weight
        if self.use_bias:
            out = out + self.bias
        return out
class TestAflCov(unittest.TestCase):
    """Integration tests for afl-cov.

    These tests shell out to the afl-cov script and to the generator wrapper
    scripts under ./afl/, driving them against a fwknop fuzzing corpus that
    is expected in ./fwknop-afl.git (NOTE(review): checkout/fixture setup is
    assumed to happen outside this class — confirm with the test harness).
    Assertions mostly grep the captured command output.
    """
    # Scratch file that captures stdout+stderr of every shell command.
    tmp_file = './tmp_cmd.out'
    version_file = '../VERSION'
    afl_cov_cmd = '../afl-cov'
    # Coverage-generator wrapper scripts (single run / parallel run / live mode).
    single_generator = './afl/afl-cov-generator.sh'
    parallel_generator = './afl/afl-cov-generator-parallel.sh'
    afl_cov_live = './afl/afl-cov-generator-live.sh'
    # AFL output directory for the fwknop server-access fuzzing run.
    top_out_dir = './fwknop-afl.git/test/afl/fuzzing-output/server-access.out'
    # AFL launcher wrappers; these are run from within fwknop-afl.git/test/afl.
    live_afl_cmd = './fuzzing-wrappers/server-access-redir.sh'
    live_parallel_afl_cmd = './fuzzing-wrappers/server-access-parallel-redir.sh'
    def do_cmd(self, cmd):
        """Run `cmd` through the shell, redirect stdout+stderr into
        self.tmp_file, and return the captured output as a list of lines
        with trailing newlines stripped."""
        out = []
        fh = open(self.tmp_file, 'w')
        subprocess.call(cmd, stdin=None, stdout=fh, stderr=subprocess.STDOUT, shell=True)
        fh.close()
        with open(self.tmp_file, 'r') as f:
            for line in f:
                out.append(line.rstrip('\n'))
        return out
    def live_init(self):
        """Reset the AFL output directory and start the live coverage
        generator in the background.

        Returns False if the generator process could not be spawned,
        True otherwise (after giving it two seconds to start)."""
        if is_dir(os.path.dirname(self.top_out_dir)):
            if is_dir(self.top_out_dir):
                rmtree(self.top_out_dir)
        elif (not is_dir(os.path.dirname(self.top_out_dir))):
            os.mkdir(os.path.dirname(self.top_out_dir))
        try:
            subprocess.Popen([self.afl_cov_live])
        except OSError:
            return False
        # Give the background generator a moment to come up before the
        # caller starts the fuzzers.
        time.sleep(2)
        return True
    def afl_stop(self):
        """Stop AFL via `afl-cov --stop-afl`, then SIGTERM the afl-cov
        process itself using the pid parsed from the cov status file."""
        self.do_cmd(('%s --stop-afl --afl-fuzzing-dir %s' % (self.afl_cov_cmd, self.top_out_dir)))
        afl_cov_pid = get_running_pid((self.top_out_dir + '/cov/afl-cov-status'), 'afl_cov_pid\\s+\\:\\s+(\\d+)')
        if afl_cov_pid:
            os.kill(afl_cov_pid, signal.SIGTERM)
        return
    def test_version(self):
        """`afl-cov --version` output must contain the VERSION file content."""
        with open(self.version_file, 'r') as f:
            version = f.readline().rstrip()
        self.assertTrue((version in ''.join(self.do_cmd(('%s --version' % self.afl_cov_cmd)))), 'afl-cov --version does not match VERSION file')
    def test_help(self):
        """`afl-cov -h` must mention the --verbose flag."""
        self.assertTrue(('--verbose' in ''.join(self.do_cmd(('%s -h' % self.afl_cov_cmd)))), '--verbose not in -h output')
    def test_stop_requires_fuzz_dir(self):
        """--stop-afl without --afl-fuzzing-dir must be rejected."""
        self.assertTrue(('Must set' in ''.join(self.do_cmd(('%s --stop-afl' % self.afl_cov_cmd)))), '--afl-fuzzing-dir missing from --stop-afl mode')
    def test_func_search_requires_fuzz_dir(self):
        """--func-search without --afl-fuzzing-dir must be rejected."""
        self.assertTrue(('Must set' in ''.join(self.do_cmd(('%s --func-search test' % self.afl_cov_cmd)))), '--afl-fuzzing-dir missing from --func-search mode')
    def test_line_search_requires_fuzz_dir(self):
        """--line-search without --afl-fuzzing-dir must be rejected."""
        self.assertTrue(('Must set' in ''.join(self.do_cmd(('%s --line-search 1234' % self.afl_cov_cmd)))), '--afl-fuzzing-dir missing from --line-search mode')
    def test_live_parallel(self):
        """Run two parallel AFL instances (-M/-S) under live coverage and
        verify that both fuzzer dirs and the cov/ directory get created."""
        if (not self.live_init()):
            return self.assertTrue(False, ('Could not run generator cmd: %s' % self.afl_cov_live))
        wrapper = ('fwknop-afl.git/test/afl/fuzzing-wrappers' + '/server-access-parallel-redir.sh')
        # Install a fresh copy of the wrapper script into the checkout.
        if os.path.exists(wrapper):
            os.remove(wrapper)
        copy('afl/server-access-parallel-redir.sh', wrapper)
        curr_dir = os.getcwd()
        # The wrappers use relative paths, so launch them from the AFL test dir.
        os.chdir('./fwknop-afl.git/test/afl')
        try:
            subprocess.Popen([self.live_parallel_afl_cmd, '-M', 'fuzzer01'])
        except OSError:
            os.chdir(curr_dir)
            return self.assertTrue(False, ('Could not run live_parallel_afl_cmd: %s -M fuzzer01' % self.live_parallel_afl_cmd))
        try:
            subprocess.Popen([self.live_parallel_afl_cmd, '-S', 'fuzzer02'])
        except OSError:
            os.chdir(curr_dir)
            return self.assertTrue(False, ('Could not run live_parallel_afl_cmd: %s -S fuzzer02' % self.live_parallel_afl_cmd))
        os.chdir(curr_dir)
        # Let the fuzzers produce some output before stopping everything.
        time.sleep(3)
        self.afl_stop()
        if (not (is_dir((self.top_out_dir + '/fuzzer01')) and is_dir((self.top_out_dir + '/fuzzer02')))):
            return self.assertTrue(False, 'fuzzer01 or fuzzer02 directory missing')
        return self.assertTrue(is_dir((self.top_out_dir + '/cov')), ("live coverage directory '%s' does not exist" % (self.top_out_dir + '/cov')))
    def test_live(self):
        """Run a single AFL instance under live coverage and verify the
        cov/ directory gets created."""
        if (not self.live_init()):
            return self.assertTrue(False, ('Could not run generator cmd: %s' % self.afl_cov_live))
        wrapper = 'fwknop-afl.git/test/afl/fuzzing-wrappers/server-access-redir.sh'
        # Install a fresh copy of the wrapper script into the checkout.
        if os.path.exists(wrapper):
            os.remove(wrapper)
        copy('afl/server-access-redir.sh', wrapper)
        curr_dir = os.getcwd()
        # The wrapper uses relative paths, so launch it from the AFL test dir.
        os.chdir('./fwknop-afl.git/test/afl')
        try:
            subprocess.Popen([self.live_afl_cmd])
        except OSError:
            os.chdir(curr_dir)
            return self.assertTrue(False, ('Could not run live_afl_cmd: %s' % self.live_afl_cmd))
        os.chdir(curr_dir)
        # Let the fuzzer produce some output before stopping everything.
        time.sleep(3)
        self.afl_stop()
        return self.assertTrue(is_dir((self.top_out_dir + '/cov')), ("live coverage directory '%s' does not exist" % (self.top_out_dir + '/cov')))
    def test_queue_limit_5(self):
        """Single generator with queue limit 5 must report the expected coverage."""
        out_str = ''.join(self.do_cmd(('%s --afl-queue-id-limit 5 --overwrite' % self.single_generator)))
        self.assertTrue((('Final lcov web report' in out_str) and ("New 'line' coverage: 1585" in out_str)))
    def test_queue_limit_5_cover_corpus(self):
        """Same as test_queue_limit_5 but in --cover-corpus mode."""
        out_str = ''.join(self.do_cmd(('%s --afl-queue-id-limit 5 --overwrite --cover-corpus' % self.single_generator)))
        self.assertTrue((('Final lcov web report' in out_str) and ("New 'line' coverage: 1585" in out_str)))
    def test_overwrite_dir(self):
        """Re-running without --overwrite on an existing dir must be caught."""
        self.do_cmd(('%s --afl-queue-id-limit 1 --overwrite' % self.single_generator))
        out_str = ''.join(self.do_cmd(('%s --afl-queue-id-limit 1' % self.single_generator)))
        self.assertTrue(('use --overwrite' in out_str), 'Missing --overwrite not caught')
    def test_queue_limit_5_parallel(self):
        """Parallel generator must report coverage and imported test cases."""
        out_str = ''.join(self.do_cmd(('%s --afl-queue-id-limit 5 --overwrite' % self.parallel_generator)))
        self.assertTrue((('Final lcov web report' in out_str) and ("New 'line' coverage: 977" in out_str) and ('Imported 145 new test cases' in out_str) and ('Imported 212 new test cases' in out_str)))
    def test_queue_limit_5_parallel_cover_corpus(self):
        """Same as test_queue_limit_5_parallel but in --cover-corpus mode."""
        out_str = ''.join(self.do_cmd(('%s --afl-queue-id-limit 5 --overwrite --cover-corpus' % self.parallel_generator)))
        self.assertTrue((('Final lcov web report' in out_str) and ("New 'line' coverage: 977" in out_str) and ('Imported 145 new test cases' in out_str) and ('Imported 212 new test cases' in out_str)))
def replace_with_separator(text, separator, regexs):
    """Rewrite *text* by applying each compiled regex in *regexs* in order,
    joining its first two capture groups with *separator*.

    :param text: input string
    :param separator: string inserted between the two captured groups
    :param regexs: iterable of compiled regex objects, each with two groups
    :return: the rewritten string
    """
    joined = '\\1' + separator + '\\2'
    out = text
    for pattern in regexs:
        out = pattern.sub(joined, out)
    return out
def setup(old_style=False, target_package_name='returnn'):
    """Make RETURNN importable as a framework/package and install better_exchook.

    Creates a temp dir, symlinks the RETURNN source dir into it under
    *target_package_name*, prepends the temp dir to ``sys.path``, imports the
    package, and installs ``better_exchook``.

    :param bool old_style: if True, symlink the repo root (``_base_dir``)
        itself instead of its ``returnn`` subdirectory (old source layout)
    :param str target_package_name: name under which the package is imported
    """
    print('Setup for importing RETURNN as framework/package.')
    tmp_env_path_dir = tempfile.mkdtemp()
    print('Temp dir:', tmp_env_path_dir)
    if old_style:
        print('Old-style setup!')
        src_dir = _base_dir
    else:
        src_dir = ('%s/returnn' % _base_dir)
    # Expose the source dir under the requested package name via a symlink.
    os.symlink(src_dir, ('%s/%s' % (tmp_env_path_dir, target_package_name)))
    # Must happen before the import below so the symlinked package is found first.
    sys.path.insert(0, tmp_env_path_dir)
    print(('Import %s module/package.' % target_package_name))
    if (target_package_name == 'returnn'):
        import returnn
    else:
        __import__(target_package_name)
    print('Setup better_exchook.')
    if (target_package_name == 'returnn'):
        if old_style:
            # Old layout: better_exchook lives at the package top level.
            from returnn import better_exchook
            better_exchook.install()
        else:
            # New layout: better_exchook moved under returnn.util.
            from returnn.util import better_exchook
            better_exchook.install()
    else:
        # Custom package name: assume it exposes better_exchook at top level.
        __import__(target_package_name).better_exchook.install()
class LinearLR(_LRScheduler):
    """Linearly decays each param group's learning rate from its initial
    value down to zero over ``total_iter`` scheduler steps.

    :param optimizer: wrapped optimizer
    :param total_iter: number of steps over which the lr reaches zero
    :param last_epoch: index of the last epoch (-1 starts fresh)
    """

    def __init__(self, optimizer, total_iter, last_epoch=-1):
        self.total_iter = total_iter
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        # Fraction of the schedule remaining at the current step.
        remaining = 1 - self.last_epoch / self.total_iter
        return [group['initial_lr'] * remaining for group in self.optimizer.param_groups]
def recurrent_net(net, cell_net, inputs, initial_cell_inputs, links, timestep=None, scope=None, outputs_with_grads=(0,), recompute_blobs_on_backward=None, forward_only=False):
    """Add a Caffe2 RecurrentNetwork operator to *net* that applies *cell_net*
    over the time dimension.

    :param net: the main network the RecurrentNetwork op is added to
    :param cell_net: net executed once per timestep
    :param inputs: list of (cell_net input blob, sequence blob) pairs;
        currently exactly one pair is supported (asserted below)
    :param initial_cell_inputs: list of (cell_net blob, initial value blob)
        pairs for the recurrent states
    :param links: dict mapping each recurrent cell input blob to the cell
        output blob that feeds it at the next timestep
    :param timestep: optional blob holding the timestep counter
    :param scope: optional name scope for the internal state blobs
    :param outputs_with_grads: indices of outputs that receive gradients
    :param recompute_blobs_on_backward: blobs to recompute (instead of store)
        during the backward pass; the producing ops must be idempotent
    :param forward_only: if True, no backward step net is constructed
    :return: the RecurrentNetwork outputs, two per recurrent state
        (<output>_all, <output>_last), excluding the internal step-workspaces
        blob
    """
    assert (len(inputs) == 1), 'Only one input blob is supported so far'
    input_blobs = [str(i[0]) for i in inputs]
    initial_input_blobs = [str(x[1]) for x in initial_cell_inputs]
    op_name = net.NextName('recurrent')
    def s(name):
        # Prefix internal blob names with the op name (or the caller's scope)
        # so repeated recurrent_net calls don't collide.
        scope_name = (op_name if (scope is None) else scope)
        return '{}/{}'.format(str(scope_name), str(name))
    # Every cell_net external input that is not fed by the sequence input,
    # an initial state, or the timestep is treated as a reference (weights etc.).
    known_inputs = [str(b) for b in (input_blobs + initial_input_blobs)]
    known_inputs += [str(x[0]) for x in initial_cell_inputs]
    if (timestep is not None):
        known_inputs.append(str(timestep))
    references = [core.BlobReference(b) for b in cell_net.Proto().external_input if (b not in known_inputs)]
    inner_outputs = list(cell_net.Proto().external_output)
    inner_outputs_map = {o: (o + '_grad') for o in inner_outputs}
    if (not forward_only):
        # Build the per-timestep backward step net from the cell net's ops.
        (backward_ops, backward_mapping) = core.GradientRegistry.GetBackwardPass(cell_net.Proto().op, inner_outputs_map)
        backward_mapping = {str(k): v for (k, v) in viewitems(backward_mapping)}
        backward_cell_net = core.Net('RecurrentBackwardStep')
        del backward_cell_net.Proto().op[:]
        if (recompute_blobs_on_backward is not None):
            # Copy forward ops that produce the recompute blobs into the
            # backward net so those blobs are regenerated instead of stored.
            recompute_blobs_on_backward = {str(b) for b in recompute_blobs_on_backward}
            for op in cell_net.Proto().op:
                if (not recompute_blobs_on_backward.isdisjoint(set(op.output))):
                    backward_cell_net.Proto().op.extend([op])
                    # Fires if an op also produces outputs that were NOT
                    # declared as recomputable.
                    assert set(op.output).issubset(recompute_blobs_on_backward)
        backward_cell_net.Proto().op.extend(backward_ops)
        # Blobs used by the backward pass but not defined in it ("scratches")
        # must be forwarded from the forward pass as external inputs.
        (backward_ssa, backward_blob_versions) = core.get_ssa(backward_cell_net.Proto())
        undefined = core.get_undefined_blobs(backward_ssa)
        (ssa, blob_versions) = core.get_ssa(cell_net.Proto())
        scratches = [blob for (blob, ver) in viewitems(blob_versions) if ((ver > 0) and (blob in undefined) and (blob not in cell_net.Proto().external_output))]
        backward_cell_net.Proto().external_input.extend(scratches)
        backward_cell_net.Proto().type = 'simple'
    else:
        backward_cell_net = None
    all_inputs = (([i[1] for i in inputs] + [x[1] for x in initial_cell_inputs]) + references)
    all_outputs = []
    cell_net.Proto().type = 'simple'
    # forward_links/backward_links connect cell blobs to slices of the
    # per-sequence state tensors; aliases expose the state tensors as outputs.
    forward_links = []
    backward_links = []
    aliases = []
    recurrent_states = []
    for (cell_input, _) in initial_cell_inputs:
        cell_input = str(cell_input)
        # `state` holds the recurrent state across all timesteps (size T+1).
        state = s((cell_input + '_states'))
        states_grad = (state + '_grad')
        cell_output = links[str(cell_input)]
        forward_links.append((cell_input, state, 0))
        forward_links.append((cell_output, state, 1))
        # Expose the full state sequence and the last state as outputs.
        aliases.append((state, (cell_output + '_all'), 1))
        aliases.append((state, (cell_output + '_last'), (- 1)))
        all_outputs.extend([(cell_output + '_all'), (cell_output + '_last')])
        recurrent_states.append(state)
        if (backward_cell_net is not None):
            backward_links.append(((cell_output + '_grad'), states_grad, 1))
            backward_cell_net.Proto().external_input.append((str(cell_output) + '_grad'))
            recurrent_input_grad = (cell_input + '_grad')
            if (not backward_blob_versions.get(recurrent_input_grad, 0)):
                # The gradient pass writes the recurrent-input grad in place
                # under the mapped name; link that name instead.
                backward_links.append((backward_mapping[cell_input], states_grad, 0))
            else:
                backward_links.append((recurrent_input_grad, states_grad, 0))
    for (input_t, input_blob) in inputs:
        forward_links.append((str(input_t), str(input_blob), 0))
    if (backward_cell_net is not None):
        for (input_t, input_blob) in inputs:
            backward_links.append((backward_mapping[str(input_t)], (str(input_blob) + '_grad'), 0))
        backward_cell_net.Proto().external_input.extend(cell_net.Proto().external_input)
        backward_cell_net.Proto().external_input.extend(cell_net.Proto().external_output)
    def unpack_triple(x):
        # Transpose a list of 3-tuples into three tuples (or empty lists).
        if x:
            (a, b, c) = zip(*x)
            return (a, b, c)
        return ([], [], [])
    (link_internal, link_external, link_offset) = unpack_triple(forward_links)
    (alias_src, alias_dst, alias_offset) = unpack_triple(aliases)
    recurrent_inputs = [str(x[1]) for x in initial_cell_inputs]
    if (backward_cell_net is not None):
        # Rewrite backward ops that write into their own external inputs to
        # write into an '_accum' blob instead, then Sum it back in, so
        # gradients accumulate across timesteps rather than overwrite.
        proto = backward_cell_net.Proto()
        operators = []
        while (len(proto.op) > 0):
            op = proto.op[(- 1)]
            proto.op.remove(op)
            operators.append(op)
        for op in operators[::(- 1)]:
            proto.op.extend([op])
            for (j, output_blob) in enumerate(op.output):
                if (output_blob in proto.external_input):
                    # In-place ops (output also an input) accumulate already.
                    if (output_blob in op.input):
                        continue
                    output_blob = core.BlobReference(output_blob)
                    accum_blob = (output_blob + '_accum')
                    proto.op[(- 1)].output[j] = str(accum_blob)
                    backward_cell_net.Sum([output_blob, accum_blob], [output_blob])
    def map_to_dual_list(m):
        return ([str(x) for x in list(m.keys())] + [str(x) for x in list(m.values())])
    backward_args = {}
    if (backward_cell_net is not None):
        backward_mapping_keys = set(viewkeys(backward_mapping))
        (backward_link_internal, backward_link_external, backward_link_offset) = unpack_triple(backward_links)
        params = [x for x in references if (x in backward_mapping_keys)]
        param_grads = [str(backward_mapping[x]) for x in references if (x in backward_mapping_keys)]
        if (recompute_blobs_on_backward is None):
            recompute_blobs_on_backward = set()
        backward_args = {'param': [all_inputs.index(p) for p in params], 'backward_link_internal': [str(l) for l in backward_link_internal], 'backward_link_external': [str(l) for l in backward_link_external], 'backward_link_offset': backward_link_offset, 'outputs_with_grads': outputs_with_grads, 'recompute_blobs_on_backward': [str(b) for b in recompute_blobs_on_backward], 'param_grads': param_grads}
        if (len(backward_cell_net.Proto().op) != 0):
            backward_args['backward_step_net'] = backward_cell_net.Proto()
    results = net.RecurrentNetwork(all_inputs, (all_outputs + [s('step_workspaces')]), alias_src=alias_src, alias_dst=[str(a) for a in alias_dst], alias_offset=alias_offset, recurrent_states=recurrent_states, initial_recurrent_state_ids=[all_inputs.index(i) for i in recurrent_inputs], link_internal=[str(l) for l in link_internal], link_external=[str(l) for l in link_external], link_offset=link_offset, enable_rnn_executor=1, step_net=cell_net.Proto(), timestep=('timestep' if (timestep is None) else str(timestep)), **backward_args)
    cell_net.Proto().type = 'simple'
    # Drop the trailing step_workspaces blob; callers only see the outputs.
    return results[:(- 1)]
def test_constructors():
    """ToyResult accepts (poigen, poieval); wrong argument types must raise
    TypeError. ToysManager accepts a loss and a Minuit minimizer."""
    loss, (Nsig, poigen, poieval) = create_loss()
    # Valid construction must not raise.
    ToyResult(poigen, poieval)
    # Each invalid argument pair must be rejected with a TypeError.
    for bad_gen, bad_eval in ((poigen, 'poieval'), (poieval, poieval)):
        with pytest.raises(TypeError):
            ToyResult(bad_gen, bad_eval)
    ToysManager(loss, Minuit())
def register_Ns3Ipv4PacketFilter_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Ipv4PacketFilter.

    Adds the copy constructor, default constructor, and the GetTypeId /
    CheckProtocol / DoClassify methods. (Auto-generated binding code —
    keep edits mechanical.)
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Ipv4PacketFilter const &', 'arg0')])
    cls.add_constructor([])
    # Static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Private virtual hooks of the filter interface.
    cls.add_method('CheckProtocol', 'bool', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoClassify', 'int32_t', [param('ns3::Ptr< ns3::QueueDiscItem >', 'item')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
class RDN(nn.Module):
    """Residual Dense Network for image super-resolution.

    Shallow feature extraction (two convs) feeds a chain of residual dense
    blocks (RDBs); all block outputs are concatenated, fused by GFF, added
    back to the shallow features, and upsampled by UPNet (PixelShuffle).

    :param args: config object with scale, G0, RDNkSize, RDNconfig,
        n_colors attributes (schema assumed from usage — confirm with caller)
    """

    def __init__(self, args):
        super(RDN, self).__init__()
        scale = args.scale[0]
        G0 = args.G0
        ksize = args.RDNkSize
        pad = (ksize - 1) // 2
        # D: number of RDBs, C: conv layers per RDB, G: growth rate.
        self.D, C, G = {'A': (20, 6, 32), 'B': (16, 8, 64)}[args.RDNconfig]
        # Shallow feature extraction.
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, ksize, padding=pad, stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, ksize, padding=pad, stride=1)
        # Residual dense blocks.
        self.RDBs = nn.ModuleList(RDB(growRate0=G0, growRate=G, nConvLayers=C) for _ in range(self.D))
        # Global feature fusion over the concatenated block outputs.
        self.GFF = nn.Sequential(
            nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv2d(G0, G0, ksize, padding=pad, stride=1),
        )
        # Upsampler: one PixelShuffle for x2/x3, two stacked x2 stages for x4.
        if scale in (2, 3):
            self.UPNet = nn.Sequential(
                nn.Conv2d(G0, G * scale * scale, ksize, padding=pad, stride=1),
                nn.PixelShuffle(scale),
                nn.Conv2d(G, args.n_colors, ksize, padding=pad, stride=1),
            )
        elif scale == 4:
            self.UPNet = nn.Sequential(
                nn.Conv2d(G0, G * 4, ksize, padding=pad, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, ksize, padding=pad, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, args.n_colors, ksize, padding=pad, stride=1),
            )
        else:
            raise ValueError('scale must be 2 or 3 or 4.')

    def forward(self, x):
        shallow = self.SFENet1(x)
        x = self.SFENet2(shallow)
        block_outputs = []
        for block in self.RDBs:
            x = block(x)
            block_outputs.append(x)
        x = self.GFF(torch.cat(block_outputs, 1))
        # Global residual connection back to the shallow features.
        x = x + shallow
        return self.UPNet(x)
def volwrite(uri, im, format=None, **kwargs):
    """Write a single volume image to *uri*.

    :param uri: destination the writer is opened on
    :param im: array-like volume; must be numeric and 3-D, or 4-D with a
        small (<32) channel axis last
    :param format: optional explicit format passed to the writer
    :param kwargs: extra writer options
    :return: the writer request's result
    :raises ValueError: for non-numeric data or an unsupported shape
    """
    orig_type = type(im)
    im = np.asanyarray(im)
    if not np.issubdtype(im.dtype, np.number):
        raise ValueError('Image is not numeric, but {}.'.format(orig_type.__name__))
    # 3-D volumes pass; 4-D only when the last axis is a small channel tuple.
    if im.ndim != 3 and not (im.ndim == 4 and im.shape[3] < 32):
        raise ValueError('Image must be 3D, or 4D if each voxel is a tuple.')
    writer = get_writer(uri, format, 'v', **kwargs)
    with writer:
        writer.append_data(im)
    return writer.request.get_result()
def process_mr_l3cube(paths, short_name):
    """Convert the L3Cube-MahaNER Marathi IOB splits and write the dataset.

    :param paths: dict with NER_DATA_DIR (output root) and NERBASE
        (input root containing the MarathiNLP checkout)
    :param short_name: dataset short name passed to write_dataset
    :raises FileNotFoundError: if any expected split file is missing
    """
    output_dir = paths['NER_DATA_DIR']
    iob_dir = os.path.join(paths['NERBASE'], 'marathi', 'MarathiNLP', 'L3Cube-MahaNER', 'IOB')
    split_files = [os.path.join(iob_dir, name) for name in ('train_iob.txt', 'valid_iob.txt', 'test_iob.txt')]
    # Fail fast with a clear message if the checkout is incomplete.
    for split_file in split_files:
        if not os.path.exists(split_file):
            raise FileNotFoundError('Could not find the expected piece of the l3cube dataset %s' % split_file)
    datasets = [convert_mr_l3cube.convert(split_file) for split_file in split_files]
    write_dataset(datasets, output_dir, short_name)
class BaseModelOutputWithNoAttention(ModelOutput):
    """Model output container holding the final hidden state and optional
    per-layer hidden states, without attention weights."""
    # Final layer's hidden states — presumably (batch, ..., hidden); confirm with the producing model.
    last_hidden_state: torch.FloatTensor = None
    # Per-layer hidden states — NOTE(review): populated only when hidden-state output is requested; confirm with caller.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.