code stringlengths 281 23.7M |
|---|
# NOTE(review): the decorator was garbled to ".skipif(...)" in this dump;
# reconstructed as pytest's skip marker — confirm against upstream.
@pytest.mark.skipif("sys.platform != 'win32'")
def test_path_to_url_win() -> None:
    """Check Windows-specific path -> file:// URL conversions."""
    assert path_to_url('c:/tmp/file') == 'file:///c:/tmp/file'
    assert path_to_url('c:\\tmp\\file') == 'file:///c:/tmp/file'
    # UNC paths keep their host in the URL authority component.
    assert path_to_url('\\\\unc\\as\\path') == 'file://unc/as/path'
    path = Path('.') / 'file'
    # A relative path is resolved against the current working directory.
    assert path_to_url('file') == 'file:///' + path.absolute().as_posix()
def check_data(args):
    """Ensure the AI2-THOR offline scene dataset is present in ``args.data_dir``.

    If the target directory already mirrors the source dataset (ignoring the
    large per-scene files), nothing is copied; otherwise the target directory
    is wiped and re-populated from ``~/Data/AI2thor_offline_data_2.0.2/``.
    """
    source_data_dir = os.path.expanduser('~/Data/AI2thor_offline_data_2.0.2/')
    scene_data_dir = args.data_dir
    if (os.path.exists(scene_data_dir) and os.listdir(scene_data_dir)
            and len(filecmp.dircmp(source_data_dir, scene_data_dir,
                                   ignore=['images.hdf5', 'metadata.json']).left_only) == 0):
        print('Scene Data Exists!')
    else:
        print("Start Copying Dataset to SSD 'tmp' ...")
        if os.path.exists(scene_data_dir):
            # BUG FIX: os.removedirs() only removes *empty* directories and
            # would raise OSError on a partially-copied dataset; rmtree
            # deletes the whole tree so copytree() can recreate it.
            shutil.rmtree(scene_data_dir)
        shutil.copytree(source_data_dir, scene_data_dir,
                        ignore=shutil.ignore_patterns('images.hdf5', 'metadata.json',
                                                      'depth.json', 'instance_segmentation.json'))
        print('Copy Done!')
class FileServingWorker(qc.QObject):
    """Qt worker object that serves ``dirname`` over HTTP until stopped.

    NOTE(review): the server attribute name, the slot decorator and several
    statements were truncated in this dump (``self. = None``, ``(int)``,
    bare ``self.`` lines).  They are reconstructed below from how run()/stop()
    use the server — confirm against the upstream source.
    """

    def __init__(self, dirname, *args, **kwargs):
        super(FileServingWorker, self).__init__(*args, **kwargs)
        self.dirname = dirname
        # Created lazily in run(); stop() checks it before shutting down.
        self.httpd = None

    @qc.pyqtSlot(int)
    def run(self, port):
        """Start a RootedHTTPServer on ``port`` and serve until shutdown."""
        server_address = ('', port)
        self.httpd = RootedHTTPServer(self.dirname, server_address, RootedHTTPRequestHandler)
        sa = self.httpd.socket.getsockname()
        logger.debug('Serving on port %s' % sa[1])
        self.httpd.serve_forever()

    def stop(self):
        """Shut the server down if it is running."""
        if self.httpd:
            logger.debug('Shutdown server')
            self.httpd.shutdown()
            self.httpd = None
class MetaFormerBlock(nn.Module):
    """One MetaFormer block: two pre-norm residual sub-blocks.

    Sub-block 1 applies the token mixer, sub-block 2 the MLP; each residual
    branch can be wrapped in DropPath and per-channel layer/residual scaling.
    """

    def __init__(self, dim, token_mixer=nn.Identity, mlp=Mlp, norm_layer=nn.LayerNorm, drop=0.0, drop_path=0.0, layer_scale_init_value=None, res_scale_init_value=None):
        super().__init__()

        # Small factories so the two sub-blocks are built identically.
        def make_drop_path(rate):
            return DropPath(rate) if rate > 0.0 else nn.Identity()

        def make_scale(init_value):
            return Scale(dim=dim, init_value=init_value) if init_value else nn.Identity()

        self.norm1 = norm_layer(dim)
        self.token_mixer = token_mixer(dim=dim, drop=drop)
        self.drop_path1 = make_drop_path(drop_path)
        self.layer_scale1 = make_scale(layer_scale_init_value)
        self.res_scale1 = make_scale(res_scale_init_value)

        self.norm2 = norm_layer(dim)
        self.mlp = mlp(dim=dim, drop=drop)
        self.drop_path2 = make_drop_path(drop_path)
        self.layer_scale2 = make_scale(layer_scale_init_value)
        self.res_scale2 = make_scale(res_scale_init_value)

    def forward(self, x):
        branch = self.drop_path1(self.token_mixer(self.norm1(x)))
        x = self.res_scale1(x) + self.layer_scale1(branch)
        branch = self.drop_path2(self.mlp(self.norm2(x)))
        x = self.res_scale2(x) + self.layer_scale2(branch)
        return x
# NOTE(review): both decorators were garbled in this dump ("_serializable",
# "_inputs"); reconstructed from the transformers codebase — confirm upstream.
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Core RegNet model: embedder -> encoder -> global average pooler.

    Outputs are transposed from TF's NHWC to NCHW to match the PyTorch model.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        # keepdims=True keeps the pooled tensor 4-D so it can be transposed below.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        """Run the forward pass; returns a tuple when ``return_dict`` is False."""
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # NHWC -> NCHW for parity with the PyTorch implementation.
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        if output_hidden_states:
            hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1])
        if not return_dict:
            return ((last_hidden_state, pooled_output) + encoder_outputs[1:])
        return TFBaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=(hidden_states if output_hidden_states else encoder_outputs.hidden_states))
class Everything():
    """Annotation fixture exercising every supported field type for
    (de)serialization tests: primitives, containers, enums, dates, unions."""

    class AnIntEnum(IntEnum):
        A = 1

    class AStringEnum(str, Enum):
        A = 'a'

    # Primitives.
    string: str
    bytes: bytes
    an_int: int
    a_float: float
    # Containers.
    a_dict: Dict[str, int]
    a_list: List[int]
    a_homogenous_tuple: TupleSubscriptable[int, ...]
    a_hetero_tuple: TupleSubscriptable[str, int, float]
    a_counter: Counter[str]
    a_mapping: Mapping[int, float]
    a_mutable_mapping: MutableMapping[float, str]
    a_sequence: Sequence[float]
    a_mutable_sequence: MutableSequence[str]
    a_set: Set[float]
    a_mutable_set: MutableSet[int]
    a_frozenset: FrozenSet[str]
    # Enums and dates.
    an_int_enum: AnIntEnum
    a_str_enum: AStringEnum
    a_datetime: datetime
    a_date: date
    # Non-string dict keys.
    a_string_enum_dict: Dict[AStringEnum, int]
    a_bytes_dict: Dict[bytes, bytes]
    # Unions.
    native_union: Union[int, float, str]
    native_union_with_spillover: Union[int, str, Set[str]]
    native_union_with_union_spillover: Union[int, str, A, B]
def inference_detector(model, img, scores_thr=0.3, augment=False, half=False, merge=False):
    """Run a detection forward pass on one image (or a pre-batched array).

    ``model`` carries its pipeline config in ``model.cfg``; ``img`` may be
    anything LoadImage accepts (path or ndarray).  Returns
    ``(result, elapsed_seconds)``.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # Replace the dataset-specific loading step with LoadImage, keep the rest.
    test_pipeline = Compose([LoadImage()] + cfg.data.test.pipeline[1:])
    data = dict(img=img)
    data = test_pipeline(data)
    img = data['img']
    # Half precision is only meaningful off-CPU.
    half = (device.type != 'cpu') and half
    if len(img.shape) == 3:
        # HWC -> NCHW with a singleton batch dimension.
        if isinstance(img, np.ndarray):
            img = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0)
        elif isinstance(img, torch.Tensor):
            img = img.unsqueeze(0)
    elif len(img.shape) == 4:
        # NHWC -> NCHW.
        if isinstance(img, np.ndarray):
            img = torch.from_numpy(img.transpose(0, 3, 1, 2))
    img = img.to(device).half() if half else img.to(device).float()
    data['img'] = img
    # BUG FIX: both validation messages were garbled ("must float",
    # "must >=0 or <=1" — the latter described the opposite of the check).
    assert isinstance(scores_thr, float), 'scores_thr must be a float'
    if scores_thr > 1 or scores_thr < 0:
        raise Exception('scores_thr must be between 0 and 1 inclusive')
    data['scores_thr'] = scores_thr
    if augment:
        data['augment'] = augment
    if merge:
        data['merge'] = merge
    if half:
        model.half()
    with torch.no_grad():
        t1 = torch_utils.time_synchronized()
        result = model(return_loss=False, rescale=True, **data)
        t2 = torch_utils.time_synchronized()
    return (result, (t2 - t1))
class DistInfoPkg(OnSysPath, SiteBuilder):
    """Test fixture: builds a minimal ``.dist-info`` distribution on sys.path.

    ``files`` describes the on-disk layout the SiteBuilder base writes:
    METADATA, RECORD, entry points, and a trivial importable module.
    """

    files: FilesSpec = {'distinfo_pkg-1.0.0.dist-info': {'METADATA': "\n Name: distinfo-pkg\n Author: Steven Ma\n Version: 1.0.0\n Requires-Dist: wheel >= 1.0\n Requires-Dist: pytest; extra == 'test'\n Keywords: sample package\n\n Once upon a time\n There was a distinfo pkg\n ", 'RECORD': 'mod.py,sha256=abc,20\n', 'entry_points.txt': '\n [entries]\n main = mod:main\n ns:sub = mod:main\n '}, 'mod.py': '\n def main():\n print("hello world")\n '}

    def make_uppercase(self):
        """Rebuild the dist-info with METADATA uppercased (case-handling tests)."""
        shutil.rmtree((self.site_dir / 'distinfo_pkg-1.0.0.dist-info'))
        files = copy.deepcopy(DistInfoPkg.files)
        info = files['distinfo_pkg-1.0.0.dist-info']
        info['METADATA'] = info['METADATA'].upper()
        build_files(files, self.site_dir)
class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (same scheme as GPT-2/RoBERTa).

    Spaces are treated as part of tokens, so a word encodes differently at
    the start of a sentence (no leading space) than elsewhere; pass
    ``add_prefix_space=True`` to normalize that.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        """Load the JSON vocab and merges file and wrap the special tokens."""
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        # The mask token strips the space to its left so "<mask>" absorbs the
        # preceding space like an ordinary word-initial token.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Skip the "#version" header line; last split entry is ''.
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # GPT-2 pre-tokenization pattern: contractions, letter runs, digit
        # runs, symbol runs, and trailing/standalone whitespace.
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
    # NOTE(review): upstream defines vocab_size as a @property; the decorator
    # appears to have been stripped from this dump — confirm call syntax.
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)
    def get_vocab(self):
        """Full vocabulary including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply byte-pair merges to ``token``; returns space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Merge the lowest-ranked (earliest-learned) adjacent pair first.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Pre-tokenize with the GPT-2 regex, byte-encode, then BPE-split."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Token string -> vocab id (falls back to the unk id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Vocab id -> token string (None if unknown)."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Join tokens and reverse the byte-to-unicode mapping."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab and merges files into ``save_directory``; returns their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                # Ranks should be consecutive; a gap means a corrupted merges table.
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """All zeros — BART does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word encodes like a mid-sentence word."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)
class GenerateCode(Command):
    """LSP command that generates a variable/function/class/module/package
    at the cursor position via rope's generate refactoring."""

    name = commands.COMMAND_GENERATE_CODE
    kind: CodeActionKind = 'quickfix'
    document_uri: DocumentUri
    position: typing.Range
    generate_kind: str

    def validate(self, info):
        """Fail early if rope cannot build the generator for this position."""
        generate.create_generate(kind=self.generate_kind, project=self.project, resource=info.resource, offset=info.current_document.offset_at_position(self.position))

    def get_changes(self):
        """Build and return the rope changeset for the generation."""
        (current_document, resource) = get_resource(self.workspace, self.document_uri)
        refactoring = generate.create_generate(kind=self.generate_kind, project=self.project, resource=resource, offset=current_document.offset_at_position(self.position))
        rope_changeset = refactoring.get_changes()
        return rope_changeset

    # BUG FIX: this cls-style factory was missing its @classmethod decorator
    # (likely stripped in this dump); without it a call on the class would
    # have bound `workspace` to the `cls` parameter.
    @classmethod
    def get_code_actions(cls, workspace, document, position):
        """One code action per supported generate kind."""
        return {f'Generate {generate_kind}': cls(workspace, document_uri=document.uri, position=position, generate_kind=generate_kind) for generate_kind in ['variable', 'function', 'class', 'module', 'package']}
# NOTE(review): a registration decorator was garbled to "_module()" in this
# dump (most likely "@PIPELINES.register_module()"); restore it once the
# registry import is confirmed.
class DefaultFormatBundle(object):
    """Format pipeline results into DataContainers with default meta keys.

    Converts 'img' to a CHW tensor, wraps ground-truth fields in
    DataContainers, and fills in default 'pad_shape'/'scale_factor'/
    'img_norm_cfg' meta keys when missing.
    """

    def __call__(self, results):
        if ('img' in results):
            img = results['img']
            results = self._add_default_meta_keys(results)
            if (len(img.shape) < 3):
                # Grayscale: add a trailing channel axis before HWC -> CHW.
                img = np.expand_dims(img, (- 1))
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
            if (key not in results):
                continue
            results[key] = DC(to_tensor(results[key]))
        if ('gt_masks' in results):
            # Masks stay on CPU; they are not stackable tensors.
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if ('gt_semantic_seg' in results):
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)]), stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Fill missing meta keys with identity defaults derived from 'img'."""
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = (1 if (len(img.shape) < 3) else img.shape[2])
        # Identity normalization: zero mean, unit std, no channel swap.
        results.setdefault('img_norm_cfg', dict(mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__
# NOTE(review): the decorator was garbled to ".requires_user_action" in this
# dump; reconstructed as the interactive-test marker — confirm upstream.
@requires_user_action
class EVENT_MOUSE_ENTER_LEAVE(InteractiveTestCase):
    """Interactive check that mouse enter/leave events fire with coordinates."""

    def on_mouse_enter(self, x, y):
        print('Entered at %f, %f' % (x, y))

    def on_mouse_leave(self, x, y):
        print('Left at %f, %f' % (x, y))

    def test_motion(self):
        w = Window(200, 200)
        try:
            w.push_handlers(self)
            # Pump events until the user closes the window.
            while not w.has_exit:
                w.dispatch_events()
        finally:
            # Always release the window, even if dispatching raised.
            w.close()
        self.user_verify('Pass test?', take_screenshot=False)
def clean_check_command(f):
    """Decorator for safety's ``check`` CLI command.

    Validates flag combinations, resolves proxy / policy-file / remediation
    options (routing failures through ``output_exception``), then calls the
    wrapped command with the resolved values.
    """
    # Local import: the original "@wraps(f)" decorator was garbled to a bare
    # "(f)" expression in this dump; wraps preserves the command's metadata
    # (name/docstring) for click.
    from functools import wraps

    @wraps(f)
    def inner(ctx, key, db, full_report, stdin, files, cache, ignore, ignore_unpinned_requirements, output, json, html, bare, proxy_protocol, proxy_host, proxy_port, exit_code, policy_file, save_json, save_html, audit_and_monitor, project, apply_remediations, auto_remediation_limit, no_prompt, json_version, *args, **kwargs):
        # --json-version is only meaningful when some JSON output is requested.
        if ((ctx.get_parameter_source('json_version') != click.core.ParameterSource.DEFAULT) and (not (save_json or json or (output == 'json')))):
            raise click.UsageError('Illegal usage: `--json-version` only works with JSON related outputs.')
        try:
            proxy_dictionary = get_proxy_dict(proxy_protocol, proxy_host, proxy_port)
            if (ctx.get_parameter_source('apply_remediations') != click.core.ParameterSource.DEFAULT):
                # Auto-remediation requires both an API key and explicit files.
                if (not key):
                    raise InvalidKeyError(message='The --apply-security-updates option needs an API-KEY. See {link}.')
                if (not files):
                    raise SafetyError(message='--apply-security-updates only works with files; use the "-r" option to specify files to remediate.')
                auto_remediation_limit = get_fix_options(policy_file, auto_remediation_limit)
            (policy_file, server_audit_and_monitor) = safety.get_server_policies(key=key, policy_file=policy_file, proxy_dictionary=proxy_dictionary)
            audit_and_monitor = (audit_and_monitor and server_audit_and_monitor)
        except SafetyError as e:
            LOG.exception('Expected SafetyError happened: %s', e)
            output_exception(e, exit_code_output=exit_code)
        except Exception as e:
            LOG.exception('Unexpected Exception happened: %s', e)
            exception = (e if isinstance(e, SafetyException) else SafetyException(info=e))
            output_exception(exception, exit_code_output=exit_code)
        return f(ctx, key, db, full_report, stdin, files, cache, ignore, ignore_unpinned_requirements, output, json, html, bare, proxy_protocol, proxy_host, proxy_port, exit_code, policy_file, audit_and_monitor, project, save_json, save_html, apply_remediations, auto_remediation_limit, no_prompt, json_version, *args, **kwargs)
    return inner
class FontBase(BaseType):
    """Base class for font config types; holds the shared font-shorthand regex
    and the class-level default family/size used by subclasses."""

    default_family: Optional[str] = None
    default_size: Optional[str] = None

    # NOTE(review): the numeric weight character class was garbled to "[]00"
    # in this dump; restored to the 100..900 form used upstream — confirm.
    font_regex = re.compile('\n (\n (\n # style\n (?P<style>normal|italic|oblique) |\n # weight (named | 100..900)\n (\n (?P<weight>[123456789]00) |\n (?P<namedweight>normal|bold)\n ) |\n # size (<float>pt | <int>px)\n (?P<size>[0-9]+((\\.[0-9]+)?[pP][tT]|[pP][xX])|default_size)\n )\\ # size/weight/style are space-separated\n )* # 0-inf size/weight/style tags\n (?P<family>.+) # mandatory font family', re.VERBOSE)

    # BUG FIX: restore @classmethod (stripped in this dump) — the method
    # mutates class-level defaults via ``cls``.
    @classmethod
    def set_defaults(cls, default_family: ListType[str], default_size: str) -> None:
        """Record the default font family list and size for later substitution."""
        if default_family:
            families = configutils.FontFamilies(default_family)
        else:
            # No configured family: fall back to the system default.
            families = configutils.FontFamilies.from_system_default()
        cls.default_family = families.to_str(quote=True)
        cls.default_size = default_size

    def to_py(self, value: Any) -> Any:
        raise NotImplementedError
def get(fn: str, add_ver: bool=False, unquote: bool=False, do_strip: bool=False, do_readme: bool=False) -> str:
    """Read ``fn`` and optionally post-process the text.

    add_ver   - run the text through ver() (version substitution).
    unquote   - escape literal '%' for later %-formatting.
    do_strip  - drop lines tagged '# --STRIP DURING BUILD'.
    do_readme - substitute the README contents for the placeholder.
    """
    with open(fn) as f:
        text = f.read()
    if add_ver:
        text = ver(text)
    if unquote:
        text = text.replace('%', '%%')
    if do_strip:
        text = ''.join(line for line in text.splitlines(keepends=True)
                       if not line.endswith('# --STRIP DURING BUILD\n'))
    if do_readme:
        # BUG FIX: the replacement target was garbled to '' in this dump;
        # str.replace('', ...) would insert the README between every single
        # character. TODO(review): confirm the real placeholder upstream.
        text = text.replace('{readme}', get('README.md'))
    return text
class FortConsumer(MySSH):
    """Websocket SSH consumer for a fort (bastion) server session.

    Connection parameters come from the websocket URL path:
    ``.../<server_id>/<fort_user_id>`` (path segments 3 and 4).
    """

    def __init__(self, *args, **kwargs):
        super(FortConsumer, self).__init__(*args, **kwargs)
        # URL path segment 3 selects the server, segment 4 the fort user.
        self.fort_server = ServerAssets.objects.select_related('assets').get(id=self.scope['path'].split('/')[3])
        self.fort_user = FortServerUser.objects.get(id=self.scope['path'].split('/')[4])
        self.ip = self.fort_server.assets.asset_management_ip
        self.port = self.fort_server.port
        self.username = self.fort_user.fort_username
        # NOTE(review): password is used as stored — presumably plaintext or
        # decrypted elsewhere; confirm how MySSH consumes it.
        self.password = self.fort_user.fort_password
class RGAlbum():
    """A group of RGSongs sharing one album ReplayGain computation.

    NOTE(review): several "@property"/"@classmethod" decorators appear to
    have been stripped in this dump (an orphan "_property" marker remained
    before should_process, and write() reads ``self.done`` as a truthy
    value, which only works as a property).  They are restored here —
    confirm against the upstream source.
    """

    def __init__(self, rg_songs, process_mode):
        self.songs = rg_songs
        self.gain = None
        self.peak = None
        self.__should_process = None
        self.__process_mode = process_mode

    @property
    def progress(self):
        """Overall progress in [0, 1], weighted by song length."""
        all_ = 0.0
        done = 0.0
        for song in self.songs:
            all_ += song.length
            done += (song.length * song.progress)
        try:
            return max(min((done / all_), 1.0), 0.0)
        except ZeroDivisionError:
            # Zero total length (e.g. no songs): report no progress.
            return 0.0

    @property
    def done(self):
        """True once every song has finished analysis."""
        for song in self.songs:
            if (not song.done):
                return False
        return True

    @property
    def title(self):
        """Display title: 'artist - album', or a placeholder when untagged."""
        if (not self.songs):
            return ''
        if (not any((rgs.song('album') for rgs in self.songs))):
            return ('(%s)' % EMPTY)
        return self.songs[0].song.comma('~artist~album')

    @property
    def error(self):
        """True if any song failed analysis."""
        for song in self.songs:
            if song.error:
                return True
        return False

    def write(self):
        """Write the computed album gain/peak to every song (no-op until done)."""
        if (not self.done):
            return
        for song in self.songs:
            song._write(self.gain, self.peak)

    @classmethod
    def from_songs(cls, songs, process_mode=UpdateMode.ALWAYS):
        """Build an RGAlbum wrapping each song in an RGSong."""
        return RGAlbum([RGSong(s) for s in songs], process_mode)

    @property
    def should_process(self):
        """Whether this album needs (re)analysis under the configured mode."""
        mode = self.__process_mode
        if (mode == UpdateMode.ALWAYS):
            return True
        elif (mode == UpdateMode.ANY_MISSING):
            return (not all((s.has_all_rg_tags for s in self.songs)))
        elif (mode == UpdateMode.ALBUM_MISSING):
            return (not all((s.album_gain for s in self.songs)))
        else:
            print_w(('Invalid setting for update mode: ' + mode))
            # Fail open: unknown modes process everything.
            return True
class ItemList():
    """An ordered collection of items addressable by ``name`` and ``id``."""

    def __init__(self):
        self._items = []

    def __len__(self):
        return len(self._items)

    def __getitem__(self, index):
        return self._items[index]

    def __iter__(self):
        return iter(self._items)

    def __repr__(self):
        inner = ', '.join(entry.__str__() for entry in self)
        return f'<List {inner}>'

    def __lt__(self, other):
        mine = '/'.join(entry.name for entry in self)
        theirs = '/'.join(entry.name for entry in other)
        return mine < theirs

    def name_id(self):
        """Mapping of item name -> item id."""
        return {entry.name: entry.id for entry in self}

    def all_name(self):
        """All item names, in list order."""
        return [entry.name for entry in self]

    def append(self, item):
        self._items.append(item)

    def index(self, item):
        return self._items.index(item)

    def insert(self, pos, item):
        self._items.insert(pos, item)

    def clear(self):
        self._items.clear()

    def filter(self, condition) -> list:
        """Items for which ``condition(item)`` is true."""
        return [entry for entry in self if condition(entry)]

    def find_by_name(self, name: str):
        """First item whose name equals ``name``, or None."""
        for entry in self:
            if entry.name == name:
                return entry
        return None

    def find_by_id(self, fid: int):
        """First item whose id equals ``fid``, or None."""
        for entry in self:
            if entry.id == fid:
                return entry
        return None

    def pop_by_id(self, fid):
        """Remove and return the item with id ``fid``, or None if absent."""
        for entry in self:
            if entry.id == fid:
                self._items.remove(entry)
                return entry
        return None

    def update_by_id(self, fid, **kwargs):
        """Replace the item with id ``fid`` by a copy with ``kwargs`` applied."""
        entry = self.find_by_id(fid)
        pos = self.index(entry)
        data = entry._asdict()
        data.update(kwargs)
        self._items[pos] = entry.__class__(**data)
def main(args):
    """Sample images from a trained layout-to-image generator.

    Loads generator weights from ``args.model_path`` (formatted with
    ``args.load_eopch`` — note: the attribute name is misspelled upstream,
    keep it as-is), then writes ``args.num_img`` samples per dataloader
    batch as JPEGs under ``args.sample_path``.
    """
    device = torch.device('cuda')
    # Category/object counts differ per dataset (COCO vs. the other option,
    # presumably Visual Genome given the 'vg' check below).
    num_classes = (184 if (args.dataset == 'coco') else 179)
    num_o = (8 if (args.dataset == 'coco') else 31)
    args.num_img = (1 if (args.dataset == 'vg') else 5)
    args.model_path = args.model_path.format(args.load_eopch)
    args.sample_path = args.sample_path.format(args.load_eopch)
    if (args.num_img > 1):
        args.sample_path += '_{}'.format(args.num_img)
    dataloader = get_dataloader(args.dataset)
    netG = graph_aware_generator(num_classes=num_classes, output_dim=3).to(device)
    if (not os.path.isfile(args.model_path)):
        # Silently bail out when the checkpoint is missing.
        return
    state_dict = torch.load(args.model_path)
    new_state_dict = OrderedDict()
    for (k, v) in state_dict.items():
        # Strip the 'module.' prefix left by DataParallel checkpoints.
        name = k[7:]
        new_state_dict[name] = v
    model_dict = netG.state_dict()
    # Keep only weights whose names exist in the current model.
    pretrained_dict = {k: v for (k, v) in new_state_dict.items() if (k in model_dict)}
    model_dict.update(pretrained_dict)
    netG.load_state_dict(model_dict)
    netG.cuda()
    netG.eval()
    if (not os.path.exists(args.sample_path)):
        os.makedirs(args.sample_path)
    # Truncation threshold for the sampled latents.
    thres = 2.0
    with tqdm(total=(dataloader.__len__() * args.num_img)) as pbar:
        for (idx, data) in enumerate(dataloader):
            (real_images, label, bbox) = data
            (real_images, label, bbox) = (real_images.cuda(), label.long().unsqueeze((- 1)).cuda(), bbox.float())
            for j in range(args.num_img):
                # Per-object and per-image truncated latent codes.
                z_obj = torch.from_numpy(truncted_random(num_o=num_o, thres=thres)).float().cuda()
                z_im = torch.from_numpy(truncted_random(num_o=1, thres=thres)).view(1, (- 1)).float().cuda()
                fake_images = netG.forward(z_obj, bbox, z_im, label.squeeze(dim=(- 1)))
                # Map outputs from [-1, 1] to [0, 1] before uint8 conversion.
                fake_images_uint = img_as_ubyte(((fake_images[0].cpu().detach().numpy().transpose(1, 2, 0) * 0.5) + 0.5))
                imageio.imwrite('{save_path}/sample_{idx}_numb_{numb}.jpg'.format(save_path=args.sample_path, idx=idx, numb=j), fake_images_uint)
                pbar.update(1)
class LibrsyncTest(unittest.TestCase):
    """Cross-check the librsync wrappers against the external ``rdiff`` tool
    for signatures, deltas and patches on random files."""

    basis = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'basis'))
    new = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'new'))
    new2 = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'new2'))
    sig = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'signature'))
    sig2 = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'signature2'))
    delta = rpath.RPath(Globals.local_connection, os.path.join(abs_test_dir, b'delta'))

    def sig_file_test_helper(self, blocksize, iterations, file_len=None):
        """Compare SigFile output against ``rdiff signature`` on random files.

        The rdiff invocation is adapted to whichever options this rdiff
        build advertises in its --help output.
        """
        for i in range(iterations):
            MakeRandomFile(self.basis.path, file_len)
            self._clean_file(self.sig)
            rdiff_help_text = subprocess.check_output(['rdiff', '--help'])
            # Newer rdiff needs -R/-H to force the legacy rollsum/md4 combo.
            if (b'-R' in rdiff_help_text):
                self.assertEqual(os_system((b'rdiff', b'-b', (b'%i' % blocksize), b'-R', b'rollsum', b'-S', b'8', b'-H', b'md4', b'signature', self.basis.path, self.sig.path)), 0)
            elif (b'-H' in rdiff_help_text):
                self.assertEqual(os_system((b'rdiff', b'-b', (b'%i' % blocksize), b'-H', b'md4', b'signature', self.basis.path, self.sig.path)), 0)
            else:
                self.assertEqual(os_system((b'rdiff', b'-b', (b'%i' % blocksize), b'signature', self.basis.path, self.sig.path)), 0)
            with self.sig.open('rb') as fp:
                rdiff_sig = fp.read()
            sf = librsync.SigFile(self.basis.open('rb'), blocksize)
            librsync_sig = sf.read()
            sf.close()
            self.assertEqual(rdiff_sig, librsync_sig)

    def _clean_file(self, rp):
        """Delete ``rp`` on disk if present and refresh its cached metadata."""
        rp.setdata()
        if rp.lstat():
            rp.delete()

    def testSigFile(self):
        """Signatures match rdiff for the default small-file case."""
        self.sig_file_test_helper(512, 5)

    def testSigFile2(self):
        """Signatures match rdiff for various block and file sizes."""
        self.sig_file_test_helper(2048, 1, 60000)
        self.sig_file_test_helper(7168, 1, 6000)
        self.sig_file_test_helper(204800, 1, ((40 * 1024) * 1024))

    def testSigGenerator(self):
        """Incremental SigGenerator output equals one-shot SigFile output."""
        for i in range(5):
            MakeRandomFile(self.basis.path)
            sf = librsync.SigFile(self.basis.open('rb'))
            sigfile_string = sf.read()
            sf.close()
            sig_gen = librsync.SigGenerator()
            with self.basis.open('rb') as infile:
                while 1:
                    buf = infile.read(1000)
                    if (not buf):
                        break
                    sig_gen.update(buf)
                siggen_string = sig_gen.get_sig()
            self.assertEqual(sigfile_string, siggen_string)

    def OldtestDelta(self):
        """(Disabled) byte-compare DeltaFile against rdiff delta output.

        Kept for reference: delta encodings are not guaranteed identical
        between implementations, hence the replacement test below.
        """
        MakeRandomFile(self.basis.path)
        self.assertEqual(os_system((b'rdiff', b'signature', self.basis.path, self.sig.path)), 0)
        for i in range(5):
            MakeRandomFile(self.new.path)
            self.assertEqual(os_system((b'rdiff', b'delta', self.sig.path, self.new.path, self.delta.path)), 0)
            fp = self.delta.open('rb')
            rdiff_delta = fp.read()
            fp.close()
            df = librsync.DeltaFile(self.sig.open('rb'), self.new.open('rb'))
            librsync_delta = df.read()
            df.close()
            print(len(rdiff_delta), len(librsync_delta))
            print(repr(rdiff_delta[:100]))
            print(repr(librsync_delta[:100]))
            self.assertEqual(rdiff_delta, librsync_delta)

    def testDelta(self):
        """Deltas produced by DeltaFile patch correctly via ``rdiff patch``."""
        MakeRandomFile(self.basis.path)
        self._clean_file(self.sig)
        self.assertEqual(os_system((b'rdiff', b'signature', self.basis.path, self.sig.path)), 0)
        for i in range(5):
            MakeRandomFile(self.new.path)
            df = librsync.DeltaFile(self.sig.open('rb'), self.new.open('rb'))
            librsync_delta = df.read()
            df.close()
            fp = self.delta.open('wb')
            fp.write(librsync_delta)
            fp.close()
            self._clean_file(self.new2)
            self.assertEqual(os_system((b'rdiff', b'patch', self.basis.path, self.delta.path, self.new2.path)), 0)
            new_fp = self.new.open('rb')
            new = new_fp.read()
            new_fp.close()
            new2_fp = self.new2.open('rb')
            new2 = new2_fp.read()
            new2_fp.close()
            self.assertEqual(new, new2)

    def testPatch(self):
        """PatchedFile applied to rdiff-produced deltas reconstructs the target."""
        MakeRandomFile(self.basis.path)
        self._clean_file(self.sig)
        self.assertEqual(os_system((b'rdiff', b'signature', self.basis.path, self.sig.path)), 0)
        for i in range(5):
            MakeRandomFile(self.new.path)
            self._clean_file(self.delta)
            self.assertEqual(os_system((b'rdiff', b'delta', self.sig.path, self.new.path, self.delta.path)), 0)
            fp = self.new.open('rb')
            real_new = fp.read()
            fp.close()
            pf = librsync.PatchedFile(self.basis.open('rb'), self.delta.open('rb'))
            librsync_new = pf.read()
            pf.close()
            self.assertEqual(real_new, librsync_new)
class ActionCompleteTest(unittest.TestCase):
    """Exercise rdiff-backup's ``complete`` action (shell completion helper)."""

    def test_action_complete(self):
        """Completion requires --cword, then suggests options, values or files."""
        # Without --cword the action fails.
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ()), Globals.RET_CODE_ERR)
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '1', '--', 'rdiff-backup')), Globals.RET_CODE_ERR)
        # Option-prefix completion.
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '1', '--', 'rdiff-backup', '--verb'), return_stdout=True), b'--verbosity\n')
        # Option-value completion: verbosity accepts 0..9.
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '2', '--', 'rdiff-backup', '--verbosity', ''), return_stdout=True), (os.fsencode('\n'.join(map(str, range(0, 10)))) + b'\n'))
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '1', '--', 'rdiff-backup', '--verbosity', '5'), return_stdout=True), b'--verbosity\n')
        # Positional arguments complete as files.
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '2', '--', 'rdiff-backup', 'backup', 'D'), return_stdout=True), b'::file::\n')
        full_output = comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '2', '--', 'rdiff-backup', 'backup', ''), return_stdout=True)
        self.assertTrue(full_output.startswith(b'--'))
        self.assertTrue(full_output.endswith(b'::file::\n'))
        # At the first word both options and action names are offered.
        full_output = comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '1', '--', 'rdiff-backup', ''), return_stdout=True)
        self.assertTrue(full_output.startswith(b'-V'))
        self.assertTrue(full_output.endswith(b'--version\n'))
        self.assertIn(b'complete\n', full_output)
        self.assertIn(b'backup\n', full_output)
        # A later action word suppresses action-name suggestions.
        full_output = comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '1', '--', 'rdiff-backup', '', 'complete'), return_stdout=True)
        self.assertTrue(full_output.startswith(b'-V'))
        self.assertTrue(full_output.endswith(b'--version\n'))
        self.assertNotIn(b'complete\n', full_output)
        self.assertNotIn(b'backup\n', full_output)
        # File-valued options complete as files too.
        self.assertEqual(comtst.rdiff_backup_action(True, True, None, None, ('--api-version', '201'), b'complete', ('--cword', '3', '--', 'rdiff-backup', 'backup', '--user-mapping-file', ''), return_stdout=True), b'::file::\n')
class MobileNet(nn.Module):
    """MobileNet backbone built from an init conv block and stages of
    depthwise-separable conv units, followed by average pooling and a
    linear classifier."""

    def __init__(self, channels, first_stage_stride, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(MobileNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        init_block_channels = channels[0][0]
        self.features.add_module('init_block', conv3x3_block(in_channels=in_channels, out_channels=init_block_channels, stride=2))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels[1:]):
            stage = nn.Sequential()
            for j, out_channels in enumerate(stage_channels):
                # The first unit of each stage downsamples (except stage 0
                # unless first_stage_stride is set).
                if j == 0 and (i != 0 or first_stage_stride):
                    stride = 2
                else:
                    stride = 1
                stage.add_module('unit{}'.format(j + 1), DwsConvBlock(in_channels=in_channels, out_channels=out_channels, stride=stride))
                in_channels = out_channels
            self.features.add_module('stage{}'.format(i + 1), stage)
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))

        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        """Initialize weights by module role, matched on module name."""
        for name, module in self.named_modules():
            if 'dw_conv.conv' in name:
                init.kaiming_normal_(module.weight, mode='fan_in')
            elif name == 'init_block.conv' or 'pw_conv.conv' in name:
                init.kaiming_normal_(module.weight, mode='fan_out')
            elif 'bn' in name:
                init.constant_(module.weight, 1)
                init.constant_(module.bias, 0)
            elif 'output' in name:
                init.kaiming_normal_(module.weight, mode='fan_out')
                init.constant_(module.bias, 0)

    def forward(self, x):
        features = self.features(x)
        flat = features.view(features.size(0), -1)
        return self.output(flat)
class Picture(MetadataBlock):
    """FLAC PICTURE metadata block (embedded cover art and its properties)."""

    code = 6
    _distrust_size = True

    def __init__(self, data=None):
        # Defaults for an empty picture; load() overwrites them when parsing.
        self.type = 0
        self.mime = u''
        self.desc = u''
        self.width = 0
        self.height = 0
        self.depth = 0
        self.colors = 0
        self.data = b''
        super(Picture, self).__init__(data)

    def __eq__(self, other):
        fields = ('type', 'mime', 'desc', 'width', 'height', 'depth', 'colors', 'data')
        try:
            return all(getattr(self, f) == getattr(other, f) for f in fields)
        except (AttributeError, TypeError):
            # Objects lacking these attributes are never equal.
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        """Parse the big-endian PICTURE block layout from a file-like object."""
        self.type, mime_len = struct.unpack('>2I', data.read(8))
        self.mime = data.read(mime_len).decode('UTF-8', 'replace')
        (desc_len,) = struct.unpack('>I', data.read(4))
        self.desc = data.read(desc_len).decode('UTF-8', 'replace')
        (self.width, self.height, self.depth,
         self.colors, data_len) = struct.unpack('>5I', data.read(20))
        self.data = data.read(data_len)

    def write(self):
        """Serialize the block back to the big-endian byte layout."""
        mime = self.mime.encode('UTF-8')
        desc = self.desc.encode('UTF-8')
        out = BytesIO()
        out.write(struct.pack('>2I', self.type, len(mime)))
        out.write(mime)
        out.write(struct.pack('>I', len(desc)))
        out.write(desc)
        out.write(struct.pack('>5I', self.width, self.height, self.depth, self.colors, len(self.data)))
        out.write(self.data)
        return out.getvalue()

    def __repr__(self):
        return ("<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime, len(self.data)))
def get_most_confident_predictions(predictions_voc):
    """Keep, per image, only the highest-scoring prediction.

    Side effect: annotates every prediction dict in place with an ``id``
    (its index in the input list) and an ``area`` (bbox width * height).
    Ties keep the earliest prediction.
    """
    best_by_image = {}
    for idx, pred in enumerate(predictions_voc):
        pred['id'] = idx
        pred['area'] = int(pred['bbox'][2] * pred['bbox'][3])
        image_id = pred['image_id']
        current_best = best_by_image.get(image_id)
        if current_best is None or current_best['score'] < pred['score']:
            best_by_image[image_id] = pred
    return list(best_by_image.values())
class HatchVersionConfig(Generic[PluginManagerBound]):
    """Lazily-resolved project version configuration (the `tool.hatch.version` table).

    Resolves the version *source* and *scheme* plugins on first access and
    caches every result. All accessors are properties: internal code reads
    them as attributes (e.g. ``self.source.get_version_data()``), so the
    original plain-method definitions here were stripped ``@property``
    decorators — restored below.
    """

    def __init__(self, root: str, config: dict[(str, Any)], plugin_manager: PluginManagerBound) -> None:
        self.root = root
        self.config = config
        self.plugin_manager = plugin_manager
        # Lazy caches, populated on first property access.
        self._cached: (str | None) = None
        self._source_name: (str | None) = None
        self._scheme_name: (str | None) = None
        self._source: (VersionSourceInterface | None) = None
        self._scheme: (VersionSchemeInterface | None) = None

    @property
    def cached(self) -> str:
        """The project version string, fetched once from the version source."""
        if self._cached is None:
            try:
                self._cached = self.source.get_version_data()['version']
            except Exception as e:
                message = f'Error getting the version from source `{self.source.PLUGIN_NAME}`: {e}'
                # Re-raise as the same exception type with a clearer message;
                # `from None` hides the redundant chained traceback.
                raise type(e)(message) from None
        return self._cached

    @property
    def source_name(self) -> str:
        """Validated `source` option (defaults to 'regex')."""
        if self._source_name is None:
            source: str = self.config.get('source', 'regex')
            if not source:
                message = 'The `source` option under the `tool.hatch.version` table must not be empty if defined'
                raise ValueError(message)
            if not isinstance(source, str):
                message = 'Field `tool.hatch.version.source` must be a string'
                raise TypeError(message)
            self._source_name = source
        return self._source_name

    @property
    def scheme_name(self) -> str:
        """Validated `scheme` option (defaults to 'standard')."""
        if self._scheme_name is None:
            scheme: str = self.config.get('scheme', 'standard')
            if not scheme:
                message = 'The `scheme` option under the `tool.hatch.version` table must not be empty if defined'
                raise ValueError(message)
            if not isinstance(scheme, str):
                message = 'Field `tool.hatch.version.scheme` must be a string'
                raise TypeError(message)
            self._scheme_name = scheme
        return self._scheme_name

    @property
    def source(self) -> VersionSourceInterface:
        """Instantiated version-source plugin; raises UnknownPluginError if unregistered."""
        if self._source is None:
            from copy import deepcopy

            source_name = self.source_name
            version_source = self.plugin_manager.version_source.get(source_name)
            if version_source is None:
                from hatchling.plugin.exceptions import UnknownPluginError

                message = f'Unknown version source: {source_name}'
                raise UnknownPluginError(message)
            # Deep-copy the config so the plugin cannot mutate shared state.
            self._source = version_source(self.root, deepcopy(self.config))
        return self._source

    @property
    def scheme(self) -> VersionSchemeInterface:
        """Instantiated version-scheme plugin; raises UnknownPluginError if unregistered."""
        if self._scheme is None:
            from copy import deepcopy

            scheme_name = self.scheme_name
            version_scheme = self.plugin_manager.version_scheme.get(scheme_name)
            if version_scheme is None:
                from hatchling.plugin.exceptions import UnknownPluginError

                message = f'Unknown version scheme: {scheme_name}'
                raise UnknownPluginError(message)
            self._scheme = version_scheme(self.root, deepcopy(self.config))
        return self._scheme
class TestEdit():
def parse_config(filename):
parser = configparser.ConfigParser()
with open(filename, encoding='utf-8') as reader:
parser.read_file(reader)
return parser
def write_text(file, content):
with open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\njaraco=')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert (parser.get('names', 'jaraco') == '')
assert (parser.get('names', 'other') == 'yes')
def test_case_retained(self, tmpdir):
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\nFoO=bAr')
setopt.edit_config(str(config), dict(names=dict(oTher='yes')))
actual = config.read_text(encoding='ascii')
assert ('FoO' in actual)
assert ('oTher' in actual) |
def base_units_convert(q_format: str, a_format: str) -> QuizEntry:
    """Build a quiz entry about converting a random unit into its base units.

    Picks a unit at random, formats the question from the unit name plus the
    first element of its mapping entry, and the answer from the second.
    """
    chosen_unit = random.choice(list(UNITS_TO_BASE_UNITS))
    entry = UNITS_TO_BASE_UNITS[chosen_unit]
    question = q_format.format(chosen_unit + ' ' + entry[0])
    answer = a_format.format(entry[1])
    return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
def test_very_deep_dag():
    """Stress test: a 2000-deep chain of combinational components feeding a
    sequential accumulator; checks the simulated output after each tick."""
    class Inner(Component):
        def construct(s):
            s.in_ = InPort(Bits32)
            s.out = OutPort(Bits32)
            # NOTE(review): this nested function looks like it lost an
            # '@update' decorator during source recovery — confirm against
            # the original; as written it is never registered.
            def up():
                s.out = (s.in_ + 1)
        def done(s):
            return True
        def line_trace(s):
            # NOTE(review): references s.a / s.b which Inner never defines —
            # presumably leftover from a copied trace method; confirm.
            return '{} > {}'.format(s.a, s.b)
    class Top(Component):
        def construct(s, N=2000):
            # Chain N Inner blocks: each output drives the next input.
            s.inners = [Inner() for i in range(N)]
            for i in range((N - 1)):
                s.inners[i].out //= s.inners[(i + 1)].in_
            s.out = OutPort(Bits32)
            # NOTE(review): '_ff' looks like a stripped '@update_ff'
            # decorator for the sequential block below — confirm.
            _ff
            def ff():
                if s.reset:
                    s.out <<= 0
                else:
                    # Accumulate the value emerging from the deep chain.
                    s.out <<= (s.out + s.inners[(N - 1)].out)
        def line_trace(s):
            return ((str(s.inners[(- 1)].out) + ' ') + str(s.out))
    N = 2000
    A = Top(N)
    A.apply(UnrollSim())
    A.sim_reset()
    T = 0
    while (T < 5):
        # After T ticks the accumulator holds T * N (chain adds 1 per stage).
        assert (A.out == (T * N))
        A.sim_tick()
        T += 1
    return A
def infer_output_norm(module, output_norm=None):
    """Reconcile a requested output normalization with a module's built-in one.

    Returns a ``(name, transform)`` pair where ``transform`` is a torch module
    to apply after ``module``. If the module already normalizes its output (or
    no extra normalization is needed), returns ``(None, NoOp())``.

    Raises ValueError for an unsupported ``output_norm`` or when it conflicts
    with the module's own normalization.
    """
    # Requested normalization is already what the module provides.
    if output_norm == module.output_norm():
        return (None, NoOp())
    # Nothing requested but the module normalizes on its own: warn and no-op.
    if output_norm is None and module.output_norm() is not None:
        logger = logging.getLogger('infer_output_norm()')
        logger.warning(
            f'trying to set output_norm ({output_norm}) '
            f'but got module.output_norm() ({module.output_norm()}), '
            f'the combined output_norm() will be ({module.output_norm()})')
        return (None, NoOp())
    if output_norm in ('log_softmax', 'softmax'):
        if module.output_norm() is not None:
            raise ValueError(
                f'incompatible output_norm ({output_norm}) '
                f'and module.output_norm() ({module.output_norm()})')
        if output_norm == 'log_softmax':
            return ('log_softmax', torch.nn.LogSoftmax(dim=(- 1)))
        return ('softmax', torch.nn.Softmax(dim=(- 1)))
    raise ValueError(
        f'output_norm ({output_norm}) not in '
        f'supported list = [None, softmax, log_softmax]')
def create_bar_chart(series_list: List[QFSeries], names_list, title: str, lines: List[QFSeries], recession_series: QFSeries=None, start_x: datetime=None, end_x: datetime=None, quarterly: bool=False, date_label_format: Tuple[(str, str)]=('%Y', '%y Q{}'), recession_name: str=None) -> BarChart:
    """Build a vertical BarChart from one or more series, with optional overlay
    lines, recession shading, a title, a legend, and (for quarterly data)
    density-aware x-axis tick formatting.

    names_list must provide a label for every series AND every overlay line.
    date_label_format is (yearly_format, quarterly_format).
    """
    assert (len(names_list) > (len(lines) + 1)), 'Not all labels have been specified. Specify one in the list for each series and line.'
    # Quarterly bars are drawn much thicker so adjacent quarters touch.
    bar_chart = BarChart(orientation=Orientation.Vertical, start_x=start_x, end_x=end_x, thickness=(60 if quarterly else 20), align='center')
    bar_chart.tick_fontweight = 'bold'
    bar_chart.tick_color = 'black'
    # Track the overall date span across all series (used for tick density).
    series_start = series_list[0].index.min()
    series_end = series_list[0].index.max()
    data_elements = []
    for series in series_list:
        if (series.index.min() < series_start):
            series_start = series.index.min()
        if (series.index.max() > series_end):
            series_end = series.index.max()
        data_element = DataElementDecorator(series)
        data_elements.append(data_element)
        bar_chart.add_decorator(data_element)
    style_colors = Chart.get_axes_colors()
    line_decorators = []
    for i in range(0, len(lines)):
        # Walk the style palette backwards so lines contrast with the bars.
        color = style_colors[((len(style_colors) - (i % len(style_colors))) - 1)]
        line_decorator = SeriesLineDecorator(lines[i][start_x:end_x], key=('series_line_' + str(i)), linewidth=4, color=color)
        line_decorators.append(line_decorator)
        bar_chart.add_decorator(line_decorator)
    if (title is not None):
        title_decorator = TitleDecorator(title, key='title')
        bar_chart.add_decorator(title_decorator)
    legend_decorator = _create_legend(bar_chart, data_elements, line_decorators, names_list, quarterly)
    if (recession_series is not None):
        # Shade recession periods; only add a legend entry when named.
        span_decorator = SpanDecorator.from_int_list(recession_series, key='span')
        bar_chart.add_decorator(span_decorator)
        if (recession_name is not None):
            legend_decorator.add_entry(span_decorator, recession_name)
    if quarterly:
        display_start = (series_start if (start_x is None) else start_x)
        display_end = (series_end if (end_x is None) else end_x)
        years = ((display_end - display_start).days // 365)
        # Pick tick spacing/format by span: short ranges label every quarter,
        # long ranges label every 5 years, otherwise yearly.
        if (years < 2):
            show_every = 1
            date_format = date_label_format[1]
        elif (years > 10):
            show_every = 5
            date_format = date_label_format[0]
        else:
            show_every = 4
            date_format = date_label_format[0]
        def func(x, pos):
            return _quarterly_formatter(x, pos, show_every, date_format)
        axes_formatter = AxesFormatterDecorator(x_major=FuncFormatter(func), key='formatter')
        bar_chart.add_decorator(axes_formatter)
        if (years > 10):
            x_major = YearLocator()
        else:
            x_major = MonthLocator(range(1, 13), bymonthday=30, interval=3)
        axes_locator = AxesLocatorDecorator(x_major=x_major, key='locator')
        bar_chart.add_decorator(axes_locator)
    return bar_chart
class IWICComponentInfo(com.pIUnknown):
    """ctypes COM vtable stub for the Windows Imaging Component
    IWICComponentInfo interface.

    Only the method slots are declared (as bare STDMETHODs) so the vtable
    layout matches the native interface; full argument signatures are not
    spelled out here.
    """
    # Slot order must match the native vtable exactly — do not reorder.
    _methods_ = [('GetComponentType', com.STDMETHOD()), ('GetCLSID', com.STDMETHOD()), ('GetSigningStatus', com.STDMETHOD()), ('GetAuthor', com.STDMETHOD()), ('GetVendorGUID', com.STDMETHOD()), ('GetVersion', com.STDMETHOD()), ('GetSpecVersion', com.STDMETHOD()), ('GetFriendlyName', com.STDMETHOD())]
def main(graph_fname, node_vec_fname, options):
    """Full hin2vec pipeline: load the HIN, sample random walks to a temp
    file, shell out to the C trainer, then dump vectors and clean up.
    """
    print('Load a HIN...', flush=True)
    hin = loader.load_a_HIN(graph_fname)

    print('Generate random walks...', flush=True)
    walk_path = 'data/random_walk.txt'
    with open(walk_path, 'w') as walk_file:
        for walk in hin.random_walks(options.walk_num, options.walk_length):
            walk_file.write('%s\n' % ' '.join(map(str, walk)))

    vec_path = 'data/vectors.txt'
    print('Learn representations...', flush=True)
    # Invoke the native trainer; boolean options are passed as 0/1 ints.
    command = ('./bin/hin2vec -size %d -train %s -alpha %f -output %s '
               '-window %d -negative %d -threads %d -no_circle %d '
               '-sigmoid_reg %d ' % (options.dim, walk_path, options.alpha,
                                     vec_path, options.window, options.neg,
                                     options.num_processes,
                                     (1 - (options.allow_circle * 1)),
                                     (options.sigmoid_reg * 1)))
    print(command, flush=True)
    os.system(command)

    print('Dump vectors...', flush=True)
    output_node2vec(hin, vec_path, node_vec_fname, options)
    # Remove the intermediate walk and vector files.
    os.remove(walk_path)
    os.remove(vec_path)
    return
class LexerContext():
    """Mutable lexing state: the text being scanned, the current position,
    an optional end offset (defaults to the text length), and a state stack
    (defaults to ['root'])."""

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        # Falsy end/stack (None, 0, empty list) fall back to the defaults,
        # matching the original `or` semantics.
        self.end = end if end else len(text)
        self.stack = stack if stack else ['root']

    def __repr__(self):
        # `end` is deliberately omitted from the repr.
        return 'LexerContext(%r, %r, %r)' % (self.text, self.pos, self.stack)
class TerminationCondition(object):
    """Convergence tracker for an iterative scalar minimizer.

    ``to_stop`` is called once per iteration with the previous and next
    iterates/values; it reports convergence based on absolute/relative
    tolerances on the step size and the change in objective, and remembers
    the best point seen so far. ``get_best_x`` returns the caller's ``x``
    when the run converged, otherwise warns and returns the best recorded
    point.
    """

    def __init__(self, f_tol: float, f_rtol: float, x_tol: float, x_rtol: float, verbose: bool):
        self.f_tol = f_tol
        self.f_rtol = f_rtol
        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.verbose = verbose
        # Bookkeeping for the best iterate observed across the run.
        self._ever_converge = False
        self._max_i = -1
        self._best_dxnorm = float('inf')
        self._best_df = float('inf')
        self._best_f = float('inf')
        self._best_x: Optional[torch.Tensor] = None

    def to_stop(self, i: int, xnext: torch.Tensor, x: torch.Tensor, f: torch.Tensor, fprev: torch.Tensor) -> bool:
        """Return True when iteration i (> 0) satisfies any tolerance check."""
        x_norm: float = float(x.detach().norm().item())
        step_norm: float = float((x - xnext).detach().norm().item())
        f_abs: float = float(f.detach().abs().item())
        f_delta: float = float((fprev - f).detach().abs().item())
        f_value: float = float(f.detach().item())
        # Converged if the step or the objective change is small enough,
        # in either absolute or relative terms.
        converge = (step_norm < self.x_tol
                    or step_norm < (self.x_rtol * x_norm)
                    or f_delta < self.f_tol
                    or f_delta < (self.f_rtol * f_abs))
        if self.verbose:
            if i == 0:
                print('   #: f | dx, df')
            if converge:
                print('Finish with convergence')
            if i == 0 or ((i + 1) % 10) == 0 or converge:
                print('%4d: %.6e | %.3e, %.3e' % ((i + 1), f, step_norm, f_delta))
        # Never report convergence on the very first call.
        stop = (i > 0) and converge
        if stop and not self._ever_converge:
            self._ever_converge = True
        if i > self._max_i:
            self._max_i = i
        # Track the best (lowest-f) iterate for non-converging runs.
        if f_value < self._best_f:
            self._best_f = f_value
            self._best_x = x
            self._best_dxnorm = step_norm
            self._best_df = f_delta
        return stop

    def get_best_x(self, x: torch.Tensor) -> torch.Tensor:
        """Return x if the run converged (or never ran); otherwise warn and
        return the best iterate recorded by to_stop."""
        if self._ever_converge or self._max_i <= -1:
            return x
        msg = ('The minimizer does not converge after %d iterations. Best |dx|=%.4e, |df|=%.4e, f=%.4e' % (self._max_i, self._best_dxnorm, self._best_df, self._best_f))
        warnings.warn(msg)
        assert isinstance(self._best_x, torch.Tensor)
        return self._best_x
class FewVLMPretraining(FewVLM):
    """FewVLM pretraining wrapper: per-task weighted language-modeling loss
    over (visual features, text) batches.

    Note: the recovered source had bare ``_grad()`` lines before valid_step
    and generate_step — stripped ``@torch.no_grad()`` decorators, restored
    here. A duplicated task_counts/task_loss initialization in train_step
    was also removed.
    """

    def __init__(self, config):
        super().__init__(config)
        # Comma-separated task names, e.g. "lm,prefix".
        self.losses = self.config.losses.split(',')

    def train_step(self, batch):
        """One training step; returns the weighted loss plus per-task stats."""
        device = next(self.parameters()).device
        vis_feats = batch['vis_feats'].to(device)
        input_ids = batch['input_ids'].to(device)
        vis_pos = batch['boxes'].to(device)
        lm_labels = batch['target_ids'].to(device)
        loss_weights = batch['loss_weights'].to(device)
        output = self(input_ids=input_ids, vis_inputs=(vis_feats, vis_pos), labels=lm_labels, return_dict=True)
        assert ('loss' in output)
        # Positions labelled -100 are padding/ignored by the LM loss.
        lm_mask = (lm_labels != (- 100))
        lm_mask = lm_mask.float()
        (B, L) = lm_labels.size()
        loss = output['loss']
        # Per-example mean over the unmasked token positions.
        loss = (loss.view(B, L) * lm_mask)
        loss = (loss.sum(dim=1) / lm_mask.sum(dim=1).clamp(min=1))
        results = {}
        results['loss'] = (loss * loss_weights).mean()
        results['total_loss'] = loss.detach().sum()
        results['total_loss_count'] = len(loss)
        # Accumulate per-task loss sums and counts for logging.
        task_counts = {task: 0 for task in self.losses}
        task_loss = {task: 0 for task in self.losses}
        for (_loss, task) in zip(loss.detach(), batch['task']):
            task_loss[task] += _loss
            task_counts[task] += 1
        for task in self.losses:
            if (task_counts[task] > 0):
                results[f'{task}_loss'] = task_loss[task]
                results[f'{task}_loss_count'] = task_counts[task]
        return results

    @torch.no_grad()
    def valid_step(self, batch):
        """Validation step: same computation as train_step, without gradients."""
        self.eval()
        device = next(self.parameters()).device
        vis_feats = batch['vis_feats'].to(device)
        input_ids = batch['input_ids'].to(device)
        vis_pos = batch['boxes'].to(device)
        lm_labels = batch['target_ids'].to(device)
        loss_weights = batch['loss_weights'].to(device)
        output = self(input_ids=input_ids, vis_inputs=(vis_feats, vis_pos), labels=lm_labels, return_dict=True)
        assert ('loss' in output)
        lm_mask = (lm_labels != (- 100))
        lm_mask = lm_mask.float()
        (B, L) = lm_labels.size()
        loss = output['loss']
        loss = (loss.view(B, L) * lm_mask)
        loss = (loss.sum(dim=1) / lm_mask.sum(dim=1).clamp(min=1))
        results = {}
        results['loss'] = (loss * loss_weights).mean()
        results['total_loss'] = loss.detach().sum()
        results['total_loss_count'] = len(loss)
        task_counts = {task: 0 for task in self.losses}
        task_loss = {task: 0 for task in self.losses}
        for (_loss, task) in zip(loss.detach(), batch['task']):
            task_loss[task] += _loss
            task_counts[task] += 1
        for task in self.losses:
            if (task_counts[task] > 0):
                results[f'{task}_loss'] = task_loss[task]
                results[f'{task}_loss_count'] = task_counts[task]
        return results

    @torch.no_grad()
    def generate_step(self, batch):
        """Decode text for a batch and return the detokenized sentences."""
        self.eval()
        device = next(self.parameters()).device
        vis_feats = batch['vis_feats'].to(device)
        input_ids = batch['input_ids'].to(device)
        vis_pos = batch['boxes'].to(device)
        vis_attention_mask = None
        if ('vis_attention_mask' in batch):
            vis_attention_mask = batch['vis_attention_mask'].to(device)
        output = self.generate(input_ids=input_ids, vis_inputs=(vis_feats, vis_pos), vis_attention_mask=vis_attention_mask)
        generated_sents = self.tokenizer.batch_decode(output, skip_special_tokens=True)
        return generated_sents
class HuggingFaceTokenizer(object):
    """Thin wrapper around a HuggingFace tokenizer (currently only
    mBART-50) that tokenizes text into subword token strings.
    """

    def __init__(self, pretrained_tokenizer):
        if (pretrained_tokenizer == 'facebook/mbart-large-50'):
            from transformers import MBart50TokenizerFast
            tokenizer_ = MBart50TokenizerFast.from_pretrained('facebook/mbart-large-50', src_lang='en_XX')
        else:
            # Only mBART-50 is supported for now.
            raise NotImplementedError
        self._tokenizer = tokenizer_

    def tokenize(self, text, src_lang=None):
        """Tokenize *text*, optionally switching the tokenizer to the
        FAIRSEQ language code whose 2-letter prefix matches *src_lang*.

        Raises NotImplementedError when *src_lang* matches no known code.
        """
        if (src_lang is not None):
            found = False
            for lang in FAIRSEQ_LANGUAGE_CODES:
                if (lang[:2] == src_lang):
                    self._tokenizer.src_lang = lang
                    found = True
                    break
            if (not found):
                # BUG FIX: previously printed `lang` (the last code iterated),
                # not the language the caller actually requested.
                print(('Language code %s not found' % src_lang))
                raise NotImplementedError
        tensor = self._tokenizer(text)['input_ids']
        tokens = self._tokenizer.convert_ids_to_tokens(tensor, skip_special_tokens=False)
        return tokens
def test_version_check_regex():
    """SECURITY_EXPRESSION must match bracketed CRITICAL UPDATE tags (with
    optional trailing text) and reject tab-separated or unrelated brackets."""
    def first_match(sample):
        # Return the matched span text, or None when nothing matches.
        found = re.search(SECURITY_EXPRESSION, sample)
        return None if found is None else found.group(0)

    sample_embedded = 'Something OTHER UPDATE. [CRITICAL UPDATE Some text.:)]. Something else.'
    sample_tabbed = '\n\n[CRITICAL\t UPDATE] some text goes here.'
    sample_unrelated = '[NOTHING]'
    sample_prefixed = 'asd[CRITICAL UPDATE]'
    sample_symbols = 'Other text [CRITICAL UPDATE:>>>>>>>]><<<<asdeqsffqwe qwe sss.'
    sample_unicode = '\n\n[CRITICAL UPDATE: U+1F00 1F62D ] some text goes here.'

    assert first_match(sample_embedded) == '[CRITICAL UPDATE Some text.:)]'
    assert first_match(sample_tabbed) is None
    assert first_match(sample_unrelated) is None
    assert first_match(sample_prefixed) == '[CRITICAL UPDATE]'
    assert first_match(sample_symbols) == '[CRITICAL UPDATE:>>>>>>>]'
    assert first_match(sample_unicode) == '[CRITICAL UPDATE: U+1F00 1F62D ]'
    # The closing bracket must be part of the match.
    assert first_match(sample_unicode) != '[CRITICAL UPDATE: U+1F00 1F62D '
class MobileNetV3(nn.Module):
    """MobileNetV3 (timm-style): conv stem, EfficientNetBuilder-generated
    blocks, then a pooled conv head and linear classifier.

    The recovered source had bare ``.ignore`` lines before group_matcher,
    set_grad_checkpointing and get_classifier — stripped ``@torch.jit.ignore``
    decorators (they must be excluded from TorchScript); restored here.
    """

    def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, fix_stem=False, num_features=1280, head_bias=True, pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, round_chs_fn=round_channels, drop_rate=0.0, drop_path_rate=0.0, global_pool='avg'):
        super(MobileNetV3, self).__init__()
        act_layer = (act_layer or nn.ReLU)
        norm_layer = (norm_layer or nn.BatchNorm2d)
        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
        se_layer = (se_layer or SqueezeExcite)
        self.num_classes = num_classes
        self.num_features = num_features
        self.drop_rate = drop_rate
        self.grad_checkpointing = False
        # Stem: optionally round the stem width like every other stage.
        if (not fix_stem):
            stem_size = round_chs_fn(stem_size)
        self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        self.bn1 = norm_act_layer(stem_size, inplace=True)
        # Middle stages generated from the block-args spec.
        builder = EfficientNetBuilder(output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate)
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = builder.features
        head_chs = builder.in_chs
        # Head: pool, 1x1 conv expansion, activation, classifier.
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        num_pooled_chs = (head_chs * self.global_pool.feat_mult())
        self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
        self.act2 = act_layer(inplace=True)
        self.flatten = (nn.Flatten(1) if global_pool else nn.Identity())
        self.classifier = (Linear(self.num_features, num_classes) if (num_classes > 0) else nn.Identity())
        efficientnet_init_weights(self)

    def as_sequential(self):
        """Flatten the network into one nn.Sequential (for export/inspection)."""
        layers = [self.conv_stem, self.bn1]
        layers.extend(self.blocks)
        layers.extend([self.global_pool, self.conv_head, self.act2])
        layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers)

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        """Parameter-group regexes for layer-wise LR decay / freezing."""
        return dict(stem='^conv_stem|bn1', blocks=('^blocks\\.(\\d+)' if coarse else '^blocks\\.(\\d+)\\.(\\d+)'))

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        return self.classifier

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace pooling + classifier for a new number of classes."""
        self.num_classes = num_classes
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.flatten = (nn.Flatten(1) if global_pool else nn.Identity())
        self.classifier = (Linear(self.num_features, num_classes) if (num_classes > 0) else nn.Identity())

    def forward_features(self, x):
        x = self.conv_stem(x)
        x = self.bn1(x)
        # Gradient checkpointing only applies in eager (non-scripted) mode.
        if (self.grad_checkpointing and (not torch.jit.is_scripting())):
            x = checkpoint_seq(self.blocks, x, flatten=True)
        else:
            x = self.blocks(x)
        return x

    def forward_head(self, x, pre_logits: bool=False):
        x = self.global_pool(x)
        x = self.conv_head(x)
        x = self.act2(x)
        if pre_logits:
            return x.flatten(1)
        else:
            x = self.flatten(x)
            if (self.drop_rate > 0.0):
                x = F.dropout(x, p=self.drop_rate, training=self.training)
            return self.classifier(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x
class Lambda(Layer):
    """Keras layer that wraps an arbitrary function as a Layer.

    Fixes in this revision: ``from_config`` takes ``cls`` and calls
    ``cls(**config)`` but was missing its ``@classmethod`` decorator —
    restored. The stray ``_lambda_support`` statement (a stripped legacy
    decorator, presumably ``@interfaces.legacy_lambda_support``) would raise
    NameError at class creation and is commented out; restore the decorator
    if the legacy interfaces module is available.
    """

    # NOTE(review): stripped decorator residue, see class docstring:
    # _lambda_support
    def __init__(self, function, output_shape=None, mask=None, arguments=None, **kwargs):
        super(Lambda, self).__init__(**kwargs)
        self.function = function
        self.arguments = (arguments if arguments else {})
        if (mask is not None):
            self.supports_masking = True
        self.mask = mask
        if (output_shape is None):
            self._output_shape = None
        elif isinstance(output_shape, (tuple, list)):
            self._output_shape = tuple(output_shape)
        else:
            if (not callable(output_shape)):
                raise TypeError('In Lambda, `output_shape` must be a list, a tuple, or a function.')
            self._output_shape = output_shape

    def compute_output_shape(self, input_shape):
        """Infer the output shape: run the function on placeholders under
        TensorFlow, use the user-provided shape/callable otherwise."""
        if (self._output_shape is None):
            if (K.backend() == 'tensorflow'):
                if isinstance(input_shape, list):
                    xs = [K.placeholder(shape=shape) for shape in input_shape]
                    x = self.call(xs)
                else:
                    x = K.placeholder(shape=input_shape)
                    x = self.call(x)
                if isinstance(x, list):
                    return [K.int_shape(x_elem) for x_elem in x]
                else:
                    return K.int_shape(x)
            # Non-TF backends cannot infer shapes; assume shape-preserving.
            warnings.warn('`output_shape` argument not specified for layer {} and cannot be automatically inferred with the Theano backend. Defaulting to output shape `{}` (same as input shape). If the expected output shape is different, specify it via the `output_shape` argument.'.format(self.name, input_shape))
            return input_shape
        elif isinstance(self._output_shape, (tuple, list)):
            if isinstance(input_shape, list):
                num_samples = input_shape[0][0]
            else:
                num_samples = (input_shape[0] if input_shape else None)
            return ((num_samples,) + tuple(self._output_shape))
        else:
            shape = self._output_shape(input_shape)
            if (not isinstance(shape, (list, tuple))):
                raise ValueError('`output_shape` function must return a tuple or a list of tuples.')
            if isinstance(shape, list):
                if (isinstance(shape[0], int) or (shape[0] is None)):
                    shape = tuple(shape)
            return shape

    def call(self, inputs, mask=None):
        arguments = self.arguments
        # Forward the mask only when the wrapped function accepts one.
        if has_arg(self.function, 'mask'):
            arguments['mask'] = mask
        return self.function(inputs, **arguments)

    def compute_mask(self, inputs, mask=None):
        if callable(self.mask):
            return self.mask(inputs, mask)
        return self.mask

    def get_config(self):
        """Serialize, marshalling lambdas via func_dump and plain functions
        by name."""
        if isinstance(self.function, python_types.LambdaType):
            function = func_dump(self.function)
            function_type = 'lambda'
        else:
            function = self.function.__name__
            function_type = 'function'
        if isinstance(self._output_shape, python_types.LambdaType):
            output_shape = func_dump(self._output_shape)
            output_shape_type = 'lambda'
        elif callable(self._output_shape):
            output_shape = self._output_shape.__name__
            output_shape_type = 'function'
        else:
            output_shape = self._output_shape
            output_shape_type = 'raw'
        config = {'function': function, 'function_type': function_type, 'output_shape': output_shape, 'output_shape_type': output_shape_type, 'arguments': self.arguments}
        base_config = super(Lambda, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Rebuild a Lambda from its config (inverse of get_config)."""
        globs = globals()
        if custom_objects:
            globs = dict((list(globs.items()) + list(custom_objects.items())))
        function_type = config.pop('function_type')
        if (function_type == 'function'):
            function = deserialize_keras_object(config['function'], custom_objects=custom_objects, printable_module_name='function in Lambda layer')
        elif (function_type == 'lambda'):
            function = func_load(config['function'], globs=globs)
        else:
            raise TypeError('Unknown function type:', function_type)
        output_shape_type = config.pop('output_shape_type')
        if (output_shape_type == 'function'):
            output_shape = deserialize_keras_object(config['output_shape'], custom_objects=custom_objects, printable_module_name='output_shape function in Lambda layer')
        elif (output_shape_type == 'lambda'):
            output_shape = func_load(config['output_shape'], globs=globs)
        else:
            output_shape = config['output_shape']
        # Restore ndarray-valued keyword arguments serialized as dicts.
        if ('arguments' in config):
            for key in config['arguments']:
                if isinstance(config['arguments'][key], dict):
                    arg_dict = config['arguments'][key]
                    if (('type' in arg_dict) and (arg_dict['type'] == 'ndarray')):
                        config['arguments'][key] = np.array(arg_dict['value'])
        config['function'] = function
        config['output_shape'] = output_shape
        return cls(**config)
class WebKitElement(webelem.AbstractWebElement):
_tab: 'webkittab.WebKitTab'
def __init__(self, elem: QWebElement, tab: 'webkittab.WebKitTab') -> None:
super().__init__(tab)
if isinstance(elem, self.__class__):
raise TypeError('Trying to wrap a wrapper!')
if elem.isNull():
raise IsNullError('{} is a null element!'.format(elem))
self._elem = elem
def __str__(self) -> str:
self._check_vanished()
return self._elem.toPlainText()
def __eq__(self, other: object) -> bool:
if (not isinstance(other, WebKitElement)):
return NotImplemented
return (self._elem == other._elem)
def __getitem__(self, key: str) -> str:
self._check_vanished()
if (key not in self):
raise KeyError(key)
return self._elem.attribute(key)
def __setitem__(self, key: str, val: str) -> None:
self._check_vanished()
self._elem.setAttribute(key, val)
def __delitem__(self, key: str) -> None:
self._check_vanished()
if (key not in self):
raise KeyError(key)
self._elem.removeAttribute(key)
def __contains__(self, key: object) -> bool:
assert isinstance(key, str)
self._check_vanished()
return self._elem.hasAttribute(key)
def __iter__(self) -> Iterator[str]:
self._check_vanished()
(yield from self._elem.attributeNames())
def __len__(self) -> int:
self._check_vanished()
return len(self._elem.attributeNames())
def _check_vanished(self) -> None:
if self._elem.isNull():
raise IsNullError('Element {} vanished!'.format(self._elem))
def has_frame(self) -> bool:
self._check_vanished()
return (self._elem.webFrame() is not None)
def geometry(self) -> QRect:
self._check_vanished()
return self._elem.geometry()
def classes(self) -> Set[str]:
self._check_vanished()
return set(self._elem.classes())
def tag_name(self) -> str:
self._check_vanished()
return self._elem.tagName().lower()
def outer_xml(self) -> str:
self._check_vanished()
return self._elem.toOuterXml()
def is_content_editable_prop(self) -> bool:
self._check_vanished()
val = self._elem.evaluateJavaScript('this.isContentEditable || false')
assert isinstance(val, bool)
return val
def value(self) -> webelem.JsValueType:
self._check_vanished()
val = self._elem.evaluateJavaScript('this.value')
assert isinstance(val, (int, float, str, type(None))), val
return val
def set_value(self, value: webelem.JsValueType) -> None:
self._check_vanished()
if self._tab.is_deleted():
raise webelem.OrphanedError('Tab containing element vanished')
if self.is_content_editable():
log.webelem.debug('Filling {!r} via set_text.'.format(self))
assert isinstance(value, str)
self._elem.setPlainText(value)
else:
log.webelem.debug('Filling {!r} via javascript.'.format(self))
value = javascript.to_js(value)
self._elem.evaluateJavaScript('this.value={}'.format(value))
def dispatch_event(self, event: str, bubbles: bool=False, cancelable: bool=False, composed: bool=False) -> None:
self._check_vanished()
log.webelem.debug('Firing event on {!r} via javascript.'.format(self))
self._elem.evaluateJavaScript("this.dispatchEvent(new Event({}, {{'bubbles': {}, 'cancelable': {}, 'composed': {}}}))".format(javascript.to_js(event), javascript.to_js(bubbles), javascript.to_js(cancelable), javascript.to_js(composed)))
def caret_position(self) -> int:
self._check_vanished()
pos = self._elem.evaluateJavaScript('this.selectionStart')
if (pos is None):
return 0
return int(pos)
def insert_text(self, text: str) -> None:
self._check_vanished()
if (not self.is_editable(strict=True)):
raise webelem.Error('Element is not editable!')
log.webelem.debug('Inserting text into element {!r}'.format(self))
self._elem.evaluateJavaScript('\n var text = {};\n var event = document.createEvent("TextEvent");\n event.initTextEvent("textInput", true, true, null, text);\n this.dispatchEvent(event);\n '.format(javascript.to_js(text)))
def _parent(self) -> Optional['WebKitElement']:
self._check_vanished()
elem = cast(Optional[QWebElement], self._elem.parent())
if ((elem is None) or elem.isNull()):
return None
return WebKitElement(elem, tab=self._tab)
def _rect_on_view_js(self) -> Optional[QRect]:
rects = self._elem.evaluateJavaScript('this.getClientRects()')
if (rects is None):
return None
text = utils.compact_text(self._elem.toOuterXml(), 500)
log.webelem.vdebug("Client rectangles of element '{}': {}".format(text, rects))
for i in range(int(rects.get('length', 0))):
rect = rects[str(i)]
width = rect.get('width', 0)
height = rect.get('height', 0)
if ((width > 1) and (height > 1)):
zoom = self._elem.webFrame().zoomFactor()
if (not config.val.zoom.text_only):
rect['left'] *= zoom
rect['top'] *= zoom
width *= zoom
height *= zoom
rect = QRect(int(rect['left']), int(rect['top']), int(width), int(height))
frame = cast(Optional[QWebFrame], self._elem.webFrame())
while (frame is not None):
rect.translate(frame.geometry().topLeft())
frame = frame.parentFrame()
return rect
return None
def _rect_on_view_python(self, elem_geometry: Optional[QRect]) -> QRect:
if (elem_geometry is None):
geometry = self._elem.geometry()
else:
geometry = elem_geometry
rect = QRect(geometry)
frame = cast(Optional[QWebFrame], self._elem.webFrame())
while (frame is not None):
rect.translate(frame.geometry().topLeft())
rect.translate((frame.scrollPosition() * (- 1)))
frame = cast(Optional[QWebFrame], frame.parentFrame())
return rect
def rect_on_view(self, *, elem_geometry: QRect=None, no_js: bool=False) -> QRect:
self._check_vanished()
if ((elem_geometry is None) and (not no_js)):
rect = self._rect_on_view_js()
if (rect is not None):
return rect
return self._rect_on_view_python(elem_geometry)
def _is_hidden_css(self) -> bool:
attr_values = {attr: self._elem.styleProperty(attr, QWebElement.StyleResolveStrategy.ComputedStyle) for attr in ['visibility', 'display', 'opacity']}
invisible = (attr_values['visibility'] == 'hidden')
none_display = (attr_values['display'] == 'none')
zero_opacity = (attr_values['opacity'] == '0')
is_framework = (('ace_text-input' in self.classes()) or ('custom-control-input' in self.classes()))
return (invisible or none_display or (zero_opacity and (not is_framework)))
def _is_visible(self, mainframe: QWebFrame) -> bool:
self._check_vanished()
if self._is_hidden_css():
return False
elem_geometry = self._elem.geometry()
if ((not elem_geometry.isValid()) and (elem_geometry.x() == 0)):
return False
elem_rect = self.rect_on_view(elem_geometry=elem_geometry)
mainframe_geometry = mainframe.geometry()
if elem_rect.isValid():
visible_on_screen = mainframe_geometry.intersects(elem_rect)
else:
visible_on_screen = mainframe_geometry.contains(elem_rect.topLeft())
elem_frame = self._elem.webFrame()
framegeom = QRect(elem_frame.geometry())
if (not framegeom.isValid()):
visible_in_frame = False
elif (elem_frame.parentFrame() is not None):
framegeom.moveTo(0, 0)
framegeom.translate(elem_frame.scrollPosition())
if elem_geometry.isValid():
visible_in_frame = framegeom.intersects(elem_geometry)
else:
visible_in_frame = framegeom.contains(elem_geometry.topLeft())
else:
visible_in_frame = visible_on_screen
return all([visible_on_screen, visible_in_frame])
def remove_blank_target(self) -> None:
elem: Optional[WebKitElement] = self
for _ in range(5):
if (elem is None):
break
if elem.is_link():
if (elem.get('target', None) == '_blank'):
elem['target'] = '_top'
break
elem = elem._parent()
    def delete(self) -> None:
        """Remove this element from the DOM via JavaScript."""
        self._elem.evaluateJavaScript('this.remove();')
def _move_text_cursor(self) -> None:
if (self.is_text_input() and self.is_editable()):
self._tab.caret.move_to_end_of_document()
    def _requires_user_interaction(self) -> bool:
        """Clicks on this backend never need real user interaction."""
        return False
def _click_editable(self, click_target: usertypes.ClickTarget) -> None:
ok = self._elem.evaluateJavaScript('this.focus(); true;')
if ok:
self._move_text_cursor()
else:
log.webelem.debug('Failed to focus via JS, falling back to event')
self._click_fake_event(click_target)
def _click_js(self, click_target: usertypes.ClickTarget) -> None:
settings = QWebSettings.globalSettings()
attribute = QWebSettings.WebAttribute.JavascriptCanOpenWindows
could_open_windows = settings.testAttribute(attribute)
settings.setAttribute(attribute, True)
ok = self._elem.evaluateJavaScript('this.click(); true;')
settings.setAttribute(attribute, could_open_windows)
if (not ok):
log.webelem.debug('Failed to click via JS, falling back to event')
self._click_fake_event(click_target)
    def _click_fake_event(self, click_target: usertypes.ClickTarget, button: Qt.MouseButton=Qt.MouseButton.LeftButton) -> None:
        """Send a fake click event to the element.

        The target is stashed on the tab first -- presumably consulted by
        the tab's navigation handling when the click triggers a navigation;
        confirm against the tab implementation.
        """
        self._tab.data.override_target = click_target
        super()._click_fake_event(click_target)
class RegexUserAgentParser(UserAgentParser):
    """Parse a user-agent string by trying a sequence of regexes.

    The first regex that matches wins; its captured groups are forwarded
    to ``handler`` -- named groups as keyword arguments, unnamed groups as
    positional arguments, in match order.
    """

    def __init__(self, regexes, handler, *, name=None):
        # Accept both pre-compiled patterns and plain strings.
        self._regexes = [
            regex if not isinstance(regex, str) else re.compile(regex)
            for regex in regexes
        ]
        self._handler = handler
        self._name = handler.__name__ if name is None else name

    def name(self):
        """Return the parser's display name."""
        return self._name

    def __call__(self, user_agent):
        match = None
        for pattern in self._regexes:
            match = pattern.search(user_agent)
            if match is not None:
                break
        if match is None:
            raise UnableToParse
        # Invert groupindex so each positional group index maps back to its
        # name (if it has one).
        index_to_name = {index: group for (group, index) in match.re.groupindex.items()}
        positional, named = [], {}
        for position, captured in enumerate(match.groups(), start=1):
            group_name = index_to_name.get(position)
            if group_name is None:
                positional.append(captured)
            else:
                named[group_name] = captured
        return self._handler(*positional, **named)
# NOTE(review): the leading `.supported(...)` looks like a decorator whose
# `@pytest.mark` prefix was lost in extraction -- confirm against the
# original file.
.supported(only_if=(lambda backend: backend.hash_supported(hashes.BLAKE2b(digest_size=64))), skip_message='Does not support BLAKE2b')
class TestBLAKE2b():
    """Tests for the BLAKE2b hash with the fixed 64-byte digest."""
    # Standard base hash test battery, parametrized for BLAKE2b-512.
    test_blake2b = generate_base_hash_test(hashes.BLAKE2b(digest_size=64), digest_size=64)
    def test_invalid_digest_size(self, backend):
        """Digest sizes other than 64 must be rejected with ValueError."""
        with pytest.raises(ValueError):
            hashes.BLAKE2b(digest_size=65)
        with pytest.raises(ValueError):
            hashes.BLAKE2b(digest_size=0)
        with pytest.raises(ValueError):
            hashes.BLAKE2b(digest_size=(- 1))
def test_classes_reordered(ourtester):
    """With a fixed seed, TestCase classes run in a shuffled (non-source) order."""
    ourtester.makepyfile(test_one='\n from unittest import TestCase\n\n\n class A(TestCase):\n def test_a(self):\n pass\n\n\n class B(TestCase):\n def test_b(self):\n pass\n\n\n class C(TestCase):\n def test_c(self):\n pass\n\n\n class D(TestCase):\n def test_d(self):\n pass\n ')
    # Seed 15 is known to produce the D, B, C, A ordering asserted below.
    args = ['-v', '--randomly-seed=15']
    out = ourtester.runpytest(*args)
    out.assert_outcomes(passed=4, failed=0)
    # Lines 9-13 of the verbose output are the four per-test result lines.
    assert (out.outlines[9:13] == ['test_one.py::D::test_d PASSED', 'test_one.py::B::test_b PASSED', 'test_one.py::C::test_c PASSED', 'test_one.py::A::test_a PASSED'])
def test_bwlimit():
    """Setting ch_1.bwlimit sends BWL commands and reads the state back."""
    # expected_protocol pins the exact command/response exchange with the
    # scope: header off, then set/query OFF, then set/query ON.
    with expected_protocol(LeCroyT3DSO1204, [(b'CHDR OFF', None), (b'BWL C1,OFF', None), (b'C1:BWL?', b'OFF'), (b'BWL C1,ON', None), (b'C1:BWL?', b'ON')]) as instr:
        instr.ch_1.bwlimit = False
        assert (instr.ch_1.bwlimit is False)
        instr.ch_1.bwlimit = True
        assert (instr.ch_1.bwlimit is True)
class RunEvaluationTest(tf.test.TestCase):
    """Tests for run_evaluation.Evaluator: worker sharding, regex filtering,
    repeats, model-dir cleanup and recovery-file handling.

    NOTE(review): the bare `.mock.patch.object(run_evaluation, 'evaluate')`
    lines below look like decorators whose `@tf.test` prefix was lost in
    extraction -- confirm against the original file.
    """
    def setUp(self):
        """Write a toy models file and pre-create per-model eval dirs."""
        self.output_dir = tempfile.mkdtemp()
        self.models_file = os.path.join(self.output_dir, 'models_file.json')
        # Three toy models ('abc', 'abd', 'abe'), each an (adjacency, ops) pair.
        self.toy_data = {'abc': ([[0, 1, 1], [0, 0, 1], [0, 0, 0]], [(- 1), 0, (- 2)]), 'abd': ([[0, 1, 0], [0, 0, 1], [0, 0, 0]], [(- 1), 0, (- 2)]), 'abe': ([[0, 0, 1], [0, 0, 0], [0, 0, 0]], [(- 1), 0, (- 2)])}
        with tf.gfile.Open(self.models_file, 'w') as f:
            json.dump(self.toy_data, f)
        for model_id in self.toy_data:
            eval_dir = os.path.join(self.output_dir, 'ab', model_id, 'repeat_1')
            tf.gfile.MakeDirs(eval_dir)
        # Flags are required by the Evaluator but unused once evaluate is mocked.
        run_evaluation.FLAGS.train_data_files = 'unused'
        run_evaluation.FLAGS.valid_data_file = 'unused'
        run_evaluation.FLAGS.test_data_file = 'unused'
        run_evaluation.FLAGS.num_repeats = 1
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_evaluate_single_worker(self, mock_eval):
        """A single worker evaluates every model and writes results.json."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir)
        evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abc', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abd', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_1'))])
        for model_id in self.toy_data:
            self.assertTrue(tf.gfile.Exists(os.path.join(expected_dir, model_id, 'repeat_1', 'results.json')))
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_evaluate_multi_worker_0(self, mock_eval):
        """Worker 0 of 2 gets models 'abc' and 'abe' (its shard)."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir, worker_id=0, total_workers=2)
        evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abc', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_1'))])
        for model_id in ['abc', 'abe']:
            self.assertTrue(tf.gfile.Exists(os.path.join(expected_dir, model_id, 'repeat_1', 'results.json')))
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_evaluate_multi_worker_1(self, mock_eval):
        """Worker 1 of 2 gets only model 'abd'."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir, worker_id=1, total_workers=2)
        evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abd', 'repeat_1'))])
        self.assertTrue(tf.gfile.Exists(os.path.join(expected_dir, 'abd', 'repeat_1', 'results.json')))
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_evaluate_regex(self, mock_eval):
        """model_id_regex restricts evaluation to matching model ids."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir, model_id_regex='^ab(d|e)')
        evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abd', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_1'))])
        for model_id in ['abd', 'abe']:
            self.assertTrue(tf.gfile.Exists(os.path.join(expected_dir, model_id, 'repeat_1', 'results.json')))
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_evaluate_repeat(self, mock_eval):
        """num_repeats=2 evaluates each model once per repeat directory."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        for model_id in self.toy_data:
            eval_dir = os.path.join(self.output_dir, 'ab', model_id, 'repeat_2')
            tf.gfile.MakeDirs(eval_dir)
        with flagsaver.flagsaver(num_repeats=2):
            evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir)
            evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abc', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abd', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_1')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abc', 'repeat_2')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abd', 'repeat_2')), tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_2'))])
        for model_id in self.toy_data:
            for repeat in range(2):
                self.assertTrue(tf.gfile.Exists(os.path.join(expected_dir, model_id, ('repeat_%d' % (repeat + 1)), 'results.json')))
    def test_clean_model_dir(self):
        """_clean_model_dir keeps checkpoints/results but removes other files."""
        model_dir = os.path.join(self.output_dir, 'ab', 'abcde', 'repeat_1')
        tf.gfile.MakeDirs(model_dir)
        preserved_files = ['model.ckpt-0.index', 'model.ckpt-100.index', 'results.json']
        for filename in preserved_files:
            with tf.gfile.Open(os.path.join(model_dir, filename), 'w') as f:
                f.write('unused')
        # Files and subdirs that should be deleted by the cleanup.
        for filename in ['checkpoint', 'events.out.tfevents']:
            with tf.gfile.Open(os.path.join(model_dir, filename), 'w') as f:
                f.write('unused')
        eval_dir = os.path.join(model_dir, 'eval_dir')
        tf.gfile.MakeDirs(eval_dir)
        with tf.gfile.Open(os.path.join(eval_dir, 'events.out.tfevents'), 'w') as f:
            f.write('unused')
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir)
        evaluator._clean_model_dir(model_dir)
        remaining_files = tf.gfile.ListDirectory(model_dir)
        self.assertItemsEqual(remaining_files, preserved_files)
    .mock.patch.object(run_evaluation, 'evaluate')
    def test_recovery_file(self, mock_eval):
        """A recovery file with index 2 skips the first two models."""
        mock_eval.train_and_evaluate.return_value = 'unused_output'
        recovery_dir = os.path.join(self.output_dir, '_recovery')
        tf.gfile.MakeDirs(recovery_dir)
        with tf.gfile.Open(os.path.join(recovery_dir, '0'), 'w') as f:
            f.write('2')
        evaluator = run_evaluation.Evaluator(self.models_file, self.output_dir)
        evaluator.run_evaluation()
        expected_dir = os.path.join(self.output_dir, 'ab')
        mock_eval.train_and_evaluate.assert_has_calls([tf.test.mock.call(tf.test.mock.ANY, tf.test.mock.ANY, os.path.join(expected_dir, 'abe', 'repeat_1'))])
        call_args = mock_eval.train_and_evaluate.call_args_list
        self.assertEqual(len(call_args), 1)
        # The recovery index should have advanced past the last model.
        with tf.gfile.Open(evaluator.recovery_file) as f:
            new_idx = int(f.read())
        self.assertEqual(new_idx, 3)
class TestEventHandler(PyScriptTest):
    """Integration tests for the pyscript ``when`` event decorator.

    NOTE(review): inside the embedded page sources, the bare
    ``("click", selector=...)`` lines look like ``@when(...)`` decorators
    whose ``@when`` prefix was lost in extraction -- confirm against the
    original file.
    """
    def test_when_decorator_with_event(self):
        """Handler taking an event argument receives the DOM event."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n def foo(evt):\n print(f"clicked {evt.target.id}")\n </script>\n ')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('clicked foo_id')
        self.assert_no_banners()
    def test_when_decorator_without_event(self):
        """Handler with no parameters is still called on the event."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n def foo():\n print("The button was clicked")\n </script>\n ')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('The button was clicked')
        self.assert_no_banners()
    def test_multiple_when_decorators_with_event(self):
        """Two decorated handlers on different elements fire independently."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <button id="bar_id">bar_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n def foo_click(evt):\n print(f"foo_click! id={evt.target.id}")\n ("click", selector="#bar_id")\n def bar_click(evt):\n print(f"bar_click! id={evt.target.id}")\n </script>\n ')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('foo_click! id=foo_id')
        self.page.locator('text=bar_button').click()
        self.wait_for_console('bar_click! id=bar_id')
        self.assert_no_banners()
    def test_two_when_decorators(self):
        """One handler stacked with two decorators serves both events."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <button class="bar_class">bar_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n ("mouseover", selector=".bar_class")\n def foo(evt):\n print(f"got event: {evt.type}")\n </script>\n ')
        self.page.locator('text=bar_button').hover()
        self.wait_for_console('got event: mouseover')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('got event: click')
        self.assert_no_banners()
    def test_two_when_decorators_same_element(self):
        """Two event types on the same element both reach the handler."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n ("mouseover", selector="#foo_id")\n def foo(evt):\n print(f"got event: {evt.type}")\n </script>\n ')
        self.page.locator('text=foo_button').hover()
        self.wait_for_console('got event: mouseover')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('got event: click')
        self.assert_no_banners()
    def test_when_decorator_multiple_elements(self):
        """A class selector binds the handler to every matching element."""
        self.pyscript_run('\n <button class="bar_class">button1</button>\n <button class="bar_class">button2</button>\n <script type="py">\n from pyscript import when\n ("click", selector=".bar_class")\n def foo(evt):\n print(f"{evt.target.innerText} was clicked")\n </script>\n ')
        self.page.locator('text=button1').click()
        self.page.locator('text=button2').click()
        self.wait_for_console('button2 was clicked')
        assert ('button1 was clicked' in self.console.log.lines)
        assert ('button2 was clicked' in self.console.log.lines)
        self.assert_no_banners()
    def test_when_decorator_duplicate_selectors(self):
        """Duplicate decorators register the handler twice (two clicks logged)."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#foo_id")\n ("click", selector="#foo_id")\n def foo(evt):\n foo.n += 1\n print(f"click {foo.n} on {evt.target.id}")\n foo.n = 0\n </script>\n ')
        self.page.locator('text=foo_button').click()
        self.wait_for_console('click 1 on foo_id')
        self.wait_for_console('click 2 on foo_id')
        self.assert_no_banners()
    _worker('NEXT: error banner not shown')
    def test_when_decorator_invalid_selector(self):
        """An invalid CSS selector surfaces the querySelectorAll error."""
        self.pyscript_run('\n <button id="foo_id">foo_button</button>\n <script type="py">\n from pyscript import when\n ("click", selector="#.bad")\n def foo(evt):\n ...\n </script>\n ')
        self.page.locator('text=foo_button').click()
        msg = "Failed to execute 'querySelectorAll' on 'Document': '#.bad' is not a valid selector."
        error = self.page.wait_for_selector('.py-error')
        banner_text = error.inner_text()
        if (msg not in banner_text):
            raise AssertionError(f"Expected message '{msg}' does not match banner text '{banner_text}'")
        assert (msg in self.console.error.lines[(- 1)])
        self.check_py_errors(msg)
def as_index_variable(idx):
    """Coerce ``idx`` into a variable usable as an index.

    ``None`` becomes a clone of ``NoneConst``; ``slice`` objects become
    slice variables; variables already typed as slice or none pass through
    unchanged; anything else must convert to a tensor with a discrete
    (integer or boolean) dtype.

    Raises:
        TypeError: If the converted tensor is not integer/boolean typed.
    """
    if idx is None:
        return NoneConst.clone()
    if isinstance(idx, slice):
        return make_slice(idx)
    if isinstance(idx, Variable) and isinstance(idx.type, (SliceType, NoneTypeT)):
        return idx
    idx = as_tensor_variable(idx)
    if idx.type.dtype not in discrete_dtypes:
        raise TypeError('index must be integers or a boolean mask')
    return idx
def _bottleneck_block_v2(inputs, filters, training, projection_shortcut, strides, data_format):
    """A single bottleneck block, ResNet v2 (pre-activation) variant.

    Args:
        inputs: Input tensor.
        filters: Filter count of the two inner convolutions; the final 1x1
            conv expands to 4 * filters.
        training: Whether batch norm runs in training mode.
        projection_shortcut: Callable producing the shortcut projection, or
            None to use the identity shortcut.
        strides: Stride of the middle 3x3 convolution.
        data_format: 'channels_first' or 'channels_last'.

    Returns:
        The block output: residual branch plus shortcut.
    """
    shortcut = inputs
    # Pre-activation: BN + ReLU come before the convolutions, and the
    # projection shortcut (if any) sees the pre-activated inputs.
    inputs = batch_norm(inputs, training, data_format)
    inputs = tf.nn.relu(inputs)
    if (projection_shortcut is not None):
        shortcut = projection_shortcut(inputs)
    inputs = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)
    inputs = tf.nn.relu(inputs)
    inputs = conv2d_fixed_padding(inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format)
    inputs = batch_norm(inputs, training, data_format)
    inputs = tf.nn.relu(inputs)
    inputs = conv2d_fixed_padding(inputs=inputs, filters=(4 * filters), kernel_size=1, strides=1, data_format=data_format)
    return (inputs + shortcut)
def assert_payment_secret_and_hash(response, payment):
    """Assert that a payment response carries a consistent secret and hash.

    Checks the response has exactly seven fields including 'secret' and
    'secret_hash', that the secret has the expected length, the amount
    matches the request, and the hash is the sha256 of the secret.
    """
    assert (len(response) == 7)
    assert ('secret' in response)
    assert ('secret_hash' in response)
    secret = Secret(to_bytes(hexstr=response['secret']))
    assert (len(secret) == SECRET_LENGTH)
    assert (payment['amount'] == response['amount'])
    assert (to_bytes(hexstr=response['secret_hash']) == sha256_secrethash(secret))
def s_y_operator(num_spatial_orbitals: int, overlap: (np.ndarray | None)=None) -> FermionicOp:
    """Construct the S_y spin operator as a FermionicOp.

    Args:
        num_spatial_orbitals: Number of spatial orbitals (alpha and beta
            spin orbitals are indexed 0..N-1 and N..2N-1).
        overlap: Optional alpha-beta orbital overlap matrix; when given,
            S_y is built as -i/2 * (S+ - S-) with the overlap applied.

    Returns:
        The S_y operator.
    """
    if (overlap is None):
        # Direct construction: couples each orbital with its opposite-spin
        # partner; the sign alternates between the alpha and beta halves.
        op = FermionicOp({f'+_{orb} -_{((orb + num_spatial_orbitals) % (2 * num_spatial_orbitals))}': (0.5j * ((- 1.0) ** (orb < num_spatial_orbitals))) for orb in range((2 * num_spatial_orbitals))})
    else:
        op = ((- 0.5j) * (s_plus_operator(num_spatial_orbitals, overlap) - s_minus_operator(num_spatial_orbitals, overlap.T)))
    return op
def run(args):
    """Read per-utterance CTM entries, resolve overlaps per recording, and
    write a merged CTM output.

    Args:
        args: Parsed arguments providing segments, ctm_in and ctm_out.
    """
    (segments, reco2utt) = read_segments(args.segments)
    ctms = read_ctm(args.ctm_in, segments)
    for (reco, utts) in reco2utt.items():
        ctms_for_reco = []
        # Process utterances in order of their start time within the recording.
        for utt in sorted(utts, key=(lambda x: segments[x][1])):
            if ((reco, utt) in ctms):
                ctms_for_reco.append(ctms[(reco, utt)])
        if (len(ctms_for_reco) == 0):
            logger.info('CTM for recording {0} was empty'.format(reco))
            continue
        try:
            ctms_for_reco = resolve_overlaps(ctms_for_reco, segments)
            write_ctm(ctms_for_reco, args.ctm_out)
        except Exception:
            logger.error('Failed to process CTM for recording %s', reco)
            raise
    args.ctm_out.close()
    # NOTE(review): ctms is keyed by (reco, utt) pairs, so len(ctms) counts
    # utterances, not recordings -- confirm whether that is intended here.
    logger.info('Wrote CTM for %d recordings.', len(ctms))
def nrl_colors(img, **kwargs):
    """Palettize an image with the NRL total-precipitable-water colormap.

    The table maps TPW values (mm, 0..75) to RGB triples; it is merged into
    the caller-provided ``kwargs['palettes']`` before building the Colormap.
    """
    nrl_tpw_colors = {'colors': [[0.0, [188, 132, 98]], [0., [188, 130, 99]], [0., [187, 128, 100]], [0., [186, 125, 101]], [1., [185, 124, 102]], [1., [184, 122, 103]], [1., [183, 120, 103]], [1., [182, 119, 104]], [2., [182, 118, 106]], [2., [181, 116, 107]], [2., [180, 114, 108]], [3., [179, 114, 108]], [3., [178, 113, 109]], [3., [177, 111, 110]], [3., [177, 110, 111]], [4., [176, 108, 111]], [4., [176, 106, 110]], [4., [175, 104, 110]], [4., [174, 103, 111]], [5., [174, 101, 111]], [5., [173, 99, 111]], [5., [172, 97, 111]], [6., [171, 95, 112]], [6., [171, 93, 112]], [6., [171, 91, 113]], [6., [170, 90, 113]], [7., [169, 88, 114]], [7., [169, 86, 114]], [7., [168, 85, 115]], [7., [167, 83, 115]], [8., [166, 81, 116]], [8., [166, 80, 118]], [8., [165, 78, 119]], [9., [165, 76, 120]], [9., [164, 75, 120]], [9., [164, 74, 121]], [9., [163, 72, 123]], [10., [162, 70, 124]], [10., [161, 69, 125]], [10., [160, 67, 126]], [10., [160, 66, 128]], [11., [159, 64, 130]], [11., [159, 63, 131]], [11., [158, 61, 132]], [12., [158, 60, 134]], [12., [157, 58, 136]], [12., [156, 57, 137]], [12., [155, 56, 139]], [13., [155, 54, 141]], [13., [154, 52, 142]], [13., [154, 52, 144]], [14., [153, 50, 146]], [14., [153, 49, 148]], [14., [152, 47, 150]], [14., [150, 46, 151]], [15., [147, 45, 150]], [15., [144, 44, 150]], [15., [142, 44, 152]], [15., [138, 48, 156]], [16., [135, 50, 159]], [16., [132, 52, 161]], [16., [131, 56, 164]], [17., [126, 60, 168]], [17., [123, 62, 171]], [17., [121, 65, 173]], [17., [117, 69, 177]], [18., [114, 71, 180]], [18., [111, 74, 182]], [18., [109, 77, 185]], [18., [104, 82, 190]], [19., [101, 84, 193]], [19., [98, 86, 195]], [19., [96, 89, 198]], [20., [93, 92, 200]], [20., [90, 95, 204]], [20., [87, 98, 207]], [20., [83, 103, 211]], [21., [80, 105, 214]], [21., [77, 108, 216]], [21., [74, 110, 220]], [21., [71, 114, 222]], [22., [68, 116, 225]], [22., [65, 120, 228]], [22., [61, 125, 233]], [23., [57, 127, 235]], [23., [55, 130, 239]], [23., [52, 133, 
242]], [23., [49, 137, 245]], [24., [47, 139, 247]], [24., [44, 142, 250]], [24., [40, 146, 255]], [25., [40, 148, 255]], [25., [42, 150, 255]], [25., [46, 154, 255]], [25., [50, 158, 255]], [26., [52, 159, 255]], [26., [55, 163, 255]], [26., [59, 167, 255]], [26., [61, 169, 255]], [27., [65, 173, 255]], [27., [70, 178, 255]], [27., [73, 182, 255]], [28., [76, 185, 255]], [28., [79, 188, 255]], [28., [82, 192, 255]], [28., [86, 195, 255]], [29., [88, 199, 255]], [29., [91, 201, 255]], [29., [95, 205, 255]], [29., [97, 207, 255]], [30., [101, 210, 255]], [30., [104, 213, 255]], [30., [107, 216, 255]], [31., [110, 218, 255]], [31., [114, 222, 255]], [31., [115, 223, 255]], [31., [119, 227, 255]], [32., [123, 231, 255]], [32., [125, 233, 255]], [32., [127, 236, 255]], [32., [133, 241, 255]], [33., [136, 244, 255]], [33., [139, 247, 255]], [33., [143, 252, 255]], [34., [145, 254, 255]], [34., [148, 255, 254]], [34., [148, 255, 247]], [34., [148, 255, 241]], [35., [148, 255, 235]], [35., [148, 255, 229]], [35., [148, 255, 223]], [35., [148, 255, 217]], [36., [148, 255, 210]], [36., [148, 255, 205]], [36., [148, 255, 199]], [37., [148, 255, 193]], [37., [148, 255, 187]], [37., [148, 255, 181]], [37., [148, 255, 174]], [38., [148, 255, 168]], [38., [148, 255, 162]], [38., [148, 255, 156]], [39., [148, 255, 150]], [39., [151, 255, 148]], [39., [157, 255, 148]], [39., [163, 255, 148]], [40., [169, 255, 148]], [40., [175, 255, 148]], [40., [181, 255, 148]], [40., [188, 255, 148]], [41., [197, 255, 148]], [41., [203, 255, 148]], [41., [209, 255, 148]], [42., [215, 255, 148]], [42., [221, 255, 148]], [42., [227, 255, 148]], [42., [233, 255, 148]], [43., [239, 255, 148]], [43., [244, 255, 148]], [43., [250, 255, 148]], [43., [254, 254, 146]], [44., [255, 251, 143]], [44., [255, 249, 141]], [44., [255, 247, 139]], [45., [255, 242, 134]], [45., [255, 239, 131]], [45., [255, 236, 128]], [45., [255, 233, 125]], [46., [255, 231, 122]], [46., [255, 227, 120]], [46., [255, 225, 117]], 
[46., [255, 221, 113]], [47., [255, 218, 110]], [47., [255, 216, 108]], [47., [255, 211, 103]], [48., [255, 209, 101]], [48., [255, 206, 98]], [48., [255, 204, 96]], [48., [255, 199, 91]], [49., [255, 196, 87]], [49., [255, 193, 85]], [49., [255, 191, 82]], [50., [255, 188, 80]], [50., [255, 185, 77]], [50., [255, 182, 74]], [50., [255, 179, 70]], [51., [255, 176, 68]], [51., [255, 173, 64]], [51., [255, 171, 61]], [51., [255, 167, 58]], [52., [255, 164, 55]], [52., [255, 161, 52]], [52., [255, 158, 49]], [53., [255, 154, 46]], [53., [255, 151, 42]], [53., [255, 148, 40]], [53., [252, 144, 39]], [54., [249, 140, 39]], [54., [246, 136, 39]], [54., [243, 132, 39]], [54., [240, 128, 39]], [55., [237, 125, 39]], [55., [234, 121, 39]], [55., [231, 118, 39]], [56., [227, 114, 39]], [56., [225, 111, 39]], [56., [222, 108, 39]], [56., [219, 104, 39]], [57., [216, 101, 39]], [57., [213, 97, 39]], [57., [210, 95, 39]], [57., [206, 91, 39]], [58., [204, 89, 39]], [58., [200, 86, 39]], [58., [198, 83, 39]], [59., [194, 80, 39]], [59., [192, 78, 39]], [59., [188, 75, 39]], [59., [185, 73, 39]], [60., [182, 70, 39]], [60., [179, 68, 39]], [60., [176, 66, 39]], [60., [173, 63, 39]], [61., [171, 62, 39]], [61., [169, 59, 39]], [61., [167, 57, 40]], [62., [165, 56, 40]], [62., [165, 54, 40]], [62., [163, 52, 40]], [62., [161, 50, 41]], [63., [159, 48, 42]], [63., [159, 47, 42]], [63., [157, 46, 43]], [64., [155, 44, 43]], [64., [154, 44, 45]], [64., [156, 45, 48]], [64., [157, 46, 52]], [65., [159, 48, 55]], [65., [160, 50, 58]], [65., [162, 52, 62]], [65., [164, 53, 65]], [66., [165, 55, 69]], [66., [167, 57, 72]], [66., [169, 59, 76]], [67., [171, 61, 80]], [67., [172, 63, 83]], [67., [174, 65, 87]], [67., [176, 67, 91]], [68., [177, 69, 95]], [68., [179, 71, 98]], [68., [181, 73, 102]], [68., [182, 75, 106]], [69., [184, 78, 109]], [69., [186, 80, 114]], [69., [188, 82, 117]], [70., [189, 85, 121]], [70., [191, 87, 125]], [70., [193, 90, 129]], [70., [194, 92, 132]], [71., [196, 
95, 137]], [71., [198, 97, 140]], [71., [199, 100, 144]], [71., [201, 103, 148]], [72., [203, 105, 152]], [72., [205, 108, 155]], [72., [206, 110, 159]], [73., [208, 114, 163]], [73., [210, 116, 167]], [73., [211, 120, 171]], [73., [213, 122, 174]], [74., [215, 125, 178]], [74., [216, 128, 182]], [74., [218, 131, 185]], [75.0, [220, 135, 189]]]}
    kwargs['palettes'].update(nrl_tpw_colors)
    palette = kwargs['palettes']
    # Colormap expects hashable (tuple) entries, so convert the lists.
    palette['colors'] = tuple(map(tuple, palette['colors']))
    cm = Colormap(*palette['colors'])
    img.palettize(cm)
class UpdaterProcess(BaseProcess):
    """Worker process which fetches bot updates and enqueues jobs for them."""
    name = 'Updater'
    def setup(self, bot, commands):
        """Store the bot, its command queue and an updates fetcher."""
        self.bot = bot
        self.bot_id = bot._bot_id
        self.commands = commands
        self.fetcher = updates_module.UpdatesFetcher(bot)
    def should_stop(self):
        """Check (non-blocking) whether a 'stop' command was queued."""
        try:
            command = self.commands.get(False)
        except queue.Empty:
            val = False
        else:
            val = (command == 'stop')
        self.stop = val
        return val
    def loop(self):
        """One iteration: fetch updates and dispatch them as jobs."""
        if self.should_stop():
            return
        try:
            updates = self.fetcher.fetch()
        except updates_module.AnotherInstanceRunningError:
            self.handle_another_instance()
            return
        except api.APIError as e:
            # API errors are logged and the iteration is skipped; the next
            # loop() call retries.
            self.logger.error('An error occured while fetching updates!')
            self.logger.debug(('Exception type: %s' % e.__class__.__name__))
            self.logger.debug(('Exception content: %s' % str(e)))
            return
        if (not updates):
            return
        result = []
        for update in updates:
            # Detach the API object so the update can be pickled for IPC.
            update.set_api(None)
            data = {'update': update}
            result.append(jobs.Job(self.bot_id, jobs.process_update, data))
        self.ipc.command('jobs.bulk_put', result)
    def handle_another_instance(self):
        """Warn about a competing bot instance and block until it goes away."""
        self.logger.error('Another instance of this bot is running!')
        self.logger.error('Please close any other instance of the bot, and this one will start working again')
        self.logger.error("If you can't find other instances just revoke the API token")
        result = self.fetcher.block_until_alone(when_stop=self.should_stop)
        if result:
            self.logger.info('This instance is now the only one. The bot is working again')
class WMS_GLAD(WMSBase):
    """WMS container exposing the GLAD layers of a Maps object."""

    layer_prefix = 'GLAD_'
    name = 'GLAD'

    def __init__(self, m=None):
        self.m = m
        try:
            # Public attribute names on the add_layer accessor are the
            # available layer ids; skip the 'm' backref and private names.
            available = self.m.add_wms.GLAD.add_layer.__dict__.keys()
            self.wmslayers = [key for key in available
                              if key not in ['m'] and not key.startswith('_')]
        except Exception:
            self.wmslayers = []
            _log_problem(self.name)

    def do_add_layer(self, wmslayer, layer):
        """Add the given GLAD WMS layer to the map layer."""
        wms = getattr(self.m.add_wms.GLAD.add_layer, wmslayer)
        wms(layer=layer, transparent=True)
        self.ask_for_legend(wms, wmslayer)
def _init_gst():
    """Initialize GStreamer, guarding against pygst/overrides interference.

    Must run before anything imports gi.repository.Gst; points the plugin
    registry at a per-platform cache file and initializes Gst from argv.
    """
    # Separate registry caches for 32- and 64-bit interpreters.
    arch_key = ('64' if (sys.maxsize > (2 ** 32)) else '32')
    registry_name = f'gst-registry-{sys.platform}-{arch_key}.bin'
    os.environ['GST_REGISTRY'] = os.path.join(get_cache_dir(), registry_name)
    assert ('gi.repository.Gst' not in sys.modules)
    import gi
    # Block the gi Gst overrides and legacy pygst by pre-seeding sys.modules
    # with None before the real import happens.
    assert ('gi.overrides.Gst' not in sys.modules)
    sys.modules['gi.overrides.Gst'] = None
    sys.modules['gst'] = None
    try:
        gi.require_version('Gst', '1.0')
        from gi.repository import Gst
    except (ValueError, ImportError):
        # GStreamer not available; leave it uninitialized.
        return
    if Gst.is_initialized():
        return
    from gi.repository import GLib
    try:
        # init_check consumes GStreamer's own command-line options from argv.
        (ok, sys.argv[:]) = Gst.init_check(sys.argv)
    except GLib.GError:
        print_e('Failed to initialize GStreamer')
        # Mark Gst as unusable for later importers.
        sys.modules['gi.repository.Gst'] = None
    else:
        _fix_gst_leaks()
def test_method_dynamic_instance_attr_6() -> None:
    """An attribute only ever set via another method cannot be inferred."""
    node = builder.extract_node('\n class A:\n # Note: no initializer, so the only assignment happens in get_x\n\n def get_x(self, x):\n self.set_x(x + 1)\n return self.x\n\n def set_x(self, x):\n self.x = x\n\n A().get_x(1) #\n ')
    assert isinstance(node, nodes.NodeNG)
    inferred = node.inferred()
    assert (len(inferred) == 1)
    # The call chain is too dynamic to resolve, so inference gives up.
    assert (inferred[0] is Uninferable)
def processData(live=False, quiet=False):
    """Parse the captured trace/dmesg data and generate the HTML timeline.

    Args:
        live: Passed through to parseTraceLog for live-capture handling.
        quiet: Suppress the start/done progress messages.

    Returns:
        A (testruns, stamp) tuple; on failure stamp is a dict with an
        'error' entry instead of the parsed stamp.
    """
    if (not quiet):
        pprint(('PROCESSING: %s' % sysvals.htmlfile))
    sysvals.vprint(('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % (sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes)))
    error = ''
    if sysvals.usetraceevents:
        # Primary path: build test runs from ftrace events, then enrich
        # with error info from dmesg if available.
        (testruns, error) = parseTraceLog(live)
        if sysvals.dmesgfile:
            for data in testruns:
                data.extractErrorInfo()
    else:
        # Fallback: reconstruct the runs from the kernel log alone.
        testruns = loadKernelLog()
        for data in testruns:
            parseKernelLog(data)
        if (sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
            appendIncompleteTraceLog(testruns)
    if (not sysvals.stamp):
        pprint('ERROR: data does not include the expected stamp')
        return (testruns, {'error': 'timeline generation failed'})
    # Whitelist of stamp keys worth echoing in the verbose system summary.
    shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr', 'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
    sysvals.vprint('System Info:')
    for key in sorted(sysvals.stamp):
        if (key in shown):
            sysvals.vprint(('    %-8s : %s' % (key.upper(), sysvals.stamp[key])))
    sysvals.vprint(('Command:\n    %s' % sysvals.cmdline))
    for data in testruns:
        if data.turbostat:
            # Wrap the pipe-separated turbostat fields at ~80 columns.
            (idx, s) = (0, 'Turbostat:\n    ')
            for val in data.turbostat.split('|'):
                idx += (len(val) + 1)
                if (idx >= 80):
                    idx = 0
                    s += '\n    '
                s += (val + ' ')
            sysvals.vprint(s)
        data.printDetails()
    if (len(sysvals.platinfo) > 0):
        sysvals.vprint('\nPlatform Info:')
        for info in sysvals.platinfo:
            sysvals.vprint(('[%s - %s]' % (info[0], info[1])))
            sysvals.vprint(info[2])
        sysvals.vprint('')
    if sysvals.cgdump:
        # Callgraph dump mode: print debug output and exit without HTML.
        for data in testruns:
            data.debugPrint()
        sys.exit(0)
    if (len(testruns) < 1):
        pprint('ERROR: Not enough test data to build a timeline')
        return (testruns, {'error': 'timeline generation failed'})
    sysvals.vprint(('Creating the html timeline (%s)...' % sysvals.htmlfile))
    createHTML(testruns, error)
    if (not quiet):
        pprint(('DONE:    %s' % sysvals.htmlfile))
    # Summarize the first run's timing values in the returned stamp.
    data = testruns[0]
    stamp = data.stamp
    (stamp['suspend'], stamp['resume']) = data.getTimeValues()
    if data.fwValid:
        (stamp['fwsuspend'], stamp['fwresume']) = (data.fwSuspend, data.fwResume)
    if error:
        stamp['error'] = error
    return (testruns, stamp)
def assert_(condition: object) -> None:
    """Assert ``condition``, with behavior controlled by the debug mode.

    In PRODUCTION a failed assertion is only logged; in DEBUG_PDB it drops
    into the debugger; otherwise the AssertionError propagates.  Note that
    ``assert`` statements are stripped under ``python -O``.
    """
    try:
        assert condition
    except AssertionError:
        if (debugmode == 'DEBUG_PDB'):
            log.error('Deviation from expectations found. Dropping into debugger')
            import pdb
            pdb.set_trace()
        elif (debugmode == 'PRODUCTION'):
            log.error(('Deviation from expectations found. %s' % ERR_FRAGMENT), exc_info=True)
        else:
            raise
def delete_all_services_namespace(kubecli: KrknKubernetes, namespace: str):
    """Delete every service in the given namespace.

    Args:
        kubecli: Kubernetes client wrapper used to list and delete services.
        namespace: Namespace whose services should be removed.

    Returns:
        The list of services that was fetched (and deleted).

    Raises:
        Exception: Any error from the underlying client calls is logged and
            re-raised.
    """
    try:
        services = kubecli.get_all_services(namespace)
        for service in services:
            # Lazy %-formatting with a separator: the previous
            # 'Deleting services' + service produced "Deleting services<name>"
            # and would raise TypeError for non-str service objects.
            logging.info('Deleting service %s', service)
            kubecli.delete_services(service, namespace)
    except Exception as e:
        logging.error('Exception when calling delete_all_services_namespace: %s\n', str(e))
        # Bare raise preserves the original traceback ("raise e" rewrites it).
        raise
    return services
# NOTE(review): this registry line looks like a class decorator whose '@'
# prefix was lost in extraction -- confirm against the original file.
_HEAD_REGISTRY.register()
class StandardRPNHead(nn.Module):
    """Standard RPN head: shared 3x3 conv stack followed by two sibling 1x1
    convs predicting objectness logits and anchor box deltas per location.
    """
    def __init__(self, *, in_channels: int, num_anchors: int, box_dim: int=4, conv_dims: List[int]=((- 1),)):
        """Build the conv tower and prediction layers.

        Args:
            in_channels: Channels of the input feature maps.
            num_anchors: Anchors per spatial position.
            box_dim: Values per box (4 for axis-aligned, 5 for rotated).
            conv_dims: Output channels per conv layer; -1 means "same as
                input channels".
        """
        super().__init__()
        cur_channels = in_channels
        if (len(conv_dims) == 1):
            out_channels = (cur_channels if (conv_dims[0] == (- 1)) else conv_dims[0])
            # Keep the single conv unwrapped (not in nn.Sequential) for
            # backward compatibility with existing checkpoints.
            self.conv = self._get_rpn_conv(cur_channels, out_channels)
            cur_channels = out_channels
        else:
            self.conv = nn.Sequential()
            for (k, conv_dim) in enumerate(conv_dims):
                out_channels = (cur_channels if (conv_dim == (- 1)) else conv_dim)
                if (out_channels <= 0):
                    raise ValueError(f'Conv output channels should be greater than 0. Got {out_channels}')
                conv = self._get_rpn_conv(cur_channels, out_channels)
                self.conv.add_module(f'conv{k}', conv)
                cur_channels = out_channels
        # 1x1 sibling predictors: one score per anchor, box_dim deltas per anchor.
        self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1)
        self.anchor_deltas = nn.Conv2d(cur_channels, (num_anchors * box_dim), kernel_size=1, stride=1)
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, std=0.01)
                nn.init.constant_(layer.bias, 0)
    def _get_rpn_conv(self, in_channels, out_channels):
        """Return a 3x3 same-padding conv with ReLU activation."""
        return Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, activation=nn.ReLU())
    # NOTE(review): takes cls and is conventionally a @classmethod -- the
    # decorator appears to have been lost in extraction; confirm.
    def from_config(cls, cfg, input_shape):
        """Build constructor kwargs from the config and input feature shapes."""
        in_channels = [s.channels for s in input_shape]
        assert (len(set(in_channels)) == 1), 'Each level must have the same channel!'
        in_channels = in_channels[0]
        # The anchor generator is only built to read off anchor counts/box dim.
        anchor_generator = build_anchor_generator(cfg, input_shape)
        num_anchors = anchor_generator.num_anchors
        box_dim = anchor_generator.box_dim
        assert (len(set(num_anchors)) == 1), 'Each level must have the same number of anchors per spatial position'
        return {'in_channels': in_channels, 'num_anchors': num_anchors[0], 'box_dim': box_dim, 'conv_dims': cfg.MODEL.RPN.CONV_DIMS}
    def forward(self, features: List[torch.Tensor]):
        """Run the head on each feature level.

        Args:
            features: One feature map per level.

        Returns:
            Tuple of (objectness logits list, anchor deltas list), one entry
            per input level.
        """
        pred_objectness_logits = []
        pred_anchor_deltas = []
        for x in features:
            t = self.conv(x)
            pred_objectness_logits.append(self.objectness_logits(t))
            pred_anchor_deltas.append(self.anchor_deltas(t))
        return (pred_objectness_logits, pred_anchor_deltas)
class CustomCompleterApp(cmd2.Cmd):
    """Test app comparing the app-wide argparse completer with per-parser
    custom completers, including for subcommands.

    NOTE(review): the bare `_argparser(...)` and `.as_subcommand_to(...)`
    lines below look like decorators whose `@with` / `@cmd2` prefixes were
    lost in extraction -- confirm against the original file.
    """
    def __init__(self):
        super().__init__()
        # Flag consulted by the completers' complete_when_ready logic.
        self.is_ready = True
    default_completer_parser = Cmd2ArgumentParser(description='Testing app-wide argparse completer')
    default_completer_parser.add_argument('--myflag', complete_when_ready=True)
    _argparser(default_completer_parser)
    def do_default_completer(self, args: argparse.Namespace) -> None:
        """Command using the app-wide default completer."""
        pass
    custom_completer_parser = Cmd2ArgumentParser(description='Testing parser-specific argparse completer', ap_completer_type=CustomCompleter)
    custom_completer_parser.add_argument('--myflag', complete_when_ready=True)
    _argparser(custom_completer_parser)
    def do_custom_completer(self, args: argparse.Namespace) -> None:
        """Command whose parser specifies CustomCompleter."""
        pass
    top_parser = Cmd2ArgumentParser(description='Top Command')
    top_subparsers = top_parser.add_subparsers(dest='subcommand', metavar='SUBCOMMAND')
    top_subparsers.required = True
    _argparser(top_parser)
    def do_top(self, args: argparse.Namespace) -> None:
        """Dispatch to the chosen subcommand's handler."""
        handler = args.cmd2_handler.get()
        handler(args)
    no_custom_completer_parser = Cmd2ArgumentParser(description='No custom completer')
    no_custom_completer_parser.add_argument('--myflag', complete_when_ready=True)
    .as_subcommand_to('top', 'no_custom', no_custom_completer_parser, help='no custom completer')
    def _subcmd_no_custom(self, args: argparse.Namespace) -> None:
        """Subcommand inheriting the default completer."""
        pass
    custom_completer_parser = Cmd2ArgumentParser(description='Custom completer', ap_completer_type=CustomCompleter)
    custom_completer_parser.add_argument('--myflag', complete_when_ready=True)
    .as_subcommand_to('top', 'custom', custom_completer_parser, help='custom completer')
    def _subcmd_custom(self, args: argparse.Namespace) -> None:
        """Subcommand whose parser specifies CustomCompleter."""
        pass
class SimpleReplayPool(PoolBase, Serializable):
    """Fixed-capacity FIFO replay pool over flat numpy ring buffers.

    Transitions are written at ``_top``; once ``max_pool_size`` samples are
    stored the oldest are overwritten. ``_final_state`` marks rows that hold
    a trajectory's last observation and must never start a sampled
    (s, a, r, s') transition.
    """

    def __init__(self, env_spec, max_pool_size, replacement_policy='stochastic', replacement_prob=1.0, max_skip_episode=10):
        Serializable.quick_init(self, locals())
        super(SimpleReplayPool, self).__init__(env_spec)
        max_pool_size = int(max_pool_size)
        self._max_pool_size = max_pool_size
        self._replacement_policy = replacement_policy
        self._replacement_prob = replacement_prob
        self._max_skip_episode = max_skip_episode
        # Ring buffers; _observation_dim/_action_dim come from PoolBase(env_spec).
        self._observations = np.zeros((max_pool_size, self._observation_dim))
        self._actions = np.zeros((max_pool_size, self._action_dim))
        self._rewards = np.zeros(max_pool_size)
        self._terminals = np.zeros(max_pool_size, dtype='uint8')
        self._final_state = np.zeros(max_pool_size, dtype='uint8')
        self._bottom = 0  # index of the oldest stored sample
        self._top = 0     # next write position
        self._size = 0    # number of samples currently stored
        self._env_info = dict()

    def add_sample(self, observation, action, reward, terminal, final_state, env_info=None):
        """Write one transition at the current ring position and advance."""
        self._observations[self._top] = observation
        self._actions[self._top] = action
        self._rewards[self._top] = reward
        self._terminals[self._top] = terminal
        self._final_state[self._top] = final_state
        if env_info is not None:
            # Lazily allocate per-key buffers on first use. (Fixed: the
            # original compared ``len(...) is 0`` — an identity test on an
            # int, which is fragile and a SyntaxWarning on modern CPython.)
            if len(self._env_info) == 0:
                for (k, v) in env_info.items():
                    self._env_info[k] = np.zeros((self._max_pool_size, v.size))
            for (k, v) in env_info.items():
                self._env_info[k][self._top] = v
        self.advance()

    def add_path(self, observations, actions, rewards, terminals, last_obs, env_infos=None):
        """Append a whole trajectory, terminated by a zero-padded final-state row."""
        env_info = None
        if env_infos is not None:
            env_info = dict()
            for (k, v) in env_infos.items():
                env_info[k] = v[0]
        for t in range(len(observations)):
            if env_info is not None:
                for (k, v) in env_infos.items():
                    env_info[k] = v[t]
            self.add_sample(observations[t], actions[t], rewards[t], terminals[t], False, env_info)
        # Sentinel row: stores last_obs with final_state=True so it is never
        # sampled as the start of a transition.
        self.add_sample(last_obs, np.zeros_like(actions[0]), np.zeros_like(rewards[0]), np.zeros_like(terminals[0]), True, None)

    def advance(self):
        """Move the write head forward, evicting the oldest sample when full."""
        self._top = (self._top + 1) % self._max_pool_size
        if self._size >= self._max_pool_size:
            self._bottom = (self._bottom + 1) % self._max_pool_size
        else:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample ``batch_size`` valid (s, a, r, s') transitions uniformly."""
        assert (self._size > 1)
        indices = np.zeros(batch_size, dtype='uint64')
        transition_indices = np.zeros(batch_size, dtype='uint64')
        count = 0
        while count < batch_size:
            index = np.random.randint(0, min(self._size, self._max_pool_size))
            # Skip samples whose successor is the (not-yet-written) write head,
            # and trajectory-final rows that have no meaningful successor.
            if ((index + 1) % self._max_pool_size) == self._top:
                continue
            if self._final_state[index]:
                continue
            indices[count] = index
            transition_indices[count] = (index + 1) % self._max_pool_size
            count += 1
        return dict(observations=self._observations[indices], actions=self._actions[indices], rewards=self._rewards[indices], terminals=self._terminals[indices], next_observations=self._observations[transition_indices])

    def batch(self, batch_size):
        """Return the first ``batch_size`` stored transitions, in storage order.

        Fixed: the original used ``np.arange(0, batch_size - 1)``, which yields
        only ``batch_size - 1`` indices — an off-by-one against the requested size.
        """
        indices = np.arange(batch_size)
        transition_indices = (indices + 1) % self._max_pool_size
        return dict(observations=self._observations[indices], actions=self._actions[indices], rewards=self._rewards[indices], terminals=self._terminals[indices], next_observations=self._observations[transition_indices])

    def size(self):
        """Number of samples currently stored."""
        return self._size

    def __getstate__(self):
        """Serialize the buffers as raw bytes for pickling."""
        d = super(SimpleReplayPool, self).__getstate__()
        if self._env_info is not None:
            env_info_binary = dict()
            for (k, v) in self._env_info.items():
                env_info_binary[k] = v.tobytes()
            d.update(dict(env_info=env_info_binary))
        # Fixed: the original stored ``size=self.size`` — a *bound method* —
        # which __setstate__ then assigned to self._size. Store the integer.
        d.update(dict(o=self._observations.tobytes(), a=self._actions.tobytes(), r=self._rewards.tobytes(), t=self._terminals.tobytes(), f=self._final_state.tobytes(), bottom=self._bottom, top=self._top, size=self._size))
        return d

    def __setstate__(self, d):
        """Restore the buffers from their raw-bytes serialization.

        Uses ``np.frombuffer`` (``np.fromstring`` is deprecated and removed for
        binary input in NumPy 2.0) with ``.copy()`` so the arrays stay writable.
        """
        super(SimpleReplayPool, self).__setstate__(d)
        self._observations = np.frombuffer(d['o']).reshape(self._max_pool_size, (- 1)).copy()
        self._actions = np.frombuffer(d['a']).reshape(self._max_pool_size, (- 1)).copy()
        self._rewards = np.frombuffer(d['r']).reshape(self._max_pool_size).copy()
        self._terminals = np.frombuffer(d['t'], dtype=np.uint8).copy()
        self._final_state = np.frombuffer(d['f'], dtype=np.uint8).copy()
        self._bottom = d['bottom']
        self._top = d['top']
        self._size = d['size']
        if 'env_info' in d.keys():
            for (k, v) in d['env_info'].items():
                self._env_info[k] = np.frombuffer(v).reshape(self._max_pool_size, (- 1)).copy()
def test_resnet3d_backbone():
    """Exercise ResNet3d: invalid configs, 2D weight inflation, norm_eval,
    frozen stages, caffe style, inflate styles, non-local blocks, and forward
    output shapes for depth-34 and depth-50 variants."""
    # --- invalid stage/stride configurations must raise ---
    with pytest.raises(AssertionError):
        ResNet3d(34, None, num_stages=0)
    with pytest.raises(AssertionError):
        ResNet3d(34, None, num_stages=5)
    with pytest.raises(AssertionError):
        ResNet3d(50, None, num_stages=0)
    with pytest.raises(AssertionError):
        ResNet3d(50, None, num_stages=5)
    with pytest.raises(AssertionError):
        ResNet3d(50, None, spatial_strides=(1,), temporal_strides=(1, 1), dilations=(1, 1, 1), num_stages=4)
    with pytest.raises(AssertionError):
        ResNet3d(34, None, spatial_strides=(1,), temporal_strides=(1, 1), dilations=(1, 1, 1), num_stages=4)
    # --- pretrained must be a string or None, not a list ---
    with pytest.raises(TypeError):
        resnet3d_34 = ResNet3d(34, ['resnet', 'bninception'])
        resnet3d_34.init_weights()
    with pytest.raises(TypeError):
        resnet3d_50 = ResNet3d(50, ['resnet', 'bninception'])
        resnet3d_50.init_weights()
    # --- norm_eval=True keeps norm layers in eval mode even after train() ---
    resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True)
    resnet3d_34.init_weights()
    resnet3d_34.train()
    assert check_norm_state(resnet3d_34.modules(), False)
    resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True)
    resnet3d_50.init_weights()
    resnet3d_50.train()
    assert check_norm_state(resnet3d_50.modules(), False)
    resnet3d_50_pretrain = ResNet3d(50, 'torchvision://resnet50', norm_eval=True)
    resnet3d_50_pretrain.init_weights()
    resnet3d_50_pretrain.train()
    assert check_norm_state(resnet3d_50_pretrain.modules(), False)
    # --- verify 2D->3D weight inflation against the torchvision checkpoint ---
    from mmcv.runner import _load_checkpoint
    chkp_2d = _load_checkpoint('torchvision://resnet50')
    for (name, module) in resnet3d_50_pretrain.named_modules():
        if (len(name.split('.')) == 4):
            # Map a 3D module path (e.g. layer1.0.conv1.conv) to its 2D
            # checkpoint key; downsample branches use numeric child names.
            prefix = name.split('.')[:2]
            module_type = name.split('.')[2]
            submodule_type = name.split('.')[3]
            if (module_type == 'downsample'):
                name2d = name.replace('conv', '0').replace('bn', '1')
            else:
                layer_id = name.split('.')[2][(- 1)]
                name2d = (((((prefix[0] + '.') + prefix[1]) + '.') + submodule_type) + layer_id)
            if isinstance(module, nn.Conv3d):
                # Inflation: replicate the 2D kernel along time, scaled by the
                # temporal kernel size so activations keep their magnitude.
                conv2d_weight = chkp_2d[(name2d + '.weight')]
                conv3d_weight = getattr(module, 'weight').data
                assert torch.equal(conv3d_weight, (conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) / conv3d_weight.shape[2]))
                if (getattr(module, 'bias') is not None):
                    conv2d_bias = chkp_2d[(name2d + '.bias')]
                    conv3d_bias = getattr(module, 'bias').data
                    assert torch.equal(conv2d_bias, conv3d_bias)
            elif isinstance(module, nn.BatchNorm3d):
                # BN params are copied over unchanged.
                for pname in ['weight', 'bias', 'running_mean', 'running_var']:
                    param_2d = chkp_2d[((name2d + '.') + pname)]
                    param_3d = getattr(module, pname).data
                    assert torch.equal(param_2d, param_3d)
    # Spot-check the stem conv and one mid-network conv as well.
    conv3d = resnet3d_50_pretrain.conv1.conv
    assert torch.equal(conv3d.weight, (chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) / conv3d.weight.shape[2]))
    conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv
    assert torch.equal(conv3d.weight, (chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as(conv3d.weight) / conv3d.weight.shape[2]))
    # --- norm_eval=False: norm layers train normally ---
    resnet3d_34_no_bn_eval = ResNet3d(34, None, pretrained2d=False, norm_eval=False)
    resnet3d_34_no_bn_eval.init_weights()
    resnet3d_34_no_bn_eval.train()
    assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True)
    resnet3d_50_no_bn_eval = ResNet3d(50, None, pretrained2d=False, norm_eval=False)
    resnet3d_50_no_bn_eval.init_weights()
    resnet3d_50_no_bn_eval.train()
    assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True)
    # --- frozen_stages=1 freezes the stem and stage 1 (depth 34) ---
    frozen_stages = 1
    resnet3d_34_frozen = ResNet3d(34, None, pretrained2d=False, frozen_stages=frozen_stages)
    resnet3d_34_frozen.init_weights()
    resnet3d_34_frozen.train()
    assert (resnet3d_34_frozen.conv1.bn.training is False)
    for param in resnet3d_34_frozen.conv1.parameters():
        assert (param.requires_grad is False)
    for i in range(1, (frozen_stages + 1)):
        layer = getattr(resnet3d_34_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert (mod.training is False)
        for param in layer.parameters():
            assert (param.requires_grad is False)
    # zero_init_residual: the last BN of each residual branch starts at zero.
    for m in resnet3d_34_frozen.modules():
        if hasattr(m, 'conv2'):
            assert torch.equal(m.conv2.bn.weight, torch.zeros_like(m.conv2.bn.weight))
            assert torch.equal(m.conv2.bn.bias, torch.zeros_like(m.conv2.bn.bias))
    # --- same freezing checks for depth 50 (bottleneck uses conv3) ---
    frozen_stages = 1
    resnet3d_50_frozen = ResNet3d(50, None, pretrained2d=False, frozen_stages=frozen_stages)
    resnet3d_50_frozen.init_weights()
    resnet3d_50_frozen.train()
    assert (resnet3d_50_frozen.conv1.bn.training is False)
    for param in resnet3d_50_frozen.conv1.parameters():
        assert (param.requires_grad is False)
    for i in range(1, (frozen_stages + 1)):
        layer = getattr(resnet3d_50_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert (mod.training is False)
        for param in layer.parameters():
            assert (param.requires_grad is False)
    for m in resnet3d_50_frozen.modules():
        if hasattr(m, 'conv3'):
            assert torch.equal(m.conv3.bn.weight, torch.zeros_like(m.conv3.bn.weight))
            assert torch.equal(m.conv3.bn.bias, torch.zeros_like(m.conv3.bn.bias))
    # --- forward-shape checks; parrots builds take the CUDA path ---
    input_shape = (1, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_34_frozen = resnet3d_34_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_frozen(imgs_gpu)
            assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    else:
        feat = resnet3d_34_frozen(imgs)
        assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    input_shape = (1, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_50_frozen = resnet3d_50_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_frozen(imgs_gpu)
            assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    else:
        feat = resnet3d_50_frozen(imgs)
        assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    # --- caffe-style stem/downsample variants ---
    resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe')
    resnet3d_50_caffe.init_weights()
    resnet3d_50_caffe.train()
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_50_caffe = resnet3d_50_caffe.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_caffe(imgs_gpu)
            assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    else:
        feat = resnet3d_50_caffe(imgs)
        assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe')
    resnet3d_34_caffe.init_weights()
    resnet3d_34_caffe.train()
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_34_caffe = resnet3d_34_caffe.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_caffe(imgs_gpu)
            assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    else:
        feat = resnet3d_34_caffe(imgs)
        assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    # --- inflate_style='3x3x3' variants (variable names say 1x1x1; the
    # constructor argument is what matters here) ---
    resnet3d_50_1x1x1 = ResNet3d(50, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_50_1x1x1.init_weights()
    resnet3d_50_1x1x1.train()
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_50_1x1x1 = resnet3d_50_1x1x1.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_1x1x1(imgs_gpu)
            assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    else:
        feat = resnet3d_50_1x1x1(imgs)
        assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
    resnet3d_34_1x1x1 = ResNet3d(34, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_34_1x1x1.init_weights()
    resnet3d_34_1x1x1.train()
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            resnet3d_34_1x1x1 = resnet3d_34_1x1x1.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_1x1x1(imgs_gpu)
            assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    else:
        feat = resnet3d_34_1x1x1(imgs)
        assert (feat.shape == torch.Size([1, 512, 3, 2, 2]))
    # --- non-local blocks attached at even positions of layer2/layer3 ---
    non_local_cfg = dict(sub_sample=True, use_scale=False, norm_cfg=dict(type='BN3d', requires_grad=True), mode='embedded_gaussian')
    non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
    resnet3d_nonlocal = ResNet3d(50, None, pretrained2d=False, non_local=non_local, non_local_cfg=non_local_cfg)
    resnet3d_nonlocal.init_weights()
    for layer_name in ['layer2', 'layer3']:
        layer = getattr(resnet3d_nonlocal, layer_name)
        for (i, _) in enumerate(layer):
            if ((i % 2) == 0):
                assert hasattr(layer[i], 'non_local_block')
    feat = resnet3d_nonlocal(imgs)
    assert (feat.shape == torch.Size([1, 2048, 3, 2, 2]))
def channels_setup(amount: TokenAmount, our_address: Address, refund_address: Address) -> ChannelSet:
    """Build a three-channel test set: funded with a funded partner, broke, and funded."""
    our_funded = factories.NettingChannelEndStateProperties(balance=amount, address=our_address)
    our_broke = factories.replace(our_funded, balance=0)
    partner_funded = factories.replace(our_funded, address=refund_address)
    return factories.make_channel_set([
        factories.NettingChannelStateProperties(our_state=our_funded, partner_state=partner_funded),
        factories.NettingChannelStateProperties(our_state=our_broke),
        factories.NettingChannelStateProperties(our_state=our_funded),
    ])
# NOTE(review): the bare tuple line below reads like the argument list of a lost
# ``@expose(...)`` decorator (pycket primitive registration) — confirm upstream.
('regexp-max-lookbehind', [values.W_Object])
def regexp_max_lookbehind(obj):
    """Primitive: report the maximum lookbehind this implementation supports.

    Accepts string or byte regexps and always answers a fixed bound of 1000.
    """
    if ((not isinstance(obj, values_regex.W_Regexp)) and (not isinstance(obj, values_regex.W_ByteRegexp))):
        raise SchemeException('regexp-max-lookbehind: expected regexp or bytes-regexp')
    return values.W_Fixnum(1000)
class SearchsortedOp(COp):
    """COp wrapping ``numpy.searchsorted``: find insertion indices of ``v``
    into the sorted 1-D vector ``x``, optionally via a ``sorter`` permutation.

    ``side`` ('left' or 'right') selects which end of a run of equal values
    the insertion point lands on; it is carried as the op's params.
    """
    params_type = Generic()
    __props__ = ('side',)
    check_input = False
    def __init__(self, side='left'):
        # Only the two numpy-supported sides are accepted.
        if ((side == 'left') or (side == 'right')):
            self.side = side
        else:
            raise ValueError(f"'{side}' is an invalid value for keyword 'side'")
    def get_params(self, node):
        # The side string is the only runtime parameter the C code needs.
        return self.side
    def make_node(self, x, v, sorter=None):
        """Build the Apply node; output dtype is always int64 with v's shape."""
        x = ptb.as_tensor(x, ndim=1)
        v = ptb.as_tensor(v)
        out_type = v.type.clone(dtype='int64')
        if (sorter is None):
            return Apply(self, [x, v], [out_type()])
        else:
            sorter = ptb.as_tensor(sorter, ndim=1)
            # numpy on 32-bit Python cannot consume an int64 sorter.
            if ((PYTHON_INT_BITWIDTH == 32) and (sorter.dtype == 'int64')):
                raise TypeError('numpy.searchsorted with Python 32bit do not support a sorter of int64.')
            if ((sorter.type.ndim == 1) and (sorter.type.dtype not in int_dtypes)):
                raise TypeError('sorter must be an integer vector', sorter.type)
            return Apply(self, [x, v, sorter], [out_type()])
    def infer_shape(self, fgraph, node, shapes):
        # Output has the same shape as v (input index 1).
        return [shapes[1]]
    def perform(self, node, inputs, output_storage):
        """Python fallback: call numpy.searchsorted directly."""
        x = inputs[0]
        v = inputs[1]
        if (len(node.inputs) == 3):
            sorter = inputs[2]
        else:
            sorter = None
        z = output_storage[0]
        z[0] = np.searchsorted(x, v, side=self.side, sorter=sorter).astype(node.outputs[0].dtype)
    def c_support_code_struct(self, node, name):
        # Per-struct flag caching whether side == "right" (computed once below).
        return f'''
        int right_{name};
        '''
    def c_init_code_struct(self, node, name, sub):
        # Compare the params string against "right" once at struct init; the
        # result drives the NPY_SEARCHLEFT/RIGHT choice in c_code.
        side = sub['params']
        fail = sub['fail']
        return ('\n PyObject* tmp_%(name)s = PyUnicode_FromString("right");\n if (tmp_%(name)s == NULL)\n %(fail)s;\n right_%(name)s = PyUnicode_Compare(%(side)s, tmp_%(name)s);\n Py_DECREF(tmp_%(name)s);\n ' % locals())
    def c_code(self, node, name, inames, onames, sub):
        """C implementation via PyArray_SearchSorted, casting the result to int64."""
        sorter = None
        if (len(node.inputs) == 3):
            (x, v, sorter) = inames
        else:
            (x, v) = inames
        if (not sorter):
            sorter = 'NULL'
        (z,) = onames
        fail = sub['fail']
        return ('\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_SearchSorted(%(x)s, (PyObject*) %(v)s,\n right_%(name)s ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, (PyObject*) %(sorter)s);\n if (!%(z)s)\n %(fail)s;\n if (PyArray_TYPE(%(z)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(z)s, NPY_INT64);\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) tmp;\n }\n ' % locals())
    def c_code_cache_version(self):
        return (2,)
    def grad(self, inputs, output_gradients):
        """Gradients are zero w.r.t. x and v; the sorter input is disconnected."""
        num_ins = len(inputs)
        if (num_ins == 3):
            (x, v, sorter) = inputs
        else:
            (x, v) = inputs
        x_grad = _float_zeros_like(x)
        v_grad = _float_zeros_like(v)
        if (num_ins == 3):
            return [x_grad, v_grad, disconnected_type()]
        else:
            return [x_grad, v_grad]
class ThanksWidget(TitledWidget):
    """Confirmation page shown once a user's email address has been verified."""

    def __init__(self, view):
        super().__init__(view)
        for message in (
            _('Thank you for verifying your email address.'),
            _('Your account is now active, and you can proceed to log in using the details you provided.'),
        ):
            self.add_child(P(view, text=message))
def _special_method_cache(method, cache_wrapper):
name = method.__name__
special_names = ('__getattr__', '__getitem__')
if (name not in special_names):
return
wrapper_name = ('__cached' + name)
def proxy(self, *args, **kwargs):
if (wrapper_name not in vars(self)):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy |
class ResnetDiscriminator(chainer.Chain):
    """Residual GAN discriminator: four down-res blocks followed by a linear head.

    Intermediate activations (self.x, self.h1..self.h4) are stored so that
    ``differentiable_backward`` can replay the backward pass by hand (used,
    presumably, for gradient-penalty style objectives — confirm at call site).
    """
    def __init__(self, bottom_width=8, ch=128, wscale=0.02, output_dim=1):
        w = chainer.initializers.Normal(wscale)
        super(ResnetDiscriminator, self).__init__()
        self.bottom_width = bottom_width
        self.ch = ch
        with self.init_scope():
            # Three distinct down-res block types; r2 and r3 share a type.
            self.r0 = DownResBlock1(128)
            self.r1 = DownResBlock2(128)
            self.r2 = DownResBlock3(128)
            self.r3 = DownResBlock3(128)
            self.l4 = L.Linear(((bottom_width * bottom_width) * ch), output_dim, initialW=w)
    def __call__(self, x):
        # Cache every intermediate for differentiable_backward.
        self.x = x
        self.h1 = self.r0(self.x)
        self.h2 = self.r1(self.h1)
        self.h3 = self.r2(self.h2)
        self.h4 = self.r3(self.h3)
        return self.l4(F.relu(self.h4))
    def differentiable_backward(self, x):
        """Manually propagate gradient ``x`` from the output back to the input.

        NOTE(review): slope 0.0 in backward_leaky_relu makes it a plain ReLU
        backward, matching the F.relu in __call__; both backward_linear and
        backward_leaky_relu are keyed on self.h4 here — verify that is intended.
        """
        g = backward_linear(self.h4, x, self.l4)
        g = F.reshape(g, (x.shape[0], self.ch, self.bottom_width, self.bottom_width))
        g = backward_leaky_relu(self.h4, g, 0.0)
        g = self.r3.differentiable_backward(g)
        g = self.r2.differentiable_backward(g)
        g = self.r1.differentiable_backward(g)
        g = self.r0.differentiable_backward(g)
        return g
class conv_block(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation.

    Args:
        ch_in: number of input channels.
        ch_out: number of output channels.
        kernel_size, stride, padding: forwarded to ``nn.Conv2d``.
        act: ``'relu'``, ``'sigmoid'`` or ``None`` for no activation.

    Raises:
        ValueError: for an unrecognized ``act``. (The original silently left
        ``self.conv`` unset in that case, deferring failure to ``forward``.)
    """

    def __init__(self, ch_in, ch_out, kernel_size=3, stride=1, padding=1, act='relu'):
        super(conv_block, self).__init__()
        layers = [
            nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size, stride=stride, padding=padding),
            nn.BatchNorm2d(ch_out),
        ]
        # ``is None`` replaces the original ``== None`` identity/equality mixup.
        if act is None:
            pass
        elif act == 'relu':
            layers.append(nn.ReLU(inplace=True))
        elif act == 'sigmoid':
            layers.append(nn.Sigmoid())
        else:
            raise ValueError(f"unsupported activation: {act!r}")
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the conv-bn(-activation) stack to ``x``."""
        return self.conv(x)
def main(source_data_csv_path):
    """Collect per-experiment error-rate csvs into one source-data csv.

    Walks HYPERPARam experiments per species, loads the single ``err*.csv``
    under each experiment's results root, converts the segment error rate to
    percent, tags rows with experiment/value/species, and writes the
    concatenated frame to ``source_data_csv_path``.
    """
    frames = []
    for species, expt_dict in HYPERPARAM_EXPTS.items():
        for expt_name, params_list in expt_dict.items():
            for param_val, location in params_list:
                # Hyperparameter sweeps live under the experiment dir; the
                # baseline value comes from the learncurve run.
                if location == 'expt':
                    param_root = RESULTS_ROOT / species / expt_name / f'{expt_name}_{param_val}'
                elif location == 'learncurve':
                    param_root = RESULTS_ROOT / species / 'learncurve'
                results_csv = sorted(param_root.glob('err*.csv'))
                assert len(results_csv) == 1, f'did not find only 1 csv: {results_csv}'
                frame = pd.read_csv(results_csv[0])
                # Convert fraction -> percent in place.
                frame.avg_segment_error_rate = frame.avg_segment_error_rate * 100
                frame['hyperparam_expt'] = expt_name
                frame['hyperparam_val'] = param_val
                frame['species'] = species
                frames.append(frame)
    combined = pd.concat(frames)
    combined['species'] = combined['species'].map(DIRNAME_SPECIES_COL_VAL_MAP)
    combined.to_csv(source_data_csv_path, index=False)
class GetCurrentInputCommand(ItemInfoCommandBase):
    """Item-info command that reads the device's currently selected input."""

    def __init__(self, device_type: str) -> None:
        super(GetCurrentInputCommand, self).__init__(device_type, 'CURRENT_INPUT')

    def process_response(self, json_obj: Dict[(str, Any)]) -> Optional[InputItem]:
        """Parse the first ITEMS entry into an InputItem, or None when absent."""
        items = dict_get_case_insensitive(json_obj, ResponseKey.ITEMS)
        if not items:
            return None
        return InputItem(items[0], False)
def test():
    """Scroll demo for an ILI9341 display on CircuitPython or MicroPython boards.

    Sets up SPI/pins for the detected runtime, draws two colored bands with a
    logo between them, then scrolls until interrupted with Ctrl-C.
    """
    try:
        if (implementation.name == 'circuitpython'):
            import board
            from busio import SPI
            from digitalio import DigitalInOut
            cs_pin = DigitalInOut(board.P0_15)
            dc_pin = DigitalInOut(board.P0_17)
            rst_pin = DigitalInOut(board.P0_20)
            spi = SPI(clock=board.P0_24, MOSI=board.P0_22)
        else:
            from machine import Pin, SPI
            cs_pin = Pin(16)
            dc_pin = Pin(4)
            rst_pin = Pin(17)
            # Fixed: the baudrate value was missing (``baudrate=,`` was a
            # syntax error). 40 MHz is the customary ILI9341 SPI clock in
            # these demos — confirm against your board's limits.
            spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))
        display = Display(spi, dc=dc_pin, cs=cs_pin, rst=rst_pin)
        display.clear()
        # Top and bottom color bands framing the scrolling region.
        display.fill_rectangle(0, 0, 239, 99, color565(27, 72, 156))
        display.fill_rectangle(0, 168, 239, 151, color565(220, 27, 72))
        display.draw_image('images/Rototron128x26.raw', 56, 120, 128, 26)
        display.set_scroll(top=152, bottom=100)
        # Triangle-wave scroll offsets: up then back down.
        spectrum = (list(range(152, 221)) + list(reversed(range(152, 220))))
        while True:
            for y in spectrum:
                display.scroll(y)
                sleep(0.1)
    except KeyboardInterrupt:
        display.cleanup()
def peer_learning_loss(logits_1, logits_2, labels, drop_rate):
    """Co-teaching style peer loss for two networks.

    Samples where the two networks' argmax predictions disagree are always
    kept. Among agreeing samples, each network keeps the ``1 - drop_rate``
    fraction that its *peer* finds easiest (smallest per-sample
    cross-entropy). Each network's loss is the mean cross-entropy over its
    selected subset.

    Returns:
        ``(loss_for_net_1, loss_for_net_2)`` as scalar tensors.
    """
    dist_a = F.softmax(logits_1, dim=1)
    dist_b = F.softmax(logits_2, dim=1)
    pred_a = dist_a.topk(1, dim=1, largest=True, sorted=True)[1].squeeze(dim=1)
    pred_b = dist_b.topk(1, dim=1, largest=True, sorted=True)[1].squeeze(dim=1)
    disagree = (pred_a != pred_b).nonzero().squeeze(dim=1)
    agree = (pred_a == pred_b).nonzero().squeeze(dim=1)
    if agree.shape[0] > 0:
        ce_a = F.cross_entropy(logits_1[agree], labels[agree], reduction='none')
        ce_b = F.cross_entropy(logits_2[agree], labels[agree], reduction='none')
        keep = int((1 - drop_rate) * ce_a.shape[0])
        easy_for_a = torch.argsort(ce_a.data)[:keep]
        easy_for_b = torch.argsort(ce_b.data)[:keep]
        # Cross-selection: each network trains on the samples its peer rates clean.
        selection_a = agree[easy_for_b]
        selection_b = agree[easy_for_a]
        if disagree.shape[0] > 0:
            selection_a = torch.cat((disagree, selection_a), dim=0)
            selection_b = torch.cat((disagree, selection_b), dim=0)
    else:
        selection_a = disagree
        selection_b = disagree
    loss_a = F.cross_entropy(logits_1[selection_a], labels[selection_a])
    loss_b = F.cross_entropy(logits_2[selection_b], labels[selection_b])
    return (loss_a, loss_b)
# NOTE(review): the bare tuple line below reads like the argument list of a lost
# ``@hydra.main(...)`` decorator — confirm against the MonoScene repository.
(config_name='../config/kitti_ssc.yaml')
def main(config: DictConfig):
    """Train MonoScene on KITTI or NYU per the hydra config.

    Builds an experiment name encoding the enabled losses, instantiates the
    dataset-specific data module and model, and runs PyTorch Lightning
    training, resuming from a last.ckpt if one exists.
    """
    # Experiment name encodes every loss/feature toggle for log directory naming.
    exp_name = config.exp_prefix
    exp_name += '_{}_{}'.format(config.dataset, config.run)
    exp_name += '_FrusSize_{}'.format(config.frustum_size)
    exp_name += '_nRelations{}'.format(config.n_relations)
    exp_name += '_WD{}_lr{}'.format(config.weight_decay, config.lr)
    if config.CE_ssc_loss:
        exp_name += '_CEssc'
    if config.geo_scal_loss:
        exp_name += '_geoScalLoss'
    if config.sem_scal_loss:
        exp_name += '_semScalLoss'
    if config.fp_loss:
        exp_name += '_fpLoss'
    if config.relation_loss:
        exp_name += '_CERel'
    if config.context_prior:
        exp_name += '_3DCRP'
    # Dataset-specific scene geometry, class weighting and data module.
    if (config.dataset == 'kitti'):
        class_names = kitti_class_names
        max_epochs = 30
        logdir = config.kitti_logdir
        full_scene_size = (256, 256, 32)
        project_scale = 2
        feature = 64
        n_classes = 20
        # Inverse-log frequency weighting to counter class imbalance.
        class_weights = torch.from_numpy((1 / np.log((semantic_kitti_class_frequencies + 0.001))))
        data_module = KittiDataModule(root=config.kitti_root, preprocess_root=config.kitti_preprocess_root, frustum_size=config.frustum_size, project_scale=project_scale, batch_size=int((config.batch_size / config.n_gpus)), num_workers=int(config.num_workers_per_gpu))
    elif (config.dataset == 'NYU'):
        class_names = NYU_class_names
        max_epochs = 30
        logdir = config.logdir
        full_scene_size = (60, 36, 60)
        project_scale = 1
        feature = 200
        n_classes = 12
        class_weights = NYU_class_weights
        data_module = NYUDataModule(root=config.NYU_root, preprocess_root=config.NYU_preprocess_root, n_relations=config.n_relations, frustum_size=config.frustum_size, batch_size=int((config.batch_size / config.n_gpus)), num_workers=int((config.num_workers_per_gpu * config.n_gpus)))
    # Multi-scale projection resolutions (1/1 always on; others opt-in).
    project_res = ['1']
    if config.project_1_2:
        exp_name += '_Proj_2'
        project_res.append('2')
    if config.project_1_4:
        exp_name += '_4'
        project_res.append('4')
    if config.project_1_8:
        exp_name += '_8'
        project_res.append('8')
    print(exp_name)
    model = MonoScene(dataset=config.dataset, frustum_size=config.frustum_size, project_scale=project_scale, n_relations=config.n_relations, fp_loss=config.fp_loss, feature=feature, full_scene_size=full_scene_size, project_res=project_res, n_classes=n_classes, class_names=class_names, context_prior=config.context_prior, relation_loss=config.relation_loss, CE_ssc_loss=config.CE_ssc_loss, sem_scal_loss=config.sem_scal_loss, geo_scal_loss=config.geo_scal_loss, lr=config.lr, weight_decay=config.weight_decay, class_weights=class_weights)
    # Optional tensorboard logging + best-mIoU checkpointing.
    if config.enable_log:
        logger = TensorBoardLogger(save_dir=logdir, name=exp_name, version='')
        lr_monitor = LearningRateMonitor(logging_interval='step')
        checkpoint_callbacks = [ModelCheckpoint(save_last=True, monitor='val/mIoU', save_top_k=1, mode='max', filename='{epoch:03d}-{val/mIoU:.5f}'), lr_monitor]
    else:
        logger = False
        checkpoint_callbacks = False
    # Resume from last.ckpt when a previous run of this experiment exists.
    model_path = os.path.join(logdir, exp_name, 'checkpoints/last.ckpt')
    if os.path.isfile(model_path):
        trainer = Trainer(callbacks=checkpoint_callbacks, resume_from_checkpoint=model_path, sync_batchnorm=True, deterministic=False, max_epochs=max_epochs, gpus=config.n_gpus, logger=logger, check_val_every_n_epoch=1, log_every_n_steps=10, flush_logs_every_n_steps=100, accelerator='ddp')
    else:
        trainer = Trainer(callbacks=checkpoint_callbacks, sync_batchnorm=True, deterministic=False, max_epochs=max_epochs, gpus=config.n_gpus, logger=logger, check_val_every_n_epoch=1, log_every_n_steps=10, flush_logs_every_n_steps=100, accelerator='ddp')
    trainer.fit(model, data_module)
def main():
    """Convert Cityscapes polygon annotations to label images and write split lists.

    Converts every ``*_polygons.json`` under the ground-truth dir (optionally in
    parallel), then writes ``train.txt``/``val.txt``/``test.txt`` listing the
    sample stems for each split.
    """
    args = parse_args()
    cityscapes_path = args.cityscapes_path
    out_dir = args.out_dir if args.out_dir else cityscapes_path
    mmcv.mkdir_or_exist(out_dir)
    gt_dir = osp.join(cityscapes_path, args.gt_dir)
    poly_files = []
    for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True):
        poly_files.append(osp.join(gt_dir, poly))
    if args.nproc > 1:
        mmcv.track_parallel_progress(convert_json_to_label, poly_files, args.nproc)
    else:
        mmcv.track_progress(convert_json_to_label, poly_files)
    split_names = ['train', 'val', 'test']
    for split in split_names:
        filenames = []
        for poly in mmcv.scandir(osp.join(gt_dir, split), '_polygons.json', recursive=True):
            filenames.append(poly.replace('_gtFine_polygons.json', ''))
        with open(osp.join(out_dir, f'{split}.txt'), 'w') as f:
            # Fixed: the generator previously reused the name ``f``, shadowing
            # the open file handle inside the expression.
            f.writelines(name + '\n' for name in filenames)
# NOTE(review): the leading ``.parametrize(...)`` looks like a ``@pytest.mark``
# decorator whose prefix was lost in extraction — confirm upstream. All six
# parameter tuples are identical, which also looks like an extraction artifact.
.parametrize('comm_pairs, value', (([(b'REFN 7', b'')], 7), ([(b'REFN 7', b'')], 7), ([(b'REFN 7', b'')], 7), ([(b'REFN 7', b'')], 7), ([(b'REFN 7', b'')], 7), ([(b'REFN 7', b'')], 7)))
def test_harmonic_setter(comm_pairs, value):
    """Setting ``harmonic`` must emit exactly the expected REFN command bytes."""
    with expected_protocol(Ametek7270, comm_pairs) as inst:
        inst.harmonic = value
class F23_TestCase(F21_TestCase):
    """Kickstart F23 ``logvol`` parsing: cache options and --mkfsoptions rules."""
    def runTest(self):
        # Everything valid in F21 must still parse.
        F21_TestCase.runTest(self)
        # --mkfsoptions conflicts with --fsprofile.
        self.assert_parse_error('logvol / --size=4096 --name=LVNAME --vgname=VGNAME --mkfsoptions=some,thing --fsprofile=PROFILE')
        # Full cache spec (size + pvs + mode) round-trips to canonical form.
        self.assert_parse('logvol /home --name=home --vgname=vg --size=500 --cachesize=250 --cachepvs=pv.01,pv.02 --cachemode=writeback', 'logvol /home --size=500 --cachesize=250 --cachepvs=pv.01,pv.02 --cachemode=writeback --name=home --vgname=vg\n')
        # cachemode is optional, but cachesize and cachepvs require each other.
        self.assert_parse('logvol /home --name=home --vgname=vg --size=500 --cachesize=250 --cachepvs=pv.01,pv.02')
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachesize=250')
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachepvs=pv.01,pv.02')
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachemode=writeback')
        # Caching is incompatible with preexisting/unformatted volumes.
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachesize=250 --cachepvs=pv.01,pv.02 --cachemode=writeback --useexisting')
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachesize=250 --cachepvs=pv.01,pv.02 --cachemode=writeback --noformat')
        # --mkfsoptions round-trips quoted, and conflicts with --noformat.
        self.assert_parse('logvol / --size=4096 --name=LVNAME --vgname=VGNAME --mkfsoptions=some,thing', 'logvol / --size=4096 --mkfsoptions="some,thing" --name=LVNAME --vgname=VGNAME\n')
        self.assert_parse_error('logvol / --size=4096 --name=LVNAME --vgname=VGNAME --mkfsoptions=some,thing --noformat')
        # Caching cannot be combined with thin provisioning.
        self.assert_parse_error('logvol / --size=4096 --name=LVNAME --vgname=VGNAME --cachepvs=pv.01,pv.02 --thin --poolname=POOLNAME')
        # An unknown cache mode reports a specific error message.
        self.assert_parse_error('logvol /home --name=home --vgname=vg --size=500 --cachesize=250 --cachepvs=pv.01,pv.02 --cachemode=bogus', KickstartParseError, 'Invalid cache mode given: bogus')
def _cut_matrices(n, symmetric=False):
repeat = ((n ** 2) - n)
if symmetric:
repeat = (repeat // 2)
mid = (repeat // 2)
for combination in itertools.islice(product([0, 1], repeat=repeat), 1, None):
cm = np.zeros([n, n], dtype=int)
if symmetric:
triu = tril = combination
else:
triu = combination[:mid]
tril = combination[mid:]
cm[np.triu_indices(n, k=1)] = triu
cm[np.tril_indices(n, k=(- 1))] = tril
(yield cm) |
def assert_bronchus_mask(bronchus_mask):
    """Sanity-check a bronchus segmentation against known shape statistics of label 1."""
    stats = sitk.LabelShapeStatisticsImageFilter()
    stats.Execute(bronchus_mask)
    size = stats.GetPhysicalSize(1)
    elongation = stats.GetElongation(1)
    roundness = stats.GetRoundness(1)
    centroid = stats.GetCentroid(1)
    # Print the measured values for easier debugging when an assert trips.
    print(size)
    print(elongation)
    print(roundness)
    print(centroid)
    assert np.allclose(size, 42823, atol=100)
    assert np.allclose(elongation, 1.41, atol=0.01)
    assert np.allclose(roundness, 0.55, atol=0.01)
    assert np.allclose(centroid[0], 8.85, atol=1)
    assert np.allclose(centroid[1], (-160), atol=1)
    assert np.allclose(centroid[2], (-457), atol=1)
# NOTE(review): the leading ``.skipif(...)`` looks like a ``@pytest.mark``
# decorator whose prefix was lost in extraction; the bare ``.dependency`` lines
# inside the generated test sources below likewise look like truncated
# ``@pytest.mark`` decorators — they are runtime strings, so left untouched.
.skipif(pytest.__version__.startswith('3.7.'), reason='pytest-dependency < 0.5 does not support session scope')
def test_dependency_in_modules(test_path):
    """Session-scoped cross-module dependencies must be honored by --order-dependencies."""
    test_path.makepyfile(test_unnamed_dep1="\n import pytest\n\n class Test1:\n def test_one(self):\n assert True\n\n .dependency(\n depends=['test_unnamed_dep2.py::test_one'],\n scope='session',\n )\n def test_two(self):\n assert True\n ", test_unnamed_dep2='\n import pytest\n\n .dependency\n def test_one():\n assert True\n\n def test_two():\n assert True\n ')
    result = test_path.runpytest('-v', '--order-dependencies')
    result.assert_outcomes(passed=4, failed=0)
    # The dependent test must run after the test it depends on.
    result.stdout.fnmatch_lines(['test_unnamed_dep1.py::Test1::test_one PASSED', 'test_unnamed_dep2.py::test_one PASSED', 'test_unnamed_dep1.py::Test1::test_two PASSED', 'test_unnamed_dep2.py::test_two PASSED'])
def _aead_cipher_supported(backend: Backend, cipher: _AEADTypes) -> bool:
    """Report whether *backend* can service *cipher* via either AEAD path."""
    if _is_evp_aead_supported_cipher(backend, cipher):
        return True
    # Fall back to the EVP_CIPHER interface, honoring FIPS restrictions.
    name = _evp_cipher_cipher_name(cipher)
    if backend._fips_enabled and name not in backend._fips_aead:
        return False
    return backend._lib.EVP_get_cipherbyname(name) != backend._ffi.NULL
class TestLayerOutputUtil():
    """Tests for LayerOutputUtil: dumping per-layer outputs of (quantized)
    Keras models to disk and fetching them in memory."""

    def test_generate_layer_output(self):
        """Generated .raw files must match the model's (sanitized) layer names
        and the saved last-layer output must equal a fresh model prediction."""
        base_model = keras_model()
        qs_obj = get_quantsim_artifacts(base_model)
        temp_folder_name = f"temp_keras_layer_output_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
        save_dir = os.path.join(os.getcwd(), temp_folder_name)
        data_points = 4
        batch_size = 3
        dataloader = DummyDataLoader(data_count=data_points, batch_size=batch_size)
        layer_output_util_obj = LayerOutputUtil(model=qs_obj.model, save_dir=save_dir)
        for (batch_num, inp_batch) in enumerate(dataloader):
            (batch_x, _) = inp_batch
            layer_output_util_obj.generate_layer_outputs(input_batch=batch_x)
        # One input file and one output folder per data point.
        assert (data_points == len(glob((save_dir + '/inputs/*.raw'))))
        assert (data_points == len(glob((save_dir + '/outputs/*'))))
        actual_layer_output_names = list()
        unmodified_actual_layer_output_names = list()
        for each_layer in qs_obj.model.layers:
            layer_output_name = each_layer.output.name
            if isinstance(each_layer, QcQuantizeWrapper):
                # Quantsim wraps layers; report the wrapped layer's output name.
                layer_output_name = each_layer.original_layer.output.name
            unmodified_actual_layer_output_names.append(layer_output_name)
            # LayerOutputUtil sanitizes names the same way for file names.
            layer_output_name = re.sub('\\W+', '_', layer_output_name)
            actual_layer_output_names.append(layer_output_name)
        saved_layer_output_list = list()
        for each_layer_output_path in glob((save_dir + '/outputs/layer_outputs_0/*.raw')):
            # BUGFIX: use os.path instead of splitting on '/' so this also
            # works with Windows path separators; strips the '.raw' suffix.
            each_layer_output_name = os.path.splitext(os.path.basename(each_layer_output_path))[0]
            saved_layer_output_list.append(each_layer_output_name)
        np.testing.assert_array_equal(np.array(sorted(actual_layer_output_names)), np.array(sorted(saved_layer_output_list)))
        cnt = 0
        n_iterations = np.ceil((data_points / batch_size))
        for (batch_num, input_batch) in enumerate(dataloader):
            (batch_x, _) = input_batch
            for inp_batch in batch_x:
                # Re-run each sample individually and compare to the dump.
                actual_output = qs_obj.model.predict(np.expand_dims(inp_batch, axis=0))
                last_layer_name = qs_obj.model.layers[(- 1)].original_layer.output.name
                last_layer_name = re.sub('\\W+', '_', last_layer_name)
                last_layer_file_name = f'{save_dir}/outputs/layer_outputs_{cnt}/{last_layer_name}.raw'
                saved_last_layer_output = np.fromfile(last_layer_file_name, dtype=np.float32)
                np.testing.assert_array_equal(actual_output[0], saved_last_layer_output)
                cnt += 1
            if ((batch_num + 1) >= n_iterations):
                break
        # BUGFIX: close the mapping file instead of leaking the handle.
        with open(os.path.join(temp_folder_name, 'LayerOutputNameMapper.json'), 'r') as mapper_file:
            saved_layer_output_name_mapper = json.load(mapper_file)
        np.testing.assert_array_equal(np.array(unmodified_actual_layer_output_names), np.array(list(saved_layer_output_name_mapper.keys())))
        for (layer_idx, unmodified_actual_layer_name) in enumerate(unmodified_actual_layer_output_names):
            assert (actual_layer_output_names[layer_idx] == saved_layer_output_name_mapper[unmodified_actual_layer_name])
            assert (actual_layer_output_names[layer_idx] == layer_output_util_obj.original_name_to_modified_name_mapper[unmodified_actual_layer_name])
        shutil.rmtree(save_dir)

    def test_get_quantsim_outputs(self):
        """get_outputs() on a quantsim model must return one entry per layer,
        keyed by the sanitized original-layer output name."""
        base_model = keras_model()
        qs_obj = get_quantsim_artifacts(base_model)
        qs_model_actual_output_names = list()
        for each_layer in qs_obj.model.layers:
            layer_output_name = each_layer.output.name
            if isinstance(each_layer, QcQuantizeWrapper):
                layer_output_name = re.sub('\\W+', '_', each_layer.original_layer.output.name)
            qs_model_actual_output_names.append(layer_output_name)
        layer_output_obj = LayerOutputUtil(qs_obj.model)
        batch_size = 3
        data_count = 4
        dataloader = DummyDataLoader(batch_size, data_count)
        layer_output_dict = layer_output_obj.get_outputs(dataloader[0][0])
        for each_actual_output_name in qs_model_actual_output_names:
            assert (each_actual_output_name in layer_output_dict.keys()), f'Output not generated for {each_actual_output_name}'
        # The final layer's captured output must equal a direct prediction.
        actual_output = qs_obj.model.predict(dataloader[0][0])
        np.testing.assert_array_equal(actual_output, layer_output_dict[qs_model_actual_output_names[(- 1)]])

    def test_get_original_model_outputs(self):
        """Same as above, but for an unquantized (original) Keras model."""
        base_model = keras_model()
        base_model_actual_output_names = list()
        for each_layer in base_model.layers:
            layer_output_name = re.sub('\\W+', '_', each_layer.output.name)
            base_model_actual_output_names.append(layer_output_name)
        layer_output_obj = LayerOutputUtil(base_model)
        batch_size = 3
        data_count = 4
        dataloader = DummyDataLoader(batch_size, data_count)
        layer_output_dict = layer_output_obj.get_outputs(dataloader[0][0])
        for each_actual_output_name in base_model_actual_output_names:
            assert (each_actual_output_name in layer_output_dict.keys()), f'Output not generated for {each_actual_output_name}'
        actual_output = base_model.predict(dataloader[0][0])
        np.testing.assert_array_equal(actual_output, layer_output_dict[base_model_actual_output_names[(- 1)]])
class UtteranceSequence(nn.Module):
    """Sequence encoder over utterance representations: an UtteranceRep
    embedding layer followed by a (bi)directional GRU or LSTM."""

    def __init__(self, config):
        super(UtteranceSequence, self).__init__()
        self.gpu = config.use_gpu
        self.droplstm = nn.Dropout(config.dp)
        self.bilstm_flag = config.bilstm
        self.lstm_layer = config.layer_num
        self.batch_size = config.batch_size
        self.wordrep = UtteranceRep(config)
        if config.ngram_embedding:
            # +1 presumably accounts for a padding index — TODO confirm.
            self.input_size = (config.word_alphabet_size + 1)
        else:
            self.input_size = config.word_embedding_dim
        # Split the hidden size across directions so the concatenated
        # bidirectional output matches config.hidden_dim.
        if self.bilstm_flag:
            self.lstm_hidden = (config.hidden_dim // 2)
        else:
            self.lstm_hidden = config.hidden_dim
        self.word_feature_extractor = config.word_feature_extractor
        if (self.word_feature_extractor == 'GRU'):
            self.lstm = nn.GRU(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
        elif (self.word_feature_extractor == 'LSTM'):
            self.lstm = nn.LSTM(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag, dropout=config.dp)
        if self.gpu:
            self.droplstm = self.droplstm.cuda()
            self.lstm = self.lstm.cuda()

    def forward(self, word_inputs, word_seq_lengths, return_all=False):
        """Encode a padded batch of utterances.

        Returns the full (dropout-applied) sequence output when
        ``return_all`` is True, otherwise the final hidden state
        (direction-concatenated when bidirectional), in the caller's
        original batch order.
        """
        # Sort by length (descending) as required by pack_padded_sequence.
        (word_seq_lengths, perm_idx) = word_seq_lengths.sort(0, descending=True)
        word_inputs = word_inputs[perm_idx]
        word_represent = self.wordrep(word_inputs, word_seq_lengths)
        packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
        # BUGFIX: nn.GRU returns (output, h_n) while nn.LSTM returns
        # (output, (h_n, c_n)); the previous unconditional `(ht, ct)` unpack
        # broke the GRU configuration.
        (lstm_out, hidden) = self.lstm(packed_words)
        ht = hidden[0] if isinstance(hidden, tuple) else hidden
        (lstm_out, _) = pad_packed_sequence(lstm_out, True)
        # Indices that restore the original (pre-sort) batch order.
        (_, unperm_idx) = perm_idx.sort(0)
        if return_all:
            return self.droplstm(lstm_out[unperm_idx])
        elif self.bilstm_flag:
            # Concatenate the last layer's forward and backward hidden states.
            ht = ht.view(self.lstm_layer, 2, len(word_seq_lengths), self.lstm_hidden)
            htht = torch.cat([ht[(- 1)][0], ht[(- 1)][1]], (- 1))
            return self.droplstm(htht[unperm_idx])
        else:
            return self.droplstm(ht[(- 1)][unperm_idx])
def test_for_with_continue_break() -> None:
    """CFG for a for-loop with break, continue, and unreachable statements."""
    src = '\n    for i in range(10):\n        if i > 5:\n            break\n            print(unreachable)\n        elif i > 2:\n            continue\n            print(k)\n    print(i)\n    '
    cfg = build_cfg(src)
    # Blocks: loop header, target, conditions, break/continue, and the dead
    # print() statements after break/continue.
    expected_blocks = [['range(10)'], ['i'], ['i > 5'], ['break'], ['print(i)'], [], ['i > 2'], ['continue'], ['print(k)']]
    # Edges: normal loop flow plus break jumping past the loop and continue
    # returning to the header; dead code still gets edges out.
    expected_edges = [[['range(10)'], ['i']], [['i'], ['i > 5']], [['i > 5'], ['break']], [['break'], ['print(i)']], [['print(i)'], []], [['i > 5'], ['i > 2']], [['i > 2'], ['continue']], [['continue'], ['i']], [['i > 2'], ['print(k)']], [['print(k)'], ['i']], [['i'], ['print(i)']]]
    assert _extract_blocks(cfg) == expected_blocks
    assert _extract_edges(cfg) == expected_edges
# BUGFIX: the '@patch.object' prefix was stripped from this decorator,
# leaving a bare tuple expression; restored so run_pipeline_steps is mocked.
@patch.object(StepsRunner, 'run_pipeline_steps')
def test_run_step_group_pass(mock_run_steps):
    """run_step_group must forward the named group's steps verbatim to
    run_pipeline_steps."""
    StepsRunner(get_valid_test_pipeline(), Context()).run_step_group(step_group_name='sg1')
    mock_run_steps.assert_called_once_with(steps=['step1', 'step2', {'name': 'step3key1', 'in': {'in3k1_1': 'v3k1', 'in3k1_2': 'v3k2'}}, 'step4'])
# BUGFIX: the '@pytest.mark' prefix was stripped from this decorator,
# leaving an invalid attribute expression; restored.
@pytest.mark.parametrize(['tp', 'alias'], [(list, List), (set, Set), (frozenset, FrozenSet), (collections.Counter, typing.Counter), (collections.deque, typing.Deque)])
def test_generic_concrete_one_arg(tp, alias):
    """A one-argument generic and its typing alias normalize identically:
    unparameterized forms default to Any, parameterized forms keep the arg."""
    assert_normalize(tp, tp, [nt_zero(Any)])
    assert_normalize(alias, tp, [nt_zero(Any)])
    # Builtin generics (list[int], ...) only exist on Python 3.9+.
    if HAS_STD_CLASSES_GENERICS:
        assert_normalize(tp[int], tp, [nt_zero(int)])
    assert_normalize(alias[int], tp, [nt_zero(int)])
class MultiProcessTestBase(unittest.TestCase):
    """Base class for tests that spawn a process group: sets up the
    distributed rendezvous env vars and deterministic-algorithm flags."""

    # BUGFIX(review): the stray '_and_log' line was residue of a stripped
    # decorator; restored as torchrec's @seed_and_log — confirm against upstream.
    @seed_and_log
    def setUp(self) -> None:
        os.environ['MASTER_ADDR'] = str('localhost')
        os.environ['MASTER_PORT'] = str(get_free_port())
        os.environ['GLOO_DEVICE_TRANSPORT'] = 'TCP'
        os.environ['NCCL_SOCKET_IFNAME'] = 'lo'
        # Force reproducible kernels for cross-rank comparisons.
        torch.use_deterministic_algorithms(True)
        if torch.cuda.is_available():
            torch.backends.cudnn.allow_tf32 = False
            torch.backends.cuda.matmul.allow_tf32 = False
            # Required by CUDA for deterministic cuBLAS behavior.
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'

    def tearDown(self) -> None:
        torch.use_deterministic_algorithms(False)
        del os.environ['GLOO_DEVICE_TRANSPORT']
        del os.environ['NCCL_SOCKET_IFNAME']
        if torch.cuda.is_available():
            os.unsetenv('CUBLAS_WORKSPACE_CONFIG')
        super().tearDown()

    def _run_multi_process_test(self, *, callable: Callable[(..., None)], world_size: int=2, **kwargs) -> None:
        """Run *callable* in `world_size` forkserver processes, passing
        rank/world_size plus **kwargs, and assert every rank exits cleanly."""
        ctx = multiprocessing.get_context('forkserver')
        processes = []
        for rank in range(world_size):
            # kwargs is pickled at p.start(), so mutating it per-rank is safe
            # here because start() happens before the next iteration's update.
            kwargs['rank'] = rank
            kwargs['world_size'] = world_size
            p = ctx.Process(target=callable, kwargs=kwargs)
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
            self.assertEqual(0, p.exitcode)

    def _run_multi_process_test_per_rank(self, *, callable: Callable[(..., None)], world_size: int, kwargs_per_rank: List[Dict[(str, Any)]]) -> None:
        """Like _run_multi_process_test, but each rank gets its own kwargs
        from kwargs_per_rank[rank]."""
        ctx = multiprocessing.get_context('forkserver')
        processes = []
        for rank in range(world_size):
            kwargs = {}
            kwargs['rank'] = rank
            kwargs['world_size'] = world_size
            kwargs.update(kwargs_per_rank[rank])
            p = ctx.Process(target=callable, kwargs=kwargs)
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
            self.assertEqual(0, p.exitcode)
def test_zoom_range_limit():
    """setLimits must clamp requested view ranges to [minRange, maxRange]."""
    vb = pg.ViewBox()
    vb.setLimits(minXRange=5, maxXRange=10, minYRange=5, maxYRange=10)
    # A 7x7 request already lies within [5, 10], so it is applied unchanged.
    requested = pg.QtCore.QRect((- 15), (- 15), 7, 7)
    vb.setRange(requested, padding=0)
    expected = [[requested.left(), requested.right()], [requested.top(), requested.bottom()]]
    assert vb.getState()['viewRange'] == expected
    # A 17x17 request exceeds maxRange=10; the box shrinks it symmetrically
    # (3 units trimmed from each side) while keeping the same center.
    requested = pg.QtCore.QRect((- 15), (- 15), 17, 17)
    expected = [[(requested.left() + 3), (requested.right() - 3)], [(requested.top() + 3), (requested.bottom() - 3)]]
    vb.setRange(requested, padding=0)
    state = vb.getState()
    assert state['viewRange'] == expected
    assert state['targetRange'] == expected
class IoLexer(RegexLexer):
    """Pygments lexer for the Io programming language."""
    name = 'Io'
    # BUGFIX: the url string literal was truncated to an unterminated quote;
    # restored to the language's homepage.
    url = 'https://iolanguage.org'
    filenames = ['*.io']
    aliases = ['io']
    mimetypes = ['text/x-iosrc']
    version_added = '0.10'
    # 'root' covers whitespace, //, # and /* */ comments, nestable /+ +/
    # comments, strings, operators, keywords, builtins, names and numbers;
    # 'nestedcomment' implements the recursive /+ ... +/ form via #push/#pop.
    tokens = {'root': [('\\n', Whitespace), ('\\s+', Whitespace), ('//(.*?)$', Comment.Single), ('#(.*?)$', Comment.Single), ('/(\\\\\\n)?[*](.|\\n)*?[*](\\\\\\n)?/', Comment.Multiline), ('/\\+', Comment.Multiline, 'nestedcomment'), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String), ('::=|:=|=|\\(|\\)|;|,|\\*|-|\\+|>|<||!|/|\\||\\^|\\.|%|&|\\[|\\]|\\{|\\}', Operator), ('(clone|do|doFile|doString|method|for|if|else|elseif|then)\\b', Keyword), ('(nil|false|true)\\b', Name.Constant), ('(Object|list|List|Map|args|Sequence|Coroutine|File)\\b', Name.Builtin), ('[a-zA-Z_]\\w*', Name), ('(\\d+\\.?\\d*|\\d*\\.\\d+)([eE][+-]?[0-9]+)?', Number.Float), ('\\d+', Number.Integer)], 'nestedcomment': [('[^+/]+', Comment.Multiline), ('/\\+', Comment.Multiline, '#push'), ('\\+/', Comment.Multiline, '#pop'), ('[+/]', Comment.Multiline)]}
# BUGFIX: the decorator was mangled to a bare '(frozen=True)'; restored to the
# standard frozen-dataclass form used for bit-packable configuration values.
@dataclasses.dataclass(frozen=True)
class AmmoPickupConfiguration(bitpacking.BitPackValue):
    """Configured state of every ammo pickup, bit-packable as a delta against
    a reference configuration."""

    # Maps each ammo pickup definition to its configured state.
    pickups_state: dict[AmmoPickupDefinition, AmmoPickupState]

    def __post_init__(self):
        # Validate each state against its pickup definition up-front.
        for (ammo, state) in self.pickups_state.items():
            state.check_consistency(ammo)

    def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
        """Encode only states differing from metadata['reference']; each entry
        is a changed-flag plus, when set, the state's own encoding."""
        default: AmmoPickupConfiguration = metadata['reference']
        assert (list(self.pickups_state.keys()) == list(default.pickups_state.keys()))
        for (ammo, this) in self.pickups_state.items():
            reference = default.pickups_state[ammo]
            is_different = (this != reference)
            (yield from bitpacking.encode_bool(is_different))
            if is_different:
                (yield from this.bit_pack_encode({'ammo': ammo}))

    # BUGFIX: restored the @classmethod decorator lost from this alternate
    # constructor (it takes `cls`, not `self`).
    @classmethod
    def bit_pack_unpack(cls, decoder: bitpacking.BitPackDecoder, metadata):
        """Inverse of bit_pack_encode: fall back to the reference state for
        every entry whose changed-flag is unset."""
        default: AmmoPickupConfiguration = metadata['reference']
        pickups_state = {}
        for (ammo, default_state) in default.pickups_state.items():
            is_different = bitpacking.decode_bool(decoder)
            if is_different:
                pickups_state[ammo] = AmmoPickupState.bit_pack_unpack(decoder, {'ammo': ammo})
            else:
                pickups_state[ammo] = default_state
        return cls(pickups_state)

    def as_json(self) -> dict:
        # NOTE(review): `state.as_json` is accessed without parentheses —
        # presumably a property on AmmoPickupState; confirm.
        return {'pickups_state': {ammo.name: state.as_json for (ammo, state) in self.pickups_state.items()}}

    # BUGFIX: restored the @classmethod decorator (it takes `cls`).
    @classmethod
    def from_json(cls, value: dict, game: RandovaniaGame) -> AmmoPickupConfiguration:
        """Build a configuration from its JSON form, resolving pickup names
        through the game's pickup database."""
        pickup_database = default_database.pickup_database_for_game(game)
        return cls(pickups_state={pickup_database.ammo_pickups[name]: AmmoPickupState.from_json(state) for (name, state) in value['pickups_state'].items()})

    def replace_state_for_ammo(self, ammo: AmmoPickupDefinition, state: AmmoPickupState) -> AmmoPickupConfiguration:
        return self.replace_states({ammo: state})

    def replace_states(self, new_states: dict[AmmoPickupDefinition, AmmoPickupState]) -> AmmoPickupConfiguration:
        """Return a copy with the given states replaced (self is frozen)."""
        pickups_state = copy.copy(self.pickups_state)
        for (pickup, state) in new_states.items():
            pickups_state[pickup] = state
        return AmmoPickupConfiguration(pickups_state)
class nnUNetTrainerDA5(nnUNetTrainer):
    """nnU-Net trainer variant with the heavier 'DA5' data-augmentation
    pipeline (extra filters, blanking, brightness-gradient and gamma ops)."""

    def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
        """Pick rotation ranges, dummy-2D augmentation, mirroring axes and the
        enlarged initial patch size based on patch dimensionality/anisotropy."""
        patch_size = self.configuration_manager.patch_size
        dim = len(patch_size)
        if (dim == 2):
            do_dummy_2d_data_aug = False
            # Strongly anisotropic 2D patches only get small in-plane rotations.
            if ((max(patch_size) / min(patch_size)) > 1.5):
                rotation_for_DA = {'x': (((((- 15.0) / 360) * 2.0) * np.pi), (((15.0 / 360) * 2.0) * np.pi)), 'y': (0, 0), 'z': (0, 0)}
            else:
                rotation_for_DA = {'x': (((((- 180.0) / 360) * 2.0) * np.pi), (((180.0 / 360) * 2.0) * np.pi)), 'y': (0, 0), 'z': (0, 0)}
            mirror_axes = (0, 1)
        elif (dim == 3):
            # Treat near-2D volumes as 2D for spatial augmentation.
            do_dummy_2d_data_aug = ((max(patch_size) / patch_size[0]) > ANISO_THRESHOLD)
            if do_dummy_2d_data_aug:
                rotation_for_DA = {'x': (((((- 180.0) / 360) * 2.0) * np.pi), (((180.0 / 360) * 2.0) * np.pi)), 'y': (0, 0), 'z': (0, 0)}
            else:
                rotation_for_DA = {'x': (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi)), 'y': (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi)), 'z': (((((- 30.0) / 360) * 2.0) * np.pi), (((30.0 / 360) * 2.0) * np.pi))}
            mirror_axes = (0, 1, 2)
        else:
            raise RuntimeError()
        # Enlarge the patch so rotation/scaling never samples outside it.
        initial_patch_size = get_patch_size(patch_size[(- dim):], *rotation_for_DA.values(), (0.7, 1.43))
        if do_dummy_2d_data_aug:
            initial_patch_size[0] = patch_size[0]
        self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}')
        self.inference_allowed_mirroring_axes = mirror_axes
        return (rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes)

    # BUGFIX: this method takes no `self`; restored the missing @staticmethod
    # decorator (matching the base nnUNetTrainer's definition).
    @staticmethod
    def get_training_transforms(patch_size: Union[(np.ndarray, Tuple[int])], rotation_for_DA: dict, deep_supervision_scales: Union[(List, Tuple)], mirror_axes: Tuple[(int, ...)], do_dummy_2d_data_aug: bool, order_resampling_data: int=3, order_resampling_seg: int=1, border_val_seg: int=(- 1), use_mask_for_norm: List[bool]=None, is_cascaded: bool=False, foreground_labels: Union[(Tuple[(int, ...)], List[int])]=None, regions: List[Union[(List[int], Tuple[(int, ...)], int)]]=None, ignore_label: int=None) -> AbstractTransform:
        """Build the DA5 training transform pipeline (spatial, intensity,
        blanking, mirroring, cascade and deep-supervision transforms)."""
        # Axes whose extents match can be rotated-by-90 / transposed freely.
        matching_axes = np.array([sum([(i == j) for j in patch_size]) for i in patch_size])
        valid_axes = list(np.where((matching_axes == np.max(matching_axes)))[0])
        tr_transforms = []
        if do_dummy_2d_data_aug:
            ignore_axes = (0,)
            tr_transforms.append(Convert3DTo2DTransform())
            patch_size_spatial = patch_size[1:]
        else:
            patch_size_spatial = patch_size
            ignore_axes = None
        tr_transforms.append(SpatialTransform(patch_size_spatial, patch_center_dist_from_border=None, do_elastic_deform=False, do_rotation=True, angle_x=rotation_for_DA['x'], angle_y=rotation_for_DA['y'], angle_z=rotation_for_DA['z'], p_rot_per_axis=0.5, do_scale=True, scale=(0.7, 1.43), border_mode_data='constant', border_cval_data=0, order_data=order_resampling_data, border_mode_seg='constant', border_cval_seg=(- 1), order_seg=order_resampling_seg, random_crop=False, p_el_per_sample=0.2, p_scale_per_sample=0.2, p_rot_per_sample=0.4, independent_scale_for_each_axis=True))
        if do_dummy_2d_data_aug:
            tr_transforms.append(Convert2DTo3DTransform())
        if np.any((matching_axes > 1)):
            tr_transforms.append(Rot90Transform((0, 1, 2, 3), axes=valid_axes, data_key='data', label_key='seg', p_per_sample=0.5))
        if np.any((matching_axes > 1)):
            tr_transforms.append(TransposeAxesTransform(valid_axes, data_key='data', label_key='seg', p_per_sample=0.5))
        tr_transforms.append(OneOfTransform([MedianFilterTransform((2, 8), same_for_each_channel=False, p_per_sample=0.2, p_per_channel=0.5), GaussianBlurTransform((0.3, 1.5), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5)]))
        tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))
        tr_transforms.append(BrightnessTransform(0, 0.5, per_channel=True, p_per_sample=0.1, p_per_channel=0.5))
        tr_transforms.append(OneOfTransform([ContrastAugmentationTransform(contrast_range=(0.5, 2), preserve_range=True, per_channel=True, data_key='data', p_per_sample=0.2, p_per_channel=0.5), ContrastAugmentationTransform(contrast_range=(0.5, 2), preserve_range=False, per_channel=True, data_key='data', p_per_sample=0.2, p_per_channel=0.5)]))
        tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.25, 1), per_channel=True, p_per_channel=0.5, order_downsample=0, order_upsample=3, p_per_sample=0.15, ignore_axes=ignore_axes))
        # Gamma is applied twice (inverted image) to match the DA5 recipe.
        tr_transforms.append(GammaTransform((0.7, 1.5), invert_image=True, per_channel=True, retain_stats=True, p_per_sample=0.1))
        tr_transforms.append(GammaTransform((0.7, 1.5), invert_image=True, per_channel=True, retain_stats=True, p_per_sample=0.1))
        if ((mirror_axes is not None) and (len(mirror_axes) > 0)):
            tr_transforms.append(MirrorTransform(mirror_axes))
        tr_transforms.append(BlankRectangleTransform([[max(1, (p // 10)), (p // 3)] for p in patch_size], rectangle_value=np.mean, num_rectangles=(1, 5), force_square=False, p_per_sample=0.4, p_per_channel=0.5))
        tr_transforms.append(BrightnessGradientAdditiveTransform((lambda x, y: np.exp(np.random.uniform(np.log((x[y] // 6)), np.log(x[y])))), ((- 0.5), 1.5), max_strength=(lambda x, y: (np.random.uniform((- 5), (- 1)) if (np.random.uniform() < 0.5) else np.random.uniform(1, 5))), mean_centered=False, same_for_all_channels=False, p_per_sample=0.3, p_per_channel=0.5))
        tr_transforms.append(LocalGammaTransform((lambda x, y: np.exp(np.random.uniform(np.log((x[y] // 6)), np.log(x[y])))), ((- 0.5), 1.5), (lambda : (np.random.uniform(0.01, 0.8) if (np.random.uniform() < 0.5) else np.random.uniform(1.5, 4))), same_for_all_channels=False, p_per_sample=0.3, p_per_channel=0.5))
        tr_transforms.append(SharpeningTransform(strength=(0.1, 1), same_for_each_channel=False, p_per_sample=0.2, p_per_channel=0.5))
        if ((use_mask_for_norm is not None) and any(use_mask_for_norm)):
            tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]], mask_idx_in_seg=0, set_outside_to=0))
        tr_transforms.append(RemoveLabelTransform((- 1), 0))
        if is_cascaded:
            if (ignore_label is not None):
                raise NotImplementedError('ignore label not yet supported in cascade')
            assert (foreground_labels is not None), 'We need all_labels for cascade augmentations'
            use_labels = [i for i in foreground_labels if (i != 0)]
            tr_transforms.append(MoveSegAsOneHotToData(1, use_labels, 'seg', 'data'))
            tr_transforms.append(ApplyRandomBinaryOperatorTransform(channel_idx=list(range((- len(use_labels)), 0)), p_per_sample=0.4, key='data', strel_size=(1, 8), p_per_label=1))
            tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(channel_idx=list(range((- len(use_labels)), 0)), key='data', p_per_sample=0.2, fill_with_other_class_p=0, dont_do_if_covers_more_than_x_percent=0.15))
        tr_transforms.append(RenameTransform('seg', 'target', True))
        if (regions is not None):
            tr_transforms.append(ConvertSegmentationToRegionsTransform(((list(regions) + [ignore_label]) if (ignore_label is not None) else regions), 'target', 'target'))
        if (deep_supervision_scales is not None):
            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target', output_key='target'))
        tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
        tr_transforms = Compose(tr_transforms)
        return tr_transforms
# BUGFIX(review): the registration decorator was mangled to a bare
# '_module()'; restored to mmcv's standard registry call — confirm the
# registry name against the original file's imports.
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum schedule: momentum moves opposite to a cyclic LR
    (down while LR rises, back up while LR falls), per the 1cycle policy."""

    def __init__(self, by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4, **kwargs):
        # Normalize target_ratio to a (up_ratio, down_ratio) pair.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0)) if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert (len(target_ratio) == 2), '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), '"step_ratio_up" must be in range [0,1)'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        # Filled in before_run: [start, end, cycle_len, start_ratio, end_ratio].
        self.momentum_phases = []
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        """Split each cycle into an 'up' and a 'down' phase by step_ratio_up."""
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * max_iter_per_phase))
        self.momentum_phases.append([0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
        self.momentum_phases.append([iter_up_phase, max_iter_per_phase, max_iter_per_phase, self.target_ratio[0], self.target_ratio[1]])

    def get_momentum(self, runner, base_momentum):
        """Cosine-anneal momentum within whichever phase the (cycle-local)
        iteration falls into; the two phases cover the full cycle."""
        curr_iter = runner.iter
        for (start_iter, end_iter, max_iter_per_phase, start_ratio, end_ratio) in self.momentum_phases:
            # Map the global iteration into the current cycle.
            curr_iter %= max_iter_per_phase
            if (start_iter <= curr_iter < end_iter):
                progress = (curr_iter - start_iter)
                return annealing_cos((base_momentum * start_ratio), (base_momentum * end_ratio), (progress / (end_iter - start_iter)))
class SystemSymPy(with_metaclass(System, SystemBase)):
    """Constraint-solver backend proxy using SymPy + SciPy.

    Registered via the `System` metaclass; delegates algorithm state to
    `_AlgoType` and actual solving to `_SystemSymPy`.
    """
    # Backend identifier used by the System registry.
    _id = 2

    def __init__(self, obj):
        super(SystemSymPy, self).__init__(obj)
        # Attach the algorithm property group to the document object.
        _AlgoType.attach(obj)

    def onDetach(self, obj):
        # True: also remove the attached properties from obj.
        _AlgoType.detach(obj, True)

    # NOTE(review): takes `cls` but carries no visible @classmethod decorator
    # here — likely applied by the `System` metaclass or lost in extraction;
    # confirm against the original source.
    def getName(cls):
        return 'SymPy + SciPy'

    def isConstraintSupported(self, cstrName):
        # NOTE(review): getattr without a default raises AttributeError for
        # unknown constraints instead of returning False — confirm intended.
        return (_MetaType.isConstraintSupported(cstrName) or getattr(_SystemSymPy, ('add' + cstrName)))

    def getSystem(self, obj):
        # Build a concrete solver bound to this backend and obj's algo proxy.
        return _SystemSymPy(self, _AlgoType.getProxy(obj))

    def isDisabled(self, _obj):
        return False

    def onChanged(self, obj, prop):
        # Forward property changes to the algorithm group first.
        _AlgoType.onChanged(obj, prop)
        super(SystemSymPy, self).onChanged(obj, prop)
def get_requirement_files(allowlist: 'SectionProxy') -> Iterator[Path]:
    """Yield the requirement-file paths named in the allowlist section.

    Lines come from the 'requirements' key (newline-separated), are resolved
    relative to 'requirements_path' (default: cwd), support '#' comments and
    glob patterns containing '*'.
    """
    try:
        requirements_path = Path(allowlist['requirements_path'])
    except KeyError:
        requirements_path = Path()
    try:
        requirements_lines = allowlist['requirements'].split('\n')
    except KeyError:
        requirements_lines = []
    for raw_line in requirements_lines:
        line = raw_line.strip()
        # Skip blanks and full-line comments.
        if not line or line.startswith('#'):
            continue
        # Drop any trailing inline comment.
        (line, *_) = line.split('#', maxsplit=1)
        if '*' in line:
            # Glob pattern: expand against the requirements directory.
            for file in sorted(requirements_path.glob(line.strip())):
                candidate = requirements_path / file.name
                logger.info('considering %s', candidate)
                yield candidate
        else:
            candidate = requirements_path / line.strip()
            logger.info('considering %s', candidate)
            yield candidate
def upscale(scale, dir, do_face_enhance):
    """Upscale the images in *dir* in place via the Real-ESRGAN CLI.

    scale: numeric upscale factor; anything non-numeric falls back to 2.
    do_face_enhance: pass --face_enhance to the CLI when truthy.
    """
    command = 'python inference_realesrgan.py -n RealESRGAN_x4plus --suffix u -s '
    # BUGFIX: catch only conversion errors — the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        float(scale)
    except (TypeError, ValueError):
        command += '2'
    else:
        command += str(scale)
    command += (((' -i ..//' + dir) + ' -o ..//') + dir)
    if do_face_enhance:
        command += ' --face_enhance'
    cwd = os.getcwd()
    print(('Invoking Real-ESRGAN: ' + command))
    # The CLI lives in the Real-ESRGAN checkout next to cwd; suppress its
    # stderr chatter.
    if ((sys.platform == 'win32') or (os.name == 'nt')):
        subprocess.call(shlex.split(command), cwd=(cwd + '\\Real-ESRGAN'), stderr=subprocess.DEVNULL)
    else:
        subprocess.call(shlex.split(command), cwd=(cwd + '/Real-ESRGAN'), stderr=subprocess.DEVNULL)
class fileChooserDialog():
    """Deprecated wrapper around open_file_chooser_dialog().

    Kept only for backward compatibility; call open_file_chooser_dialog()
    directly in new code.
    """

    def __init__(self, title='Choose a file', multiple=False):
        # stacklevel=2 points the warning at the caller, not this wrapper.
        warnings.warn('Deprecated fileChooserDialog class called', DeprecationWarning, stacklevel=2)
        self.inputfiles = open_file_chooser_dialog(title=title, multiple=multiple)

    def getFiles(self):
        """Return the selection captured when the dialog was shown."""
        return self.inputfiles
class OrthographicHarker():
    """Harker–O'Leary orthographic normal integration: recover a depth map
    from a normal field by solving a Sylvester equation."""

    def __init__(self, data):
        self.method_name = 'orthographic_harker'
        print('running {}...'.format(self.method_name))
        method_start = time.time()
        (H, W) = data.mask.shape
        # Target gradients from the normal field n = (nx, ny, nz).
        zy_hat = ((- data.n[(..., 0)]) / data.n[(..., 2)])
        zx_hat = ((- data.n[(..., 1)]) / data.n[(..., 2)])
        # Central-difference operators along each image axis.
        Dy = ((- generate_discrete_diff(H)) / (2 * data.step_size))
        Dx = (generate_discrete_diff(W) / (2 * data.step_size))
        # BUGFIX: the matrix products lost their '@' operators in extraction;
        # restored the normal-equation form A Z + Z B = C.
        A = (Dy.T @ Dy)
        B = (Dx.T @ Dx)
        C = ((Dy.T @ zy_hat) + (zx_hat @ Dx))
        # solve_sylvester needs dense matrices.
        A = A.toarray()
        B = B.toarray()
        solver_start = time.time()
        self.depth_map = (- solve_sylvester(A, B, C))
        solver_end = time.time()
        self.solver_running_time = (solver_end - solver_start)
        method_end = time.time()
        self.total_runtime = (method_end - method_start)
        # Build a mesh for visualization from the masked depth map.
        self.facets = construct_facets_from_depth_map_mask(data.mask)
        self.vertices = construct_vertices_from_depth_map_and_mask(data.mask, self.depth_map, data.step_size)
        self.surface = pv.PolyData(self.vertices, self.facets)
class Effect1019(BaseEffect):
    """Large Autocannon Specialization: per-skill-level damage multiplier
    bonus for modules requiring that skill (pyfa-style effect)."""
    # Effect category consumed by the fitting engine.
    type = 'passive'

    # NOTE(review): defined without `self` — pyfa effect handlers are plain
    # functions in the upstream project; confirm whether this should carry
    # @staticmethod in this class-based form.
    def handler(fit, skill, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Autocannon Specialization')), 'damageMultiplier', (skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level), **kwargs)
def _skip() -> None:
    """Skip the current song and fast-forward its timestamp so it counts as
    fully played; no-op on the timestamp if no song is current."""
    player.skip()
    redis.put('backup_playing', False)
    try:
        current_song = models.CurrentSong.objects.get()
    except models.CurrentSong.DoesNotExist:
        return
    # Backdate creation by the song's duration so it appears finished.
    current_song.created = timezone.now() - datetime.timedelta(seconds=current_song.duration)
    current_song.save()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.