code stringlengths 281 23.7M |
|---|
class DynamicPatcher(MetaPathFinder, Loader):
    """Import hook that installs the patcher's fake modules into
    ``sys.modules`` while active, and restores the real ones on cleanup().
    """
    def __init__(self, patcher: Patcher) -> None:
        self._patcher = patcher
        # Real modules displaced from sys.modules, keyed by name, so that
        # cleanup() can put them back.
        self.sysmodules = {}
        self.modules = self._patcher.fake_modules
        # Names checked by needs_patch() that we do NOT fake; evicted from
        # sys.modules on cleanup so later imports re-resolve cleanly.
        self._loaded_module_names: Set[str] = set()
        # Stash any real modules we are about to shadow, then install fakes.
        for name in self.modules:
            if (self.needs_patch(name) and (name in sys.modules)):
                self.sysmodules[name] = sys.modules[name]
                del sys.modules[name]
        for (name, module) in self.modules.items():
            sys.modules[name] = module
    def cleanup(self) -> None:
        """Restore the displaced real modules and undo import side effects."""
        for module_name in self.sysmodules:
            sys.modules[module_name] = self.sysmodules[module_name]
        # Re-execute modules the patcher marked for reload (if still imported).
        for module in self._patcher.modules_to_reload:
            if (module.__name__ in sys.modules):
                reload(module)
        reloaded_module_names = [module.__name__ for module in self._patcher.modules_to_reload]
        # Drop modules first imported while patched, unless just reloaded.
        for name in self._loaded_module_names:
            if ((name in sys.modules) and (name not in reloaded_module_names)):
                del sys.modules[name]
    def needs_patch(self, name: str) -> bool:
        """Return True if module *name* should be replaced by a fake module."""
        if (name not in self.modules):
            self._loaded_module_names.add(name)
            return False
        # Already patched when the installed module is exactly our fake type.
        if ((name in sys.modules) and (type(sys.modules[name]) is self.modules[name])):
            return False
        return True
    def find_spec(self, fullname: str, path: Optional[Sequence[Union[(bytes, str)]]], target: Optional[ModuleType]=None) -> Optional[ModuleSpec]:
        """MetaPathFinder hook: claim the modules we want to fake."""
        if self.needs_patch(fullname):
            return ModuleSpec(fullname, self)
        return None
    def load_module(self, fullname: str) -> ModuleType:
        """Loader hook: hand out (and register) the fake module."""
        sys.modules[fullname] = self.modules[fullname]
        return self.modules[fullname]
def _problem_to_zz(problem_graph: nx.Graph, qubits: Sequence[cirq.Qid], gamma: float):
    """Yield one ZZ interaction per weighted edge of *problem_graph*.

    Each edge (u, v, w) produces a ZZPowGate on the corresponding qubits
    with exponent 2 * gamma * w / pi and a global shift of -0.5.
    """
    for node_a, node_b, weight in problem_graph.edges.data('weight'):
        exponent = 2 * gamma * weight / np.pi
        gate = cirq.ZZPowGate(exponent=exponent, global_shift=-0.5)
        yield gate.on(qubits[node_a], qubits[node_b])
def extract_file(path, output_dir='.'):
    """Extract the archive at *path* into *output_dir* and return *output_dir*.

    Supported formats: .zip, .tar, .tar.gz/.tgz and .tar.bz2/.tbz.

    Raises:
        ValueError: if the file extension is not a supported archive type.
    """
    _FILETYPE_TO_OPENER_MODE_MAPPING = {'.zip': (zipfile.ZipFile, 'r'), '.tar.gz': (tarfile.open, 'r:gz'), '.tgz': (tarfile.open, 'r:gz'), '.tar': (tarfile.open, 'r:'), '.tar.bz2': (tarfile.open, 'r:bz2'), '.tbz': (tarfile.open, 'r:bz2')}
    # Resolve the archive path before chdir so a path relative to the
    # caller's cwd still opens correctly (the original resolved it only
    # for extension parsing, then opened the raw path after chdir).
    path = os.path.abspath(path)
    # Derive the (possibly compound) extension from the file NAME only;
    # using the absolute path here would break on dotted directory names.
    extension = '.' + '.'.join(os.path.basename(path).split('.')[1:])
    try:
        (opener, mode) = _FILETYPE_TO_OPENER_MODE_MAPPING[extension]
    except KeyError:
        raise ValueError('unsupported archive type: {!r}'.format(extension))
    cwd = os.getcwd()
    os.chdir(output_dir)
    try:
        # NOTE(review): extractall() on an untrusted archive can write
        # outside the target directory (path traversal) — callers must
        # trust *path*.
        with opener(path, mode) as f:
            f.extractall()
    finally:
        # Always restore the working directory, even if extraction fails.
        os.chdir(cwd)
    return output_dir
class Perc(_Numeric):
    """A numeric percentage value, accepted as a number or a string
    such as '42%'."""

    def to_py(self, value: Union[(float, int, str, _UnsetNone)]) -> Union[(float, int, _UnsetNone)]:
        """Convert *value* to a number, validating type and bounds.

        Strings may carry a trailing '%'. Falsy values map to None.
        """
        self._basic_py_validation(value, (float, int, str))
        if isinstance(value, usertypes.Unset):
            return value
        if not value:
            return None
        if isinstance(value, str):
            value = value.rstrip('%')
            try:
                value = float(value)
            except ValueError:
                raise configexc.ValidationError(value, 'must be a valid number!')
        self._validate_bounds(value, suffix='%')
        return value

    def to_str(self, value: Union[(None, float, int, str)]) -> str:
        """Render *value* back into its configuration string form."""
        if value is None:
            return ''
        if isinstance(value, str):
            return value
        return f'{value}%'
class ProgressBar(object):
    """Text-mode progress bar rendered from a list of widgets.

    The bar redraws on *fd* whenever the integer percentage changes, and
    tracks the terminal width (via SIGWINCH where available) so the line
    always fills the screen.
    """
    def __init__(self, maxval=100, widgets=default_widgets, term_width=None, fd=sys.stdout):
        # NOTE(review): assert is stripped under `python -O`; real input
        # validation would raise ValueError instead.
        assert (maxval > 0), 'maxval <= 0'
        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.signal_set = False
        if (term_width is None):
            try:
                # Probe the terminal size once, then track future resizes.
                self.handle_resize(None, None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except:
                # Bare except is deliberate best-effort: fall back to a
                # fixed width when fd is not a tty or SIGWINCH is missing.
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        self.prev_percentage = (- 1)
        self.start_time = None
        self.seconds_elapsed = 0
    def handle_resize(self, signum, frame):
        # Query the terminal window size via ioctl; rows (h) are unused.
        (h, w) = array('h', ioctl(self.fd, termios.TIOCGWINSZ, ('\x00' * 8)))[:2]
        self.term_width = w
    def percentage(self):
        """Return current progress as a float in [0, 100]."""
        return ((self.currval * 100.0) / self.maxval)
    def _format_widgets(self):
        # Render fixed-width widgets first; width-filling widgets are
        # resolved afterwards, once the remaining space is known.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for (i, w) in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)
                hfill_inds.append(i)
                num_hfill += 1
            elif isinstance(w, str):
                r.append(w)
                currwidth += len(w)
            else:
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
        for iw in hfill_inds:
            # NOTE(review): under Python 3 this is float division — the
            # hfill widgets presumably truncate the width; confirm.
            r[iw] = r[iw].update(self, ((self.term_width - currwidth) / num_hfill))
        return r
    def _format_line(self):
        # Pad to full terminal width so stale characters are overwritten.
        return ''.join(self._format_widgets()).ljust(self.term_width)
    def _need_update(self):
        # Only redraw when the integer percentage actually changes.
        return (int(self.percentage()) != int(self.prev_percentage))
    def update(self, value):
        """Advance the bar to *value* and redraw if the percentage moved."""
        assert (0 <= value <= self.maxval)
        self.currval = value
        if ((not self._need_update()) or self.finished):
            return
        if (not self.start_time):
            self.start_time = time.time()
        self.seconds_elapsed = (time.time() - self.start_time)
        self.prev_percentage = self.percentage()
        if (value != self.maxval):
            # Carriage return keeps redrawing over the same line.
            self.fd.write((self._format_line() + '\r'))
        else:
            self.finished = True
            self.fd.write((self._format_line() + '\n'))
    def start(self):
        """Draw the 0% state and return self for chaining."""
        self.update(0)
        return self
    def finish(self):
        """Draw the 100% state and detach the SIGWINCH handler."""
        self.update(self.maxval)
        if self.signal_set:
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
class TOggVorbis(TestCase, TOggFileTypeMixin):
    """Tests for mutagen's OggVorbis support against a temp copy of
    data/empty.ogg (plus a few other fixture files in DATA_DIR).

    Note: uses the legacy unittest aliases (failUnless & co.) consistently
    with the rest of this suite.
    """
    Kind = OggVorbis
    def setUp(self):
        # Each test works on its own temp copy so mutation is safe.
        self.filename = get_temp_copy(os.path.join(DATA_DIR, 'empty.ogg'))
        self.audio = self.Kind(self.filename)
    def tearDown(self):
        os.unlink(self.filename)
    def test_module_delete(self):
        delete(self.filename)
        self.scan_file()
        self.failIf(OggVorbis(self.filename).tags)
    def test_bitrate(self):
        self.failUnlessEqual(112000, self.audio.info.bitrate)
    def test_channels(self):
        self.failUnlessEqual(2, self.audio.info.channels)
    def test_sample_rate(self):
        self.failUnlessEqual(44100, self.audio.info.sample_rate)
    def test_invalid_not_first(self):
        # An info page not flagged 'first' must be rejected.
        with open(self.filename, 'rb') as h:
            page = OggPage(h)
        page.first = False
        self.failUnlessRaises(error, OggVorbisInfo, BytesIO(page.write()))
    def test_avg_bitrate(self):
        # Patch bytes 16..27 of the ID header: nominal bitrate set,
        # max/min cleared -> nominal is reported.
        with open(self.filename, 'rb') as h:
            page = OggPage(h)
        packet = page.packets[0]
        packet = ((((packet[:16] + b'\x00\x00\x01\x00') + b'\x00\x00\x00\x00') + b'\x00\x00\x00\x00') + packet[28:])
        page.packets[0] = packet
        info = OggVorbisInfo(BytesIO(page.write()))
        self.failUnlessEqual(info.bitrate, 32768)
    def test_overestimated_bitrate(self):
        # Max bitrate above nominal: nominal wins.
        with open(self.filename, 'rb') as h:
            page = OggPage(h)
        packet = page.packets[0]
        packet = ((((packet[:16] + b'\x00\x00\x01\x00') + b'\x00\x00\x00\x01') + b'\x00\x00\x00\x00') + packet[28:])
        page.packets[0] = packet
        info = OggVorbisInfo(BytesIO(page.write()))
        self.failUnlessEqual(info.bitrate, 65536)
    def test_underestimated_bitrate(self):
        # Nominal above max: max wins.
        with open(self.filename, 'rb') as h:
            page = OggPage(h)
        packet = page.packets[0]
        packet = ((((packet[:16] + b'\x00\x00\x01\x00') + b'\x01\x00\x00\x00') + b'\x00\x00\x01\x00') + packet[28:])
        page.packets[0] = packet
        info = OggVorbisInfo(BytesIO(page.write()))
        self.failUnlessEqual(info.bitrate, 65536)
    def test_negative_bitrate(self):
        # All-ones (negative) bitrate fields are clamped to 0.
        with open(self.filename, 'rb') as h:
            page = OggPage(h)
        packet = page.packets[0]
        packet = ((((packet[:16] + b'\xff\xff\xff\xff') + b'\xff\xff\xff\xff') + b'\xff\xff\xff\xff') + packet[28:])
        page.packets[0] = packet
        info = OggVorbisInfo(BytesIO(page.write()))
        self.failUnlessEqual(info.bitrate, 0)
    def test_vendor(self):
        # Vendor is exposed as an attribute, not as a tag key.
        self.failUnless(self.audio.tags.vendor.startswith('Xiph.Org libVorbis'))
        self.failUnlessRaises(KeyError, self.audio.tags.__getitem__, 'vendor')
    def test_vorbiscomment(self):
        # Cross-check the written file with pyvorbis when available.
        self.audio.save()
        self.scan_file()
        if (ogg is None):
            return
        self.failUnless(ogg.vorbis.VorbisFile(self.filename))
    def test_vorbiscomment_big(self):
        self.test_really_big()
        self.audio.save()
        self.scan_file()
        if (ogg is None):
            return
        vfc = ogg.vorbis.VorbisFile(self.filename).comment()
        self.failUnlessEqual(self.audio['foo'], vfc['foo'])
    def test_vorbiscomment_delete(self):
        self.audio.delete()
        self.scan_file()
        if (ogg is None):
            return
        vfc = ogg.vorbis.VorbisFile(self.filename).comment()
        self.failUnlessEqual(vfc.keys(), ['VENDOR'])
    def test_vorbiscomment_delete_readd(self):
        self.audio.delete()
        self.audio.tags.clear()
        self.audio['foobar'] = ('foobar' * 1000)
        self.audio.save()
        self.scan_file()
        if (ogg is None):
            return
        vfc = ogg.vorbis.VorbisFile(self.filename).comment()
        self.failUnlessEqual(self.audio['foobar'], vfc['foobar'])
        self.failUnless(('FOOBAR' in vfc.keys()))
        self.failUnless(('VENDOR' in vfc.keys()))
    def test_huge_tag(self):
        # Comments spanning multiple Ogg pages must be read correctly.
        vorbis = self.Kind(os.path.join(DATA_DIR, 'multipagecomment.ogg'))
        self.failUnless(('big' in vorbis.tags))
        self.failUnless(('bigger' in vorbis.tags))
        self.failUnlessEqual(vorbis.tags['big'], [('foobar' * 10000)])
        self.failUnlessEqual(vorbis.tags['bigger'], [('quuxbaz' * 10000)])
        self.scan_file()
    def test_not_my_ogg(self):
        # An Ogg FLAC file must be rejected by the Vorbis code paths.
        fn = os.path.join(DATA_DIR, 'empty.oggflac')
        self.failUnlessRaises(error, type(self.audio), fn)
        self.failUnlessRaises(error, self.audio.save, fn)
        self.failUnlessRaises(error, self.audio.delete, fn)
    def test_save_split_setup_packet(self):
        # Saving must survive a setup packet split across pages.
        fn = os.path.join(DATA_DIR, 'multipage-setup.ogg')
        shutil.copy(fn, self.filename)
        audio = OggVorbis(self.filename)
        tags = audio.tags
        self.failUnless(tags)
        audio.save()
        self.audio = OggVorbis(self.filename)
        self.failUnlessEqual(self.audio.tags, tags)
    def test_save_split_setup_packet_reference(self):
        if (ogg is None):
            return
        self.test_save_split_setup_packet()
        vfc = ogg.vorbis.VorbisFile(self.filename).comment()
        for key in self.audio:
            self.failUnlessEqual(vfc[key], self.audio[key])
        self.ogg_reference(self.filename)
    def test_save_grown_split_setup_packet_reference(self):
        # Same as above, but with a tag large enough to grow the comment.
        if (ogg is None):
            return
        fn = os.path.join(DATA_DIR, 'multipage-setup.ogg')
        shutil.copy(fn, self.filename)
        audio = OggVorbis(self.filename)
        audio['foobar'] = [('quux' * 50000)]
        tags = audio.tags
        self.failUnless(tags)
        audio.save()
        self.audio = OggVorbis(self.filename)
        self.failUnlessEqual(self.audio.tags, tags)
        vfc = ogg.vorbis.VorbisFile(self.filename).comment()
        for key in self.audio:
            self.failUnlessEqual(vfc[key], self.audio[key])
        self.ogg_reference(self.filename)
    def test_mime(self):
        self.failUnless(('audio/vorbis' in self.audio.mime))
    def test_init_padding(self):
        self.assertEqual(self.audio.tags._padding, 0)
class TrayIcon():
    """System-tray icon for the main window with a Show/Settings/Quit menu
    and an unread-count badge."""
    def __init__(self, mainWindow) -> None:
        self.tray = QSystemTrayIcon(mainWindow)
        self.mainWindow = mainWindow
        # Icon theme is user-configurable; falls back to 'default'.
        theme_icon = self.mainWindow.settings.value('notification/theme_tray', 'default', str)
        self.tray.setIcon(getIconTray(theme_icon))
        self.tray.activated.connect(mainWindow.onTrayIconActivated)
        self.trayShow = QAction(_('ZapZap'), mainWindow)
        self.trayShow.triggered.connect(mainWindow.on_show)
        self.traySettings = QAction(_('Settings'), mainWindow)
        self.traySettings.triggered.connect(self.mainWindow.openTraySettings)
        self.trayExit = QAction(_('Quit'), mainWindow)
        # Route Quit through closeEvent so the normal close handling runs.
        self.trayExit.triggered.connect((lambda x=None: mainWindow.closeEvent(x)))
        self.trayMenu = QMenu()
        self.trayMenu.addAction(self.trayShow)
        self.trayMenu.addAction(self.traySettings)
        # NOTE(review): trayExit is not in the menu yet at this point —
        # Qt presumably appends the separator in that case; confirm the
        # intended menu order.
        self.trayMenu.insertSeparator(self.trayExit)
        self.trayMenu.addAction(self.trayExit)
        self.tray.setContextMenu(self.trayMenu)
        # Only show the icon when enabled in settings (default: enabled).
        if mainWindow.settings.value('system/tray_icon', True, bool):
            self.tray.show()
    def setVisible(self, v):
        """Show or hide the tray icon."""
        self.tray.setVisible(v)
    def showIconNotification(self, n):
        """Redraw the tray icon with an unread-count badge, capped at 999."""
        theme_icon = self.mainWindow.settings.value('notification/theme_tray', 'default', str)
        n = (999 if (n >= 1000) else n)
        self.tray.setIcon(getIconTray(theme_icon, n))
class FlaubertTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Flaubert (French), built on Moses punctuation
    normalization/tokenization followed by a learned BPE merge table.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # NOTE(review): additional_special_tokens uses a mutable (list) default.
    # It is only read here, but a None default would be the safe idiom.
    def __init__(self, vocab_file, merges_file, do_lowercase=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs):
        """Load the JSON vocab and BPE merges; set up Moses helpers."""
        do_lowercase_and_remove_accent = kwargs.pop('do_lowercase_and_remove_accent', None)
        if (do_lowercase_and_remove_accent is not None):
            logger.warning("`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything. `FlaubertTokenizer` will always set it to `False`.")
        # Always False for Flaubert, regardless of the keyword argument.
        self.do_lowercase_and_remove_accent = False
        self.do_lowercase = do_lowercase
        super().__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, **kwargs)
        try:
            import sacremoses
        except ImportError:
            raise ImportError('You need to install sacremoses to use FlaubertTokenizer. See for installation.')
        self.sm = sacremoses
        # Per-language caches for the (stateful) Moses helpers.
        self.cache_moses_punct_normalizer = {}
        self.cache_moses_tokenizer = {}
        self.lang_with_custom_tokenizer = {'zh', 'th', 'ja'}
        self.lang2id = lang2id
        self.id2lang = id2lang
        if ((lang2id is not None) and (id2lang is not None)):
            assert (len(lang2id) == len(id2lang))
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Drop the trailing empty line; each remaining line is one merge.
            merges = merges_handle.read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Earlier merges get lower ranks, i.e. higher priority in bpe().
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> BPE string cache for bpe().
        self.cache = {}
    def do_lower_case(self):
        # NOTE(review): returns the always-False accent/lowercase flag, not
        # self.do_lowercase; upstream exposes this as a property — confirm.
        return self.do_lowercase_and_remove_accent
    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with a cached per-language Moses normalizer."""
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)
    def moses_tokenize(self, text, lang):
        """Tokenize with a cached per-language Moses tokenizer."""
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)
    def moses_pipeline(self, text, lang):
        """Run the Moses cleanup pipeline (punct, normalize, strip控chars)."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text
    def ja_tokenize(self, text):
        """Tokenize Japanese text with KyTea (lazily initialized)."""
        if (self.ja_word_tokenizer is None):
            try:
                import Mykytea
                self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin")
            except (AttributeError, ImportError):
                logger.error("Make sure you install KyTea ( and it's python wrapper ( with the following steps")
                logger.error('1. git clone :neubig/kytea.git && cd kytea')
                logger.error('2. autoreconf -i')
                logger.error('3. ./configure --prefix=$HOME/local')
                logger.error('4. make && make install')
                logger.error('5. pip install kytea')
                raise
        return list(self.ja_word_tokenizer.getWS(text))
    def vocab_size(self):
        """Number of entries in the base vocabulary (without added tokens)."""
        return len(self.encoder)
    def get_vocab(self):
        """Return the full vocab including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """Apply BPE merges to *token*, returning space-joined subwords.

        The last character is tagged with '</w>' to mark the word boundary.
        Results are memoized in self.cache.
        """
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case: avoid emitting a bare newline subword.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word
    def preprocess_text(self, text):
        """Normalize quotes/unicode and optionally lowercase the text."""
        text = text.replace('``', '"').replace("''", '"')
        text = convert_to_unicode(text)
        text = unicodedata.normalize('NFC', text)
        if self.do_lowercase:
            text = text.lower()
        return text
    def _tokenize(self, text, bypass_tokenizer=False):
        """Tokenize *text* into BPE subword strings.

        The language is hard-coded to French ('fr'). If bypass_tokenizer is
        True the text is assumed pre-tokenized (whitespace split only).
        """
        lang = 'fr'
        if (lang and self.lang2id and (lang not in self.lang2id)):
            logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
        if bypass_tokenizer:
            text = text.split()
        else:
            text = self.preprocess_text(text)
            text = self.moses_pipeline(text, lang=lang)
            text = self.moses_tokenize(text, lang=lang)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token):
        """Map a token string to its id (unk id when out of vocabulary)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map an id back to its token string (unk token when unknown)."""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join BPE subwords back into plain text ('</w>' becomes a space)."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build `<s> A </s>` or `<s> A </s> B </s>` model inputs."""
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return ((bos + token_ids_0) + sep)
        return ((((bos + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return segment ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab JSON and merges file; return their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        # Merges are written in rank order; warn if ranks have gaps.
        with open(merge_file, 'w', encoding='utf-8') as writer:
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
    def __getstate__(self):
        # sacremoses modules are not picklable; drop and re-import on load.
        state = self.__dict__.copy()
        state['sm'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sacremoses
        except ImportError:
            raise ImportError('You need to install sacremoses to use XLMTokenizer. See for installation.')
        self.sm = sacremoses
class Upsample(nn.Module):
    """Nearest-neighbour 2x upsampling, optionally followed by a 3x3 conv.

    For dims == 3 only the last two spatial axes are doubled; the first
    spatial (depth) axis is left untouched.
    """

    def __init__(self, channels, use_conv, dims=2):
        super().__init__()
        self.channels = channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            # Channel-preserving convolution applied after the upsample.
            self.conv = conv_nd(dims, channels, channels, 3, padding=1)

    def forward(self, x):
        assert (x.shape[1] == self.channels)
        if (self.dims == 3):
            target = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            out = F.interpolate(x, target, mode='nearest')
        else:
            out = F.interpolate(x, scale_factor=2, mode='nearest')
        return self.conv(out) if self.use_conv else out
@pytest.mark.parametrize('path', ['/', _TEST_PATH])
def test_clean_partial_uploads(storage_engine, path):
    """clean_partial_uploads() keeps fresh uploads and removes expired ones.

    Also checks it is a no-op when the uploads directory does not exist.
    (The decorator line was truncated to `.parametrize(...)` in this copy —
    a syntax error — and has been restored to `@pytest.mark.parametrize`.)
    """
    storage_engine._root_path = path
    storage_engine.put_content(_TEST_UPLOADS_PATH, _TEST_CONTENT)
    assert storage_engine.exists(_TEST_UPLOADS_PATH)
    assert (storage_engine.get_content(_TEST_UPLOADS_PATH) == _TEST_CONTENT)
    # A generous deadline must leave the fresh upload untouched.
    storage_engine.clean_partial_uploads(timedelta(days=2))
    assert storage_engine.exists(_TEST_UPLOADS_PATH)
    assert (storage_engine.get_content(_TEST_UPLOADS_PATH) == _TEST_CONTENT)
    # Let the upload age past a zero-second deadline, then clean it up.
    time.sleep(1)
    storage_engine.clean_partial_uploads(timedelta(seconds=0))
    assert (not storage_engine.exists(_TEST_UPLOADS_PATH))
    # Cleaning with no uploads directory present must not raise.
    storage_engine.remove('uploads')
    assert (not storage_engine.exists('uploads'))
    storage_engine.clean_partial_uploads(timedelta(seconds=0))
def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
    """Build a segmentation model ('deeplabv3' or 'fcn') on a ResNet backbone.

    The backbone uses dilation in its last two stages; layer4 feeds the main
    head and, when *aux* is true, layer3 feeds an auxiliary FCN head.
    """
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained_backbone,
        replace_stride_with_dilation=[False, True, True],
    )
    # Expose layer4 as 'out' (and layer3 as 'aux' when requested).
    return_layers = {'layer4': 'out'}
    if aux:
        return_layers['layer3'] = 'aux'
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
    # The auxiliary classifier consumes the 1024-channel layer3 features.
    aux_classifier = FCNHead(1024, num_classes) if aux else None
    model_map = {'deeplabv3': (DeepLabHead, DeepLabV3), 'fcn': (FCNHead, FCN)}
    head_cls, model_cls = model_map[name]
    # The main head consumes the 2048-channel layer4 features.
    classifier = head_cls(2048, num_classes)
    return model_cls(backbone, classifier, aux_classifier)
def add_import(project, pymodule, module_name, name=None):
    """Add an import for *module_name* (optionally *name* inside it) to
    *pymodule*, reusing a matching existing import when possible.

    Returns (new_source, imported_name), where imported_name is the dotted
    name callers should use to refer to the imported object.
    """
    imports = get_module_imports(project, pymodule)
    # candidates[i] and names[i] are kept parallel: names[i] is how the
    # target is referenced when candidates[i] ends up being the import used.
    candidates = []
    names = []
    selected_import = None
    if (name is not None):
        # Candidate: `from module_name import name`
        from_import = FromImport(module_name, 0, [(name, None)])
        names.append(name)
        candidates.append(from_import)
    if ('.' in module_name):
        # Candidate: `from package import module`
        (pkg, mod) = module_name.rsplit('.', 1)
        from_import = FromImport(pkg, 0, [(mod, None)])
        if project.prefs.get('prefer_module_from_imports'):
            selected_import = from_import
        candidates.append(from_import)
        if name:
            names.append(((mod + '.') + name))
        else:
            names.append(mod)
    # Fallback candidate: `import module_name`
    normal_import = NormalImport([(module_name, None)])
    if name:
        names.append(((module_name + '.') + name))
    else:
        names.append(module_name)
    candidates.append(normal_import)
    # Prefer merging into an existing import statement if one matches.
    visitor = actions.AddingVisitor(project, candidates)
    if (selected_import is None):
        selected_import = normal_import
    for import_statement in imports.imports:
        if import_statement.accept(visitor):
            selected_import = visitor.import_info
            break
    imports.add_import(selected_import)
    imported_name = names[candidates.index(selected_import)]
    return (imports.get_changed_source(), imported_name)
class LlamaMhaWrapper(torch.nn.Module):
    """Wrap a Llama attention module, pinning its keyword arguments.

    The values captured at construction time (mask, position ids, cache,
    flags) override whatever the caller passes, and None entries are
    stripped from the attention module's output tuple.
    """

    def __init__(self, multihead_attn, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False):
        super().__init__()
        self.multihead_attn = multihead_attn
        self.attention_mask = attention_mask
        self.position_ids = position_ids
        self.past_key_value = past_key_value
        self.output_attentions = output_attentions
        self.use_cache = use_cache

    def forward(self, *args, **kwargs):
        # Inject the captured settings, overriding caller-supplied values.
        kwargs.update(
            attention_mask=self.attention_mask,
            position_ids=self.position_ids,
            past_key_value=self.past_key_value,
            output_attentions=self.output_attentions,
            use_cache=self.use_cache,
        )
        result = self.multihead_attn(*args, **kwargs)
        # Drop None entries (e.g. absent attention weights or kv cache).
        return tuple(item for item in result if item is not None)
def test_geojson(driver):
    """Render a GeoJson layer inside a MarkerCluster and verify it in a
    live browser via selenium.

    NOTE(review): the remote dataset URL literal was truncated in this copy
    of the file (it was left unterminated, a syntax error). The string has
    been closed with a placeholder; restore the original GeoJSON URL before
    relying on this test.
    """
    data_url = ''  # TODO: restore the original remote GeoJSON data URL
    m = folium.Map((41.9, 12.5), zoom_start=10, tiles='cartodbpositron')
    marker_cluster = folium.plugins.MarkerCluster(name='cluster').add_to(m)
    folium.GeoJson(data_url, embed=False).add_to(marker_cluster)
    folium.GeoJson(data_url, embed=False, show=False, name='geojson').add_to(m)
    folium.LayerControl(collapsed=False).add_to(m)
    html = m.get_root().render()
    with temp_html_filepath(html) as filepath:
        driver.get_file(filepath)
        assert driver.wait_until('.folium-map')
        driver.verify_js_logs()
        # All dataset markers collapse into one cluster badge showing 18.
        icon = driver.wait_until('.leaflet-marker-icon.marker-cluster > div > span')
        assert (icon.text == '18')
        control_label = driver.wait_until('.leaflet-control-layers-overlays > label:nth-of-type(2)')
        assert (control_label.text == 'geojson')
        control_input = control_label.find_element(By.CSS_SELECTOR, value='input')
        # The layer added with show=False starts unchecked.
        assert (control_input.get_attribute('checked') is None)
class IndexedWeightsDataset(data.indexed_dataset.IndexedDataset):
    """Per-example scalar weights read from a text file, one float per line."""

    def __init__(self, path):
        # Deliberately does NOT call the parent __init__: this dataset is
        # backed by a plain text file, not the parent's binary index format.
        self.values = []
        self.read_data(path)

    def read_data(self, path):
        """Append one float per line of *path* to self.values."""
        with open(path, 'r') as handle:
            self.values.extend(float(row.strip('\n')) for row in handle)
        self._len = len(self.values)

    def __getitem__(self, i):
        self.check_index(i)
        return self.values[i]

    def __del__(self):
        # Parent closes a data file here; this dataset holds no resources.
        pass

    def __len__(self):
        return self._len
class Solution(object):
    """LeetCode 85: largest rectangle of '1' cells in a binary matrix."""

    def maximalRectangle(self, matrix):
        """Return the area of the largest all-'1' rectangle in *matrix*.

        Per-row histogram technique: for each column we track the run of
        consecutive '1's ending at the current row (height) and the widest
        interval [left, right) sustaining that height. O(rows*cols) time,
        O(cols) extra space. Cells are compared against the string '1'.
        """
        if not matrix:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        heights = [0] * cols
        lefts = [0] * cols
        rights = [cols] * cols
        best = 0
        for r in range(rows):
            row = matrix[r]
            # Extend or reset the column heights for this row.
            for c in range(cols):
                heights[c] = heights[c] + 1 if row[c] == '1' else 0
            # Tighten left boundaries scanning left-to-right.
            boundary = 0
            for c in range(cols):
                if row[c] == '1':
                    lefts[c] = max(lefts[c], boundary)
                else:
                    lefts[c] = 0
                    boundary = c + 1
            # Tighten right boundaries scanning right-to-left.
            boundary = cols
            for c in range(cols - 1, -1, -1):
                if row[c] == '1':
                    rights[c] = min(rights[c], boundary)
                else:
                    rights[c] = cols
                    boundary = c
            # Best rectangle whose bottom edge is row r.
            for c in range(cols):
                best = max(best, (rights[c] - lefts[c]) * heights[c])
        return best
def convert_pytorch_grid2scipy(grid):
    """Convert a PyTorch sampling grid of shape (3, H, W, D) with [-1, 1]
    coordinates into a scipy-style displacement field in voxel units.

    The channel order is flipped from (x, y, z) to (z, y, x), and the
    identity grid is subtracted so the result is a displacement.
    """
    _, H, W, D = grid.shape
    # Map normalized [-1, 1] coordinates onto voxel indices [0, dim - 1].
    voxel_x = (grid[0] + 1) * (D - 1) / 2
    voxel_y = (grid[1] + 1) * (W - 1) / 2
    voxel_z = (grid[2] + 1) * (H - 1) / 2
    absolute = np.stack([voxel_z, voxel_y, voxel_x])
    identity = np.meshgrid(np.arange(H), np.arange(W), np.arange(D), indexing='ij')
    return absolute - identity
def join_dataset_splits(datasets):
    """Merge [train, val, test] datasets into a single dataset.

    The first dataset is mutated to hold all examples (re-collated), and
    the contiguous index ranges of the original splits are recorded in its
    ``split_idxs`` attribute. Returns the (mutated) first dataset.
    """
    assert len(datasets) == 3, 'Expecting train, val, test datasets'
    sizes = [len(ds) for ds in datasets]
    data_list = [ds.get(i) for ds, size in zip(datasets, sizes) for i in range(size)]
    merged = datasets[0]
    merged._indices = None
    merged._data_list = data_list
    merged.data, merged.slices = merged.collate(data_list)
    # Contiguous index ranges for each original split in the merged order.
    offsets = [0, sizes[0], sizes[0] + sizes[1], sum(sizes)]
    merged.split_idxs = [list(range(offsets[k], offsets[k + 1])) for k in range(3)]
    return merged
def test_negotiate_locale():
    """Exercise core.negotiate_locale's matching rules.

    Covers exact matches, language-only matches, case-insensitive matching
    (which returns the available spelling), and the 'no' -> 'nb_NO' alias.
    (A byte-identical duplicated assertion in the original was removed.)
    """
    assert (core.negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']) == 'de_DE')
    assert (core.negotiate_locale(['de_DE', 'en_US'], ['en', 'de']) == 'de')
    # Matching is case-insensitive but yields the available list's casing.
    assert (core.negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) == 'de_DE')
    assert (core.negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US']) == 'ja_JP')
    assert (core.negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']) == 'nb_NO')
def parse_version_info(version_str: str, length: int=4) -> tuple:
    """Parse *version_str* into a fixed-length tuple of version components.

    The release segment is truncated/zero-padded to *length* entries, then
    the pre-/post-release marker is appended ((0, 0) for final releases).
    """
    from packaging.version import parse
    parsed = parse(version_str)
    assert parsed.release, f'failed to parse version {version_str}'
    info = list(parsed.release)[:length]
    # Zero-pad short versions, e.g. '1.2' -> (1, 2, 0, 0) for length=4.
    info += [0] * (length - len(info))
    if parsed.is_prerelease:
        info += list(parsed.pre)
    elif parsed.is_postrelease:
        # NOTE(review): packaging's Version.post is documented as an int,
        # so list(...) here looks like it would raise TypeError — confirm
        # the intended behavior for post-releases.
        info += list(parsed.post)
    else:
        info += [0, 0]
    return tuple(info)
def calculate_arg_defaults(builder: IRBuilder, fn_info: FuncInfo, func_reg: (Value | None), symtable: dict[(SymbolNode, SymbolTarget)]) -> None:
    """Pre-compute and store the non-constant argument defaults of a function.

    Constant defaults need no storage (they are emitted inline at call
    sites). Non-constant defaults are evaluated once here and stashed
    either in a module static (top-level functions) or as an attribute on
    the function object (nested functions).
    """
    fitem = fn_info.fitem
    for arg in fitem.arguments:
        # Skip arguments without a default, and constant defaults.
        if not arg.initializer or is_constant(arg.initializer):
            continue
        value = builder.coerce(builder.accept(arg.initializer), symtable[arg.variable].type, arg.line)
        if fn_info.is_nested:
            assert (func_reg is not None)
            builder.add(SetAttr(func_reg, arg.variable.name, value, arg.line))
        else:
            static_name = (fitem.fullname + '.') + arg.variable.name
            builder.add(InitStatic(value, static_name, builder.module_name))
def test_handle_block_closed_channel():
    """A closed channel must move to SETTLING once its settle timeout expires.

    The channel is closed at block 50 with settle_timeout 50: at block 90
    it must still be CLOSED with no events; at block 102 (past 50 + 50) it
    must transition to SETTLING and emit the settle events.
    """
    channel_state = factories.create(factories.NettingChannelStateProperties(close_transaction=TransactionExecutionStatus(finished_block_number=50, result=TransactionExecutionStatus.SUCCESS), settle_timeout=50))
    pseudo_random_generator = random.Random()
    # Block 90: inside the settle window — nothing should happen yet.
    block = Block(block_number=90, gas_limit=100000, block_hash=factories.make_block_hash())
    before_settle = handle_state_transitions(block, channel_state=channel_state, block_number=block.block_number, block_hash=None, pseudo_random_generator=pseudo_random_generator)
    assert (get_status(before_settle.new_state) == ChannelState.STATE_CLOSED)
    assert (not before_settle.events)
    # Block 102: past the settle window — settlement must be initiated.
    block = Block(block_number=102, gas_limit=100000, block_hash=factories.make_block_hash())
    after_settle = handle_state_transitions(block, channel_state=before_settle.new_state, block_number=block.block_number, block_hash=None, pseudo_random_generator=pseudo_random_generator)
    assert (get_status(after_settle.new_state) == ChannelState.STATE_SETTLING)
    assert after_settle.events
class TrainOptions():
    """Command-line options for training the pSp/StyleGAN-based encoder.

    All flags are registered on an ArgumentParser in initialize(); call
    parse() to obtain the populated options namespace.
    """

    def __init__(self):
        self.parser = ArgumentParser()
        self.initialize()

    @staticmethod
    def _parse_bool(value):
        """Parse an explicit boolean CLI value.

        BUG FIX: the original used ``type=bool`` for --train_decoder, under
        which any non-empty string — including the word 'False' — parses as
        True. argparse treats a ValueError from a type callable as an
        invalid-argument error, so unrecognized words are rejected instead.
        """
        if isinstance(value, bool):
            return value
        lowered = value.strip().lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n', ''):
            return False
        raise ValueError(f'invalid boolean value: {value!r}')

    def initialize(self):
        """Register all training options on the parser."""
        # --- experiment / architecture ---
        self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory')
        self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str, help='Type of dataset/experiment to run')
        self.parser.add_argument('--encoder_type', default='GradualStyleEncoder', type=str, help='Which encoder to use')
        self.parser.add_argument('--input_nc', default=3, type=int, help='Number of input image channels to the psp encoder')
        self.parser.add_argument('--label_nc', default=0, type=int, help='Number of input label channels to the psp encoder')
        self.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')
        self.parser.add_argument('--feat_ind', default=0, type=int, help='Layer index of G to accept the first-layer feature')
        self.parser.add_argument('--max_pooling', action='store_true', help='Apply max pooling or average pooling')
        self.parser.add_argument('--use_skip', action='store_true', help='Using skip connection from the encoder to the styleconv layers of G')
        self.parser.add_argument('--use_skip_torgb', action='store_true', help='Using skip connection from the encoder to the toRGB layers of G.')
        self.parser.add_argument('--skip_max_layer', default=7, type=int, help='Layer used for skip connection. 1,2,3,4,5,6,7 correspond to 4,8,16,32,64,128,256')
        # --- data / augmentation ---
        self.parser.add_argument('--crop_face', action='store_true', help='Use aligned cropped face to predict style latent code w+')
        self.parser.add_argument('--affine_augment', action='store_true', help='Apply random affine transformation during training')
        self.parser.add_argument('--random_crop', action='store_true', help='Apply random crop during training')
        self.parser.add_argument('--resize_factors', type=str, default=None, help='For super-res, comma-separated resize factors to use for inference.')
        # typo fix: the dataset module lives under ./datasets/
        self.parser.add_argument('--blind_sr', action='store_true', help='Whether training blind SR (will use ./datasets/ffhq_degradation_dataset.py)')
        self.parser.add_argument('--use_latent_mask', action='store_true', help='For segmentation/sketch to face translation, fuse w+ from two sources')
        self.parser.add_argument('--latent_mask', type=str, default='8,9,10,11,12,13,14,15,16,17', help='Comma-separated list of latents to perform style-mixing with')
        self.parser.add_argument('--res_num', default=2, type=int, help='Layer number of the resblocks of the translation network T')
        self.parser.add_argument('--toonify_weights', default=None, type=str, help='Path to Toonify StyleGAN model weights')
        self.parser.add_argument('--generate_training_data', action='store_true', help='Whether generating training data (for video editing) or load real data')
        self.parser.add_argument('--use_att', default=0, type=int, help='Layer of MLP used for attention, 0 not use attention')
        self.parser.add_argument('--editing_w_path', type=str, default=None, help='Path to the editing vector v')
        self.parser.add_argument('--zero_noise', action='store_true', help='Whether using zero noises')
        self.parser.add_argument('--direction_path', type=str, default=None, help='Path to the direction vector to augment generated data')
        # --- optimization ---
        self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')
        self.parser.add_argument('--test_batch_size', default=8, type=int, help='Batch size for testing and inference')
        self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')
        self.parser.add_argument('--test_workers', default=8, type=int, help='Number of test/inference dataloader workers')
        self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate')
        self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use')
        # BUG FIX: was type=bool (any non-empty string, e.g. 'False', parsed as True).
        self.parser.add_argument('--train_decoder', default=False, type=self._parse_bool, help='Whether to train the decoder model')
        self.parser.add_argument('--start_from_latent_avg', action='store_true', help='Whether to add average latent vector to generate codes from encoder.')
        self.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space instead of w+')
        # --- loss weights ---
        self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')
        self.parser.add_argument('--id_lambda', default=0, type=float, help='ID loss multiplier factor')
        self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')
        self.parser.add_argument('--w_norm_lambda', default=0, type=float, help='W-norm loss multiplier factor')
        self.parser.add_argument('--lpips_lambda_crop', default=0, type=float, help='LPIPS loss multiplier factor for inner image region')
        self.parser.add_argument('--l2_lambda_crop', default=0, type=float, help='L2 loss multiplier factor for inner image region')
        self.parser.add_argument('--moco_lambda', default=0, type=float, help='Moco-based feature similarity loss multiplier factor')
        self.parser.add_argument('--adv_lambda', default=0, type=float, help='Adversarial loss multiplier factor')
        self.parser.add_argument('--d_reg_every', default=16, type=int, help='Interval of the applying r1 regularization')
        self.parser.add_argument('--r1', default=1, type=float, help='weight of the r1 regularization')
        self.parser.add_argument('--tmp_lambda', default=0, type=float, help='Temporal loss multiplier factor')
        # --- checkpoints / logging ---
        self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str, help='Path to StyleGAN model weights')
        self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pSp model checkpoint')
        self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps')
        self.parser.add_argument('--image_interval', default=100, type=int, help='Interval for logging train images during training')
        self.parser.add_argument('--board_interval', default=50, type=int, help='Interval for logging metrics to tensorboard')
        self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval')
        self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval')
        self.parser.add_argument('--use_wandb', action='store_true', help='Whether to use Weights & Biases to track experiment.')

    def parse(self):
        """Parse sys.argv and return the populated options namespace."""
        opts = self.parser.parse_args()
        return opts
def save_colorful_images(prediction, filename, output_dir, palettes):
    """Save a color-mapped prediction array as an image file.

    Args:
        prediction: integer class-index array; squeezed and cast to uint8
            before being used as an index into `palettes`.
        filename: output file name, possibly containing subdirectories.
        output_dir: root directory for the output file.
        palettes: array mapping class index -> color.
    """
    im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()])
    fn = os.path.join(output_dir, filename)
    out_dir = os.path.split(fn)[0]
    # BUG FIX: os.mkdir fails when intermediate directories are missing and
    # races with the exists() check; makedirs(exist_ok=True) handles both.
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    im.save(fn)
class TestDOTAR3DetGWD(TestDOTA):
    """DOTA evaluation runner for the R3Det-GWD detection network."""

    def eval(self):
        """Run the detector over the test images, writing results to a
        version-named txt file that is removed unless boxes are shown."""
        result_file = '{}.txt'.format(self.cfgs.VERSION)
        test_images = self.get_test_image()
        network = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs, is_training=False)
        self.test_dota(det_net=network, real_test_img_list=test_images, txt_name=result_file)
        # Keep the result file only when visualizing boxes.
        if not self.args.show_box:
            os.remove(result_file)
class MetricMeter():
    """Tracks a collection of named metrics, each backed by an AverageMeter."""

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update of each metric name.
        self.meters = defaultdict(AverageMeter)
        self.delimiter = delimiter

    def update(self, input_dict):
        """Feed a dict of name -> value into the meters; tensors are unwrapped."""
        if input_dict is None:
            return
        if not isinstance(input_dict, dict):
            raise TypeError('Input to MetricMeter.update() must be a dictionary')
        for name, value in input_dict.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            self.meters[name].update(value)

    def __str__(self):
        # "name current (average)" per metric, joined by the delimiter.
        parts = [
            f'{name} {meter.val:.4f} ({meter.avg:.4f})'
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(parts)
def test_pytester_run_with_timeout(pytester: Pytester) -> None:
    """A trivially-passing test must finish well within the subprocess timeout."""
    source = pytester.makepyfile('def test_no_timeout(): pass')
    limit = 120
    started = time.time()
    result = pytester.runpytest_subprocess(source, timeout=limit)
    elapsed = time.time() - started
    assert result.ret == ExitCode.OK
    assert elapsed < limit
class MemcacheClient(config.Parser):
    """Config parser that builds a MemcacheContextFactory from raw config."""

    def __init__(self, serializer: Optional[Serializer]=None, deserializer: Optional[Deserializer]=None):
        # Optional (de)serializers are forwarded to the connection pool.
        self.serializer = serializer
        self.deserializer = deserializer

    def parse(self, key_path: str, raw_config: config.RawConfig) -> 'MemcacheContextFactory':
        """Build a pooled memcache context factory from config rooted at key_path."""
        connection_pool = pool_from_config(
            raw_config,
            prefix=f'{key_path}.',
            serializer=self.serializer,
            deserializer=self.deserializer,
        )
        return MemcacheContextFactory(connection_pool, key_path)
class TestUnconnectedCommand(CommandTest):
    """Tests for commands available before a session has logged in."""

    def test_info_command(self):
        # Pin the server start time so the uptime string in the expected
        # output is deterministic.
        gametime.SERVER_START_TIME = 86400
        expected = ('## BEGIN INFO 1.1\nName: %s\nUptime: %s\nConnected: %d\nVersion: Evennia %s\n## END INFO' % (settings.SERVERNAME, datetime.datetime.fromtimestamp(gametime.SERVER_START_TIME).ctime(), SESSIONS.account_count(), utils.get_evennia_version()))
        self.call(unloggedin.CmdUnconnectedInfo(), '', expected)
        # Restore module state so other tests do not see the pinned value.
        del gametime.SERVER_START_TIME
class JAXLinker(JITLinker):
    """JITLinker that converts a PyTensor FunctionGraph into a jax.jit-compiled
    callable."""

    def fgraph_convert(self, fgraph, input_storage, storage_map, **kwargs):
        """Convert `fgraph` to a JAX function.

        RandomType shared variables cannot be updated in place in the compiled
        JAX graph, so each one is replaced with a plain shared copy before
        conversion, and the corresponding storage entries are rewired.
        """
        from pytensor.link.jax.dispatch import jax_funcify
        from pytensor.tensor.random.type import RandomType
        shared_rng_inputs = [inp for inp in fgraph.inputs if (isinstance(inp, SharedVariable) and isinstance(inp.type, RandomType))]
        if shared_rng_inputs:
            warnings.warn(f'The RandomType SharedVariables {shared_rng_inputs} will not be used in the compiled JAX graph. Instead a copy will be used.', UserWarning)
            # Fresh (non-borrowed) copies detach the graph from the originals.
            new_shared_rng_inputs = [shared(inp.get_value(borrow=False)) for inp in shared_rng_inputs]
            fgraph.replace_all(zip(shared_rng_inputs, new_shared_rng_inputs), import_missing=True, reason='JAXLinker.fgraph_convert')
            for (old_inp, new_inp) in zip(shared_rng_inputs, new_shared_rng_inputs):
                # Point the storage map at the copy's value.
                new_inp_storage = [new_inp.get_value(borrow=True)]
                storage_map[new_inp] = new_inp_storage
                old_inp_storage = storage_map.pop(old_inp)
                # Locate the old input's storage cell by identity; a missing
                # cell indicates inconsistent bookkeeping.
                for (input_storage_idx, input_storage_item) in enumerate(input_storage):
                    if (input_storage_item is old_inp_storage):
                        break
                else:
                    raise ValueError()
                input_storage[input_storage_idx] = new_inp_storage
                fgraph.remove_input(fgraph.inputs.index(old_inp), reason='JAXLinker.fgraph_convert')
        return jax_funcify(fgraph, input_storage=input_storage, storage_map=storage_map, **kwargs)

    def jit_compile(self, fn):
        import jax
        # Constant inputs become static arguments so jax.jit specializes on them.
        static_argnums = [n for (n, i) in enumerate(self.fgraph.inputs) if isinstance(i, Constant)]
        return jax.jit(fn, static_argnums=static_argnums)

    def create_thunk_inputs(self, storage_map):
        """Return the storage cells for graph inputs, converting NumPy RNG
        objects in place to JAX-compatible values."""
        from pytensor.link.jax.dispatch import jax_typify
        thunk_inputs = []
        for n in self.fgraph.inputs:
            sinput = storage_map[n]
            if isinstance(sinput[0], (RandomState, Generator)):
                # Mutate the cell so later lookups see the converted value.
                new_value = jax_typify(sinput[0], dtype=getattr(sinput[0], 'dtype', None))
                sinput[0] = new_value
            thunk_inputs.append(sinput)
        return thunk_inputs
class ShortcutFilteringFilter(logging.Filter):
    """Log filter keyed on a record's optional `custom_shortcut` attribute.

    In blacklist mode, records whose shortcut is listed in `filters` are
    dropped; in whitelist mode, only listed shortcuts are kept. Records at
    ERROR or above, and records originating from this module, always pass.
    """

    def __init__(self, *, is_blacklist: bool, filters: str):
        super().__init__()
        self.__is_blacklist = is_blacklist
        self.__filters = filters

    def filter(self, record):
        # Errors and our own log records are never suppressed.
        if record.levelno >= logging.ERROR or record.name == __name__:
            return True
        shortcut = getattr(record, 'custom_shortcut', None)
        listed = shortcut is not None and shortcut in self.__filters
        # Blacklist: drop only listed shortcuts. Whitelist: keep only listed.
        return (not listed) if self.__is_blacklist else listed
class CallbackRegistry():
    """Registry of callbacks indexed both by callback name and by group.

    NOTE(review): the `field(default_factory=...)` attribute defaults suggest
    this class is meant to be a dataclass — confirm a @dataclass decorator was
    not lost in this copy.
    """
    # group name -> callbacks registered under that group
    _by_group: dict[(str, list[RegisteredCallback])] = field(default_factory=(lambda : defaultdict(list)))
    # callback name -> all callbacks registered under that name
    _by_callback_name: dict[(str, list[RegisteredCallback])] = field(default_factory=(lambda : defaultdict(list)))

    def _register_module(self) -> None:
        # Register every built-in callback name the currently-importing
        # module defines.
        module = _path_hook._module_being_imported
        for name in builtin_callback_names:
            callback = getattr(module, name, None)
            if (not callback):
                continue
            self.register_callback(name, callback)

    def register_callback(self, name: str, callback: Callable[(..., None)], group: Optional[Any]=None) -> None:
        """Register `callback` under `name`; the group defaults to the name of
        the module currently being imported."""
        import python
        # Ensure the target attribute on the `python` module is hooked so
        # registered callbacks actually get invoked.
        original = getattr(python, name, None)
        if (not isinstance(original, HookedCallback)):
            hook_callback(python, name)
        if (not group):
            module = _path_hook._module_being_imported
            if (not module):
                raise ValueError(f'No module is being imported and group is {group!r}.')
            group = module.__name__
        registered_callback = RegisteredCallback(name, callback)
        self._by_callback_name[name].append(registered_callback)
        self._by_group[group].append(registered_callback)

    def unregister(self, group: Any) -> None:
        """Remove every callback registered under `group`.

        Raises KeyError if the group has no callbacks.
        """
        callbacks = self._by_group.get(group)
        if (not callbacks):
            raise KeyError(f'Group {group!r} does not exist.')
        by_name = self._by_callback_name
        for callback in callbacks:
            name = callback.name
            by_name[name].remove(callback)

    def dispatch(self, callback_name: str, *args: tuple[Any], **kwargs: dict[(str, Any)]) -> None:
        # Invoke callbacks in registration order; the first non-None return
        # value short-circuits the remaining callbacks.
        for callback in self._by_callback_name[callback_name]:
            ret = callback(*args, **kwargs)
            if (ret is not None):
                return ret
def lsymeig(A: LinearOperator, neig: Optional[int]=None, M: Optional[LinearOperator]=None, bck_options: Optional[Mapping[(str, Any)]]=None, method: Union[(str, Callable, None)]=None, **fwd_options) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Return the `neig` lowest eigenpairs of `A` (optionally generalized by
    `M`) by delegating to symeig with mode='lowest'.

    BUG FIX: `bck_options` previously defaulted to a shared mutable `{}`;
    it now defaults to None and a fresh dict is created per call.
    """
    if bck_options is None:
        bck_options = {}
    return symeig(A, neig, 'lowest', M, method=method, bck_options=bck_options, **fwd_options)
class QuantizeUpSample(nn.Module):
    """Upsampling module that delegates to the quantized-functional upsample op."""

    def __init__(self, size=None, scale_factor=None):
        super().__init__()
        # Exactly one of `size` / `scale_factor` is normally provided; both
        # are forwarded unchanged to QF.upsample.
        self.size = size
        self.scale_factor = scale_factor

    def forward(self, x):
        return QF.upsample(x, size=self.size, scale_factor=self.scale_factor)
def test_scalar_conversion():
    """Each f_* binding accepts scalars of its own record dtype and rejects
    every other array's scalars with a TypeError."""
    n = 3
    arrays = [m.create_rec_simple(n), m.create_rec_packed(n), m.create_rec_nested(n), m.create_enum_array(n)]
    funcs = [m.f_simple, m.f_packed, m.f_nested]
    for func_idx, func in enumerate(funcs):
        for arr_idx, arr in enumerate(arrays):
            # Only the first two funcs accept their matching array's scalars.
            if func_idx == arr_idx and func_idx < 2:
                assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
            else:
                with pytest.raises(TypeError) as excinfo:
                    func(arr[0])
                assert 'incompatible function arguments' in str(excinfo.value)
# NOTE(review): the three decorators below survived in this copy only as bare
# argument tuples (the `@patch` / `@patch.object` markers were stripped); they
# are reconstructed here — verify against the upstream pypyr test source.
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'run_conditional_decorators')
@patch('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_foreach_thrice_with_substitutions(mock_run, mock_moduleloader):
    """foreach loops once per item, interpolating each value into context['i']."""
    step = Step({'name': 'step1', 'foreach': ['{key1}', '{key2}', 'key3']})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert (mock_logger_info.mock_calls == [call('foreach: running step value1'), call('foreach: running step value2'), call('foreach: running step key3'), call('foreach decorator looped 3 times.')])
    assert (mock_run.call_count == 3)
    # Each iteration runs with context['i'] set to the interpolated item.
    mutated_context = get_test_context()
    mutated_context['i'] = 'value1'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'value2'
    mock_run.assert_any_call(mutated_context)
    mutated_context['i'] = 'key3'
    mock_run.assert_any_call(mutated_context)
    # Only the loop variable 'i' is added; the final item's value persists.
    assert (len(context) == (original_len + 1))
    assert (context['i'] == 'key3')
    assert (step.for_counter == 'key3')
class GradClip(ViewOp):
    """Identity op (ViewOp) that clips the gradient flowing through it.

    NOTE(review): __props__ is empty while instances carry clip bounds, so two
    GradClip ops with different bounds compare equal during graph rewriting —
    confirm this is intended.
    """
    __props__ = ()

    def __init__(self, clip_lower_bound, clip_upper_bound):
        # Bounds applied elementwise to each output gradient in grad().
        self.clip_lower_bound = clip_lower_bound
        self.clip_upper_bound = clip_upper_bound
        if (not (self.clip_upper_bound >= self.clip_lower_bound)):
            raise ValueError('`clip_upper_bound` should be >= `clip_lower_bound`')

    def grad(self, args, g_outs):
        # Forward pass is identity; backward pass clips each incoming gradient
        # into [clip_lower_bound, clip_upper_bound].
        return [pytensor.tensor.clip(g_out, self.clip_lower_bound, self.clip_upper_bound) for g_out in g_outs]
class KombuProducerContextFactory(ContextFactory):
    """ContextFactory producing _KombuProducer objects that share a single
    connection, exchange, and bounded producer pool."""

    def __init__(self, connection: Connection, exchange: Exchange, max_connections: Optional[int]=None, serializer: Optional[KombuSerializer]=None):
        self.connection = connection
        self.exchange = exchange
        self.serializer = serializer
        # One pool shared across every producer created by this factory.
        self.producers = Producers(limit=max_connections)

    def make_object_for_context(self, name: str, span: Span) -> '_KombuProducer':
        """Create a producer bound to the given span."""
        return _KombuProducer(
            name, span, self.connection, self.exchange, self.producers,
            serializer=self.serializer,
        )
class MultiFatigueModel(OptionGeneric):
    """Container holding one FatigueModel per suffix (or a plain list when the
    model defines no suffixes).

    NOTE(review): several method bodies below appear to have been stripped in
    this copy (bare `def ...:` lines with no body) — restore them from the
    original source before use.
    """

    def __init__(self, model: (FatigueModel | list), state_only: bool, split_controls: bool=True, apply_to_joint_dynamics: bool=False, **params):
        super(MultiFatigueModel, self).__init__(**params)
        # A single model is promoted to a one-element list.
        if isinstance(model, FatigueModel):
            model = [model]
        # When suffixes are defined, models are keyed by suffix; an empty
        # suffix key receives the whole list.
        if self.suffix():
            model_tp = {}
            for (i, key) in enumerate(self.suffix()):
                if key:
                    model_tp[key] = model[i]
                else:
                    model_tp[key] = model
        else:
            model_tp = model
        self.models = model_tp
        self.state_only = state_only
        self.apply_to_joint_dynamics = apply_to_joint_dynamics
        self.split_controls = split_controls

    def shape(self):
        # Number of stored models (dict or list).
        return len(self.models)

    # NOTE(review): bodies of the following methods were stripped in this copy.
    def model_type() -> str:
    def color() -> tuple:
    def plot_factor() -> tuple:
    def suffix(self) -> tuple:

    def add(self, fatigue: FatigueModel):
        # Only valid when self.models is a list (no suffixes defined).
        self.models.append(fatigue)

    def dynamics(self, dxdt, nlp, index, states, controls):
        # Apply each suffixed model's dynamics contribution in turn.
        for suffix in self.suffix():
            dxdt = self._dynamics_per_suffix(dxdt, suffix, nlp, index, states, controls)
        return dxdt

    # NOTE(review): bodies of the following methods were stripped in this copy.
    def _dynamics_per_suffix(self, dxdt, suffix, nlp, index, states, controls):
    def default_state_only():
    def default_apply_to_joint_dynamics():
    def default_bounds(self, index: int, variable_type: VariableType) -> tuple:
    def default_initial_guess(self, index: int, variable_type: VariableType):

    def _convert_to_models_key(self, item: (int | str)):
        # Dict-backed models are addressed by key order; list-backed by index.
        if isinstance(self.models, dict):
            return list(self.models.keys())[item]
        else:
            return item
def test_list_build_source_namespaces():
    """The bitbucket trigger lists both the personal and the org namespaces."""
    # NOTE(review): the expected-namespaces literal below is corrupted in this
    # copy — the 'url' string values appear truncated (URLs stripped), which
    # breaks the literal; restore the full dicts from version control.
    namespaces_expected = [{'personal': True, 'score': 1, 'avatar_url': 'avatarurl', 'id': 'knownuser', 'title': 'knownuser', 'url': ' {'score': 2, 'title': 'someorg', 'personal': False, 'url': ' 'avatar_url': 'avatarurl', 'id': 'someorg'}]
    found = get_bitbucket_trigger().list_build_source_namespaces()
    # Sort both sides canonically so the comparison is order-independent.
    found = sorted(found, key=(lambda d: sorted(d.items())))
    namespaces_expected = sorted(namespaces_expected, key=(lambda d: sorted(d.items())))
    assert (found == namespaces_expected)
def make_rst(path, main, subpath=None):
    """Recursively generate reST help documents for a CLI and its subcommands.

    Args:
        path: output directory for the generated .rst files.
        main: CLI entry point, invoked via `capture` with `--help`.
        subpath: list of subcommand names leading to the current command;
            defaults to the top-level command.
    """
    # BUG FIX: the default was a shared mutable list (`subpath=[]`);
    # use None and create a fresh list per call.
    if subpath is None:
        subpath = []
    help_text = capture(main, subpath + ['--help'])
    parsed = parse_help(subpath, help_text)
    out_file = os.path.join(path, parsed['program'].replace(' ', '_') + '.rst')
    with open(out_file, 'w') as f:
        f.write(format_rst(parsed))
    # NOTE(review): the first 'subcommands' entry is skipped — presumably a
    # header/placeholder row; confirm against parse_help's output format.
    for subcommand, _ in parsed['subcommands'][1:]:
        make_rst(path, main, subpath + [subcommand])
class BaseTemplateStrategy():
    """Dispatches rendering to a template file or an inline HTML string.

    Subclasses must implement render_template() and render_string().
    """

    def __init__(self, strategy):
        self.strategy = strategy

    def render(self, tpl=None, html=None, context=None):
        """Render `tpl` if given, otherwise `html`; one of them is required."""
        if not (tpl or html):
            raise ValueError('Missing template or html parameters')
        ctx = context if context else {}
        if tpl:
            return self.render_template(tpl, ctx)
        return self.render_string(html, ctx)

    def render_template(self, tpl, context):
        raise NotImplementedError('Implement in subclass')

    def render_string(self, html, context):
        raise NotImplementedError('Implement in subclass')
class PluginErrorWindow(UniqueWindow):
    """Singleton window listing plugin load failures, one expander per plugin."""

    def __init__(self, parent, failures):
        # UniqueWindow: abort construction if an instance is already open.
        if self.is_not_unique():
            return
        super().__init__()
        self.set_title(_('Plugin Errors'))
        self.set_border_width(6)
        self.set_transient_for(parent)
        self.set_default_size(520, 300)
        scrolledwin = Gtk.ScrolledWindow()
        vbox = Gtk.VBox(spacing=6)
        vbox.set_border_width(6)
        scrolledwin.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolledwin.add_with_viewport(vbox)
        keys = failures.keys()
        # Expand everything when the failure list is short.
        show_expanded = (len(keys) <= 3)
        for key in sorted(keys):
            expander = Gtk.Expander(label=util.bold(key))
            expander.set_use_markup(True)
            if show_expanded:
                expander.set_expanded(True)
            # NOTE(review): elements [1:3] of each failure message are dropped
            # — presumably boilerplate traceback lines; confirm the indices.
            message = (failures[key][0:1] + failures[key][3:])
            failure = Gtk.Label(label=''.join(message).strip())
            failure.set_alignment(0, 0)
            failure.set_padding(12, 6)
            failure.set_selectable(True)
            failure.set_line_wrap(True)
            vbox.pack_start(expander, False, True, 0)
            expander.add(failure)
        self.use_header_bar()
        # Without a header-bar close button, add an explicit Close button row.
        if (not self.has_close_button()):
            vbox2 = Gtk.VBox(spacing=12)
            close = Button(_('_Close'), Icons.WINDOW_CLOSE)
            close.connect('clicked', (lambda *x: self.destroy()))
            b = Gtk.HButtonBox()
            b.set_layout(Gtk.ButtonBoxStyle.END)
            b.pack_start(close, True, True, 0)
            vbox2.pack_start(scrolledwin, True, True, 0)
            vbox2.pack_start(b, False, True, 0)
            self.add(vbox2)
            close.grab_focus()
        else:
            self.add(scrolledwin)
        self.get_child().show_all()
class SawyerDoorUnlockEnvV2(SawyerXYZEnv):
    """Sawyer task: push the door's lock slider sideways to unlock the door."""

    def __init__(self):
        # Workspace bounds for the hand and sampling ranges for the door and
        # goal positions, as (x, y, z) in meters.
        hand_low = ((- 0.5), 0.4, (- 0.15))
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, 0.15)
        obj_high = (0.1, 0.85, 0.15)
        goal_low = (0.0, 0.64, 0.21)
        goal_high = (0.2, 0.7, 0.2111)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.85, 0.15]), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.goal = np.array([0, 0.85, 0.1])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Travel distance of the lock slider; used as the reward margin.
        self._lock_length = 0.1
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    # NOTE(review): used as an attribute in __init__ (self.model_name) — this
    # likely lost a @property decorator in this copy.
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_door_lock.xml')

    # NOTE(review): bare name below appears to be a stripped
    # @_assert_task_is_set decorator for evaluate_state.
    _assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward and a metrics dict for the given observation."""
        (reward, tcp_to_obj, tcp_open, obj_to_target, near_button, button_pressed) = self.compute_reward(action, obs)
        info = {'success': float((obj_to_target <= 0.02)), 'near_object': float((tcp_to_obj <= 0.05)), 'grasp_success': float((tcp_open > 0)), 'grasp_reward': near_button, 'in_place_reward': button_pressed, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
        return (reward, info)

    # NOTE(review): likely also a stripped @property in this copy.
    def _target_site_config(self):
        # Show the unlock goal; park the lock goal marker far away.
        return [('goal_unlock', self._target_pos), ('goal_lock', np.array([10.0, 10.0, 10.0]))]

    def _get_id_main_object(self):
        return None

    def _get_pos_objects(self):
        return self._get_site_pos('lockStartUnlock')

    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('door_link')

    def _set_obj_xyz(self, pos):
        # Write the lock joint position (qpos index 9) and zero its velocity.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand and door, randomize the door position if enabled, and
        place the lock in the locked pose."""
        self._reset_hand()
        door_pos = self.init_config['obj_init_pos']
        if self.random_init:
            door_pos = self._get_state_rand_vec()
        self.sim.model.body_pos[self.model.body_name2id('door')] = door_pos
        # 1.5708 rad (~90 deg) is the locked position of the lock joint.
        self._set_obj_xyz(1.5708)
        self.obj_init_pos = self.get_body_com('lock_link')
        self._target_pos = (self.obj_init_pos + np.array([0.1, (- 0.04), 0.0]))
        return self._get_obs()

    def compute_reward(self, action, obs):
        """Reward = 2 * (readiness to push) + 8 * (lock displacement progress).

        obs[:3] is the gripper position and obs[4:7] the lock position.
        """
        del action
        gripper = obs[:3]
        lock = obs[4:7]
        # Shoulder-to-lock offset/scaling emphasizes the y distance.
        offset = np.array([0.0, 0.055, 0.07])
        scale = np.array([0.25, 1.0, 0.5])
        shoulder_to_lock = (((gripper + offset) - lock) * scale)
        shoulder_to_lock_init = (((self.init_tcp + offset) - self.obj_init_pos) * scale)
        ready_to_push = reward_utils.tolerance(np.linalg.norm(shoulder_to_lock), bounds=(0, 0.02), margin=np.linalg.norm(shoulder_to_lock_init), sigmoid='long_tail')
        # Progress is measured along x only, relative to the lock's travel length.
        obj_to_target = abs((self._target_pos[0] - lock[0]))
        pushed = reward_utils.tolerance(obj_to_target, bounds=(0, 0.005), margin=self._lock_length, sigmoid='long_tail')
        reward = ((2 * ready_to_push) + (8 * pushed))
        return (reward, np.linalg.norm(shoulder_to_lock), obs[3], obj_to_target, ready_to_push, pushed)
def main():
    """List target processes via dsz, filtered by an optional regex argument,
    and pretty-print the matches."""
    process_list = []
    # Optional first CLI arg becomes a case-insensitive substring pattern.
    if (len(sys.argv) > 1):
        pattern = (('.*' + sys.argv[1]) + '.*')
    else:
        pattern = '.*'
    print((('\nFiltering processes with regex:: ' + pattern) + '\n'))
    regex = re.compile(pattern, (re.I | re.UNICODE))
    dsz.control.echo.Off()
    cmd = ops.cmd.getDszCommand('processes -list')
    proc_items = cmd.execute()
    if cmd.success:
        for proc_item in proc_items.initialprocesslistitem.processitem:
            pid = str(proc_item.id)
            ppid = str(proc_item.parentid)
            # NOTE(review): str(x.encode('utf-8')) is a Python-2 idiom; on
            # Python 3 it yields "b'...'" strings — confirm target runtime.
            name = str(proc_item.name.encode('utf-8'))
            path = str(proc_item.path.encode('utf-8'))
            user = str(proc_item.user.encode('utf-8'))
            c_time = str(proc_item.created.time)
            c_date = str(proc_item.created.date)
            process = [pid, ppid, path, name, user, c_date, c_time]
            if regex:
                tmp_str = ' '.join(process)
                if re.search(regex, tmp_str):
                    process_list.append(process)
    # BUG FIX: the original tested `process_list > 1`, comparing a list to an
    # int (always truthy on py2, TypeError on py3); print when any matched.
    if process_list:
        pprint(process_list, header=['PID', 'PPID', 'Path', 'Name', 'User', 'CDate', 'CTime'])
    dsz.control.echo.On()
def batch_random_blur(images_list, height, width, blur_probability=0.5):
    """Randomly blur each image of each batch with probability blur_probability.

    Every batch in `images_list` is blurred in full, then each image is kept
    blurred or restored according to a per-image Bernoulli mask.
    """

    def sample_mask(prob, batch_size):
        # 1.0 where the image should be blurred, 0.0 where it stays unchanged.
        shape = [batch_size, 1, 1, 1]
        draws = tf.random_uniform(shape, 0, 1, dtype=tf.float32)
        return tf.cast(tf.less(draws, prob), tf.float32)

    blurred_list = []
    for images in images_list:
        blurred = random_blur(images, height, width, p=1.0)
        mask = sample_mask(blur_probability, tf.shape(images)[0])
        mixed = (blurred * mask) + (images * (1 - mask))
        blurred_list.append(tf.clip_by_value(mixed, 0.0, 1.0))
    return blurred_list
class Precond():
    """Stores preconditions as cond -> {subject -> set(related objects)}."""

    def __init__(self):
        self.precond_dict = {}

    def addPrecond(self, cond, obj1, obj2):
        """Record that `cond` relates obj1 to every element of the list obj2."""
        condition_map = self.precond_dict.setdefault(cond, {})
        if obj1 in condition_map:
            # Merge the new objects into the existing relation set.
            condition_map[obj1] = set(list(condition_map[obj1]) + obj2)
        else:
            condition_map[obj1] = set(obj2)

    def printConds(self):
        """Return a summary: the condition count followed by one line per condition."""
        lines = [str(len(self.precond_dict))]
        for cond, mapping in self.precond_dict.items():
            descriptions = []
            for subject in mapping:
                related = ' / '.join([str(p) for p in list(mapping[subject])])
                descriptions.append('{} --> {}'.format(str(subject), related))
            lines.append('{}: {}'.format(cond, ', '.join(descriptions)))
        return lines

    def printCondsJSON(self):
        """Return preconditions as JSON-style dicts (skipping 'nearby');
        object names are lowercased with spaces replaced by underscores."""
        conds = []
        for cond, mapping in self.precond_dict.items():
            if cond == 'nearby':
                continue
            for subject in mapping:
                subject_norm = [subject[0].lower().replace(' ', '_'), subject[1]]
                related = list(mapping[subject])
                if not related:
                    conds.append({cond: subject_norm})
                    continue
                for element in related:
                    element_norm = [element[0].lower().replace(' ', '_'), element[1]]
                    conds.append({cond: [subject_norm, element_norm]})
        return conds

    def removeCond(self, cond, object_id=None, second=None):
        """Remove a whole condition, one subject, or one related object."""
        if object_id is None:
            del self.precond_dict[cond]
        elif second is None:
            del self.precond_dict[cond][object_id]
        else:
            self.precond_dict[cond][object_id].remove(second)

    def obtainCond(self, cond):
        """Return the subjects registered under `cond` (empty list if absent)."""
        if cond in self.precond_dict:
            return self.precond_dict[cond].keys()
        return []
def deal_range(pattern):
    """Split `pattern` into consecutive character pairs, writing each pair's
    (start, end) into the global sub_ptn_list and counting them in the global
    ptn_len."""
    global ptn_len
    ptn_len = 0
    p = list(pattern)
    if (len(pattern) == 1):
        # Single character: only a start is recorded.
        # NOTE(review): ptn_len is not incremented in this branch, so the
        # entry is not counted — confirm whether that is intended.
        sub_ptn_list[ptn_len].start = p[0]
    # Each adjacent character pair becomes one (start, end) sub-pattern.
    for i in range((len(pattern) - 1)):
        sub_ptn_list[ptn_len].start = p[i]
        sub_ptn_list[ptn_len].end = p[(i + 1)]
        ptn_len = (ptn_len + 1)
class SymbolFilter(Filter):
latex_symbols = {'\\alpha': '', '\\beta': '', '\\gamma': '', '\\delta': '', '\\varepsilon': '', '\\zeta': '', '\\eta': '', '\\vartheta': '', '\\iota': '', '\\kappa': '', '\\lambda': '', '\\mu': '', '\\nu': '', '\\xi': '', '\\pi': '', '\\varrho': '', '\\sigma': '', '\\tau': '', '\\upsilon': '', '\\varphi': '', '\\chi': '', '\\psi': '', '\\omega': '', '\\Gamma': '', '\\Delta': '', '\\Theta': '', '\\Lambda': '', '\\Xi': '', '\\Pi': '', '\\Sigma': '', '\\Upsilon': '', '\\Phi': '', '\\Psi': '', '\\Omega': '', '\\leftarrow': '', '\\longleftarrow': '', '\\rightarrow': '', '\\longrightarrow': '', '\\Leftarrow': '', '\\Longleftarrow': '', '\\Rightarrow': '', '\\Longrightarrow': '', '\\leftrightarrow': '', '\\longleftrightarrow': '', '\\Leftrightarrow': '', '\\Longleftrightarrow': '', '\\mapsto': '', '\\longmapsto': '', '\\relbar': '', '\\Relbar': '', '\\hookleftarrow': '', '\\hookrightarrow': '', '\\leftharpoondown': '', '\\rightharpoondown': '', '\\leftharpoonup': '', '\\rightharpoonup': '', '\\rightleftharpoons': '', '\\leadsto': '', '\\downharpoonleft': '', '\\downharpoonright': '', '\\upharpoonleft': '', '\\upharpoonright': '', '\\restriction': '', '\\uparrow': '', '\\Uparrow': '', '\\downarrow': '', '\\Downarrow': '', '\\updownarrow': '', '\\Updownarrow': '', '\\langle': '', '\\rangle': '', '\\lceil': '', '\\rceil': '', '\\lfloor': '', '\\rfloor': '', '\\flqq': '', '\\frqq': '', '\\bot': '', '\\top': '', '\\wedge': '', '\\bigwedge': '', '\\vee': '', '\\bigvee': '', '\\forall': '', '\\exists': '', '\\nexists': '', '\\neg': '', '\\Box': '', '\\Diamond': '', '\\vdash': '', '\\models': '', '\\dashv': '', '\\surd': '', '\\le': '', '\\ge': '', '\\ll': '', '\\gg': '', '\\lesssim': '', '\\gtrsim': '', '\\lessapprox': '', '\\gtrapprox': '', '\\in': '', '\\notin': '', '\\subset': '', '\\supset': '', '\\subseteq': '', '\\supseteq': '', '\\sqsubset': '', '\\sqsupset': '', '\\sqsubseteq': '', '\\sqsupseteq': '', '\\cap': '', '\\bigcap': '', '\\cup': '', '\\bigcup': '', '\\sqcup': 
'', '\\bigsqcup': '', '\\sqcap': '', '\\Bigsqcap': '', '\\setminus': '', '\\propto': '', '\\uplus': '', '\\bigplus': '', '\\sim': '', '\\doteq': '', '\\simeq': '', '\\approx': '', '\\asymp': '', '\\cong': '', '\\equiv': '', '\\Join': '', '\\bowtie': '', '\\prec': '', '\\succ': '', '\\preceq': '', '\\succeq': '', '\\parallel': '', '\\mid': '', '\\pm': '', '\\mp': '', '\\times': '', '\\div': '', '\\cdot': '', '\\star': '', '\\circ': '', '\\dagger': '', '\\ddagger': '', '\\lhd': '', '\\rhd': '', '\\unlhd': '', '\\unrhd': '', '\\triangleleft': '', '\\triangleright': '', '\\triangle': '', '\\triangleq': '', '\\oplus': '', '\\bigoplus': '', '\\otimes': '', '\\bigotimes': '', '\\odot': '', '\\bigodot': '', '\\ominus': '', '\\oslash': '', '\\dots': '...', '\\cdots': '', '\\sum': '', '\\prod': '', '\\coprod': '', '\\infty': '', '\\int': '', '\\oint': '', '\\clubsuit': '', '\\diamondsuit': '', '\\heartsuit': '', '\\spadesuit': '', '\\aleph': '', '\\emptyset': '', '\\nabla': '', '\\partial': '', '\\flat': '', '\\natural': '', '\\sharp': '', '\\angle': '', '\\copyright': '', '\\textregistered': '', '\\textonequarter': '14', '\\textonehalf': '12', '\\textthreequarters': '34', '\\textordfeminine': 'a', '\\textordmasculine': 'o', '\\euro': '', '\\pounds': '', '\\yen': '', '\\textcent': '', '\\textcurrency': '', '\\textdegree': ''}
isabelle_symbols = {'\\<zero>': '0', '\\<one>': '1', '\\<two>': '2', '\\<three>': '3', '\\<four>': '4', '\\<five>': '5', '\\<six>': '6', '\\<seven>': '7', '\\<eight>': '8', '\\<nine>': '9', '\\<A>': 'A', '\\<B>': 'B', '\\<C>': 'C', '\\<D>': 'D', '\\<E>': 'E', '\\<F>': 'F', '\\<G>': 'G', '\\<H>': 'H', '\\<I>': 'I', '\\<J>': 'J', '\\<K>': 'K', '\\<L>': 'L', '\\<M>': 'M', '\\<N>': 'N', '\\<O>': 'O', '\\<P>': 'P', '\\<Q>': 'Q', '\\<R>': 'R', '\\<S>': 'S', '\\<T>': 'T', '\\<U>': 'U', '\\<V>': 'V', '\\<W>': 'W', '\\<X>': 'X', '\\<Y>': 'Y', '\\<Z>': 'Z', '\\<a>': 'a', '\\<b>': 'b', '\\<c>': 'c', '\\<d>': 'd', '\\<e>': 'e', '\\<f>': 'f', '\\<g>': 'g', '\\<h>': 'h', '\\<i>': 'i', '\\<j>': 'j', '\\<k>': 'k', '\\<l>': 'l', '\\<m>': 'm', '\\<n>': 'n', '\\<o>': 'o', '\\<p>': 'p', '\\<q>': 'q', '\\<r>': 'r', '\\<s>': 's', '\\<t>': 't', '\\<u>': 'u', '\\<v>': 'v', '\\<w>': 'w', '\\<x>': 'x', '\\<y>': 'y', '\\<z>': 'z', '\\<AA>': 'A', '\\<BB>': 'B', '\\<CC>': 'C', '\\<DD>': 'D', '\\<EE>': 'E', '\\<FF>': 'F', '\\<GG>': 'G', '\\<HH>': 'H', '\\<II>': 'I', '\\<JJ>': 'J', '\\<KK>': 'K', '\\<LL>': 'L', '\\<MM>': 'M', '\\<NN>': 'N', '\\<OO>': 'O', '\\<PP>': 'P', '\\<QQ>': 'Q', '\\<RR>': 'R', '\\<SS>': 'S', '\\<TT>': 'T', '\\<UU>': 'U', '\\<VV>': 'V', '\\<WW>': 'W', '\\<XX>': 'X', '\\<YY>': 'Y', '\\<ZZ>': 'Z', '\\<aa>': 'a', '\\<bb>': 'b', '\\<cc>': 'c', '\\<dd>': 'd', '\\<ee>': 'e', '\\<ff>': 'f', '\\<gg>': 'g', '\\<hh>': 'h', '\\<ii>': 'i', '\\<jj>': 'j', '\\<kk>': 'k', '\\<ll>': 'l', '\\<mm>': 'm', '\\<nn>': 'n', '\\<oo>': 'o', '\\<pp>': 'p', '\\<qq>': 'q', '\\<rr>': 'r', '\\<ss>': 's', '\\<tt>': 't', '\\<uu>': 'u', '\\<vv>': 'v', '\\<ww>': 'w', '\\<xx>': 'x', '\\<yy>': 'y', '\\<zz>': 'z', '\\<alpha>': '', '\\<beta>': '', '\\<gamma>': '', '\\<delta>': '', '\\<epsilon>': '', '\\<zeta>': '', '\\<eta>': '', '\\<theta>': '', '\\<iota>': '', '\\<kappa>': '', '\\<lambda>': '', '\\<mu>': '', '\\<nu>': '', '\\<xi>': '', '\\<pi>': '', '\\<rho>': '', '\\<sigma>': '', '\\<tau>': '', 
'\\<upsilon>': '', '\\<phi>': '', '\\<chi>': '', '\\<psi>': '', '\\<omega>': '', '\\<Gamma>': '', '\\<Delta>': '', '\\<Theta>': '', '\\<Lambda>': '', '\\<Xi>': '', '\\<Pi>': '', '\\<Sigma>': '', '\\<Upsilon>': '', '\\<Phi>': '', '\\<Psi>': '', '\\<Omega>': '', '\\<bool>': 'B', '\\<complex>': 'C', '\\<nat>': 'N', '\\<rat>': 'Q', '\\<real>': 'R', '\\<int>': 'Z', '\\<leftarrow>': '', '\\<longleftarrow>': '', '\\<rightarrow>': '', '\\<longrightarrow>': '', '\\<Leftarrow>': '', '\\<Longleftarrow>': '', '\\<Rightarrow>': '', '\\<Longrightarrow>': '', '\\<leftrightarrow>': '', '\\<longleftrightarrow>': '', '\\<Leftrightarrow>': '', '\\<Longleftrightarrow>': '', '\\<mapsto>': '', '\\<longmapsto>': '', '\\<midarrow>': '', '\\<Midarrow>': '', '\\<hookleftarrow>': '', '\\<hookrightarrow>': '', '\\<leftharpoondown>': '', '\\<rightharpoondown>': '', '\\<leftharpoonup>': '', '\\<rightharpoonup>': '', '\\<rightleftharpoons>': '', '\\<leadsto>': '', '\\<downharpoonleft>': '', '\\<downharpoonright>': '', '\\<upharpoonleft>': '', '\\<upharpoonright>': '', '\\<restriction>': '', '\\<Colon>': '', '\\<up>': '', '\\<Up>': '', '\\<down>': '', '\\<Down>': '', '\\<updown>': '', '\\<Updown>': '', '\\<langle>': '', '\\<rangle>': '', '\\<lceil>': '', '\\<rceil>': '', '\\<lfloor>': '', '\\<rfloor>': '', '\\<lparr>': '', '\\<rparr>': '', '\\<lbrakk>': '', '\\<rbrakk>': '', '\\<lbrace>': '', '\\<rbrace>': '', '\\<guillemotleft>': '', '\\<guillemotright>': '', '\\<bottom>': '', '\\<top>': '', '\\<and>': '', '\\<And>': '', '\\<or>': '', '\\<Or>': '', '\\<forall>': '', '\\<exists>': '', '\\<nexists>': '', '\\<not>': '', '\\<box>': '', '\\<diamond>': '', '\\<turnstile>': '', '\\<Turnstile>': '', '\\<tturnstile>': '', '\\<TTurnstile>': '', '\\<stileturn>': '', '\\<surd>': '', '\\<le>': '', '\\<ge>': '', '\\<lless>': '', '\\<ggreater>': '', '\\<lesssim>': '', '\\<greatersim>': '', '\\<lessapprox>': '', '\\<greaterapprox>': '', '\\<in>': '', '\\<notin>': '', '\\<subset>': '', '\\<supset>': '', 
'\\<subseteq>': '', '\\<supseteq>': '', '\\<sqsubset>': '', '\\<sqsupset>': '', '\\<sqsubseteq>': '', '\\<sqsupseteq>': '', '\\<inter>': '', '\\<Inter>': '', '\\<union>': '', '\\<Union>': '', '\\<squnion>': '', '\\<Squnion>': '', '\\<sqinter>': '', '\\<Sqinter>': '', '\\<setminus>': '', '\\<propto>': '', '\\<uplus>': '', '\\<Uplus>': '', '\\<noteq>': '=', '\\<sim>': '', '\\<doteq>': '', '\\<simeq>': '', '\\<approx>': '', '\\<asymp>': '', '\\<cong>': '', '\\<smile>': '', '\\<equiv>': '', '\\<frown>': '', '\\<Join>': '', '\\<bowtie>': '', '\\<prec>': '', '\\<succ>': '', '\\<preceq>': '', '\\<succeq>': '', '\\<parallel>': '', '\\<bar>': '', '\\<plusminus>': '', '\\<minusplus>': '', '\\<times>': '', '\\<div>': '', '\\<cdot>': '', '\\<star>': '', '\\<bullet>': '', '\\<circ>': '', '\\<dagger>': '', '\\<ddagger>': '', '\\<lhd>': '', '\\<rhd>': '', '\\<unlhd>': '', '\\<unrhd>': '', '\\<triangleleft>': '', '\\<triangleright>': '', '\\<triangle>': '', '\\<triangleq>': '', '\\<oplus>': '', '\\<Oplus>': '', '\\<otimes>': '', '\\<Otimes>': '', '\\<odot>': '', '\\<Odot>': '', '\\<ominus>': '', '\\<oslash>': '', '\\<dots>': '...', '\\<cdots>': '', '\\<Sum>': '', '\\<Prod>': '', '\\<Coprod>': '', '\\<infinity>': '', '\\<integral>': '', '\\<ointegral>': '', '\\<clubsuit>': '', '\\<diamondsuit>': '', '\\<heartsuit>': '', '\\<spadesuit>': '', '\\<aleph>': '', '\\<emptyset>': '', '\\<nabla>': '', '\\<partial>': '', '\\<flat>': '', '\\<natural>': '', '\\<sharp>': '', '\\<angle>': '', '\\<copyright>': '', '\\<registered>': '', '\\<hyphen>': '\xad', '\\<inverse>': ' ', '\\<onequarter>': '14', '\\<onehalf>': '12', '\\<threequarters>': '34', '\\<ordfeminine>': 'a', '\\<ordmasculine>': 'o', '\\<section>': '', '\\<paragraph>': '', '\\<exclamdown>': '', '\\<questiondown>': '', '\\<euro>': '', '\\<pounds>': '', '\\<yen>': '', '\\<cent>': '', '\\<currency>': '', '\\<degree>': '', '\\<amalg>': '', '\\<mho>': '', '\\<lozenge>': '', '\\<wp>': '', '\\<wrong>': '', '\\<struct>': '', '\\<acute>': ' 
', '\\<index>': '', '\\<dieresis>': ' ', '\\<cedilla>': ' ', '\\<hungarumlaut>': ' ', '\\<some>': '', '\\<newline>': '', '\\<open>': '', '\\<close>': '', '\\<here>': '', '\\<^sub>': '', '\\<^sup>': '', '\\<^bold>': '', '\\<^bsub>': '', '\\<^esub>': '', '\\<^bsup>': '', '\\<^esup>': ''}
lang_map = {'isabelle': isabelle_symbols, 'latex': latex_symbols}
def __init__(self, **options):
    """Initialize the filter and pick the symbol table for the chosen language."""
    Filter.__init__(self, **options)
    # 'lang' selects which replacement table to use; defaults to Isabelle.
    chosen_lang = get_choice_opt(options, 'lang', ['isabelle', 'latex'], 'isabelle')
    self.symbols = self.lang_map[chosen_lang]
def filter(self, lexer, stream):
    """Yield the token stream with known symbol values replaced by their mapping."""
    table = self.symbols
    for ttype, value in stream:
        # Unknown values pass through unchanged.
        yield (ttype, table.get(value, value))
class TrainPipelineSparseDist(TrainPipeline[(In, Out)]):
    """Three-stage training pipeline that overlaps host-to-device copy, sparse
    input data distribution of sharded modules, and forward/backward, using
    two dedicated CUDA side streams when running on a GPU.

    Keeps a sliding window of up to three in-flight batches
    (``_batch_i``/``_batch_ip1``/``_batch_ip2``).
    """

    def __init__(self, model: torch.nn.Module, optimizer: torch.optim.Optimizer, device: torch.device, execute_all_batches: bool=True, apply_jit: bool=False) -> None:
        self._model = model
        self._optimizer = optimizer
        self._device = device
        # When True, drain the remaining in-flight batches after the
        # dataloader is exhausted instead of raising StopIteration early.
        self._execute_all_batches = execute_all_batches
        self._apply_jit = apply_jit
        if (device.type == 'cuda'):
            # High-priority side streams: one for H2D copies, one for the
            # sparse data-dist collectives.
            self._memcpy_stream: Optional[torch.cuda.streams.Stream] = torch.cuda.Stream(priority=(- 1))
            self._data_dist_stream: Optional[torch.cuda.streams.Stream] = torch.cuda.Stream(priority=(- 1))
        else:
            # CPU path: torch.cuda.stream(None) is a no-op context.
            self._memcpy_stream: Optional[torch.cuda.streams.Stream] = None
            self._data_dist_stream: Optional[torch.cuda.streams.Stream] = None
        # Sliding window of batches i, i+1, i+2.
        self._batch_i: Optional[In] = None
        self._batch_ip1: Optional[In] = None
        self._batch_ip2: Optional[In] = None
        self._context = TrainPipelineContext()
        self._pipelined_modules: List[ShardedModule] = []

    def _fill_pipeline(self, dataloader_iter: Iterator[In]) -> None:
        """Prime the pipeline: copy batch i, start its data dist, prefetch batch i+1."""
        if (self._batch_i and self._batch_ip1):
            # Pipeline already primed.
            return
        if (self._batch_i and self._execute_all_batches):
            # NOTE(review): upstream versions raise here ("pipeline already
            # filled") rather than silently returning -- confirm intent.
            return
        self._batch_i = self._copy_batch_to_gpu(dataloader_iter)
        if (self._batch_i is None):
            raise StopIteration
        self._init_pipelined_modules(self._batch_i)
        self._start_sparse_data_dist(self._batch_i)
        self._wait_sparse_data_dist()
        self._batch_ip1 = self._copy_batch_to_gpu(dataloader_iter)

    def progress(self, dataloader_iter: Iterator[In]) -> Out:
        """Run one pipelined step and return the model output for batch i.

        Overlaps: data dist for batch i+1 and H2D copy for batch i+2 are
        kicked off before batch i's forward/backward.
        """
        self._fill_pipeline(dataloader_iter)
        if self._model.training:
            with record_function('## zero_grad ##'):
                self._optimizer.zero_grad()
        with record_function('## wait_for_batch ##'):
            _wait_for_batch(cast(In, self._batch_i), self._data_dist_stream)
        self._start_sparse_data_dist(self._batch_ip1)
        self._batch_ip2 = self._copy_batch_to_gpu(dataloader_iter)
        with record_function('## forward ##'):
            (losses, output) = cast(Tuple[(torch.Tensor, Out)], self._model(self._batch_i))
        self._wait_sparse_data_dist()
        if self._model.training:
            with record_function('## backward ##'):
                torch.sum(losses, dim=0).backward()
            with record_function('## optimizer ##'):
                self._optimizer.step()
        # Advance the batch window.
        self._batch_i = self._batch_ip1
        self._batch_ip1 = self._batch_ip2
        return output

    def _init_pipelined_modules(self, batch: In) -> None:
        """Rewrite the model once so sharded modules run their input dist off-stream."""
        if self._pipelined_modules:
            return
        (self._pipelined_modules, self._model) = _rewrite_model(model=self._model, context=self._context, dist_stream=self._data_dist_stream, batch=self._batch_i, apply_jit=self._apply_jit)
        # Warm-up pass so the overridden input-dist forwards are traced/bound.
        self._start_sparse_data_dist(self._batch_i)
        _override_input_dist_forwards(self._pipelined_modules)

    def _copy_batch_to_gpu(self, dataloader_iter: Iterator[In]) -> Optional[In]:
        """Fetch the next batch and copy it to the target device on the memcpy stream.

        Returns None at end of data when ``execute_all_batches`` is set;
        otherwise raises StopIteration immediately.
        """
        with record_function('## copy_batch_to_gpu ##'):
            with torch.cuda.stream(self._memcpy_stream):
                batch = next(dataloader_iter, None)
                if (batch is not None):
                    batch = _to_device(batch, self._device, non_blocking=True)
                elif (not self._execute_all_batches):
                    raise StopIteration
                return batch

    def _start_sparse_data_dist(self, batch: Optional[In]) -> None:
        """Kick off input distribution for *batch* on the data-dist stream."""
        if (batch is None):
            return
        with record_function('## start_sparse_data_dist ##'):
            with torch.cuda.stream(self._data_dist_stream):
                # Ensure the H2D copy of this batch has finished first.
                _wait_for_batch(batch, self._memcpy_stream)
                _start_data_dist(self._pipelined_modules, batch, self._context)

    def _wait_sparse_data_dist(self) -> None:
        """Wait on outstanding splits awaitables and publish next-batch contexts."""
        with record_function('## wait_sparse_data_dist ##'):
            with torch.cuda.stream(self._data_dist_stream):
                self._context.module_contexts = self._context.module_contexts_next_batch.copy()
                self._context.input_dist_tensors_requests.clear()
                for (names, awaitable) in self._context.fused_splits_awaitables:
                    for (name, request) in zip(names, awaitable.wait()):
                        self._context.input_dist_tensors_requests[name] = request
class ResNet152bn_CIFAR(ResNetD):
    """ResNet-152 batch-norm CIFAR variant.

    Pins the backbone config to ``'152_cifar_bn'`` and forwards every other
    option unchanged to the ResNetD base class.
    """

    def __init__(self, n_classes: int, n_input_channels: int=3, input_dimension: int=2, final_layer_dropout: float=0.0, stochastic_depth_p: float=0.0, squeeze_excitation: bool=False, squeeze_excitation_rd_ratio: float=(1.0 / 16)):
        super().__init__(
            n_classes,
            n_input_channels,
            config='152_cifar_bn',
            input_dimension=input_dimension,
            final_layer_dropout=final_layer_dropout,
            stochastic_depth_p=stochastic_depth_p,
            squeeze_excitation=squeeze_excitation,
            squeeze_excitation_rd_ratio=squeeze_excitation_rd_ratio,
        )
def uniq(container):
    """Return True iff all items of *container* are unique.

    First tries an O(n log n) sort-and-scan; when items are not mutually
    orderable (sort or comparison raises), falls back to an O(n^2)
    pairwise scan.
    """
    try:
        ordered = sorted(unbool(item) for item in container)
        for prev, nxt in zip(ordered, ordered[1:]):
            if equal(prev, nxt):
                return False
    except (NotImplementedError, TypeError):
        seen = []
        for item in container:
            item = unbool(item)
            for earlier in seen:
                if equal(earlier, item):
                    return False
            seen.append(item)
    return True
def index(request, person_pk=None):
    """List people/tags and add or update a Person via PersonForm.

    When ``person_pk`` is given the form edits that person; otherwise it
    creates a new one.  On a valid POST the person is saved and the view
    redirects back to itself (POST/redirect/GET); on an invalid POST the
    bound form is re-rendered with its errors.
    """
    people = models.Person.objects.all()
    titles = models.Person.title.tag_model.objects.all()
    skills = models.Skill.objects.all()
    hobbies = models.Person.hobbies.tag_model.objects.all()
    if person_pk:
        person = models.Person.objects.get(pk=person_pk)
        submit_label = 'Update'
    else:
        person = None
        submit_label = 'Add'
    # Bug fix: test the request method, not the POST QueryDict.  ``if
    # request.POST:`` is falsy for a POST with an empty body, which would
    # silently skip validation and render an unbound form instead of
    # reporting the missing fields.
    if request.method == 'POST':
        person_form = forms.PersonForm(request.POST, instance=person)
        if person_form.is_valid():
            person = person_form.save()
            messages.success(request, ('Form saved as Person %d' % person.pk))
            return HttpResponseRedirect(reverse(index))
    else:
        person_form = forms.PersonForm(instance=person)
    return render(request, 'example/index.html', {'title': 'Django Tagulous Example', 'Person_name': models.Person.__name__, 'Title_name': models.Person.title.tag_model.__name__, 'Skill_name': models.Skill.__name__, 'Hobby_name': models.Person.hobbies.tag_model.__name__, 'people': people, 'titles': titles, 'hobbies': hobbies, 'skills': skills, 'person_form': person_form, 'form_media': person_form.media, 'submit_label': submit_label})
class _NonLocalBlockND_Group(nn.Module):
    """Grouped non-local (self-attention) block for 2D feature maps.

    ``theta``/``phi``/``g`` project the input to ``inter_channels``; attention
    is computed either globally (``num_group == 1``) or per channel group, the
    result is projected back by ``W`` and added residually, optionally
    followed by a ReLU and an FPNFFConv refinement.
    """

    def __init__(self, in_channels, num_group, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True, relu_layer=True, use_softmax=True, use_ffconv=True, use_attention=True):
        super(_NonLocalBlockND_Group, self).__init__()
        # Despite the generic ND name, only the 2D configuration is supported.
        assert (dimension in [1, 2, 3])
        assert (dimension == 2)
        assert (num_group in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048])
        self.dimension = dimension
        self.sub_sample = sub_sample
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.num_group = num_group
        if (self.inter_channels is None):
            # Default embedding size: half the input channels, at least 1.
            self.inter_channels = (in_channels // 2)
            if (self.inter_channels == 0):
                self.inter_channels = 1
        conv_nd = nn.Conv2d
        max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
        bn = nn.BatchNorm2d
        self.relu_layer = relu_layer
        self.relu = nn.ReLU(inplace=True)
        self.use_softmax = use_softmax
        self.use_ffconv = use_ffconv
        self.use_attention = use_attention
        if self.use_softmax:
            # Normalizes attention over the key positions (dim=2 of f).
            self.softmax = nn.Softmax(dim=2)
        assert (self.num_group <= self.inter_channels)
        if self.use_attention:
            # Channels handled by each attention group.
            self.inter_channels_group = (self.inter_channels // self.num_group)
            print(self.inter_channels_group)
            self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
            self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
            self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
            # Sub-sampling of g/phi is wired up but deliberately disabled.
            assert (sub_sample == False)
            if sub_sample:
                self.g = nn.Sequential(self.g, max_pool_layer)
                self.phi = nn.Sequential(self.phi, max_pool_layer)
            self.W = nn.Sequential(conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0))
            if bn_layer:
                self.W.add_module('bn', bn(self.in_channels))
            # Zero-init the output projection so the block starts as identity.
            nn.init.constant_(self.W[0].weight, 0)
            nn.init.constant_(self.W[0].bias, 0)
        if self.use_ffconv:
            self.ffconv = FPNFFConv(self.in_channels)

    def forward(self, x):
        """Residual non-local attention over the H*W positions of x (B, C, H, W)."""
        if self.use_attention:
            batch_size = x.size(0)
            # Flatten spatial dims to (B, C', H*W); transpose where needed.
            g_x = self.g(x).view(batch_size, self.inter_channels, (- 1))
            g_x = g_x.permute(0, 2, 1)
            theta_x = self.theta(x).view(batch_size, self.inter_channels, (- 1))
            theta_x = theta_x.permute(0, 2, 1)
            phi_x = self.phi(x).view(batch_size, self.inter_channels, (- 1))
            if (self.num_group == 1):
                # Standard (ungrouped) non-local attention.
                f = torch.matmul(theta_x, phi_x)
                if (self.use_softmax == True):
                    f_div_C = self.softmax(f)
                else:
                    # Plain 1/N averaging instead of softmax normalization.
                    N = f.size((- 1))
                    f_div_C = (f / N)
                yy = torch.matmul(f_div_C, g_x)
                yy = yy.permute(0, 2, 1).contiguous()
                yy = yy.view(batch_size, self.inter_channels, *x.size()[2:])
                W_y = self.W(yy)
            else:
                # Split the embedding channels into groups, attend per group,
                # then concatenate the group outputs back together.
                g_xs = torch.split(g_x, self.inter_channels_group, dim=2)
                theta_xs = torch.split(theta_x, self.inter_channels_group, dim=2)
                phi_xs = torch.split(phi_x, self.inter_channels_group, dim=1)
                y_group = []
                for (gx, tx, px) in zip(g_xs, theta_xs, phi_xs):
                    f = torch.matmul(tx, px)
                    if (self.use_softmax == True):
                        f_div_C = self.softmax(f)
                    else:
                        N = f.size((- 1))
                        f_div_C = (f / N)
                    yy = torch.matmul(f_div_C, gx)
                    yy = yy.permute(0, 2, 1).contiguous()
                    y_group.append(yy)
                y_out = torch.cat(y_group, dim=1)
                y_out = y_out.view(batch_size, self.inter_channels, *x.size()[2:])
                W_y = self.W(y_out)
            # Residual connection.
            z = (W_y + x)
            if self.relu_layer:
                z = self.relu(z)
        else:
            z = x
        if self.use_ffconv:
            zz = self.ffconv(z)
        else:
            zz = z
        return zz
# NOTE(review): the bare parenthesised attribute access below is almost
# certainly a decorator whose '@' was lost in this copy (i.e. this function
# is meant to wrap/replace unittest.mock._patch.decoration_helper) --
# confirm against the original source.
(unittest.mock._patch.decoration_helper)
def _decoration_helper(self, patched, args, keywargs):
    """Generator used as a context manager: enter every patching of
    *patched*, extend the call's positional/keyword arguments with the
    created mocks, yield them, and undo all patches on exit via ExitStack.
    """
    extra_args = []
    with contextlib.ExitStack() as exit_stack:
        for patching in patched.patchings:
            arg = exit_stack.enter_context(patching)
            if (not getattr(patching, 'dont_pass', False)):
                # attribute_name patches contribute keyword arguments;
                # DEFAULT-valued patches contribute positional mocks.
                if (patching.attribute_name is not None):
                    keywargs.update(arg)
                elif (patching.new is unittest.mock.DEFAULT):
                    extra_args.append(arg)
        args += tuple(extra_args)
        (yield (args, keywargs))
def process(ayat):
    """Reorder ayah boxes into reading order.

    *ayat* is a non-empty sequence of (x, y, ...) tuples.  Consecutive items
    whose y coordinate lies within 20 units of the current line's anchor are
    grouped into one visual line; each finished line is emitted right-to-left
    (descending x).
    """
    result = []
    cur_y = ayat[0][1]
    same_line = []

    def _flush(line):
        # Sort ascending by x, then emit reversed (right-to-left).
        for item in reversed(sorted(line, key=lambda t: t[0])):
            result.append(item)

    for ayah in ayat:
        if abs(ayah[1] - cur_y) < 20:
            same_line.append(ayah)
        else:
            _flush(same_line)
            cur_y = ayah[1]
            same_line = [ayah]
    _flush(same_line)
    return result
def stop_our_server():
    """Stop the command server if ours is currently running.

    Shutdown is best-effort: any failure is printed rather than raised.
    """
    if not is_our_server_running():
        return
    try:
        server.stop()
        do_request(ADDRESS, 'stopserver', 0.1)
        print('Stopped our command server.')
    except Exception as err:
        print('Failed to stop command server:')
        print(err)
class Bars(object):
    """Factory for Bar instances wired to the shared widget lists."""

    # (sic) attribute name kept misspelled for compatibility with any
    # external users of this class attribute.
    widgtet_list = Widgets_List()

    def _make_bar(self, widgets):
        # All bars share the same opacity and height; only widgets differ.
        return Bar(widgets=widgets, opacity=1, size=21)

    def init_top_single_bar(self):
        return self._make_bar(self.widgtet_list.init_top_single())

    def init_top_double_bar(self):
        return self._make_bar(self.widgtet_list.init_top_double())

    def init_bottom_double_bar(self):
        return self._make_bar(self.widgtet_list.init_bottom_double())
def _set_max_batch_size(source: PersistentTensorDict):
    """Set ``source.batch_size`` to the longest leading shape prefix shared by
    every tensor stored in *source*, recursing into nested tensordicts first.
    """
    tensor_data = list(source._items_metadata())
    # Recurse into nested (non-array) entries so children get their own
    # batch size before the parent's is computed.
    for (key, val) in tensor_data:
        if (not val['array']):
            _set_max_batch_size(source.get(key))
    batch_size = []
    if (not tensor_data):
        # Empty container: empty batch size.
        source.batch_size = batch_size
        return
    curr_dim = 0
    tensor_data = list(source._values_metadata())
    while True:
        if (tensor_data[0]['dim'] > curr_dim):
            # Candidate size for this dimension, taken from the first tensor.
            curr_dim_size = tensor_data[0]['shape'][curr_dim]
        else:
            # First tensor has no more dimensions to share.
            source.batch_size = batch_size
            return
        for tensor in tensor_data[1:]:
            # Stop as soon as any tensor runs out of dims or disagrees.
            if ((tensor['dim'] <= curr_dim) or (tensor['shape'][curr_dim] != curr_dim_size)):
                source.batch_size = batch_size
                return
        batch_size.append(curr_dim_size)
        curr_dim += 1
class ExecutionContext(object):
    """Wrapper around a devtools JavaScript execution context.

    Evaluates expressions or functions through the CDP ``Runtime`` domain and
    wraps returned remote objects via ``objectHandleFactory``.
    """

    def __init__(self, client: CDPSession, contextPayload: Dict, objectHandleFactory: Any, frame: 'Frame'=None) -> None:
        self._client = client
        self._frame = frame
        self._contextId = contextPayload.get('id')
        # auxData says whether this is the frame's default context.
        auxData = contextPayload.get('auxData', {'isDefault': False})
        self._isDefault = bool(auxData.get('isDefault'))
        self._objectHandleFactory = objectHandleFactory

    def frame(self) -> Optional['Frame']:
        """Return the frame this context belongs to, if any."""
        return self._frame

    async def evaluate(self, pageFunction: str, *args: Any, force_expr: bool=False) -> Any:
        """Evaluate ``pageFunction`` and return its JSON-serialized value.

        Returns None for values that cannot be serialized (overlong reference
        chains, non-returnable objects); other NetworkErrors propagate.
        """
        handle = (await self.evaluateHandle(pageFunction, *args, force_expr=force_expr))
        try:
            result = (await handle.jsonValue())
        except NetworkError as e:
            if ('Object reference chain is too long' in e.args[0]):
                return
            if ("Object couldn't be returned by value" in e.args[0]):
                return
            raise
        # NOTE(review): dispose is skipped on the early-return/raise paths above.
        (await handle.dispose())
        return result

    async def evaluateHandle(self, pageFunction: str, *args: Any, force_expr: bool=False) -> 'JSHandle':
        """Evaluate and return a JSHandle without serializing the result.

        ``pageFunction`` is treated as a plain expression when ``force_expr``
        is set or it does not look like a function; otherwise it is invoked
        with ``args`` via ``Runtime.callFunctionOn``.
        """
        # sourceURL comment makes the script identifiable in devtools.
        suffix = f'//# sourceURL={EVALUATION_SCRIPT_URL}'
        if (force_expr or ((not args) and (not helper.is_jsfunc(pageFunction)))):
            try:
                if SOURCE_URL_REGEX.match(pageFunction):
                    # Script already carries its own sourceURL.
                    expressionWithSourceUrl = pageFunction
                else:
                    expressionWithSourceUrl = f'''{pageFunction}
{suffix}'''
                _obj = (await self._client.send('Runtime.evaluate', {'expression': expressionWithSourceUrl, 'contextId': self._contextId, 'returnByValue': False, 'awaitPromise': True, 'userGesture': True}))
            except Exception as e:
                _rewriteError(e)
            exceptionDetails = _obj.get('exceptionDetails')
            if exceptionDetails:
                raise ElementHandleError('Evaluation failed: {}'.format(helper.getExceptionMessage(exceptionDetails)))
            remoteObject = _obj.get('result')
            return self._objectHandleFactory(remoteObject)
        try:
            _obj = (await self._client.send('Runtime.callFunctionOn', {'functionDeclaration': f'''{pageFunction}
{suffix}
''', 'executionContextId': self._contextId, 'arguments': [self._convertArgument(arg) for arg in args], 'returnByValue': False, 'awaitPromise': True, 'userGesture': True}))
        except Exception as e:
            _rewriteError(e)
        exceptionDetails = _obj.get('exceptionDetails')
        if exceptionDetails:
            raise ElementHandleError('Evaluation failed: {}'.format(helper.getExceptionMessage(exceptionDetails)))
        remoteObject = _obj.get('result')
        return self._objectHandleFactory(remoteObject)

    def _convertArgument(self, arg: Any) -> Dict:
        """Convert a Python value or JSHandle into a Runtime call argument dict."""
        # Infinities are not JSON-serializable and need the unserializable form.
        if (arg == math.inf):
            return {'unserializableValue': 'Infinity'}
        if (arg == (- math.inf)):
            return {'unserializableValue': '-Infinity'}
        objectHandle = (arg if isinstance(arg, JSHandle) else None)
        if objectHandle:
            if (objectHandle._context != self):
                raise ElementHandleError('JSHandles can be evaluated only in the context they were created!')
            if objectHandle._disposed:
                raise ElementHandleError('JSHandle is disposed!')
            if objectHandle._remoteObject.get('unserializableValue'):
                return {'unserializableValue': objectHandle._remoteObject.get('unserializableValue')}
            if (not objectHandle._remoteObject.get('objectId')):
                return {'value': objectHandle._remoteObject.get('value')}
            return {'objectId': objectHandle._remoteObject.get('objectId')}
        return {'value': arg}

    async def queryObjects(self, prototypeHandle: 'JSHandle') -> 'JSHandle':
        """Return a handle to the array of all objects with the given prototype."""
        if prototypeHandle._disposed:
            raise ElementHandleError('Prototype JSHandle is disposed!')
        if (not prototypeHandle._remoteObject.get('objectId')):
            raise ElementHandleError('Prototype JSHandle must not be referencing primitive value')
        response = (await self._client.send('Runtime.queryObjects', {'prototypeObjectId': prototypeHandle._remoteObject['objectId']}))
        return self._objectHandleFactory(response.get('objects'))
def test_loading_unexpected_error(retort, strict_coercion, debug_trail):
    """Unexpected loader errors must be wrapped according to the debug-trail mode."""
    loader_ = (
        retort.replace(strict_coercion=strict_coercion, debug_trail=debug_trail)
        .extend(recipe=[loader(str, bad_string_loader)])
        .get_loader(List[str])
    )
    if debug_trail == DebugTrail.DISABLE:
        expected = TypeError()
    elif debug_trail == DebugTrail.FIRST:
        # Only the first failing element (index 1) is reported.
        expected = with_trail(TypeError(), [1])
    elif debug_trail == DebugTrail.ALL:
        # Every failing element is collected into one group.
        expected = CompatExceptionGroup(
            "while loading iterable <class 'list'>",
            [with_trail(TypeError(), [1]), with_trail(TypeError(), [2])],
        )
    else:
        return
    raises_exc(expected, lambda: loader_(['1', 2, 3]))
class Model(torch.nn.Module):
    """Small MLP classifier: 128 -> 64 -> 32 -> 2 with ReLU activations."""

    def __init__(self) -> None:
        super().__init__()
        stack = [
            torch.nn.Linear(128, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 2),
        ]
        self.layers = torch.nn.Sequential(*stack)

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        """Map a (batch, 128) input to (batch, 2) logits."""
        return self.layers(X)
def test_vectorgrid_dict_options():
    """Render a map with a VectorGridProtobuf layer configured via an options
    dict and check that the options survive into the rendered HTML/JS."""
    m = folium.Map(location=(30, 20), zoom_start=4)
    # NOTE(review): the string literal below is unterminated -- the tile URL
    # was evidently lost when this file was copied; restore it from upstream.
    url = '
    options = {'subdomain': 'test', 'token': 'test_token', 'vectorTileLayerStyles': {'all': {'fill': True, 'weight': 1, 'fillColor': 'grey', 'color': 'purple', 'fillOpacity': 0.3, 'opacity': 0.6}}}
    vc = VectorGridProtobuf(url, 'test', options)
    m.add_child(vc)
    out = normalize(m._parent.render())
    # The plugin's JS must be linked and the layer instantiated.
    script = f'<script src="{VectorGridProtobuf.default_js[0][1]}"></script>'
    assert (script in out)
    assert (url in out)
    assert ('L.vectorGrid.protobuf' in out)
    assert ('"token": "test_token"' in out)
    assert ('"subdomain": "test"' in out)
    # Every style option must appear JSON-encoded in the output.
    for (k, v) in options['vectorTileLayerStyles']['all'].items():
        if (type(v) == bool):
            assert (f'"{k}": {str(v).lower()}' in out)
            continue
        if (type(v) == str):
            assert (f'"{k}": "{v}"' in out)
            continue
        assert (f'"{k}": {v}' in out)
class MobileNetV1PreTrainedModel(PreTrainedModel):
    """Base class wiring MobileNetV1 models into the PreTrainedModel API
    (config class, TF weight loader, prefixes, and weight initialization).
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = 'mobilenet_v1'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize a submodule: normal init for conv/linear weights (zero
        bias), identity affine for batch norm."""
        if isinstance(module, nn.BatchNorm2d):
            # BatchNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
class _EvalManager():
    """Bookkeeper for evaluation sessions.

    Creates analysis/PTQ sessions backed by a shared quantsim factory and
    eval function, tracks their results, and renders an HTML diagnostics
    report into ``results_dir``.
    """

    def __init__(self, quantsim_factory: Callable, eval_func: Callable[([tf.keras.Model], float)], results_dir: str):
        self._quantsim_factory = quantsim_factory
        self._eval_func = eval_func
        self._results_dir = results_dir
        os.makedirs(self._results_dir, exist_ok=True)
        # All sessions ever created; PTQ sessions are tracked separately so
        # the best PTQ result can be selected later.
        self._all_sessions: List[_EvalSession] = []
        self._ptq_sessions: List[_PtqSession] = []

    def get_best_ptq_result(self) -> PtqResult:
        """Return the PTQ result with the highest accuracy.

        Raises RuntimeError when no PTQ session has been run yet.
        """
        if (not self._ptq_sessions):
            raise RuntimeError
        ptq_results = [sess.ptq_result for sess in self._ptq_sessions]
        return max(ptq_results, key=(lambda ptq_result: ptq_result.accuracy))

    def analysis_session(self, title: str) -> '_EvalSession':
        """Open a plain evaluation/analysis session."""
        return self._get_session(title, _EvalSession)

    def ptq_session(self, title: str) -> '_PtqSession':
        """Open a PTQ session and register it for best-result tracking."""
        sess = self._get_session(title, _PtqSession)
        self._ptq_sessions.append(sess)
        return sess

    def _get_session(self, title: str, session_cls: type):
        """Instantiate a ``session_cls`` session under the ``.trace`` directory."""
        session = session_cls(title, self._quantsim_factory, self._eval_func, results_dir=os.path.join(self._results_dir, '.trace'))
        self._all_sessions.append(session)
        return session

    def export_diagnostics(self) -> str:
        """Render all session diagnostics to ``diagnostics.html`` and return the HTML."""
        loader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
        env = jinja2.Environment(loader=loader)
        template = env.get_template('auto_quant_diagnostics_template.html')
        # Pull in bokeh resources only when some session actually used bokeh.
        if any((sess.diagnostics.contains_bokeh() for sess in self._all_sessions)):
            from bokeh.resources import CDN
            head = CDN.render()
        else:
            head = ''
        body = {sess.title: sess.diagnostics for sess in self._all_sessions if (not sess.diagnostics.is_empty())}
        html = template.render(head=head, body=body)
        filename = os.path.join(self._results_dir, 'diagnostics.html')
        with open(filename, 'w') as f:
            f.write(html)
        return html
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
    """Stub anchor generator that always yields the four quadrant boxes."""

    def name_scope(self):
        return 'MockAnchorGenerator'

    def num_anchors_per_location(self):
        # One anchor per feature-map cell.
        return [1]

    def _generate(self, feature_map_shape_list):
        # Fixed boxes covering the four quadrants of the unit square.
        quadrants = [
            [0, 0, 0.5, 0.5],
            [0, 0.5, 0.5, 1],
            [0.5, 0, 1, 0.5],
            [0.5, 0.5, 1, 1],
        ]
        return box_list.BoxList(tf.constant(quadrants, tf.float32))
class SliceType(Type[slice]):
    """Graph type whose constants/values are Python ``slice`` objects."""

    def clone(self, **kwargs):
        # SliceType carries no parameters, so cloning is just a fresh instance.
        return type(self)()

    def filter(self, x, strict=False, allow_downcast=None):
        """Accept only genuine slice objects; no coercion is attempted."""
        if isinstance(x, slice):
            return x
        else:
            raise TypeError('Expected a slice!')

    def __str__(self):
        return 'slice'

    def __eq__(self, other):
        # All instances of the same concrete class compare equal.
        return (type(self) == type(other))

    def __hash__(self):
        return hash(type(self))

    # NOTE(review): upstream this is a @staticmethod; the decorator appears
    # to have been lost in this copy -- as written, calling it on an instance
    # would bind the instance to ``a``.  Confirm and restore the decorator.
    def may_share_memory(a, b):
        # Slices are immutable, so "sharing" only makes sense as identity.
        return (isinstance(a, slice) and (a is b))
def decode_residuals(inp, blocksize, result):
    """Decode one FLAC residual section into *result* (appended in place).

    Supports Rice coding methods 0 (4-bit parameters) and 1 (5-bit);
    anything else is a reserved method and raises FLACDecodeException.
    """
    method = inp.read_uint(2)
    if method >= 2:
        raise FLACDecodeException('Reserved residual coding method')
    # Method 0: 4-bit Rice parameter, escape code 15.
    # Method 1: 5-bit Rice parameter, escape code 31.
    parambits = 4 if method == 0 else 5
    escapeparam = (1 << parambits) - 1
    partitionorder = inp.read_uint(4)
    numpartitions = 1 << partitionorder
    if blocksize % numpartitions != 0:
        raise FLACDecodeException('Block size not divisible by number of Rice partitions')
    for part in range(numpartitions):
        count = blocksize >> partitionorder
        if part == 0:
            # The first partition excludes the warm-up samples already decoded.
            count -= len(result)
        param = inp.read_uint(parambits)
        if param < escapeparam:
            for _ in range(count):
                result.append(inp.read_rice_signed_int(param))
        else:
            # Escape code: samples stored verbatim with a fixed bit width.
            numbits = inp.read_uint(5)
            for _ in range(count):
                result.append(inp.read_signed_int(numbits))
def affinity_seg(inputs, output_stride=16):
    """Build a ResNet-101 backbone plus DeepLab-v3 ASPP head that predicts
    pixel affinity scores.

    ``output_stride`` (16 or 8) controls where striding is replaced by
    dilation in the backbone.  Returns the affinity score map with 7*8
    output channels.
    """
    assert ((output_stride == 16) or (output_stride == 8)), 'output_stride should be 16 or 8'
    with tf.variable_scope('resnet_v1_101'):
        net = resnet_v1_base.resnet_head(inputs)
        net = resnet_v1_base.resnet_block(net, 64, 256, 2, 1, 3, scope='block1')
        if (output_stride == 16):
            # Stride down to 1/16, then hold resolution with dilated block4.
            net = resnet_v1_base.resnet_block(net, 256, 512, 2, 1, 4, scope='block2')
            net = resnet_v1_base.resnet_block(net, 512, 1024, 1, 1, 23, scope='block3')
            net = resnet_v1_base.resnet_block(net, 1024, 2048, 1, 2, 3, scope='block4', rate_multiple=[1, 2, 4])
        else:
            # output_stride 8: stop striding earlier, use larger dilation rates.
            net = resnet_v1_base.resnet_block(net, 256, 512, 1, 1, 4, scope='block2')
            net = resnet_v1_base.resnet_block(net, 512, 1024, 1, 2, 23, scope='block3')
            net = resnet_v1_base.resnet_block(net, 1024, 2048, 1, 4, 3, scope='block4', rate_multiple=[1, 2, 4])
    with tf.variable_scope('deeplab_v3'):
        affinity_out = resnet_v1_base.aspp_layer(net, output_stride, scope='aspp_inst')
        # 7*8 output channels -- presumably 8 affinity neighbors times 7
        # classes; TODO confirm against the model definition.
        score_affinity = resnet_v1_base.resnet_score_layer(affinity_out, (7 * 8), scope='logits_inst')
    return score_affinity
class Testing_branch_renderer_case_mixin(Testing_renderer_case_mixin):
    """Mixin exercising every tagged/untagged x clean/dirty x 0/1-commit
    combination of version pieces while on a branch."""

    def test_branch_tagged_0_commits_clean(self):
        pieces = self.define_pieces('v1.2.3', branch=True)
        self.assert_rendered(pieces, 'branch_tagged_0_commits_clean')

    def test_branch_tagged_1_commits_clean(self):
        pieces = self.define_pieces('v1.2.3', branch=True, distance=1)
        self.assert_rendered(pieces, 'branch_tagged_1_commits_clean')

    def test_branch_tagged_0_commits_dirty(self):
        pieces = self.define_pieces('v1.2.3', branch=True, dirty=True)
        self.assert_rendered(pieces, 'branch_tagged_0_commits_dirty')

    def test_branch_tagged_1_commits_dirty(self):
        pieces = self.define_pieces('v1.2.3', branch=True, distance=1, dirty=True)
        self.assert_rendered(pieces, 'branch_tagged_1_commits_dirty')

    def test_branch_untagged_0_commits_clean(self):
        pieces = self.define_pieces(None, branch=True)
        self.assert_rendered(pieces, 'branch_untagged_0_commits_clean')

    def test_branch_untagged_1_commits_clean(self):
        pieces = self.define_pieces(None, branch=True, distance=1)
        self.assert_rendered(pieces, 'branch_untagged_1_commits_clean')

    def test_branch_untagged_0_commits_dirty(self):
        pieces = self.define_pieces(None, branch=True, dirty=True)
        self.assert_rendered(pieces, 'branch_untagged_0_commits_dirty')

    def test_branch_untagged_1_commits_dirty(self):
        pieces = self.define_pieces(None, branch=True, distance=1, dirty=True)
        self.assert_rendered(pieces, 'branch_untagged_1_commits_dirty')
def setup_module():
    """Create the test table with three column families and bind the
    module-level ``connection``/``table`` handles."""
    global connection, table
    connection = Connection(**connection_kwargs)
    assert connection is not None
    # Drop any leftover table from a previous run before recreating it.
    maybe_delete_table()
    families = {
        'cf1': {},
        'cf2': None,
        'cf3': {'max_versions': 1},
    }
    connection.create_table(TEST_TABLE_NAME, families=families)
    table = connection.table(TEST_TABLE_NAME)
    assert table is not None
class SSConv(nn.Module):
    """Separable conv block: BatchNorm -> 1x1 pointwise -> depthwise conv,
    with a LeakyReLU after each convolution."""

    def __init__(self, in_ch, out_ch, kernel_size=3):
        super(SSConv, self).__init__()
        # Depthwise conv: one filter per output channel, 'same' padding.
        self.depth_conv = nn.Conv2d(
            in_channels=out_ch,
            out_channels=out_ch,
            kernel_size=kernel_size,
            stride=1,
            padding=kernel_size // 2,
            groups=out_ch,
        )
        # Pointwise 1x1 projection from in_ch to out_ch, no bias.
        self.point_conv = nn.Conv2d(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            bias=False,
        )
        self.Act1 = nn.LeakyReLU()
        self.Act2 = nn.LeakyReLU()
        self.BN = nn.BatchNorm2d(in_ch)

    def forward(self, input):
        """Normalize, project channels, then mix spatially."""
        pointwise = self.Act1(self.point_conv(self.BN(input)))
        return self.Act2(self.depth_conv(pointwise))
class Conv2d1bit(nn.Conv2d):
    """nn.Conv2d variant that can binarize its weight (and bias, if present)
    on the fly via ``Binarize`` when ``binarized`` is True."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=1, dilation=1, groups=1, bias=False, binarized=False):
        super(Conv2d1bit, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # When True, forward() convolves with binarized parameters.
        self.binarized = binarized

    def forward(self, input):
        if self.binarized:
            weight = Binarize.apply(self.weight)
            bias = Binarize.apply(self.bias) if self.bias is not None else None
        else:
            weight = self.weight
            bias = self.bias
        return F.conv2d(input=input, weight=weight, bias=bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
class PCenter(LocateSolver, BaseOutputMixin):
    """p-center facility location model: open ``p`` facilities minimizing the
    maximum client assignment cost, given a client-by-facility cost matrix
    ``aij``.  Construct via ``from_cost_matrix`` or ``from_geodataframe``.
    """

    def __init__(self, name: str, problem: pulp.LpProblem, aij: np.array):
        self.problem = problem
        self.name = name
        self.aij = aij

    def __add_obj(self) -> None:
        """Set the objective: minimize the W variable (the max assignment cost)."""
        weight = getattr(self, 'weight_var')
        self.problem += (weight, 'objective function')

    # NOTE(review): first parameter is named ``cls`` but no @classmethod
    # decorator is present in this copy -- it was likely lost; confirm
    # against the original source.
    def from_cost_matrix(cls, cost_matrix: np.array, p_facilities: int, predefined_facilities_arr: np.array=None, name: str='p-center'):
        """Build a fully-constrained PCenter model from a cost matrix."""
        r_cli = range(cost_matrix.shape[0])
        r_fac = range(cost_matrix.shape[1])
        model = pulp.LpProblem(name, pulp.LpMinimize)
        p_center = PCenter(name, model, cost_matrix)
        # Decision variables: facility open flags, client assignments, and W.
        FacilityModelBuilder.add_facility_integer_variable(p_center, r_fac, 'y[{i}]')
        FacilityModelBuilder.add_client_assign_variable(p_center, r_cli, r_fac, 'z[{i}_{j}]')
        FacilityModelBuilder.add_weight_continuous_variable(p_center)
        if (predefined_facilities_arr is not None):
            FacilityModelBuilder.add_predefined_facility_constraint(p_center, predefined_facilities_arr)
        p_center.__add_obj()
        FacilityModelBuilder.add_facility_constraint(p_center, p_facilities)
        FacilityModelBuilder.add_assignment_constraint(p_center, r_fac, r_cli)
        FacilityModelBuilder.add_opening_constraint(p_center, r_fac, r_cli)
        FacilityModelBuilder.add_minimized_maximum_constraint(p_center, cost_matrix, r_fac, r_cli)
        return p_center

    # NOTE(review): see from_cost_matrix -- probably also a @classmethod.
    def from_geodataframe(cls, gdf_demand: GeoDataFrame, gdf_fac: GeoDataFrame, demand_col: str, facility_col: str, p_facilities: int, predefined_facility_col: str=None, distance_metric: str='euclidean', name: str='p-center'):
        """Build a PCenter model from demand/facility geodataframes.

        Non-point or mixed geometries are reduced to centroids (with a
        warning); the cost matrix is computed with ``cdist`` under
        ``distance_metric``.  Raises ValueError when the CRS differ.
        """
        predefined_facilities_arr = None
        if (predefined_facility_col is not None):
            predefined_facilities_arr = gdf_fac[predefined_facility_col].to_numpy()
        dem = gdf_demand[demand_col]
        fac = gdf_fac[facility_col]
        dem_type_geom = dem.geom_type.unique()
        fac_type_geom = fac.geom_type.unique()
        _msg = " geodataframe contains mixed type geometries or is not a point. Be sure deriving centroid from geometries doesn't affect the results."
        if ((len(dem_type_geom) > 1) or ('Point' not in dem_type_geom)):
            warnings.warn(f'Demand{_msg}', UserWarning, stacklevel=2)
            dem = dem.centroid
        if ((len(fac_type_geom) > 1) or ('Point' not in fac_type_geom)):
            warnings.warn(f'Facility{_msg}', UserWarning, stacklevel=2)
            fac = fac.centroid
        dem_data = np.array([dem.x.to_numpy(), dem.y.to_numpy()]).T
        fac_data = np.array([fac.x.to_numpy(), fac.y.to_numpy()]).T
        if (gdf_demand.crs != gdf_fac.crs):
            raise ValueError(f'Geodataframes crs are different: gdf_demand-{gdf_demand.crs}, gdf_fac-{gdf_fac.crs}')
        distances = cdist(dem_data, fac_data, distance_metric)
        return cls.from_cost_matrix(distances, p_facilities, predefined_facilities_arr, name)

    def facility_client_array(self) -> None:
        """Populate ``self.fac2cli``: for each facility index, the list of
        client indices assigned to it (empty when the facility is closed)."""
        fac_vars = getattr(self, 'fac_vars')
        cli_vars = getattr(self, 'cli_assgn_vars')
        len_fac_vars = len(fac_vars)
        self.fac2cli = []
        for j in range(len_fac_vars):
            array_cli = []
            if (fac_vars[j].value() > 0):
                for i in range(len(cli_vars)):
                    if (cli_vars[(i, j)].value() > 0):
                        array_cli.append(i)
            self.fac2cli.append(array_cli)

    def solve(self, solver: pulp.LpSolver, results: bool=True):
        """Solve the LP; optionally populate facility/client assignment arrays."""
        self.problem.solve(solver)
        self.check_status()
        if results:
            self.facility_client_array()
            self.client_facility_array()
        return self
def get_model_params(model_name, override_params, num_classes):
    """Resolve block args and global params for a named EfficientNet model.

    Raises NotImplementedError for unknown model names.  When
    ``override_params`` is truthy, its entries replace the matching fields
    on the returned global params.
    """
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    width, depth, image_size, dropout = efficientnet_params(model_name)
    blocks_args, global_params = efficientnet(
        width_coefficient=width,
        depth_coefficient=depth,
        dropout_rate=dropout,
        image_size=image_size,
        num_classes=num_classes,
    )
    if override_params:
        global_params = global_params._replace(**override_params)
    return (blocks_args, global_params)
class Computer(Prodict):
    """Typed Prodict describing a computer build."""

    brand: str
    cpu: Cpu
    rams: List[Ram]
    dict_key: dict
    uninitialized: str
    rams2: List[Ram]

    def total_ram(self):
        """Total capacity of the primary RAM list."""
        return sum(ram.capacity for ram in self.rams)

    def total_ram2(self):
        """Total capacity of rams2; a missing or None entry counts as 0."""
        if 'rams2' not in self or self['rams2'] is None:
            return 0
        return sum(ram.capacity for ram in self.rams2)
class Dataset(torch.utils.data.Dataset):
def __init__(self, args, data_path, vocabs, rev_vocabs, images, split, set_type=None):
self.images = images
self.max_len = args.max_len
self.vocabs = vocabs
self.rev_vocabs = rev_vocabs
self.split = split
self.dataset = args.dataset
self.set_type = set_type
self.BOS = vocabs['<BOS>']
self.EOS = vocabs['<EOS>']
self.PAD = vocabs['<PAD>']
self.UNK = vocabs['<UNK>']
self.MASK = vocabs['<MASK>']
self.CLS = vocabs['<CLS>']
if (self.set_type == 'P'):
self.load_data_multi_sents(data_path)
else:
self.load_data(data_path)
def load_data_multi_sents(self, data_path):
self.datas = []
dropdata = 0
with open(data_path, 'r', encoding='utf-8') as fin:
for line in fin:
jterm = json.loads(line.strip())
if ((jterm['img1'] in self.images) and (jterm['img2'] in self.images)):
if (self.split == 'train'):
self.datas.append(jterm)
else:
for des in jterm['description']:
new_jterm = {}
new_jterm['img1'] = jterm['img1']
new_jterm['img2'] = jterm['img2']
new_jterm['description'] = des
self.datas.append(new_jterm)
else:
dropdata += 1
print('Total datas ', len(self.datas), 'drop ', dropdata, ' data')
def load_data(self, data_path):
self.datas = []
dropdata = 0
with open(data_path, 'r', encoding='utf-8') as fin:
for line in fin:
jterm = json.loads(line.strip())
if ((jterm['img1'] in self.images) and (jterm['img2'] in self.images)):
self.datas.append(jterm)
else:
dropdata += 1
print('Total datas ', len(self.datas), 'drop ', dropdata, ' data')
def __len__(self):
return len(self.datas)
def __getitem__(self, index):
data = self.datas[index]
description = data['description']
batch = {}
if ((self.split == 'train') or (self.set_type == 'P')):
img1 = torch.from_numpy(self.images[data['img1'].replace('.jpg', '')])
img2 = torch.from_numpy(self.images[data['img2'].replace('.jpg', '')])
(dim, n, n) = (img1.size(0), img1.size(1), img1.size(2))
(img1, img2) = (img1.view(dim, (- 1)).transpose(0, 1), img2.view(dim, (- 1)).transpose(0, 1))
ImgId = ((data['img1'] + '_') + data['img2'])
(cap, cap_len, cap_label) = self.padding(description)
batch['img1'] = img1
batch['img2'] = img2
batch['cap'] = cap
batch['cap_label'] = cap_label
return batch
else:
img1 = torch.from_numpy(self.images[data['img1'].replace('.jpg', '')])
img2 = torch.from_numpy(self.images[data['img2'].replace('.jpg', '')])
(dim, n, n) = (img1.size(0), img1.size(1), img1.size(2))
(img1, img2) = (img1.view(dim, (- 1)).transpose(0, 1), img2.view(dim, (- 1)).transpose(0, 1))
ImgId = ((data['img1'] + '_') + data['img2'])
gt_caps = [' '.join(tokens) for tokens in description]
return (img1, img2, gt_caps, ImgId)
def padding(self, sent):
if (len(sent) > (self.max_len - 3)):
sent = sent[:(self.max_len - 3)]
text = list(map((lambda t: self.vocabs.get(t, self.UNK)), sent))
(text, output_label) = self.mask_sent(text)
prob = random.random()
if (prob < 0.15):
text = (([self.BOS] + text) + [self.MASK])
output_label = (([(- 1)] + output_label) + [self.EOS])
else:
text = (([self.BOS] + text) + [self.EOS])
output_label = (([(- 1)] + output_label) + [(- 1)])
length = len(text)
text = (text + ([self.PAD] * (self.max_len - length)))
output_label = (output_label + ([(- 1)] * (self.max_len - length)))
T = torch.LongTensor(text)
output_label = torch.LongTensor(output_label)
return (T, length, output_label)
def random_mask(self, x, i, prob):
if (prob < 0.8):
x[i] = self.MASK
elif (prob < 0.9):
x[i] = random.choice(list(range(len(self.vocabs))))
return x
def mask_sent(self, x):
    """Apply random masking to each token; return (tokens, labels).

    Each position is selected for masking with probability 0.15; selected
    positions record the original token as the label, all others get -1.
    Guarantees at least one masked position.
    """
    labels = []
    for i, token in enumerate(x):
        p = random.random()
        if p < 0.15:
            # Rescale p to [0, 1) before deciding how to corrupt x[i].
            x = self.random_mask(x, i, p / 0.15)
            labels.append(token)
        else:
            labels.append(-1)
    if all(lbl == -1 for lbl in labels):
        # Nothing was masked: force-mask the first token so the sample
        # always contributes to the loss.
        labels[0] = x[0]
        x[0] = self.MASK
    return (x, labels)
def CLS(self):
    # NOTE(review): returning ``self.CLS`` is suspicious -- unless the
    # instance carries a ``CLS`` attribute that shadows this method, the
    # lookup resolves to this bound method itself rather than a CLS token
    # id. Confirm whether this should return a vocabulary id instead.
    return self.CLS
class Terminal256Formatter(Formatter):
    """Format tokens as ANSI escape sequences for 256-color terminals.

    Style colors are mapped onto the nearest entry of the xterm
    256-color palette and each token is wrapped in the matching SGR
    start/reset escape sequences.

    Options (read from ``**options``):
      ``nobold`` / ``nounderline`` / ``noitalic``
          Disable emission of the corresponding text attribute.
      ``linenos``
          If true, prefix every output line with a 4-digit line number.
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.xterm_colors = []   # palette index -> (r, g, b)
        self.best_match = {}     # color string -> palette index (memoized)
        self.style_string = {}   # token type name -> (start escape, reset escape)
        self.usebold = ('nobold' not in options)
        self.useunderline = ('nounderline' not in options)
        self.useitalic = ('noitalic' not in options)
        self._build_color_table()
        self._setup_styles()
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def _build_color_table(self):
        """Populate ``xterm_colors`` with the xterm 256-color palette."""
        # Colors 0-15: the 16 "system" colors (xterm defaults).
        self.xterm_colors.append((0, 0, 0))
        self.xterm_colors.append((205, 0, 0))
        self.xterm_colors.append((0, 205, 0))
        self.xterm_colors.append((205, 205, 0))
        self.xterm_colors.append((0, 0, 238))
        self.xterm_colors.append((205, 0, 205))
        self.xterm_colors.append((0, 205, 205))
        self.xterm_colors.append((229, 229, 229))
        self.xterm_colors.append((127, 127, 127))
        self.xterm_colors.append((255, 0, 0))
        self.xterm_colors.append((0, 255, 0))
        self.xterm_colors.append((255, 255, 0))
        self.xterm_colors.append((92, 92, 255))
        self.xterm_colors.append((255, 0, 255))
        self.xterm_colors.append((0, 255, 255))
        self.xterm_colors.append((255, 255, 255))
        # Colors 16 onward: the 6x6x6 color cube; channel levels follow
        # the xterm table (0, 95, 135, 175, 215, 255).
        # NOTE(review): range(217) generates one entry more than the
        # canonical 216-color cube -- confirm against the xterm palette.
        valuerange = (0, 95, 135, 175, 215, 255)
        for i in range(217):
            r = valuerange[((i // 36) % 6)]
            g = valuerange[((i // 6) % 6)]
            b = valuerange[(i % 6)]
            self.xterm_colors.append((r, g, b))
        # Final entries: 21-step grayscale ramp (8, 18, ..., 218).
        for i in range(1, 22):
            v = (8 + (i * 10))
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        """Return the palette index nearest to (r, g, b) by squared RGB distance."""
        distance = ((257 * 257) * 3)  # larger than any real distance
        match = 0
        # NOTE(review): only indices 0..253 are searched, so the last two
        # palette entries can never be matched -- confirm intent.
        for i in range(0, 254):
            values = self.xterm_colors[i]
            rd = (r - values[0])
            gd = (g - values[1])
            bd = (b - values[2])
            d = (((rd * rd) + (gd * gd)) + (bd * bd))
            if (d < distance):
                match = i
                distance = d
        return match

    def _color_index(self, color):
        """Map a style color (ANSI name or hex string) to a palette index, cached."""
        index = self.best_match.get(color, None)
        if (color in ansicolors):
            # ANSI color names are passed through unchanged.
            index = color
            self.best_match[color] = index
        if (index is None):
            try:
                rgb = int(str(color), 16)
            except ValueError:
                rgb = 0  # unparsable colors fall back to black
            r = ((rgb >> 16) & 255)
            g = ((rgb >> 8) & 255)
            b = (rgb & 255)
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        """Precompute (start, reset) escape strings for every token type."""
        for (ttype, ndef) in self.style:
            escape = EscapeSequence()
            # Explicit ANSI colors take precedence over RGB colors.
            if ndef['ansicolor']:
                escape.fg = self._color_index(ndef['ansicolor'])
            elif ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgansicolor']:
                escape.bg = self._color_index(ndef['bgansicolor'])
            elif ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if (self.usebold and ndef['bold']):
                escape.bold = True
            if (self.useunderline and ndef['underline']):
                escape.underline = True
            if (self.useitalic and ndef['italic']):
                escape.italic = True
            self.style_string[str(ttype)] = (escape.color_string(), escape.reset_string())

    def _write_lineno(self, outfile):
        """Emit a newline (except before the first line) plus a 4-digit line number."""
        self._lineno += 1
        outfile.write(('%s%04d: ' % ((((self._lineno != 1) and '\n') or ''), self._lineno)))

    def format(self, tokensource, outfile):
        # Delegates encoding handling to the base class, which calls
        # format_unencoded() below.
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        """Write each token wrapped in its escape codes, splitting on newlines."""
        if self.linenos:
            self._write_lineno(outfile)
        for (ttype, value) in tokensource:
            not_found = True
            # Walk up the token hierarchy until a styled ancestor is found.
            while (ttype and not_found):
                try:
                    (on, off) = self.style_string[str(ttype)]
                    # Emit line by line so newlines / line numbers are
                    # written outside the colored spans.
                    spl = value.split('\n')
                    for line in spl[:(- 1)]:
                        if line:
                            outfile.write(((on + line) + off))
                        if self.linenos:
                            self._write_lineno(outfile)
                        else:
                            outfile.write('\n')
                    if spl[(- 1)]:
                        outfile.write(((on + spl[(- 1)]) + off))
                    not_found = False
                except KeyError:
                    ttype = ttype.parent
            if not_found:
                # No style information at any level: write the text unstyled.
                outfile.write(value)
        if self.linenos:
            outfile.write('\n')
def _map_context(numcores):
if ((numcores is not None) and (numcores > 1)):
try:
from joblib import Parallel, delayed
from joblib.pool import has_shareable_memory
map = (lambda x, y: Parallel(n_jobs=numcores)(delayed(has_shareable_memory)(x))(y))
parallel = True
except ImportError:
map = (lambda x, y: list(builtins.map(x, y)))
warnings.warn('Could not import joblib. map will be non-parallel.', ImportError)
parallel = False
else:
parallel = False
map = (lambda x, y: list(builtins.map(x, y)))
(yield map) |
class MDEditorWidget(forms.Textarea):
    """Django form widget that renders a textarea as a markdown editor.

    The editor configuration is looked up by name via ``MDConfig`` and
    handed to the ``markdown.html`` template together with the field's
    attributes and current value.
    """

    def __init__(self, config_name='default', *args, **kwargs):
        super(MDEditorWidget, self).__init__(*args, **kwargs)
        self.config = MDConfig(config_name)

    def render(self, name, value, renderer=None, attrs=None):
        """Render the widget HTML for the given field name/value."""
        final_attrs = self.build_attrs(self.attrs, attrs, name=name)
        template_context = {
            'final_attrs': flatatt(final_attrs),
            'value': conditional_escape(force_str('' if value is None else value)),
            'id': final_attrs['id'],
            'config': self.config,
        }
        return mark_safe(render_to_string('markdown.html', template_context))

    def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):
        """Merge base attributes, keyword overrides, and extra attributes."""
        merged = {**base_attrs, **kwargs}
        if extra_attrs:
            merged.update(extra_attrs)
        return merged

    def _get_media(self):
        # Static assets the editor needs on the page.
        return forms.Media(css={'all': ('mdeditor/css/editormd.css',)}, js=('mdeditor/js/jquery.min.js', 'mdeditor/js/editormd.min.js'))
    media = property(_get_media)
def get_params(shared_model, gpu_id):
    """Copy a model's named parameters into a fresh, trainable dict.

    Each parameter is cloned and detached from the shared model's graph,
    re-enabled for gradients, and (when ``gpu_id >= 0``) moved to that
    CUDA device. Returns ``{name: tensor}``.
    """
    target_device = torch.device('cuda:{}'.format(gpu_id)) if gpu_id >= 0 else None
    theta = {}
    for name, param in shared_model.named_parameters():
        detached = param.clone().detach().requires_grad_(True)
        theta[name] = detached if target_device is None else detached.to(target_device)
    return theta
def test_load_encodings_with_disabled_param():
    """Param encodings are dropped when the config disables param quantization.

    Writes a quantsim config with params not quantized, loads a dummy
    encodings file containing both a param and an activation encoding, and
    verifies only the activation encoding survives in the sim.
    """
    # Config: quantize op outputs, leave parameters unquantized.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'True'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    tf.compat.v1.reset_default_graph()
    model = keras_model()
    sim = QuantizationSimModel(model, config_file='./quantsim_config.json')
    # Dummy encodings: one parameter tensor and one activation tensor.
    param_encodings = {'conv2d_1/kernel:0': [{'bitwidth': 4, 'is_symmetric': 'False', 'max': 0., 'min': (- 0.), 'offset': (- 7.0), 'scale': 0.}]}
    activation_encodings = {'conv2d_1/Tanh:0': [{'bitwidth': 8, 'dtype': 'int', 'is_symmetric': 'False', 'max': 5., 'min': (- 7.), 'offset': (- 144), 'scale': 0.}]}
    dummy_encodings = {'activation_encodings': activation_encodings, 'param_encodings': param_encodings}
    encoding_file_path = os.path.join('./', 'dummy.encodings')
    with open(encoding_file_path, 'w') as encoding_fp:
        json.dump(dummy_encodings, encoding_fp, sort_keys=True, indent=4)
    sim.load_encodings_to_sim(encoding_file_path='./dummy.encodings')
    extracted_encoding = sim.get_encodings_dict()
    # The param encoding must not be loaded because param quantization is off.
    assert ('conv2d_1/kernel:0' not in extracted_encoding['param_encodings'])
    # The activation encoding must round-trip unchanged.
    expected_encoding = activation_encodings['conv2d_1/Tanh:0'][0]
    actual_encoding = extracted_encoding['activation_encodings']['conv2d_1/Tanh:0'][0]
    assert (actual_encoding.get('bitwidth') == expected_encoding.get('bitwidth'))
    assert (actual_encoding.get('offset') == expected_encoding.get('offset'))
    assert (actual_encoding.get('is_symmetric') == expected_encoding.get('is_symmetric'))
    assert np.allclose(actual_encoding.get('min'), expected_encoding.get('min'), atol=1e-05)
    assert np.allclose(actual_encoding.get('max'), expected_encoding.get('max'), atol=1e-05)
    # Clean up the encodings file written above.
    if os.path.exists('./dummy.encodings'):
        os.remove('./dummy.encodings')
class InferCwSequenceEmbeddingSharding(BaseCwEmbeddingSharding[InferSequenceShardingContext, KJTList, List[torch.Tensor], List[torch.Tensor]]):
    """Column-wise sequence embedding sharding for inference.

    Builds the input distribution, grouped lookup, and output distribution
    modules, defaulting every device argument to ``self._device``.
    """

    def create_input_dist(self, device: Optional[torch.device]=None) -> BaseSparseFeaturesDist[KJTList]:
        """Create the sparse-feature input distribution module."""
        target_device = self._device if device is None else device
        return InferTwSparseFeaturesDist(
            features_per_rank=self.features_per_rank(),
            world_size=self._world_size,
            device=target_device,
        )

    def create_lookup(self, device: Optional[torch.device]=None, fused_params: Optional[Dict[(str, Any)]]=None, feature_processor: Optional[BaseGroupedFeatureProcessor]=None) -> BaseEmbeddingLookup[(KJTList, List[torch.Tensor])]:
        """Create the grouped embedding lookup module."""
        target_device = self._device if device is None else device
        return InferGroupedEmbeddingsLookup(
            grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
            world_size=self._world_size,
            fused_params=fused_params,
            device=target_device,
        )

    def create_output_dist(self, device: Optional[torch.device]=None) -> BaseEmbeddingDist[(InferSequenceShardingContext, List[torch.Tensor], List[torch.Tensor])]:
        """Create the output distribution module; requires a resolvable device."""
        target_device = self._device if device is None else device
        assert target_device is not None
        return InferCwSequenceEmbeddingDist(target_device, self._world_size)
class Downloader:
    """Small urllib-based HTTP downloader with progress reporting.

    Provides helpers to fetch a URL into memory (optionally decompressing
    gzip responses), stream it to a file, and format byte counts for the
    progress display.
    """

    def __init__(self, **kwargs):
        # Default User-Agent; override with useragent={'User-Agent': ...}.
        self.ua = kwargs.get('useragent', {'User-Agent': 'Mozilla'})
        self.chunk = 1048576  # 1 MiB read size
        # Build an SSL context, falling back through certifi and finally to
        # an unverified context so downloads still work without a CA bundle.
        cafile = ssl.get_default_verify_paths().openssl_cafile
        try:
            if not os.path.exists(cafile):
                import certifi
                cafile = certifi.where()
            self.ssl_context = ssl.create_default_context(cafile=cafile)
        except Exception:
            # Best-effort: certifi missing or context creation failed.
            # (Was a bare ``except:``; narrowed so KeyboardInterrupt etc.
            # are no longer swallowed. Stray ``return`` removed.)
            self.ssl_context = ssl._create_unverified_context()

    def _decode(self, value, encoding='utf-8', errors='ignore'):
        """Decode bytes to str on Python 3; pass everything else through."""
        if sys.version_info >= (3, 0) and isinstance(value, bytes):
            return value.decode(encoding, errors)
        return value

    def open_url(self, url, headers=None):
        """Open ``url`` and return the response object, or None on any failure."""
        headers = self.ua if headers is None else headers
        try:
            return urlopen(Request(url, headers=headers), context=self.ssl_context)
        except Exception:
            return None

    def get_size(self, size, suffix=None, use_1024=False, round_to=2, strip_zeroes=False):
        """Format a byte count as a human-readable string.

        size: byte count (-1 -> 'Unknown'); suffix: force a specific unit
        (case-insensitive, e.g. 'MB'); use_1024: binary units (KiB, ...)
        instead of decimal; round_to: decimal places, clamped to 0..15;
        strip_zeroes: drop trailing zeros from the fraction.
        """
        if size == -1:
            return 'Unknown'
        ext = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'] if use_1024 else ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
        div = 1024 if use_1024 else 1000
        s = float(size)
        # Precompute the value expressed in every unit.
        s_dict = {}
        for e in ext:
            s_dict[e] = s
            s /= div
        # Resolve an explicit suffix case-insensitively; otherwise pick the
        # largest unit whose value is still >= 1.
        suffix = next((x for x in ext if x.lower() == suffix.lower()), None) if suffix else suffix
        biggest = suffix if suffix else next((x for x in ext[::-1] if s_dict[x] >= 1), 'B')
        try:
            round_to = int(round_to)
        except Exception:
            round_to = 2  # non-numeric round_to falls back to the default
        round_to = 0 if round_to < 0 else (15 if round_to > 15 else round_to)
        bval = round(s_dict[biggest], round_to)
        a, b = str(bval).split('.')
        b = b.rstrip('0') if strip_zeroes else (b.ljust(round_to, '0') if round_to > 0 else '')
        return '{:,}{} {}'.format(int(a), '' if not b else '.' + b, biggest)

    def _progress_hook(self, response, bytes_so_far, total_size):
        """Write a one-line, carriage-return-terminated progress indicator."""
        if total_size > 0:
            percent = float(bytes_so_far) / total_size
            percent = round(percent * 100, 2)
            t_s = self.get_size(total_size)
            try:
                # Report progress in the same unit as the total size.
                b_s = self.get_size(bytes_so_far, t_s.split(' ')[1])
            except Exception:
                b_s = self.get_size(bytes_so_far)
            sys.stdout.write('Downloaded {} of {} ({:.2f}%)\r'.format(b_s, t_s, percent))
        else:
            b_s = self.get_size(bytes_so_far)
            sys.stdout.write('Downloaded {}\r'.format(b_s))

    def get_string(self, url, progress=True, headers=None, expand_gzip=True):
        """Download ``url`` and return the body as str, or None on failure."""
        response = self.get_bytes(url, progress, headers, expand_gzip)
        if response is None:
            return None
        return self._decode(response)

    def get_bytes(self, url, progress=True, headers=None, expand_gzip=True):
        """Download ``url`` into memory and return the body as bytes, or None."""
        response = self.open_url(url, headers)
        if response is None:
            return None
        bytes_so_far = 0
        try:
            total_size = int(response.headers['Content-Length'])
        except Exception:
            total_size = -1  # length unknown or header missing/invalid
        chunk_so_far = b''
        while True:
            chunk = response.read(self.chunk)
            bytes_so_far += len(chunk)
            if progress:
                self._progress_hook(response, bytes_so_far, total_size)
            if not chunk:
                break
            chunk_so_far += chunk
        if expand_gzip and response.headers.get('Content-Encoding', 'unknown').lower() == 'gzip':
            # Transparently decompress gzip-encoded responses.
            fileobj = BytesIO(chunk_so_far)
            gfile = gzip.GzipFile(fileobj=fileobj)
            return gfile.read()
        return chunk_so_far

    def stream_to_file(self, url, file_path, progress=True, headers=None):
        """Stream ``url`` to ``file_path``; return the path on success, else None."""
        response = self.open_url(url, headers)
        if response is None:
            return None
        bytes_so_far = 0
        try:
            total_size = int(response.headers['Content-Length'])
        except Exception:
            total_size = -1
        with open(file_path, 'wb') as f:
            while True:
                chunk = response.read(self.chunk)
                bytes_so_far += len(chunk)
                if progress:
                    self._progress_hook(response, bytes_so_far, total_size)
                if not chunk:
                    break
                f.write(chunk)
        return file_path if os.path.exists(file_path) else None
class MCFunctionLexer(RegexLexer):
    """Lexer for the mcfunction scripting language used in Minecraft."""

    name = 'MCFunction'
    # BUG FIX: the url string literal was left unterminated in the source.
    url = 'https://minecraft.wiki/w/Commands'
    aliases = ['mcfunction', 'mcf']
    filenames = ['*.mcfunction']
    mimetypes = ['text/mcfunction']
    version_added = '2.12'

    # Character class that turns a '#' comment into a block/doc comment.
    _block_comment_prefix = '[>!]'

    tokens = {
        'root': [
            include('names'),
            include('comments'),
            include('literals'),
            include('whitespace'),
            include('property'),
            include('operators'),
            include('selectors'),
        ],
        'names': [
            # command names at line start, and after 'run'
            (r'^(\s*)([a-z_]+)', bygroups(Whitespace, Name.Builtin)),
            (r'(?<=run)\s+[a-z_]+', Name.Builtin),
            # UUIDs
            (r'\b[0-9a-fA-F]+(?:-[0-9a-fA-F]+){4}\b', Name.Variable),
            include('resource-name'),
            (r'[A-Za-z_][\w.#%$]+', Keyword.Constant),
            (r'[#%$][\w.#%$]+', Name.Variable.Magic),
        ],
        'resource-name': [
            # namespaced resource names (always lowercase)
            (r'#?[a-z_][a-z_.-]*:[a-z0-9_./-]+', Name.Function),
            # resource paths without a namespace
            (r'#?[a-z0-9_\.\-]+\/[a-z0-9_\.\-\/]+', Name.Function),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'comments': [
            # BUG FIX: these two patterns were plain f-strings whose '\s'
            # is an invalid escape sequence; made them raw (rf) strings.
            (rf'^\s*(#{_block_comment_prefix})', Comment.Multiline,
             ('comments.block', 'comments.block.emphasized')),
            (r'#.*$', Comment.Single),
        ],
        'comments.block': [
            (rf'^\s*#{_block_comment_prefix}', Comment.Multiline,
             'comments.block.emphasized'),
            (r'^\s*#', Comment.Multiline, 'comments.block.normal'),
            default('#pop'),
        ],
        'comments.block.normal': [
            include('comments.block.special'),
            (r'\S+', Comment.Multiline),
            (r'\n', Text, '#pop'),
            include('whitespace'),
        ],
        'comments.block.emphasized': [
            include('comments.block.special'),
            (r'\S+', String.Doc),
            (r'\n', Text, '#pop'),
            include('whitespace'),
        ],
        'comments.block.special': [
            # Params.  BUG FIX: the '@' was missing from the pattern,
            # which made the following rules unreachable.
            (r'@\S+', Name.Decorator),
            include('resource-name'),
            # scoreboard player names
            (r'[#%$][\w.#%$]+', Name.Variable.Magic),
        ],
        'operators': [
            (r'[\-~%^?!+*<>\\/|&=.]', Operator),
        ],
        'literals': [
            (r'\.\.', Literal),
            (r'(true|false)', Keyword.Pseudo),
            (r'[A-Za-z_]+', Name.Variable.Class),
            (r'[0-7]b', Number.Byte),
            (r'[+-]?\d*\.?\d+([eE]?[+-]?\d+)?[df]?\b', Number.Float),
            (r'[+-]?\d+\b', Number.Integer),
            (r'"', String.Double, 'literals.string-double'),
            (r"'", String.Single, 'literals.string-single'),
        ],
        'literals.string-double': [
            (r'\\.', String.Escape),
            (r'[^\\"\n]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'literals.string-single': [
            (r'\\.', String.Escape),
            (r"[^\\'\n]+", String.Single),
            (r"'", String.Single, '#pop'),
        ],
        'selectors': [
            # target selectors such as @a, @e, @p, @r, @s.
            # BUG FIX: the '@' was missing from the pattern.
            (r'@[a-z]', Name.Variable),
        ],
        'property': [
            (r'\{', Punctuation, ('property.curly', 'property.key')),
            (r'\[', Punctuation, ('property.square', 'property.key')),
        ],
        'property.curly': [
            include('whitespace'),
            include('property'),
            (r'\}', Punctuation, '#pop'),
        ],
        'property.square': [
            include('whitespace'),
            include('property'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
        ],
        'property.key': [
            include('whitespace'),
            # namespaced keys, only when followed by '='
            (r'#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+(?=\s*\=)', Name.Attribute, 'property.delimiter'),
            (r'#?[a-z_][a-z0-9_\.\-/]+', Name.Attribute, 'property.delimiter'),
            (r'[A-Za-z_\-\+]+', Name.Attribute, 'property.delimiter'),
            # NOTE(review): these rules have four elements, as in the
            # source; pygments appears to use only the first three, so the
            # quoted text is handled by 'literals.string-*' -- confirm
            # against upstream.
            (r'"', Name.Attribute, 'property.delimiter', 'literals.string-double'),
            (r"'", Name.Attribute, 'property.delimiter', 'literals.string-single'),
            (r'-?\d+', Number.Integer, 'property.delimiter'),
            default('#pop'),
        ],
        'property.key.string-double': [
            (r'\\.', String.Escape),
            (r'[^\\"\n]+', Name.Attribute),
            (r'"', Name.Attribute, '#pop'),
        ],
        'property.key.string-single': [
            (r'\\.', String.Escape),
            (r"[^\\'\n]+", Name.Attribute),
            (r"'", Name.Attribute, '#pop'),
        ],
        'property.delimiter': [
            include('whitespace'),
            (r'[:=]!?', Punctuation, 'property.value'),
            (r',', Punctuation),
            default('#pop'),
        ],
        'property.value': [
            include('whitespace'),
            (r'#?[a-z_][a-z_\.\-]*\:[a-z0-9_\.\-/]+', Name.Tag),
            (r'#?[a-z_][a-z0-9_\.\-/]+', Name.Tag),
            include('literals'),
            include('property'),
            default('#pop'),
        ],
    }
def test_list_all_commits(project):
    """Listing with get_all=True returns commits beyond the first page."""
    payload = {
        'branch': 'new-branch',
        'start_branch': 'main',
        'commit_message': 'New commit on new branch',
        'actions': [
            {'action': 'create', 'file_path': 'new-file', 'content': 'new content'},
        ],
    }
    new_commit = project.commits.create(payload)
    # Default listing only covers the default branch, so the commit on the
    # new branch must be absent.
    default_listing = project.commits.list(all=True)
    assert new_commit not in default_listing
    # get_all=True together with all=True lists commits from all branches.
    full_listing = project.commits.list(get_all=True, all=True)
    assert new_commit in full_listing
    assert len(full_listing) > len(default_listing)
class Migration(migrations.Migration):
    """Add the many-to-many ``groups`` relation to the Task model."""

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('tasks', '0025_task_sites'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='groups',
            field=models.ManyToManyField(
                blank=True,
                help_text='The groups for which this task is active.',
                to='auth.Group',
                verbose_name='Group',
            ),
        ),
    ]
class TestNetCDF4Integration(object):
    """Integration tests for pysat's netCDF4 I/O utilities.

    BUG FIX: every parametrize decorator in this class had been mangled to
    a bare ``.parametrize(...)`` line (a syntax error); the
    ``@pytest.mark.`` prefixes are restored below.
    """

    def setup_class(self):
        """Create a temporary directory shared by all tests in the class."""
        self.tempdir = tempfile.TemporaryDirectory()
        return

    def teardown_class(self):
        """Remove the shared temporary directory."""
        self.tempdir.cleanup()
        del self.tempdir
        return

    def setup_method(self):
        """Create and load a small test Instrument before each test."""
        self.testInst = pysat.Instrument('pysat', 'testing', num_samples=5, use_header=True)
        self.testInst.load(date=self.testInst.inst_module._test_dates[''][''], use_header=True)
        self.pformat = self.testInst.pandas_format
        return

    def teardown_method(self):
        """Drop per-test attributes."""
        del self.testInst, self.pformat
        return

    @pytest.mark.parametrize('use_method', [True, False])
    def test_nan_metadata_filtered_netcdf4(self, use_method):
        """NaN metadata values are exported only when listed in export_nan."""
        self.testInst['test_nan_variable'] = 1.0
        self.testInst.meta['test_nan_variable'] = {'test_nan_export': np.nan}
        if use_method:
            # Swap one default export label for the custom one.
            present = self.testInst.meta._export_nan
            missing = [present.pop()]
            present.append('test_nan_export')
            export_nan = list(present)
        else:
            export_nan = None
            present = self.testInst.meta._export_nan
            missing = ['test_nan_export']
        outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
        self.testInst.to_netcdf4(outfile, export_nan=export_nan, epoch_name=default_epoch_name)
        with netCDF4.Dataset(outfile) as open_f:
            test_vars = open_f['test_nan_variable'].ncattrs()
        testing.assert_list_contains(present, test_vars)
        for mvar in missing:
            assert (mvar not in test_vars), '{:} was written to the netCDF file'.format(repr(mvar))
        return

    @pytest.mark.parametrize('remove', [True, False])
    @pytest.mark.parametrize('check_type', [None, ['value_max']])
    @pytest.mark.parametrize('export_nan', [None, ['fill']])
    @pytest.mark.parametrize('dvar', ['uts', 'string_dummy', 'unicode_dummy', 'int8_dummy', 'int64_dummy'])
    def test_filter_netcdf4_metadata(self, remove, check_type, export_nan, dvar):
        """filter_netcdf4_metadata behaves correctly under all option mixes."""
        mdict = self.testInst.meta[dvar].to_dict()
        if dvar.find('int8') >= 0:
            data_type = bool
        else:
            data_type = type(self.testInst[dvar].values[0])
        with warnings.catch_warnings(record=True) as war:
            fdict = io.filter_netcdf4_metadata(self.testInst, mdict, data_type, export_nan=export_nan, remove=remove, check_type=check_type)
        if len(war) > 0:
            # Only string recast failures are allowed to warn.
            assert ((data_type == str) and (not remove)), 'unexpected warning(s). First (of {:d}) warning: {:s}'.format(len(war), war[0].message)
            testing.eval_warnings(war, ['Unable to cast'], warn_type=UserWarning)
        if export_nan is None:
            export_nan = []
        if check_type is None:
            check_type = []
        for mkey in mdict.keys():
            if mkey not in fdict.keys():
                # Entry was filtered out; justify why.
                try:
                    is_nan = np.isnan(mdict[mkey])
                except TypeError:
                    is_nan = False
                if mkey in check_type:
                    assert (not isinstance(mdict[mkey], data_type)), "{:} is a {:}, it shouldn't have been removed".format(repr(mkey), repr(data_type))
                    assert ((remove | (not ((remove & len(war)) > 0))) | ((mdict[mkey] is None) | is_nan)), '{:} value {:} should have been recast'.format(repr(mkey), repr(mdict[mkey]))
                else:
                    assert ((mdict[mkey] is None) | is_nan), '{:} is not a fill value: {:}'.format(repr(mkey), repr(mdict[mkey]))
                    assert (mkey not in export_nan), '{:} should have been exported'.format(repr(mkey))
            elif ((mkey in export_nan) and (not np.issubdtype(data_type, str)) and np.isnan(mdict[mkey])):
                # NaN survived because the label was explicitly exported.
                assert np.isnan(fdict[mkey])
            elif ((mkey in check_type) and (fdict[mkey] != mdict[mkey])):
                assert (fdict[mkey] == data_type(mdict[mkey])), 'unexpected recast meta data {:} value'.format(repr(mkey))
            else:
                assert (fdict[mkey] == mdict[mkey]), 'meta data {:} changed'.format(repr(mkey))
        return

    @pytest.mark.parametrize('missing', [True, False])
    def test_add_netcdf4_standards_to_meta(self, missing):
        """netCDF4 standard labels are added, warning for missing metadata."""
        if missing:
            drop_var = self.testInst.vars_no_time[0]
            self.testInst.meta.drop(drop_var)
        init_meta = self.testInst.meta.to_dict()
        assert (self.testInst.index.name not in init_meta)
        with warnings.catch_warnings(record=True) as war:
            epoch_name = self.testInst.index.name
            new_meta = io.add_netcdf4_standards_to_metadict(self.testInst, init_meta, epoch_name)
        if missing:
            wstr = ''.join(['Unable to find MetaData for ', drop_var])
            testing.eval_warnings(war, [wstr], warn_type=UserWarning)
        else:
            assert (len(war) == 0)
        new_labels = ['Format', 'Var_Type', 'Depend_0', 'Display_Type']
        assert (new_meta != init_meta)
        for var in init_meta.keys():
            for label in new_labels:
                assert (label not in init_meta[var])
                assert (label in new_meta[var])
            if (self.testInst.name == 'testing2D'):
                assert ('Depend_1' not in init_meta[var])
        if (self.testInst.name == 'testing2D'):
            # Higher-order variables gain a second dependency label.
            for var in self.testInst.vars_no_time:
                if (self.testInst.meta[var].children is not None):
                    assert ('Depend_1' in new_meta[var])
                else:
                    assert ('Depend_1' not in new_meta[var])
        return

    @pytest.mark.parametrize('meta_trans', [{'units': ['testingFillVal', 'testing_FillValue', 'testing_fill_value']}, {'desc': ['tdub', 'test_FillValue']}, {'desc': ['tdub', 'test_FillValue'], 'notes': ['test_notes'], 'fill': ['fill_test']}, {'desc': ['tdub', 'test_FillValue'], 'notes': ['test_notes'], 'fill': ['fill_test'], 'value_min': ['ValueMin', 'Value_Min'], 'value_max': ['ValueMax', 'Value_Max'], 'units': ['takeout'], 'long_name': ['longer_name']}])
    @pytest.mark.parametrize('assign_flag', [True, False])
    def test_meta_translation_to_from_netcdf4(self, assign_flag, meta_trans):
        """Metadata label translations round-trip through a netCDF file."""
        pysat.utils.files.check_and_make_path(self.tempdir.name)
        outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
        # Apply the translation either via instrument attribute or kwarg.
        mkwargs = ({} if assign_flag else {'meta_translation': meta_trans})
        if assign_flag:
            self.testInst._meta_translation_table = meta_trans
        pysat.utils.io.inst_to_netcdf(self.testInst, outfile, epoch_name=default_epoch_name, **mkwargs)
        with netCDF4.Dataset(outfile) as open_f:
            for var in open_f.variables.keys():
                test_vars = open_f[var].ncattrs()
                if ('MonoTon' not in test_vars):
                    form = open_f[var].getncattr('Format')
                    for key in meta_trans.keys():
                        if ((key != 'fill') and (form != 'S1')):
                            testing.assert_list_contains(meta_trans[key], test_vars)
                for mvar in meta_trans.keys():
                    assert (mvar not in test_vars), '{:} was written to the netCDF file'.format(repr(mvar))
        # Build the inverse translation and load the file back.
        inv_trans = {}
        for key in meta_trans.keys():
            for var in meta_trans[key]:
                inv_trans[var] = key
        tkwargs = decode_times_val(self.testInst.pandas_format)
        (data, meta) = pysat.utils.io.load_netcdf(outfile, meta_translation=inv_trans, pandas_format=self.pformat, epoch_name=default_epoch_name, **tkwargs)
        attrs = list(meta.attrs())
        for key in meta_trans.keys():
            wstr = ''.join([key, ' not found in loaded meta information.'])
            assert (key in attrs), wstr
            for var in meta_trans[key]:
                wstr = ''.join([var, ' should have been translated.'])
                assert (var not in attrs), wstr
        return

    def meta_proc_stub(self, meta_dict, vals=None, remove_labels=None):
        """Test helper: inject `vals` into and strip `remove_labels` from meta."""
        if (remove_labels is None):
            remove_labels = []
        if (vals is None):
            vals = {}
        assert isinstance(meta_dict, dict)
        for var in meta_dict.keys():
            for key in vals.keys():
                meta_dict[var][key] = vals[key]
        for var in meta_dict.keys():
            for label in remove_labels:
                if (label in meta_dict[var].keys()):
                    meta_dict[var].pop(label)
        return meta_dict

    @pytest.mark.parametrize('assign_flag', [True, False])
    def test_meta_processor_to_from_netcdf4(self, assign_flag):
        """Custom metadata processors run on both write and load."""
        target = {'testing_metadata_pysat_answer': '42', 'testing_metadata_pysat_question': 'simulation running'}
        to_meta_proc = functools.partial(self.meta_proc_stub, vals=target, remove_labels=['units'])
        outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
        mkwargs = ({} if assign_flag else {'meta_processor': to_meta_proc})
        if assign_flag:
            self.testInst._export_meta_post_processing = to_meta_proc
        pysat.utils.io.inst_to_netcdf(self.testInst, outfile, epoch_name=default_epoch_name, **mkwargs)
        with netCDF4.Dataset(outfile) as open_f:
            for var in open_f.variables.keys():
                test_vars = open_f[var].ncattrs()
                if ('MonoTon' not in test_vars):
                    testing.assert_list_contains(list(target.keys()), test_vars)
                assert ('units' not in test_vars), "'units' found!"
        # Load back with an inverse processor that restores 'units' and
        # writes reversed target values.
        inv_target = {}
        for key in target.keys():
            inv_target[key] = target[key][::(- 1)]
        from_meta_proc = functools.partial(self.meta_proc_stub, vals=inv_target, remove_labels=[])
        tkwargs = decode_times_val(self.testInst.pandas_format)
        (data, meta) = pysat.utils.io.load_netcdf(outfile, meta_processor=from_meta_proc, pandas_format=self.pformat, epoch_name=default_epoch_name, **tkwargs)
        wstr = ''.join(['Incorrect metadata value after inverse processor for', ' variable: {:} and label: {:}'])
        for var in meta.keys():
            for key in inv_target.keys():
                assert (meta[var][key] == inv_target[key]), wstr.format(var, key)
            assert (self.testInst.meta.labels.units in meta[var])
        return

    def test_missing_metadata(self):
        """Writing with empty metadata warns for every variable and subvariable."""
        # Record higher-order (child) variables before clearing the meta.
        ho_vars = []
        for var in self.testInst.meta.keys():
            if ('children' in self.testInst.meta[var]):
                if (self.testInst.meta[var]['children'] is not None):
                    for subvar in self.testInst.meta[var]['children'].keys():
                        ho_vars.append((subvar, var))
        self.testInst.meta.keep([])
        outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
        with warnings.catch_warnings(record=True) as war:
            io.inst_to_netcdf(self.testInst, outfile)
        exp_warns = []
        for var in self.testInst.vars_no_time:
            wstr = ''.join(['Unable to find MetaData for ', var])
            exp_warns.append(wstr)
        testing.eval_warnings(war, exp_warns, warn_type=UserWarning)
        for (svar, var) in ho_vars:
            wstr = ''.join(['Unable to find MetaData for ', svar, ' subvariable of ', var])
            exp_warns.append(wstr)
        testing.eval_warnings(war, exp_warns, warn_type=UserWarning)
        return
def get_no_comm_postprocess(stage: Dict[(str, Any)], num_rounds: int, batchsize: int, proxify: Proxify) -> Callable[([DataFrame], DataFrame)]:
    """Return a post-processing callable for the no-communication path.

    When the batch matches the round count, cudf is unavailable, or the
    stage does not hold cudf DataFrames, the identity function is returned;
    otherwise the callable deep-copies the frame's data/index and proxifies
    the rebuilt DataFrame.
    """
    def identity(df):
        return df

    if num_rounds == batchsize:
        return identity
    try:
        import cudf
    except ImportError:
        return identity
    if not stage:
        return identity
    if not isinstance(next(iter(stage.values())), cudf.DataFrame):
        return identity

    def rebuild(df):
        # Deep-copy columns and index so the proxified result owns its memory.
        return proxify(df._from_data(df._data.copy(deep=True), df._index.copy(deep=True)))

    return rebuild
class TestMetricModule(RecMetricModule):
    """RecMetricModule specialization used by unit tests.

    Forwards construction to the base class unchanged and tolerates
    MagicMock model outputs during metric updates.
    """

    def __init__(self, batch_size: int, world_size: int, rec_tasks: Optional[List[RecTaskInfo]]=None, rec_metrics: Optional[RecMetricList]=None, throughput_metric: Optional[ThroughputMetric]=None, state_metrics: Optional[Dict[(str, StateMetric)]]=None, compute_interval_steps: int=100, min_compute_interval: float=0.0, max_compute_interval: float=float('inf'), memory_usage_limit_mb: float=512) -> None:
        super().__init__(
            batch_size,
            world_size,
            rec_tasks=rec_tasks,
            rec_metrics=rec_metrics,
            throughput_metric=throughput_metric,
            state_metrics=state_metrics,
            compute_interval_steps=compute_interval_steps,
            min_compute_interval=min_compute_interval,
            max_compute_interval=max_compute_interval,
            memory_usage_limit_mb=memory_usage_limit_mb,
        )

    def _update_rec_metrics(self, model_out: Dict[(str, torch.Tensor)], **kwargs: Any) -> None:
        """Parse model outputs and feed them to the metric list."""
        # Some tests pass a MagicMock stand-in for the model output; skip it.
        if isinstance(model_out, MagicMock):
            return
        labels, predictions, weights, _ = parse_task_model_outputs(self.rec_tasks, model_out)
        self.rec_metrics.update(predictions=predictions, labels=labels, weights=weights)
class UniformTextureSequence(TextureSequence):
    """Texture sequence whose items all share a single width and height.

    Subclasses must implement the two ``_get_item_*`` hooks; the public
    accessors simply delegate to them.
    """

    def _get_item_width(self):
        """Width shared by every item; must be overridden."""
        raise NotImplementedError('abstract')

    def _get_item_height(self):
        """Height shared by every item; must be overridden."""
        raise NotImplementedError('abstract')

    def item_width(self):
        return self._get_item_width()

    def item_height(self):
        return self._get_item_height()
def basic_blocks(dim, index, layers, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=GroupNorm1, drop_rate=0.0, drop_path_rate=0.0, layer_scale_init_value=1e-05):
    """Build one PoolFormer stage (stage `index` of the `layers` schedule).

    Drop-path rates increase linearly with the block's global position
    across all stages, scaled by `drop_path_rate`.
    """
    total_blocks = sum(layers)
    blocks_before = sum(layers[:index])
    stage = [
        PoolFormerBlock(
            dim,
            pool_size=pool_size,
            mlp_ratio=mlp_ratio,
            act_layer=act_layer,
            norm_layer=norm_layer,
            drop=drop_rate,
            # linear drop-path schedule over the whole network depth
            drop_path=drop_path_rate * (block_idx + blocks_before) / (total_blocks - 1),
            layer_scale_init_value=layer_scale_init_value,
        )
        for block_idx in range(layers[index])
    ]
    return nn.Sequential(*stage)
class NotifyingQueue(Event, Generic[T]):
    """A queue that doubles as an Event, set while items are available.

    Waiters can block on the event to learn that at least one item is
    queued without consuming it.
    NOTE(review): relies on the Event/Queue semantics of the imported base
    classes (presumably gevent) -- confirm greenlet-safety assumptions.
    """

    def __init__(self, maxsize: int=None, items: Iterable[T]=()) -> None:
        # NOTE(review): ``maxsize`` and the timeouts below accept None, so
        # the annotations should arguably be Optional[...]; left unchanged.
        super().__init__()
        self.queue = Queue(maxsize, items)
        if items:
            # Queue starts non-empty, so the event begins set.
            self.set()

    def put(self, item: T) -> None:
        """Add ``item`` and signal availability."""
        self.queue.put(item)
        self.set()

    def get(self, block: bool=True, timeout: float=None) -> T:
        """Remove and return an item; clears the event when the queue empties."""
        value = self.queue.get(block, timeout)
        if self.queue.empty():
            self.clear()
        return value

    def peek(self, block: bool=True, timeout: float=None) -> T:
        """Return the next item without removing it."""
        return self.queue.peek(block, timeout)

    def __len__(self) -> int:
        return len(self.queue)

    def copy(self) -> List[T]:
        """Return the queued items as a list without disturbing this queue."""
        # Queue.copy() yields an independent queue that is drained here.
        copy = self.queue.copy()
        result = []
        while (not copy.empty()):
            result.append(copy.get_nowait())
        return result

    def __repr__(self) -> str:
        return f'NotifyingQueue(id={id(self)}, num_items={len(self.queue)})'
def handle_data(context, data):
    """Dual moving-average crossover strategy step.

    Skips the warm-up period, then goes long 100 shares when the 100-day
    mean price crosses above the 300-day mean and flattens when it crosses
    below, recording prices and both averages each bar.
    """
    context.i += 1
    # Wait until enough history exists for the long window.
    if context.i < 300:
        return
    mean_price = lambda window: data.history(context.sym, 'price', window, '1d').mean()
    short_mavg = mean_price(100)
    long_mavg = mean_price(300)
    if short_mavg > long_mavg:
        order_target(context.sym, 100)
    elif short_mavg < long_mavg:
        order_target(context.sym, 0)
    record(AAPL=data.current(context.sym, 'price'), short_mavg=short_mavg, long_mavg=long_mavg)
def train(train_queue, model, criterion, optimizer):
    """Train ``model`` for one epoch over ``train_queue``.

    Returns (average top-1 accuracy, average loss). Reads the module-level
    ``args`` (auxiliary, auxiliary_weight, grad_clip, report_freq) and the
    ``is_multi_gpu`` flag; logs to ``logging`` and ``wandb``.
    """
    global is_multi_gpu
    objs = utils.AvgrageMeter()  # running average of the loss
    top1 = utils.AvgrageMeter()  # running average of top-1 accuracy
    top5 = utils.AvgrageMeter()  # running average of top-5 accuracy
    model.train()
    for (step, (input, target)) in enumerate(train_queue):
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        (logits, logits_aux) = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Add the auxiliary-head loss (deep supervision).
            loss_aux = criterion(logits_aux, target)
            loss += (args.auxiliary_weight * loss_aux)
        loss.backward()
        # DataParallel wraps the network, so parameters live on model.module.
        parameters = (model.module.parameters() if is_multi_gpu else model.parameters())
        nn.utils.clip_grad_norm_(parameters, args.grad_clip)
        optimizer.step()
        (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if ((step % args.report_freq) == 0):
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
            # NOTE(review): the first wandb key says "accuracy" but logs the
            # loss average (objs.avg) -- confirm the intended metric name.
            wandb.log({'evaluation_train_accuracy_avg': objs.avg}, step=step)
            wandb.log({'evaluation_train_accuracy_top1': top1.avg}, step=step)
            wandb.log({'evaluation_train_accuracy_top5': top5.avg}, step=step)
    return (top1.avg, objs.avg)
def voc_ap(rec, prec, use_07_metric=False):
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if (np.sum((rec >= t)) == 0):
p = 0
else:
p = np.max(prec[(rec >= t)])
ap = (ap + (p / 11.0))
else:
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range((mpre.size - 1), 0, (- 1)):
mpre[(i - 1)] = np.maximum(mpre[(i - 1)], mpre[i])
i = np.where((mrec[1:] != mrec[:(- 1)]))[0]
ap = np.sum(((mrec[(i + 1)] - mrec[i]) * mpre[(i + 1)]))
return ap |
# BUG FIX: the three decorator lines above this handler had lost their '@'
# prefixes, turning them into no-op expression statements; restored here.
# NOTE(review): the decorator names themselves may also be truncated by the
# same mangling -- confirm against the project's handler decorators.
@_exceptions
@_parameters('name', 'description')
@_parameters('id', 'name', 'slug', 'periodicity', 'description', 'email')
def handle_update(business_logic, query):
    """Handle an 'update' API request for an existing canary.

    Resolves the canary by id (names are not accepted here), normalizes the
    email parameter, applies the update, and returns a 200 response with
    the updated canary serialized.
    """
    find_identifier(business_logic, query, name_ok=False)
    emails = query.get('email', None)
    if emails == []:
        # NOTE(review): an empty list is treated as "leave addresses
        # unchanged" and the sentinel ['-'] clears them -- confirm this
        # matches the API contract.
        emails = None
    elif emails == ['-']:
        emails = []
    canary = business_logic.update(query['id'], query.get('name', None), query.get('periodicity', None), query.get('description', None), emails)
    return ('200 OK', {'status': 'ok', 'canary': jsonify_canary(canary)})
# BUG FIX: the marker line was mangled to a bare '.online' attribute access
# (a syntax error); restored as a pytest marker for network-dependent tests.
@pytest.mark.online
def test_requirement_source_multiple_files(req_file):
    """Dependencies from several requirements files are all collected."""
    source = _init_requirement([(req_file(), 'flask==2.0.1'), (req_file(), 'requests==2.8.1'), (req_file(), 'pip-api==0.0.22\npackaging==21.0')])
    specs = list(source.collect())
    # One resolved dependency per pinned requirement, across all files.
    assert (ResolvedDependency('Flask', Version('2.0.1')) in specs)
    assert (ResolvedDependency('requests', Version('2.8.1')) in specs)
    assert (ResolvedDependency('pip-api', Version('0.0.22')) in specs)
    assert (ResolvedDependency('packaging', Version('21.0')) in specs)
def split_by_attr_random(pt2seeds: Dict[(str, List[List[str]])], pt2seed_names: Dict[(str, List[str])], candidate_dir: Path, output_dir: Path, neg_only_pts=None, pos_per_asin=5, times_negative=3, times_asin_negative=5, context_per_sample=2, max_pos_pairs_per_set=None, pct_dev=0.2):
    """Build train/dev pair datasets with a random split and save them.

    For every product type (pt) in ``pt2seeds``, positive and negative
    pairs are sampled from its seed sets plus the pt's candidate chunk
    file under ``candidate_dir``; product types listed in ``neg_only_pts``
    contribute ASIN-based negatives only.  The combined splits are written
    to ``output_dir``.  ``pt2seed_names`` is accepted but not read here.
    """
    logger.info('Generate by random split')
    logger.info(f'pct_dev = {pct_dev}')

    def _attach_context(train_part, dev_part, chunk_docs):
        # Pair every sampled example with context passages from its chunks.
        train_part = match_context(train_part, chunk_docs, sample_context=context_per_sample)
        dev_part = match_context(dev_part, chunk_docs, sample_context=context_per_sample)
        return (train_part, dev_part)

    train_rows = []
    dev_rows = []
    for (pt, seed_sets) in tqdm(pt2seeds.items(), desc='Sample pairs'):
        logger.debug(pt)
        chunk_docs = utils.JsonL.load(Path(candidate_dir, f'{pt}.chunk.jsonl'))
        (pos_pairs, neg_pairs) = generate_from_seeds(seed_sets, max_pos_pairs_per_set=max_pos_pairs_per_set)
        asin_negatives = generate_from_asin_reg(chunk_docs, sample_per_asin=pos_per_asin)
        (sampled_pos, sampled_neg, sampled_asin_neg) = sample_positive_negative(pos_pairs, neg_pairs, asin_negatives, times_negative=times_negative, times_asin_negative=times_asin_negative)
        (train_part, dev_part) = make_dataset(sampled_pos, sampled_neg, sampled_asin_neg, pct_dev=pct_dev)
        (train_part, dev_part) = _attach_context(train_part, dev_part, chunk_docs)
        train_rows.extend(train_part)
        dev_rows.extend(dev_part)
    if neg_only_pts:
        for pt in neg_only_pts:
            chunk_docs = utils.JsonL.load(Path(candidate_dir, f'{pt}.chunk.jsonl'))
            asin_negatives = generate_from_asin_reg(chunk_docs, sample_per_asin=pos_per_asin)
            # NOTE(review): the ASIN negatives go into make_dataset's
            # *second* positional slot here, unlike the loop above where
            # they are the third argument — preserved as-is; confirm that
            # this asymmetry is intended.
            (train_part, dev_part) = make_dataset([], asin_negatives, [], pct_dev=pct_dev)
            (train_part, dev_part) = _attach_context(train_part, dev_part, chunk_docs)
            train_rows.extend(train_part)
            dev_rows.extend(dev_part)
    save_datasets(train_rows, dev_rows, output_dir)
def test_to_cirq():
    """Round-trip a tiny bloq program to a cirq circuit and compare states."""
    builder = BloqBuilder()
    wire = builder.add(OneState())
    wire = builder.add(Hadamard(), q=wire)
    composite = builder.finalize(q=wire)
    (circuit, _) = composite.to_cirq_circuit()
    # NOTE(review): this diagram string looks flattened (likely originally a
    # multi-line '_c(0): ───X───H───' diagram) — confirm against upstream.
    cirq.testing.assert_has_diagram(circuit, '_c(0): XH')
    # Tensor-network contraction of the bloq and cirq's simulation of the
    # exported circuit must agree on the final state vector.
    contracted = composite.tensor_contract()
    simulated = cirq.final_state_vector(circuit)
    np.testing.assert_allclose(contracted, simulated)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.