code stringlengths 281 23.7M |
|---|
def target_loss(sess, target_lstm, data_loader):
    """Average pretrain (NLL) loss of `target_lstm` over the whole loader.

    Resets the loader's pointer, runs the model's `pretrain_loss` op on each
    batch, and returns the mean loss across all batches.
    """
    data_loader.reset_pointer()
    losses = [
        sess.run(target_lstm.pretrain_loss, {target_lstm.x: data_loader.next_batch()})
        for _ in range(data_loader.num_batch)
    ]
    return np.mean(losses)
def coord_map_from_to(top_from, top_to):
    """Compute the coordinate map between two tops of a layer graph.

    Walks from each top toward the network inputs, composing per-layer
    coordinate maps, and joins the two walks at the first common blob.

    Raises:
        RuntimeError: when the tops are not connected by spatial layers.
    """
    def _spatial_bottoms(top):
        # Crop layers propagate coordinates only through their first bottom.
        inputs = top.fn.inputs
        return inputs[:1] if top.fn.type_name == 'Crop' else inputs

    # Walk 1: map every reachable blob back to `top_from`'s coordinates.
    from_maps = {top_from: (None, 1, 0)}
    pending = {top_from}
    while pending:
        current = pending.pop()
        try:
            for bottom in _spatial_bottoms(current):
                from_maps[bottom] = compose(from_maps[current], coord_map(current.fn))
                pending.add(bottom)
        except UndefinedMapException:
            pass
    # Walk 2: from `top_to`, stop as soon as we hit a blob seen in walk 1.
    to_maps = {top_to: (None, 1, 0)}
    pending = {top_to}
    while pending:
        current = pending.pop()
        if current in from_maps:
            return compose(to_maps[current], inverse(from_maps[current]))
        try:
            for bottom in _spatial_bottoms(current):
                to_maps[bottom] = compose(to_maps[current], coord_map(current.fn))
                pending.add(bottom)
        except UndefinedMapException:
            continue
    raise RuntimeError('Could not compute map between tops; are they connected by spatial layers?')
def vector_to_amplitudes(cc_or_eom, vec, kshift=0):
    """Unpack a flat EOM vector into (r1, r2) amplitude tensors.

    Raises:
        ValueError: when len(vec) does not match the expected vector size
            for this object and k-shift.
    """
    expected = vector_size(cc_or_eom, kshift)
    if len(vec) != expected:
        raise ValueError('The size of the vector passed {:d} should be exactly {:d}'.format(len(vec), expected))
    slices = iter_12(cc_or_eom, kshift)
    nocc = cc_or_eom.nocc
    nvirt = cc_or_eom.nmo - nocc
    nkpts = cc_or_eom.nkpts
    splitter = VectorSplitter(vec)
    # First slice is the singles block; remaining slices fill the doubles.
    r1 = splitter.get(nvirt, slc=next(slices))
    r2 = np.zeros((nkpts, nkpts, nocc, nvirt, nvirt), vec.dtype)
    for slc in slices:
        splitter.get(r2, slc=slc)
    return (r1, r2)
class UserAgentMiddleware(Middleware):
    """Middleware that stamps every outgoing request with the SDK User-Agent."""

    def __init__(self, sdk_version):
        # e.g. "QiniuPython/7.9 (Linux; x86_64; ) Python/3.10.1"
        system_info = '{0}; {1}'.format(_platform.system(), _platform.machine())
        self.user_agent = 'QiniuPython/{0} ({1}; ) Python/{2}'.format(
            sdk_version, system_info, _platform.python_version())

    def __call__(self, request, nxt):
        # Ensure a headers mapping exists before setting the header.
        if not request.headers:
            request.headers = {}
        request.headers['User-Agent'] = self.user_agent
        return nxt(request)
class TestTryStar(TestNameCheckVisitorBase):
    # Tests for PEP 654 `except*` (exception group) handler inference.
    # NOTE(review): the bare `_before((3, 11))` expressions below look like
    # decorators whose `@` (and possibly a name prefix such as `@skip_before`)
    # was lost in extraction -- confirm against the original source.
    _before((3, 11))
    def test_eg_types(self):
        # Each handler's `as` name should infer to (Base)ExceptionGroup of the
        # caught types; invalid handler types are flagged via `# E:` markers.
        self.assert_passes('\n from typing import assert_type\n\n def capybara():\n try:\n pass\n except* ValueError as eg:\n assert_type(eg, ExceptionGroup[ValueError])\n except* KeyboardInterrupt as eg:\n assert_type(eg, BaseExceptionGroup[KeyboardInterrupt])\n except* (OSError, (RuntimeError, KeyError)) as eg:\n assert_type(eg, ExceptionGroup[OSError | RuntimeError | KeyError])\n except *ExceptionGroup as eg: # E: bad_except_handler\n pass\n except *int as eg: # E: bad_except_handler\n pass\n ')
    _before((3, 11))
    def test_variable_scope(self):
        # Assignments inside try/except* narrow as in regular try/except.
        self.assert_passes('\n from typing import assert_type, Literal\n\n def capybara():\n x = 0\n try:\n x = 1\n assert_type(x, Literal[1])\n except* ValueError as eg:\n assert_type(x, Literal[0, 1])\n x = 2\n except* TypeError as eg:\n assert_type(x, Literal[0, 1, 2])\n x = 3\n assert_type(x, Literal[1, 2, 3])\n ')
    _before((3, 11))
    def test_try_else(self):
        # `else` only runs when no handler fired, so x narrows to Literal[1].
        self.assert_passes('\n from typing import assert_type, Literal\n\n def capybara():\n x = 0\n try:\n x = 1\n assert_type(x, Literal[1])\n except* ValueError as eg:\n assert_type(x, Literal[0, 1])\n x = 2\n except* TypeError as eg:\n assert_type(x, Literal[0, 1, 2])\n x = 3\n else:\n assert_type(x, Literal[1])\n x = 4\n assert_type(x, Literal[2, 3, 4])\n ')
    _before((3, 11))
    def test_try_finally(self):
        # `finally` may observe any state reachable from try or handlers.
        self.assert_passes('\n from typing import assert_type, Literal\n\n def capybara():\n x = 0\n try:\n x = 1\n assert_type(x, Literal[1])\n except* ValueError as eg:\n assert_type(x, Literal[0, 1])\n x = 2\n except* TypeError as eg:\n assert_type(x, Literal[0, 1, 2])\n x = 3\n finally:\n assert_type(x, Literal[0, 1, 2, 3])\n x = 4\n assert_type(x, Literal[4])\n ')
def test_invalid_usage_old():
    """CLI must reject unknown vis types, outlier methods/feature types, and
    out-of-range or non-finite outlier fractions."""
    with raises(NotImplementedError):
        sys.argv = shlex.split('visualqc -u {} -i {} -o {} --vis_type labels_contour'.format(fs_dir, id_list, out_dir))
        cli_run()
    with raises(NotImplementedError):
        sys.argv = shlex.split('visualqc -f {} -i {} -o {} --outlier_method random_name_sngkkjfdk'.format(fs_dir, id_list, out_dir))
        cli_run()
    with raises(NotImplementedError):
        sys.argv = shlex.split('visualqc -f {} -i {} -o {} --outlier_feat_types random_name_sngkkjfdk'.format(fs_dir, id_list, out_dir))
        cli_run()
    # Fraction must be a finite value strictly inside (0, 1).
    # FIX: np.Inf was removed in NumPy 2.0; use the canonical np.inf alias.
    for invalid_val in [-0.1, +1.1, 1.0, 0.0, np.nan, np.inf]:
        with raises(ValueError):
            sys.argv = shlex.split('visualqc -f {} -i {} -o {} --outlier_fraction {val}'.format(fs_dir, id_list, out_dir, val=invalid_val))
            cli_run()
def is_iambic(phrase):
    """Return 1 if `phrase` scans as iambic meter, else 0.

    Uses CMUdict stress codes via `pronouncing`: '0' unstressed, '1' primary
    stress, '2' secondary stress.  One-syllable words carrying primary stress
    are downgraded to '2' so they fit either metrical position.  Returns 0
    when any word is missing from the pronouncing dictionary.
    """
    meter = ''
    for word in phrase.split():
        word = word.strip().strip(string.punctuation).lower()
        try:
            phones_list = pronouncing.phones_for_word(word)
            # Unknown words yield an empty list, so [0] raises IndexError.
            stresses = pronouncing.stresses(phones_list[0])
            if len(stresses) == 1:
                if stresses == '1':
                    stresses = '2'
            meter += stresses
        # FIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt); only the expected missing-word case is caught.
        except IndexError:
            return 0
    stress_values = [int(x) for x in meter]
    even_positions = set(stress_values[i] for i in range(0, len(stress_values), 2))
    odd_positions = set(stress_values[i] for i in range(1, len(stress_values), 2))
    # Iambic meter: odd (strong) positions are never unstressed, and even
    # (weak) positions never carry primary stress.
    if 0 in odd_positions:
        return 0
    if 1 in even_positions:
        return 0
    return 1
def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, mine the hardest positive and hardest negative.

    Args:
        dist_mat: pairwise distance matrix of shape [N, N].
        labels: identity labels of shape [N].  Assumes each identity occurs
            the same number of times so the masked views reshape to [N, -1].
        return_inds: also return the absolute indices of mined examples.

    Returns:
        (dist_ap, dist_an) of shape [N]; plus (p_inds, n_inds) when
        `return_inds` is True.
    """
    assert dist_mat.dim() == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)

    label_mat = labels.expand(N, N)
    is_pos = label_mat.eq(label_mat.t())
    is_neg = label_mat.ne(label_mat.t())

    # Hardest positive: farthest same-identity sample per anchor row.
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # Hardest negative: closest different-identity sample per anchor row.
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)

    if not return_inds:
        return (dist_ap, dist_an)

    # Translate indices within the masked views back to absolute column ids.
    ind = (labels.new().resize_as_(labels)
           .copy_(torch.arange(0, N).long())
           .unsqueeze(0).expand(N, N))
    p_inds = torch.gather(ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
    n_inds = torch.gather(ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
    return (dist_ap, dist_an, p_inds.squeeze(1), n_inds.squeeze(1))
class GetMinstServingHtmlHandler(webBase.BaseHandler):
    """Renders the MNIST serving page listing the first 30 generated images."""

    def get(self):
        image_paths = []
        for index in np.arange(30):
            # `out_dir` is a module-level %-template for the image path.
            path = out_dir % ('%05d' % index)
            print(path)
            image_paths.append(path)
        self.render('minst_serving.html', image_array=image_paths)
def test_imported_module_var_inferable3() -> None:
    """A dunder module var built from an imported dunder var infers fully."""
    module = parse(textwrap.dedent("\n from top3.mod import __dunder_var__ as v\n __dunder_var__ = ['w'] + v\n "), module_name='top')
    # Register the imported-from module so inference can resolve `v`.
    parse("__dunder_var__ = ['v']", module_name='top3.mod')
    value_node = module.body[-1].value
    inferred = next(value_node.infer())
    assert inferred is not util.Uninferable
    assert inferred.as_string() == "['w', 'v']"
def autodoc_process_bases(app, name, obj, option, bases: list):
    """Sphinx `autodoc-process-bases` hook: rewrite base-class entries.

    Normalizes each base into a ``:class:`` cross-reference, maps internal
    enum helpers to their stdlib equivalents, strips generic parameters, and
    resolves private telegram module paths via PRIVATE_BASE_CLASSES.
    """
    for idx, base in enumerate(bases):
        base = str(base)

        # typing alias for the stdlib async context-manager ABC.
        if base.startswith('typing.AbstractAsyncContextManager'):
            bases[idx] = ':class:`contextlib.AbstractAsyncContextManager`'
            continue

        # FIX: the original used the chained comparison
        # `'StringEnum' in base == "<enum 'StringEnum'>"`, i.e.
        # `('StringEnum' in base) and (base == "<enum 'StringEnum'>")`.
        # The `in` test is redundant whenever the equality holds, so this
        # plain equality is exactly equivalent and far clearer.
        if base == "<enum 'StringEnum'>":
            # StringEnum subclasses both str and Enum.
            bases[idx] = ':class:`enum.Enum`'
            bases.insert(0, ':class:`str`')
            continue

        if 'IntEnum' in base:
            bases[idx] = ':class:`enum.IntEnum`'
            continue

        # Strip generic parameters, e.g. "Foo[int]" -> "Foo".
        if base.endswith(']'):
            base = base.split('[', maxsplit=1)[0]
            bases[idx] = f':class:`{base}`'

        # Only telegram(.ext) classes outside _utils get private-path fixing.
        if (not (match := re.search(pattern='(telegram(\\.ext|))\\.[_\\w\\.]+', string=base))) or ('_utils' in base):
            continue

        parts = match.group(0).split('.')
        # Collapse a private module segment: keep everything before it plus
        # the final class name.
        for index, part in enumerate(parts):
            if part.startswith('_'):
                parts = parts[:index] + parts[-1:]
                break
        parts = [PRIVATE_BASE_CLASSES.get(part, part) for part in parts]
        base = '.'.join(parts)
        bases[idx] = f':class:`{base}`'
class TreeWindowBase(QMdiSubWindow):
    """Base MDI sub-window hosting a QTreeView with incremental find support.

    Subclasses are expected to assign `self.model` and `self.find_bar`
    before Find/DisplayFound are used.
    """

    def __init__(self, parent=None):
        super(TreeWindowBase, self).__init__(parent)
        self.model = None
        self.find_bar = None
        self.view = QTreeView()
        self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
        self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
        self.context_menu = TreeContextMenu(self.view)

    def DisplayFound(self, ids):
        """Expand and select the tree path described by `ids` (one dbid per
        tree level, root first).  Returns True when the full path was found."""
        if not len(ids):
            return False
        parent = QModelIndex()
        for dbid in ids:
            found = False
            n = self.model.rowCount(parent)
            # FIX: was `xrange(n)` (Python 2 only); `range` behaves
            # identically here and works on Python 3.
            for row in range(n):
                child = self.model.index(row, 0, parent)
                if child.internalPointer().dbid == dbid:
                    found = True
                    self.view.setExpanded(parent, True)
                    self.view.setCurrentIndex(child)
                    parent = child
                    break
            if not found:
                break
        return found

    def Find(self, value, direction, pattern, context):
        """Kick off an asynchronous model search; FindDone gets the ids."""
        self.view.setFocus()
        self.find_bar.Busy()
        self.model.Find(value, direction, pattern, context, self.FindDone)

    def FindDone(self, ids):
        """Completion callback: show the result or report 'not found'."""
        found = True
        if not self.DisplayFound(ids):
            found = False
        self.find_bar.Idle()
        if not found:
            self.find_bar.NotFound()
class XBOGExchangeCalendar(TradingCalendar):
    # Trading calendar for XBOG (Bolsa de Valores de Colombia, Bogota).
    # NOTE(review): `regular_holidays` is conventionally a @property on
    # TradingCalendar subclasses; the decorator may have been lost in
    # extraction -- confirm against the original source.
    name = 'XBOG'
    # NOTE(review): tz is America/New_York although the exchange is in
    # Bogota -- presumably intentional upstream; verify before changing.
    tz = timezone('America/New_York')
    # (start_date, open/close time) pairs; None start means "always".
    open_times = ((None, time(9, 31)),)
    close_times = ((None, time(16)),)
    def regular_holidays(self):
        # Colombian public holidays plus the exchange's last trading day.
        return HolidayCalendar([NewYearsDay, Epiphany, StJosephsDay, MaundyThursday, GoodFriday, LabourDay, MondayAfterAscensionDay, MondayAfterCorpusChristi, MondayAfterSacredHeart, StPeterAndStPaulDay, ColombiaIndependenceDay, BattleOfBoyaca, AssumptionDay, DiaDeLaRaza, AllSaintsDay, CartagenaIndependenceDay, ImmaculateConception, ChristmasDay, LastTradingDay])
class RandomDropout(nn.Module):
    """Feature dropout whose ratio is drawn uniformly from [0, p) per call.

    Args:
        p: upper bound for the randomly sampled dropout probability.
        inplace: apply the dropout in place.
    """

    def __init__(self, p=0.5, inplace=False):
        super(RandomDropout, self).__init__()
        self.p = p
        self.inplace = inplace

    def forward(self, X):
        theta = torch.Tensor(1).uniform_(0, self.p)[0]
        # BUG FIX: the original passed `self.train` -- the nn.Module.train()
        # *method*, which is always truthy -- as the training flag, so
        # dropout stayed active in eval mode.  `self.training` is the real
        # boolean state toggled by train()/eval().
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True):
    """Assemble numbered frame images from `frame_dir` into a video file.

    Args:
        frame_dir: directory containing the frame images.
        video_file: output video path.
        fps: output frame rate.
        fourcc: codec fourcc string.
        filename_tmpl: template mapping a frame index to its filename.
        start: index of the first frame.
        end: one past the last frame index; 0 means count all files in
            `frame_dir` matching the template's extension.
        show_progress: display a progress bar while writing.
    """
    if end == 0:
        extension = filename_tmpl.split('.')[-1]
        end = len([entry for entry in scandir(frame_dir, extension)])
    first_frame_path = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_frame_path, 'The start frame not found: ' + first_frame_path)
    # Output resolution is taken from the first frame.
    height, width = cv2.imread(first_frame_path).shape[:2]
    writer = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, (width, height))

    def _write_one(index):
        # Frames are re-read one at a time to keep memory usage flat.
        frame_path = osp.join(frame_dir, filename_tmpl.format(index))
        writer.write(cv2.imread(frame_path))

    if show_progress:
        track_progress(_write_one, range(start, end))
    else:
        for index in range(start, end):
            _write_one(index)
    writer.release()
def get_portfoliodiversification_solution(rho: np.ndarray, n: int, q: int, result: MinimumEigensolverResult) -> np.ndarray:
    """Decode a minimum-eigensolver result into the binary solution vector.

    Args:
        rho: asset similarity matrix (unused; kept for interface parity).
        n: number of assets; the state encodes n**2 + n decision bits.
        q: number of clusters (unused; kept for interface parity).
        result: solver result whose eigenstate encodes the solution.

    Returns:
        The binary solution as an array, least-significant bit first.
    """
    del rho, q
    v = result.eigenstate
    if isinstance(v, StateFn):
        v = v.to_matrix()
    N = (n ** 2) + n
    # Index of the dominant amplitude.  PERF FIX: hoist max(v) out of the
    # scan -- the original re-evaluated max(v) per element, O(len(v)**2).
    max_amplitude = max(v)
    index_value = [i for i in range(len(v)) if v[i] == max_amplitude][0]
    # Binary expansion of the index, left-padded with zeros to N bits
    # (zfill is equivalent to the original prepend-'0' loop).
    string_value = '{0:b}'.format(index_value).zfill(N)
    # Bit characters to ints, then reverse so bit 0 comes first.
    x_state = np.flip([int(bit) for bit in string_value], axis=0)
    return cast(np.ndarray, x_state)
_module()
# NOTE(review): the bare `_module()` expression above looks like a registry
# decorator (e.g. `@...register_module()`) whose `@` and name prefix were
# lost in extraction -- confirm against the original source.
class X3DHead(BaseHead):
    """Classification head for X3D networks.

    Pools spatio-temporal features to 1x1x1, then projects through
    fc1 -> ReLU (-> optional dropout) -> fc2 to class scores.

    Args:
        num_classes: number of output classes.
        in_channels: channels of the input feature map.
        loss_cls (dict): classification loss config.
        spatial_type (str): pooling type, 'avg' or 'max'.
        dropout_ratio (float): dropout probability; 0 disables dropout.
        init_std (float): std for normal weight initialization.
        fc1_bias (bool): whether fc1 has a bias term.
    """
    def __init__(self, num_classes, in_channels, loss_cls=dict(type='CrossEntropyLoss'), spatial_type='avg', dropout_ratio=0.5, init_std=0.01, fc1_bias=False):
        super().__init__(num_classes, in_channels, loss_cls)
        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        if (self.dropout_ratio != 0):
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        self.in_channels = in_channels
        # fc1 widens features to a fixed 2048-dim bottleneck before fc2.
        self.mid_channels = 2048
        self.num_classes = num_classes
        self.fc1_bias = fc1_bias
        self.fc1 = nn.Linear(self.in_channels, self.mid_channels, bias=self.fc1_bias)
        self.fc2 = nn.Linear(self.mid_channels, self.num_classes)
        self.relu = nn.ReLU()
        self.pool = None
        if (self.spatial_type == 'avg'):
            self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        elif (self.spatial_type == 'max'):
            self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
        else:
            raise NotImplementedError
    def init_weights(self):
        """Initialize both FC layers from N(0, init_std)."""
        normal_init(self.fc1, std=self.init_std)
        normal_init(self.fc2, std=self.init_std)
    def forward(self, x):
        """Map a feature tensor to per-class scores of shape [N, num_classes]."""
        assert (self.pool is not None)
        x = self.pool(x)
        # Flatten pooled features to [N, in_channels].
        x = x.view(x.shape[0], (- 1))
        x = self.fc1(x)
        x = self.relu(x)
        if (self.dropout is not None):
            x = self.dropout(x)
        cls_score = self.fc2(x)
        return cls_score
class KerasRegressor(BaseWrapper):
    """Scikit-learn style wrapper around a Keras regression model."""

    def predict(self, x, **kwargs):
        """Predict on `x`, squeezing away the singleton output dimension."""
        predict_kwargs = self.filter_sk_params(Sequential.predict, kwargs)
        return np.squeeze(self.model.predict(x, **predict_kwargs))

    def score(self, x, y, **kwargs):
        """Return the model loss on (x, y); the first value when the model
        reports several metrics."""
        evaluate_kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
        loss = self.model.evaluate(x, y, **evaluate_kwargs)
        return loss[0] if isinstance(loss, list) else loss
def get_video_links():
    """Return site-relative video paths from the sitemap that were modified
    within the last year."""
    soup = bs4.BeautifulSoup(requests.get(SITEMAP_URL).content, 'lxml')
    cutoff = (datetime.datetime.now() - datetime.timedelta(days=365)).date()
    links = set()
    for url_tag in soup.find_all('url'):
        path = urllib.parse.urlparse(url_tag.find('loc').string).path
        if not PATTERN.match(path):
            continue
        last_modified = url_tag.find('lastmod').string
        if datetime.datetime.strptime(last_modified, '%Y-%m-%d').date() > cutoff:
            links.add(path)
    return links
class CustomStatsView(BaseView):
    # Flask-Admin view exposing a custom stats page to authenticated users.
    # NOTE(review): the bare tuple expression below looks like an `@expose`
    # decorator whose `@expose` prefix was lost in extraction -- confirm
    # against the original source.
    ('/', methods=['GET'])
    def index(self):
        # Render the stats template with freshly computed stats.
        return self.render('stats.html', stats=get_stats())
    def is_accessible(self):
        # Only logged-in users may view this page.
        return current_user.is_authenticated
    def inaccessible_callback(self, name, **kwargs):
        # Send anonymous users to the admin login, preserving the target URL.
        return redirect(url_for('admin.login_view', next=request.url))
def test_uninstall_suffix(pipx_temp_env):
    """Installing with --suffix then uninstalling removes the suffixed app."""
    package = 'pbr'
    suffix = '_a'
    app_path = constants.LOCAL_BIN_DIR / app_name(f'{package}{suffix}')
    # Both CLI calls return 0 (falsy) on success.
    assert not run_pipx_cli(['install', PKG[package]['spec'], f'--suffix={suffix}'])
    assert app_path.exists()
    assert not run_pipx_cli(['uninstall', f'{package}{suffix}'])
    assert not file_or_symlink(app_path)
def args_parse():
    """Build and parse the FCGEC preprocessing command-line arguments."""
    parser = argparse.ArgumentParser(description='FCGEC preprocess params')
    base_args = ArgumentGroup(parser, 'base', 'Base Settings')
    # One spec per option: (name, type, default, help).
    for spec in (
        ('mode', str, 'normal', 'STG Mode'),
        ('out_uuid', bool, True, 'Output UUID in test file'),
        ('err_only', bool, True, 'Construct error dataset'),
        ('data_dir', str, 'dataset', 'Dataset path'),
        ('out_dir', str, 'STG-Indep', 'Output path'),
        ('train_file', str, '', 'Train path'),
        ('valid_file', str, '', 'Valid path'),
        ('test_file', str, '', 'Test path'),
    ):
        base_args.add_arg(*spec)
    return parser.parse_args()
def download_file(url, local_filename):
    # Download `url` to `local_filename`.  Plain URLs go through urlopen;
    # the other branch does a streaming requests GET, attaching a
    # PRIVATE-TOKEN header from $YADAGE_INIT_TOKEN when set (presumably for
    # private GitLab-style hosts -- confirm).
    # NOTE(review): the condition below is garbled -- the string literal in
    # `(' in url)` lost its contents during extraction, leaving an
    # unbalanced quote that is not valid Python.  Recover the original
    # substring test from source control before using this function.
    if (not (' in url)):
        f = urlopen(url)
        with open(local_filename, 'wb') as lf:
            lf.write(f.read())
    else:
        h = ({} if (not ('YADAGE_INIT_TOKEN' in os.environ)) else {'PRIVATE-TOKEN': os.environ['YADAGE_INIT_TOKEN']})
        r = requests.get(url, stream=True, headers=h)
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
def train_transform(rotation_range=45):
    """Albumentations training-time augmentation pipeline for keypoint data.

    Args:
        rotation_range: maximum absolute rotation in degrees for
            ShiftScaleRotate.
    """
    augmentations = [
        A.Perspective(pad_mode=cv2.BORDER_CONSTANT, p=0.5),
        A.ShiftScaleRotate(
            shift_limit=0.0,
            scale_limit=0.1,
            rotate_limit=rotation_range,
            interpolation=1,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            mask_value=0,
            always_apply=False,
            p=0.5,
        ),
        A.RandomBrightnessContrast(p=0.5),
        A.RGBShift(r_shift_limit=10, g_shift_limit=20, b_shift_limit=20, always_apply=False, p=0.5),
    ]
    return A.Compose(augmentations, keypoint_params=A.KeypointParams(format='xy', remove_invisible=True))
class MultiLingualInput():
    """A text value provided in multiple languages (English and Italian)."""
    # Class-level defaults; an attribute keeps '' when its language is
    # filtered out by clean().
    en: str = ''
    it: str = ''

    def clean(self, languages: list[str]) -> 'MultiLingualInput':
        """Return a new instance keeping only the values for `languages`."""
        cleaned = MultiLingualInput()
        for language in ('it', 'en'):
            if language in languages:
                setattr(cleaned, language, getattr(self, language))
        return cleaned

    def to_dict(self) -> dict:
        """Plain-dict form of this input."""
        return {'en': self.en, 'it': self.it}
def get_anonymous_replacement_value(keyword, current_value=None, replacement_strategy=None):
    """Produce an anonymised replacement for a DICOM keyword's value.

    Looks up the keyword's value representation (VR) and dispatches to the
    matching handler in `replacement_strategy` (the hardcoded strategy when
    None).  Re-raises KeyError when the VR has no handler.
    """
    vr = get_baseline_keyword_vr_dict()[keyword]
    if vr == 'CS':
        # Code Strings have constrained vocabularies; blind replacement can
        # break conformance, so warn loudly.
        logging.warning('Keyword %s has Value Representation CS and may require special processing to avoid breaking DICOM conformance or interoperability', keyword)
    if replacement_strategy is None:
        replacement_strategy = strategy.ANONYMISATION_HARDCODE_DISPATCH
    try:
        return replacement_strategy[vr](current_value)
    except KeyError:
        logging.error('Unable to anonymise %s with VR %s, current value is %s', keyword, vr, current_value)
        raise
class MonitoredMemcacheConnection():
    """Memcache client wrapper that traces every operation.

    Each public method delegates to the underlying PooledClient inside a
    child span of `server_span`, tagging the span with the method name and
    its key/expire/noreply arguments so calls show up in tracing.

    NOTE(review): the bare `_prom_instrument` expressions before each method
    look like decorators (Prometheus instrumentation) whose `@` was lost in
    extraction -- confirm against the original source.
    """
    def __init__(self, context_name: str, server_span: Span, pooled_client: PooledClient):
        # context_name prefixes every span name: "<context_name>.<method>".
        self.context_name = context_name
        self.server_span = server_span
        self.pooled_client = pooled_client
    _prom_instrument
    def close(self) -> None:
        with self._make_span('close'):
            return self.pooled_client.close()
    _prom_instrument
    def set(self, key: Key, value: Any, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('set') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.set(key, value, expire=expire, noreply=noreply)
    _prom_instrument
    def set_many(self, values: Dict[(Key, Any)], expire: int=0, noreply: Optional[bool]=None) -> List[str]:
        with self._make_span('set_many') as span:
            # Bulk ops tag a count plus a compact keys string, not the values.
            span.set_tag('key_count', len(values))
            span.set_tag('keys', make_keys_str(values.keys()))
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.set_many(values, expire=expire, noreply=noreply)
    _prom_instrument
    def replace(self, key: Key, value: Any, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('replace') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.replace(key, value, expire=expire, noreply=noreply)
    _prom_instrument
    def append(self, key: Key, value: Any, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('append') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.append(key, value, expire=expire, noreply=noreply)
    _prom_instrument
    def prepend(self, key: Key, value: Any, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('prepend') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.prepend(key, value, expire=expire, noreply=noreply)
    _prom_instrument
    def cas(self, key: Key, value: Any, cas: int, expire: int=0, noreply: Optional[bool]=None) -> Optional[bool]:
        with self._make_span('cas') as span:
            span.set_tag('key', key)
            span.set_tag('cas', cas)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.cas(key, value, cas, expire=expire, noreply=noreply)
    _prom_instrument
    def get(self, key: Key, default: Any=None) -> Any:
        with self._make_span('get') as span:
            span.set_tag('key', key)
            # Only forward `default` when given, so the client's own default
            # handling applies otherwise.
            kwargs = {}
            if (default is not None):
                kwargs['default'] = default
            return self.pooled_client.get(key, **kwargs)
    _prom_instrument
    def get_many(self, keys: Sequence[Key]) -> Dict[(Key, Any)]:
        with self._make_span('get_many') as span:
            span.set_tag('key_count', len(keys))
            span.set_tag('keys', make_keys_str(keys))
            return self.pooled_client.get_many(keys)
    _prom_instrument
    def gets(self, key: Key, default: Optional[Any]=None, cas_default: Optional[Any]=None) -> Tuple[(Any, Any)]:
        with self._make_span('gets') as span:
            span.set_tag('key', key)
            return self.pooled_client.gets(key, default=default, cas_default=cas_default)
    _prom_instrument
    def gets_many(self, keys: Sequence[Key]) -> Dict[(Key, Tuple[(Any, Any)])]:
        with self._make_span('gets_many') as span:
            span.set_tag('key_count', len(keys))
            span.set_tag('keys', make_keys_str(keys))
            return self.pooled_client.gets_many(keys)
    _prom_instrument
    def delete(self, key: Key, noreply: Optional[bool]=None) -> bool:
        with self._make_span('delete') as span:
            span.set_tag('key', key)
            span.set_tag('noreply', noreply)
            return self.pooled_client.delete(key, noreply=noreply)
    _prom_instrument
    def delete_many(self, keys: Sequence[Key], noreply: Optional[bool]=None) -> bool:
        with self._make_span('delete_many') as span:
            span.set_tag('key_count', len(keys))
            span.set_tag('noreply', noreply)
            span.set_tag('keys', make_keys_str(keys))
            return self.pooled_client.delete_many(keys, noreply=noreply)
    _prom_instrument
    def add(self, key: Key, value: Any, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('add') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.add(key, value, expire=expire, noreply=noreply)
    _prom_instrument
    def incr(self, key: Key, value: int, noreply: Optional[bool]=False) -> Optional[int]:
        with self._make_span('incr') as span:
            span.set_tag('key', key)
            span.set_tag('noreply', noreply)
            return self.pooled_client.incr(key, value, noreply=noreply)
    _prom_instrument
    def decr(self, key: Key, value: int, noreply: Optional[bool]=False) -> Optional[int]:
        with self._make_span('decr') as span:
            span.set_tag('key', key)
            span.set_tag('noreply', noreply)
            return self.pooled_client.decr(key, value, noreply=noreply)
    _prom_instrument
    def touch(self, key: Key, expire: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('touch') as span:
            span.set_tag('key', key)
            span.set_tag('expire', expire)
            span.set_tag('noreply', noreply)
            return self.pooled_client.touch(key, expire=expire, noreply=noreply)
    _prom_instrument
    def stats(self, *args: str) -> Dict[(str, Any)]:
        with self._make_span('stats'):
            return self.pooled_client.stats(*args)
    _prom_instrument
    def flush_all(self, delay: int=0, noreply: Optional[bool]=None) -> bool:
        with self._make_span('flush_all') as span:
            span.set_tag('delay', delay)
            span.set_tag('noreply', noreply)
            return self.pooled_client.flush_all(delay=delay, noreply=noreply)
    _prom_instrument
    def quit(self) -> None:
        with self._make_span('quit'):
            return self.pooled_client.quit()
    def _make_span(self, method_name: str) -> Span:
        """Create a child span named '<context_name>.<method_name>'."""
        trace_name = f'{self.context_name}.{method_name}'
        span = self.server_span.make_child(trace_name)
        span.set_tag('method', method_name)
        return span
def _is_all_proxies(collection):
    """Return True if every element is a Proxy, False if none are.

    Accepts a sequence or a dict (its values are inspected).

    Raises:
        ValueError: when proxies and non-proxies are mixed.
    """
    elements = list(collection.values()) if isinstance(collection, dict) else collection
    proxy_flags = [isinstance(element, Proxy) for element in elements]
    if all(proxy_flags):
        # Note: an empty collection counts as "all proxies".
        return True
    if any(proxy_flags):
        raise ValueError('Collection has mixed proxies and artifacts. This is not allowed.')
    return False
def test_order_marks(item_names_for):
tests_content = '\n import pytest\n\n .order(-1)\n def test_1(): pass\n\n .order(-2)\n def test_2(): pass\n\n .order(1)\n def test_3(): pass\n '
assert (item_names_for(tests_content) == ['test_3', 'test_2', 'test_1']) |
class _CloudStorage(BaseStorageV2):
def __init__(self, context, connection_class, connect_kwargs, upload_params, storage_path, bucket_name, access_key=None, secret_key=None):
super(_CloudStorage, self).__init__()
self.minimum_chunk_size = ((5 * 1024) * 1024)
self.maximum_chunk_size = None
self._initialized = False
self._bucket_name = bucket_name
self._access_key = access_key
self._secret_key = secret_key
self._root_path = storage_path
self._connection_class = connection_class
self._upload_params = upload_params
self._connect_kwargs = connect_kwargs
self._cloud_conn = None
self._cloud_bucket = None
self._context = context
self._list_object_version = _LIST_OBJECT_VERSIONS['v2']
self._session = self._connection_class(aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key)
def _initialize_cloud_conn(self):
if (not self._initialized):
self._cloud_conn = self._session.client('s3', **self._connect_kwargs)
self._cloud_bucket = self._session.resource('s3', **self._connect_kwargs).Bucket(self._bucket_name)
self._cloud_conn.head_bucket(Bucket=self._bucket_name)
self._initialized = True
def _debug_key(self, obj):
import types
valid_debug_methods = ['copy', 'copy_from', 'delete', 'download_file', 'download_fileobj', 'get', 'get_available_subresources', 'initiate_multipart_upload', 'load', 'put', 'reload', 'restore_object', 'upload_file', 'upload_fileobj']
def debug_method_decorator(f):
(f)
def wrapper(self, *args, **kwargs):
print(('#' * 16))
print(args)
print(kwargs)
print(('#' * 16))
return f(*args, **kwargs)
return wrapper
for method in valid_debug_methods:
new_meth = debug_method_decorator(getattr(obj, method))
obj.__setattr__(method, types.MethodType(new_meth, obj))
def _init_path(self, path=None):
path = (os.path.join(self._root_path, path) if path else self._root_path)
if (path and (path[0] == '/')):
return path[1:]
return path
def get_cloud_conn(self):
self._initialize_cloud_conn()
return self._cloud_conn
def get_cloud_bucket(self):
return self._cloud_bucket
def get_content(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
return obj.get()['Body'].read()
except botocore.exceptions.ClientError as s3r:
if (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES):
raise IOError("No such key: '{0}'".format(path))
raise
def put_content(self, path, content):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
obj.put(Body=content, **self._upload_params)
return path
def get_supports_resumable_downloads(self):
return True
def get_direct_download_url(self, path, request_ip=None, expires_in=60, requires_cors=False, head=False, **kwargs):
self._initialize_cloud_conn()
path = self._init_path(path)
client_method = 'get_object'
if head:
client_method = 'head_object'
return self.get_cloud_conn().generate_presigned_url(client_method, Params={'Bucket': self._bucket_name, 'Key': path}, ExpiresIn=expires_in)
def get_direct_upload_url(self, path, mime_type, requires_cors=True):
self._initialize_cloud_conn()
path = self._init_path(path)
return self.get_cloud_conn().generate_presigned_url('put_object', Params={'Bucket': self._bucket_name, 'Key': path, 'ContentType': mime_type}, ExpiresIn=300)
def stream_read(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
obj.load()
except botocore.exceptions.ClientError as s3r:
if (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES):
raise IOError("No such key: '{0}'".format(path))
raise
buf = obj.get()['Body']
while True:
data = buf.read(self.buffer_size)
if (not data):
break
(yield data)
def stream_read_file(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
obj.load()
except botocore.exceptions.ClientError as s3r:
if (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES):
raise IOError("No such key: '{0}'".format(path))
raise
return StreamReadKeyAsFile(obj.get()['Body'])
def __initiate_multipart_upload(self, path, content_type, content_encoding):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
metadata = {}
if (content_type is not None):
metadata['ContentType'] = content_type
if (content_encoding is not None):
metadata['ContentEncoding'] = content_encoding
metadata = {**metadata, **self._upload_params}
multipart_uploads_started.inc()
return obj.initiate_multipart_upload(**metadata)
def stream_write(self, path, fp, content_type=None, content_encoding=None):
(_, write_error) = self._stream_write_internal(path, fp, content_type, content_encoding)
if (write_error is not None):
logger.error('Error when trying to stream_write path `%s`: %s', path, write_error)
raise IOError('Exception when trying to stream_write path')
def _stream_write_internal(self, path, fp, content_type=None, content_encoding=None, cancel_on_error=True, size=filelike.READ_UNTIL_END):
write_error = None
try:
mp = self.__initiate_multipart_upload(path, content_type, content_encoding)
except botocore.exceptions.ClientError as s3r:
logger.exception('Exception when initiating multipart upload')
return (0, s3r)
upload_parts = []
num_part = 1
total_bytes_written = 0
while ((size == filelike.READ_UNTIL_END) or (total_bytes_written < size)):
bytes_to_copy = self.minimum_chunk_size
if (size != filelike.READ_UNTIL_END):
bytes_to_copy = min(bytes_to_copy, (size - total_bytes_written))
with BytesIO() as buf:
try:
bytes_staged = self.stream_write_to_fp(fp, buf, bytes_to_copy)
if (bytes_staged == 0):
break
buf.seek(0)
part = mp.Part(num_part)
part_upload = part.upload(Body=buf, ContentLength=bytes_staged)
upload_parts.append(_PartUpload(num_part, part_upload['ETag']))
total_bytes_written += bytes_staged
num_part += 1
except (botocore.exceptions.ClientError, botocore.exceptions.ConnectionClosedError, IOError) as e:
logger.warn('Error when writing to stream in stream_write_internal at path %s: %s', path, e)
write_error = e
if cancel_on_error:
try:
mp.abort()
except (botocore.exceptions.ClientError, IOError):
logger.exception('Could not cancel upload')
return (0, write_error)
else:
break
if (total_bytes_written > 0):
multipart_uploads_completed.inc()
self._perform_action_with_retry(mp.complete, MultipartUpload={'Parts': [{'ETag': p.e_tag, 'PartNumber': p.part_number} for p in upload_parts]})
else:
mp.abort()
return (total_bytes_written, write_error)
def exists(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
obj.load()
except botocore.exceptions.ClientError as s3r:
if (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES):
return False
raise
return True
def remove(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
obj.load()
obj.delete()
return
except botocore.exceptions.ClientError as s3r:
if (not (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES)):
raise
if (not path.endswith('/')):
path += '/'
paginator = self.get_cloud_conn().get_paginator(self._list_object_version)
for page in paginator.paginate(Bucket=self._bucket_name, Prefix=path):
for content in page.get('Contents', ()):
obj = self.get_cloud_bucket().Object(content['Key'])
obj.delete()
def get_checksum(self, path):
self._initialize_cloud_conn()
path = self._init_path(path)
obj = self.get_cloud_bucket().Object(path)
try:
obj.load()
except botocore.exceptions.ClientError as s3r:
if (s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES):
raise IOError("No such key: '{0}'".format(path))
raise
return obj.e_tag[1:(- 1)][:7]
def copy_to(self, destination, path):
self._initialize_cloud_conn()
if ((self.__class__ == destination.__class__) and self._access_key and self._secret_key and (self._access_key == destination._access_key) and (self._secret_key == destination._secret_key) and (self._connect_kwargs == destination._connect_kwargs)):
destination._initialize_cloud_conn()
if (self._cloud_bucket is None):
logger.error('Cloud bucket not found for location %s; Configuration is probably invalid!', self._bucket_name)
return
if (destination._cloud_bucket is None):
logger.error('Cloud bucket not found for location %s; Configuration is probably invalid!', destination._bucket_name)
return
logger.debug('Copying file from %s to %s via a direct boto copy', self._cloud_bucket, destination._cloud_bucket)
source_path = self._init_path(path)
source_obj = self.get_cloud_bucket().Object(source_path)
dest_path = destination._init_path(path)
dest_obj = destination.get_cloud_bucket().Object(dest_path)
dest_obj.copy_from(CopySource={'Bucket': source_obj.bucket_name, 'Key': source_obj.key})
return
logger.debug('Copying file from %s to %s via a streamed copy', self._cloud_bucket, destination)
with self.stream_read_file(path) as fp:
destination.stream_write(path, fp)
def _rel_upload_path(self, uuid):
return 'uploads/{0}'.format(uuid)
def initiate_chunked_upload(self):
self._initialize_cloud_conn()
random_uuid = str(uuid4())
metadata = {_CHUNKS_KEY: []}
return (random_uuid, metadata)
def stream_upload_chunk(self, uuid, offset, length, in_fp, storage_metadata, content_type=None):
    """Write one upload chunk to its own temporary path and record it.

    Returns (bytes_written, updated_metadata, write_error); the incoming
    metadata is not mutated.
    """
    self._initialize_cloud_conn()
    chunk_path = self._rel_upload_path(str(uuid4()))
    bytes_written, write_error = self._stream_write_internal(
        chunk_path, in_fp, cancel_on_error=False, size=length, content_type=content_type
    )
    updated_metadata = copy.deepcopy(storage_metadata)
    # Only record chunks that actually landed data.
    if bytes_written > 0:
        updated_metadata[_CHUNKS_KEY].append(_PartUploadMetadata(chunk_path, offset, bytes_written))
    return bytes_written, updated_metadata, write_error
def _chunk_generator(self, chunk_list):
    """Lazily yield a readable stream slice for each recorded chunk."""
    for part in chunk_list:
        yield filelike.StreamSlice(self.stream_read_file(part.path), 0, part.length)
def _chunk_list_from_metadata(storage_metadata):
    """Rebuild _PartUploadMetadata records from serialized upload metadata."""
    return [_PartUploadMetadata(*args) for args in storage_metadata[_CHUNKS_KEY]]
def _client_side_chunk_join(self, final_path, chunk_list):
    """Assemble the given chunks into `final_path` on the client side.

    A single chunk is moved with a server-side copy; multiple chunks are
    streamed through this client, concatenated, and re-uploaded. Chunk
    cleanup afterwards is best-effort: failures are logged, not raised.
    """
    if (len(chunk_list) == 1):
        # Fast path: server-side copy of the lone chunk into its final key.
        chunk_path = self._init_path(chunk_list[0].path)
        abs_final_path = self._init_path(final_path)
        new_obj = self.get_cloud_bucket().Object(abs_final_path)
        new_obj.copy_from(CopySource={'Bucket': self._bucket_name, 'Key': chunk_path})
        try:
            self.get_cloud_bucket().Object(chunk_path).delete()
        except (botocore.exceptions.ClientError, IOError):
            msg = 'Failed to clean up chunk %s for move of %s'
            logger.exception(msg, chunk_path, abs_final_path)
    else:
        # Stream all chunks through this client as one concatenated blob.
        concatenated = filelike.FilelikeStreamConcat(self._chunk_generator(chunk_list))
        self.stream_write(final_path, concatenated)
        for chunk in chunk_list:
            try:
                self.get_cloud_bucket().Object(chunk.path).delete()
            except (botocore.exceptions.ClientError, IOError):
                msg = 'Failed to clean up chunk %s for reupload of %s'
                logger.exception(msg, chunk.path, final_path)
def _perform_action_with_retry(action, *args, **kwargs):
for remaining_retries in range(2, (- 1), (- 1)):
try:
return action(*args, **kwargs)
break
except botocore.exceptions.ClientError as s3re:
if (remaining_retries and (s3re.response['Error'].get('HTTPStatusCode', 0) == 200) and (s3re.response['Error'].get('Code', '') == 'InternalError')):
continue
logger.exception('Exception trying to perform action %s', action)
raise s3re
def _rechunk(chunk, max_chunk_size):
    """Recursively split *chunk* until each piece is <= max_chunk_size.

    A None limit (or an already-small chunk) yields the chunk unchanged.
    """
    if max_chunk_size is None or chunk.length <= max_chunk_size:
        yield chunk
        return
    half = chunk.length // 2
    left = _PartUploadMetadata(chunk.path, chunk.offset, half)
    right = _PartUploadMetadata(chunk.path, chunk.offset + half, chunk.length - half)
    yield from _CloudStorage._rechunk(left, max_chunk_size)
    yield from _CloudStorage._rechunk(right, max_chunk_size)
def complete_chunked_upload(self, uuid, final_path, storage_metadata, force_client_side=False):
    """Finalize a chunked upload into `final_path`.

    Prefers S3 server-side multipart assembly; falls back to a client-side
    join when forced, or when any non-final chunk is below the multipart
    minimum part size. On a server-side failure the multipart upload is
    aborted and the error re-raised.
    """
    self._initialize_cloud_conn()
    chunk_list = self._chunk_list_from_metadata(storage_metadata)
    if (len(chunk_list) == 0):
        # Nothing was written; there is nothing to assemble.
        return
    server_side_assembly = False
    if (not force_client_side):
        server_side_assembly = True
        # Every part except the last must meet the minimum part size for a
        # server-side multipart copy to be accepted.
        for (chunk_offset, chunk) in enumerate(chunk_list):
            if ((chunk.length < self.minimum_chunk_size) and ((chunk_offset + 1) < len(chunk_list))):
                server_side_assembly = False
                break
    if server_side_assembly:
        logger.debug('Performing server side assembly of multi-part upload for: %s', final_path)
        try:
            mpu = self.__initiate_multipart_upload(final_path, content_type=None, content_encoding=None)
            # Oversized chunks are split so each copied part stays within
            # the maximum allowed part size.
            updated_chunks = chain.from_iterable([_CloudStorage._rechunk(c, self.maximum_chunk_size) for c in chunk_list])
            upload_parts = []
            for (index, chunk) in enumerate(updated_chunks):
                abs_chunk_path = self._init_path(chunk.path)
                # Part numbers are 1-based per the S3 multipart API.
                part_copy = self._perform_action_with_retry(mpu.Part((index + 1)).copy_from, CopySource={'Bucket': self.get_cloud_bucket().name, 'Key': abs_chunk_path}, CopySourceRange=('bytes=%s-%s' % (chunk.offset, ((chunk.length + chunk.offset) - 1))))
                upload_parts.append(_PartUpload((index + 1), part_copy['CopyPartResult']['ETag']))
            self._perform_action_with_retry(mpu.complete, MultipartUpload={'Parts': [{'ETag': p.e_tag, 'PartNumber': p.part_number} for p in upload_parts]})
        except (botocore.exceptions.ClientError, IOError) as ioe:
            msg = 'Exception when attempting server-side assembly for: %s'
            logger.exception(msg, final_path)
            # Abort so the partial multipart upload does not linger (and bill).
            mpu.abort()
            raise ioe
    else:
        self._client_side_chunk_join(final_path, chunk_list)
def cancel_chunked_upload(self, uuid, storage_metadata):
    """Delete every chunk recorded for an aborted chunked upload."""
    self._initialize_cloud_conn()
    for part in self._chunk_list_from_metadata(storage_metadata):
        self.remove(part.path)
def clean_partial_uploads(self, deletion_date_threshold):
    """Delete blobs under the uploads/ prefix older than the threshold.

    deletion_date_threshold: a timedelta; any blob last modified before
    now - threshold is removed. Missing keys are logged at debug level;
    other S3 errors are logged (with traceback) and skipped.

    Fix: the original logger.exception call passed str(s3r) as an extra
    argument with no matching %s placeholder, which corrupts the log
    record; the format string now has two placeholders.
    """
    self._initialize_cloud_conn()
    path = self._init_path('uploads')
    # Hoisted out of the loop: the cutoff is invariant per invocation.
    cutoff = datetime.now(timezone.utc) - deletion_date_threshold
    paginator = self.get_cloud_conn().get_paginator(self._list_object_version)
    for page in paginator.paginate(Bucket=self._bucket_name, Prefix=path):
        for obj_info in page.get('Contents', []):
            if obj_info['LastModified'] > cutoff:
                continue  # still within the retention window
            obj = self.get_cloud_bucket().Object(obj_info['Key'])
            try:
                obj.load()
                obj.delete()
                logger.debug('Expired blob removed from uploads folder: %s', obj_info['Key'])
            except botocore.exceptions.ClientError as s3r:
                if s3r.response['Error']['Code'] in _MISSING_KEY_ERROR_CODES:
                    logger.debug('Blob not found in uploads folder with key %s', obj_info['Key'])
                else:
                    logger.exception('Got error when attempting to clean blob with key %s in uploads folder: %s', obj_info['Key'], str(s3r))
def check_singleton(cand_mol, ctr_node, nei_nodes):
    """Accept or reject a candidate attachment based on singleton atoms.

    Trivially accepts when the cluster contains a single-atom node or no
    ring node at all; otherwise rejects candidates in which any atom has
    more than one non-ring (leaf) neighbor.
    """
    cluster = nei_nodes + [ctr_node]
    has_ring = any(node.mol.GetNumAtoms() > 2 for node in cluster)
    has_singleton = any(node.mol.GetNumAtoms() == 1 for node in cluster)
    if has_singleton or not has_ring:
        return True
    for atom in cand_mol.GetAtoms():
        leaf_neighbors = [nb for nb in atom.GetNeighbors() if not nb.IsInRing()]
        if len(leaf_neighbors) > 1:
            return False
    return True
class SearchVisitor(ExtendedTraverserVisitor):
    """Traverse the AST looking for the expression whose source span exactly
    matches the given (line, column, end_line, end_column) position."""

    def __init__(self, line: int, column: int, end_line: int, end_column: int) -> None:
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.result: Expression | None = None

    def visit(self, o: Node) -> bool:
        # Prune any subtree lying entirely outside the target span.
        if node_starts_after(o, self.line, self.column) or node_ends_before(o, self.end_line, self.end_column):
            return False
        exact = (
            o.line == self.line
            and o.column == self.column
            and o.end_line == self.end_line
            and o.end_column == self.end_column
        )
        if exact and isinstance(o, Expression):
            self.result = o
        # Keep descending only while nothing has been found.
        return self.result is None
class Inform7Lexer(RegexLexer):
    """Pygments lexer for Inform 7 source, layering Inform 6 template (I6T)
    states on top of the Inform 7 grammar.

    Fix: the `url` attribute was an unterminated string literal (a syntax
    error); restored to the Inform 7 homepage used by upstream Pygments.
    """
    name = 'Inform 7'
    url = 'http://inform7.com/'
    aliases = ['inform7', 'i7']
    filenames = ['*.ni', '*.i7x']
    version_added = '2.0'
    flags = (re.MULTILINE | re.DOTALL)
    _dash = Inform6Lexer._dash
    _dquote = Inform6Lexer._dquote
    _newline = Inform6Lexer._newline
    _start = ('\\A|(?<=[%s])' % _newline)
    tokens = {}
    token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
    # NOTE(review): some regex literals below (e.g. '[^]+' in the '+p' state)
    # look like they lost characters during extraction — verify against the
    # upstream Pygments Inform7Lexer before relying on them.
    for level in token_variants:
        tokens[level] = {'+i6-root': list(Inform6Lexer.tokens['root']), '+i6t-root': [(('[^%s]*' % Inform6Lexer._newline), Comment.Preproc, ('directive', '+p'))], 'root': [('(\\|?\\s)+', Text), ('\\[', Comment.Multiline, '+comment'), (('[%s]' % _dquote), Generic.Heading, ('+main', '+titling', '+titling-string')), default(('+main', '+heading?'))], '+titling-string': [(('[^%s]+' % _dquote), Generic.Heading), (('[%s]' % _dquote), Generic.Heading, '#pop')], '+titling': [('\\[', Comment.Multiline, '+comment'), (('[^%s.;:|%s]+' % (_dquote, _newline)), Generic.Heading), (('[%s]' % _dquote), Generic.Heading, '+titling-string'), (('[%s]{2}|(?<=[\\s%s])\\|[\\s%s]' % (_newline, _dquote, _dquote)), Text, ('#pop', '+heading?')), (('[.;:]|(?<=[\\s%s])\\|' % _dquote), Text, '#pop'), (('[|%s]' % _newline), Generic.Heading)], '+main': [(('(?i)[^%s:a\\[(|%s]+' % (_dquote, _newline)), Text), (('[%s]' % _dquote), String.Double, '+text'), (':', Text, '+phrase-definition'), ('(?i)\\bas\\b', Text, '+use-option'), ('\\[', Comment.Multiline, '+comment'), (('(\\([%s])(.*?)([%s]\\))' % (_dash, _dash)), bygroups(Punctuation, using(this, state=('+i6-root', 'directive'), i6t='+i6t-not-inline'), Punctuation)), (('(%s|(?<=[\\s;:.%s]))\\|\\s|[%s]{2,}' % (_start, _dquote, _newline)), Text, '+heading?'), (('(?i)[a(|%s]' % _newline), Text)], '+phrase-definition': [('\\s+', Text), ('\\[', Comment.Multiline, '+comment'), (('(\\([%s])(.*?)([%s]\\))' % (_dash, _dash)), bygroups(Punctuation, using(this, state=('+i6-root', 'directive', 'default', 'statements'), i6t='+i6t-inline'), Punctuation), '#pop'), default('#pop')], '+use-option': [('\\s+', Text), ('\\[', Comment.Multiline, '+comment'), (('(\\([%s])(.*?)([%s]\\))' % (_dash, _dash)), bygroups(Punctuation, using(this, state=('+i6-root', 'directive'), i6t='+i6t-use-option'), Punctuation), '#pop'), default('#pop')], '+comment': [('[^\\[\\]]+', Comment.Multiline), ('\\[', Comment.Multiline, '#push'), ('\\]', Comment.Multiline, '#pop')], '+text': [(('[^\\[%s]+' % 
        _dquote), String.Double), ('\\[.*?\\]', String.Interpol), (('[%s]' % _dquote), String.Double, '#pop')], '+heading?': [('(\\|?\\s)+', Text), ('\\[', Comment.Multiline, '+comment'), (('[%s]{4}\\s+' % _dash), Text, '+documentation-heading'), (('[%s]{1,3}' % _dash), Text), (('(?i)(volume|book|part|chapter|section)\\b[^%s]*' % _newline), Generic.Heading, '#pop'), default('#pop')], '+documentation-heading': [('\\s+', Text), ('\\[', Comment.Multiline, '+comment'), ('(?i)documentation\\s+', Text, '+documentation-heading2'), default('#pop')], '+documentation-heading2': [('\\s+', Text), ('\\[', Comment.Multiline, '+comment'), (('[%s]{4}\\s' % _dash), Text, '+documentation'), default('#pop:2')], '+documentation': [(('(?i)(%s)\\s*(chapter|example)\\s*:[^%s]*' % (_start, _newline)), Generic.Heading), (('(?i)(%s)\\s*section\\s*:[^%s]*' % (_start, _newline)), Generic.Subheading), (('((%s)\\t.*?[%s])+' % (_start, _newline)), using(this, state='+main')), (('[^%s\\[]+|[%s\\[]' % (_newline, _newline)), Text), ('\\[', Comment.Multiline, '+comment')], '+i6t-not-inline': [(('(%s)( .*?)?([%s]|\\Z)' % (_start, _newline)), Comment.Preproc), (('(%s)([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline)), Comment.Preproc), (('(%s)( .*?)?([%s]|\\Z)' % (_start, _newline)), Generic.Heading, '+p')], '+i6t-use-option': [include('+i6t-not-inline'), ('(\\{)(N)(\\})', bygroups(Punctuation, Text, Punctuation))], '+i6t-inline': [('(\\{)(\\S[^}]*)?(\\})', bygroups(Punctuation, using(this, state='+main'), Punctuation))], '+i6t': [(('(\\{[%s])(![^}]*)(\\}?)' % _dash), bygroups(Punctuation, Comment.Single, Punctuation)), (('(\\{[%s])(lines)(:)([^}]*)(\\}?)' % _dash), bygroups(Punctuation, Keyword, Punctuation, Text, Punctuation), '+lines'), (('(\\{[%s])([^:}]*)(:?)([^}]*)(\\}?)' % _dash), bygroups(Punctuation, Keyword, Punctuation, Text, Punctuation)), ('(\\(\\+)(.*?)(\\+\\)|\\Z)', bygroups(Punctuation, using(this, state='+main'), Punctuation))], '+p': [('[^]+', Comment.Preproc), (('(%s)( .*?)?([%s]|\\Z)' % 
        (_start, _newline)), Comment.Preproc, '#pop'), (('(%s)([%s]|Purpose:)' % (_start, _dash)), Comment.Preproc), (('(%s)( .*?)?([%s]|\\Z)' % (_start, _newline)), Generic.Heading), ('', Comment.Preproc)], '+lines': [(('(%s)( .*?)?([%s]|\\Z)' % (_start, _newline)), Comment.Preproc), (('(%s)([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline)), Comment.Preproc), (('(%s)( .*?)?([%s]|\\Z)' % (_start, _newline)), Generic.Heading, '+p'), (('(%s)\\w*[ %s]' % (_start, _newline)), Keyword), (('![^%s]*' % _newline), Comment.Single), (('(\\{)([%s]endlines)(\\})' % _dash), bygroups(Punctuation, Keyword, Punctuation), '#pop'), (('[^!{]+?([%s]|\\Z)|.' % _newline), Text)]}
        # Re-use the Inform 6 states, injecting the I6T interpolation rules
        # ahead of each non-private state.
        for token in Inform6Lexer.tokens:
            if (token == 'root'):
                continue
            tokens[level][token] = list(Inform6Lexer.tokens[token])
            if (not token.startswith('_')):
                tokens[level][token][:0] = [include('+i6t'), include(level)]

    def __init__(self, **options):
        # Select which I6T token variant to compile; results are cached in
        # the class-level _all_tokens map by process_tokendef.
        level = options.get('i6t', '+i6t-not-inline')
        if (level not in self._all_tokens):
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class FusedLeakyReLUFunction(Function):
    """Autograd wrapper around the fused bias-add + leaky-ReLU CUDA kernel."""

    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        has_bias = bias is not None
        ctx.bias = has_bias
        # The kernel expects a tensor for bias; substitute an empty one.
        out = fused.fused_bias_act(input, bias if has_bias else empty, empty, 3, 0, negative_slope, scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    def backward(ctx, grad_output):
        (out,) = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale
        )
        # No gradient for a bias that was never supplied, nor for the
        # scalar hyperparameters.
        return (grad_input, grad_bias if ctx.bias else None, None, None)
def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False):
    """Emit a standalone Lark parser module for *lark_inst* to *out*.

    The generated module embeds the serialized grammar (DATA) and memo
    table (MEMO), optionally zlib+base64 compressed, plus the standalone
    runtime sections extracted from the Lark source files.
    """
    if (output is None):
        output = partial(print, file=out)
    import pickle, zlib, base64

    def compressed_output(obj):
        # Pickle -> zlib -> base64, emitted as a repr'd bytes literal.
        s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        c = zlib.compress(s)
        output(repr(base64.b64encode(c)))

    def output_decompress(name):
        # Emit the matching inline decompression statement for *name*.
        output(('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals()))
    output(('# The file was automatically generated by Lark v%s' % lark.__version__))
    output(('__version__ = "%s"' % lark.__version__))
    output()
    # Concatenate the 'standalone' sections of the runtime source files;
    # docstrings are stripped from all but the first to avoid duplication.
    for (i, pyfile) in enumerate(EXTRACT_STANDALONE_FILES):
        with open(os.path.join(_larkdir, pyfile)) as f:
            code = extract_sections(f)['standalone']
            if i:
                code = strip_docstrings(partial(next, iter(code.splitlines(True))))
            output(code)
    (data, m) = lark_inst.memo_serialize([TerminalDef, Rule])
    output('import pickle, zlib, base64')
    if compress:
        output('DATA = (')
        compressed_output(data)
        output(')')
        output_decompress('DATA')
        output('MEMO = (')
        compressed_output(m)
        output(')')
        output_decompress('MEMO')
    else:
        output('DATA = (')
        output(data)
        output(')')
        output('MEMO = (')
        output(m)
        output(')')
    output('Shift = 0')
    output('Reduce = 1')
    output('def Lark_StandAlone(**kwargs):')
    output('  return Lark._load_from_dict(DATA, MEMO, **kwargs)')
class RaftInfo(BaseModel, extra='forbid'):
    """Snapshot of this peer's Raft consensus state.

    Per-field semantics are carried in each Field's `description`;
    `extra='forbid'` rejects unknown keys during validation.
    """
    term: int = Field(..., description='Raft divides time into terms of arbitrary length, each beginning with an election. If a candidate wins the election, it remains the leader for the rest of the term. The term number increases monotonically. Each server stores the current term number which is also exchanged in every communication.')
    commit: int = Field(..., description='The index of the latest committed (finalized) operation that this peer is aware of.')
    pending_operations: int = Field(..., description='Number of consensus operations pending to be applied on this peer')
    leader: Optional[int] = Field(default=None, description='Leader of the current term')
    role: Optional['StateRole'] = Field(default=None, description='Role of this peer in the current term')
    is_voter: bool = Field(..., description='Is this peer a voter or a learner')
class SlotSelect(discord.ui.Select):
    """Multi-select listing reserved slots so users can pick which to free."""

    view: ScrimsView

    def __init__(self, slots: T.List[ReservedSlot]):
        _options = [
            discord.SelectOption(
                label=f'Slot {slot.num}',
                description=f"Team: {slot.team_name} ({(slot.leader or 'No leader')})",
                value=str(slot.id),
                emoji=emote.TextChannel,
            )
            for slot in slots
        ]
        super().__init__(max_values=len(slots), placeholder='Select the slot(s) you want to remove from reserved', options=_options)

    async def callback(self, interaction: discord.Interaction):
        await interaction.response.defer()
        # Hand the chosen slot ids back to the parent view and finish.
        self.view.custom_id = self.values
        self.view.stop()
def create_continuous_contract(df, resolution='1T'):
    """Stitch per-contract futures bars in *df* into one continuous series.

    Contracts are rolled either at expiry or at the volume crossover, and
    prices are back-adjusted by the close-to-close gap at each roll.
    *resolution* only selects the reindexing frequency (seconds for tick-ish
    resolutions containing 'K'/'V'/'S', minutes otherwise).
    """
    def _merge_contracts(m1, m2):
        # Splice contract m2 onto m1 at the roll date.
        if (m1 is None):
            return m2
        try:
            # Prefer rolling at the (single) expiry recorded on m1.
            roll_date = m1['expiry'].unique()[(- 1)]
        except Exception as e:
            # Fallback: roll on the last day m2's volume exceeded m1's.
            combined = m1.merge(m2, left_index=True, right_index=True)
            m_highest = (combined['volume_y'] > combined['volume_x'])
            if (len(m_highest.index) == 0):
                return m1
            roll_date = m_highest[m_highest].index[(- 1)]
        return pd.concat([m1[(m1.index <= roll_date)], m2[(m2.index > roll_date)]], sort=True)

    def _continuous_contract_flags(daily_df):
        # Build a daily frame of (symbol, expiry, gap) telling, per day,
        # which contract is active and what price gap to back-adjust by.
        expirations = list(daily_df['expiry'].dropna().unique())
        expirations.sort()
        flags = None
        for expiration in expirations:
            new_contract = daily_df[(daily_df['expiry'] == expiration)].copy()
            flags = _merge_contracts(flags, new_contract)
        flags['gap'] = 0
        for expiration in expirations:
            try:
                # Gap = next contract's close-to-close diff on the roll day;
                # applied to all rows at or before that expiration.
                minidf = daily_df[(daily_df.index == expiration)][['symbol', 'expiry', 'diff']]
                expiry = flags[((flags.index > expiration) & (flags['expiry'] >= expiration))]['expiry'][0]
                gap = minidf[(minidf['expiry'] == expiry)]['diff'][0]
                flags.loc[((flags.index <= expiration), 'gap')] = gap
            except Exception as e:
                # Best-effort: missing roll data for this expiration is skipped.
                pass
        # NOTE(review): this filter keeps rows whose symbol is in its own
        # unique set — it looks like a no-op; confirm intent.
        flags = flags[flags['symbol'].isin(flags['symbol'].unique())]
        if (len(flags.index) <= 1):
            # Too little history: synthesize a 24-hour hourly frame so the
            # later resample/reindex has something to forward-fill from.
            flags = pd.DataFrame(index=pd.date_range(start=flags[0:1].index[0], periods=24, freq='1H'), data=flags[['symbol', 'expiry', 'gap']]).ffill()
            flags['expiry'] = pd.to_datetime(flags['expiry'], utc=True)
        return flags[['symbol', 'expiry', 'gap']]
    df = df.copy()
    df['dt'] = df.index
    # Daily last-bar view per contract, used to decide the roll schedule.
    daily_df = df.groupby('symbol').resample('D').last().dropna(how='all')
    daily_df.index = daily_df.index.droplevel()
    daily_df.sort_index(inplace=True)
    try:
        daily_df['diff'] = daily_df['close'].diff()
    except Exception as e:
        # Tick-style data has 'last' instead of OHLC.
        daily_df['diff'] = daily_df['last'].diff()
    flags = _continuous_contract_flags(daily_df)
    # Upsample the daily flags to the bar resolution and align to df's index.
    if (('K' in resolution) or ('V' in resolution) or ('S' in resolution)):
        flags = flags.resample('S').last().ffill().reindex(df.index.unique()).ffill()
    else:
        flags = flags.resample('T').last().ffill().reindex(df.index.unique()).ffill()
    flags['dt'] = flags.index
    # Keep only bars belonging to the active contract for each timestamp.
    contract = pd.merge(df, flags, how='left', on=['dt', 'symbol']).ffill()
    contract.set_index('dt', inplace=True)
    contract = contract[(contract.expiry_y == contract.expiry_x)]
    contract['expiry'] = contract['expiry_y']
    contract.drop(['expiry_y', 'expiry_x'], axis=1, inplace=True)
    try:
        # Back-adjust prices by the accumulated roll gap.
        contract['open'] = (contract['open'] + contract['gap'])
        contract['high'] = (contract['high'] + contract['gap'])
        contract['low'] = (contract['low'] + contract['gap'])
        contract['close'] = (contract['close'] + contract['gap'])
    except Exception as e:
        contract['last'] = (contract['last'] + contract['gap'])
    contract.drop(['gap'], axis=1, inplace=True)
    return contract
class TestOptions(BaseOptions):
    """Command-line options for the test phase; extends BaseOptions with
    inference-only flags and marks the run as non-training."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--ntest', type=int, default=float('inf'), help='# of test examples.')
        add('--results_dir', type=str, default='./results/', help='saves results here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='test', help='train, val, test, etc')
        add('--phase_test_type', type=str, default='test_single', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
        add('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features')
        add('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map')
        add('--export_onnx', type=str, help='export ONNX model to a given file')
        add('--engine', type=str, help='run serialized TRT engine')
        add('--onnx', type=str, help='run ONNX model via TRT')
        self.isTrain = False
class BeitFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
    """Feature extractor for BEiT: resizes, center-crops and normalizes
    images, and optionally prepares segmentation maps as integer labels.

    Fix: renamed the loop variable `map`, which shadowed the builtin of the
    same name throughout `__call__`.
    """

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=256, resample=Image.BICUBIC, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=None, image_std=None, reduce_labels=False, **kwargs):
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        # Default to the ImageNet standard statistics when none are given.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.reduce_labels = reduce_labels

    def __call__(self, images: ImageInput, segmentation_maps: ImageInput=None, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchFeature:
        """Preprocess *images* (and optional *segmentation_maps*) into a
        BatchFeature with 'pixel_values' and, when maps are given, 'labels'.

        Raises ValueError when the inputs are not PIL images, numpy arrays,
        torch tensors, or (possibly empty) lists/tuples thereof.
        """
        valid_images = False
        valid_segmentation_maps = False
        # Accept a single example or a batch (list/tuple) of examples.
        if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
            valid_images = True
        elif isinstance(images, (list, tuple)):
            if (len(images) == 0) or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
                valid_images = True
        if not valid_images:
            raise ValueError('Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), `List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
        if segmentation_maps is not None:
            if isinstance(segmentation_maps, (Image.Image, np.ndarray)) or is_torch_tensor(segmentation_maps):
                valid_segmentation_maps = True
            elif isinstance(segmentation_maps, (list, tuple)):
                if (len(segmentation_maps) == 0) or isinstance(segmentation_maps[0], (Image.Image, np.ndarray)) or is_torch_tensor(segmentation_maps[0]):
                    valid_segmentation_maps = True
            if not valid_segmentation_maps:
                raise ValueError('Segmentation maps must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
        is_batched = bool(isinstance(images, (list, tuple)) and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])))
        if not is_batched:
            # Normalize single examples into length-1 batches.
            images = [images]
            if segmentation_maps is not None:
                segmentation_maps = [segmentation_maps]
        if self.reduce_labels:
            if segmentation_maps is not None:
                # Shift labels down by one so class 0 (background) becomes
                # the ignore index 255.
                for idx, seg_map in enumerate(segmentation_maps):
                    if not isinstance(seg_map, np.ndarray):
                        seg_map = np.array(seg_map)
                    seg_map[seg_map == 0] = 255
                    seg_map = seg_map - 1
                    seg_map[seg_map == 254] = 255
                    segmentation_maps[idx] = Image.fromarray(seg_map.astype(np.uint8))
        if self.do_resize and (self.size is not None) and (self.resample is not None):
            images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
            if segmentation_maps is not None:
                segmentation_maps = [self.resize(seg_map, size=self.size, resample=self.resample) for seg_map in segmentation_maps]
        if self.do_center_crop and (self.crop_size is not None):
            images = [self.center_crop(image, self.crop_size) for image in images]
            if segmentation_maps is not None:
                segmentation_maps = [self.center_crop(seg_map, size=self.crop_size) for seg_map in segmentation_maps]
        if self.do_normalize:
            images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
        data = {'pixel_values': images}
        if segmentation_maps is not None:
            labels = []
            for seg_map in segmentation_maps:
                if not isinstance(seg_map, np.ndarray):
                    seg_map = np.array(seg_map)
                labels.append(seg_map.astype(np.int64))
            data['labels'] = labels
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
def handle_withdraw(token_network_state: TokenNetworkState, state_change: ContractReceiveChannelWithdraw, block_number: BlockNumber, block_hash: BlockHash, pseudo_random_generator: random.Random) -> TransitionResult:
    """Forward an on-chain channel-withdraw event to its owning channel."""
    return subdispatch_to_channel_by_id(
        token_network_state=token_network_state,
        state_change=state_change,
        block_number=block_number,
        block_hash=block_hash,
        pseudo_random_generator=pseudo_random_generator,
    )
def recover_closest_standard(feature_matrix_all, image_paths, save_path, n_image_samples=10, n_closest=3):
    """Save a grid plotting sampled images beside their nearest neighbours.

    Each row shows a randomly sampled image (red bar) followed by its
    n_closest neighbours in feature space (green bars).
    """
    paths_arr = np.array([p[0] for p in image_paths])
    sample_idxs = np.random.choice(np.arange(len(feature_matrix_all)), n_image_samples)
    # Exact L2 nearest-neighbour search over all features.
    index = faiss.IndexFlatL2(feature_matrix_all.shape[-1])
    index.add(feature_matrix_all)
    # +1 because each query returns itself as its own nearest neighbour.
    _, nn_idxs = index.search(feature_matrix_all, n_closest + 1)
    sample_paths = paths_arr[nn_idxs][sample_idxs]
    fig, axes = plt.subplots(n_image_samples, n_closest + 1)
    for i, (ax, img_path) in enumerate(zip(axes.reshape(-1), sample_paths.reshape(-1))):
        ax.imshow(np.array(Image.open(img_path)))
        ax.set_xticks([])
        ax.set_yticks([])
        # First column (the query itself) is marked red, neighbours green.
        color = 'g' if i % (n_closest + 1) else 'r'
        ax.axvline(x=0, color=color, linewidth=13)
    fig.set_size_inches(10, 20)
    fig.tight_layout()
    fig.savefig(save_path)
    plt.close()
class Transaction(object):
    """A record of a single fill: asset, signed amount, time, price, order."""

    # NOTE(review): this bare call looks like a stripped `@_types(...)`
    # decorator from the original source — confirm before relying on it.
    _types(asset=Asset)

    def __init__(self, asset, amount, dt, price, order_id):
        self.asset = asset
        self.amount = amount
        self.dt = dt
        self.price = price
        self.order_id = order_id
        self.type = DATASOURCE_TYPE.TRANSACTION

    def __getitem__(self, name):
        # Dict-style access to the instance attributes.
        return vars(self)[name]

    def __repr__(self):
        return '{cls}(asset={asset}, dt={dt}, amount={amount}, price={price})'.format(
            cls=type(self).__name__,
            asset=self.asset,
            dt=self.dt,
            amount=self.amount,
            price=self.price,
        )

    def to_dict(self):
        """Return a plain-dict view with 'asset' renamed to 'sid'."""
        py = copy(self.__dict__)
        py.pop('type')
        py.pop('asset')
        py['sid'] = self.asset
        py['commission'] = None
        return py
def run_eval(load_model, load_sess, filename, sample_num_file, hparams, flag):
    """Evaluate *load_model* on the dataset in *filename* and return metrics.

    *sample_num_file* holds the true sample count on its first line; it is
    used to trim padding examples appended by the batched input pipeline.
    """
    with open(sample_num_file, 'r') as f:
        sample_num = int(f.readlines()[0].strip())
    # Point the dataset iterator at the evaluation file.
    load_sess.run(load_model.iterator.initializer, feed_dict={load_model.filenames: [filename]})
    preds = []
    labels = []
    # Drain the iterator until the dataset is exhausted.
    while True:
        try:
            (_, _, step_pred, step_labels) = load_model.model.eval(load_sess)
            preds.extend(np.reshape(step_pred, (- 1)))
            labels.extend(np.reshape(step_labels, (- 1)))
        except tf.errors.OutOfRangeError:
            break
    # Drop padding rows beyond the real sample count.
    preds = preds[:sample_num]
    labels = labels[:sample_num]
    hparams.logger.info('data num:{0:d}'.format(len(labels)))
    res = metric.cal_metric(labels, preds, hparams, flag)
    return res
_fixtures(WebFixture, DataTableFixture)
def test_layout_for_contained_table(web_fixture, data_table_fixture):
    """A TableLayout passed to DataTable is applied to its inner table."""
    table_layout = TableLayout(heading_theme='light')
    data_table = DataTable(
        web_fixture.view,
        data_table_fixture.columns,
        data_table_fixture.data,
        'my_css_id',
        table_layout=table_layout,
    )
    assert data_table.table.layout is table_layout
def main():
    """Walk through the DPMS examples, always restoring original settings.

    Fix: the original constructed DPMSExamples() inside the try block, so a
    constructor failure made the `finally` clause raise NameError on
    `examples`; construction now happens before the try.
    """
    examples = DPMSExamples()
    try:
        print('Initial state')
        examples.print_dpms()
        print('Setting random timeouts')
        examples.set_random_timeouts()
        examples.print_dpms()
        print('The next example will turn-off your screen, press Ctrl-C to cancel.')
        time.sleep(2)
        examples.turn_off_display()
        print('Turning it on again...')
        time.sleep(2)
        examples.turn_on_display()
        print()
        print('Toggle DPMS')
        examples.toggle_dpms()
        examples.print_dpms()
        print('Toggle it again')
        examples.toggle_dpms()
        examples.print_dpms()
    finally:
        # Leave the user's DPMS configuration exactly as we found it.
        examples.restore()
def save_model(model, dirpath):
    """Save a HuggingFace-style *model* into *dirpath*.

    Ensures the directory exists and removes stale `config.json` /
    `pytorch_model.bin` artifacts before calling `save_pretrained`.

    Improvements: collapsed the redundant `exists` + `isfile` double checks
    (`os.path.isfile` already implies existence) and replaced the
    exists/makedirs branch with `os.makedirs(..., exist_ok=True)`.
    """
    os.makedirs(dirpath, exist_ok=True)
    for fname in ('config.json', 'pytorch_model.bin'):
        fpath = os.path.join(dirpath, fname)
        if os.path.isfile(fpath):
            os.remove(fpath)
    model.save_pretrained(dirpath)
def _add_runpip(subparsers, venv_completer: VenvCompleter, shared_parser: argparse.ArgumentParser) -> None:
    """Register the `runpip` subcommand on *subparsers*."""
    description = 'Run pip in an existing pipx-managed Virtual Environment'
    parser = subparsers.add_parser('runpip', help=description, description=description, parents=[shared_parser])
    package_arg = parser.add_argument('package', help='Name of the existing pipx-managed Virtual Environment to run pip in')
    # Shell completion for the venv name.
    package_arg.completer = venv_completer
    parser.add_argument('pipargs', nargs=argparse.REMAINDER, default=[], help='Arguments to forward to pip command')
class MultiplayerMembership(BaseModel):
    """Join table linking a User to a MultiplayerSession, with per-member
    flags (admin, ready, layout-generation help) and a join timestamp.

    The (user, session) pair is the composite primary key.
    """
    user: User = peewee.ForeignKeyField(User, backref='sessions')
    user_id: int
    session: MultiplayerSession = peewee.ForeignKeyField(MultiplayerSession, backref='members')
    session_id: int
    admin: bool = peewee.BooleanField(default=False)
    ready: bool = peewee.BooleanField(default=False)
    join_date = peewee.DateTimeField(default=_datetime_now)
    can_help_layout_generation: bool = peewee.BooleanField(default=False)

    # NOTE(review): likely a stripped @property decorator — confirm against
    # the original source.
    def effective_name(self) -> str:
        """Display name of the member (currently the user's name)."""
        return self.user.name

    # NOTE(review): takes `cls` — likely a stripped @classmethod decorator.
    def get_by_ids(cls, user_id: (int | User), session_id: (int | MultiplayerSession)) -> Self:
        """Fetch the membership row for the given user/session pair."""
        return cls.get((MultiplayerMembership.session == session_id), (MultiplayerMembership.user == user_id))

    class Meta():
        # Composite key: one membership row per (user, session).
        primary_key = peewee.CompositeKey('user', 'session')
class EfficientNetBackbone(object):
def __init__(self, cfgs):
self.cfgs = cfgs
self.MEAN_RGB = [(0.485 * 255), (0.456 * 255), (0.406 * 255)]
self.STDDEV_RGB = [(0.229 * 255), (0.224 * 255), (0.225 * 255)]
self._DEFAULT_BLOCKS_ARGS = ['r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25']
def efficientnet_params(self, model_name):
params_dict = {'efficientnet-b0': (1.0, 1.0, 224, 0.2), 'efficientnet-b1': (1.0, 1.1, 240, 0.2), 'efficientnet-b2': (1.1, 1.2, 260, 0.3), 'efficientnet-b3': (1.2, 1.4, 300, 0.3), 'efficientnet-b4': (1.4, 1.8, 380, 0.4), 'efficientnet-b5': (1.6, 2.2, 456, 0.4), 'efficientnet-b6': (1.8, 2.6, 528, 0.5), 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5)}
return params_dict[model_name]
def swish(self, features, use_native=True, use_hard=False):
if (use_native and use_hard):
raise ValueError('Cannot specify both use_native and use_hard.')
if use_native:
return tf.nn.swish(features)
if use_hard:
return ((features * tf.nn.relu6((features + np.float32(3)))) * (1.0 / 6.0))
features = tf.convert_to_tensor(features, name='features')
return (features * tf.nn.sigmoid(features))
def efficientnet(self, width_coefficient=None, depth_coefficient=None, dropout_rate=0.2, survival_prob=0.8):
global_params = efficientnet_model.GlobalParams(blocks_args=self._DEFAULT_BLOCKS_ARGS, batch_norm_momentum=0.99, batch_norm_epsilon=0.001, dropout_rate=dropout_rate, survival_prob=survival_prob, data_format='channels_last', num_classes=1000, width_coefficient=width_coefficient, depth_coefficient=depth_coefficient, depth_divisor=8, min_depth=None, relu_fn=tf.nn.swish, batch_norm=utils.BatchNormalization, use_se=True, clip_projection_output=False)
return global_params
def get_model_params(self, model_name, override_params):
if model_name.startswith('efficientnet'):
(width_coefficient, depth_coefficient, _, dropout_rate) = self.efficientnet_params(model_name)
global_params = self.efficientnet(width_coefficient, depth_coefficient, dropout_rate)
else:
raise NotImplementedError(('model name is not pre-defined: %s' % model_name))
if override_params:
global_params = global_params._replace(**override_params)
decoder = BlockDecoder()
blocks_args = decoder.decode(global_params.blocks_args)
logging.info('global_params= %s', global_params)
return (blocks_args, global_params)
def build_model(self, images, model_name, training, override_params=None, model_dir=None, fine_tuning=False, features_only=False, pooled_features_only=False):
assert isinstance(images, tf.Tensor)
assert (not (features_only and pooled_features_only))
if (override_params and override_params.get('drop_connect_rate', None)):
override_params['survival_prob'] = (1 - override_params['drop_connect_rate'])
if ((not training) or fine_tuning):
if (not override_params):
override_params = {}
override_params['batch_norm'] = utils.BatchNormalization
if fine_tuning:
override_params['relu_fn'] = functools.partial(self.swish, use_native=False)
(blocks_args, global_params) = self.get_model_params(model_name, override_params)
if model_dir:
param_file = os.path.join(model_dir, 'model_params.txt')
if (not tf.gfile.Exists(param_file)):
if (not tf.gfile.Exists(model_dir)):
tf.gfile.MakeDirs(model_dir)
with tf.gfile.GFile(param_file, 'w') as f:
logging.info('writing to %s', param_file)
f.write(('model_name= %s\n\n' % model_name))
f.write(('global_params= %s\n\n' % str(global_params)))
f.write(('blocks_args= %s\n\n' % str(blocks_args)))
with tf.variable_scope(model_name):
model = efficientnet_model.Model(blocks_args, global_params)
outputs = model(images, training=training, features_only=features_only, pooled_features_only=pooled_features_only)
if features_only:
outputs = tf.identity(outputs, 'features')
elif pooled_features_only:
outputs = tf.identity(outputs, 'pooled_features')
else:
outputs = tf.identity(outputs, 'logits')
return (outputs, model.endpoints)
def build_model_base(self, images, model_name, training, override_params=None):
    """Build only the EfficientNet feature extractor (no classification head).

    Args:
        images: input image tensor.
        model_name: predefined model name.
        training: whether the graph is built in training mode.
        override_params: optional dict of global-param overrides (not mutated).

    Returns:
        (features, endpoints): final feature tensor (named 'features') and
        the model's endpoint dict.
    """
    assert isinstance(images, tf.Tensor)
    if override_params and override_params.get('drop_connect_rate', None):
        # BUG FIX: copy before adding 'survival_prob' so the caller's dict is
        # not mutated as a side effect.
        override_params = dict(override_params)
        override_params['survival_prob'] = 1 - override_params['drop_connect_rate']
    (blocks_args, global_params) = self.get_model_params(model_name, override_params)
    with tf.variable_scope(model_name):
        model = efficientnet_model.Model(blocks_args, global_params)
        features = model(images, training=training, features_only=True)
    features = tf.identity(features, 'features')
    return (features, model.endpoints)
def build_model_fpn_base(self, images, model_name, training, override_params=None):
    """Build the backbone and return only its endpoint dict (FPN feature levels)."""
    _, endpoints = self.build_model_base(images, model_name, training, override_params=override_params)
    return endpoints
class STSBenchmarkFinetune(SICKEval):
    """STS-Benchmark transfer task reusing the SICK evaluation pipeline.

    Loads the three tab-separated STS-B splits into the ``sick_data`` layout
    expected by the parent evaluator.
    """

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        splits = {}
        for split_name, file_name in (('train', 'sts-train.csv'), ('dev', 'sts-dev.csv'), ('test', 'sts-test.csv')):
            splits[split_name] = self.loadFile(os.path.join(task_path, file_name))
        self.sick_data = splits

    def loadFile(self, fpath):
        """Parse one STS-B split: column 4 is the gold score, columns 5 and 6
        are the two sentences (whitespace-tokenized here)."""
        sentences_a = []
        sentences_b = []
        gold_scores = []
        with io.open(fpath, 'r', encoding='utf-8') as handle:
            for raw_line in handle:
                columns = raw_line.strip().split('\t')
                sentences_a.append(columns[5].split())
                sentences_b.append(columns[6].split())
                gold_scores.append(float(columns[4]))
        return {'X_A': sentences_a, 'X_B': sentences_b, 'y': gold_scores}
def load_score_files(args):
    """Load generation output and rescoring results for one or all shards.

    For each selected shard this reads the generator's n-best output plus the
    optional second-model and language-model rescore files, and returns four
    parallel lists indexed by shard:
    (gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst).
    Entries of the last two lists may be None when the corresponding model
    was not supplied.
    """
    if args.all_shards:
        shard_ids = list(range(args.num_shards))
    else:
        shard_ids = [args.shard_id]
    gen_output_lst = []
    bitext1_lst = []
    bitext2_lst = []
    lm_res1_lst = []
    for shard_id in shard_ids:
        using_nbest = (args.nbest_list is not None)
        (pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir) = rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
        # When a rescoring model is the generation model itself (and no source
        # prefix truncation was applied), its scores are reused directly from
        # the generator output instead of a separate rescore file.
        rerank1_is_gen = ((args.gen_model == args.score_model1) and (args.source_prefix_frac is None))
        rerank2_is_gen = ((args.gen_model == args.score_model2) and (args.source_prefix_frac is None))
        score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1)
        if (args.score_model2 is not None):
            score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2)
        if (args.language_model is not None):
            lm_score_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.lm_name, lm_file=True)
        predictions_bpe_file = (pre_gen + '/generate_output_bpe.txt')
        if using_nbest:
            print('Using predefined n-best list from interactive.py')
            predictions_bpe_file = args.nbest_list
        gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe, nbest=using_nbest, prefix_len=args.prefix_len, target_prefix_frac=args.target_prefix_frac)
        if rerank1_is_gen:
            bitext1 = gen_output
        else:
            bitext1 = rerank_utils.BitextOutput(score1_file, args.backwards1, args.right_to_left1, args.remove_bpe, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
        # NOTE(review): if nbest_list is set but score_model2 is None and
        # rerank2_is_gen is False, score2_file was never assigned and the call
        # below would raise NameError — confirm intended usage upstream.
        if ((args.score_model2 is not None) or (args.nbest_list is not None)):
            if rerank2_is_gen:
                bitext2 = gen_output
            else:
                bitext2 = rerank_utils.BitextOutput(score2_file, args.backwards2, args.right_to_left2, args.remove_bpe, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
                assert (bitext2.source_lengths == bitext1.source_lengths), 'source lengths for rescoring models do not match'
                assert (bitext2.target_lengths == bitext1.target_lengths), 'target lengths for rescoring models do not match'
        elif args.diff_bpe:
            assert (args.score_model2 is None)
            bitext2 = gen_output
        else:
            bitext2 = None
        if (args.language_model is not None):
            lm_res1 = rerank_utils.LMOutput(lm_score_file, args.lm_dict, args.prefix_len, args.remove_bpe, args.target_prefix_frac)
        else:
            lm_res1 = None
        gen_output_lst.append(gen_output)
        bitext1_lst.append(bitext1)
        bitext2_lst.append(bitext2)
        lm_res1_lst.append(lm_res1)
    return (gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst)
def prepare_build_wheel_files(build_directory, config_settings):
    """Stage the files ``build_wheel`` needs into *build_directory*.

    Copies pyproject.toml and every top-level .py file, and clones each
    ``*.dist-info`` directory. ``config_settings`` is accepted for interface
    compatibility but unused here.
    """
    for flat_file in ['pyproject.toml'] + glob('*.py'):
        shutil.copy(flat_file, build_directory)
    for dist_info_dir in glob('*.dist-info'):
        shutil.copytree(dist_info_dir, pjoin(build_directory, dist_info_dir))
class SawyerPlateSlideBackV2Policy(Policy):
    """Scripted policy for the MetaWorld Sawyer plate-slide-back task."""
    # NOTE(review): this bare name looks like the remnant of a stripped
    # decorator or attribute assignment (likely ``@assert_fully_parsed`` on
    # _parse_obs); as written it raises NameError at class creation — confirm
    # against the upstream source.
    _fully_parsed
    # NOTE(review): no ``self`` parameter — presumably @staticmethod upstream.
    def _parse_obs(obs):
        # Observation layout: [0:3] gripper xyz, [4:7] puck xyz; the rest unused.
        return {'hand_pos': obs[:3], 'unused_1': obs[3], 'puck_pos': obs[4:7], 'unused_2': obs[7:]}
    def get_action(self, obs):
        """Return the action array (3 position deltas + gripper effort)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # Proportional controller toward the staged target position.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = (- 1.0)
        return action.array
    # NOTE(review): no ``self`` parameter — presumably @staticmethod upstream.
    def _desired_pos(o_d):
        """Staged waypoints: hover over puck, descend, pull in -y, then slide."""
        pos_curr = o_d['hand_pos']
        # Grip point offset from the reported puck center.
        pos_puck = (o_d['puck_pos'] + np.array([0.0, (- 0.065), 0.025]))
        # Stage 1: align horizontally while hovering 0.1 above the grip point.
        if (np.linalg.norm((pos_curr[:2] - pos_puck[:2])) > 0.01):
            return (pos_puck + np.array([0.0, 0.0, 0.1]))
        # Stage 2: descend onto the puck.
        elif (abs((pos_curr[2] - pos_puck[2])) > 0.04):
            return pos_puck
        # Stage 3: move in -y until below 0.7.
        elif (pos_curr[1] > 0.7):
            return (pos_curr + np.array([0.0, (- 0.1), 0.0]))
        # Stage 4/5: converge to y=0.55, shifting x toward the goal region.
        elif (pos_curr[1] > 0.6):
            return np.array([0.15, 0.55, pos_curr[2]])
        else:
            return np.array([(pos_curr[0] - 0.1), 0.55, pos_curr[2]])
class OutgoingViewFull(StatsView):
    """Stats panel showing a fit's outgoing remote-repair figures.

    Layout and values are driven by the module-level ``stats`` table; each of
    its rows describes one icon+label cell (attribute name, description,
    format, image, tooltip, value callables, precision and display bounds).
    """
    name = 'outgoingViewFull'
    def __init__(self, parent):
        StatsView.__init__(self)
        self.parent = parent
        # Last rendered value per cell (in ``stats`` order); lets refreshPanel
        # skip widget updates when nothing changed.
        self._cachedValues = []
    def getHeaderText(self, fit):
        return _t('Remote Reps')
    def getTextExtentW(self, text):
        # Pixel width the parent window needs to render ``text``.
        (width, height) = self.parent.GetTextExtent(text)
        return width
    def populatePanel(self, contentPanel, headerPanel):
        """Create one icon+value cell per ``stats`` row inside contentPanel."""
        contentSizer = contentPanel.GetSizer()
        parent = self.panel = contentPanel
        self.headerPanel = headerPanel
        sizerOutgoing = wx.GridSizer(1, 4, 0, 0)
        contentSizer.Add(sizerOutgoing, 0, wx.EXPAND, 0)
        for (labelName, labelDesc, valueFormat, image, tooltip, val, preSpoolVal, fullSpoolVal, prec, lowest, highest) in stats:
            baseBox = wx.BoxSizer(wx.VERTICAL)
            baseBox.Add(BitmapLoader.getStaticBitmap(('%s_big' % image), parent, 'gui'), 0, wx.ALIGN_CENTER)
            lbl = wx.StaticText(parent, wx.ID_ANY, valueFormat.format(0, ''))
            lbl.SetToolTip(wx.ToolTip(tooltip))
            # Expose the label as an attribute so refreshPanel can look it up.
            setattr(self, labelName, lbl)
            baseBox.Add(lbl, 0, wx.ALIGN_CENTER)
            self._cachedValues.append(0)
            sizerOutgoing.Add(baseBox, 1, wx.ALIGN_LEFT)
    def refreshPanel(self, fit):
        """Recompute every cell for ``fit`` (a None fit renders zeros)."""
        def formatTooltip(text, preSpool, fullSpool, prec, lowest, highest):
            # Returns (has_spool_difference, tooltip_text); the spool-up range
            # is only appended when pre- and full-spool values differ at
            # the displayed precision.
            if (roundToPrec(preSpool, prec) == roundToPrec(fullSpool, prec)):
                return (False, text)
            else:
                return (True, '{}\nSpool up: {}-{}'.format(text, formatAmount(preSpool, prec, lowest, highest), formatAmount(fullSpool, prec, lowest, highest)))
        defaultSpoolValue = eos.config.settings['globalDefaultSpoolupPercentage']
        counter = 0
        for (labelName, labelDesc, valueFormat, image, tooltip, val, preSpoolVal, fullSpoolVal, prec, lowest, highest) in stats:
            label = getattr(self, labelName)
            val = (val(fit, defaultSpoolValue) if (fit is not None) else 0)
            preSpoolVal = (preSpoolVal(fit) if (fit is not None) else 0)
            fullSpoolVal = (fullSpoolVal(fit) if (fit is not None) else 0)
            # Only touch the widget when the displayed value actually changed.
            if (self._cachedValues[counter] != val):
                (hasSpool, tooltipText) = formatTooltip(tooltip, preSpoolVal, fullSpoolVal, prec, lowest, highest)
                label.SetLabel(valueFormat.format(formatAmount(val, prec, lowest, highest), ('s' if hasSpool else '')))
                label.SetToolTip(wx.ToolTip(tooltipText))
                self._cachedValues[counter] = val
            counter += 1
        self.panel.Layout()
        self.headerPanel.Layout()
def is_extension_class(cdef: ClassDef) -> bool:
    """Decide whether *cdef* may be compiled as a native extension class.

    A class is rejected when it carries an unrecognized decorator, is a
    TypedDict or NamedTuple, or uses a metaclass other than the known-safe
    ones.
    """
    for dec in cdef.decorators:
        recognized = (is_trait_decorator(dec)
                      or is_dataclass_decorator(dec)
                      or get_mypyc_attr_call(dec))
        if not recognized:
            return False
    info = cdef.info
    if info.typeddict_type or info.is_named_tuple:
        return False
    metaclass = info.metaclass_type
    if metaclass and metaclass.type.fullname not in ('abc.ABCMeta', 'typing.TypingMeta', 'typing.GenericMeta'):
        return False
    return True
# NOTE(review): the line below appears to be a stripped
# ``@pytest.mark.parametrize`` decorator (its prefix was lost in extraction);
# confirm against the upstream test file.
.parametrize('page', ['stylesheet/simple.html', 'stylesheet/simple_bg_set_red.html'])
def test_set_delayed(stylesheet_tester, page):
    """Setting the stylesheet after init should restyle the loaded page."""
    stylesheet_tester.js.load(page)
    # Start from a stylesheet that sets nothing.
    stylesheet_tester.init_stylesheet('none.css')
    stylesheet_tester.set_css('body {background-color: rgb(0, 255, 0);}')
    stylesheet_tester.check_set('rgb(0, 255, 0)')
class NvidiaSensors(base.ThreadPoolText):
    """Qtile widget polling GPU sensors via ``nvidia-smi``."""
    defaults = [('format', '{temp}C', 'Display string format. Three options available: ``{temp}`` - temperature, ``{fan_speed}`` and ``{perf}`` - performance level'), ('foreground_alert', 'ff0000', 'Foreground colour alert'), ('gpu_bus_id', '', "GPU's Bus ID, ex: ``01:00.0``. If leave empty will display all available GPU's"), ('update_interval', 2, 'Update interval in seconds.'), ('threshold', 70, 'If the current temperature value is above, then change to foreground_alert colour')]

    def __init__(self, **config):
        base.ThreadPoolText.__init__(self, '', **config)
        self.add_defaults(NvidiaSensors.defaults)
        # Remember the configured colour so we can switch back after an alert.
        self.foreground_normal = self.foreground

    def _get_sensors_data(self, command):
        # nvidia-smi emits one CSV row per GPU; strip blanks before parsing.
        raw = self.call_process(command, shell=True)
        return csv.reader(raw.strip().replace(' ', '').split('\n'))

    def _parse_format_string(self):
        # Sensor names are the placeholders used in the format string.
        return set(re.findall('{(.+?)}', self.format))

    def poll(self):
        """Query the GPU(s) and render one formatted segment per device."""
        wanted = self._parse_format_string()
        if not _all_sensors_names_correct(wanted):
            return 'Wrong sensor name'
        bus_flag = f'-i {self.gpu_bus_id}' if self.gpu_bus_id else ''
        query = ','.join(sensors_mapping[name] for name in wanted)
        command = 'nvidia-smi {} --query-gpu={} --format=csv,noheader'.format(bus_flag, query)
        try:
            readings = [dict(zip(wanted, row)) for row in self._get_sensors_data(command)]
            for reading in readings:
                temp = reading.get('temp')
                if temp:
                    # Alert colour while any GPU is above the threshold.
                    if int(temp) > self.threshold:
                        self.foreground = self.foreground_alert
                    else:
                        self.foreground = self.foreground_normal
            return ' - '.join(self.format.format(**reading) for reading in readings)
        except Exception:
            # Best effort: nvidia-smi missing or output unparsable.
            return None
class Blosc2(Codec):
    """numcodecs-style codec wrapping imagecodecs' Blosc2 (de)compressor."""
    codec_id = 'imagecodecs_blosc2'

    def __init__(self, level=None, compressor=None, typesize=None, blocksize=None, shuffle=None, numthreads=None):
        # All parameters are passed straight through to imagecodecs; None
        # means "use the library default".
        self.level = level
        self.compressor = compressor
        self.typesize = typesize
        self.blocksize = blocksize
        self.shuffle = shuffle
        self.numthreads = numthreads

    def encode(self, buf):
        """Compress ``buf`` (squeezed to avoid length-1 axes) to bytes."""
        squeezed = protective_squeeze(numpy.asarray(buf))
        return imagecodecs.blosc2_encode(
            squeezed,
            level=self.level,
            compressor=self.compressor,
            typesize=self.typesize,
            blocksize=self.blocksize,
            shuffle=self.shuffle,
            numthreads=self.numthreads,
        )

    def decode(self, buf, out=None):
        """Decompress ``buf``, optionally into a flattened view of ``out``."""
        return imagecodecs.blosc2_decode(buf, numthreads=self.numthreads, out=_flat(out))
class RDN(nn.Module):
    """Residual Dense Network variant with a dual-attention module and 3-D
    fusion convolutions between the RDB outputs.

    NOTE(review): intermediate tensor ranks depend on what RDB and DAM_Module
    produce (the RDB path runs on 5-D tensors after the unsqueeze below);
    the shape comments are inferred from the unsqueeze/view calls and should
    be confirmed against those modules.
    """
    def __init__(self, args):
        super(RDN, self).__init__()
        r = args.scale[0]  # upscaling factor: 2, 3 or 4
        G0 = args.G0  # base channel count
        kSize = args.RDNkSize  # conv kernel size
        # D: number of RDBs, C: conv layers per RDB, G: growth rate.
        (self.D, C, G) = {'A': (20, 6, 32), 'B': (16, 8, 64)}[args.RDNconfig]
        # Shallow feature extraction (two 2-D convs).
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=((kSize - 1) // 2), stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=((kSize - 1) // 2), stride=1)
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(RDB(growRate0=G0, growRate=G, nConvLayers=C))
        self.da = DAM_Module()
        # Global feature fusion over the stacked RDB outputs (3-D convs).
        self.GFF = nn.Sequential(*[nn.Conv3d((self.D * G0), G0, 1, padding=0, stride=1), nn.Conv3d(G0, G0, kSize, padding=((kSize - 1) // 2), stride=1)])
        # Upsampling head: single PixelShuffle for x2/x3, two-stage for x4.
        if ((r == 2) or (r == 3)):
            self.UPNet = nn.Sequential(*[nn.Conv2d(G0, ((G * r) * r), kSize, padding=((kSize - 1) // 2), stride=1), nn.PixelShuffle(r), nn.Conv2d(G, args.n_colors, kSize, padding=((kSize - 1) // 2), stride=1)])
        elif (r == 4):
            self.UPNet = nn.Sequential(*[nn.Conv2d(G0, (G * 4), kSize, padding=((kSize - 1) // 2), stride=1), nn.PixelShuffle(2), nn.Conv2d(G, (G * 4), kSize, padding=((kSize - 1) // 2), stride=1), nn.PixelShuffle(2), nn.Conv2d(G, args.n_colors, kSize, padding=((kSize - 1) // 2), stride=1)])
        else:
            raise ValueError('scale must be 2 or 3 or 4.')
    def forward(self, x):
        f__1 = self.SFENet1(x)  # shallow features, G0 channels
        x = self.SFENet2(f__1).unsqueeze(1)  # add a depth dim for the 3-D path
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)
        x = torch.cat(RDBs_out, 1)  # stack all RDB outputs along dim 1
        (B, N, C, H, W) = x.size()
        x = self.da(x)  # dual attention over the RDB stack
        x = self.GFF(x)  # global feature fusion
        x = x.view(B, (N * C), H, W)  # collapse back to 4-D
        # Global residual; NOTE(review): requires N*C to equal f__1's channel
        # count (G0) at this point — confirm against GFF/DAM output shapes.
        x += f__1
        return self.UPNet(x)
class NormPQ(object):
    """Norm-explicit quantizer: encodes a vector's direction with an inner
    quantizer and its norm with a scalar codebook of ``n_percentile`` entries
    built by one of several methods ('kmeans', 'kmeans_partial', 'percentile',
    'uniform', 'exponential').
    """
    def __init__(self, n_percentile, quantize, true_norm=False, verbose=True, method='kmeans', recover='quantize'):
        self.M = 2
        (self.n_percentile, self.true_norm, self.verbose) = (n_percentile, true_norm, verbose)
        self.method = method
        self.recover = recover
        # Smallest unsigned integer dtype able to index every norm centroid.
        self.code_dtype = (np.uint8 if (n_percentile <= (2 ** 8)) else (np.uint16 if (n_percentile <= (2 ** 16)) else np.uint32))
        self.percentiles = None  # norm codebook, filled by fit()
        self.quantize = quantize  # inner direction quantizer
    def class_message(self):
        return 'NormPQ, percentiles: {}, quantize: {}'.format(self.n_percentile, self.quantize.class_message())
    def fit(self, vecs, iter):
        """Train the direction quantizer and the norm codebook on ``vecs``
        (float32, shape (N, D)). Returns self."""
        assert (vecs.dtype == np.float32)
        assert (vecs.ndim == 2)
        (N, D) = vecs.shape
        assert (self.n_percentile < N), 'the number of norm intervals should be more than Ks'
        (norms, normalized_vecs) = normalize(vecs)
        self.quantize.fit(normalized_vecs, iter)
        if (self.recover == 'quantize'):
            # Rescale norms so that norm * ||reconstructed direction|| matches
            # the original vector length.
            compressed_vecs = self.quantize.compress(normalized_vecs)
            norms = (norms / np.linalg.norm(compressed_vecs, axis=1))
        elif (self.recover == 'normalization'):
            # NOTE(review): deliberately disabled recovery modes below — the
            # assert False makes these branches hard failures.
            warnings.warn('Recover norm by normalization.')
            assert False
        else:
            warnings.warn('No normalization guarantee.')
            assert False
        if (self.method == 'kmeans'):
            (self.percentiles, _) = kmeans2(norms[:], self.n_percentile, iter=iter, minit='points')
        elif (self.method == 'kmeans_partial'):
            # Cluster the smallest 70% and largest 30% of norms separately so
            # the tail gets its own centroids.
            indexes = np.argsort(norms)
            count = int((len(norms) * 0.7))
            (centers_small_norms, _) = kmeans2(norms[indexes[:count]], (self.n_percentile // 2), iter=iter, minit='points')
            (centers_big_norms, _) = kmeans2(norms[indexes[count:]], (self.n_percentile // 2), iter=iter, minit='points')
            self.percentiles = np.concatenate((centers_small_norms, centers_big_norms))
        elif (self.method == 'percentile'):
            # Bin edges at equal-probability quantiles of the norms.
            self.percentiles = np.percentile(norms, np.linspace(0, 100, (self.n_percentile + 1))[:])
            self.percentiles = np.array(self.percentiles, dtype=np.float32)
        elif (self.method == 'uniform'):
            self.percentiles = np.linspace(np.min(norms), np.max(norms), (self.n_percentile + 1))
            self.percentiles = np.array(self.percentiles, dtype=np.float32)
        elif (self.method == 'exponential'):
            # Geometrically spaced bin edges between min and max norm.
            q = 0.98
            a = ((1 - q) / (1 - (q ** self.n_percentile)))
            self.percentiles = [(np.min(norms) if (i == 0) else (np.min(norms) + (((a * (1 - (q ** i))) / (1 - q)) * (np.max(norms) - np.min(norms))))) for i in range((self.n_percentile + 1))]
            self.percentiles = np.array(self.percentiles, dtype=np.float32)
        else:
            assert False
        return self
    def encode_norm(self, norms):
        """Map each norm to its codebook index (centroid id or bin number)."""
        if ((self.method == 'kmeans') or (self.method == 'kmeans_partial')):
            (norm_index, _) = vq(norms[:], self.percentiles)
        else:
            # Bin-edge methods: index of the first edge strictly greater than
            # the norm, clipped into [1, n_percentile].
            norm_index = [np.argmax((self.percentiles[1:] > n)) for n in norms]
            norm_index = np.clip(norm_index, 1, self.n_percentile)
        return norm_index
    def decode_norm(self, norm_index):
        """Inverse of encode_norm: centroid value or bin midpoint."""
        if ((self.method == 'kmeans') or (self.method == 'kmeans_partial')):
            return self.percentiles[norm_index]
        else:
            return ((self.percentiles[norm_index] + self.percentiles[(norm_index - 1)]) / 2.0)
    def compress(self, vecs):
        """Quantize-and-reconstruct ``vecs``: compressed direction scaled by
        the (de)quantized norm."""
        (norms, normalized_vecs) = normalize(vecs)
        compressed_vecs = self.quantize.compress(normalized_vecs)
        del normalized_vecs
        if (self.recover == 'quantize'):
            norms = (norms / np.linalg.norm(compressed_vecs, axis=1))
        elif (self.recover == 'normalization'):
            # NOTE(review): disabled path, see fit().
            warnings.warn('Recover norm by normalization.')
            (_, compressed_vecs) = normalize(compressed_vecs)
            assert False
        else:
            warnings.warn('No normalization guarantee.')
            assert False
        if (not self.true_norm):
            # Round-trip the norms through the codebook (lossy).
            norms = self.decode_norm(self.encode_norm(norms))
        else:
            warnings.warn('Using true norm to compress vector.')
            assert False
        return (compressed_vecs.transpose() * norms).transpose()
def focal_loss(alpha: Optional[Sequence]=None, gamma: float=0.0, reduction: str='mean', ignore_index: int=(- 100), device='cpu', dtype=torch.float32) -> FocalLoss:
    """Factory for :class:`FocalLoss`.

    ``alpha`` (per-class weights) may be any sequence; it is converted to a
    tensor and moved to ``device``/``dtype`` before being handed to the loss.
    The remaining arguments are forwarded unchanged.
    """
    if alpha is not None:
        alpha_tensor = alpha if isinstance(alpha, Tensor) else torch.tensor(alpha)
        alpha = alpha_tensor.to(device=device, dtype=dtype)
    return FocalLoss(alpha=alpha, gamma=gamma, reduction=reduction, ignore_index=ignore_index)
class _TestSequenceMeta(type):
    """Metaclass that turns every named code block in ``../docs/*.rst`` into a
    test method on the class being created.

    NOTE(review): ``i`` restarts at 0 for each .rst file, so ``test_<i>`` keys
    from a later file overwrite those from an earlier one — confirm whether
    this is intended before changing the naming scheme.
    """

    def __new__(mcs, name, bases, tests):
        docs_dir = Path(__file__).parent.parent / 'docs'
        prev_cwd = os.getcwd()
        # Doc examples may use paths relative to the docs directory.
        os.chdir(docs_dir)
        try:
            for rst_path in filter(lambda x: x.suffix == '.rst', docs_dir.iterdir()):
                with open(rst_path, 'r', encoding='utf8') as file:
                    data = file.read()
                doctree = publish_doctree(data, settings_overrides={'report_level': Reporter.SEVERE_LEVEL})
                # docutils renamed traverse() to findall(); support both.
                if hasattr(doctree, 'findall'):
                    code_blocks = list(doctree.findall(condition=is_code_block))
                else:
                    code_blocks = list(doctree.traverse(condition=is_code_block))
                for i, node in enumerate(code_blocks):
                    source_code = node.astext()
                    # BUG FIX: the original rebound ``name`` here, clobbering
                    # the class name later passed to type.__new__.
                    block_name = node.attributes['names'][0]
                    tests[f'test_{i}'] = gen_test(i, block_name, source_code)
        finally:
            # Always restore the working directory, even if parsing fails.
            os.chdir(prev_cwd)
        return type.__new__(mcs, name, bases, tests)
class SpeakerVoucher(TimeStampedModel):
    """Conference ticket voucher issued to a speaker or co-speaker."""
    class VoucherType(models.TextChoices):
        SPEAKER = ('speaker', _('Speaker'))
        CO_SPEAKER = ('co_speaker', _('Co-Speaker'))
    conference = models.ForeignKey(Conference, on_delete=models.PROTECT, verbose_name=_('conference'), related_name='+')
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, null=False, blank=False, verbose_name=_('user'), related_name='+')
    voucher_type = models.CharField(max_length=20, choices=VoucherType.choices)
    voucher_code = models.TextField(help_text=_('Voucher code generated for this speaker. If the speaker has multiple events, only one code will be generated.'), blank=False, null=False)
    # Link to the voucher created in the external Pretix ticketing system.
    pretix_voucher_id = models.IntegerField(help_text=_('ID of the voucher in the Pretix database'), blank=True, null=True)
    voucher_email_sent_at = models.DateTimeField(help_text=_('When the email was last sent'), blank=True, null=True)
    # NOTE(review): no ``self``/``cls`` parameter — presumably decorated with
    # @staticmethod upstream (decorator lost in extraction); calling it on an
    # instance as written would raise TypeError.
    def generate_code() -> str:
        # Uppercase letters excluding easily-confused I, J and O.
        charset = list('ABCDEFGHKLMNPQRSTUVWXYZ')
        random_string = get_random_string(length=20, allowed_chars=charset)
        return f'SPEAKER-{random_string}'
    class Meta():
        verbose_name = _('Speakers Voucher')
        verbose_name_plural = _('Speakers Vouchers')
        # One voucher per user per conference.
        unique_together = ('conference', 'user')
class Input():
    """Mixin binding a Qt value widget to a parameter object.

    Expects the co-inherited widget class to provide ``setValue``,
    ``setSuffix`` and ``value``.
    """

    def __init__(self, parameter, **kwargs):
        super().__init__(**kwargs)
        self._parameter = None
        self.set_parameter(parameter)

    def set_parameter(self, parameter):
        """Attach *parameter*, seeding the widget value and units suffix."""
        self._parameter = parameter
        if parameter.is_set():
            self.setValue(parameter.value)
        units = getattr(parameter, 'units', None)
        if units:
            self.setSuffix(' %s' % units)

    def update_parameter(self):
        """Push the widget's current value back into the parameter."""
        self._parameter.value = self.value()

    def parameter(self):
        """Return the bound parameter, synchronized with the widget first."""
        self.update_parameter()
        return self._parameter
class UserDetailsPluginsList(PluginsList):
    """Plugin list restricted to a single author/owner identified by the
    ``username`` URL kwarg."""
    template_name = 'plugins/user.html'

    def _lookup_user(self):
        # 404 on unknown usernames.
        return get_object_or_404(User, username=self.kwargs['username'])

    def get_filtered_queryset(self, qs):
        """Keep only plugins the user created or owns."""
        user = self._lookup_user()
        return qs.filter(Q(created_by=user) | Q(owners=user))

    def get_context_data(self, **kwargs):
        """Add the page title, the viewed user and their trusted flag."""
        user = self._lookup_user()
        context = super(UserDetailsPluginsList, self).get_context_data(**kwargs)
        context.update({
            'title': _('Plugins from %s') % user,
            'user_is_trusted': user.has_perm('plugins.can_approve'),
            'plugin_user': user,
        })
        return context
class Metadata(pkg_resources.EmptyProvider):
    """In-memory metadata provider: maps metadata file names to their text."""

    def __init__(self, *pairs):
        # Each positional argument is a (name, content) pair.
        self.metadata = dict(pairs)

    def has_metadata(self, name):
        """True if metadata file *name* was supplied."""
        return name in self.metadata

    def get_metadata(self, name):
        """Return the raw text for *name* (KeyError if absent)."""
        return self.metadata[name]

    def get_metadata_lines(self, name):
        """Yield non-blank, non-comment lines of *name*'s content."""
        return pkg_resources.yield_lines(self.get_metadata(name))
# NOTE(review): the four lines below appear to be stripped
# ``@pytest.mark.parametrize`` decorators (their prefixes were lost in
# extraction); confirm against the upstream test file.
.parametrize('delete', [True, False])
.parametrize('stylesheet_param', [True, False])
.parametrize('update', [True, False])
.parametrize('changed_option', ['colors.hints.fg', 'colors.hints.bg'])
def test_set_register_stylesheet(delete, stylesheet_param, update, changed_option, qtbot, config_stub, caplog):
    """Registered stylesheets re-render on config change only while the object
    is alive, update=True was requested, and the changed option is used."""
    config_stub.val.colors.hints.fg = 'magenta'
    qss = '{{ conf.colors.hints.fg }}'
    with caplog.at_level(9):
        if stylesheet_param:
            obj = StyleObj()
            stylesheet.set_register(obj, qss, update=update)
        else:
            obj = StyleObj(qss)
            stylesheet.set_register(obj, update=update)
        assert (caplog.messages[(- 1)] == 'stylesheet for StyleObj: magenta')
    assert (obj.rendered_stylesheet == 'magenta')
    if delete:
        # Destroy the object so the config hook must not fire afterwards.
        with qtbot.wait_signal(obj.destroyed):
            obj.deleteLater()
    config_stub.set_obj(changed_option, 'yellow')
    # Only a live object with update=True and a relevant option re-renders.
    expected = ('magenta' if (delete or (not update) or (changed_option != 'colors.hints.fg')) else 'yellow')
    assert (obj.rendered_stylesheet == expected)
class DeactivateButtonEvent(DefaultScript):
    """One-shot timed script that disables the red button for 21 seconds,
    then restores it and hands off to the blinking script."""

    def at_script_creation(self):
        self.key = 'deactivate_button'
        self.desc = 'Deactivate red button temporarily'
        # Fire once, 21 seconds after start, surviving server reloads.
        self.interval = 21
        self.repeats = 1
        self.start_delay = True
        self.persistent = True

    def at_start(self):
        # Shut and lock the protective lid and quietly break the lamp.
        self.obj.close_lid()
        self.obj.db.lid_locked = True
        self.obj.break_lamp(feedback=False)

    def at_repeat(self):
        # Restore the lamp and describe the closed-lid state.
        self.obj.db.lamp_works = True
        self.db.desc = ('This is a large red button, inviting yet evil-looking. '
                        'Its glass cover is closed, protecting it.')
        # Resume blinking and unlock the lid again.
        self.obj.scripts.add(BlinkButtonEvent)
        self.obj.db.lid_locked = False
        self.obj.scripts.validate()
def insert_deepcopy(fgraph, wrapped_inputs, wrapped_outputs):
    """Insert view/deep-copy ops on fgraph outputs that alias other outputs
    or non-updated inputs, so returned values never share storage unless both
    sides opted in with ``borrow=True``.

    ``wrapped_inputs``/``wrapped_outputs`` are the user-facing In/Out wrappers
    carrying the ``borrow``/``update`` flags for the corresponding fgraph
    inputs/outputs.
    """
    assert (len(wrapped_inputs) == len(fgraph.inputs))
    assert (len(wrapped_outputs) == len(fgraph.outputs))
    reason = 'insert_deepcopy'
    # Inputs with an update are allowed to alias outputs — sharing is intended.
    updated_fgraph_inputs = {fgraph_i for (i, fgraph_i) in zip(wrapped_inputs, fgraph.inputs) if getattr(i, 'update', False)}
    all_graph_inputs = list(graph_inputs(fgraph.outputs))
    has_destroyers_attr = hasattr(fgraph, 'has_destroyers')
    for i in range(len(fgraph.outputs)):
        # All variables that output i is (transitively) a view of.
        views_of_output_i = set()
        view_tree_set(fgraph, alias_root(fgraph.outputs[i]), views_of_output_i)
        copied = False
        # Case 1: output i aliases a later output j.
        for j in range((i + 1), len(fgraph.outputs)):
            if (fgraph.outputs[j] in views_of_output_i):
                if (wrapped_outputs[i].borrow and wrapped_outputs[j].borrow):
                    # Both callers accepted aliasing: a cheap view suffices.
                    fgraph.change_node_input('output', i, view_op(fgraph.outputs[i]), reason=reason)
                else:
                    fgraph.change_node_input('output', i, deep_copy_op(fgraph.outputs[i]), reason=reason)
                copied = True
                break
        if (not copied):
            # Case 2: output i aliases a graph input that is not updated and
            # not already destroyed in-place by some op.
            for input_j in all_graph_inputs:
                if (input_j in updated_fgraph_inputs):
                    continue
                if ((input_j in views_of_output_i) and (not (has_destroyers_attr and fgraph.has_destroyers([input_j])))):
                    if (input_j in fgraph.inputs):
                        j = fgraph.inputs.index(input_j)
                        # Both the output and that input must opt into borrowing.
                        if (wrapped_outputs[i].borrow and wrapped_inputs[j].borrow):
                            fgraph.change_node_input('output', i, view_op(fgraph.outputs[i]), reason=reason)
                            break
                        else:
                            fgraph.change_node_input('output', i, deep_copy_op(fgraph.outputs[i]), reason=reason)
                            break
                    # Implicit (non-explicit) graph input: only the output's
                    # borrow flag decides.
                    elif wrapped_outputs[i].borrow:
                        fgraph.change_node_input('output', i, view_op(fgraph.outputs[i]), reason=reason)
                        break
                    else:
                        fgraph.change_node_input('output', i, deep_copy_op(fgraph.outputs[i]), reason=reason)
                        break
class SpatialSegmentSmoothness(object):
    """Smoothness penalty on a displacement field, masked so that gradients
    across warped segment contours are not penalized.

    NOTE(review): ``n_chans`` and ``lambda_i`` are stored but never used in
    compute_loss — confirm whether the weighting was meant to be applied.
    """

    def __init__(self, n_chans, n_dims, warped_contours_layer_output=None, lambda_i=1.0):
        self.n_dims = n_dims
        self.warped_contours_layer_output = warped_contours_layer_output
        self.lambda_i = lambda_i

    def compute_loss(self, y_true, y_pred):
        """Mean absolute forward difference of y_pred along each spatial axis,
        masked by (1 - contours)."""
        segments_mask = 1.0 - self.warped_contours_layer_output
        total = 0
        for axis in range(self.n_dims):
            spatial_axis = axis + 1  # axis 0 is the batch dimension
            extent = tf.shape(y_pred)[spatial_axis]
            # Forward difference along this axis.
            upper = tf.gather(y_pred, tf.range(1, extent), axis=spatial_axis)
            lower = tf.gather(y_pred, tf.range(0, extent - 1), axis=spatial_axis)
            finite_diff = upper - lower
            mask_slice = tf.gather(segments_mask, tf.range(1, extent), axis=spatial_axis)
            total += tf.reduce_mean(tf.abs(finite_diff * mask_slice))
        return total
class FakeDataset(object):
    """Minimal stand-in for an xarray Dataset used in tests.

    Variables live in ``info``, attributes in ``attrs`` and dimension sizes in
    ``dims``; lookups fall back from variables to dimensions.
    """

    def __init__(self, info, attrs, dims=None):
        # Promote raw numpy arrays to DataArrays in place, like a real Dataset.
        for var_name in list(info):
            if isinstance(info[var_name], np.ndarray):
                info[var_name] = xr.DataArray(info[var_name])
        self.info = info
        self.attrs = attrs
        self.dims = dims or {}

    def __getitem__(self, key):
        # Variables shadow dimension sizes; unknown keys yield None.
        if key in self.info:
            return self.info[key]
        return self.dims.get(key)

    def __contains__(self, key):
        return key in self.info or key in self.dims

    def rename(self, *args, **kwargs):
        # No-op: renaming is irrelevant for the fake.
        return self

    def close(self):
        # Nothing to release.
        return
class EFI_LOADED_IMAGE_PROTOCOL(STRUCT):
    """ctypes mirror of the UEFI EFI_LOADED_IMAGE_PROTOCOL structure.

    Field order, types and the 8-byte packing must match the firmware ABI
    exactly — do not reorder or retype fields.
    """
    _pack_ = 8
    _fields_ = [('Revision', UINT32), ('ParentHandle', EFI_HANDLE), ('SystemTable', PTR(EFI_SYSTEM_TABLE)), ('DeviceHandle', EFI_HANDLE), ('FilePath', PTR(EFI_DEVICE_PATH_PROTOCOL)), ('Reserved', PTR(VOID)), ('LoadOptionsSize', UINT32), ('LoadOptions', PTR(VOID)), ('ImageBase', PTR(VOID)), ('ImageSize', UINT64), ('ImageCodeType', EFI_MEMORY_TYPE), ('ImageDataType', EFI_MEMORY_TYPE), ('Unload', EFI_IMAGE_UNLOAD)]
class GradCam(Explainer):
    """Grad-CAM style edge attribution for a trained GNN: weights each edge's
    features by the mean gradient of the predicted class w.r.t. them."""

    def __init__(self, gnn_model_path):
        super(GradCam, self).__init__(gnn_model_path)

    def explain_graph(self, graph, model=None, draw_graph=0, vis_ratio=0.2):
        """Return a normalized importance score per edge of ``graph``.

        Args:
            graph: input graph with x, edge_index, edge_attr, batch and label y.
            model: model to explain; defaults to the one loaded at init.
            draw_graph: if truthy, also visualize the top edges.
            vis_ratio: fraction of edges shown when visualizing.
        """
        # BUG FIX (idiom): identity test instead of `model == None`, which
        # invokes __eq__ and is the non-idiomatic singleton comparison.
        if model is None:
            model = self.model
        # Track gradients w.r.t. edge features only.
        edge_attr = Variable(graph.edge_attr, requires_grad=True)
        pred = model(graph.x, graph.edge_index, edge_attr, graph.batch)
        # Backprop the logit of the true class.
        pred[(0, graph.y)].backward()
        edge_grads = edge_attr.grad
        # Grad-CAM weights: mean gradient per edge across feature channels.
        alpha = torch.mean(edge_grads, dim=1)
        edge_score = F.relu(torch.sum((graph.edge_attr.T * alpha).T, dim=1)).cpu().numpy()
        edge_score = self.norm_imp(edge_score)
        self.last_result = (graph, edge_score)
        if draw_graph:
            self.visualize(graph, edge_score, 'GradCam', vis_ratio=vis_ratio)
        return edge_score
class AverageMeter(object):
    """Tracks the latest value plus running sum, count and average of a metric."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def all_reduce(self):
        """Aggregate sum and count across all distributed workers in place."""
        if torch.cuda.is_available():
            device = torch.device('cuda')
        elif torch.backends.mps.is_available():
            device = torch.device('mps')
        else:
            device = torch.device('cpu')
        packed = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device)
        dist.all_reduce(packed, dist.ReduceOp.SUM, async_op=False)
        self.sum, self.count = packed.tolist()
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)

    def summary(self):
        """Render the metric according to its configured summary type."""
        if self.summary_type is Summary.NONE:
            template = ''
        elif self.summary_type is Summary.AVERAGE:
            template = '{name} {avg:.3f}'
        elif self.summary_type is Summary.SUM:
            template = '{name} {sum:.3f}'
        elif self.summary_type is Summary.COUNT:
            template = '{name} {count:.3f}'
        else:
            raise ValueError('invalid summary type %r' % self.summary_type)
        return template.format(**self.__dict__)
def test_resampling_nan_function(verbose=True, *args, **kwargs):
    """Check that resampling a NaN-containing spectrum onto a shifted,
    subsampled wavenumber grid keeps the transmittance residual below 1e-6.

    NOTE(review): ``plot`` is hard-coded to True and ``verbose`` is unused —
    confirm whether plotting was meant to be switchable.
    """
    from radis import get_residual
    from radis.test.utils import getTestFile
    from radis.tools.database import load_spec
    plot = True
    s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'), binary=True).crop(2170, 2180, 'cm-1')
    s.rescale_path_length(10)
    s.name = 'original'
    # Build an "experimental" grid: trim edges, take every 5th point, and
    # offset by 2/3 of the grid spacing to force off-grid interpolation.
    w_exp = s.get_wavenumber()[700:(- 700)][::5]
    w_exp += ((np.diff(s.get_wavenumber())[0] * 2) / 3)
    s.update('transmittance_noslit')
    # Applying a slit introduces NaNs at the spectrum edges.
    s.apply_slit(3, 'nm')
    assert s.has_nan()
    s2 = s.resample(w_exp, inplace=False)
    s2.name = 'resampled'
    assert (get_residual(s2, s, 'transmittance') < 1e-06)
    if plot:
        from radis import plot_diff
        plot_diff(s, s2, show_points=True)
class Completion(BaseModel):
    """OpenAI-style text-completion response schema."""
    id: str
    object: str
    created: int  # unix timestamp of creation
    model: str
    choices: List[TextChoice]
    usage: Optional[Usage]
    # NOTE(review): first parameter is ``cls`` but no @classmethod decorator is
    # visible — it was presumably stripped during extraction; confirm upstream.
    def create(cls, model: str, prompt: str, use_prompt_format: bool=True, max_tokens: Optional[int]=16, temperature: Optional[float]=1.0, top_p: Optional[float]=1.0, stream: bool=False, stop: Optional[List[str]]=None, frequency_penalty: float=0.0, top_k: Optional[int]=None, typical_p: Optional[float]=None, watermark: Optional[bool]=False, seed: Optional[int]=None) -> TCompletion:
        # Abstract factory stub: concrete backends provide the implementation.
        pass
class JumpToMarketItem(ContextMenuSingle):
    """Context-menu entry that jumps the market browser to an item's group."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        """Decide whether to show the entry for this context/item."""
        validContexts = ('marketItemMisc', 'fittingModule', 'fittingCharge', 'droneItem', 'implantItem', 'boosterItem', 'projectedModule', 'projectedDrone', 'projectedCharge', 'cargoItem', 'implantItemChar', 'fighterItem', 'projectedFighter')
        if srcContext not in validContexts or mainItem is None:
            return False
        # Empty slots have nothing to jump to.
        if mainItem is None or getattr(mainItem, 'isEmpty', False):
            return False
        sMkt = Market.getInstance()
        item = getattr(mainItem, 'item', mainItem)
        mktGrp = sMkt.getMarketGroupByItem(item)
        # Mutated items fall back to their base item's market group.
        if mktGrp is None and getattr(mainItem, 'isMutated', False):
            mktGrp = sMkt.getMarketGroupByItem(mainItem.baseItem)
        # 1663 is a special group with no browsable market location.
        if mktGrp is None or mktGrp.ID == 1663:
            return False
        if srcContext == 'fittingModule':
            return not mainItem.isEmpty
        return True

    def getText(self, callingWindow, itmContext, mainItem):
        label = itmContext if itmContext is not None else _t('Item')
        return _t('{0} Market Group').format(label)

    def activate(self, callingWindow, fullContext, mainItem, i):
        """Switch to the market tab and navigate to the item's group."""
        srcContext = fullContext[0]
        if srcContext in ('fittingCharge', 'projectedCharge'):
            item = mainItem.charge
        elif hasattr(mainItem, 'item'):
            # For mutated items jump via the unmutated base item.
            item = mainItem.baseItem if getattr(mainItem, 'isMutated', False) else mainItem.item
        else:
            item = mainItem
        self.mainFrame.notebookBrowsers.SetSelection(0)
        self.mainFrame.marketBrowser.jump(item)
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image-generation pipeline using the Karras et al. (2022)
    stochastic sampler (noise injection + 2nd-order correction)."""
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        scheduler = scheduler.set_format('pt')
        self.register_modules(unet=unet, scheduler=scheduler)
    # NOTE(review): this bare call looks like the remnant of a stripped
    # decorator (likely ``@torch.no_grad()``); confirm against upstream.
    _grad()
    def __call__(self, batch_size=1, num_inference_steps=50, generator=None, output_type='pil', **kwargs):
        """Sample ``batch_size`` images; returns {'sample': images}."""
        # Deprecated device handling kept for backward compatibility.
        if ('torch_device' in kwargs):
            device = kwargs.pop('torch_device')
            warnings.warn('`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0. Consider using `pipe.to(torch_device)` instead.')
            if (device is None):
                device = ('cuda' if torch.cuda.is_available() else 'cpu')
            self.to(device)
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Initial latents at the maximum noise level.
        sample = (torch.randn(*shape) * self.scheduler.config.sigma_max)
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in tqdm(self.scheduler.timesteps):
            sigma = self.scheduler.schedule[t]
            sigma_prev = (self.scheduler.schedule[(t - 1)] if (t > 0) else 0)
            # Stochastic churn: bump the sample to a slightly higher noise level.
            (sample_hat, sigma_hat) = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # Euler step using the denoiser's output.
            model_output = ((sigma_hat / 2) * model(((sample_hat + 1) / 2), (sigma_hat / 2))['sample'])
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if (sigma_prev != 0):
                # 2nd-order (Heun) correction except at the final step.
                model_output = ((sigma_prev / 2) * model(((step_output['prev_sample'] + 1) / 2), (sigma_prev / 2))['sample'])
                step_output = self.scheduler.step_correct(model_output, sigma_hat, sigma_prev, sample_hat, step_output['prev_sample'], step_output['derivative'])
            sample = step_output['prev_sample']
        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        sample = ((sample / 2) + 0.5).clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if (output_type == 'pil'):
            sample = self.numpy_to_pil(sample)
        return {'sample': sample}
class HashtagTests(RaveberryTest):
    """Exercises hashtag submission and the random hashtag shown on the page."""

    def test_empty(self) -> None:
        # A fresh database has no tags.
        self.assertFalse(Tag.objects.exists())

    def _get_random_hashtag(self) -> str:
        # Scrape the hashtag currently rendered on the musiq page.
        page = self.client.get(reverse('musiq')).content
        soup = BeautifulSoup(page, 'html.parser')
        return soup.find('span', id='hashtag-text').text

    def test_hashtag(self) -> None:
        self.client.post(reverse('submit-hashtag'), {'hashtag': '#test'})
        self.assertEqual(self._get_random_hashtag(), '#test')

    def test_no_hashtag(self) -> None:
        # The server prepends '#' when it is missing from the submission.
        self.client.post(reverse('submit-hashtag'), {'hashtag': 'test'})
        self.assertEqual(self._get_random_hashtag(), '#test')

    def test_multiple(self) -> None:
        submitted = ['#test' + str(index) for index in range(10)]
        for tag in submitted:
            self.client.post(reverse('submit-hashtag'), {'hashtag': tag})
        # Every displayed hashtag must be one of the submitted ones.
        for _ in range(10):
            self.assertTrue(self._get_random_hashtag() in submitted)
class CocoTasksGT(Dataset):
    """Ground-truth dataset for one COCO-Tasks task.

    For each image with at least one annotation, yields cropped detections
    together with their (constant 1.0) scores and preferred/non-preferred
    targets.  All preferred boxes are always included; a Poisson-distributed
    number of non-preferred boxes is sampled on top.
    """
    def __init__(self, task_number: int, set_name: str):
        assert (task_number in TASK_NUMBERS)
        # only the training split is supported by this GT dataset
        assert (set_name in ['train'])
        # mean of the Poisson draw that decides how many detections to sample per image
        self.len_lambda = 60
        self.task_number = task_number
        self.set_name = set_name
        self.only_relevant = False
        self.annotation_file = os.path.join(COCO_TASKS_ANNOTATIONS_ROOT, 'task_{}_{}.json'.format(self.task_number, self.set_name))
        # silence COCO's loading chatter
        with redirect_stdout(devnull):
            self.task_coco = COCO(self.annotation_file)
        # keep only images that have at least one annotation for this task
        self.list_of_valid_images = []
        for image_id in self.task_coco.getImgIds():
            if (len(self.task_coco.getAnnIds(imgIds=image_id)) > 0):
                self.list_of_valid_images.append(image_id)
    def __len__(self) -> int:
        return len(self.list_of_valid_images)
    def __repr__(self) -> str:
        return '{dbname}({tname}-{sname}-or:{r})'.format(dbname=self.__class__.__name__, tname=self.task_number, sname=self.set_name, r=self.only_relevant)
    def __getitem__(self, item: int) -> Tuple[(Tensor, Tensor, Tensor, List[dict])]:
        """Return (stacked crops, scores Nx1, targets Nx1, detection dicts) for one image."""
        the_image_id = self.list_of_valid_images[item]
        the_image_dict = self.task_coco.loadImgs(the_image_id)[0]
        # catIds=1 marks preferred (task-relevant) boxes, catIds=0 the rest
        preferred_anns = self.task_coco.loadAnns(self.task_coco.getAnnIds(imgIds=the_image_id, catIds=1))
        non_preferred_anns = self.task_coco.loadAnns(self.task_coco.getAnnIds(imgIds=the_image_id, catIds=0))
        I = load_image(get_image_file_name(the_image_dict))
        anns = []
        t = []
        # sample total detection budget: Poisson(len_lambda) + 1, capped by GPU memory
        size = int(round(np.random.poisson(self.len_lambda)))
        size += 1
        if (size > MAX_GPU_SIZE):
            size = MAX_GPU_SIZE
        # every preferred annotation is always included (full-size choice == shuffle);
        # note this can push the total past the `size` budget when there are many
        number_of_preferred = len(preferred_anns)
        if (number_of_preferred > 0):
            which_preferred_to_add = np.random.choice(preferred_anns, size=number_of_preferred, replace=False)
            anns.extend([a for a in which_preferred_to_add])
            t.extend(([1] * number_of_preferred))
        size -= number_of_preferred
        # fill the remaining budget with randomly chosen non-preferred boxes
        if (size > 0):
            number_of_non_preferred = len(non_preferred_anns)
            if (number_of_non_preferred > size):
                number_of_non_preferred = size
            # NOTE(review): this min() is redundant — the cap above already
            # guarantees number_of_non_preferred <= len(non_preferred_anns)
            number_of_non_preferred = min(number_of_non_preferred, len(non_preferred_anns))
            if (number_of_non_preferred > 0):
                which_non_preferred_to_add = np.random.choice(non_preferred_anns, size=number_of_non_preferred, replace=False)
                anns.extend([a for a in which_non_preferred_to_add])
                t.extend(([0] * number_of_non_preferred))
                size -= number_of_non_preferred
        # GT detections all carry score 1.0; category comes from the original COCO label
        detections = [{'bbox': a['bbox'], 'score': 1.0, 'category_id': a['COCO_category_id']} for a in anns]
        x = [image_transforms(crop_img_to_bbox(I, target_transforms(det['bbox'], I.size))) for det in detections]
        d = [float(det['score']) for det in detections]
        return (default_collate(x), Tensor(d).unsqueeze(1), Tensor(t).unsqueeze(1), detections)
def custom_collate(batch):
    """Pass-through collate for batch size 1: unwrap and return the single sample."""
    crops, scores, targets, detections = batch[0]
    return (crops, scores, targets, detections)
def asizeof(*objs, **opts):
    """Return the combined (recursive) size in bytes of all positional objects.

    Options are forwarded to the module-level sizer; with no objects the
    result is 0.  Statistics are printed when the sizer options request it.
    """
    objs_tuple, sizer_opts, exclude = _objs_opts_x(asizeof, objs, **opts)
    _asizer.reset(**sizer_opts)
    if not objs_tuple:
        return 0
    if exclude:
        # keep the measured objects themselves out of the size accounting
        _asizer.exclude_objs(objs_tuple)
    total = _asizer.asizeof(*objs_tuple)
    _asizer.print_stats(objs=objs_tuple, opts=opts)
    _asizer._clear()
    return total
def test_foldl_memory_consumption():
    """foldl's scan should keep only a short rolling output buffer, not the full sequence."""
    x = shared(np.asarray(np.random.uniform(size=(10,)), dtype=config.floatX))
    acc0 = pt.constant(np.asarray(0.0, dtype=config.floatX))
    o, _ = foldl(lambda v, acc: acc + v, x, acc0)
    run_mode = FAST_RUN.excluding('inplace')
    f0 = function([], o, mode=run_mode)
    inputs, outputs = clone_optimized_graph(f0)
    scan_nodes = grab_scan_node(outputs[0])
    assert scan_nodes is not None
    # inputs[2] of the scan node is the preallocated output storage
    f1 = function(inputs, scan_nodes[0].inputs[2])
    expected_buffer_len = 2 if config.scan__allow_output_prealloc else 1
    assert f1().shape[0] == expected_buffer_len
    # gradient of a plain sum w.r.t. x is all ones
    gx = grad(o, x)
    f2 = function([], gx)
    utt.assert_allclose(f2(), np.ones((10,)))
class PyramidPooling(Module):
    """PSPNet-style pyramid pooling.

    Pools the input at 1/2/3/6 bins, reduces each pooled map to
    in_channels/4 with a 1x1 conv + norm + ReLU, upsamples back to the
    input resolution and concatenates everything with the input.
    """

    def __init__(self, in_channels, norm_layer, up_kwargs):
        super(PyramidPooling, self).__init__()
        reduced = int(in_channels / 4)

        def make_branch():
            # 1x1 conv reducing channels by 4x, followed by norm + ReLU
            return Sequential(Conv2d(in_channels, reduced, 1, bias=False),
                              norm_layer(reduced), ReLU(True))

        self.pool1 = AdaptiveAvgPool2d(1)
        self.pool2 = AdaptiveAvgPool2d(2)
        self.pool3 = AdaptiveAvgPool2d(3)
        self.pool4 = AdaptiveAvgPool2d(6)
        self.conv1 = make_branch()
        self.conv2 = make_branch()
        self.conv3 = make_branch()
        self.conv4 = make_branch()
        self._up_kwargs = up_kwargs

    def forward(self, x):
        """Return the concatenation of x with its four upsampled pyramid features."""
        _, _, height, width = x.size()
        features = [x]
        for pool, conv in ((self.pool1, self.conv1), (self.pool2, self.conv2),
                           (self.pool3, self.conv3), (self.pool4, self.conv4)):
            features.append(F.interpolate(conv(pool(x)), (height, width), **self._up_kwargs))
        return torch.cat(features, 1)
def _offline_song_suggestions(query: str) -> List[SuggestionResult]:
    """Suggest previously archived songs matching *query* without any network lookup.

    In DEBUG (sqlite) a chained icontains filter is used; otherwise a
    Postgres search helper.  Results are filtered by platform availability
    and duplicate display names are flagged as confusable.
    """
    results: List[SuggestionResult] = []
    terms = query.split()
    song_results: Iterable[Mapping[(str, Any)]]
    if settings.DEBUG:
        # slow fallback: every term must match title, artist or a past query string
        matching_songs = ArchivedSong.objects.prefetch_related('queries')
        for term in terms:
            matching_songs = matching_songs.filter(((Q(title__icontains=term) | Q(artist__icontains=term)) | Q(queries__query__icontains=term)))
        song_results = matching_songs.order_by('-counter').annotate(u_id=F('id'), u_url=F('url'), u_artist=F('artist'), u_title=F('title'), u_duration=F('duration'), u_counter=F('counter'), u_cached=F('cached')).values(*u_values_list).distinct()[:storage.get('number_of_suggestions')]
    else:
        song_results = _postgres_song_results(query)
    has_internet = redis.get('has_internet')
    for song in song_results:
        # drop songs with forbidden artist or title outright
        if (song_utils.is_forbidden(song['u_artist']) or song_utils.is_forbidden(song['u_title'])):
            continue
        platform = song_utils.determine_url_type(song['u_url'])
        # without internet only locally cached songs are playable
        if ((not has_internet) and (not song['u_cached'])):
            continue
        if (platform == 'local'):
            # a local song whose file is gone from the cache cannot be played
            if (not song['u_cached']):
                continue
        else:
            assert (platform in ['youtube', 'spotify', 'soundcloud', 'jamendo'])
            # skip platforms the admin has disabled
            if (not storage.get(cast(PlatformEnabled, f'{platform}_enabled'))):
                continue
        result_dict: SuggestionResult = {'key': song['u_id'], 'value': song_utils.displayname(song['u_artist'], song['u_title']), 'counter': song['u_counter'], 'type': platform, 'durationFormatted': song_utils.format_seconds(song['u_duration'])}
        results.append(result_dict)
    # flag every entry that shares its display name with another so the
    # frontend can disambiguate them
    seen_values: Dict[(str, int)] = {}
    for (index, result) in enumerate(results):
        if (result['value'] in seen_values):
            result['confusable'] = True
            results[seen_values[result['value']]]['confusable'] = True
        seen_values[result['value']] = index
    return results
def is_pipeline_test(test_case):
    """Mark *test_case* as a pipeline test.

    When pipeline tests are disabled the case is skipped; when pytest is
    unavailable the case is returned unchanged; otherwise it is tagged with
    the ``is_pipeline_test`` pytest mark.
    """
    if not _run_pipeline_tests:
        return unittest.skip('test is pipeline test')(test_case)
    try:
        import pytest
    except ImportError:
        # no pytest available (plain unittest run): nothing to mark
        return test_case
    return pytest.mark.is_pipeline_test()(test_case)
class ScreenMode():
    """A display mode of a screen: resolution, color depth and refresh rate.

    The class attributes act as defaults; concrete values are filled in by
    subclasses or callers after construction.
    """

    # mode parameters, populated after construction
    width = None
    height = None
    depth = None
    rate = None

    def __init__(self, screen):
        # the screen this mode belongs to
        self.screen = screen

    def __repr__(self):
        # use !r for every field: `rate` previously lacked it, which was
        # inconsistent with the other fields and hid quoting for
        # string-valued rates
        return f'{self.__class__.__name__}(width={self.width!r}, height={self.height!r}, depth={self.depth!r}, rate={self.rate!r})'
@pytest.mark.supported(only_if=(lambda backend: backend.dh_supported()), skip_message='DH not supported')
class TestDHSerialization():
    """DH key serialization must reject every encoding/format pair that does not apply.

    The decorator lines had lost their ``@pytest.mark.`` prefix (they read
    ``.supported(...)`` / ``.skip_fips(...)``, which is a syntax error);
    restored here.
    """

    @pytest.mark.skip_fips(reason='non-FIPS parameters')
    def test_dh_public_key(self, backend):
        """Invalid (encoding, format) combinations raise ValueError for public keys."""
        data = load_vectors_from_file(os.path.join('asymmetric', 'DH', 'dhkey.pem'), (lambda pemfile: pemfile.read()), mode='rb')
        public_key = load_pem_private_key(data, None, backend).public_key()
        for enc in (Encoding.PEM, Encoding.DER, Encoding.OpenSSH, Encoding.Raw, Encoding.X962):
            for fmt in (PublicFormat.SubjectPublicKeyInfo, PublicFormat.PKCS1, PublicFormat.OpenSSH, PublicFormat.Raw, PublicFormat.CompressedPoint, PublicFormat.UncompressedPoint):
                # PEM/DER + SubjectPublicKeyInfo is the only valid combination
                if ((enc in (Encoding.PEM, Encoding.DER)) and (fmt == PublicFormat.SubjectPublicKeyInfo)):
                    continue
                with pytest.raises(ValueError):
                    public_key.public_bytes(enc, fmt)

    @pytest.mark.skip_fips(reason='non-FIPS parameters')
    def test_dh_private_key(self, backend):
        """Invalid (encoding, format) combinations raise ValueError for private keys."""
        data = load_vectors_from_file(os.path.join('asymmetric', 'DH', 'dhkey.pem'), (lambda pemfile: pemfile.read()), mode='rb')
        private_key = load_pem_private_key(data, None, backend)
        for enc in (Encoding.PEM, Encoding.DER, Encoding.OpenSSH, Encoding.Raw, Encoding.X962):
            for fmt in (PrivateFormat.PKCS8, PrivateFormat.TraditionalOpenSSL, PrivateFormat.Raw):
                # PEM/DER + PKCS8 is the only valid combination
                if ((enc in (Encoding.PEM, Encoding.DER)) and (fmt is PrivateFormat.PKCS8)):
                    continue
                with pytest.raises(ValueError):
                    private_key.private_bytes(enc, fmt, NoEncryption())
class TFControl():
    """Discrete first-order transfer-function controller.

    Discretizes k * (n1*s + n0) / (d1*s + d0) with the Tustin (bilinear)
    transform at sample period Ts, and saturates the output at +/-limit.
    """

    def __init__(self, k=0.0, n0=0.0, n1=0.0, d0=0.0, d1=0.0, Ts=0.01, limit=1.0):
        self.k = k
        self.n0 = n0
        self.n1 = n1
        self.d0 = d0
        self.d1 = d1
        self.Ts = Ts
        self.limit = limit
        # current (unused here, kept for callers) and one-step-delayed signals
        self.y = 0.0
        self.u = 0.0
        self.y_delay_1 = 0.0
        self.u_delay_1 = 0.0
        # Tustin-discretized difference-equation coefficients
        denom = ((2.0 * d1) + (Ts * d0))
        self.b0 = (((- k) * ((2.0 * n1) - (Ts * n0))) / denom)
        self.b1 = ((k * ((2.0 * n1) + (Ts * n0))) / denom)
        self.a0 = ((((2.0 * d1) - (Ts * d0))) / denom)

    def update(self, y):
        """Advance one sample with measurement *y*; return the saturated output."""
        u = (((self.a0 * self.u_delay_1) + (self.b1 * y)) + (self.b0 * self.y_delay_1))
        u_sat = self._saturate(u)
        self.y_delay_1 = y
        self.u_delay_1 = u_sat
        return u_sat

    def _saturate(self, u):
        """Clamp *u* into [-limit, limit]."""
        if (u >= self.limit):
            return self.limit
        if (u <= (- self.limit)):
            return (- self.limit)
        return u
def init(disp, _info):
    """Register all DPMS extension methods on the display object."""
    handlers = (
        ('dpms_get_version', get_version),
        ('dpms_capable', capable),
        ('dpms_get_timeouts', get_timeouts),
        ('dpms_set_timeouts', set_timeouts),
        ('dpms_enable', enable),
        ('dpms_disable', disable),
        ('dpms_force_level', force_level),
        ('dpms_info', info),
    )
    for method_name, handler in handlers:
        disp.extension_add_method('display', method_name, handler)
def get_canonical_path(project, resource, offset):
    """Return the canonical path to the definition at *offset* as (name, kind) pairs.

    The list runs from the defining module down to the innermost scope;
    returns None when no definition location can be found.
    """
    pymod = project.get_pymodule(resource)
    pyname = evaluate.eval_location(pymod, offset)
    defmod, lineno = pyname.get_definition_location()
    if not defmod:
        return None
    scope = defmod.get_scope().get_inner_scope_for_line(lineno)
    names = []
    # parameters and plain assignments contribute a leaf entry of their own
    if isinstance(pyname, pynamesdef.ParameterName):
        names.append((worder.get_name_at(pymod.get_resource(), offset), 'PARAMETER'))
    elif isinstance(pyname, pynamesdef.AssignedName):
        names.append((worder.get_name_at(pymod.get_resource(), offset), 'VARIABLE'))
    # walk outwards through the enclosing function/class scopes
    while scope.parent:
        if isinstance(scope, pyscopes.FunctionScope):
            scope_kind = 'FUNCTION'
        elif isinstance(scope, pyscopes.ClassScope):
            scope_kind = 'CLASS'
        else:
            scope_kind = None
        names.append((scope.pyobject.get_name(), scope_kind))
        scope = scope.parent
    names.append((defmod.get_resource().real_path, 'MODULE'))
    # built innermost-first; callers expect module-first order
    return names[::-1]
def loadAWSInstanceProfiles(neo4j_session, data_path, account_name):
    """Load AWS IAM instance profiles for one account into Neo4j.

    Merges one AWSInstanceProfile node per profile ARN, setting the same
    properties on both match and create so re-runs refresh existing nodes.
    """
    logger.info("[*] Loading AWS Role Instance Profiles into neo4j instance for AWS account '%s'", account_name)
    # parameterized Cypher MERGE keyed on the profile ARN
    ingest_role_instance_profiles = 'merge (instanceprofile:AWSInstanceProfile {Arn:$Arn}) \n\t\t\t\t\t\t\t\t\ton match set \n\t\t\t\t\t\t\t\t\tinstanceprofile.AccountNo=$AccountNo,\n\t\t\t\t\t\t\t\t\tinstanceprofile.InstanceProfileName=$InstanceProfileName,\n\t\t\t\t\t\t\t\t\tinstanceprofile.InstanceProfileId=$InstanceProfileId, \n\t\t\t\t\t\t\t\t\tinstanceprofile.SourceRoleArn=$SourceRoleArn,\n\t\t\t\t\t\t\t\t\tinstanceprofile.CreateDate=$CreateDate,\n\t\t\t\t\t\t\t\t\tinstanceprofile.Path=$Path\n\t\t\t\t\t\t\t\t\ton create set\n\t\t\t\t\t\t\t\t\tinstanceprofile.AccountNo=$AccountNo,\n\t\t\t\t\t\t\t\t\tinstanceprofile.InstanceProfileName=$InstanceProfileName,\n\t\t\t\t\t\t\t\t\tinstanceprofile.InstanceProfileId=$InstanceProfileId, \n\t\t\t\t\t\t\t\t\tinstanceprofile.SourceRoleArn=$SourceRoleArn,\n\t\t\t\t\t\t\t\t\tinstanceprofile.CreateDate=$CreateDate,\n\t\t\t\t\t\t\t\t\tinstanceprofile.Path=$Path\n\t\t\t\t\t\t\t\t\t'
    instance_profiles = getAWSInstanceProfiles(data_path, account_name)
    for instance_profile in instance_profiles:
        # the account number is the 5th colon-separated ARN component
        neo4j_session.run(ingest_role_instance_profiles, Arn=instance_profile['Arn'], AccountNo=instance_profile['Arn'].split(':')[4], InstanceProfileName=instance_profile['InstanceProfileName'], InstanceProfileId=instance_profile['InstanceProfileId'], SourceRoleArn=instance_profile['SourceRoleArn'], CreateDate=instance_profile['CreateDate'], Path=instance_profile['Path'])
    logger.info("[*] Completed loading AWS Role Instance Profiles into neo4j instance for AWS account '%s'", account_name)
class OSISAFL3NCFileHandler(NetCDF4FileHandler):
    """Reader for OSISAF level-3 netCDF products (sea ice, SST, radiative flux).

    The product grid (EASE / polar stereographic / geographic) is resolved
    from the filename when it encodes one, otherwise from the configured
    file type.  Dataset loading applies valid-range masking, fill-value
    masking and linear scaling from the file attributes.
    """

    def _get_ease_grid(self):
        """Build the EASE (Lambert azimuthal equal-area) AreaDefinition from file metadata."""
        from pyresample import create_area_def
        proj4str = self['Lambert_Azimuthal_Grid/attr/proj4_string']
        x_size = self['/dimension/xc']
        y_size = self['/dimension/yc']
        # grid corners: lower-left = last row/first column, upper-right = first row/last column
        p_lowerleft_lat = self['lat'].values[((y_size - 1), 0)]
        p_lowerleft_lon = self['lon'].values[((y_size - 1), 0)]
        p_upperright_lat = self['lat'].values[(0, (x_size - 1))]
        p_upperright_lon = self['lon'].values[(0, (x_size - 1))]
        area_extent = [p_lowerleft_lon, p_lowerleft_lat, p_upperright_lon, p_upperright_lat]
        area_def = create_area_def(area_id='osisaf_lambert_azimuthal_equal_area', description='osisaf_lambert_azimuthal_equal_area', proj_id='osisaf_lambert_azimuthal_equal_area', projection=proj4str, width=x_size, height=y_size, area_extent=area_extent, units='deg')
        return area_def

    def _get_geographic_grid(self):
        """Build a plain lon/lat AreaDefinition spanning the file's lat/lon extremes."""
        from pyresample import create_area_def
        x_size = self['/dimension/lon']
        y_size = self['/dimension/lat']
        lat_0 = self['lat'].min()
        lon_0 = self['lon'].min()
        lat_1 = self['lat'].max()
        lon_1 = self['lon'].max()
        area_extent = [lon_0, lat_1, lon_1, lat_0]
        area_def = create_area_def(area_id='osisaf_geographic_area', description='osisaf_geographic_area', proj_id='osisaf_geographic_area', projection='+proj=lonlat', width=x_size, height=y_size, area_extent=area_extent, units='deg')
        return area_def

    def _get_polar_stereographic_grid(self):
        """Build the polar stereographic AreaDefinition.

        Uses the stored proj4 string when present, otherwise reconstructs
        one from the individual projection attributes.
        """
        from pyresample import create_area_def
        try:
            proj4str = self['Polar_Stereographic_Grid/attr/proj4_string']
        except KeyError:
            # older files lack proj4_string; rebuild it from the CF attributes
            sma = self['Polar_Stereographic_Grid/attr/semi_major_axis']
            smb = self['Polar_Stereographic_Grid/attr/semi_minor_axis']
            lon_0 = self['Polar_Stereographic_Grid/attr/straight_vertical_longitude_from_pole']
            lat_0 = self['Polar_Stereographic_Grid/attr/latitude_of_projection_origin']
            lat_ts = self['Polar_Stereographic_Grid/attr/standard_parallel']
            proj4str = f'+a={sma} +b={smb} +lat_ts={lat_ts} +lon_0={lon_0} +proj=stere +lat_0={lat_0}'
        x_size = self['/dimension/xc']
        y_size = self['/dimension/yc']
        p_lowerleft_lat = self['lat'].values[((y_size - 1), 0)]
        p_lowerleft_lon = self['lon'].values[((y_size - 1), 0)]
        p_upperright_lat = self['lat'].values[(0, (x_size - 1))]
        p_upperright_lon = self['lon'].values[(0, (x_size - 1))]
        area_extent = [p_lowerleft_lon, p_lowerleft_lat, p_upperright_lon, p_upperright_lat]
        area_def = create_area_def(area_id='osisaf_polar_stereographic', description='osisaf_polar_stereographic', proj_id='osisaf_polar_stereographic', projection=proj4str, width=x_size, height=y_size, area_extent=area_extent, units='deg')
        return area_def

    def _get_finfo_grid(self):
        """Select the grid based on the 'grid' field parsed from the filename."""
        if (self.filename_info['grid'] == 'ease'):
            self.area_def = self._get_ease_grid()
            return self.area_def
        elif ((self.filename_info['grid'] == 'polstere') or (self.filename_info['grid'] == 'stere')):
            self.area_def = self._get_polar_stereographic_grid()
            return self.area_def
        else:
            raise ValueError(f"Unknown grid type: {self.filename_info['grid']}")

    def _get_ftype_grid(self):
        """Select the grid based on the configured file type."""
        if (self.filetype_info['file_type'] == 'osi_radflux_grid'):
            self.area_def = self._get_geographic_grid()
            return self.area_def
        elif (self.filetype_info['file_type'] in ['osi_sst', 'osi_sea_ice_conc']):
            self.area_def = self._get_polar_stereographic_grid()
            return self.area_def
        # NOTE(review): an unrecognized file_type silently returns None here,
        # unlike _get_finfo_grid which raises — confirm this is intended.

    def get_area_def(self, area_id):
        """Return the AreaDefinition, preferring the filename's grid field."""
        if ('grid' in self.filename_info):
            return self._get_finfo_grid()
        else:
            return self._get_ftype_grid()

    def _get_ds_units(self, ds_info, var_path):
        """Resolve dataset units: YAML config first, then file attribute, then 1."""
        file_units = ds_info.get('file_units')
        if (file_units is None):
            file_units = self.get((var_path + '/attr/units'))
        if (file_units is None):
            file_units = 1
        return file_units

    def get_dataset(self, dataset_id, ds_info):
        """Load one dataset, applying valid range, fill value and linear scaling."""
        logger.debug(f"Reading {dataset_id['name']} from {self.filename}")
        var_path = ds_info.get('file_key', f"{dataset_id['name']}")
        shape = self[(var_path + '/shape')]
        data = self[var_path]
        if (shape[0] == 1):
            # drop a singleton leading (time) dimension
            data = data[0]
        file_units = self._get_ds_units(ds_info, var_path)
        valid_min = self.get((var_path + '/attr/valid_min'))
        valid_max = self.get((var_path + '/attr/valid_max'))
        if ((valid_min is not None) and (valid_max is not None)):
            data = data.where((data >= valid_min))
            data = data.where((data <= valid_max))
        fill_value = self.get((var_path + '/attr/_FillValue'))
        if (fill_value is not None):
            data = data.where((data != fill_value))
        scale_factor = self.get((var_path + '/attr/scale_factor'))
        scale_offset = self.get((var_path + '/attr/add_offset'))
        if ((scale_offset is not None) and (scale_factor is not None)):
            data = ((data * scale_factor) + scale_offset)
        # rename spatial dims to the x/y names satpy expects
        if (self.filetype_info['file_type'] == 'osi_radflux_grid'):
            data = data.rename({'lon': 'x', 'lat': 'y'})
        else:
            data = data.rename({'xc': 'x', 'yc': 'y'})
        ds_info.update({'units': ds_info.get('units', file_units), 'platform_name': self._get_platname(), 'sensor': self._get_instname()})
        ds_info.update(dataset_id.to_dict())
        data.attrs.update(ds_info)
        return data

    def _get_instname(self):
        """Return the instrument name, trying the known attribute spellings."""
        try:
            return self['/attr/instrument_name']
        except KeyError:
            try:
                return self['/attr/sensor']
            except KeyError:
                return 'unknown_sensor'

    def _get_platname(self):
        """Return the platform name, trying the known attribute spellings."""
        try:
            return self['/attr/platform_name']
        except KeyError:
            return self['/attr/platform']

    @staticmethod
    def _parse_datetime(datestr):
        """Parse *datestr* against the known OSISAF time formats.

        The @staticmethod decorator is required: callers invoke this as
        ``self._parse_datetime(...)``, which without it would pass ``self``
        as ``datestr`` and raise a TypeError.
        """
        for dt_format in ('%Y-%m-%d %H:%M:%S', '%Y%m%dT%H%M%SZ', '%Y-%m-%dT%H:%M:%SZ'):
            try:
                return datetime.strptime(datestr, dt_format)
            except ValueError:
                continue
        raise ValueError(f'Unsupported date format: {datestr}')

    # NOTE(review): satpy file handlers conventionally expose start_time /
    # end_time as @property — confirm whether these were meant to be
    # properties before changing them, as that would alter the call syntax.
    def start_time(self):
        """Return the coverage start time from the first available attribute."""
        poss_names = ['/attr/start_date', '/attr/start_time', '/attr/time_coverage_start']
        for name in poss_names:
            start_t = self.get(name)
            if (start_t is not None):
                break
        if (start_t is None):
            raise ValueError('Unknown start time attribute.')
        return self._parse_datetime(start_t)

    def end_time(self):
        """Return the coverage end time from the first available attribute."""
        poss_names = ['/attr/stop_date', '/attr/stop_time', '/attr/time_coverage_end']
        for name in poss_names:
            end_t = self.get(name)
            if (end_t is not None):
                break
        if (end_t is None):
            raise ValueError('Unknown stop time attribute.')
        return self._parse_datetime(end_t)
def fmt_item(x, l):
    """Right-justify *x* in a field of width *l*.

    Floats are printed with 5 decimals, switching to scientific notation
    for nonzero magnitudes outside [1e-4, 1e4]; 0-d numpy arrays are
    unwrapped first; everything else uses str().
    """
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    if isinstance(x, (float, np.float32, np.float64)):
        magnitude = abs(x)
        use_scientific = magnitude > 0 and (magnitude < 0.0001 or magnitude > 10000.0)
        rep = ('%7.2e' % x) if use_scientific else ('%7.5f' % x)
    else:
        rep = str(x)
    return ' ' * (l - len(rep)) + rep
def component(function: Callable[(..., (((ComponentType | VdomDict) | str) | None))]) -> Callable[(..., Component)]:
    """Turn a render function into a Component constructor.

    Rejects render functions that declare a ``key`` parameter, since
    ``key`` is reserved for the constructor itself.

    Fix: the line above ``def constructor`` was a bare ``(function)``
    expression — a no-op — where the ``@wraps(function)`` decorator
    belonged, so constructors did not inherit the render function's
    name and docstring.
    """
    from functools import wraps

    sig = inspect.signature(function)
    if (('key' in sig.parameters) and (sig.parameters['key'].kind in (inspect.Parameter.KEYWORD_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD))):
        msg = f"Component render function {function} uses reserved parameter 'key'"
        raise TypeError(msg)

    @wraps(function)
    def constructor(*args: Any, key: (Any | None)=None, **kwargs: Any) -> Component:
        return Component(function, key, args, kwargs, sig)

    return constructor
def main(argv: List[str]):
    """Distributed DLRM training entry point (torchrec).

    Initializes the process group, shards fused embedding tables across
    ranks, then trains on the Criteo-style binary dataset with periodic
    throughput reporting and validation/test evaluation.
    """
    args = parse_args(argv)
    rank = int(os.environ['LOCAL_RANK'])
    print('Running with args', args)
    # pick NCCL on GPU hosts, gloo otherwise
    if torch.cuda.is_available():
        device: torch.device = torch.device(f'cuda:{rank}')
        backend = 'nccl'
        torch.cuda.set_device(device)
    else:
        device: torch.device = torch.device('cpu')
        backend = 'gloo'
    if (not torch.distributed.is_initialized()):
        dist.init_process_group(backend=backend)
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    # optional per-feature table sizes, comma-separated on the CLI
    num_embeddings_per_feature = None
    if (args.num_embeddings_per_feature is not None):
        num_embeddings_per_feature = list(map(int, args.num_embeddings_per_feature.split(',')))
    train_loader = NvtBinaryDataloader(binary_file_path=os.path.join(args.binary_path, 'train'), batch_size=args.batch_size).get_dataloader(rank=rank, world_size=world_size)
    val_loader = NvtBinaryDataloader(binary_file_path=os.path.join(args.binary_path, 'validation'), batch_size=args.batch_size).get_dataloader(rank=rank, world_size=world_size)
    test_loader = NvtBinaryDataloader(binary_file_path=os.path.join(args.binary_path, 'test'), batch_size=args.batch_size).get_dataloader(rank=rank, world_size=world_size)
    # one embedding-bag config per categorical feature
    eb_configs = [EmbeddingBagConfig(name=f't_{feature_name}', embedding_dim=args.embedding_dim, num_embeddings=(none_throws(num_embeddings_per_feature)[feature_idx] if (num_embeddings_per_feature is not None) else args.num_embeddings), feature_names=[feature_name]) for (feature_idx, feature_name) in enumerate(DEFAULT_CAT_NAMES)]
    # build the model on the meta device; sharding materializes it later
    train_model = DLRMTrain(DLRM(embedding_bag_collection=EmbeddingBagCollection(tables=eb_configs, device=torch.device('meta')), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=list(map(int, args.dense_arch_layer_sizes.split(','))), over_arch_layer_sizes=list(map(int, args.over_arch_layer_sizes.split(','))), dense_device=device))
    # fuse the embedding optimizer into the backward pass
    train_model = fuse_embedding_optimizer(train_model, optimizer_type=(torchrec.optim.RowWiseAdagrad if args.adagrad else torch.optim.SGD), optimizer_kwargs={'learning_rate': args.learning_rate}, device=torch.device('meta'))
    sharders = cast(List[ModuleSharder[nn.Module]], [FusedEmbeddingBagCollectionSharder()])
    pg = dist.GroupMember.WORLD
    hbm_cap = torch.cuda.get_device_properties(device).total_memory
    local_world_size = trec_dist.comm.get_local_size(world_size)
    # plan and apply the sharding across the process group
    model = DistributedModelParallel(module=train_model, device=device, env=trec_dist.ShardingEnv.from_process_group(pg), plan=trec_dist.planner.EmbeddingShardingPlanner(topology=trec_dist.planner.Topology(world_size=world_size, compute_device=device.type, local_world_size=local_world_size, hbm_cap=hbm_cap), storage_reservation=trec_dist.planner.storage_reservations.HeuristicalStorageReservation(percentage=0.25), batch_size=args.batch_size).collective_plan(train_model, sharders, pg), sharders=sharders)
    # dense (non-fused) parameters get their own optimizer, combined with the fused one
    non_fused_optimizer = KeyedOptimizerWrapper(dict(in_backward_optimizer_filter(model.named_parameters())), (lambda params: (torch.optim.Adagrad(params, lr=args.learning_rate) if args.adagrad else torch.optim.SGD(params, lr=args.learning_rate))))
    opt = trec_optim.keyed.CombinedOptimizer([non_fused_optimizer, model.fused_optimizer])
    train_pipeline = TrainPipelineSparseDist(model, opt, device)
    throughput = ThroughputMetric(batch_size=args.batch_size, world_size=world_size, window_seconds=30, warmup_steps=10)
    # global step at which the learning rate is switched, in per-rank batches
    changing_point_steps = (((TOTAL_TRAINING_SAMPLES * args.lr_change_point) // args.batch_size) // world_size)
    for epoch in range(args.epochs):
        print(f'starting the {epoch} epoch now')
        start_time = time.time()
        it = iter(train_loader)
        step = 0
        losses = []
        while True:
            try:
                train_pipeline._model.train()
                (loss, _logits, _labels) = train_pipeline.progress(it)
                # one-time LR change once the configured sample fraction is reached
                if (args.change_lr and (step == changing_point_steps)):
                    print(f'Changing learning rate to: {args.lr_after_change_point}')
                    optimizer = train_pipeline._optimizer
                    lr = args.lr_after_change_point
                    for g in optimizer.param_groups:
                        g['lr'] = lr
                throughput.update()
                losses.append(loss)
                # periodic throughput report (skipping step 0)
                if (((step % args.throughput_check_freq_within_epoch) == 0) and (step != 0)):
                    throughput_val = throughput.compute()
                    if (rank == 0):
                        print('step', step)
                        print('throughput', throughput_val)
                    losses = []
                # periodic in-epoch evaluation on validation and test splits
                if (((step % args.validation_freq_within_epoch) == 0) and (step != 0)):
                    validation_it = iter(val_loader)
                    (auroc_result, accuracy_result, bce_loss) = _eval(train_pipeline, validation_it)
                    if (rank == 0):
                        print(f'AUROC over validation set: {auroc_result}.')
                        print(f'Accuracy over validation set: {accuracy_result}.')
                        print('binary cross entropy loss', (bce_loss / args.batch_size))
                    test_it = iter(test_loader)
                    (auroc_result, accuracy_result, bce_loss) = _eval(train_pipeline, test_it)
                    if (rank == 0):
                        print(f'AUROC over test set: {auroc_result}.')
                        print(f'Accuracy over test set: {accuracy_result}.')
                step += 1
            except StopIteration:
                # dataloader exhausted: end of epoch
                print('Reached stop iteration')
                break
        train_time = time.time()
        if (rank == 0):
            print(f'this epoch training takes {(train_time - start_time)}')
        # end-of-epoch evaluation on validation and test splits
        val_it = iter(val_loader)
        (auroc_result, accuracy_result, bce_loss) = _eval(train_pipeline, val_it)
        if (rank == 0):
            print(f'AUROC over validation set: {auroc_result}.')
            print(f'Accuracy over validation set: {accuracy_result}.')
            print('binary cross entropy loss over validation set', (bce_loss / args.batch_size))
        test_it = iter(test_loader)
        (auroc_result, accuracy_result, bce_loss) = _eval(train_pipeline, test_it)
        if (rank == 0):
            print(f'AUROC over test set: {auroc_result}.')
            print(f'Accuracy over test set: {accuracy_result}.')
            print('binary cross entropy loss over test set', (bce_loss / args.batch_size))
class IPCCommandInterface(CommandInterface):
    """Command interface that forwards command-graph calls over an IPC client."""

    def __init__(self, ipc_client: ipc.Client):
        self._client = ipc_client

    def execute(self, call: CommandGraphCall, args: tuple, kwargs: dict) -> Any:
        """Send *call* over IPC and unpack the (status, result) reply.

        Raises CommandError for ERROR replies and CommandException for any
        other non-success status.
        """
        reply = self._client.send((call.parent.selectors, call.name, args, kwargs))
        status, result = reply
        if status == SUCCESS:
            return result
        if status == ERROR:
            raise CommandError(result)
        raise CommandException(result)

    def has_command(self, node: CommandGraphNode, command: str) -> bool:
        """Return True if *node* exposes *command*."""
        available = self.execute(node.call('commands'), (), {})
        return command in available

    def has_item(self, node: CommandGraphNode, object_type: str, item: (str | int)) -> bool:
        """Return True if *node* contains *item* of *object_type*."""
        _, items = self.execute(node.call('items'), (object_type,), {})
        return (items is not None) and (item in items)
def check_frame_wise(cur_path, init_dirname, new_dirname):
    """Verify every converted .avi clip keeps the original's frame count.

    Walks class directories under the original tree, compares the frame
    count of each clip against its converted counterpart, and raises on
    the first mismatch.
    """
    new_root = os.path.join(cur_path, new_dirname)
    init_root = os.path.join(cur_path, init_dirname)
    for class_name in os.listdir(init_root):
        print(class_name)
        # skip stray files and the conversion-list directory
        if ('.' in class_name) or (class_name == 'list_cvt_v1'):
            continue
        init_class_dir = os.path.join(init_root, class_name)
        new_class_dir = os.path.join(new_root, class_name)
        for clip_name in os.listdir(init_class_dir):
            if os.path.splitext(clip_name)[1] != '.avi':
                continue
            cap_old = cv2.VideoCapture(os.path.join(init_class_dir, clip_name))
            cap_new = cv2.VideoCapture(os.path.join(new_class_dir, clip_name))
            old_frames = int(cap_old.get(cv2.CAP_PROP_FRAME_COUNT))
            new_frames = int(cap_new.get(cv2.CAP_PROP_FRAME_COUNT))
            if old_frames != new_frames:
                raise Exception('Fail to transform %s' % clip_name)
        print('The transformation of %s is successful' % class_name)
def autofill(args):
    """Fill in derived defaults on the parsed argument namespace.

    Defaults the task name to the current directory's basename, places the
    log file under ./exps/logs when that directory exists (falling back to
    a hidden file in cwd), and derives the model prefix from the model dir.
    """
    if not args.task_name:
        args.task_name = os.path.basename(os.getcwd())
    if not args.log_file:
        host = socket.gethostname()
        if os.path.exists('./exps/logs'):
            args.log_file = './exps/logs/{}_at-{}.log'.format(args.task_name, host)
        else:
            args.log_file = '.{}_at-{}.log'.format(args.task_name, host)
    args.model_prefix = os.path.join(args.model_dir, args.task_name)
    return args
def printHelp():
    """Print usage information for the JSON-polygons-to-label-image converter."""
    prog = os.path.basename(sys.argv[0])
    for line in (
        '{} [OPTIONS] inputJson outputImg'.format(prog),
        '',
        'Reads labels as polygons in JSON format and converts them to label images,',
        'where each pixel has an ID that represents the ground truth label.',
        '',
        'Options:',
        ' -h Print this help',
        ' -t Use the "trainIDs" instead of the regular mapping. See "labels.py" for details.',
    ):
        print(line)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.