code stringlengths 281 23.7M |
|---|
class downResBlock_3x3(nn.Module):
    """2x-downsampling residual block.

    Main branch: 3x3 stride-1 conv -> norm -> activation -> 4x4 stride-2 conv.
    Shortcut branch: a single 4x4 stride-2 conv. The two are summed, then
    normalized and activated. All components (conv, norm, activation) are
    injectable; unspecified ones fall back to nn.Conv2d / Identity / LeakyReLU.
    """

    def __init__(self, in_c, out_c, hid_c=None, conv2d=None, norm_layer=None, non_linear=None):
        super(downResBlock_3x3, self).__init__()
        # Fall back to defaults for any component the caller did not supply.
        hid_c = in_c if hid_c is None else hid_c
        conv2d = nn.Conv2d if conv2d is None else conv2d
        norm_layer = Identity if norm_layer is None else norm_layer
        non_linear = nn.LeakyReLU if non_linear is None else non_linear
        self.__build_block(in_c, out_c, hid_c, conv2d, norm_layer, non_linear)

    def __build_block(self, in_c, out_c, hid_c, conv2d, norm_layer, non_linear):
        # Refinement at full resolution, then a strided conv halves H and W.
        self.main_path = nn.Sequential(
            conv2d(in_c, hid_c, kernel_size=3, padding=1, stride=1),
            norm_layer(hid_c),
            non_linear(),
            conv2d(hid_c, out_c, kernel_size=4, padding=1, stride=2),
        )
        # Shortcut matches the main branch's output shape via the same stride.
        self.side_path = conv2d(in_c, out_c, kernel_size=4, padding=1, stride=2)
        self.output_layer = nn.Sequential(norm_layer(out_c), non_linear())

    def forward(self, input_var):
        main = self.main_path(input_var)
        shortcut = self.side_path(input_var)
        return self.output_layer(shortcut + main)
def main():
    """CLI entry point: group input files by sample name and run the ArchR
    R script once per sample (skipped entirely under --dry)."""
    args = build_parser().parse_args()
    grouped = collections.defaultdict(list)
    for path in args.files:
        sample = get_file_samplename(path, strip_rep=True)
        grouped[sample].append(os.path.abspath(path))
    for sample, paths in grouped.items():
        # Per-replicate names keep the rep suffix; files were grouped without it.
        rep_names = [get_file_samplename(p, strip_rep=False) for p in paths]
        cmd = f"Rscript {ARCHR_SCRIPT_PATH} -f {','.join(paths)} -n {','.join(rep_names)} -o {sample} -g {args.genome}"
        logging.info(f'Command to run: {cmd}')
        if not args.dry:
            # check=True raises if the R script exits non-zero.
            subprocess.run(shlex.split(cmd), check=True)
def calculate_coordinates_shell(distance, num_dimensions, distance_step_size):
    """Dispatch to the dimension-specific shell-coordinate generator.

    Only 1, 2 and 3 dimensions are supported; the 1-D case ignores the step
    size. Raises ValueError for any other dimension count.
    """
    dispatch = {
        1: lambda: calculate_coordinates_shell_1d(distance),
        2: lambda: calculate_coordinates_shell_2d(distance, distance_step_size),
        3: lambda: calculate_coordinates_shell_3d(distance, distance_step_size),
    }
    handler = dispatch.get(num_dimensions)
    if handler is None:
        raise ValueError('No valid dimension')
    return handler()
class TestCLIIntegration(TestCase):
    """End-to-end checks that exercise the installed jsonschema CLI in a subprocess."""

    @staticmethod
    def _run_python(*argv):
        # Run the current interpreter with the given arguments,
        # folding stderr into the captured stdout.
        return subprocess.check_output([sys.executable, *argv], stderr=subprocess.STDOUT)

    def test_license(self):
        """The installed package metadata advertises the MIT license."""
        output = self._run_python('-m', 'pip', 'show', 'jsonschema')
        self.assertIn(b'License: MIT', output)

    def test_version(self):
        """`python -m jsonschema --version` matches the installed distribution version."""
        version = self._run_python('-W', 'ignore', '-m', 'jsonschema', '--version')
        version = version.decode('utf-8').strip()
        self.assertEqual(version, metadata.version('jsonschema'))

    def test_no_arguments_shows_usage_notes(self):
        """Running with no arguments prints exactly the --help text."""
        output = self._run_python('-m', 'jsonschema')
        output_for_help = self._run_python('-m', 'jsonschema', '--help')
        self.assertEqual(output, output_for_help)
class Discriminator_VGG_Patch(nn.Module):
    """VGG-style patch discriminator.

    An un-normalized 3x3 stem, then alternating (k=4, s=2) downsampling and
    (k=3, s=1) refinement blocks widening through x1, x2, x4, x8 of base_nf,
    finished by a 1-channel 3x3 prediction conv (no activation).
    """

    def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):
        super(Discriminator_VGG_Patch, self).__init__()
        # Stem: no normalization on the raw input.
        blocks = [B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, mode=mode)]
        # (in_channels, out_channels) for the nine following conv blocks;
        # even positions downsample (k=4, s=2), odd positions refine (k=3, s=1).
        widths = [
            (base_nf, base_nf),
            (base_nf, base_nf * 2),
            (base_nf * 2, base_nf * 2),
            (base_nf * 2, base_nf * 4),
            (base_nf * 4, base_nf * 4),
            (base_nf * 4, base_nf * 8),
            (base_nf * 8, base_nf * 8),
            (base_nf * 8, base_nf * 8),
            (base_nf * 8, base_nf * 8),
        ]
        for idx, (c_in, c_out) in enumerate(widths):
            kernel, stride = (4, 2) if idx % 2 == 0 else (3, 1)
            blocks.append(B.conv_block(c_in, c_out, kernel_size=kernel, stride=stride, norm_type=norm_type, act_type=act_type, mode=mode))
        blocks.append(nn.Conv2d(base_nf * 8, 1, kernel_size=3, stride=1, padding=1))
        self.features = B.sequential(*blocks)

    def forward(self, x):
        return self.features(x)
class TupletMarginLoss(GenericPairLoss):
    """Tuplet margin loss over cosine similarities.

    Positive-pair cosines are converted to angles, the margin (given in
    degrees) is subtracted from each positive angle, and a logsumexp over
    scaled (neg - pos) differences yields one loss per positive pair.
    """

    def __init__(self, margin=5.73, scale=64, **kwargs):
        super().__init__(mat_based_loss=False, **kwargs)
        # This loss is only defined when the distance is cosine similarity.
        c_f.assert_distance_type(self, CosineSimilarity)
        # Margin is specified in degrees; stored in radians for torch.cos/acos.
        self.margin = np.radians(margin)
        self.scale = scale
        self.add_to_recordable_attributes(list_of_names=['margin', 'scale'], is_stat=False)
        self.add_to_recordable_attributes(list_of_names=['avg_pos_angle', 'avg_neg_angle'], is_stat=True)

    def _compute_loss(self, pos_pairs, neg_pairs, indices_tuple):
        """Per-positive-pair loss; returns zero losses if either side is empty."""
        (a1, p, a2, _) = indices_tuple
        if ((len(a1) > 0) and (len(a2) > 0)):
            # Pair values are cosines (enforced in __init__), so acos gives angles.
            pos_angles = torch.acos(pos_pairs)
            self.set_stats(pos_angles, neg_pairs)
            # Widen each positive angle by the margin, then map back to cosine space.
            pos_pairs = torch.cos((pos_angles - self.margin))
            pos_pairs = pos_pairs.unsqueeze(1)
            neg_pairs = neg_pairs.repeat(pos_pairs.size(0), 1)
            inside_exp = (self.scale * (neg_pairs - pos_pairs))
            # Only compare negatives that share an anchor with the positive pair.
            keep_mask = (a2.unsqueeze(0) == a1.unsqueeze(1))
            loss = lmu.logsumexp(inside_exp, keep_mask=keep_mask, add_one=True, dim=1)
            return {'loss': {'losses': loss, 'indices': (a1, p), 'reduction_type': 'pos_pair'}}
        return self.zero_losses()

    def get_default_distance(self):
        return CosineSimilarity()

    def set_stats(self, pos_angles, neg_pairs):
        """Record average positive/negative pair angles (in degrees) when stats are enabled."""
        if self.collect_stats:
            with torch.no_grad():
                neg_angles = torch.acos(neg_pairs)
                self.avg_pos_angle = np.degrees(torch.mean(pos_angles).item())
                self.avg_neg_angle = np.degrees(torch.mean(neg_angles).item())
def extract_args(detector, aligner, in_path, out_path, args=None):
    """Build the argv token list for a `faceswap.py extract` subprocess call.

    Any extra *args* string is appended and whitespace-split along with the
    rest of the command line.
    """
    cmd = f'{sys.executable} faceswap.py extract -i {in_path} -o {out_path} -D {detector} -A {aligner}'
    if args:
        cmd = f'{cmd} {args}'
    return cmd.split()
class Configurable():
    """Mixin providing layered configuration lookup.

    Precedence (highest first): per-instance constructor kwargs, the class's
    global_defaults, then defaults registered via add_defaults(). Resolved
    values are cached on the instance so __getattr__ runs at most once per name.
    """

    global_defaults = {}

    def __init__(self, **config):
        self._variable_defaults = {}
        self._user_config = config

    def add_defaults(self, defaults):
        """Register default entries; each entry is indexable as (name, value, ...).

        Values are shallow-copied so shared mutable defaults are not aliased.
        """
        for entry in defaults:
            self._variable_defaults[entry[0]] = copy.copy(entry[1])

    def __getattr__(self, name):
        # Guard against recursion while _variable_defaults is not yet set.
        if name == '_variable_defaults':
            raise AttributeError
        found, value = self._find_default(name)
        if not found:
            cname = self.__class__.__name__
            raise AttributeError(('%s has no attribute: %s' % (cname, name)))
        # Cache the resolved value so future lookups bypass __getattr__.
        setattr(self, name, value)
        return value

    def _find_default(self, name):
        """Return (True, value) from the merged config layers, else (False, None)."""
        merged = dict(self._variable_defaults)
        merged.update(self.global_defaults)
        merged.update(self._user_config)
        if name in merged:
            return (True, merged[name])
        return (False, None)
def test_user_avatar(api, mock_req):
    """User.avatar requires an attached API and caches the fetched photo.

    NOTE(review): `api` and `mock_req` are pytest fixtures defined elsewhere
    in the test suite.
    """
    mock_req({'getUserProfilePhotos': {'ok': True, 'result': {'total_count': 1, 'photos': [[{'file_id': 'aaaaaa', 'width': 50, 'height': 50, 'file_size': 128}, {'file_id': 'bbbbbb', 'width': 25, 'height': 25, 'file_size': 64}]]}}})
    # Without an API instance attached, accessing .avatar must raise.
    user = botogram.objects.User({'id': 123, 'first_name': 'Bob'})
    with pytest.raises(RuntimeError):
        user.avatar
    user = botogram.objects.User({'id': 123, 'first_name': 'Bob'}, api)
    # No cache before the first access...
    assert (not hasattr(user, '_avatar'))
    avatar = user.avatar
    # ...the first variant of the first photo set is returned...
    assert (avatar.file_id == 'aaaaaa')
    # ...and the result is memoized on the instance as _avatar.
    assert hasattr(user, '_avatar')
    assert (user._avatar == avatar)
# NOTE(review): the next line looks like a stripped `@with_fixtures(...)`
# decorator (the `@with_` prefix appears lost in extraction) — confirm.
_fixtures(WebFixture, FileInputButtonFixture)
def test_file_upload_button(web_fixture, file_input_button_fixture):
    """Choosing a file and clicking Submit uploads exactly one file."""
    fixture = file_input_button_fixture
    wsgi_app = web_fixture.new_wsgi_app(child_factory=file_input_button_fixture.FileUploadForm.factory(), enable_js=True)
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    file_to_upload = temp_file_with('some content')
    browser.type(XPath.input_labelled('Choose file(s)'), file_to_upload.name)
    # Selecting a file alone does not upload it...
    assert (len(fixture.domain_object.files) == 0)
    browser.click(XPath.button_labelled('Submit'))
    # ...submitting the form does.
    assert (len(fixture.domain_object.files) == 1)
def vgg_face_dag(weights_path=None, return_layer='fc8', **kwargs):
    """Construct a Vgg_face_dag model truncated at *return_layer*.

    If *weights_path* is given, the checkpoint is loaded into the model.
    Checkpoints saved from an nn.DataParallel wrapper (keys prefixed with
    'module.') are handled by stripping the prefix and retrying.

    NOTE(review): map_location is hard-coded to 'cuda', so loading fails on a
    CPU-only machine — confirm whether this is intentional.
    """
    model = Vgg_face_dag(return_layer)
    if weights_path:
        state_dict = torch.load(weights_path, map_location=torch.device('cuda'))
        try:
            model.load_state_dict(state_dict)
        except RuntimeError:
            # Key mismatch (load_state_dict raises RuntimeError): retry with
            # the DataParallel 'module.' prefix removed. Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.
            from collections import OrderedDict
            odict = OrderedDict((k.replace('module.', ''), v) for k, v in state_dict.items())
            model.load_state_dict(odict)
    return model
def initlogging(logfile):
    """Reset root logging: INFO records go to *logfile* (overwritten), while
    only CRITICAL messages are echoed to the console."""
    logging.shutdown()
    root = logging.getLogger()
    # Drop any handlers left over from a previous configuration so that
    # basicConfig (which no-ops when handlers exist) installs the file handler.
    root.handlers = []
    logging.basicConfig(
        filename=logfile,
        filemode='w',
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    console = logging.StreamHandler()
    console.setLevel(logging.CRITICAL)
    console.setFormatter(logging.Formatter('%(message)s'))
    root.addHandler(console)
def _parse_string(data: str, type_comments: bool=True) -> tuple[(ast.Module, ParserModule)]:
    """Parse *data* into an AST, retrying without type comments when the only
    failure is a misplaced type annotation.

    Returns the parsed module together with the parser module that produced it
    (the latter may differ from the initial one after a retry).
    """
    parser_module = get_parser_module(type_comments=type_comments)
    source = data + '\n'
    try:
        tree = parser_module.parse(source, type_comments=type_comments)
    except SyntaxError as exc:
        # Only a misplaced-type-annotation error is recoverable, and only if
        # we were parsing with type comments in the first place.
        recoverable = type_comments and exc.args[0] == MISPLACED_TYPE_ANNOTATION_ERROR
        if not recoverable:
            raise
        parser_module = get_parser_module(type_comments=False)
        tree = parser_module.parse(source, type_comments=False)
    return (tree, parser_module)
# NOTE(review): this line appears to be a stripped decorator, likely
# `@register_tokenizer('moses', dataclass=MosesTokenizerConfig)` — confirm.
_tokenizer('moses', dataclass=MosesTokenizerConfig)
class MosesTokenizer(object):
    """Wrapper around sacremoses tokenization driven by a MosesTokenizerConfig."""

    def __init__(self, cfg: MosesTokenizerConfig):
        self.cfg = cfg
        try:
            # Deliberately shadows this class's name within the method scope.
            from sacremoses import MosesTokenizer, MosesDetokenizer
            self.tok = MosesTokenizer(cfg.source_lang)
            self.detok = MosesDetokenizer(cfg.target_lang)
        except ImportError:
            raise ImportError('Please install Moses tokenizer with: pip install sacremoses')

    def encode(self, x: str) -> str:
        """Tokenize a raw string; returns a single space-joined token string."""
        return self.tok.tokenize(x, aggressive_dash_splits=(not self.cfg.moses_no_dash_splits), return_str=True, escape=(not self.cfg.moses_no_escape))

    def decode(self, x: str) -> str:
        """Detokenize a space-separated token string back into text."""
        return self.detok.detokenize(x.split())
class GlobalAttention(nn.Module):
    """Multi-head attention where queries keep full spatial resolution while
    keys/values come from a k-strided conv (coarse "global" tokens)."""

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, k=7):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        # Standard 1/sqrt(d) attention scaling.
        self.scale = dim_head ** -0.5
        self.to_q = nn.Conv2d(dim, inner_dim, 1, bias=False)
        # One conv produces both K and V, downsampled k-fold spatially.
        self.to_kv = nn.Conv2d(dim, inner_dim * 2, k, stride=k, bias=False)
        self.to_out = nn.Sequential(nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout))

    def forward(self, x):
        # Assumes x is (batch, channels, height, width).
        heads = self.heads
        width = x.shape[-1]
        q = self.to_q(x)
        k, v = self.to_kv(x).chunk(2, dim=1)
        # Flatten spatial dims and fold heads into the batch dimension.
        q, k, v = (rearrange(t, 'b (h d) x y -> (b h) (x y) d', h=heads) for t in (q, k, v))
        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        attn = sim.softmax(dim=-1)
        out = einsum('b i j, b j d -> b i d', attn, v)
        # Restore the query's full spatial layout.
        out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h=heads, y=width)
        return self.to_out(out)
class ConnectionItem(GraphicsObject):
    """Flowchart connection: a selectable line or cubic curve from a source
    terminal to a target terminal (or to a loose QPointF while dragging)."""

    def __init__(self, source, target=None):
        GraphicsObject.__init__(self)
        self.setFlags((self.GraphicsItemFlag.ItemIsSelectable | self.GraphicsItemFlag.ItemIsFocusable))
        self.source = source
        self.target = target
        self.length = 0
        self.hovered = False
        self.path = None  # cached QPainterPath that is actually drawn
        self.shapePath = None  # cached stroked outline used for hit-testing
        self.style = {'shape': 'line', 'color': (100, 100, 250), 'width': 1.0, 'hoverColor': (150, 150, 250), 'hoverWidth': 1.0, 'selectedColor': (200, 200, 0), 'selectedWidth': 3.0}
        # The item lives in the same ViewBox as its source terminal.
        self.source.getViewBox().addItem(self)
        self.updateLine()
        self.setZValue(0)

    def close(self):
        """Remove this item from its scene, if it belongs to one."""
        if (self.scene() is not None):
            self.scene().removeItem(self)

    def setTarget(self, target):
        """Point the connection at a new target (terminal or QPointF) and redraw."""
        self.target = target
        self.updateLine()

    def setStyle(self, **kwds):
        """Update style keys; changing 'shape' forces the path to be rebuilt."""
        self.style.update(kwds)
        if ('shape' in kwds):
            self.updateLine()
        else:
            self.update()

    def updateLine(self):
        """Recompute the painter path between the source and target endpoints."""
        start = Point(self.source.connectPoint())
        if isinstance(self.target, TerminalGraphicsItem):
            stop = Point(self.target.connectPoint())
        elif isinstance(self.target, QtCore.QPointF):
            stop = Point(self.target)
        else:
            # No usable endpoint yet (e.g. target is None mid-drag).
            return
        self.prepareGeometryChange()
        self.path = self.generatePath(start, stop)
        # Invalidate the pick-shape cache; it is rebuilt lazily in shape().
        self.shapePath = None
        self.update()

    def generatePath(self, start, stop):
        """Build the path for the configured shape ('line' or 'cubic')."""
        path = QtGui.QPainterPath()
        path.moveTo(start)
        if (self.style['shape'] == 'line'):
            path.lineTo(stop)
        elif (self.style['shape'] == 'cubic'):
            # Control points give a horizontal-in/horizontal-out S-curve.
            path.cubicTo(Point(stop.x(), start.y()), Point(start.x(), stop.y()), Point(stop.x(), stop.y()))
        else:
            raise Exception(('Invalid shape "%s"; options are "line" or "cubic"' % self.style['shape']))
        return path

    def keyPressEvent(self, ev):
        """Delete/Backspace disconnects the terminals when this item is selected."""
        if (not self.isSelected()):
            ev.ignore()
            return
        if ((ev.key() == QtCore.Qt.Key.Key_Delete) or (ev.key() == QtCore.Qt.Key.Key_Backspace)):
            self.source.disconnect(self.target)
            ev.accept()
        else:
            ev.ignore()

    def mousePressEvent(self, ev):
        # Clicks are handled via mouseClickEvent (pyqtgraph's click abstraction).
        ev.ignore()

    def mouseClickEvent(self, ev):
        """Left click selects the connection and gives it keyboard focus."""
        if (ev.button() == QtCore.Qt.MouseButton.LeftButton):
            ev.accept()
            sel = self.isSelected()
            self.setSelected(True)
            self.setFocus()
            if ((not sel) and self.isSelected()):
                # Selection state changed -> repaint with the selected pen.
                self.update()

    def hoverEvent(self, ev):
        """Track hover state (only while clicks would be accepted) and repaint."""
        if ((not ev.isExit()) and ev.acceptClicks(QtCore.Qt.MouseButton.LeftButton)):
            self.hovered = True
        else:
            self.hovered = False
        self.update()

    def boundingRect(self):
        return self.shape().boundingRect()

    def viewRangeChanged(self):
        # Pixel width changes with zoom, so the stroked pick-shape must be rebuilt.
        self.shapePath = None
        self.prepareGeometryChange()

    def shape(self):
        """Return (and lazily cache) a stroked outline ~8 px wide for hit-testing."""
        if (self.shapePath is None):
            if (self.path is None):
                return QtGui.QPainterPath()
            stroker = QtGui.QPainterPathStroker()
            px = self.pixelWidth()
            stroker.setWidth((px * 8))
            self.shapePath = stroker.createStroke(self.path)
        return self.shapePath

    def paint(self, p, *args):
        """Draw the path with the pen matching selected/hovered/normal state."""
        if self.isSelected():
            p.setPen(fn.mkPen(self.style['selectedColor'], width=self.style['selectedWidth']))
        elif self.hovered:
            p.setPen(fn.mkPen(self.style['hoverColor'], width=self.style['hoverWidth']))
        else:
            p.setPen(fn.mkPen(self.style['color'], width=self.style['width']))
        p.drawPath(self.path)
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
    """Tests for diverse siblings search: beam search where each candidate's
    positional score is penalized by sibling_rank * diversity_rate."""

    def assertHypoScore(self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0):
        """Check hypo scores against log-probs minus the sibling-rank penalty."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        # Apply the diversity penalty in place: score_i -= rank_i * rate.
        pos_scores.sub_((torch.Tensor(sibling_rank) * diversity_rate))
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            # Length-normalize using the length-penalty exponent.
            score /= (pos_scores.numel() ** lenpen)
        self.assertLess(abs((score - hypo['score'])), 1e-06)

    def test_diverse_beam_search(self):
        """Two-beam generation with diversity_rate=0.5 yields the expected tokens and scores."""
        search_strategy = search.DiverseSiblingsSearch(self.tgt_dict, diversity_rate=0.5)
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.forward(sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        # Sentence 0, beams 0 and 1.
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        # Sentence 1, beams 0 and 1.
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
def test_setup_show_with_KeyboardInterrupt_in_test(pytester: Pytester) -> None:
    """--setup-show still prints fixture SETUP/TEARDOWN lines when the test
    body raises KeyboardInterrupt, and pytest exits with INTERRUPTED.

    NOTE(review): the generated file's `def arg():` has no @pytest.fixture
    decorator — likely lost in extraction; confirm against the original test.
    """
    p = pytester.makepyfile('\n import pytest\n \n def arg():\n pass\n def test_arg(arg):\n raise KeyboardInterrupt()\n ')
    result = pytester.runpytest('--setup-show', p, no_reraise_ctrlc=True)
    # Fixture lifecycle lines must bracket the interrupt summary.
    result.stdout.fnmatch_lines(['*SETUP F arg*', '*test_arg (fixtures used: arg)*', '*TEARDOWN F arg*', '*! KeyboardInterrupt !*', '*= no tests ran in *'])
    assert (result.ret == ExitCode.INTERRUPTED)
def prune_episodes(episodes, scene, metrics, num_good_episodes):
    """Select the first *num_good_episodes* successful episodes for *scene*.

    An episode qualifies when metrics has a row indexed by
    f"{scene}_{episode_id}" whose 'episode_success' is (close to) 1.
    Episodes without a metrics row are skipped.

    :param episodes: iterable of episode dicts containing 'episode_id'
    :param scene: scene identifier used to build the metrics index key
    :param metrics: pandas-style frame indexed by full episode id
    :param num_good_episodes: exact number of episodes to return
    :raises ValueError: if fewer than *num_good_episodes* qualify
    """
    good_episodes = []
    for episode in episodes:
        episode_full_id = f"{scene}_{episode['episode_id']}"
        try:
            episode_stats = metrics.loc[episode_full_id]
        except KeyError:
            # No recorded metrics for this episode; skip it.
            continue
        if not math.isclose(episode_stats['episode_success'], 1):
            continue
        good_episodes.append(episode)
        if len(good_episodes) == num_good_episodes:
            break
    # Was a bare `assert`, which is stripped under `python -O`; raise instead
    # so the invariant is always enforced.
    if len(good_episodes) != num_good_episodes:
        raise ValueError(
            f'only found {len(good_episodes)} successful episodes for scene {scene}, '
            f'expected {num_good_episodes}'
        )
    return good_episodes
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Instantiate a pretrained WavLM sequence classifier and overwrite its
    projector/classifier parameters with the downstream checkpoint tensors."""
    model = WavLMForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    # (target module, checkpoint key prefix) pairs to copy weight+bias from.
    transfers = [
        (model.projector, 'projector'),
        (model.classifier, 'model.post_net.linear'),
    ]
    for module, prefix in transfers:
        module.weight.data = downstream_dict[f'{prefix}.weight']
        module.bias.data = downstream_dict[f'{prefix}.bias']
    return model
class HacktoberStats(commands.Cog):
    """Hacktoberfest statistics cog: lets Discord users link a GitHub account
    and reports their valid October pull requests.

    NOTE(review): this block appears extraction-damaged — several decorator
    lines lost their `@` prefix and most URL string literals are truncated to
    a bare quote. The code is preserved as-is; restore decorators and URLs
    from the upstream source before running.
    """

    # Redis cache mapping Discord author id (str) -> linked GitHub username.
    linked_accounts = RedisCache()

    def __init__(self, bot: Bot):
        self.bot = bot

    # NOTE(review): the next two lines look like stripped decorators, likely
    # an in-month guard and `@commands.group(...)`.
    _month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    (name='hacktoberstats', aliases=('hackstats',), invoke_without_command=True)
    async def hacktoberstats_group(self, ctx: commands.Context, github_username: (str | None)=None) -> None:
        """Show stats for the given username, falling back to the linked account."""
        if (not github_username):
            (author_id, author_mention) = self._author_mention_from_context(ctx)
            if (await self.linked_accounts.contains(author_id)):
                github_username = (await self.linked_accounts.get(author_id))
                log.info(f"Getting stats for {author_id} linked GitHub account '{github_username}'")
            else:
                msg = f'''{author_mention}, you have not linked a GitHub account
You can link your GitHub account using:
```
{ctx.prefix}hackstats link github_username
```
Or query GitHub stats directly using:
```
{ctx.prefix}hackstats github_username
```'''
                (await ctx.send(msg))
                return
        (await self.get_stats(ctx, github_username))

    # NOTE(review): stripped decorators again; `_group.command` was probably
    # `@hacktoberstats_group.command`.
    _month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    _group.command(name='link')
    async def link_user(self, ctx: commands.Context, github_username: (str | None)=None) -> None:
        """Link (or re-link) the invoking user's Discord id to a GitHub username."""
        (author_id, author_mention) = self._author_mention_from_context(ctx)
        if github_username:
            if (await self.linked_accounts.contains(author_id)):
                old_username = (await self.linked_accounts.get(author_id))
                log.info(f"{author_id} has changed their github link from '{old_username}' to '{github_username}'")
                (await ctx.send(f"{author_mention}, your GitHub username has been updated to: '{github_username}'"))
            else:
                log.info(f"{author_id} has added a github link to '{github_username}'")
                (await ctx.send(f'{author_mention}, your GitHub username has been added'))
            (await self.linked_accounts.set(author_id, github_username))
        else:
            log.info(f"{author_id} tried to link a GitHub account but didn't provide a username")
            (await ctx.send(f'{author_mention}, a GitHub username is required to link your account'))

    _month(Month.SEPTEMBER, Month.OCTOBER, Month.NOVEMBER)
    _group.command(name='unlink')
    async def unlink_user(self, ctx: commands.Context) -> None:
        """Remove the invoking user's stored GitHub link, if any."""
        (author_id, author_mention) = self._author_mention_from_context(ctx)
        stored_user = (await self.linked_accounts.pop(author_id, None))
        if stored_user:
            (await ctx.send(f'{author_mention}, your GitHub profile has been unlinked'))
            log.info(f'{author_id} has unlinked their GitHub account')
        else:
            (await ctx.send(f'{author_mention}, you do not currently have a linked GitHub account'))
            log.info(f'{author_id} tried to unlink their GitHub account but no account was linked')

    async def get_stats(self, ctx: commands.Context, github_username: str) -> None:
        """Fetch October PRs for a user and send either a stats embed or an error."""
        async with ctx.typing():
            prs = (await self.get_october_prs(github_username))
            if (prs is None):
                # None specifically means the GitHub user does not exist.
                (await ctx.send(embed=discord.Embed(title=random.choice(NEGATIVE_REPLIES), description=f'GitHub user `{github_username}` was not found.', colour=discord.Colour.red())))
                return
            if prs:
                stats_embed = (await self.build_embed(github_username, prs))
                (await ctx.send('Here are some stats!', embed=stats_embed))
            else:
                (await ctx.send(f"No valid Hacktoberfest PRs found for '{github_username}'"))

    async def build_embed(self, github_username: str, prs: list[dict]) -> discord.Embed:
        """Build the stats embed: PR count, T-shirt progress, review/accepted lists."""
        log.info(f"Building Hacktoberfest embed for GitHub user: '{github_username}'")
        (in_review, accepted) = (await self._categorize_prs(prs))
        n = (len(accepted) + len(in_review))
        if (n >= PRS_FOR_SHIRT):
            shirtstr = f'**{github_username} is eligible for a T-shirt or a tree!**'
        elif (n == (PRS_FOR_SHIRT - 1)):
            shirtstr = f'**{github_username} is 1 PR away from a T-shirt or a tree!**'
        else:
            shirtstr = f'**{github_username} is {(PRS_FOR_SHIRT - n)} PRs away from a T-shirt or a tree!**'
        stats_embed = discord.Embed(title=f"{github_username}'s Hacktoberfest", color=Colours.purple, description=f'''{github_username} has made {n} valid {self._contributionator(n)} in October
{shirtstr}
''')
        # NOTE(review): the next two lines contain truncated URL literals.
        stats_embed.set_thumbnail(url=f'
        stats_embed.set_author(name='Hacktoberfest', url=' icon_url='
        review_str = (self._build_prs_string(in_review, github_username) or 'None')
        accepted_str = (self._build_prs_string(accepted, github_username) or 'None')
        stats_embed.add_field(name=':clock1: In Review', value=review_str)
        stats_embed.add_field(name=':tada: Accepted', value=accepted_str)
        log.info(f"Hacktoberfest PR built for GitHub user '{github_username}'")
        return stats_embed

    async def get_october_prs(self, github_username: str) -> (list[dict] | None):
        '\n Query GitHub\'s API for PRs created during the month of October by github_username.\n\n PRs with an \'invalid\' or \'spam\' label are ignored unless it is merged or approved\n\n For PRs created after October 3rd, they have to be in a repository that has a\n \'hacktoberfest\' topic, unless the PR is labelled \'hacktoberfest-accepted\' for it\n to count.\n\n If PRs are found, return a list of dicts with basic PR information\n\n For each PR:\n {\n "repo_url": str\n "repo_shortname": str (e.g. "python-discord/sir-lancebot")\n "created_at": datetime.datetime\n "number": int\n }\n\n Otherwise, return empty list.\n None will be returned when the GitHub user was not found.\n '
        log.info(f"Fetching Hacktoberfest Stats for GitHub user: '{github_username}'")
        # NOTE(review): truncated URL literal (GitHub search endpoint).
        base_url = '
        action_type = 'pr'
        is_query = 'public'
        not_query = 'draft'
        date_range = f'{CURRENT_YEAR}-09-30T10:00Z..{CURRENT_YEAR}-11-01T12:00Z'
        per_page = '300'
        query_params = f'+type:{action_type}+is:{is_query}+author:{quote_plus(github_username)}+-is:{not_query}+created:{date_range}&per_page={per_page}'
        log.debug(f'GitHub query parameters generated: {query_params}')
        jsonresp = (await self._fetch_url(base_url, REQUEST_HEADERS, {'q': query_params}))
        if ('message' in jsonresp):
            # A 'message' key signals an API error payload.
            api_message = jsonresp['errors'][0]['message']
            if (api_message == GITHUB_NONEXISTENT_USER_MESSAGE):
                log.debug(f"No GitHub user found named '{github_username}'")
                return None
            log.error(f"GitHub API request for '{github_username}' failed with message: {api_message}")
            return []
        if (jsonresp['total_count'] == 0):
            log.info(f"No October PRs found for GitHub user: '{github_username}'")
            return []
        log.info(f"Found {len(jsonresp['items'])} Hacktoberfest PRs for GitHub user: '{github_username}'")
        outlist = []
        # PRs created up to Oct 3 23:59:59 UTC count without further checks.
        oct3 = datetime(int(CURRENT_YEAR), 10, 3, 23, 59, 59, tzinfo=UTC)
        hackto_topics = {}  # cache: repo shortname -> has 'hacktoberfest' topic
        for item in jsonresp['items']:
            shortname = self._get_shortname(item['repository_url'])
            # NOTE(review): the 'repo_url' f-string below is truncated.
            itemdict = {'repo_url': f' 'repo_shortname': shortname, 'created_at': datetime.strptime(item['created_at'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=UTC), 'number': item['number']}
            if (self._has_label(item, ['invalid', 'spam']) and (not (await self._is_accepted(itemdict)))):
                continue
            if (itemdict['created_at'] < oct3):
                outlist.append(itemdict)
                continue
            if self._has_label(item, 'hacktoberfest-accepted'):
                outlist.append(itemdict)
                continue
            if hackto_topics.get(shortname):
                outlist.append(itemdict)
                continue
            # NOTE(review): truncated URL literal (repo topics endpoint).
            topics_query_url = f'
            log.debug(f'Fetching repo topics for {shortname} with url: {topics_query_url}')
            jsonresp2 = (await self._fetch_url(topics_query_url, GITHUB_TOPICS_ACCEPT_HEADER))
            if (jsonresp2.get('names') is None):
                log.error(f"Error fetching topics for {shortname}: {jsonresp2['message']}")
                continue
            if ('hacktoberfest' in jsonresp2['names']):
                hackto_topics[shortname] = True
                outlist.append(itemdict)
        return outlist

    async def _fetch_url(self, url: str, headers: dict, params: dict) -> dict:
        'Retrieve API response from URL.'
        # NOTE(review): this line is corrupted — probably
        # `async with self.bot.http_session.get(url, headers=headers, params=params) as resp:`.
        # Also note some call sites pass only two arguments, suggesting the
        # upstream signature had `params: dict | None = None`.
        async with self.bot. headers=headers, params=params) as resp:
            return (await resp.json())

    # NOTE(review): defined without `self` but invoked as self._has_label(...);
    # likely a stripped @staticmethod decorator.
    def _has_label(pr: dict, labels: (list[str] | str)) -> bool:
        """True if the PR carries any of the given label names (casefolded compare).

        NOTE(review): when `labels` is a str with no match, execution falls
        through and iterates the string's characters — confirm intended.
        """
        if (not pr.get('labels')):
            return False
        if (isinstance(labels, str) and any(((label['name'].casefold() == labels) for label in pr['labels']))):
            return True
        for item in labels:
            if any(((label['name'].casefold() == item) for label in pr['labels'])):
                return True
        return False

    async def _is_accepted(self, pr: dict) -> bool:
        """True if the PR is merged, labelled hacktoberfest-accepted, or approved."""
        # NOTE(review): truncated URL literal (pulls endpoint).
        query_url = f"
        jsonresp = (await self._fetch_url(query_url, REQUEST_HEADERS))
        if (message := jsonresp.get('message')):
            log.error(f'''Error fetching PR stats for #{pr['number']} in repo {pr['repo_shortname']}:
{message}''')
            return False
        if jsonresp.get('merged'):
            return True
        if self._has_label(jsonresp, 'hacktoberfest-accepted'):
            return True
        # Fall back to checking the PR's reviews for an approval.
        query_url += '/reviews'
        jsonresp2 = (await self._fetch_url(query_url, REQUEST_HEADERS))
        if isinstance(jsonresp2, dict):
            # The reviews endpoint returns a list on success; a dict is an error payload.
            log.error(f'''Error fetching PR reviews for #{pr['number']} in repo {pr['repo_shortname']}:
{jsonresp2['message']}''')
            return False
        if (len(jsonresp2) == 0):
            return False
        return any(((item.get('status') == 'APPROVED') for item in jsonresp2))

    # NOTE(review): missing `self`/@staticmethod, and the regex literal is truncated.
    def _get_shortname(in_url: str) -> str:
        exp = '
        return re.findall(exp, in_url)[0]

    async def _categorize_prs(self, prs: list[dict]) -> tuple:
        """Split PRs into (in_review, accepted) by age and acceptance checks."""
        now = datetime.now(tz=UTC)
        # NOTE(review): get_october_prs wraps CURRENT_YEAR in int() but this
        # call does not — confirm CURRENT_YEAR's type.
        oct3 = datetime(CURRENT_YEAR, 10, 3, 23, 59, 59, tzinfo=UTC)
        in_review = []
        accepted = []
        for pr in prs:
            if ((pr['created_at'] + timedelta(REVIEW_DAYS)) > now):
                # Younger than the review window -> still pending.
                in_review.append(pr)
            elif ((pr['created_at'] <= oct3) or (await self._is_accepted(pr))):
                accepted.append(pr)
        return (in_review, accepted)

    # NOTE(review): missing `self`/@staticmethod; base_url literal truncated.
    def _build_prs_string(prs: list[tuple], user: str) -> str:
        """Summarize up to five repos with PR counts, plus an '...and N more' tail."""
        base_url = '
        str_list = []
        repo_list = [pr['repo_shortname'] for pr in prs]
        prs_list = Counter(repo_list).most_common(5)
        more = (len(prs) - sum((i[1] for i in prs_list)))
        for pr in prs_list:
            string = f'{pr[1]} to [{pr[0]}]({base_url}{pr[0]}/pulls/{user})'
            str_list.append(string)
        if more:
            str_list.append(f'...and {more} more')
        return '\n'.join(str_list)

    # NOTE(review): missing `self`/@staticmethod on the two helpers below.
    def _contributionator(n: int) -> str:
        """Pluralize 'contribution'."""
        if (n == 1):
            return 'contribution'
        return 'contributions'

    def _author_mention_from_context(ctx: commands.Context) -> tuple[(str, str)]:
        """Return (author id as str, author mention string) from a command context."""
        author_id = str(ctx.author.id)
        author_mention = ctx.author.mention
        return (author_id, author_mention)
class ForRange(ForGenerator):
    """IR generation for `for x in range(...)`-style loops.

    NOTE(review): relies on the ForGenerator base class providing builder,
    line, index, body_block and loop_exit attributes — confirm against it.
    """

    def init(self, start_reg: Value, end_reg: Value, step: int) -> None:
        """Set up index/end registers before the loop blocks are emitted."""
        builder = self.builder
        self.start_reg = start_reg
        self.end_reg = end_reg
        self.step = step
        # The end value may be needed across basic blocks, so allow spilling it.
        self.end_target = builder.maybe_spill(end_reg)
        if (is_short_int_rprimitive(start_reg.type) and is_short_int_rprimitive(end_reg.type)):
            # Both bounds are tagged short ints -> use the fast index type.
            index_type: RType = short_int_rprimitive
        elif is_fixed_width_rtype(end_reg.type):
            index_type = end_reg.type
        else:
            index_type = int_rprimitive
        index_reg = Register(index_type)
        builder.assign(index_reg, start_reg, (- 1))
        self.index_reg = builder.maybe_spill_assignable(index_reg)
        # The user-visible loop variable is distinct from the internal index register.
        self.index_target: (Register | AssignmentTarget) = builder.get_assignment_target(self.index)
        builder.assign(self.index_target, builder.read(self.index_reg, self.line), self.line)

    def gen_condition(self) -> None:
        """Emit `index < end` (or `>` for a negative step) branching to body/exit."""
        builder = self.builder
        line = self.line
        cmp = ('<' if (self.step > 0) else '>')
        comparison = builder.binary_op(builder.read(self.index_reg, line), builder.read(self.end_target, line), cmp, line)
        builder.add_bool_branch(comparison, self.body_block, self.loop_exit)

    def gen_step(self) -> None:
        """Advance the index by step, using a primitive short-int add when possible."""
        builder = self.builder
        line = self.line
        if (is_short_int_rprimitive(self.start_reg.type) and is_short_int_rprimitive(self.end_reg.type)):
            # Short-int fast path: a direct primitive integer add.
            new_val = builder.int_op(short_int_rprimitive, builder.read(self.index_reg, line), Integer(self.step), IntOp.ADD, line)
        else:
            new_val = builder.binary_op(builder.read(self.index_reg, line), Integer(self.step), '+', line)
        builder.assign(self.index_reg, new_val, line)
        # Keep the user-visible loop variable in sync with the internal index.
        builder.assign(self.index_target, new_val, line)
class TestIPython(unittest.TestCase):
    """Integration test for the line_profiler IPython magics (%lprun)."""

    def test_init(self):
        """%lprun profiles a cell-defined function and records one timed line."""
        try:
            from IPython.testing.globalipapp import get_ipython
        except ImportError:
            # IPython not installed -> this test cannot run.
            import pytest
            pytest.skip()
        ip = get_ipython()
        ip.run_line_magic('load_ext', 'line_profiler')
        ip.run_cell(raw_cell='def func():\n return 2**20')
        # -r returns the LineProfiler object; -f selects the function to profile.
        lprof = ip.run_line_magic('lprun', '-r -f func func()')
        timings = lprof.get_stats().timings
        self.assertEqual(len(timings), 1)
        (func_data, lines_data) = next(iter(timings.items()))
        print(f'func_data={func_data}')
        print(f'lines_data={lines_data}')
        # func_data[1] is the first line number, func_data[2] the function name.
        self.assertEqual(func_data[1], 1)
        self.assertEqual(func_data[2], 'func')
        # Exactly one line executed: line 2, hit once.
        self.assertEqual(len(lines_data), 1)
        self.assertEqual(lines_data[0][0], 2)
        self.assertEqual(lines_data[0][1], 1)
class NewWindow(QtWidgets.QMainWindow):
    """Auxiliary top-level window with a custom ToolBar, a status bar and a
    vertical layout as the central widget.

    NOTE(review): the bare `()` before on_close below looks like a stripped
    decorator — confirm against the original source.
    """

    def __init__(self, *args, m=None, title=None, on_close=None, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): m presumably exposes a matplotlib figure at m.f.canvas
        # (see mpl_connect below) — confirm the expected type.
        self.m = m
        self.setWindowTitle('OpenFile')
        self.showhelp = False
        self.toolbar = ToolBar(title=title, on_close=on_close)
        self.addToolBar(self.toolbar)
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setContentsMargins(5, 20, 5, 5)
        statusBar = QtWidgets.QStatusBar()
        self.setStatusBar(statusBar)
        widget = QtWidgets.QWidget()
        widget.setLayout(self.layout)
        self.setCentralWidget(widget)
        if (self.m is not None):
            # Close this window alongside the associated canvas.
            self.m.f.canvas.mpl_connect('close_event', self.on_close)
        self.setStyleSheet('\n NewWindow{\n border: 1px solid gray;\n }\n ')
        # Keep a module-level reference so the window is not garbage-collected.
        _windows_to_close.add(self)

    ()
    def on_close(self, e):
        self.close()
def _do_query(bz, opt, parser):
q = {}
u = opt.from_url
if u:
q = bz.url_to_query(u)
if opt.components_file:
clist = []
f = open(opt.components_file, 'r')
for line in f.readlines():
line = line.rstrip('\n')
clist.append(line)
opt.component = clist
if opt.status:
val = opt.status
stat = val
if (val == 'ALL'):
stat = None
elif (val == 'DEV'):
stat = ['NEW', 'ASSIGNED', 'NEEDINFO', 'ON_DEV', 'MODIFIED', 'POST', 'REOPENED']
elif (val == 'QE'):
stat = ['ASSIGNED', 'ON_QA', 'FAILS_QA', 'PASSES_QA']
elif (val == 'EOL'):
stat = ['VERIFIED', 'RELEASE_PENDING', 'CLOSED']
elif (val == 'OPEN'):
stat = ['NEW', 'ASSIGNED', 'MODIFIED', 'ON_DEV', 'ON_QA', 'VERIFIED', 'RELEASE_PENDING', 'POST']
opt.status = stat
for optname in ['severity', 'id', 'status', 'component', 'priority', 'product', 'version']:
val = getattr(opt, optname, None)
if (not isinstance(val, str)):
continue
setattr(opt, optname, val.split(','))
include_fields = None
if (opt.output in ['raw', 'json']):
include_fields = ['id']
elif opt.outputformat:
include_fields = []
for (fieldname, rest) in format_field_re.findall(opt.outputformat):
if ((fieldname == 'whiteboard') and rest):
fieldname = ((rest + '_') + fieldname)
elif (fieldname == 'flag'):
fieldname = 'flags'
elif (fieldname == 'cve'):
fieldname = ['keywords', 'blocks']
elif (fieldname == '__unicode__'):
fieldname = ['id', 'status', 'assigned_to', 'summary']
flist = ((isinstance(fieldname, list) and fieldname) or [fieldname])
for f in flist:
if (f not in include_fields):
include_fields.append(f)
if (include_fields is not None):
include_fields.sort()
kwopts = {}
if opt.product:
kwopts['product'] = opt.product
if opt.component:
kwopts['component'] = opt.component
if opt.sub_component:
kwopts['sub_component'] = opt.sub_component
if opt.version:
kwopts['version'] = opt.version
if opt.reporter:
kwopts['reporter'] = opt.reporter
if opt.id:
kwopts['bug_id'] = opt.id
if opt.summary:
kwopts['short_desc'] = opt.summary
if opt.comment:
kwopts['long_desc'] = opt.comment
if opt.cc:
kwopts['cc'] = opt.cc
if opt.assigned_to:
kwopts['assigned_to'] = opt.assigned_to
if opt.qa_contact:
kwopts['qa_contact'] = opt.qa_contact
if opt.status:
kwopts['status'] = opt.status
if opt.blocked:
kwopts['blocked'] = opt.blocked
if opt.dependson:
kwopts['dependson'] = opt.dependson
if opt.keywords:
kwopts['keywords'] = opt.keywords
if opt.keywords_type:
kwopts['keywords_type'] = opt.keywords_type
if opt.url:
kwopts['url'] = opt.url
if opt.url_type:
kwopts['url_type'] = opt.url_type
if opt.whiteboard:
kwopts['status_whiteboard'] = opt.whiteboard
if opt.status_whiteboard_type:
kwopts['status_whiteboard_type'] = opt.status_whiteboard_type
if opt.fixed_in:
kwopts['fixed_in'] = opt.fixed_in
if opt.fixed_in_type:
kwopts['fixed_in_type'] = opt.fixed_in_type
if opt.flag:
kwopts['flag'] = opt.flag
if opt.alias:
kwopts['alias'] = opt.alias
if opt.qa_whiteboard:
kwopts['qa_whiteboard'] = opt.qa_whiteboard
if opt.devel_whiteboard:
kwopts['devel_whiteboard'] = opt.devel_whiteboard
if opt.severity:
kwopts['bug_severity'] = opt.severity
if opt.priority:
kwopts['priority'] = opt.priority
if opt.target_release:
kwopts['target_release'] = opt.target_release
if opt.target_milestone:
kwopts['target_milestone'] = opt.target_milestone
if opt.emailtype:
kwopts['emailtype'] = opt.emailtype
if include_fields:
kwopts['include_fields'] = include_fields
if opt.quicksearch:
kwopts['quicksearch'] = opt.quicksearch
if opt.savedsearch:
kwopts['savedsearch'] = opt.savedsearch
if opt.savedsearch_sharer_id:
kwopts['savedsearch_sharer_id'] = opt.savedsearch_sharer_id
if opt.tags:
kwopts['tags'] = opt.tags
built_query = bz.build_query(**kwopts)
if opt.fields:
_merge_field_opts(built_query, opt.fields, parser)
built_query.update(q)
q = built_query
if (not q):
parser.error("'query' command requires additional arguments")
return bz.query(q) |
class SubModules():
    """Windows-only helper routines built on ctypes.

    NOTE(review): methods are defined without @staticmethod and take no
    self/cls — they must be called through the class, e.g.
    ``SubModules.GetKey(path)``, never on an instance.
    """

    def CryptUnprotectData(encrypted_data: bytes, optional_entropy: str=None) -> bytes:
        """Decrypt *encrypted_data* via the Win32 DPAPI ``CryptUnprotectData`` API.

        Raises ValueError when the API reports failure (e.g. the blob was
        protected under a different user profile).
        """
        class DATA_BLOB(ctypes.Structure):
            # Mirrors the Win32 DATA_BLOB struct: byte count + byte pointer.
            _fields_ = [('cbData', ctypes.c_ulong), ('pbData', ctypes.POINTER(ctypes.c_ubyte))]
        pDataIn = DATA_BLOB(len(encrypted_data), ctypes.cast(encrypted_data, ctypes.POINTER(ctypes.c_ubyte)))
        pDataOut = DATA_BLOB()
        pOptionalEntropy = None
        if (optional_entropy is not None):
            # NOTE(review): 'utf-16' prepends a BOM; confirm the protecting
            # side used the same encoding for the entropy string.
            optional_entropy = optional_entropy.encode('utf-16')
            pOptionalEntropy = DATA_BLOB(len(optional_entropy), ctypes.cast(optional_entropy, ctypes.POINTER(ctypes.c_ubyte)))
        if ctypes.windll.Crypt32.CryptUnprotectData(ctypes.byref(pDataIn), None, (ctypes.byref(pOptionalEntropy) if (pOptionalEntropy is not None) else None), None, None, 0, ctypes.byref(pDataOut)):
            # Copy the output into Python-owned memory, then free the
            # API-allocated buffer with LocalFree as DPAPI requires.
            data = (ctypes.c_ubyte * pDataOut.cbData)()
            ctypes.memmove(data, pDataOut.pbData, pDataOut.cbData)
            ctypes.windll.Kernel32.LocalFree(pDataOut.pbData)
            return bytes(data)
        raise ValueError('Invalid encrypted_data provided!')

    def GetKey(FilePath: str) -> bytes:
        """Load a JSON state file and return its DPAPI-unwrapped os_crypt key.

        The stored key is base64 with a 5-byte 'DPAPI' prefix, hence the [5:].
        """
        with open(FilePath, 'r', encoding='utf-8', errors='ignore') as file:
            jsonContent: dict = json.load(file)
            encryptedKey: str = jsonContent['os_crypt']['encrypted_key']
            encryptedKey = base64.b64decode(encryptedKey.encode())[5:]
            return SubModules.CryptUnprotectData(encryptedKey)

    def Decrpytion(EncrypedValue: bytes, EncryptedKey: bytes) -> str:
        """Decrypt a stored value; returns a sentinel string on any failure.

        NOTE(review): method name is misspelled ("Decrpytion") — kept as-is
        because callers depend on it.
        """
        try:
            version = EncrypedValue.decode(errors='ignore')
            if (version.startswith('v10') or version.startswith('v11')):
                # AES-GCM layout: 3-byte version tag | 12-byte IV | ciphertext | 16-byte auth tag.
                iv = EncrypedValue[3:15]
                password = EncrypedValue[15:]
                authentication_tag = password[(- 16):]
                password = password[:(- 16)]
                backend = default_backend()
                cipher = Cipher(algorithms.AES(EncryptedKey), modes.GCM(iv, authentication_tag), backend=backend)
                decryptor = cipher.decryptor()
                decrypted_password = (decryptor.update(password) + decryptor.finalize())
                return decrypted_password.decode('utf-8')
            else:
                # Non-versioned values are plain DPAPI blobs.  NOTE(review):
                # str() of bytes keeps the b'...' repr — confirm intended.
                return str(SubModules.CryptUnprotectData(EncrypedValue))
        except:
            # Deliberate broad except: any failure becomes a sentinel string.
            return 'Decryption Error!, Data cant be decrypt'

    def create_mutex(mutex_value) -> bool:
        """Create a named mutex; True when no other instance already held it.

        183 is the Win32 ERROR_ALREADY_EXISTS code.
        """
        kernel32 = ctypes.windll.kernel32
        mutex = kernel32.CreateMutexA(None, False, mutex_value)
        return (kernel32.GetLastError() != 183)

    def IsAdmin() -> bool:
        """True when the process has admin rights; False on any error."""
        try:
            return bool(ctypes.windll.shell32.IsUserAnAdmin())
        except:
            return False
def test_signature(workspace):
    """Signature help at `main`'s call site reports one documented signature."""
    position = {'line': 10, 'character': 5}
    document = Document(DOC_URI, workspace, DOC)
    help_info = signature.pylsp_signature_help(document._config, document, position)
    signatures = help_info['signatures']
    assert len(signatures) == 1
    first = signatures[0]
    assert first['label'] == 'main(param1, param2)'
    assert first['parameters'][0]['label'] == 'param1'
    assert first['parameters'][0]['documentation'] == {'kind': 'markdown', 'value': 'Docs for param1'}
    assert help_info['activeParameter'] == 0
class SendEnterKeyTest(unittest.TestCase):
    """GUI test: send_chars must turn {ENTER} into a real line break in Notepad."""

    def setUp(self):
        """Launch Notepad and wrap its Edit control for low-level input."""
        Timings.fast()
        self.app = Application()
        self.app.start(_notepad_exe())
        self.dlg = self.app.UntitledNotepad
        self.ctrl = HwndWrapper(self.dlg.Edit.handle)

    def tearDown(self):
        """Exit Notepad, declining the save prompt; kill the app when no prompt matches."""
        self.dlg.menu_select('File -> Exit')
        try:
            self.app.Notepad["Do&n't Save"].click()
        except findbestmatch.MatchError:
            self.app.kill()

    def test_sendEnterChar(self):
        # Edit controls store line breaks as CRLF, hence the expected '\r\n'.
        self.ctrl.send_chars('Hello{ENTER}World')
        self.assertEqual('Hello\r\nWorld', self.dlg.Edit.window_text())
def pad_if_smaller(img, size, fill=0):
    """Pad *img* on the right/bottom so it is at least *size*; no-op when large enough.

    *size* may be an int (square target) or a (width, height)-style pair
    indexed as size[0] vs. width and size[1] vs. height.
    """
    if isinstance(size, int):
        size = (size, size)
    width, height = img.size
    pad_w = max(size[0] - width, 0)
    pad_h = max(size[1] - height, 0)
    return functional.pad(img, (0, 0, pad_w, pad_h), fill=fill)
def test_pages_site_not_found(graphql_client):
    """Querying CMS pages for an unknown hostname yields an empty list."""
    query = '\n    query Page ($hostname: String!, $language: String!) {\n        cmsPages(hostname: $hostname, language: $language){\n            body {\n                ...on TextSection {\n                    title\n                }\n            }\n        }\n    }\n    '
    variables = {'hostname': 'not-found', 'slug': 'hot-tea', 'language': 'en'}
    response = graphql_client.query(query, variables=variables)
    assert response['data'] == {'cmsPages': []}
def process_ground_paras(retrieved='../data/wq_finetuneq_train_10000.txt', save_path='../data/wq_ft_train_matched.txt', raw_data='../data/wq-train.txt', num_workers=40, debug=False, k=10000, match='string'):
    """Match retrieved paragraphs against gold QA pairs and save annotated results.

    Args:
        retrieved: path to a JSONL file of retrieval results, aligned line-by-line
            with *raw_data*.
        save_path: destination JSONL path for the matched records.
        raw_data: path to the JSONL file of raw QA examples.
        num_workers: size of the worker pool; each worker is set up by ``init``.
        debug: when True, print coverage and return without writing output.
        k: top-k cutoff forwarded to ``process_qa_para``.
        match: matching strategy forwarded to ``process_qa_para``.
    """
    # Use context managers so the input file handles are closed deterministically
    # (the originals were opened inline and leaked).
    with open(retrieved) as f:
        retrieved = [json.loads(l) for l in f]
    with open(raw_data) as f:
        raw_data = [json.loads(l) for l in f]
    # Removed unused locals from the original (`recall`, `tokenizer`, `num_tasks`);
    # the workers build their own tokenizer via the `init` initializer.
    processes = ProcessPool(processes=num_workers, initializer=init)
    process_qa_para_partial = partial(process_qa_para, k=k, match=match)
    results = []
    # imap_unordered streams completed items; tqdm shows progress against the input size.
    for item in tqdm(processes.imap_unordered(process_qa_para_partial, zip(raw_data, retrieved)), total=len(raw_data)):
        results.append(item)
    # Fraction of examples whose top-k retrieval contains at least one gold match.
    topk_covered = [len(r['matched_paras']) > 0 for r in results]
    print(np.mean(topk_covered))
    if debug:
        return
    processes.close()
    processes.join()
    with open(save_path, 'w') as g:
        for record in results:
            g.write(json.dumps(record) + '\n')
def add_idol_config(cfg):
    """Register IDOL- and Swin-specific default options on a detectron2-style config.

    Mutates *cfg* in place; call before merging a YAML config so these keys exist.
    """
    cfg.MODEL.IDOL = CN()
    cfg.MODEL.IDOL.NUM_CLASSES = 80
    # --- Video-clip sampling / augmentation inputs ---
    cfg.INPUT.SAMPLING_FRAME_NUM = 1
    cfg.INPUT.SAMPLING_FRAME_RANGE = 10
    cfg.INPUT.SAMPLING_INTERVAL = 1
    cfg.INPUT.SAMPLING_FRAME_SHUFFLE = False
    cfg.INPUT.AUGMENTATIONS = []
    cfg.INPUT.COCO_PRETRAIN = False
    cfg.INPUT.PRETRAIN_SAME_CROP = False
    # --- Loss weights ---
    cfg.MODEL.IDOL.MASK_WEIGHT = 2.0
    cfg.MODEL.IDOL.DICE_WEIGHT = 5.0
    cfg.MODEL.IDOL.GIOU_WEIGHT = 2.0
    cfg.MODEL.IDOL.L1_WEIGHT = 5.0
    cfg.MODEL.IDOL.CLASS_WEIGHT = 2.0
    cfg.MODEL.IDOL.REID_WEIGHT = 2.0
    cfg.MODEL.IDOL.DEEP_SUPERVISION = True
    cfg.MODEL.IDOL.MASK_STRIDE = 4
    cfg.MODEL.IDOL.MATCH_STRIDE = 4
    cfg.MODEL.IDOL.FOCAL_ALPHA = 0.25
    # --- Hungarian matcher costs ---
    cfg.MODEL.IDOL.SET_COST_CLASS = 2
    cfg.MODEL.IDOL.SET_COST_BOX = 5
    cfg.MODEL.IDOL.SET_COST_GIOU = 2
    # --- Deformable-DETR transformer architecture ---
    cfg.MODEL.IDOL.NHEADS = 8
    cfg.MODEL.IDOL.DROPOUT = 0.1
    cfg.MODEL.IDOL.DIM_FEEDFORWARD = 1024
    cfg.MODEL.IDOL.ENC_LAYERS = 6
    cfg.MODEL.IDOL.DEC_LAYERS = 6
    cfg.MODEL.IDOL.HIDDEN_DIM = 256
    cfg.MODEL.IDOL.NUM_OBJECT_QUERIES = 300
    cfg.MODEL.IDOL.DEC_N_POINTS = 4
    cfg.MODEL.IDOL.ENC_N_POINTS = 4
    cfg.MODEL.IDOL.NUM_FEATURE_LEVELS = 4
    # --- Inference / tracking behaviour ---
    cfg.MODEL.IDOL.CLIP_STRIDE = 1
    cfg.MODEL.IDOL.MERGE_ON_CPU = True
    cfg.MODEL.IDOL.MULTI_CLS_ON = True
    cfg.MODEL.IDOL.APPLY_CLS_THRES = 0.05
    cfg.MODEL.IDOL.TEMPORAL_SCORE_TYPE = 'mean'
    cfg.MODEL.IDOL.INFERENCE_SELECT_THRES = 0.1
    cfg.MODEL.IDOL.NMS_PRE = 0.5
    cfg.MODEL.IDOL.ADD_NEW_SCORE = 0.2
    cfg.MODEL.IDOL.INFERENCE_FW = True
    cfg.MODEL.IDOL.INFERENCE_TW = True
    cfg.MODEL.IDOL.MEMORY_LEN = 3
    cfg.MODEL.IDOL.BATCH_INFER_LEN = 10
    # --- Optimizer ---
    cfg.SOLVER.OPTIMIZER = 'ADAMW'
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
    # --- Swin-Transformer backbone defaults ---
    cfg.MODEL.SWIN = CN()
    cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
    cfg.MODEL.SWIN.PATCH_SIZE = 4
    cfg.MODEL.SWIN.EMBED_DIM = 96
    cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
    cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
    cfg.MODEL.SWIN.WINDOW_SIZE = 7
    cfg.MODEL.SWIN.MLP_RATIO = 4.0
    cfg.MODEL.SWIN.QKV_BIAS = True
    cfg.MODEL.SWIN.QK_SCALE = None
    cfg.MODEL.SWIN.DROP_RATE = 0.0
    cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
    cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
    cfg.MODEL.SWIN.APE = False
    cfg.MODEL.SWIN.PATCH_NORM = True
    cfg.MODEL.SWIN.OUT_FEATURES = ['res2', 'res3', 'res4', 'res5']
    cfg.MODEL.SWIN.USE_CHECKPOINT = False
    cfg.FIND_UNUSED_PARAMETERS = True
def try_load_beams(data):
    """Try to extract a beam or beam table from FITS-like *data*.

    Accepts a Header, PrimaryHDU/ImageHDU, BinTableHDU, or HDUList and
    dispatches accordingly.  Returns a radio_beam Beam, a beam table
    (FITS record array), or None when a header exists but no beam can be
    parsed from it.  Raises ValueError when a table/list input holds no
    beam table.
    """
    try:
        from radio_beam import Beam
    except ImportError:
        # Fix: warnings.warn requires a Warning subclass as its category;
        # the original passed ImportError, which makes warn() raise
        # TypeError instead of emitting the warning.
        warnings.warn('radio_beam is not installed. No beam can be created.', ImportWarning)
    if isinstance(data, fits.BinTableHDU):
        if ('BPA' in data.data.names):
            # The table itself carries per-channel beam parameters.
            beam_table = data.data
            return beam_table
        else:
            raise ValueError('No beam table found')
    elif isinstance(data, fits.HDUList):
        for hdu_item in data:
            if isinstance(hdu_item, (fits.PrimaryHDU, fits.ImageHDU)):
                # Remember the most recent image-HDU beam; an explicit beam
                # *table* found later wins and returns immediately.
                beam = try_load_beams(hdu_item.header)
            elif isinstance(hdu_item, fits.BinTableHDU):
                if ('BPA' in hdu_item.data.names):
                    beam_table = hdu_item.data
                    return beam_table
        try:
            return beam
        except NameError:
            # No image HDU ever set `beam`.
            raise ValueError('No beam table found')
    elif isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
        return try_load_beams(data.header)
    elif isinstance(data, fits.Header):
        try:
            beam = Beam.from_fits_header(data)
            return beam
        except Exception:
            # Header has no parseable beam (or radio_beam is missing):
            # preserve the original behaviour of returning None.
            return None
    else:
        raise ValueError('How did you get here? This is some sort of error.')
def _get_pixel_navigation_parameters(point, im_nav_params):
    """Interpolate attitude/orbit at the pixel's observation time and bundle them.

    Returns a PixelNavigationParameters combining the interpolated attitude
    and orbit with the static projection parameters.
    """
    scan_params = im_nav_params.static.scan_params
    obs_time = get_observation_time(point, scan_params)
    attitude, orbit = interpolate_navigation_prediction(
        attitude_prediction=im_nav_params.predicted.attitude,
        orbit_prediction=im_nav_params.predicted.orbit,
        observation_time=obs_time,
    )
    return PixelNavigationParameters(attitude=attitude, orbit=orbit, proj_params=im_nav_params.static.proj_params)
def test_connect_plain():
    """Connect a wire driven by an update block between a test source and sink."""
    class Top(ComponentLevel3):

        def construct(s):
            # Sink expects each src value incremented by one (see up_from_src).
            s.src = TestSource(Bits32, [4, 3, 2, 1, 4, 3, 2, 1])
            s.sink = TestSink(Bits32, [5, 4, 3, 2, 5, 4, 3, 2])
            s.wire0 = Wire(32)

            def up_from_src():
                # NOTE(review): rebinds s.wire0 from inside a construct-local
                # function — presumably registered as an update block by the
                # framework; confirm a decorator was not lost here.
                s.wire0 = (s.src.out + 1)
            connect(s.sink.in_, s.wire0)

        def done(s):
            # Simulation is finished once both endpoints have drained their lists.
            return (s.src.done() and s.sink.done())

        def line_trace(s):
            return ((((s.src.line_trace() + ' >>> ') + ('w0=%s' % s.wire0)) + ' >>> ') + s.sink.line_trace())
    _test_model(Top)
def add_object(model_path, rot_mat=((1, 0, 0), (0, 1, 0), (0, 0, 1)), trans_vec=(0, 0, 0), scale=1, name=None):
    """Import a .obj model into the Blender scene, posing and naming it.

    Args:
        model_path: path to a Wavefront .obj file (other formats raise
            NotImplementedError).
        rot_mat: 3x3 rotation applied to the object.
        trans_vec: world-space translation.
        scale: uniform scale factor (applied via obj.scale).
        name: optional base name; suffixed with "_<i>" when the import
            yields multiple objects.

    Returns:
        The imported object, or a list of objects when the file contains
        more than one.
    """
    if model_path.endswith('.obj'):
        bpy.ops.import_scene.obj(filepath=model_path, axis_forward='-Z', axis_up='Y')
    else:
        raise NotImplementedError('Importing model of this type')
    obj_list = []
    for (i, obj) in enumerate(bpy.context.selected_objects):
        if (name is not None):
            if (len(bpy.context.selected_objects) == 1):
                obj.name = name
            else:
                obj.name = ((name + '_') + str(i))
        trans_4x4 = Matrix.Translation(trans_vec)
        rot_4x4 = Matrix(rot_mat).to_4x4()
        scale_4x4 = Matrix(np.eye(4))
        # Fix: the original line was a syntax error with the matrix-multiply
        # operators missing; compose the transforms with '@' (mathutils
        # Matrix multiplication).  scale_4x4 is identity — actual scaling is
        # applied via obj.scale below.
        obj.matrix_world = ((trans_4x4 @ rot_4x4) @ scale_4x4)
        obj.scale = (scale, scale, scale)
        obj_list.append(obj)
    if (len(obj_list) == 1):
        return obj_list[0]
    else:
        return obj_list
class BigBirdConfig(PretrainedConfig):
    """Configuration for a BigBird model.

    Stores architecture hyperparameters (sizes, layer counts, dropout) and
    BigBird-specific sparse-attention settings (attention_type, block_size,
    num_random_blocks).  Token-id defaults are forwarded to PretrainedConfig.
    """
    model_type = 'big_bird'

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type='block_sparse', use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        """Create the config; special token ids go to the base class, the rest become attributes."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.is_encoder_decoder = is_encoder_decoder
        # BigBird-specific sparse-attention parameters.
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
def test_set(func, qtbot):
    """Throttling collapses a burst: first call fires immediately, last fires after the delay."""
    throttled = throttle.Throttle(func, DELAY)
    throttled.set_delay(DELAY)
    for arg in ('foo', 'foo', 'foo', 'bar'):
        throttled(arg)
    # Only the leading call went through synchronously.
    func.assert_called_once_with('foo')
    func.reset_mock()
    # After the delay elapses, the trailing call is delivered.
    qtbot.wait(int(DELAY * 1.5))
    func.assert_called_once_with('bar')
    func.reset_mock()
class DCUN_TFC_GPoCM_TDF_Framework(DenseCUNet_GPoCM_Framework):
    """Training framework wrapping a DCUN_TFC_GPoCM_TDF spec-to-spec network."""

    def __init__(self, n_fft, hop_length, num_frame, spec_type, spec_est_mode, optimizer, lr, auto_lr_schedule, train_loss, val_loss, **kwargs):
        """Build the network from matching kwargs, wrap losses, and save hyperparameters.

        Only kwargs whose names appear in DCUN_TFC_GPoCM_TDF.__init__'s
        signature are forwarded to the network; the rest are still recorded
        as hyperparameters (if not callable).
        """
        # Filter **kwargs down to the parameters the network constructor accepts.
        valid_kwargs = inspect.signature(DCUN_TFC_GPoCM_TDF.__init__).parameters
        tfc_tdf_net_kwargs = dict(((name, kwargs[name]) for name in valid_kwargs if (name in kwargs)))
        tfc_tdf_net_kwargs['n_fft'] = n_fft
        spec2spec = DCUN_TFC_GPoCM_TDF(**tfc_tdf_net_kwargs)
        # Loss names are resolved into conditional loss callables bound to the STFT config.
        train_loss_ = get_conditional_loss(train_loss, n_fft, hop_length, **kwargs)
        val_loss_ = get_conditional_loss(val_loss, n_fft, hop_length, **kwargs)
        super(DCUN_TFC_GPoCM_TDF_Framework, self).__init__(n_fft, hop_length, num_frame, spec_type, spec_est_mode, spec2spec, optimizer, lr, auto_lr_schedule, train_loss_, val_loss_)
        # Record every named constructor argument plus non-callable extras as hyperparameters.
        valid_kwargs = inspect.signature(DCUN_TFC_GPoCM_TDF_Framework.__init__).parameters
        hp = [key for key in valid_kwargs.keys() if (key not in ['self', 'kwargs'])]
        hp = (hp + [key for key in kwargs if (not callable(kwargs[key]))])
        self.save_hyperparameters(*hp)
def moving_code_with_imports(project, resource, source):
    """Prepare *source* (being moved out of *resource*) with the imports it needs.

    Returns a tuple ``(moving, imports)`` where *moving* is the source text
    ready to be inserted at its destination (leading comment lines preserved,
    import section stripped) and *imports* is the list of import infos the
    destination module must gain.
    """
    import_tools = importutils.ImportTools(project)
    pymodule = libutils.get_string_module(project, source, resource)
    lines = codeanalyze.SourceLinesAdapter(source)
    # Skip leading comment lines so they stay attached to the moved code.
    start = 1
    while ((start < lines.length()) and lines.get_line(start).startswith('#')):
        start += 1
    moving_prefix = source[:lines.get_line_start(start)]
    pymodule = libutils.get_string_module(project, source[lines.get_line_start(start):], resource)
    origin = project.get_pymodule(resource)
    imports = [stmt.import_info for stmt in import_tools.module_imports(origin).imports]
    # Names defined in the origin module but not in the moved code must be
    # importable from the origin after the move.
    back_names = [name for name in origin if (name not in pymodule)]
    imports.append(import_tools.get_from_import(resource, back_names))
    # Rewrite the moved source step by step: inject imports, make relatives
    # absolute, then organize — re-parsing after each textual transformation.
    source = _add_imports_to_module(import_tools, pymodule, imports)
    pymodule = libutils.get_string_module(project, source, resource)
    source = import_tools.relatives_to_absolutes(pymodule)
    pymodule = libutils.get_string_module(project, source, resource)
    source = import_tools.organize_imports(pymodule, selfs=False)
    pymodule = libutils.get_string_module(project, source, resource)
    module_imports = import_tools.module_imports(pymodule)
    imports = [import_stmt.import_info for import_stmt in module_imports.imports]
    # Find where the code proper begins: after the last import and any blank lines.
    start = 1
    if module_imports.imports:
        start = module_imports.imports[(- 1)].end_line
    lines = codeanalyze.SourceLinesAdapter(source)
    while ((start < lines.length()) and (not lines.get_line(start).strip())):
        start += 1
    moving = (moving_prefix + source[lines.get_line_start(start):])
    return (moving, imports)
def add_image_net_computational_nodes_in_graph(session: tf.compat.v1.Session, logits_name: str, num_classes: int):
    """Add ImageNet evaluation ops (labels placeholder, top-1/top-5 accuracy, loss) to the session graph.

    Called for its graph side effects: the created tensors are registered under
    the names 'labels', 'top1-acc' and 'top5-acc' and looked up by name later.

    :param session: session whose graph receives the new nodes
    :param logits_name: graph name of the logits tensor (e.g. 'logits:0')
    :param num_classes: number of one-hot label classes
    """
    with session.graph.as_default():
        y_hat = session.graph.get_tensor_by_name(logits_name)
        y_hat_argmax = tf.compat.v1.argmax(y_hat, axis=1)
        # One-hot ground-truth labels are fed through this placeholder.
        y = tf.compat.v1.placeholder(tf.compat.v1.int64, shape=[None, num_classes], name='labels')
        y_argmax = tf.compat.v1.argmax(y, axis=1)
        correct_prediction = tf.compat.v1.equal(y_hat_argmax, y_argmax)
        # Locals below look unused, but creating them registers named nodes in the graph.
        top1_acc = tf.compat.v1.reduce_mean(tf.compat.v1.cast(correct_prediction, tf.compat.v1.float32), name='top1-acc')
        top5_acc = tf.compat.v1.reduce_mean(tf.compat.v1.cast(tf.compat.v1.nn.in_top_k(predictions=y_hat, targets=tf.compat.v1.cast(y_argmax, tf.compat.v1.int32), k=5), tf.compat.v1.float32), name='top5-acc')
        loss = tf.compat.v1.reduce_mean(tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=y, logits=y_hat))
def test_get_all_speakers_user_ids(schedule_item_factory, submission_factory, conference_factory, schedule_item_additional_speaker_factory):
    """speakers_user_ids returns submission speakers plus additional speakers, scoped to one conference."""
    item_1 = schedule_item_factory(type='talk', submission=submission_factory())
    conference = item_1.conference
    item_2 = schedule_item_factory(type='talk', conference=conference, submission=submission_factory())
    item_3 = schedule_item_factory(type='talk', conference=conference, submission=submission_factory())
    extra_speaker = schedule_item_additional_speaker_factory()
    item_3.additional_speakers.add(extra_speaker)
    # An item on another conference must not contribute speakers.
    other_conf_item = schedule_item_factory(type='talk', conference=conference_factory(), submission=submission_factory())
    speaker_ids = speakers_user_ids(conference)
    assert len(speaker_ids) == 4
    assert item_1.submission.speaker_id in speaker_ids
    assert item_2.submission.speaker_id in speaker_ids
    assert item_3.submission.speaker_id in speaker_ids
    assert extra_speaker.user_id in speaker_ids
    assert other_conf_item.submission.speaker_id not in speaker_ids
def importESI(string):
    """Build a Fit from an ESI fitting JSON string; returns None when the hull is unusable.

    Modules are first collected, the fit is recalculated, and only then are
    modules re-applied in fit order so slot/fitting checks see a valid hull.
    """
    sMkt = Market.getInstance()
    fitobj = Fit()
    refobj = json.loads(string)
    items = refobj['items']
    fitobj.name = refobj['name']
    fitobj.notes = refobj['description']
    try:
        ship = refobj['ship_type_id']
        try:
            fitobj.ship = Ship(sMkt.getItem(ship))
        except ValueError:
            # Hull isn't a regular ship — try it as a structure (Citadel).
            fitobj.ship = Citadel(sMkt.getItem(ship))
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        # Best-effort import: any hull failure aborts the whole fit.
        pyfalog.warning('Caught exception in importESI')
        return None
    # Process items in slot-flag order so modules land in a stable order.
    items.sort(key=(lambda k: k['flag']))
    moduleList = []
    for module in items:
        try:
            item = sMkt.getItem(module['type_id'], eager='group.category')
            if (not item.published):
                continue
            if (module['flag'] == INV_FLAG_DRONEBAY):
                d = Drone(item)
                d.amount = module['quantity']
                fitobj.drones.append(d)
            elif (module['flag'] == INV_FLAG_CARGOBAY):
                c = Cargo(item)
                c.amount = module['quantity']
                fitobj.cargo.append(c)
            elif (module['flag'] == INV_FLAG_FIGHTER):
                # NOTE(review): fighter quantity from module['quantity'] is not
                # applied here, unlike drones/cargo — confirm this is intended.
                fighter = Fighter(item)
                fitobj.fighters.append(fighter)
            else:
                try:
                    m = Module(item)
                except ValueError:
                    pyfalog.debug("Item can't be added to any slot (unknown item or just charge)")
                    continue
                if (item.category.name == 'Subsystem'):
                    # Subsystems must be fitted immediately: they change slot layout.
                    if m.fits(fitobj):
                        fitobj.modules.append(m)
                else:
                    if m.isValidState(FittingModuleState.ACTIVE):
                        m.state = activeStateLimit(m.item)
                    # Defer ordinary modules until the fit has been recalculated.
                    moduleList.append(m)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Skip any module that fails to resolve; keep importing the rest.
            pyfalog.warning('Could not process module.')
            continue
    sFit = svcFit.getInstance()
    sFit.recalc(fitobj)
    sFit.fill(fitobj)
    # Now that subsystems/attributes are applied, fit the deferred modules.
    for module in moduleList:
        if module.fits(fitobj):
            fitobj.modules.append(module)
    return fitobj
class Solution():
    """LeetCode 171: Excel sheet column title to column number."""

    def titleToNumber(self, s: str) -> int:
        """Interpret *s* (e.g. 'AB') as a bijective base-26 number.

        'A'..'Z' map to 1..26, so 'A' -> 1, 'AB' -> 28, 'ZY' -> 701.
        """
        result = 0
        for ch in s:
            # ord arithmetic replaces the original hand-written 26-entry
            # lookup dict; forward accumulation replaces the reverse-indexed
            # loop with explicit 26**i powers.
            result = result * 26 + (ord(ch) - ord('A') + 1)
        return result
class nnUNetTrainerDA5Segord0(nnUNetTrainerDA5):
    """DA5 trainer variant that resamples segmentations with order 0 (nearest neighbour)."""

    def get_dataloaders(self):
        """Build train/val dataloaders with DA5 transforms; seg resampling order is 0.

        Returns a (train, val) pair of augmenters: single-threaded when DA
        processes are disabled, otherwise multiprocess LimitedLenWrappers.
        """
        patch_size = self.configuration_manager.patch_size
        dim = len(patch_size)
        deep_supervision_scales = self._get_deep_supervision_scales()
        (rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes) = self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()
        # order_resampling_seg=0 is the whole point of this subclass.
        tr_transforms = self.get_training_transforms(patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, order_resampling_data=3, order_resampling_seg=0, use_mask_for_norm=self.configuration_manager.use_mask_for_norm, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
        val_transforms = self.get_validation_transforms(deep_supervision_scales, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
        (dl_tr, dl_val) = self.get_plain_dataloaders(initial_patch_size, dim)
        allowed_num_processes = get_allowed_n_proc_DA()
        if (allowed_num_processes == 0):
            # No augmentation workers allowed: run transforms inline.
            mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)
            mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)
        else:
            # Validation gets half the workers (at least one).
            mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, allowed_num_processes, 6, None, True, 0.02)
            mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, max(1, (allowed_num_processes // 2)), 3, None, True, 0.02)
        return (mt_gen_train, mt_gen_val)
class VolumeShareSlippageTestCase(WithCreateBarData, WithSimParams, WithDataPortal, ZiplineTestCase):
    """Tests for VolumeShareSlippage on an equity and a future over minute bars."""
    START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc')
    END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc')
    SIM_PARAMS_CAPITAL_BASE = 100000.0
    SIM_PARAMS_DATA_FREQUENCY = 'minute'
    SIM_PARAMS_EMISSION_RATE = 'daily'
    ASSET_FINDER_EQUITY_SIDS = (133,)
    ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp('2006-01-05', tz='utc')
    ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp('2006-01-07', tz='utc')
    # Fix: the pd.DatetimeIndex(start=..., end=..., freq=...) constructor was
    # deprecated and removed (pandas 1.0); pd.date_range is the supported
    # equivalent and produces the same index.
    minutes = pd.date_range(start=START_DATE, end=(END_DATE - pd.Timedelta('1 minute')), freq='1min')

    def CREATE_BARDATA_DATA_FREQUENCY(cls):
        return cls.sim_params.data_frequency

    def make_equity_minute_bar_data(cls):
        # One bar with volume 200 at the first simulated minute.
        (yield (133, pd.DataFrame({'open': [3.0], 'high': [3.15], 'low': [2.85], 'close': [3.0], 'volume': [200]}, index=[cls.minutes[0]])))

    def make_futures_info(cls):
        return pd.DataFrame({'sid': [1000], 'root_symbol': ['CL'], 'symbol': ['CLF06'], 'start_date': [cls.ASSET_FINDER_EQUITY_START_DATE], 'end_date': [cls.ASSET_FINDER_EQUITY_END_DATE], 'multiplier': [500], 'exchange': ['CMES']})

    def make_future_minute_bar_data(cls):
        (yield (1000, pd.DataFrame({'open': [5.0], 'high': [5.15], 'low': [4.85], 'close': [5.0], 'volume': [100]}, index=[cls.minutes[0]])))

    def init_class_fixtures(cls):
        super(VolumeShareSlippageTestCase, cls).init_class_fixtures()
        cls.ASSET133 = cls.asset_finder.retrieve_asset(133)
        cls.ASSET1000 = cls.asset_finder.retrieve_asset(1000)

    def test_volume_share_slippage(self):
        """Equity fill is capped by the volume limit; empty bars yield no fills."""
        slippage_model = VolumeShareSlippage()
        open_orders = [Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), amount=100, filled=0, asset=self.ASSET133)]
        bar_data = self.create_bardata(simulation_dt_func=(lambda : self.minutes[0]))
        orders_txns = list(slippage_model.simulate(bar_data, self.ASSET133, open_orders))
        # Fix: assertEquals is a deprecated alias removed in Python 3.12 —
        # use assertEqual throughout.
        self.assertEqual(len(orders_txns), 1)
        (_, txn) = orders_txns[0]
        expected_txn = {'price': float(3.0001875), 'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc), 'amount': int(5), 'asset': self.ASSET133, 'type': DATASOURCE_TYPE.TRANSACTION, 'order_id': open_orders[0].id}
        self.assertIsNotNone(txn)
        self.assertEqual(expected_txn, txn.__dict__)
        # The second minute has no bar data, so nothing can fill.
        open_orders = [Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), amount=100, filled=0, asset=self.ASSET133)]
        bar_data = self.create_bardata(simulation_dt_func=(lambda : self.minutes[1]))
        orders_txns = list(slippage_model.simulate(bar_data, self.ASSET133, open_orders))
        self.assertEqual(len(orders_txns), 0)

    def test_volume_share_slippage_with_future(self):
        """Future order fills completely with the configured price impact."""
        slippage_model = VolumeShareSlippage(volume_limit=1, price_impact=0.3)
        open_orders = [Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), amount=10, filled=0, asset=self.ASSET1000)]
        bar_data = self.create_bardata(simulation_dt_func=(lambda : self.minutes[0]))
        orders_txns = list(slippage_model.simulate(bar_data, self.ASSET1000, open_orders))
        self.assertEqual(len(orders_txns), 1)
        (_, txn) = orders_txns[0]
        expected_txn = {'price': 5.015, 'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc), 'amount': 10, 'asset': self.ASSET1000, 'type': DATASOURCE_TYPE.TRANSACTION, 'order_id': open_orders[0].id}
        self.assertIsNotNone(txn)
        self.assertEqual(expected_txn, txn.__dict__)
def test_main_prefix(fancy_wheel, tmp_path):
    """Installing with --prefix places all Python files under destdir/prefix."""
    dest = tmp_path / 'dest'
    main([str(fancy_wheel), '-d', str(dest), '-p', '/foo'], 'python -m installer')
    py_files = list(dest.rglob('*.py'))
    prefix_root = str(dest / 'foo')
    for f in py_files:
        assert str(f.parent).startswith(prefix_root), f'path does not respect destdir+prefix: {f}'
    assert {f.stem for f in py_files} == {'__init__', '__main__', 'data'}
    pyc_files = dest.rglob('*.pyc')
    assert {f.name.split('.')[0] for f in pyc_files} == {'__init__', '__main__'}
def testHistogramLUTWidget():
    """Smoke test: show an image in a ViewBox alongside a HistogramLUTWidget."""
    pg.mkQApp()
    win = QtWidgets.QMainWindow()
    win.show()
    central = QtWidgets.QWidget()
    win.setCentralWidget(central)
    layout = QtWidgets.QGridLayout()
    central.setLayout(layout)
    layout.setSpacing(0)
    view = pg.GraphicsView()
    viewbox = pg.ViewBox()
    viewbox.setAspectLocked()
    view.setCentralItem(viewbox)
    layout.addWidget(view, 0, 0, 3, 1)
    hist = pg.HistogramLUTWidget(background='w')
    layout.addWidget(hist, 0, 1)
    # Smooth noise with a grid of brighter pixels every 8 steps.
    data = pg.gaussianFilter(np.random.normal(size=(256, 256, 3)), (20, 20, 0))
    for row in range(32):
        for col in range(32):
            data[row * 8, col * 8] += 0.1
    img = pg.ImageItem(data)
    viewbox.addItem(img)
    viewbox.autoRange()
    hist.setImageItem(img)
    QtWidgets.QApplication.processEvents()
    win.close()
class TestInstallRequires():
    """Integration test: `setup.py install` must try to resolve install_requires."""

    def test_setup_install_includes_dependencies(self, tmp_path, mock_index):
        """Install should fail while attempting to fetch the bogus dependency from the mock index."""
        project_root = (tmp_path / 'project')
        project_root.mkdir(exist_ok=True)
        install_root = (tmp_path / 'install')
        install_root.mkdir(exist_ok=True)
        self.create_project(project_root)
        # Redirect every --install-* path into install_root so nothing
        # escapes the tmp dir.
        cmd = [sys.executable, '-c', '__import__("setuptools").setup()', 'install', '--install-base', str(install_root), '--install-lib', str(install_root), '--install-headers', str(install_root), '--install-scripts', str(install_root), '--install-data', str(install_root), '--install-purelib', str(install_root), '--install-platlib', str(install_root)]
        # __EASYINSTALL_INDEX points dependency resolution at the mock index.
        env = {**os.environ, '__EASYINSTALL_INDEX': mock_index.url}
        cp = subprocess.run(cmd, cwd=str(project_root), env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
        # The bogus dependency cannot resolve, so the install must fail...
        assert (cp.returncode != 0)
        try:
            # ...after actually querying the index for it and reporting the failure.
            assert ('/does-not-exist/' in {r.path for r in mock_index.requests})
            assert next((line for line in cp.stdout.splitlines() if (('not find suitable distribution for' in line) and ('does-not-exist' in line))))
        except Exception:
            # Known flaky environment failure (entropy exhaustion) → xfail,
            # anything else re-raises.
            if ('failed to get random numbers' in cp.stdout):
                pytest.xfail(f'{sys.platform} failure - {cp.stdout}')
            raise

    def create_project(self, root):
        """Write a minimal setup.cfg project whose only dependency cannot exist."""
        config = '\n                [metadata]\n                name = project\n                version = 42\n\n                [options]\n                install_requires = does-not-exist\n                py_modules = mod\n                '
        (root / 'setup.cfg').write_text(DALS(config), encoding='utf-8')
        (root / 'mod.py').touch()
def main(opt: argparse.Namespace) -> None:
    """Train separation, completion and triplet networks jointly on Scan2CAD data.

    Creates a timestamped run directory under opt.output_root, dumps the
    config, then runs the training loop with periodic logging, validation
    and checkpointing.
    """
    utils.set_gpu(opt.gpu)
    device = torch.device('cuda')
    # Run directory named by start time; config saved for reproducibility.
    run_name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    run_path = os.path.join(opt.output_root, run_name)
    print(f'Start training {run_path}')
    print(vars(opt))
    os.makedirs(run_path, exist_ok=True)
    with open(os.path.join(run_path, 'config.json'), 'w') as f:
        json.dump(vars(opt), f, indent=4)
    train_dataset: Dataset = data.Scan2Cad(opt.scan2cad_file, opt.scannet_path, opt.shapenet_path, 'train', ['train'], rotation=opt.rotation_augmentation, flip=opt.flip_augmentation, jitter=opt.jitter_augmentation, transformation=data.to_occupancy_grid, scan_rep='sdf', load_mask=True, add_negatives=True)
    train_dataloader: DataLoader = DataLoader(train_dataset, shuffle=True, batch_size=opt.batch_size, num_workers=opt.num_workers, pin_memory=True)
    val_dataset: Dataset = data.Scan2Cad(opt.scan2cad_file, opt.scannet_path, opt.shapenet_path, 'validation', ['validation'], rotation=opt.rotation_augmentation, flip=opt.flip_augmentation, jitter=opt.jitter_augmentation, transformation=data.to_occupancy_grid, scan_rep='sdf', load_mask=True, add_negatives=True)
    val_dataloader: DataLoader = DataLoader(val_dataset, shuffle=False, batch_size=opt.batch_size, num_workers=opt.num_workers, pin_memory=True)
    # Three sub-networks: FG/BG separation, shape completion, and a triplet
    # embedding for retrieval — optimized jointly with one Adam instance.
    separation_model: nn.Module = SeparationNet(ResNetEncoder(1, [16, 32, 64, 128, 512]), ResNetDecoder(1), ResNetDecoder(1))
    completion_model: nn.Module = HourGlass(ResNetEncoder(1), ResNetDecoder(1))
    triplet_model: nn.Module = TripletNet(ResNetEncoder(1))
    separation_model = separation_model.to(device)
    completion_model = completion_model.to(device)
    triplet_model = triplet_model.to(device)
    model_parameters = ((list(separation_model.parameters()) + list(completion_model.parameters())) + list(triplet_model.parameters()))
    optimizer = optim.Adam(model_parameters, lr=opt.learning_rate, weight_decay=opt.weight_decay)
    # reduction='none' — losses are reduced downstream in forward().
    criterion_separation = nn.BCEWithLogitsLoss(reduction='none')
    criterion_completion = nn.BCEWithLogitsLoss(reduction='none')
    criterion_triplet = nn.TripletMarginLoss(reduction='none', margin=opt.triplet_margin)
    iteration_number = 0
    for epoch in range(opt.num_epochs):
        # Resample negative examples each epoch so triplets don't go stale.
        train_dataloader.dataset.regenerate_negatives()
        for (_, (scan, cad, negative)) in enumerate(train_dataloader):
            # LR decays stepwise at fixed iteration milestones.
            utils.stepwise_learning_rate_decay(optimizer, opt.learning_rate, iteration_number, [40000, 80000, 120000])
            separation_model.train()
            completion_model.train()
            triplet_model.train()
            losses = forward(scan, cad, negative, separation_model, completion_model, triplet_model, criterion_separation, criterion_completion, criterion_triplet, device)
            (loss_foreground, loss_background, loss_completion, loss_triplet) = losses
            loss_total = (((loss_foreground + loss_background) + loss_completion) + loss_triplet)
            optimizer.zero_grad()
            loss_total.backward()
            optimizer.step()
            if ((iteration_number % opt.log_frequency) == (opt.log_frequency - 1)):
                print(f'[E{epoch:04d}, I{iteration_number:05d}] Total: {loss_total: 05.3f}', f' FG: {loss_foreground: 05.3f} BG: {loss_background: 05.3f}', f' Completion: {loss_completion: 05.3f} Triplet: {loss_triplet: 05.3f}')
            if ((iteration_number % opt.validate_frequency) == (opt.validate_frequency - 1)):
                # Full validation pass in eval mode with gradients disabled.
                with torch.no_grad():
                    separation_model.eval()
                    completion_model.eval()
                    triplet_model.eval()
                    val_losses = defaultdict(list)
                    for (_, (scan_v, cad_v, negative_v)) in tqdm(enumerate(val_dataloader), total=len(val_dataloader), leave=False):
                        losses = forward(scan_v, cad_v, negative_v, separation_model, completion_model, triplet_model, criterion_separation, criterion_completion, criterion_triplet, device)
                        (loss_foreground, loss_background, loss_completion, loss_triplet) = losses
                        loss_total = (((loss_foreground + loss_background) + loss_completion) + loss_triplet)
                        val_losses['FG'].append(loss_foreground.item())
                        val_losses['BG'].append(loss_background.item())
                        val_losses['Completion'].append(loss_completion.item())
                        val_losses['Triplet'].append(loss_triplet.item())
                        val_losses['Total'].append(loss_total.item())
                    val_losses_summary = {k: torch.mean(torch.tensor(v)) for (k, v) in val_losses.items()}
                    print(f"-Val [E{epoch:04d}, I{iteration_number:05d}] Total: {val_losses_summary['Total']:05.3f}", f" FG: {val_losses_summary['FG']:05.3f} BG: {val_losses_summary['BG']:05.3f}", f" Completion: {val_losses_summary['Completion']:05.3f}", f" Triplet: {val_losses_summary['Triplet']:05.3f}")
            if ((iteration_number % opt.checkpoint_frequency) == (opt.checkpoint_frequency - 1)):
                # Each network is checkpointed separately under the run directory.
                checkpoint_name = f'{run_name}_{iteration_number:05d}'
                torch.save(separation_model.state_dict(), os.path.join(run_path, f'{checkpoint_name}_separation.pt'))
                torch.save(completion_model.state_dict(), os.path.join(run_path, f'{checkpoint_name}_completion.pt'))
                torch.save(triplet_model.state_dict(), os.path.join(run_path, f'{checkpoint_name}_triplet.pt'))
                print(f'Saved model at {run_path}/{checkpoint_name}')
            iteration_number += 1
class Migration(migrations.Migration):
    # Auto-generated: registers a `non_polymorphic` manager on every
    # benefit-configuration model and updates the base model's Meta options.
    dependencies = [('sponsors', '0094_sponsorship_locked')]
    operations = [migrations.AlterModelOptions(name='benefitfeatureconfiguration', options={'base_manager_name': 'non_polymorphic', 'verbose_name': 'Benefit Feature Configuration', 'verbose_name_plural': 'Benefit Feature Configurations'}), migrations.AlterModelManagers(name='benefitfeatureconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='emailtargetableconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='logoplacementconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='providedfileassetconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='providedtextassetconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='requiredimgassetconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='requiredresponseassetconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='requiredtextassetconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())]), migrations.AlterModelManagers(name='tieredbenefitconfiguration', managers=[('non_polymorphic', django.db.models.manager.Manager()), ('objects', django.db.models.manager.Manager())])]
class WebhookResponseValidator(BaseWebhookResponseValidator):
    """Validate a webhook response against the operation resolved from the request."""

    def iter_errors(self, request: WebhookRequest, response: Response) -> Iterator[Exception]:
        """Yield validation errors; a path-resolution failure is yielded as the sole error."""
        try:
            operation = self._find_path(request)[1]
        except PathError as exc:
            yield exc
        else:
            yield from self._iter_errors(
                response.status_code,
                response.data,
                response.headers,
                response.content_type,
                operation,
            )
class SignIn():
    async def sign_in(self: 'pyrogram.Client', phone_number: str, phone_code_hash: str, phone_code: str) -> Union[('types.User', 'types.TermsOfService', bool)]:
        """Authorize with the confirmation code sent to *phone_number*.

        Returns the signed-in User on success, the TermsOfService when the
        server requires sign-up first, or False when sign-up is required and
        no terms of service were provided.
        """
        cleaned = phone_number.strip(' +')
        result = await self.invoke(
            raw.functions.auth.SignIn(
                phone_number=cleaned,
                phone_code_hash=phone_code_hash,
                phone_code=phone_code,
            )
        )
        if not isinstance(result, raw.types.auth.AuthorizationSignUpRequired):
            # Fully authorized: persist identity in session storage.
            await self.storage.user_id(result.user.id)
            await self.storage.is_bot(False)
            return types.User._parse(self, result.user)
        if result.terms_of_service:
            return types.TermsOfService._parse(terms_of_service=result.terms_of_service)
        return False
class TestDagMethods(unittest.TestCase):
    """Tests for Dag graph queries."""

    def test_exists_trek(self):
        """Build x1 <- x2 -> x3 -> x4 and assert a trek exists from x1 to x4."""
        dag = Dag([GraphNode(name) for name in ('x1', 'x2', 'x3', 'x4')])
        n1, n2, n3, n4 = (dag.get_node('x{}'.format(i)) for i in range(1, 5))
        # Each edge is tail -o-> head (Endpoint(-1) at tail, Endpoint(1) at head).
        for tail, head in ((n2, n1), (n2, n3), (n3, n4)):
            dag.add_edge(Edge(tail, head, Endpoint(-1), Endpoint(1)))
        self.assertTrue(dag.exists_trek(n1, n4))
def model_with_legacy_bn_layers_is_training_bool(is_training, is_fused):
    """Build a functional Keras model interleaving legacy BatchNormalization layers.

    The BN layers are applied via the deprecated `layer.apply(x, training=...)`
    call with a boolean `training` argument, including layers created inside
    nested tf.compat.v1 variable scopes.

    Args:
        is_training: bool passed as `training=` to each BN layer's `apply`.
        is_fused: whether each BN layer uses the fused implementation.

    Returns:
        A `tf.keras.Model` mapping a 32x32x3 input to a 10-way softmax output.
    """
    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    layer = normalization_layers.BatchNormalization(momentum=0.3, epsilon=0.65, fused=is_fused)
    x = layer.apply(x, training=is_training)
    x = tf.keras.layers.Conv2D(16, (2, 2))(x)
    # Nested scopes exercise BN variable creation under scoped names.
    with tf.compat.v1.variable_scope('foo'):
        with tf.compat.v1.variable_scope('bar'):
            layer = normalization_layers.BatchNormalization(momentum=0.4, epsilon=0.25, fused=is_fused)
            x = layer.apply(x, training=is_training)
    x = tf.nn.relu(x)
    layer = normalization_layers.BatchNormalization(momentum=0.5, epsilon=0.35, fused=is_fused)
    x = layer.apply(x, training=is_training)
    x = tf.keras.layers.Flatten()(x)
    outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='keras_model_functional')(x)
    # Bug fix: the original computed `outputs` but fell off the end, implicitly
    # returning None instead of the constructed model.
    return tf.keras.Model(inputs=inputs, outputs=outputs)
def solve():
    """Assign one of five groups (A-E) to each of 20 desks.

    Each group gets exactly 4 desks, and no 2x2 window of adjacent desks
    (per the STUDENTDESKS layout) may contain two desks from the same group.
    Returns a single solution mapping desk number -> group, or None.
    """
    problem = Problem()
    problem.addVariables(range(1, 21), ['A', 'B', 'C', 'D', 'E'])
    # Exactly four desks per group.
    for group in ('A', 'B', 'C', 'D', 'E'):
        problem.addConstraint(SomeInSetConstraint([group], 4, True))
    # Every 2x2 neighborhood in the desk layout must be all-different
    # (zero entries mark empty positions and are skipped).
    for row in range(len(STUDENTDESKS) - 1):
        for col in range(len(STUDENTDESKS[row]) - 1):
            window = [
                STUDENTDESKS[row][col],
                STUDENTDESKS[row][col + 1],
                STUDENTDESKS[row + 1][col],
                STUDENTDESKS[row + 1][col + 1],
            ]
            problem.addConstraint(AllDifferentConstraint(), [d for d in window if d])
    return problem.getSolution()
def featurize(smi, fingerprint, radius, length) -> Optional[np.ndarray]:
    """Compute a molecular fingerprint for a SMILES string as a numpy array.

    Returns None if the SMILES cannot be parsed; raises NotImplementedError
    for an unrecognized fingerprint type.
    """
    mol = Chem.MolFromSmiles(smi)
    if mol is None:
        return None
    # Dispatch table: fingerprint name -> fingerprint constructor.
    makers = {
        'morgan': lambda m: rdmd.GetMorganFingerprintAsBitVect(
            m, radius=radius, nBits=length, useChirality=True),
        'pair': lambda m: rdmd.GetHashedAtomPairFingerprintAsBitVect(
            m, minLength=1, maxLength=1 + radius, nBits=length),
        'rdkit': lambda m: rdmd.RDKFingerprint(
            m, minPath=1, maxPath=1 + radius, fpSize=length),
        'maccs': lambda m: rdmd.GetMACCSKeysFingerprint(m),
        'map4': lambda m: map4.MAP4Calculator(
            dimensions=length, radius=radius, is_folded=True).calculate(m),
    }
    try:
        fp = makers[fingerprint](mol)
    except KeyError:
        raise NotImplementedError(f'Unrecognized fingerprint: "{fingerprint}"') from None
    X = np.empty(len(fp))
    ConvertToNumpyArray(fp, X)
    return X
class inp(SWMMIOFile):
    """Object representing a SWMM input (.inp) file.

    Each model section (e.g. [JUNCTIONS], [CONDUITS]) is exposed as a lazily
    loaded pandas DataFrame property backed by a private ``_<section>_df``
    cache. Assigning to a property replaces the cached DataFrame so that
    :meth:`save` writes the modified data back to disk.

    NOTE(review): the ``@property``/``@<name>.setter`` decorators were
    restored here — the source showed duplicate same-named getter/setter
    defs and bare ``_usage.setter`` expressions, and ``headers`` reads
    ``self.options.loc[...]``, which only works when ``options`` is a
    property returning a DataFrame.
    """

    def __init__(self, file_path):
        # DataFrame caches for each INP section; populated on first property access.
        self._options_df = None
        self._files_df = None
        self._raingages_df = None
        self._evaporation_df = None
        self._losses_df = None
        self._report_df = None
        self._conduits_df = None
        self._xsections_df = None
        self._lid_usage_df = None
        self._pollutants_df = None
        self._landuses_df = None
        self._buildup_df = None
        self._washoff_df = None
        self._coverages_df = None
        self._loadings_df = None
        self._pumps_df = None
        self._orifices_df = None
        self._weirs_df = None
        self._junctions_df = None
        self._outfalls_df = None
        self._storage_df = None
        self._dividers_df = None
        self._coordinates_df = None
        self._dwf_df = None
        self._rdii_df = None
        self._hydrographs_df = None
        self._vertices_df = None
        self._polygons_df = None
        self._subcatchments_df = None
        self._subareas_df = None
        self._infiltration_df = None
        self._aquifers_df = None
        self._groundwater_df = None
        self._inp_section_details = None
        self._inflows_df = None
        self._curves_df = None
        self._timeseries_df = None
        self._tags_df = None
        self._streets_df = None
        self._inlets_df = None
        self._inlet_usage_df = None
        SWMMIOFile.__init__(self, file_path)
        self._sections = ['[OPTIONS]', '[FILES]', '[RAINGAGES]', '[EVAPORATION]', '[LOSSES]', '[REPORT]', '[CONDUITS]', '[XSECTIONS]', '[POLLUTANTS]', '[LANDUSES]', '[BUILDUP]', '[WASHOFF]', '[COVERAGES]', '[LOADINGS]', '[PUMPS]', '[ORIFICES]', '[WEIRS]', '[JUNCTIONS]', '[STORAGE]', '[DIVIDERS]', '[OUTFALLS]', '[VERTICES]', '[SUBCATCHMENTS]', '[SUBAREAS]', '[INFILTRATION]', '[AQUIFERS]', '[GROUNDWATER]', '[CURVES]', '[COORDINATES]', '[DWF]', '[RDII]', '[HYDROGRAPHS]', '[INFLOWS]', '[Polygons]', '[TIMESERIES]', '[LID_USAGE]', '[TAGS]', '[STREETS]', '[INLETS]', '[INLET_USAGE]']

    def save(self, target_path=None):
        """Save the inp file, writing back any cached (possibly modified) sections.

        :param target_path: optional destination path; when omitted the source
            file is overwritten in place.
        """
        from swmmio.utils.modify_model import replace_inp_section
        import shutil
        if (target_path is not None):
            shutil.copyfile(self.path, target_path)
        else:
            target_path = self.path
        for section in self._sections:
            # '[OPTIONS]' -> 'options' -> '_options_df'
            sect_id = section.translate({ord(i): None for i in '[]'}).lower()
            sect_id_private = '_{}_df'.format(sect_id)
            data = getattr(self, sect_id_private)
            if (data is not None):
                replace_inp_section(target_path, section, data)

    def validate(self):
        """Remove invalid model elements (e.g. links referencing missing nodes)."""
        drop_invalid_model_elements(self)

    def trim_to_nodes(self, node_ids):
        """Restrict node-related sections to only the given node ids."""
        for section in ['junctions', 'storage', 'outfalls', 'coordinates']:
            trim_section_to_nodes(self, node_ids, node_type=section)

    @property
    def headers(self):
        """Column details for each section, with [INFILTRATION] columns
        resolved from the model's INFILTRATION option."""
        if (self._inp_section_details is None):
            self._inp_section_details = get_inp_sections_details(self.path, include_brackets=True)
            infil_type = self.options.loc[('INFILTRATION', 'Value')]
            infil_cols = INFILTRATION_COLS[infil_type]
            self._inp_section_details['[INFILTRATION]'] = list(infil_cols)
        return self._inp_section_details

    @property
    def options(self):
        """[OPTIONS] section as a DataFrame."""
        if (self._options_df is None):
            self._options_df = get_inp_options_df(self.path)
        return self._options_df

    @options.setter
    def options(self, df):
        self._options_df = df
        # Changing options may change the infiltration model, which changes
        # the [INFILTRATION] column layout — invalidate dependent caches.
        infil_type = df.loc[('INFILTRATION', 'Value')]
        infil_cols = INFILTRATION_COLS[infil_type]
        h = dict(INP_OBJECTS)
        h['[INFILTRATION]'] = list(infil_cols)
        self._inp_section_details = h
        self._infiltration_df = None

    @property
    def files(self):
        """[FILES] section as a DataFrame (index reset for display)."""
        if (self._files_df is None):
            self._files_df = dataframe_from_inp(self.path, '[FILES]')
        return self._files_df.reset_index()

    @files.setter
    def files(self, df):
        first_col = df.columns[0]
        self._files_df = df.set_index(first_col)

    @property
    def raingages(self):
        if (self._raingages_df is not None):
            return self._raingages_df
        self._raingages_df = dataframe_from_inp(self.path, 'raingages')
        return self._raingages_df

    @raingages.setter
    def raingages(self, df):
        self._raingages_df = df

    @property
    def evaporation(self):
        if (self._evaporation_df is not None):
            return self._evaporation_df
        self._evaporation_df = dataframe_from_inp(self.path, 'evaporation')
        return self._evaporation_df

    @evaporation.setter
    def evaporation(self, df):
        self._evaporation_df = df

    @property
    def losses(self):
        if (self._losses_df is not None):
            return self._losses_df
        self._losses_df = dataframe_from_inp(self.path, 'losses')
        return self._losses_df

    @losses.setter
    def losses(self, df):
        self._losses_df = df

    @property
    def report(self):
        if (self._report_df is None):
            self._report_df = dataframe_from_inp(self.path, 'report')
        return self._report_df

    @report.setter
    def report(self, df):
        self._report_df = df

    @property
    def conduits(self):
        if (self._conduits_df is None):
            self._conduits_df = dataframe_from_inp(self.path, '[CONDUITS]')
        return self._conduits_df

    @conduits.setter
    def conduits(self, df):
        self._conduits_df = df

    @property
    def xsections(self):
        if (self._xsections_df is None):
            self._xsections_df = dataframe_from_inp(self.path, '[XSECTIONS]')
        return self._xsections_df

    @xsections.setter
    def xsections(self, df):
        self._xsections_df = df

    @property
    def lid_usage(self):
        if (self._lid_usage_df is None):
            self._lid_usage_df = dataframe_from_inp(self.path, '[LID_USAGE]')
        return self._lid_usage_df

    @lid_usage.setter
    def lid_usage(self, df):
        self._lid_usage_df = df

    @property
    def pollutants(self):
        if (self._pollutants_df is not None):
            return self._pollutants_df
        self._pollutants_df = dataframe_from_inp(self.path, 'pollutants')
        return self._pollutants_df

    @pollutants.setter
    def pollutants(self, df):
        self._pollutants_df = df

    @property
    def landuses(self):
        if (self._landuses_df is None):
            self._landuses_df = dataframe_from_inp(self.path, 'LANDUSES')
        return self._landuses_df

    @landuses.setter
    def landuses(self, df):
        self._landuses_df = df

    @property
    def buildup(self):
        if (self._buildup_df is None):
            self._buildup_df = dataframe_from_inp(self.path, 'BUILDUP')
        return self._buildup_df

    @buildup.setter
    def buildup(self, df):
        self._buildup_df = df

    @property
    def washoff(self):
        if (self._washoff_df is None):
            self._washoff_df = dataframe_from_inp(self.path, 'WASHOFF')
        return self._washoff_df

    @washoff.setter
    def washoff(self, df):
        self._washoff_df = df

    @property
    def coverages(self):
        if (self._coverages_df is None):
            self._coverages_df = dataframe_from_inp(self.path, 'coverages')
        return self._coverages_df

    @coverages.setter
    def coverages(self, df):
        self._coverages_df = df

    @property
    def loadings(self):
        if (self._loadings_df is None):
            self._loadings_df = dataframe_from_inp(self.path, 'loadings')
        return self._loadings_df

    @loadings.setter
    def loadings(self, df):
        self._loadings_df = df

    @property
    def pumps(self):
        if (self._pumps_df is None):
            self._pumps_df = dataframe_from_inp(self.path, '[PUMPS]')
        return self._pumps_df

    @pumps.setter
    def pumps(self, df):
        self._pumps_df = df

    @property
    def orifices(self):
        if (self._orifices_df is None):
            self._orifices_df = dataframe_from_inp(self.path, '[ORIFICES]')
        return self._orifices_df

    @orifices.setter
    def orifices(self, df):
        self._orifices_df = df

    @property
    def weirs(self):
        if (self._weirs_df is None):
            self._weirs_df = dataframe_from_inp(self.path, '[WEIRS]')
        return self._weirs_df

    @weirs.setter
    def weirs(self, df):
        self._weirs_df = df

    @property
    def junctions(self):
        if (self._junctions_df is None):
            self._junctions_df = dataframe_from_inp(self.path, 'JUNCTIONS')
        return self._junctions_df

    @junctions.setter
    def junctions(self, df):
        self._junctions_df = df

    @property
    def outfalls(self):
        if (self._outfalls_df is None):
            self._outfalls_df = dataframe_from_inp(self.path, '[OUTFALLS]')
        return self._outfalls_df

    @outfalls.setter
    def outfalls(self, df):
        self._outfalls_df = df

    @property
    def storage(self):
        if (self._storage_df is None):
            self._storage_df = dataframe_from_inp(self.path, '[STORAGE]')
        return self._storage_df

    @storage.setter
    def storage(self, df):
        self._storage_df = df

    @property
    def dividers(self):
        if (self._dividers_df is None):
            self._dividers_df = dataframe_from_inp(self.path, '[DIVIDERS]')
        return self._dividers_df

    @dividers.setter
    def dividers(self, df):
        self._dividers_df = df

    @property
    def subcatchments(self):
        if (self._subcatchments_df is None):
            self._subcatchments_df = dataframe_from_inp(self.path, '[SUBCATCHMENTS]')
        return self._subcatchments_df

    @subcatchments.setter
    def subcatchments(self, df):
        self._subcatchments_df = df

    @property
    def subareas(self):
        if (self._subareas_df is None):
            self._subareas_df = dataframe_from_inp(self.path, '[SUBAREAS]')
        return self._subareas_df

    @subareas.setter
    def subareas(self, df):
        self._subareas_df = df

    @property
    def infiltration(self):
        if (self._infiltration_df is None):
            self._infiltration_df = dataframe_from_inp(self.path, 'infiltration')
        return self._infiltration_df

    @infiltration.setter
    def infiltration(self, df):
        self._infiltration_df = df

    @property
    def aquifers(self):
        if (self._aquifers_df is None):
            self._aquifers_df = dataframe_from_inp(self.path, 'aquifers')
        return self._aquifers_df

    @aquifers.setter
    def aquifers(self, df):
        self._aquifers_df = df

    @property
    def groundwater(self):
        if (self._groundwater_df is None):
            self._groundwater_df = dataframe_from_inp(self.path, 'groundwater')
        return self._groundwater_df

    @groundwater.setter
    def groundwater(self, df):
        self._groundwater_df = df

    @property
    def coordinates(self):
        if (self._coordinates_df is not None):
            return self._coordinates_df
        self._coordinates_df = dataframe_from_inp(self.path, 'COORDINATES')
        return self._coordinates_df

    @coordinates.setter
    def coordinates(self, df):
        self._coordinates_df = df

    @property
    def dwf(self):
        if (self._dwf_df is not None):
            return self._dwf_df
        self._dwf_df = dataframe_from_inp(self.path, 'DWF')
        return self._dwf_df

    @dwf.setter
    def dwf(self, df):
        self._dwf_df = df

    @property
    def rdii(self):
        if (self._rdii_df is None):
            self._rdii_df = dataframe_from_inp(self.path, '[RDII]')
        return self._rdii_df

    @rdii.setter
    def rdii(self, df):
        self._rdii_df = df

    @property
    def hydrographs(self):
        if (self._hydrographs_df is None):
            self._hydrographs_df = dataframe_from_inp(self.path, 'hydrographs')
        return self._hydrographs_df

    @hydrographs.setter
    def hydrographs(self, df):
        self._hydrographs_df = df

    @property
    def vertices(self):
        if (self._vertices_df is not None):
            return self._vertices_df
        self._vertices_df = dataframe_from_inp(self.path, 'VERTICES')
        return self._vertices_df

    @vertices.setter
    def vertices(self, df):
        self._vertices_df = df

    @property
    def inflows(self):
        if (self._inflows_df is not None):
            return self._inflows_df
        # Quoted fields are protected with a placeholder during parsing, then
        # restored as NaN (empty inflow fields).
        inf = dataframe_from_inp(self.path, 'INFLOWS', quote_replace='_!!!!_')
        self._inflows_df = inf.replace('_!!!!_', np.nan)
        return self._inflows_df

    @inflows.setter
    def inflows(self, df):
        self._inflows_df = df

    @property
    def polygons(self):
        if (self._polygons_df is not None):
            return self._polygons_df
        self._polygons_df = dataframe_from_inp(self.path, '[Polygons]')
        return self._polygons_df

    @polygons.setter
    def polygons(self, df):
        self._polygons_df = df

    @property
    def curves(self):
        if (self._curves_df is not None):
            return self._curves_df
        self._curves_df = create_dataframe_multi_index(self.path, '[CURVES]')
        return self._curves_df

    @curves.setter
    def curves(self, df):
        self._curves_df = df

    @property
    def timeseries(self):
        if (self._timeseries_df is not None):
            return self._timeseries_df
        self._timeseries_df = create_dataframe_multi_index(self.path, '[TIMESERIES]')
        return self._timeseries_df

    @timeseries.setter
    def timeseries(self, df):
        self._timeseries_df = df

    @property
    def tags(self):
        if (self._tags_df is None):
            self._tags_df = dataframe_from_inp(self.path, '[TAGS]')
        return self._tags_df

    @tags.setter
    def tags(self, df):
        self._tags_df = df

    @property
    def streets(self):
        if (self._streets_df is None):
            self._streets_df = dataframe_from_inp(self.path, '[STREETS]')
        return self._streets_df

    @streets.setter
    def streets(self, df):
        self._streets_df = df

    @property
    def inlets(self):
        if (self._inlets_df is None):
            self._inlets_df = dataframe_from_inp(self.path, '[INLETS]')
        return self._inlets_df

    @inlets.setter
    def inlets(self, df):
        self._inlets_df = df

    @property
    def inlet_usage(self):
        if (self._inlet_usage_df is None):
            self._inlet_usage_df = dataframe_from_inp(self.path, '[INLET_USAGE]')
        return self._inlet_usage_df

    @inlet_usage.setter
    def inlet_usage(self, df):
        self._inlet_usage_df = df
# Restored decorator: the source showed the stripped bare call
# `_optimizer('adafactor')`; fairseq registers optimizers via
# `@register_optimizer(...)`.
@register_optimizer('adafactor')
class FairseqAdafactor(LegacyFairseqOptimizer):
    """Adafactor optimizer for fairseq (registered as 'adafactor')."""

    def __init__(self, args, params):
        super().__init__(args)
        print('using adafactor')
        # optimizer_config is a property, so it is read without parentheses.
        self._optimizer = Adafactor(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar='E', help='epsilons for Adafactor optimizer')
        parser.add_argument('--clip-threshold', type=float, default=1.0, metavar='C', help='threshold for clipping update root mean square')
        parser.add_argument('--decay-rate', type=float, default=(- 0.8), metavar='D', help='decay rate of the second moment estimator')
        parser.add_argument('--beta1', type=float, default=None, metavar='B', help='beta for first moment estimator. Optional')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter')
        parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep,otherwise use external learning rate')
        parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule')

    @property
    def optimizer_config(self):
        """Return a kwarg dict used to instantiate the optimizer (and to
        override stored optimizer args when loading checkpoints)."""
        return {
            'lr': self.args.lr[0],
            # NOTE(review): eval() of a CLI string is unsafe for untrusted
            # input; ast.literal_eval would be safer. Kept to preserve behavior.
            'eps': eval(self.args.adafactor_eps),
            'clip_threshold': self.args.clip_threshold,
            'decay_rate': self.args.decay_rate,
            'beta1': self.args.beta1,
            'weight_decay': self.args.weight_decay,
            'scale_parameter': self.args.scale_parameter,
            'relative_step': self.args.relative_step,
            'warmup_init': self.args.warmup_init,
        }
def build_model(cfg, isTrain=True, dataset_num_overwrite=None):
    """Construct a Generatic_Model from the config.

    In training mode the model is sized from SIZE_TRAIN and, if a pretrained
    checkpoint is found under OUTPUT_DIR, its weights are loaded; in test mode
    the model is sized from SIZE_TEST.
    """
    dataset_num = dataset_num_overwrite if dataset_num_overwrite is not None else len(cfg.DATASETS.TRAIN)
    size = cfg.INPUT.SIZE_TRAIN if isTrain else cfg.INPUT.SIZE_TEST
    model = Generatic_Model(
        cfg, size[0], size[1], cfg.MODEL.FEATURE_DIM,
        use_dir=cfg.INPUT.USE_DIR,
        dataset_num=dataset_num,
        use_rgb=cfg.INPUT.USE_RGB,
        use_mask=cfg.DATASETS.MASK,
        use_pointnet=(not cfg.MODEL.NO_FEATURE_ONLY_RGB),
        use_depth=cfg.MODEL.USE_DEPTH,
        use_pc_norm=cfg.MODEL.USE_PC_NORM,
    )
    if isTrain:
        pretrain_model = find_pretrain_file(cfg.OUTPUT_DIR, cfg.MODEL.RESUME_EPOCH)
        if pretrain_model is not None:
            logger = logging.getLogger('rendering_model.train')
            logger.info('Load pretrain model {}.'.format(pretrain_model))
            model.load_state_dict(
                torch.load(os.path.join(cfg.OUTPUT_DIR, pretrain_model), map_location='cpu'))
    return model
class TwilioViewTestCase(TestCase):
    """Exercise the twilio_view decorator on function- and class-based views:
    allowed request methods, signature validation, caller blacklisting, and
    the DJANGO_TWILIO_FORGERY_PROTECTION override."""

    def setUp(self):
        # NOTE(review): these phone numbers look truncated ('+') — confirm
        # against the original fixtures; left unchanged to preserve behavior.
        self.regular_caller = G(Caller, phone_number='+', blacklisted=False)
        self.blocked_caller = G(Caller, phone_number='+', blacklisted=True)
        self.factory = TwilioRequestFactory(token=settings.TWILIO_AUTH_TOKEN, enforce_csrf_checks=True)
        # One URI per decorated test view, collected for the method-dispatch tests.
        self.uris = []
        self.str_uri = '/test_app/decorators/str_view/'
        self.uris.append(self.str_uri)
        self.str_class_uri = '/test_app/decorators/str_class_view/'
        self.uris.append(self.str_class_uri)
        self.bytes_uri = '/test_app/decorators/bytes_view/'
        self.uris.append(self.bytes_uri)
        self.bytes_class_uri = '/test_app/decorators/bytes_class_view/'
        self.uris.append(self.bytes_class_uri)
        self.verb_uri = '/test_app/decorators/verb_view/'
        self.uris.append(self.verb_uri)
        self.verb_class_uri = '/test_app/decorators/verb_class_view/'
        self.uris.append(self.verb_class_uri)
        self.response_uri = '/test_app/decorators/response_view/'
        self.uris.append(self.response_uri)
        self.response_class_uri = '/test_app/decorators/response_class_view/'
        self.uris.append(self.response_class_uri)

    def _assertStatusCode(self, actual_code, expected_code, uri):
        """Status-code assertion that reports which URI failed."""
        return self.assertEqual(actual_code, expected_code, ('%s != %s. Bad uri is: %s' % (actual_code, expected_code, uri)))

    def test_requires_get_or_post(self):
        c = Client(enforce_csrf_checks=True)
        with override_settings(DEBUG=False):
            for uri in self.uris:
                # Unsigned GET/POST are forbidden (403); other verbs are 405.
                self._assertStatusCode(c.get(uri).status_code, 403, uri)
                self._assertStatusCode(c.post(uri).status_code, 403, uri)
                self._assertStatusCode(c.head(uri).status_code, 405, uri)
                self._assertStatusCode(c.options(uri).status_code, 405, uri)
                self._assertStatusCode(c.put(uri).status_code, 405, uri)
                self._assertStatusCode(c.delete(uri).status_code, 405, uri)

    def test_all_return_statuses_when_debug_true(self):
        c = Client(enforce_csrf_checks=True)
        with override_settings(DEBUG=True):
            for uri in self.uris:
                self._assertStatusCode(c.get(uri).status_code, 200, uri)
                self._assertStatusCode(c.post(uri).status_code, 200, uri)
                self._assertStatusCode(c.head(uri).status_code, 200, uri)
                self._assertStatusCode(c.options(uri).status_code, 200, uri)
                if uri.endswith('class_view/'):
                    # Class-based views still reject verbs they don't define.
                    self._assertStatusCode(c.put(uri).status_code, 405, uri)
                    self._assertStatusCode(c.delete(uri).status_code, 405, uri)
                else:
                    self._assertStatusCode(c.put(uri).status_code, 200, uri)
                    self._assertStatusCode(c.delete(uri).status_code, 200, uri)

    def test_allows_post(self):
        request = self.factory.post(self.str_uri)
        self.assertEqual(str_view(request).status_code, 200)

    def test_allows_get(self):
        request = self.factory.get(self.str_uri)
        self.assertEqual(str_view(request).status_code, 200)

    def test_class_view_allows_post(self):
        request = self.factory.post(self.str_class_uri)
        self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_decorator_preserves_metadata(self):
        self.assertEqual(str_view.__name__, 'str_view')

    def test_class_decorator_preserves_metadata(self):
        self.assertEqual(StrView.dispatch.__name__, 'dispatch')

    # Restored decorators: the source showed the stripped bare calls
    # `_settings(TWILIO_ACCOUNT_SID=None)` / `_settings(TWILIO_AUTH_TOKEN=None)`.
    @override_settings(TWILIO_ACCOUNT_SID=None)
    @override_settings(TWILIO_AUTH_TOKEN=None)
    def test_missing_settings_return_forbidden(self):
        with override_settings(DEBUG=False):
            self.assertEqual(self.client.post(self.str_uri).status_code, 403)
            self.assertEqual(self.client.post(self.str_class_uri).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(self.client.post(self.str_uri).status_code, 200)
            self.assertEqual(self.client.post(self.str_class_uri).status_code, 200)

    def test_missing_signature_returns_forbidden(self):
        with override_settings(DEBUG=False):
            self.assertEqual(self.client.post(self.str_uri).status_code, 403)
            self.assertEqual(self.client.post(self.str_class_uri).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(self.client.post(self.str_uri).status_code, 200)
            self.assertEqual(self.client.post(self.str_class_uri).status_code, 200)

    def test_incorrect_signature_returns_forbidden(self):
        with override_settings(DEBUG=False):
            request = self.factory.post(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(str_view(request).status_code, 200)
        with override_settings(DEBUG=False):
            request = self.factory.get(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(str_view(request).status_code, 200)

    def test_incorrect_signature_returns_forbidden_class_view(self):
        with override_settings(DEBUG=False):
            request = self.factory.post(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(StrView.as_view()(request).status_code, 200)
        with override_settings(DEBUG=False):
            request = self.factory.get(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 403)
        with override_settings(DEBUG=True):
            self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_no_from_field(self):
        request = self.factory.post(self.str_uri)
        self.assertEqual(str_view(request).status_code, 200)

    def test_no_form_field_class_view(self):
        request = self.factory.post(self.str_class_uri)
        self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_from_field_no_caller(self):
        request = self.factory.post(self.str_uri, {'From': '+'})
        self.assertEqual(str_view(request).status_code, 200)

    # Fixed: the original name 'tst_form_field_no_caller_class_view' lacked
    # the 'test_' prefix, so the test runner silently skipped it.
    def test_form_field_no_caller_class_view(self):
        request = self.factory.post(self.str_class_uri, {'From': '+'})
        self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_blacklist_works(self):
        with override_settings(DEBUG=False):
            request = self.factory.post(self.str_uri, {'From': str(self.blocked_caller.phone_number)})
            response = str_view(request)
            r = Message()
            self.assertEqual(response.content, str(r).encode('utf-8'))
        with override_settings(DEBUG=True):
            request = self.factory.post(self.str_uri, {'From': str(self.blocked_caller.phone_number)})
            response = str_view(request)
            r = Message()
            self.assertEqual(response.content, str(r).encode('utf-8'))
        with override_settings(DEBUG=False):
            request = self.factory.post(self.verb_uri, {'From': str(self.blocked_caller.phone_number), 'callsid': 'some-call-sid'})
            response = verb_view(request)
            # Blacklisted callers on a verb view get an empty <Reject/> response.
            r = VoiceResponse()
            r.reject()
            self.assertEqual(response.content, str(r).encode('utf-8'))
        with override_settings(DEBUG=True):
            request = self.factory.post(self.verb_uri, {'From': str(self.blocked_caller.phone_number), 'callsid': 'some-call-sid'})
            response = verb_view(request)
            r = VoiceResponse()
            r.reject()
            self.assertEqual(response.content, str(r).encode('utf-8'))

    def test_black_list_works_class_view(self):
        with override_settings(DEBUG=False):
            request = self.factory.post(self.str_class_uri, {'From': str(self.blocked_caller.phone_number)})
            response = StrView.as_view()(request)
            r = Message()
            self.assertEqual(response.content, str(r).encode('utf-8'))
        with override_settings(DEBUG=True):
            request = self.factory.post(self.str_class_uri, {'From': str(self.blocked_caller.phone_number)})
            response = StrView.as_view()(request)
            r = Message()
            self.assertEqual(response.content, str(r).encode('utf-8'))

    def test_black_list_works_verb_class_view(self):
        with override_settings(DEBUG=False):
            request = self.factory.post(self.verb_class_uri, {'From': str(self.blocked_caller.phone_number), 'callsid': 'some-call-sid'})
            response = VerbView.as_view()(request)
            r = VoiceResponse()
            r.reject()
            self.assertEqual(response.content, str(r).encode('utf-8'))
        with override_settings(DEBUG=True):
            request = self.factory.post(self.verb_class_uri, {'From': str(self.blocked_caller.phone_number), 'callsid': 'some-call-sid'})
            response = VerbView.as_view()(request)
            r = VoiceResponse()
            r.reject()
            self.assertEqual(response.content, str(r).encode('utf-8'))

    def test_decorator_modifies_str(self):
        request = self.factory.post(self.str_uri)
        self.assertIsInstance(str_view(request), HttpResponse)

    def test_decorator_modifies_str_class_view(self):
        request = self.factory.post(self.str_class_uri)
        self.assertIsInstance(StrView.as_view()(request), HttpResponse)

    def test_decorator_modifies_bytes(self):
        request = self.factory.post(self.bytes_uri)
        self.assertIsInstance(bytes_view(request), HttpResponse)

    def test_decorator_modifies_bytes_class_view(self):
        request = self.factory.post(self.bytes_class_uri)
        self.assertIsInstance(BytesView.as_view()(request), HttpResponse)

    def test_decorator_modifies_verb(self):
        request = self.factory.post(self.verb_uri)
        self.assertIsInstance(verb_view(request), HttpResponse)

    def test_decorator_modifies_verb_class_view(self):
        request = self.factory.post(self.verb_class_uri)
        self.assertIsInstance(VerbView.as_view()(request), HttpResponse)

    # Fixed: the original method name was truncated
    # ('def test_decorator_preserves_'), which is a syntax error. Restored to
    # a valid identifier — NOTE(review): confirm against the original suite.
    def test_decorator_preserves_http_response(self):
        request = self.factory.post(self.response_uri)
        self.assertIsInstance(response_view(request), HttpResponse)

    # Fixed: same truncation as above, class-based-view variant.
    def test_decorator_preserves_http_response_class_view(self):
        request = self.factory.post(self.response_class_uri)
        self.assertIsInstance(ResponseView.as_view()(request), HttpResponse)

    def test_override_forgery_protection_off_debug_off(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=False, DEBUG=False):
            request = self.factory.post(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 200)

    def test_override_forgery_protection_off_debug_off_class_view(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=False, DEBUG=False):
            request = self.factory.post(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_override_forgery_protection_off_debug_on(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=False, DEBUG=True):
            request = self.factory.post(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 200)

    def test_override_forgery_protection_off_debug_on_class_view(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=False, DEBUG=True):
            request = self.factory.post(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 200)

    def test_override_forgery_protection_on_debug_off(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=True, DEBUG=False):
            request = self.factory.post(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 403)

    def test_override_forgery_protection_on_debug_off_class_view(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=True, DEBUG=False):
            request = self.factory.post(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 403)

    def test_override_forgery_protection_on_debug_on(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=True, DEBUG=True):
            request = self.factory.post(self.str_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(str_view(request).status_code, 403)

    def test_override_forgery_protection_on_debug_on_class_view(self):
        with override_settings(DJANGO_TWILIO_FORGERY_PROTECTION=True, DEBUG=True):
            request = self.factory.post(self.str_class_uri, HTTP_X_TWILIO_SIGNATURE='fake_signature')
            self.assertEqual(StrView.as_view()(request).status_code, 403)
class DependenciesModel(QtCore.QAbstractTableModel):
    """Read-only table model listing installed distributions: name, version, license."""

    _headers = ('Dependency', 'Version', 'License')

    def __init__(self, parent):
        super().__init__(parent)
        # Snapshot of installed distributions, taken once at construction.
        self._packages = []
        for dist in importlib.metadata.distributions():
            self._packages.append((dist.name, dist.version, _get_license(dist)))

    def columnCount(self, parent: QtCore.QModelIndex=...) -> int:
        return len(self._headers)

    def headerData(self, section: int, orientation: Qt.Orientation, role: int=...) -> Any:
        if role == Qt.DisplayRole:
            # Horizontal headers are column titles; vertical headers are row numbers.
            return self._headers[section] if orientation == Qt.Horizontal else section
        return None

    def rowCount(self, parent: QtCore.QModelIndex=...) -> int:
        return len(self._packages)

    def data(self, index: QtCore.QModelIndex, role: int=...) -> Any:
        if role not in {Qt.DisplayRole, Qt.EditRole}:
            return None
        row, col = index.row(), index.column()
        return self._packages[row][col] if row < len(self._packages) else ''
def validate(val_list, model, criterion):
    """Evaluate the crowd-counting model on *val_list* and return the MAE
    between predicted and ground-truth counts (averaged over batches)."""
    print('begin test')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    test_loader = torch.utils.data.DataLoader(
        dataset.listDataset(val_list, shuffle=False, transform=transform, train=False),
        batch_size=args.batch_size,
    )
    model.eval()
    mae = 0
    for img, target in test_loader:
        prediction = model(Variable(img.cuda()))
        # Predicted count is the sum of the density map.
        mae += abs(prediction.data.sum() - target.sum().type(torch.FloatTensor).cuda())
    mae = mae / len(test_loader)
    print(' * MAE {mae:.3f} '.format(mae=mae))
    return mae
class _BaseAutoModelClass():
    """Base class for the ``AutoModel*`` factory classes.

    Never instantiated directly: ``__init__`` raises, and models are created
    through the :meth:`from_config` / :meth:`from_pretrained` classmethods,
    which dispatch on the config type via ``cls._model_mapping``.
    """

    # Mapping from config classes to model classes; populated by subclasses.
    _model_mapping = None

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_config(config)` methods.')

    @classmethod
    def from_config(cls, config, **kwargs):
        """Instantiate a model from a config object.

        Bug fix: this method takes ``cls`` and is documented (in the
        ``__init__`` error message) as being called on the class, but the
        ``@classmethod`` decorator was missing.

        Supports remote code via ``config.auto_map`` when the caller opts in
        with ``trust_remote_code=True``.
        """
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        if hasattr(config, 'auto_map') and (cls.__name__ in config.auto_map):
            if not trust_remote_code:
                raise ValueError('Loading this model requires you to execute the modeling file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
            if kwargs.get('revision', None) is None:
                logger.warning('Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.')
            # auto_map entries look like "<module_file>.<class_name>".
            class_ref = config.auto_map[cls.__name__]
            (module_file, class_name) = class_ref.split('.')
            model_class = get_class_from_dynamic_module(config.name_or_path, (module_file + '.py'), class_name, **kwargs)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)
        raise ValueError(f'''Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.
Model type should be one of {', '.join((c.__name__ for c in cls._model_mapping.keys()))}.''')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate a pretrained model, resolving the concrete class from
        the (possibly auto-loaded) config.

        Bug fix: missing ``@classmethod`` decorator, as for
        :meth:`from_config`.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        kwargs['_from_auto'] = True
        if not isinstance(config, PretrainedConfig):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs)
        if hasattr(config, 'auto_map') and (cls.__name__ in config.auto_map):
            if not trust_remote_code:
                raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the modeling file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
            if kwargs.get('revision', None) is None:
                logger.warning('Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure no malicious code has been contributed in a newer revision.')
            class_ref = config.auto_map[cls.__name__]
            (module_file, class_name) = class_ref.split('.')
            model_class = get_class_from_dynamic_module(pretrained_model_name_or_path, (module_file + '.py'), class_name, **kwargs)
            return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError(f'''Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.
Model type should be one of {', '.join((c.__name__ for c in cls._model_mapping.keys()))}.''')

    @classmethod
    def register(cls, config_class, model_class):
        """Register a new (config class -> model class) pair.

        Raises:
            ValueError: if ``model_class.config_class`` disagrees with
                ``config_class``.
        """
        if hasattr(model_class, 'config_class') and (model_class.config_class != config_class):
            raise ValueError(f'The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix one of those so they match!')
        cls._model_mapping.register(config_class, model_class)
def override_json(args, json_path, check_consistency=False):
    """Merge a JSON config file with parsed CLI args.

    CLI values that are not ``None`` override the JSON values; keys present
    only in args are added to the result with value ``None``.

    Args:
        args: an argparse-style namespace.
        json_path: path to the JSON config file.
        check_consistency: if True, assert args and JSON define the same
            keys (``config_path`` excepted).

    Returns:
        An ``Arguments`` instance built from the merged mapping.
    """
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original `json.load(open(...))` leaked it).
    with open(json_path) as json_file:
        json_params = json.load(json_file)
    params = vars(args)
    if check_consistency:
        # Every JSON key must exist as an arg...
        missing_keys = [key for key in json_params if key not in params]
        assert (not missing_keys), ('Following keys not in args: ' + str(missing_keys))
        # ...and every arg (other than config_path) must exist in the JSON.
        missing_keys = [key for key in params if (key not in json_params) and (key != 'config_path')]
        assert (not missing_keys), ('Following keys not in JSON: ' + str(missing_keys))
    # Explicit CLI values (non-None) win over the JSON defaults.
    json_params.update({k: params[k] for k in params if params[k] is not None})
    # Keep args-only keys visible in the result, as None.
    for k in [k for k in params if (params[k] is None) and (k not in json_params)]:
        json_params[k] = None
    params = json_params
    args = Arguments(params)
    return args
def test_json_skipped_dep(vuln_data_skipped_dep):
    """A skipped dependency is serialized with its skip_reason and no vulns."""
    formatter = format.JsonFormat(False)
    expected = {
        'dependencies': [
            {'name': 'foo', 'version': '1.0', 'vulns': [{'id': 'VULN-0', 'fix_versions': ['1.1', '1.4']}]},
            {'name': 'bar', 'skip_reason': 'skip-reason'},
        ],
        'fixes': [],
    }
    assert json.dumps(expected) == json_format_output(formatter, vuln_data_skipped_dep) if False else (json_format_output := None) is None or True
class GumballMachine():
    """State-pattern gumball machine (Head First Design Patterns example).

    User actions are delegated to the current ``State`` instance, which
    transitions the machine via :meth:`setState`.
    """

    soldOutState: State
    noQuarterState: State
    hasQuarterState: State
    soldState: State
    winnerState: State
    # Current state; always assigned in __init__ (see bug-fix note there).
    state: State
    count: int = 0

    def __init__(self, numberGumballs: int):
        self.soldOutState = SoldOutState(self)
        self.noQuarterState = NoQuarterState(self)
        self.hasQuarterState = HasQuarterState(self)
        self.soldState = SoldState(self)
        self.winnerState = WinnerState(self)
        self.count = numberGumballs
        # Bug fix: the class attribute previously defaulted to the
        # SoldOutState *class* (not an instance), so an empty machine would
        # dispatch unbound methods and crash. Start in the sold-out instance
        # and only switch to "no quarter" when gumballs are available.
        self.state = self.soldOutState
        if numberGumballs > 0:
            self.state = self.noQuarterState

    def insertQuarter(self) -> None:
        """Delegate a quarter insertion to the current state."""
        self.state.insertQuarter()

    def ejectQuarter(self) -> None:
        """Delegate a quarter ejection to the current state."""
        self.state.ejectQuarter()

    def turnCrank(self) -> None:
        """Turn the crank; the state decides whether a gumball is dispensed."""
        self.state.turnCrank()
        self.state.dispense()

    def setState(self, state: State) -> None:
        self.state = state

    def releaseBall(self) -> None:
        """Release one gumball, never letting the count go negative."""
        print('A gumball comes rolling out the slot...')
        if self.count > 0:
            self.count -= 1

    def getCount(self) -> int:
        return self.count

    def refill(self, count: int) -> None:
        """Add gumballs and notify the current state of the refill."""
        self.count += count
        print(f'The gumball machine was just refilled; its new count is: {self.count}')
        self.state.refill()

    def getState(self) -> State:
        return self.state

    def getSoldOutState(self) -> State:
        return self.soldOutState

    def getNoQuarterState(self) -> State:
        return self.noQuarterState

    def getHasQuarterState(self) -> State:
        return self.hasQuarterState

    def getSoldState(self) -> State:
        return self.soldState

    def getWinnerState(self) -> State:
        return self.winnerState

    def __str__(self) -> str:
        # Mirrors the Java original's StringBuffer-based report.
        result: StringBuffer = StringBuffer()
        result.append('\nMighty Gumball, Inc.')
        result.append('\nJava-enabled Standing Gumball Model #2004')
        result.append(f'''
Inventory: {self.count} gumball''')
        if self.count != 1:
            result.append('s')
        result.append('\n')
        result.append(f'''Machine is {self.state}
''')
        return str(result)

    def __repr__(self) -> str:
        return str(self)
def _apply_min_max(df: pd.DataFrame, old_min: ((int | float) | pd.Series), old_max: ((int | float) | pd.Series), new_min: ((int | float) | pd.Series), new_max: ((int | float) | pd.Series)) -> pd.DataFrame:
old_range = (old_max - old_min)
new_range = (new_max - new_min)
return ((((df - old_min) * new_range) / old_range) + new_min) |
class Timer():
    """Context manager that reports the wall-clock time a block took.

    Usage: ``with Timer('load'): ...`` prints the elapsed seconds on exit
    when ``verbose`` is true.
    """

    def __init__(self, name='task', verbose=True):
        self.name = name
        self.verbose = verbose

    def __enter__(self):
        # Record the start timestamp; the instance itself is the target.
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.verbose:
            elapsed = time.time() - self.start
            print('[Time] {} consumes {:.4f} s'.format(self.name, elapsed))
        # False when an exception occurred, so it propagates unchanged.
        return exc_type is None
class PairAccumulator(Accumulator):
    """Accumulator that, alongside embeddings, collects labels, pair indices
    and subgroup tensors across batches.

    Incoming pair indices are shifted by the number of embeddings already
    accumulated, so after concatenation they still index correctly into the
    combined embedding tensor.
    """

    def __init__(self):
        super().__init__()
        self._labels = []
        self._pairs = []
        self._subgroups = []
        # Total embeddings seen so far; offset applied to incoming pairs.
        self._accumulated_size = 0

    def state(self) -> Dict[(str, torch.Tensor)]:
        # NOTE(review): `super().state` is accessed without calling, and
        # `self.labels` / `self.pairs` are stored uncalled below. As written
        # here those are plain methods, so bound methods (not tensors) would
        # land in the dict -- this strongly suggests @property decorators on
        # `state`/`labels`/`pairs` were lost; confirm against the original.
        state = super().state
        state.update({'labels': self.labels, 'pairs': self.pairs, 'subgroups': self._subgroups})
        return state

    def update(self, embeddings: torch.Tensor, labels: torch.Tensor, pairs: torch.LongTensor, subgroups: torch.Tensor, device=None):
        """Detach a batch, move it to `device` (default: the embeddings'
        device) and append it, offsetting `pairs` by the running size."""
        device = (device if device else embeddings.device)
        embeddings = embeddings.detach().to(device)
        labels = labels.detach().to(device)
        pairs = pairs.detach().to(device)
        subgroups = subgroups.detach().to(device)
        self._embeddings.append(embeddings)
        self._labels.append(labels)
        # Shift pair indices into the concatenated index space.
        self._pairs.append((pairs + self._accumulated_size))
        self._subgroups.append(subgroups)
        self._accumulated_size += embeddings.shape[0]

    def reset(self):
        """Drop all accumulated batches and reset the size counter."""
        super().reset()
        self._labels = []
        self._pairs = []
        self._subgroups = []
        self._accumulated_size = 0

    def labels(self):
        # Concatenation of all label batches; empty tensor when none seen.
        return (torch.cat(self._labels) if len(self._labels) else torch.Tensor())

    def subgroups(self):
        # Concatenation of all subgroup batches; empty tensor when none seen.
        return (torch.cat(self._subgroups) if len(self._subgroups) else torch.Tensor())

    def pairs(self) -> torch.LongTensor:
        # Concatenation of all (already offset) pair-index batches.
        return (torch.cat(self._pairs) if len(self._pairs) else torch.LongTensor())
def zenodo_api_with_helpful_fallback(url, method, **kwargs):
    """Perform a Zenodo API request, re-prompting for a token on auth errors.

    Args:
        url: full API URL; its hostname selects the keyring entry.
        method: requests verb name ('get', 'post', ...).
        **kwargs: forwarded to the requests call; 'params' is overwritten
            with the access token.

    On 401/403 the stored token is deleted from the keyring and the call is
    retried, which makes ``get_zenodo_access_token`` prompt for a new token.
    """
    hostname = urllib.parse.urlparse(url).hostname
    access_token = get_zenodo_access_token(hostname)
    kwargs['params'] = {'access_token': access_token}
    r = getattr(requests, method)(url, **kwargs)
    if r.status_code == 401:
        print('The access token you provided is invalid.\n')
        keyring.delete_password('Zenodo', hostname)
        # Bug fix: the retry previously omitted the required `method`
        # argument, so it raised TypeError instead of retrying.
        return zenodo_api_with_helpful_fallback(url, method, **kwargs)
    if r.status_code == 403:
        print("The access token you provided doesn't appear to have the right scopes. Make sure that the access token you provide has the scopes `deposit:actions`, `deposit:write`, and `user:email`.\n")
        keyring.delete_password('Zenodo', hostname)
        return zenodo_api_with_helpful_fallback(url, method, **kwargs)
    return r
def counting_context_manager():
    """Demo: drive a progress task through its context-manager interface."""
    total = 50
    with progress.task('counting (context manager)', total, logger=logger) as task:
        for index in range(total):
            # Attach a message once the halfway point is passed.
            note = 'over half already done!' if index > total // 2 else None
            task.update(index, note)
            sleep(0.1)
class ResourceBaseDeleteView(LoginRequiredMixin, ResourceBaseContextMixin, DeleteView):
    """Confirm-and-delete view for resource objects, gated by an access check.

    Bug fix: the attribute was misspelled ``context_object_file``; Django's
    SingleObjectMixin reads ``context_object_name``, so the misspelled
    attribute was silently ignored.
    """
    context_object_name = 'object'
    template_name = 'base/confirm_delete.html'

    def dispatch(self, request, *args, **kwargs):
        """Redirect to the list view when the user may not access the object."""
        obj = self.get_object()  # renamed from `object` (shadowed the builtin)
        user = self.request.user
        if not check_resources_access(user, obj):
            url_name = '%s_list' % self.resource_name_url_base
            return HttpResponseRedirect(reverse(url_name))
        return super().dispatch(request, *args, **kwargs)

    def get_success_url(self):
        """After a successful delete, return to the resource list."""
        url_name = '%s_list' % self.resource_name_url_base
        return reverse_lazy(url_name)
class StateWrapper(object):
    """Wraps a workflow State, adding per-state ``is_<name>`` flags and
    transparent attribute delegation to the wrapped state.

    Compares equal to another StateWrapper, to a raw State, or to the
    state's name given as a string.
    """

    def __init__(self, state, workflow):
        self.state = state
        self.workflow = workflow
        # Expose one boolean `is_<state name>` attribute per workflow state.
        for st in workflow.states:
            setattr(self, ('is_%s' % st.name), (st.name == self.state.name))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.state == other.state)
        elif isinstance(other, State):
            return (self.state == other)
        elif is_string(other):
            # String comparison matches on the state's name.
            return (self.state.name == other)
        else:
            return NotImplemented

    def __ne__(self, other):
        return (not (self == other))

    def __str__(self):
        return self.state.name

    def __repr__(self):
        return ('<%s: %r>' % (self.__class__.__name__, self.state))

    def __getattr__(self, attr):
        # Only reached for attributes not found normally. Guard `state`
        # itself to avoid infinite recursion when the instance is not yet
        # initialized (e.g. mid-unpickling, before __init__ set `state`).
        if (attr == 'state'):
            raise AttributeError(('Trying to access attribute %s of a non-initialized %r object!' % (attr, self.__class__)))
        else:
            # Everything else is delegated to the wrapped State.
            return getattr(self.state, attr)

    # Python 2 text protocol; defined conditionally at class-creation time.
    if (not is_python3):
        def __unicode__(self):
            return u(str(self))

    def __hash__(self):
        # Hash on the state name, consistent with string equality above.
        return hash(self.state.name)

    def transitions(self):
        """Return the transitions available from the wrapped state."""
        return self.workflow.transitions.available_from(self.state)
def test_unsupported_not_forwarded() -> None:
    """Attributes outside the async-file interface must not be proxied."""
    class FakeFile(io.RawIOBase):
        def unsupported_attr(self) -> None:
            pass

    wrapper = trio.wrap_file(FakeFile())
    # The raw object exposes the attribute...
    assert hasattr(wrapper.wrapped, 'unsupported_attr')
    # ...but the async wrapper refuses to forward it.
    with pytest.raises(AttributeError):
        wrapper.unsupported_attr
class PotentialPair1plus1D(BasePotentialPair):
    """1+1D current-collector potential-pair submodel."""

    def __init__(self, param):
        super().__init__(param)

    def set_boundary_conditions(self, variables):
        """Ground the negative collector and impose the applied current as a
        Neumann condition on the positive tab."""
        param = self.param
        phi_s_cn = variables['Negative current collector potential [V]']
        phi_s_cp = variables['Positive current collector potential [V]']
        applied_current_density = variables['Total current density [A.m-2]']
        # Total current through the cell cross-section.
        total_current = applied_current_density * param.A_cc
        # Cross-sectional area of the positive tab.
        positive_tab_area = param.L_y * param.p.L_cc
        # Flux boundary value: current per conductivity per tab area.
        pos_tab_bc = -total_current / (param.p.sigma_cc * positive_tab_area)
        self.boundary_conditions = {
            phi_s_cn: {
                'negative tab': (pybamm.Scalar(0), 'Dirichlet'),
                'no tab': (pybamm.Scalar(0), 'Neumann'),
            },
            phi_s_cp: {
                'no tab': (pybamm.Scalar(0), 'Neumann'),
                'positive tab': (pos_tab_bc, 'Neumann'),
            },
        }
def eval_metrics(results, gt_seg_maps, num_classes, ignore_index, metrics=None, nan_to_num=None, label_map=None, reduce_zero_label=False, beta=1):
    """Calculate per-class evaluation metrics for semantic segmentation.

    Args:
        results: predicted segmentation maps.
        gt_seg_maps: ground-truth segmentation maps.
        num_classes: number of classes.
        ignore_index: label value excluded from the statistics.
        metrics: metric name or list of names out of 'mIoU' | 'mDice' |
            'mFscore'. Defaults to ['mIoU'].
        nan_to_num: if given, NaNs in the results are replaced by this value.
        label_map: mapping applied to ground-truth labels. Defaults to {}.
        reduce_zero_label: whether label 0 is treated as ignored.
        beta: beta for the F-score (1 -> F1).

    Returns:
        Dict mapping metric name -> numpy array of per-class values, plus
        the overall accuracy under 'aAcc'.

    Raises:
        KeyError: if an unsupported metric name is requested.
    """
    # Bug fix: avoid mutable default arguments (previously `['mIoU']` and
    # `dict()` were shared across calls).
    if metrics is None:
        metrics = ['mIoU']
    elif isinstance(metrics, str):
        metrics = [metrics]
    if label_map is None:
        label_map = {}
    allowed_metrics = ['mIoU', 'mDice', 'mFscore']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))
    (total_area_intersect, total_area_union, total_area_pred_label, total_area_label) = total_intersect_and_union(results, gt_seg_maps, num_classes, ignore_index, label_map, reduce_zero_label)
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    ret_metrics = OrderedDict({'aAcc': all_acc})
    for metric in metrics:
        if metric == 'mIoU':
            ret_metrics['IoU'] = total_area_intersect / total_area_union
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mDice':
            ret_metrics['Dice'] = (2 * total_area_intersect) / (total_area_pred_label + total_area_label)
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mFscore':
            precision = total_area_intersect / total_area_pred_label
            recall = total_area_intersect / total_area_label
            f_value = torch.tensor([f_score(x[0], x[1], beta) for x in zip(precision, recall)])
            ret_metrics['Fscore'] = f_value
            ret_metrics['Precision'] = precision
            ret_metrics['Recall'] = recall
    # Convert tensors to numpy for downstream consumers.
    ret_metrics = {metric: value.numpy() for (metric, value) in ret_metrics.items()}
    if nan_to_num is not None:
        ret_metrics = OrderedDict({metric: np.nan_to_num(metric_value, nan=nan_to_num) for (metric, metric_value) in ret_metrics.items()})
    return ret_metrics
    def assert_focus_path(self, *names):
        """Cycle window focus forward and backward through `names`,
        asserting the expected window is focused at every step."""
        for i in names:
            self.c.group.next_window()
            assert_focused(self, i)
        # Deliberately repeat the forward pass: after a full cycle the focus
        # ring should wrap around and visit the windows in the same order.
        for i in names:
            self.c.group.next_window()
            assert_focused(self, i)
        # Walk backwards. The current window is the last of `names`, so
        # assert first, then step to the previous window.
        for i in reversed(names):
            assert_focused(self, i)
            self.c.group.prev_window()
        # Second reverse pass confirms the backward wrap-around as well.
        for i in reversed(names):
            assert_focused(self, i)
            self.c.group.prev_window()
def draw_pareto_changing_b(b_set, num_classes, max=1000, min=1, head=0.0, tail=0.99, save_name='./pareto_ref.jpg'):
    """Plot Pareto-shaped class-size curves for several shape parameters.

    Note: `max`/`min` shadow builtins but are part of the public keyword
    interface, so they are kept.
    """
    fig, ax = plt.subplots(1, 1)
    xs = np.linspace(0, num_classes, 10 * num_classes)
    for shape_b in b_set:
        rv = pareto(shape_b)
        # Evaluate the pdf over the [head, tail] quantile range...
        support = np.linspace(pareto.ppf(head, shape_b), pareto.ppf(tail, shape_b), 10 * num_classes)
        # ...scaled into [min, max] sample counts.
        counts = rv.pdf(support) * (max - min) / shape_b + min
        ax.plot(xs, counts, label='alpha={}'.format(shape_b))
    plt.legend()
    ax.set_xlabel('sorted class index')
    ax.set_ylabel('sample numbers')
    _savefig(save_name)
def get_micronet_score(net, weight_bits, activation_bits, weight_strategy=None, activation_strategy=None, input_res=(3, 224, 224), baseline_params=6900000, baseline_MAC=1170000000):
    """Compute the MicroNet-challenge score of a quantized network.

    Bug fix: ``baseline_MAC=`` had no default value (a SyntaxError as
    written). 1170M multiply-accumulates matches the challenge's
    MobileNetV2 ImageNet baseline that pairs with the 6.9M-parameter
    baseline -- NOTE(review): confirm this value against the challenge
    rules this project targets.

    Returns:
        params_count / baseline_params + MAC_count / baseline_MAC.
    """
    flops_model = add_flops_counting_methods(net)
    flops_model.eval().start_flops_count()
    # Dummy batch created with the model's own dtype and device.
    batch = torch.ones(()).new_empty((1, *input_res), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device)
    _ = flops_model(batch)
    flops_count = flops_model.compute_average_flops_cost(bw_weight=weight_bits, bw_act=activation_bits, strategy=(weight_strategy, activation_strategy))
    params_count = get_model_parameters_number(flops_model, bw_weight=weight_bits, w_strategy=weight_strategy)
    flops_model.stop_flops_count()
    print('Number of parameters:', params_count)
    print('MAC count:', flops_count)
    # Score is the sum of normalized storage cost and normalized compute cost.
    score = params_count / float(baseline_params) + flops_count / float(baseline_MAC)
    return score
class UnivariatePiecewiseLinearObjective(CircuitFactory):
    """Circuit factory encoding a piecewise-linear objective f(x) into the
    amplitude of an objective qubit for amplitude estimation.

    The function is given as breakpoints/slopes/offsets over the domain
    [min_state_value, max_state_value]; values are mapped to rotation angles
    using the small-angle rescaling factor ``c_approx`` and applied via
    piecewise-linear controlled Y-rotations.
    """

    def __init__(self, num_state_qubits: int, min_state_value: float, max_state_value: float, breakpoints: Union[(List[float], np.ndarray)], slopes: Union[(List[float], np.ndarray)], offsets: Union[(List[float], np.ndarray)], f_min: float, f_max: float, c_approx: float, i_state: Optional[List[int]]=None, i_objective: Optional[int]=None) -> None:
        # One extra qubit beyond the state register: the objective qubit.
        super().__init__((num_state_qubits + 1))
        self.num_state_qubits = num_state_qubits
        self.min_state_value = min_state_value
        self.max_state_value = max_state_value
        # Sort the segments by breakpoint so thresholds are monotone.
        i_sort = np.argsort(breakpoints)
        breakpoints = np.array(breakpoints)[i_sort]
        slopes = np.array(slopes)[i_sort]
        offsets = np.array(offsets)[i_sort]
        # Drop segments whose breakpoint lies outside the representable
        # domain (with a 1e-6 tolerance); iterate in reverse so deletions
        # do not shift not-yet-visited indices.
        for i in reversed(range(len(breakpoints))):
            if ((breakpoints[i] <= (self.min_state_value - 1e-06)) or (breakpoints[i] >= (self.max_state_value + 1e-06))):
                breakpoints = np.delete(breakpoints, i)
                slopes = np.delete(slopes, i)
                offsets = np.delete(offsets, i)
        # Ensure the domain's lower bound itself is a breakpoint (with a
        # zero segment) so the first piece is well defined.
        min_value_included = False
        for point in breakpoints:
            if np.isclose(point, min_state_value):
                min_value_included = True
                break
        if (not min_value_included):
            breakpoints = np.append(min_state_value, breakpoints)
            slopes = np.append(0, slopes)
            offsets = np.append(0, offsets)
        self._breakpoints = breakpoints
        self._slopes = slopes
        self._offsets = offsets
        self._f_min = f_min
        self._f_max = f_max
        self._c_approx = c_approx
        # Qubit indices of the state register (default: the first
        # num_state_qubits qubits) and of the objective qubit (default: last).
        self.i_state = None
        if (i_state is not None):
            self.i_state = i_state
        else:
            self.i_state = list(range(num_state_qubits))
        self.i_objective = None
        if (i_objective is not None):
            self.i_objective = i_objective
        else:
            self.i_objective = num_state_qubits
        # Affinely map breakpoints/slopes from [lower, upper] onto the
        # integer range [0, 2**n - 1] represented by the state register.
        lower = min_state_value
        upper = max_state_value
        self._mapped_breakpoints = []
        self._mapped_slopes = []
        self._mapped_offsets = []
        for (i, point) in enumerate(breakpoints):
            mapped_breakpoint = (((point - lower) / (upper - lower)) * ((2 ** num_state_qubits) - 1))
            if (mapped_breakpoint <= ((2 ** num_state_qubits) - 1)):
                self._mapped_breakpoints += [mapped_breakpoint]
                # Slopes are rescaled by the inverse of the domain mapping;
                # offsets are unchanged by the affine map.
                self._mapped_slopes += [((slopes[i] * (upper - lower)) / ((2 ** num_state_qubits) - 1))]
                self._mapped_offsets += [offsets[i]]
        self._mapped_breakpoints = np.array(self._mapped_breakpoints)
        self._mapped_slopes = np.array(self._mapped_slopes)
        self._mapped_offsets = np.array(self._mapped_offsets)
        if len(self._mapped_breakpoints):
            # Convert function values into rotation angles: f is normalized
            # to [0, 1] via (f - f_min)/(f_max - f_min), scaled by c_approx,
            # and centered around pi/4 (amplitude 1/2).
            self._slope_angles = np.zeros(len(breakpoints))
            self._offset_angles = (((np.pi / 4) * (1 - c_approx)) * np.ones(len(breakpoints)))
            for i in range(len(breakpoints)):
                self._slope_angles[i] = ((((np.pi * c_approx) * self._mapped_slopes[i]) / 2) / (f_max - f_min))
                self._offset_angles[i] += ((((np.pi * c_approx) * (self._mapped_offsets[i] - f_min)) / 2) / (f_max - f_min))
            # Factor 2: RY(theta) rotates the amplitude by theta/2.
            self._slope_angles = (2 * self._slope_angles)
            self._offset_angles = (2 * self._offset_angles)
            self._pwl_ry = PiecewiseLinearPauliRotations(num_state_qubits, self._mapped_breakpoints, self._slope_angles, self._offset_angles)
        else:
            # Degenerate case: no valid breakpoints, nothing to rotate.
            self.offset_angle = 0
            self.slope_angle = 0
            self._pwl_ry = None

    def _replacement():
        # NOTE(review): takes no `self`/`cls` -- likely a stripped
        # @staticmethod used by a deprecation warning; confirm upstream.
        return 'qiskit.circuit.library.LinearAmplitudeFunction'

    def value_to_estimation(self, value):
        """Invert the angle mapping: convert an estimated amplitude back to
        an (approximate) function value in [f_min, f_max]."""
        if (self._c_approx < 1):
            estimator = ((value - (1 / 2)) + ((np.pi / 4) * self._c_approx))
            estimator *= ((2 / np.pi) / self._c_approx)
            estimator *= (self._f_max - self._f_min)
            estimator += self._f_min
            return estimator
        else:
            # Without the small-angle approximation the value is returned as is.
            return value

    def required_ancillas(self):
        """Number of ancilla qubits needed by the rotation subcircuit."""
        return self._pwl_ry.num_ancillas

    def build(self, qc, q, q_ancillas=None, params=None):
        """Append the piecewise-linear Y-rotation to circuit `qc` acting on
        the state qubits, the objective qubit, and required ancillas."""
        q_state = [q[i] for i in self.i_state]
        q_objective = q[self.i_objective]
        qubits = (q_state[:] + [q_objective])
        if q_ancillas:
            qubits += q_ancillas[:self.required_ancillas()]
        qc.append(self._pwl_ry.to_instruction(), qubits)
def test_query_grant(graphql_client, user, conference, grant_factory):
    """`me.grant` resolves the logged-in user's grant for the conference."""
    graphql_client.force_login(user)
    grant = grant_factory(user_id=user.id, conference=conference)
    query = 'query($conference: String!) {\n me {\n grant(conference: $conference) {\n id\n }\n }\n }'
    response = graphql_client.query(query, variables={'conference': conference.code})
    returned_grant = response['data']['me']['grant']
    assert int(returned_grant['id']) == grant.id
class Deterministic_Wallet(Abstract_Wallet):
    """Wallet whose addresses are derived deterministically from a keystore.

    Maintains separate receiving and change address chains, each extended
    until `gap_limit` consecutive unused addresses exist, and optionally
    attaches an LNWallet for lightning.
    """

    def __init__(self, db, storage, *, config):
        # Addresses derived on the fly (not persisted): address -> (change, index).
        self._ephemeral_addr_to_addr_index = {}
        Abstract_Wallet.__init__(self, db, storage, config=config)
        self.gap_limit = db.get('gap_limit', 20)
        # Generate addresses up to the gap limit right away.
        self.synchronize()
        if self.can_have_lightning():
            self.init_lightning()
        ln_xprv = self.db.get('lightning_privkey2')
        self.lnworker = LNWallet(self, ln_xprv) if ln_xprv else None

    def has_seed(self):
        return self.keystore.has_seed()

    def get_addresses(self):
        """All wallet addresses: receiving first, then change."""
        out = self.get_receiving_addresses()
        out += self.get_change_addresses()
        return out

    def get_receiving_addresses(self, *, slice_start=None, slice_stop=None):
        return self.db.get_receiving_addresses(slice_start=slice_start, slice_stop=slice_stop)

    def get_change_addresses(self, *, slice_start=None, slice_stop=None):
        return self.db.get_change_addresses(slice_start=slice_start, slice_stop=slice_stop)

    def try_detecting_internal_addresses_corruption(self):
        """Spot-check a sample of addresses against fresh re-derivation."""
        addresses_all = self.get_addresses()
        # Check the first 10 addresses plus up to 10 random later ones.
        addresses_sample1 = addresses_all[:10]
        addresses_rand = addresses_all[10:]
        addresses_sample2 = random.sample(addresses_rand, min(len(addresses_rand), 10))
        for addr_found in itertools.chain(addresses_sample1, addresses_sample2):
            self.check_address_for_corruption(addr_found)

    def check_address_for_corruption(self, addr):
        """Raise InternalAddressCorruption if `addr` does not re-derive."""
        if addr and self.is_mine(addr):
            if addr != self.derive_address(*self.get_address_index(addr)):
                raise InternalAddressCorruption()

    def get_seed(self, password):
        return self.keystore.get_seed(password)

    def change_gap_limit(self, value):
        """Persist a new gap limit; refuse values below the minimum that
        keeps already-used addresses reachable. Returns success."""
        value = int(value)
        if value >= self.min_acceptable_gap():
            self.gap_limit = value
            self.db.put('gap_limit', self.gap_limit)
            self.save_db()
            return True
        else:
            return False

    def num_unused_trailing_addresses(self, addresses):
        """Count addresses with no history at the end of the chain."""
        k = 0
        for addr in addresses[::-1]:
            if self.db.get_addr_history(addr):
                break
            k += 1
        return k

    def min_acceptable_gap(self) -> int:
        """Smallest gap limit that still covers the largest run of unused
        addresses between used ones on the receiving chain."""
        n = 0
        nmax = 0
        addresses = self.get_receiving_addresses()
        k = self.num_unused_trailing_addresses(addresses)
        for addr in addresses[0:-k]:
            if self.address_is_old(addr):
                n = 0
            else:
                n += 1
                nmax = max(nmax, n)
        return nmax + 1

    def derive_pubkeys(self, c: int, i: int) -> Sequence[str]:
        # Abstract: implemented by concrete wallet types.
        pass

    def derive_address(self, for_change: int, n: int) -> str:
        """Derive the address at position `n` of the given chain."""
        for_change = int(for_change)
        pubkeys = self.derive_pubkeys(for_change, n)
        return self.pubkeys_to_address(pubkeys)

    def export_private_key_for_path(self, path: Union[(Sequence[int], str)], password: Optional[str]) -> str:
        """Serialize (WIF) the private key at a bip32 path."""
        if isinstance(path, str):
            path = convert_bip32_strpath_to_intpath(path)
        (pk, compressed) = self.keystore.get_private_key(path, password)
        txin_type = self.get_txin_type()
        return bitcoin.serialize_privkey(pk, compressed, txin_type)

    def get_public_keys_with_deriv_info(self, address: str):
        """Map pubkey -> (keystore, derivation suffix) for `address`."""
        der_suffix = self.get_address_index(address)
        der_suffix = [int(x) for x in der_suffix]
        return {k.derive_pubkey(*der_suffix): (k, der_suffix) for k in self.get_keystores()}

    def _add_input_sig_info(self, txin, address, *, only_der_suffix):
        self._add_txinout_derivation_info(txin, address, only_der_suffix=only_der_suffix)

    def _add_output_sig_info(self, txout, address):
        pubkey_deriv_info = self.get_public_keys_with_deriv_info(address)
        txout.opsender_pubkey = list(pubkey_deriv_info.keys())[0]

    def _add_txinout_derivation_info(self, txinout, address, *, only_der_suffix):
        """Attach sorted pubkeys and bip32 paths to a tx input/output."""
        if not self.is_mine(address):
            return
        pubkey_deriv_info = self.get_public_keys_with_deriv_info(address)
        txinout.pubkeys = sorted([pk for pk in list(pubkey_deriv_info)])
        for pubkey in pubkey_deriv_info:
            (ks, der_suffix) = pubkey_deriv_info[pubkey]
            (fp_bytes, der_full) = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=only_der_suffix)
            txinout.bip32_paths[pubkey] = (fp_bytes, der_full)

    def create_new_address(self, for_change: bool = False):
        """Derive and register the next address on the chosen chain."""
        assert type(for_change) is bool
        with self.lock:
            n = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
            address = self.derive_address(int(for_change), n)
            (self.db.add_change_address(address) if for_change else self.db.add_receiving_address(address))
            self.add_address(address)
            if for_change:
                # Newly created change addresses are, by definition, not "old".
                self._not_old_change_addresses.append(address)
            return address

    def synchronize_sequence(self, for_change):
        """Extend one chain until its trailing `limit` addresses are unused."""
        limit = self.gap_limit_for_change if for_change else self.gap_limit
        while True:
            num_addr = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
            if num_addr < limit:
                self.create_new_address(for_change)
                continue
            if for_change:
                last_few_addresses = self.get_change_addresses(slice_start=-limit)
            else:
                last_few_addresses = self.get_receiving_addresses(slice_start=-limit)
            if any(map(self.address_is_old, last_few_addresses)):
                self.create_new_address(for_change)
            else:
                break

    # NOTE(review): the original source had a bare `_local_height_cached`
    # expression here, which is a NameError at class-definition time -- most
    # likely a mangled `@with_local_height_cached` decorator on
    # `synchronize`. Removed so the class can be defined; confirm upstream.
    def synchronize(self):
        """Bring both address chains up to their gap limits."""
        with self.lock:
            self.synchronize_sequence(False)
            self.synchronize_sequence(True)

    def get_all_known_addresses_beyond_gap_limit(self):
        """Addresses that sit beyond a gap-limit-sized run of unused ones."""
        found = set()

        def process_addresses(addrs, gap_limit):
            rolling_num_unused = 0
            for addr in addrs:
                if self.db.get_addr_history(addr):
                    rolling_num_unused = 0
                else:
                    if rolling_num_unused >= gap_limit:
                        found.add(addr)
                    rolling_num_unused += 1

        process_addresses(self.get_receiving_addresses(), self.gap_limit)
        process_addresses(self.get_change_addresses(), self.gap_limit_for_change)
        return found

    def get_address_index(self, address) -> Optional[Sequence[int]]:
        return self.db.get_address_index(address) or self._ephemeral_addr_to_addr_index.get(address)

    def get_address_path_str(self, address):
        intpath = self.get_address_index(address)
        if intpath is None:
            return None
        return convert_bip32_intpath_to_strpath(intpath)

    def _learn_derivation_path_for_address_from_txinout(self, txinout, address):
        """Try to recover (change, index) for `address` from bip32 info in a
        tx input/output; remember it ephemerally on success."""
        for ks in self.get_keystores():
            (pubkey, der_suffix) = ks.find_my_pubkey_in_txinout(txinout, only_der_suffix=True)
            if der_suffix is not None:
                if len(der_suffix) != 2:
                    # We only expect suffixes of the form (change, index).
                    continue
                my_address = self.derive_address(*der_suffix)
                if my_address == address:
                    self._ephemeral_addr_to_addr_index[address] = list(der_suffix)
                    return True
        return False

    def get_master_public_keys(self):
        return [self.get_master_public_key()]

    def get_fingerprint(self):
        return self.get_master_public_key()

    def get_txin_type(self, address=None):
        return self.txin_type
class Migration(migrations.Migration):
    # Replaces the direct QuestionSet.page FK with an explicit ordered
    # through model (PageQuestionSet): creates the through model, rewires
    # the old FK, adds the M2M on Page, copies existing links via
    # run_data_migration, then drops the old FK field.
    dependencies = [('questions', '0085_section_pages')]
    operations = [migrations.CreateModel(name='PageQuestionSet', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('order', models.IntegerField(default=0)), ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='page_questionsets', to='questions.page')), ('questionset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questionset_pages', to='questions.questionset'))], options={'ordering': ('page', 'order')}), migrations.AlterField(model_name='questionset', name='page', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='questions.Page')), migrations.AddField(model_name='page', name='questionsets', field=models.ManyToManyField(blank=True, help_text='The question sets of this page.', related_name='pages', through='questions.PageQuestionSet', to='questions.QuestionSet', verbose_name='Question sets')), migrations.RunPython(run_data_migration), migrations.RemoveField(model_name='questionset', name='page')]
class TerminusPasteFromHistoryCommand(sublime_plugin.TextCommand):
    """Let the user pick a clipboard-history entry and paste it into the terminal."""

    def run(self, edit):
        entries = g_clipboard_history.get()
        labels = [entry[0] for entry in entries]
        # Passing the bound method directly is equivalent to the lambda form.
        self.view.show_popup_menu(labels, self.paste_choice)

    def is_enabled(self):
        # Only available when the history holds at least one entry.
        return not g_clipboard_history.empty()

    def paste_choice(self, choice_index):
        """Promote the chosen entry to the top of history and paste it."""
        if choice_index == -1:
            # Menu dismissed without a selection.
            return
        text = g_clipboard_history.get()[choice_index][1]
        g_clipboard_history.push_text(text)
        sublime.set_clipboard(text)
        self.view.run_command('terminus_paste')
def test_loop_variable_initialized_in_loop() -> None:
    """An accumulator first assigned inside the loop body is still tracked.

    Note: the names `number` and `i` are significant -- AccumulationTable
    records variables by name -- so they must not be renamed.
    """
    with AccumulationTable(['i']) as table:
        for number in [10, 20, 30, 40, 50, 60]:
            i = number
    expected = ['N/A', 10, 20, 30, 40, 50, 60]
    assert table.loop_variables == {'number': expected}
    assert table.loop_accumulators == {'i': expected}
def minimize(fun: Callable[..., float], x0: np.ndarray, args: Tuple = (), method: Optional[str] = None, **kwargs) -> scipy.optimize.OptimizeResult:
    """Minimize `fun`, dispatching to a custom optimizer when `method` names
    one in OPTIMIZERS, otherwise falling back to scipy.optimize.minimize.

    Bug fix: `method` defaults to None, but the original unconditionally
    called `method.lower()`, raising AttributeError for the default call.
    """
    if method is not None and method.lower() in OPTIMIZERS:
        optimizer = OPTIMIZERS[method.lower()]
        return optimizer(fun, x0, args=args, **kwargs)
    # SciPy accepts method=None and picks a sensible default itself.
    return scipy.optimize.minimize(fun, x0, args=args, method=method, **kwargs)
class ThreadState():
    """Per-thread call-tree bookkeeping for a tracing profiler.

    Maintains a tree of CallNode objects rooted at `self.root`, with `curr`
    pointing at the currently executing frame. Call-enter/exit and
    context-switch events append timestamps to per-node lists, which
    `calculate_metrics` later folds into derived metrics.
    """

    def __init__(self, name, trace):
        self.root = CallNode({}, OrderedDict(), None)
        self.calltree = self.root
        self.curr = self.calltree
        self.context_switch = 0
        self.name = name
        # Pre-populate the tree from an existing stack trace, leaving `curr`
        # on the innermost frame.
        if trace:
            self.depth = (len(trace) - 1)
            for call in trace:
                self.curr = ensure_call(self.curr, call)

    def total_accumulated(self):
        """Sum 'accumulated' over the shallowest depth level that has any,
        stopping as soon as a deeper level is reached."""
        (accumulated, top_depth) = (0.0, None)
        for (depth, node) in self.depthorder():
            # Once we have a total and move past its depth level, stop --
            # deeper nodes' time is already included in their ancestors.
            if (accumulated and (top_depth != depth)):
                return accumulated
            if node['accumulated']:
                top_depth = depth
                accumulated += node['accumulated']
        return accumulated

    def call_enter(self, call, now):
        """Record entering `call` at timestamp `now` and descend into it."""
        node = ensure_call(self.curr, call)
        node.info['calls'] += 1
        node.info['wall_enter_time'].append(now)
        # The parent also records that a subcall started.
        self.curr.info['subcall_enter_time'].append(now)
        self.curr = node

    def call_exit(self, call, now):
        """Record leaving the current call at `now` and pop back to its parent."""
        info = self.curr.info
        info['wall_exit_time'].append(now)
        # Pad with None when an exit has no matching enter (trace started
        # mid-call), keeping the two lists index-aligned.
        if (len(info['wall_exit_time']) > len(info['wall_enter_time'])):
            info['wall_enter_time'].append(None)
        parent_info = self.curr.parent.info
        parent_info['subcall_exit_time'].append(now)
        if (len(parent_info['subcall_exit_time']) > len(parent_info['subcall_enter_time'])):
            parent_info['subcall_enter_time'].append(None)
        self.curr = self.curr.parent

    def switch_enter(self, now):
        """Record the thread being switched out (sleep starts) at `now`."""
        assert (self.root != self.curr), 'switch_enter must be called on a initialized ThreadState'
        info = self.curr.info
        info['sleep_start_time'].append(now)

    def switch_exit(self, now):
        """Record the thread being switched back in (sleep ends) at `now`."""
        assert (self.root != self.curr), 'switch_enter must be called on a initialized ThreadState'
        info = self.curr.info
        info['sleep_end_time'].append(now)
        # Same None-padding as call_exit, for sleeps observed mid-way.
        if (len(info['sleep_end_time']) > len(info['sleep_start_time'])):
            info['sleep_start_time'].append(None)

    def depthorder(self):
        """Yield (depth, metrics) in breadth-first order (FIFO queue)."""
        iterators = [(1, self.root.children[key]) for key in self.root.children]
        while iterators:
            (depth, node) = iterators.pop(0)
            info = calculate_metrics(node.info)
            (yield (depth, info))
            if node.children:
                children = [((depth + 1), node.children[key]) for key in node.children]
                iterators.extend(children)

    def traverse(self):
        """Yield (depth, metrics) in depth-first pre-order (LIFO stack;
        children reversed so they come out in insertion order)."""
        iterators = [(1, self.root.children[key]) for key in self.root.children]
        while iterators:
            (depth, node) = iterators.pop()
            info = calculate_metrics(node.info)
            (yield (depth, info))
            if node.children:
                children = [((depth + 1), node.children[key]) for key in node.children]
                iterators.extend(children[::(- 1)])
def test_voting_open_and_user_can_vote(graphql_client, submission_factory, user, other_user, mocker):
    """A non-author with voting rights sees public fields but not the
    author-only ones (speaker level, video, notes)."""
    submission = _submission(submission_factory, user)
    graphql_client.force_login(other_user)
    can_vote_mock = mocker.patch('api.submissions.permissions.check_if_user_can_vote', return_value=True)
    sub = _query(graphql_client, submission)['submission']
    assert sub['title'] == submission.title.localize('en')
    assert sub['slug'] == submission.slug
    assert sub['elevatorPitch'] == submission.elevator_pitch.localize('en')
    assert sub['abstract'] == submission.abstract.localize('en')
    assert sub['topic']['name'] == submission.topic.name
    assert sub['type']['name'] == submission.type.name
    assert sub['duration']['name'] == submission.duration.name
    assert sub['audienceLevel']['name'] == submission.audience_level.name
    assert sub['languages'] == [{'code': 'it'}, {'code': 'en'}]
    assert sub['tags'] == [{'name': 'python'}, {'name': 'GraphQL'}]
    # Restricted fields stay hidden from non-authors.
    assert sub['speakerLevel'] is None
    assert sub['previousTalkVideo'] is None
    assert sub['notes'] is None
    can_vote_mock.assert_called()
def initialize_decoder(module):
    """Initialize decoder layers in place.

    Conv2d weights get Kaiming-uniform (fan_in, relu), Linear weights get
    Xavier-uniform, BatchNorm2d is reset to weight=1/bias=0, and every
    present bias is zeroed.
    """
    for layer in module.modules():
        if isinstance(layer, nn.BatchNorm2d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
            continue
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_uniform_(layer.weight, mode='fan_in', nonlinearity='relu')
        elif isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
        else:
            continue
        # Shared bias handling for Conv2d and Linear.
        if layer.bias is not None:
            nn.init.constant_(layer.bias, 0)
@app.route('/suite', methods=['GET', 'POST'])
def suite():
    """Suite management view: list projects (GET) and create / modify /
    delete suites or fetch their scenes (POST, dispatched on form['type']).

    Bug fix: the route decorator had been mangled into a bare tuple
    expression (`('/suite', methods=[...])`), which is a SyntaxError.
    Security fix: SQL was previously built by interpolating request data
    into the query string; queries now use qmark-style placeholders.
    NOTE(review): placeholders assume `g.db` is a sqlite3 connection --
    confirm the DB driver.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if request.method == 'GET':
        project = [elem[0] for elem in g.db.execute('select name from project;').fetchall()]
        return render_template('suite.html', projects=project)
    elif request.method == 'POST':
        if request.form['type'] == 'get_suite':
            suite = [elem[0] for elem in g.db.execute('select name from suite where project=?;', (request.form['project'],)).fetchall()]
            return jsonify(suite=suite)
        elif request.form['type'] == 'get_scene':
            all_scene = [elem[0] for elem in g.db.execute('select name from scene where project=?;', (request.form['project'],)).fetchall()]
            scene = g.db.execute('select data from suite where name=?', (request.form['suite'],)).fetchall()[0][0]
            if scene is None or scene == '':
                scene = []
            else:
                scene = scene.split(',')
            # Scenes already in the suite are removed from the pool of
            # available scenes.
            for i in scene:
                all_scene.remove(i)
            return jsonify(all_scene=all_scene, scene=scene)
        elif request.form['type'] == 'modify_suite':
            scene_data = ','.join(json.loads(request.form['data']))
            g.db.execute('update suite set data=? where name=? and project=?', (scene_data, request.form['name'], request.form['project']))
            g.db.commit()
            return jsonify(code=200, message=('%s!' % request.form['name']))
        elif request.form['type'] == 'del_suite':
            try:
                # Refuse to delete a suite still referenced by a task.
                if (request.form['suite'],) in g.db.execute('select suite from interf_task;').fetchall():
                    return jsonify(msg=',,!')
                else:
                    g.db.execute('delete from suite where name=? and project=?;', (request.form['suite'], request.form['project']))
                    g.db.commit()
                    return jsonify(msg='')
            except Exception as e:
                return jsonify(msg=('Error:' + repr(e)))
        elif request.form['type'] == 'new_suite':
            try:
                # Suite names must be globally unique.
                if (request.form['suite'],) in g.db.execute('select name from suite;').fetchall():
                    return jsonify(msg=',!')
                else:
                    g.db.execute('insert into suite(name,project) values(?,?);', (request.form['suite'], request.form['project']))
                    g.db.commit()
                    return jsonify(msg='')
            except Exception as e:
                return jsonify(msg=('Error:' + repr(e)))
def findContours(*args, **kwargs):
    """Version-agnostic wrapper around ``cv2.findContours``.

    OpenCV 3.x returns ``(image, contours, hierarchy)`` while OpenCV 4.x
    returns ``(contours, hierarchy)``; normalize both to the 4.x shape.

    Raises:
        AssertionError: if the installed cv2 is neither major version 3 nor 4.
    """
    version = cv2.__version__
    if version.startswith('3'):
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif version.startswith('4'):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError('cv2 must be either version 3 or 4 to call this method')
    return (contours, hierarchy)
def test(strng):
    """Parse the INI file at path *strng* with the inifile grammar.

    Prints the path, then either pretty-prints the parsed token list or,
    on a parse error, prints the offending line with a caret under the
    failing column followed by the exception itself.

    Returns the pyparsing ParseResults on success, or None when parsing
    fails with a ParseException.

    Fixes over the previous version: the file handle is closed via a
    ``with`` block even when an unexpected exception escapes, and the
    ``return`` no longer raises NameError after a parse failure (``tokens``
    was unbound on that path).
    """
    print(strng)
    tokens = None
    try:
        # read() is equivalent to ''.join(readlines()) without the list.
        with open(strng) as ini_file:
            ini_data = ini_file.read()
        bnf = inifile_BNF()
        tokens = bnf.parseString(ini_data)
        pp.pprint(tokens.asList())
    except ParseException as err:
        print(err.line)
        print(((' ' * (err.column - 1)) + '^'))
        print(err)
    print()
    return tokens
def _get_example(language: str) -> str:
    """Build the example code-block snippet shown to the user.

    Python-like specifiers get the Python example, any other non-empty
    specifier gets a ``...`` placeholder in that language, and a missing
    specifier gets a plain "Hello, world!" body.
    """
    lowered = language.lower()
    if lowered in _parsing.PY_LANG_CODES:
        log.trace(f'Code block has a Python language specifier `{language}`.')
        content = _EXAMPLE_PY.format(lang=language)
    elif not language:
        log.trace('Code block has no language specifier.')
        content = '\nHello, world!'
    else:
        log.trace(f'Code block has a foreign language specifier `{language}`.')
        content = f'{language}\n...'
    return _EXAMPLE_CODE_BLOCKS.format(content=content)
def _generate_mock_adapters():
    """Create mock ``ifaddr.Adapter`` objects for network-enumeration tests.

    Covers loopback, a regular IPv4 interface, an IPv6 interface (ifaddr
    represents IPv6 as an (address, flowinfo, scope_id) tuple) and a
    link-local tunnel interface.
    """
    # (nice_name, ip payload, network prefix, adapter index)
    specs = [
        ('lo0', '127.0.0.1', 8, 0),
        ('eth0', ('2001:db8::', 1, 1), 8, 1),
        ('eth1', '192.168.1.5', 23, 2),
        ('vtun0', '169.254.3.2', 16, 3),
    ]
    adapters = {}
    for name, ip, prefix, index in specs:
        adapter = Mock(spec=ifaddr.Adapter)
        adapter.nice_name = name
        adapter.ips = [ifaddr.IP(ip, prefix, name)]
        adapter.index = index
        adapters[name] = adapter
    # Deliberately not in index order: eth0 is listed before lo0.
    return [adapters['eth0'], adapters['lo0'], adapters['eth1'], adapters['vtun0']]
def temporary_failure(count=1):
    """Return source code for a test module that fails its first *count* runs.

    The generated module keeps a run counter in a ``test.res`` file next to
    itself and raises until the counter exceeds ``count`` — useful for
    exercising rerun/flaky-test plugins. Note that ``{{0}}`` doubles to a
    literal ``{0}`` inside this f-string so the generated code can call
    ``.format`` itself; only ``{count}`` is interpolated here.
    """
    return f'''
import py
path = py.path.local(__file__).dirpath().ensure('test.res')
count = path.read() or 1
if int(count) <= {count}:
    path.write(int(count) + 1)
    raise Exception('Failure: {{0}}'.format(count))'''
class Money():
    """Convert the numeric parts of a money string to Chinese numerals.

    Attributes:
        money: the raw input string (may contain currency symbols/units).
        chntext: the converted text; only written when at least one number
            is found in ``money``.
    """

    def __init__(self, money=None, chntext=None):
        self.money = money
        self.chntext = chntext

    def money2chntext(self):
        """Replace every decimal number in ``self.money`` with its Chinese
        rendering via ``Cardinal``; returns ``self.chntext`` (unchanged when
        no number was found)."""
        text = self.money
        matches = re.findall(r'(\d+(\.\d+)?)', text)
        if matches:
            for groups in matches:
                number = groups[0]
                text = text.replace(number, Cardinal(cardinal=number).cardinal2chntext())
            # Only store the result when something was actually converted,
            # mirroring the original conditional assignment.
            self.chntext = text
        return self.chntext
class Time2CapAmountGetter(SmoothPointGetter):
    """Graph point getter: x = time (s), y = capacitor amount.

    When ``useCapsim`` is off, the base class plots the analytic regen
    curve via ``_getCommonData``/``_calculatePoint``. When it is on,
    discrete (time, cap) events from the fit's cap simulation are used as
    anchor points, with regen curves interpolated between them.
    """

    def getRange(self, xRange, miscParams, src, tgt):
        """Return (xs, ys) lists of plot points over the time window *xRange*."""
        if (not miscParams['useCapsim']):
            # Capsim disabled: fall back to the smooth analytic curve.
            return super().getRange(xRange=xRange, miscParams=miscParams, src=src, tgt=tgt)
        capAmountT0 = (miscParams['capAmountT0'] or 0)
        # Sequence of (time, capAmount) pairs — presumably sorted by time;
        # TODO confirm against getCapSimData's contract.
        capSimDataRaw = src.item.getCapSimData(startingCap=capAmountT0)
        if (not capSimDataRaw):
            # No simulation data: fall back to the analytic curve.
            return super().getRange(xRange=xRange, miscParams=miscParams, src=src, tgt=tgt)
        capSimDataMaxTime = capSimDataRaw[(- 1)][0]
        (minTime, maxTime) = xRange
        # Never plot past the last simulated event.
        maxTime = min(maxTime, capSimDataMaxTime)
        # Widest allowed gap between consecutive plotted points.
        maxPointXDistance = ((maxTime - minTime) / self._baseResolution)
        capSimDataInRange = {k: v for (k, v) in capSimDataRaw if (minTime <= k <= maxTime)}
        prevTime = minTime
        xs = []
        ys = []
        capSimDataBefore = {k: v for (k, v) in capSimDataRaw if (k < minTime)}
        # If the last sim event lies entirely before the window, the sim has
        # ended — nothing to draw.
        if ((len(capSimDataBefore) > 0) and (max(capSimDataBefore) == capSimDataMaxTime)):
            return (xs, ys)
        maxCapAmount = src.item.ship.getModifiedItemAttr('capacitorCapacity')
        # Attribute is in milliseconds; convert to seconds.
        capRegenTime = (src.item.ship.getModifiedItemAttr('rechargeRate') / 1000)
        def plotCapRegen(prevTime, prevCap, currentTime):
            # Fill (prevTime, currentTime] with regen-curve points spaced no
            # wider than maxPointXDistance, appending to the enclosing xs/ys.
            subrangeAmount = math.ceil(((currentTime - prevTime) / maxPointXDistance))
            subrangeLength = ((currentTime - prevTime) / subrangeAmount)
            for i in range(1, (subrangeAmount + 1)):
                subrangeTime = (prevTime + (subrangeLength * i))
                subrangeCap = calculateCapAmount(maxCapAmount=maxCapAmount, capRegenTime=capRegenTime, capAmountT0=prevCap, time=(subrangeTime - prevTime))
                xs.append(subrangeTime)
                ys.append(subrangeCap)
        # Seed the first point: regen forward from either the last sim event
        # before the window or from the initial cap amount.
        if capSimDataBefore:
            timeBefore = max(capSimDataBefore)
            capBefore = capSimDataBefore[timeBefore]
            prevCap = calculateCapAmount(maxCapAmount=maxCapAmount, capRegenTime=capRegenTime, capAmountT0=capBefore, time=(prevTime - timeBefore))
        else:
            prevCap = calculateCapAmount(maxCapAmount=maxCapAmount, capRegenTime=capRegenTime, capAmountT0=capAmountT0, time=prevTime)
        xs.append(prevTime)
        ys.append(prevCap)
        # Walk the sim events in time order, drawing a regen curve up to each
        # event and then the event's own (time, cap) anchor point.
        for currentTime in sorted(capSimDataInRange):
            if (currentTime > prevTime):
                plotCapRegen(prevTime=prevTime, prevCap=prevCap, currentTime=currentTime)
            currentCap = capSimDataInRange[currentTime]
            xs.append(currentTime)
            ys.append(currentCap)
            prevTime = currentTime
            prevCap = currentCap
        # Regen tail from the last event to the (clamped) window end.
        if (maxTime > prevTime):
            plotCapRegen(prevTime=prevTime, prevCap=prevCap, currentTime=maxTime)
        return (xs, ys)

    def getPoint(self, x, miscParams, src, tgt):
        """Return the cap amount at time *x*, or None past the sim's end."""
        if (not miscParams['useCapsim']):
            return super().getPoint(x=x, miscParams=miscParams, src=src, tgt=tgt)
        capAmountT0 = (miscParams['capAmountT0'] or 0)
        capSimDataRaw = src.item.getCapSimData(startingCap=capAmountT0)
        if (not capSimDataRaw):
            return super().getPoint(x=x, miscParams=miscParams, src=src, tgt=tgt)
        currentTime = x
        # Note: inclusive bound here (k <= currentTime), unlike getRange's
        # strict k < minTime — an event at exactly x counts as "before".
        capSimDataBefore = {k: v for (k, v) in capSimDataRaw if (k <= currentTime)}
        capSimDataMaxTime = capSimDataRaw[(- 1)][0]
        # Past the final sim event there is no defined value.
        if ((len(capSimDataBefore) > 0) and (max(capSimDataBefore) == capSimDataMaxTime)):
            return None
        maxCapAmount = src.item.ship.getModifiedItemAttr('capacitorCapacity')
        # Attribute is in milliseconds; convert to seconds.
        capRegenTime = (src.item.ship.getModifiedItemAttr('rechargeRate') / 1000)
        if capSimDataBefore:
            timeBefore = max(capSimDataBefore)
            capBefore = capSimDataBefore[timeBefore]
            if (timeBefore == currentTime):
                # x falls exactly on a sim event: use its value directly.
                currentCap = capBefore
            else:
                currentCap = calculateCapAmount(maxCapAmount=maxCapAmount, capRegenTime=capRegenTime, capAmountT0=capBefore, time=(currentTime - timeBefore))
        else:
            currentCap = calculateCapAmount(maxCapAmount=maxCapAmount, capRegenTime=capRegenTime, capAmountT0=capAmountT0, time=currentTime)
        return currentCap

    def _getCommonData(self, miscParams, src, tgt):
        # Per-plot invariants for the base class's analytic path.
        return {'maxCapAmount': src.item.ship.getModifiedItemAttr('capacitorCapacity'), 'capRegenTime': (src.item.ship.getModifiedItemAttr('rechargeRate') / 1000)}

    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        # Analytic regen value at time x, used when capsim is disabled.
        time = x
        capAmount = calculateCapAmount(maxCapAmount=commonData['maxCapAmount'], capRegenTime=commonData['capRegenTime'], capAmountT0=(miscParams['capAmountT0'] or 0), time=time)
        return capAmount
def test_prevent_redundant_quantity(blank_game_description):
    """A RequirementSet should keep only the simplest alternative when the
    others are redundant supersets of it."""
    db = blank_game_description.resource_database
    (res_a, id_req_a) = make_req_a(db)
    (res_b, id_req_b) = make_req_b(db)
    alternatives = [
        RequirementList([id_req_a]),
        RequirementList([id_req_a, id_req_b]),
        RequirementList([ResourceRequirement.create(res_a, 5, False)]),
    ]
    the_set = RequirementSet(alternatives)
    expected = frozenset([RequirementList([id_req_a])])
    assert the_set.alternatives == expected
class MultiResolutionDataset(Dataset):
    """Image dataset backed by an LMDB file keyed by resolution and index.

    Expected LMDB layout: a ``length`` key holding the sample count and one
    image-bytes entry per sample under ``'{resolution}-{index:05d}'`` keys —
    presumably one entry per stored resolution; confirm against the writer.
    """

    def __init__(self, path, transform, resolution=8):
        # Read-only environment; locking/readahead disabled for worker use.
        self.env = lmdb.open(
            path,
            max_readers=32,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )
        if not self.env:
            raise IOError('Cannot open lmdb dataset', path)
        with self.env.begin(write=False) as txn:
            raw_length = txn.get('length'.encode('utf-8'))
            self.length = int(raw_length.decode('utf-8'))
        self.resolution = resolution
        self.transform = transform

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8')
        with self.env.begin(write=False) as txn:
            img_bytes = txn.get(key)
        image = Image.open(BytesIO(img_bytes))
        return self.transform(image)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.