code stringlengths 281 23.7M |
|---|
# NOTE(review): the next line looks like a task decorator whose leading
# "@<name>" was lost in extraction (QueueOnce suggests celery_once, e.g.
# "@app.task(once=..., base=QueueOnce)"); as written it is not valid syntax.
# Confirm against the original file.
(once={'graceful': True}, base=QueueOnce)
def delete_es_backup_monthly():
    """Delete elasticsearch snapshots older than 30 days.

    For each configured repository (CASE_REPO, AO_REPO) every snapshot whose
    start_time date is at least 30 days old is deleted, except snapshots whose
    id contains 'arch_mur'.  A Slack message is posted per repository on
    success; any exception is logged and reported to Slack instead of being
    re-raised.
    """
    today = datetime.datetime.today()
    repo_list = [CASE_REPO, AO_REPO]
    try:
        for repo_name in repo_list:
            snapshots = display_snapshot_detail(repo_name)
            snapshots = snapshots.get('snapshots')
            for snapshot in snapshots:
                id = snapshot.get('snapshot')
                # start_time looks ISO-8601-like ('YYYY-MM-DDThh:mm:ss...');
                # only the date part is compared — TODO confirm the format.
                snapshot_date = snapshot.get('start_time')
                snapshot_date = datetime.datetime.strptime(snapshot_date.split('T')[0], '%Y-%m-%d')
                if (((today - snapshot_date).days >= 30) and ('arch_mur' not in id)):
                    delete_snapshot(repo_name, id)
                    logger.info("deleting snapshot: '{0}'".format(id))
                    # Throttle deletions so the cluster is not hammered.
                    time.sleep(30)
            # NOTE(review): placement inferred — the message interpolates
            # repo_name, so it presumably fires once per repository; confirm.
            logger.info(' Monthly (%s) elasticsearch snapshot deletion completed', datetime.date.today().strftime('%A'))
            slack_message = 'Monthly elasticsearch deletion completed in {0} space in repository: ({1})'.format(get_app_name(), repo_name)
            utils.post_to_slack(slack_message, SLACK_BOTS)
    except Exception as error:
        logger.exception(error)
        slack_message = '*ERROR* elasticsearch snapshot deletion failed for {0}. Check logs.'.format(get_app_name())
        utils.post_to_slack(slack_message, SLACK_BOTS)
# NOTE(review): the two lines below look like class decorators that lost their
# "@" prefix during extraction (Ryu-style "@_register_parser" and
# "@_set_msg_type(...)") — confirm against the original file.
_register_parser
_set_msg_type(ofproto.OFPT_FEATURES_REPLY)
class OFPSwitchFeatures(MsgBase):
    """OpenFlow OFPT_FEATURES_REPLY message (switch features)."""

    def __init__(self, datapath, datapath_id=None, n_buffers=None, n_tables=None, capabilities=None, actions=None, ports=None):
        super(OFPSwitchFeatures, self).__init__(datapath)
        self.datapath_id = datapath_id
        self.n_buffers = n_buffers
        self.n_tables = n_tables
        self.capabilities = capabilities
        self.actions = actions
        self.ports = ports

    # NOTE(review): takes `cls` and calls super().parser — presumably this was
    # decorated with @classmethod in the original; the decorator appears to
    # have been stripped in extraction.
    def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
        """Parse a features-reply from `buf` into an OFPSwitchFeatures message."""
        msg = super(OFPSwitchFeatures, cls).parser(datapath, version, msg_type, msg_len, xid, buf)
        # Fixed-size fields follow the common OpenFlow header.
        (msg.datapath_id, msg.n_buffers, msg.n_tables, msg.capabilities, msg.actions) = struct.unpack_from(ofproto.OFP_SWITCH_FEATURES_PACK_STR, msg.buf, ofproto.OFP_HEADER_SIZE)
        msg.ports = {}
        # The remainder of the message is a variable-length array of phy ports.
        n_ports = ((msg_len - ofproto.OFP_SWITCH_FEATURES_SIZE) // ofproto.OFP_PHY_PORT_SIZE)
        offset = ofproto.OFP_SWITCH_FEATURES_SIZE
        for _i in range(n_ports):
            port = OFPPhyPort.parser(msg.buf, offset)
            msg.ports[port.port_no] = port
            offset += ofproto.OFP_PHY_PORT_SIZE
        return msg
def extractSairennohebitranslationsWordpressCom(item):
    """Classify a feed item from sairennohebitranslations.wordpress.com.

    Returns a release message for known series tags, None for previews or
    items without a chapter/volume, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items that carry neither a chapter nor a volume.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [
        ('Kanna no Kanna', 'Kanna no Kanna', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def log_level_formatted_string() -> str:
    """Build the multi-line help text describing accepted LEVEL values."""
    # Split the configured choices into numeric and literal level names.
    numeric_levels, literal_levels = [], []
    for choice in LOG_LEVEL_CHOICES:
        (numeric_levels if choice.isdigit() else literal_levels).append(choice)
    return f'''LEVEL must be one of:
{'/'.join(numeric_levels)} (numeric);
{'/'.join(literal_levels).lower()} (lowercase);
{'/'.join(literal_levels).upper()} (uppercase).'''
class TestSolidProject(unittest.TestCase):
    """Tests for SolidProject behaviour on a pooled sample with 3 libraries."""

    def setUp(self):
        # Adding libraries to the sample implicitly creates the project
        # under test (presumably grouped by shared name stem — confirm).
        self.library_names = ['PJB_NY_17', 'PJB_NY_18', 'PJB_NY_19']
        self.sample = SolidSample('PJB_pool')
        for name in self.library_names:
            self.sample.addLibrary(name)
        self.project = self.sample.projects[0]

    def test_libraries(self):
        """Project holds one SolidLibrary per added name, in insertion order."""
        self.assertEqual(len(self.library_names), len(self.project.libraries))
        for i in range(len(self.library_names)):
            self.assertTrue(isinstance(self.project.libraries[i], SolidLibrary))
            self.assertEqual(self.library_names[i], self.project.libraries[i].name)

    def test_get_sample(self):
        """getSample returns the owning sample; a detached project has none."""
        self.assertEqual(self.sample, self.project.getSample())
        self.assertEqual(None, SolidProject('No_sample').getSample())

    def test_get_run(self):
        """A project with no run attached reports None."""
        self.assertEqual(None, self.project.getRun())

    def test_is_barcoded(self):
        """isBarcoded is True only when every library is barcoded."""
        self.assertFalse(self.project.isBarcoded())
        for lib in self.project.libraries:
            lib.is_barcoded = True
        self.assertTrue(self.project.isBarcoded())
        # A project with no libraries is not considered barcoded.
        self.assertFalse(SolidProject('No_libraries').isBarcoded())

    def test_get_library_name_pattern(self):
        """Pattern is the common library-name prefix plus a '*' wildcard."""
        self.assertEqual('PJB_NY_1*', self.project.getLibraryNamePattern())

    def test_get_project_name(self):
        """Project name comes from the parent sample."""
        self.assertEqual('PJB_pool', self.project.getProjectName())

    def test_pretty_print_libraries(self):
        """Consecutive numeric suffixes collapse into a range string."""
        self.assertEqual('PJB_NY_17-19', self.project.prettyPrintLibraries())
class TogglePublishActionsMixin(object):
    """Admin mixin that adds an inline "toggle publish" action for Articles."""

    def get_inline_actions(self, request, obj=None):
        """Extend the inherited inline actions with `toggle_publish`."""
        inline_actions = super(TogglePublishActionsMixin, self).get_inline_actions(request=request, obj=obj)
        inline_actions.append('toggle_publish')
        return inline_actions

    def toggle_publish(self, request, obj, parent_obj=None):
        """Flip the article between DRAFT and PUBLISHED and notify the user."""
        obj.status = Article.PUBLISHED if obj.status == Article.DRAFT else Article.DRAFT
        obj.save()
        status = 'unpublished' if obj.status == Article.DRAFT else 'published'
        messages.info(request, _('Article {}.'.format(status)))

    def get_toggle_publish_label(self, obj):
        """Return the action label matching the article's current state."""
        if obj.status == Article.DRAFT:
            return 'Toggle {}'.format('publish')
        return 'Toggle {}'.format('unpublish')

    def get_toggle_publish_css(self, obj):
        """Return the CSS classes used to render the action button."""
        if obj.status == Article.DRAFT:
            return 'button object-tools'
        return 'default'
def env_registration_test(environment_type: type, wrapper_factory: Callable[([], ObservationNormalizationWrapper)]) -> None:
    """Instantiate an env via `wrapper_factory` and check its registered types.

    Asserts the factory product is both the expected concrete environment type
    and an ObservationNormalizationWrapper.

    The env is now closed unconditionally (try/finally): the original leaked
    the environment when an assertion failed.
    """
    env: ObservationNormalizationWrapper = wrapper_factory()
    try:
        assert isinstance(env, environment_type)
        assert isinstance(env, ObservationNormalizationWrapper)
    finally:
        env.close()
class MullerBrownPot(AnaPotBase):
    """Analytical Müller-Brown model potential: a sum of four 2-D exponential
    terms, a common benchmark surface for minimum/saddle-point searches."""

    def __init__(self):
        # Parameters of the four exponential terms.
        A = ((- 200), (- 100), (- 170), 15)
        x0 = (1.0, 0.0, (- 0.5), (- 1.0))
        y0 = (0.0, 0.5, 1.5, 1.0)
        a = ((- 1.0), (- 1.0), (- 6.5), 0.7)
        b = (0.0, 0.0, 11.0, 0.6)
        c = ((- 10.0), (- 10.0), (- 6.5), 0.7)
        # Expression template for one term; the full potential is the sum of
        # the four instantiated terms.  (Removed a dead `V_str = ''`
        # assignment that was immediately overwritten by the join below.)
        V_str_base = '{Ai}*exp({ai}*(x-{xi})**2 + {bi}*(x-{xi})*(y-{yi}) + {ci}*(y-{yi})**2)'
        V_str = ' + '.join(V_str_base.format(Ai=A[i], ai=a[i], xi=x0[i], bi=b[i], yi=y0[i], ci=c[i]) for i in range(4))
        # Plot window and contour levels for visualisation.
        xlim = ((- 1.75), 1.25)
        ylim = ((- 0.5), 2.25)
        levels = np.linspace((- 200), 100, 100)
        # Known stationary points as (x, y, z) triples.
        minima = (((- 0.5582236), 1., 0.0), (0., 0., 0.0), ((- 0.0435), 0.4648, 0.0))
        saddles = (((- 0.822), 0.624, 0.0),)
        super(MullerBrownPot, self).__init__(V_str=V_str, xlim=xlim, ylim=ylim, levels=levels, minima=minima, saddles=saddles)

    def __str__(self):
        return 'MullerBrownPot calculator'
class Solution():
    """LeetCode 233 — count occurrences of the digit 1."""

    def countDigitOne(self, n: int) -> int:
        """Return how many times the digit 1 appears in the integers 1..n.

        Processes one decimal place at a time: at each place, full cycles of
        `place * 10` contribute `higher * place` ones, and a partial cycle
        contributes between 0 and `place` additional ones.
        """
        total = 0
        place = 1  # 1, 10, 100, ... — the digit position being counted
        while place <= n:
            higher, remainder = divmod(n, place * 10)
            total += higher * place
            if remainder >= place:
                total += min(remainder - place + 1, place)
            place *= 10
        return total
def _default_custom_logging(monitoring_start_time: float, n_jobs: int, state_jobs: tp.Dict[(str, tp.Set[int])]):
    """Print a one-line progress summary for a batch of monitored jobs.

    Args:
        monitoring_start_time: epoch seconds when monitoring began.
        n_jobs: total number of jobs being monitored.
        state_jobs: mapping from state name ('RUNNING', 'FAILED', 'DONE')
            to the set of job indices currently in that state.
    """
    run_time = (time.time() - monitoring_start_time)
    date_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    failed_job_indices = sorted(state_jobs['FAILED'])
    # Pad counters to the width of n_jobs so consecutive lines align.
    n_chars = len(str(n_jobs))
    # NOTE(review): 'DONE' appears to include failed jobs, hence the
    # subtraction in "jobs done" — confirm against the producer of state_jobs.
    print(f'[{date_time}] Launched {int((run_time / 60))} minutes ago,', f"{len(state_jobs['RUNNING']):{n_chars}}/{n_jobs} jobs running,", f'{len(failed_job_indices):{n_chars}}/{n_jobs} jobs failed,', f"{(len(state_jobs['DONE']) - len(failed_job_indices)):{n_chars}}/{n_jobs} jobs done", flush=True)
    if (len(failed_job_indices) > 0):
        print(f'[{date_time}] Failed jobs, indices {failed_job_indices}', flush=True)
def increment_tag(session: nox.Session, all_tags: List, version_number: str, tag_type: TagType):
    """Compute the next tag name for a version/tag-type pair.

    Finds the highest-numbered existing tag matching VERSION_TAG_REGEX and
    returns it incremented by TAG_INCREMENT; falls back to
    INITIAL_TAG_INCREMENT when no tag matches yet.
    """
    pattern = VERSION_TAG_REGEX.format(version=version_number, tag_type=tag_type.value)
    # Collect all matching tags, highest increment first.
    matches = [m for m in (re.fullmatch(pattern, tag.name) for tag in all_tags) if m]
    matches.sort(key=lambda m: int(m.group(1)), reverse=True)
    if not matches:
        return f'{version_number}{tag_type.value}{INITIAL_TAG_INCREMENT}'
    newest = matches[0]
    session.log(f'Found existing {tag_type.name.lower()} tag {newest.group(0)}, incrementing it')
    next_increment = int(newest.group(1)) + TAG_INCREMENT
    return f'{version_number}{tag_type.value}{next_increment}'
def extractStrangelyinterestingstoryWordpressCom(item):
    """Classify a feed item from strangelyinterestingstory.wordpress.com.

    Returns a release message for the known series tag, None for previews or
    items without a chapter/volume, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # Single known series for this site.
    if 'Parameter Rimokon' in item['tags']:
        return buildReleaseMessageWithType(item, 'Parameter Remote Controller', vol, chp, frag=frag, postfix=postfix, tl_type='translated')
    return False
class OptionSeriesTreegraphSonificationContexttracksMappingLowpass(Options):
    """Config node for series.treegraph.sonification.contextTracks.mapping.lowpass
    (auto-generated Highcharts-style option tree).

    NOTE(review): these accessors look like they were @property-decorated in
    the original; the decorators appear stripped in this extract — confirm.
    """

    def frequency(self) -> 'OptionSeriesTreegraphSonificationContexttracksMappingLowpassFrequency':
        """Sub-configuration for the lowpass filter frequency."""
        return self._config_sub_data('frequency', OptionSeriesTreegraphSonificationContexttracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionSeriesTreegraphSonificationContexttracksMappingLowpassResonance':
        """Sub-configuration for the lowpass filter resonance."""
        return self._config_sub_data('resonance', OptionSeriesTreegraphSonificationContexttracksMappingLowpassResonance)
class EVM(Eth1ChainRPCModule):
    """RPC module exposing test-fixture helpers for an Eth1 chain.

    NOTE(review): the bare `_params(...)` lines below look like decorators
    ("@_params(...)") whose "@" prefix was lost in extraction — confirm.
    """
    _params(normalize_blockchain_fixtures)
    async def resetToGenesisFixture(self, chain_info: Any) -> ChainAPI:
        """Replace the running chain with a fresh one built from `chain_info`."""
        chain = new_chain_from_fixture(chain_info, type(self.chain))
        # Broadcast internally so other in-process components swap chains too.
        (await self.event_bus.broadcast(ChainReplacementEvent(chain), BroadcastConfig(internal=True)))
        return chain

    _params(normalize_block)
    async def applyBlockFixture(self, block_info: Any) -> str:
        """Apply a fixture block to the chain; return its RLP encoding as hex."""
        (_, _, rlp_encoded) = apply_fixture_block_to_chain(block_info, self.chain)
        return encode_hex(rlp_encoded)
class Block(Node):
    """A block node in a model tree (Simulink-style hierarchy — confirm).

    Attributes:
        sid: unique string id of the block.
        name: display name.
        kind: block type string.
        custom_attr: list of free-form attribute strings.
    """

    def __init__(self, sid, name, kind, custom_attr):
        assert isinstance(sid, str), ('expected string, got %s' % type(sid))
        assert isinstance(name, str)
        assert isinstance(kind, str)
        assert isinstance(custom_attr, list)
        super().__init__()
        self.sid = sid
        self.name = name
        self.kind = kind
        self.custom_attr = custom_attr

    def dump_hierarchy(self, indent=0):
        """Print this block and its custom attributes, indented by `indent`."""
        print((' ' * indent), ('Block %s (%s)' % (self.kind, repr(self.name))))
        for attr in self.custom_attr:
            print((' ' * (indent + 1)), ('Attribute (%s)' % attr))

    def set_parent(self, n_parent):
        """Attach this block to a parent; only System nodes may own blocks."""
        assert isinstance(n_parent, System)
        super().set_parent(n_parent)

    def get_container(self):
        """Walk up the parent chain to the enclosing Container.

        Assumes every block ultimately sits inside a Container — an orphan
        chain would raise AttributeError on a None parent.
        """
        ptr = self.n_parent
        while (not isinstance(ptr, Container)):
            ptr = ptr.n_parent
        return ptr

    def full_name(self):
        """Return the '/'-joined path of Model/Block names from the root.

        Library ancestors terminate the walk, so library-internal path
        components are excluded.
        """
        name_stack = []
        ptr = self
        while ptr:
            if isinstance(ptr, Model):
                name_stack.append(ptr.name)
            elif isinstance(ptr, Library):
                break
            elif isinstance(ptr, Block):
                name_stack.append(ptr.name)
            ptr = ptr.n_parent
        return '/'.join(reversed(name_stack))

    def local_name(self):
        """Like full_name, but relative to the enclosing Container."""
        name_stack = []
        ptr = self
        while ptr:
            if isinstance(ptr, Container):
                break
            elif isinstance(ptr, Block):
                name_stack.append(ptr.name)
            ptr = ptr.n_parent
        return '/'.join(reversed(name_stack))

    def loc(self):
        """Source location: the container's filename plus the local block path."""
        return Location(self.get_container().filename, blockname=self.local_name())

    def iter_all_blocks(self):
        """Yield this block — the leaf case of the block-tree iteration."""
        (yield self)
class TestTFRecordUtils():
    """Tests for the TFRecord encoder/decoder inference helpers.

    NOTE(review): pytest decorators appear stripped in this extract — the bare
    `.parametrize(...)` lines were presumably `@pytest.mark.parametrize`, and
    the defs without `self` (`make_multi_features_sample`, `dataframe`) were
    presumably `@pytest.fixture`s.  As written this block is not valid
    standalone Python; confirm against the original file.
    """

    class TestInferTFRecordEncoderDecoder():

        # Fixture: factory for a sample dict with a scalar string feature, a
        # scalar int64 feature and a length-5 int32 list feature.
        def make_multi_features_sample(make_tensor_for_dtype_shape):
            return (lambda : {'string_feature': make_tensor_for_dtype_shape([], 'string'), 'int_feature': make_tensor_for_dtype_shape([], 'int64'), 'int_list_feature': make_tensor_for_dtype_shape([5], 'int32')})

        .parametrize('dtype', [dtype.name for dtype in DTYPE_TO_PROTO_DTYPE.keys()])
        def test_encode_decode_scalar_tensor_with_eager_execution(dtype, make_tensor_for_dtype_shape):
            """Round-trip a scalar tensor of every supported dtype."""
            shape = []
            tensor = make_tensor_for_dtype_shape(shape, dtype)
            sample = {'feature': tensor}
            (encoder, decoder) = build_tfrecord_encoder_decoder_from_spec(sample)
            np.testing.assert_array_equal(decoder(encoder(sample))['feature'].numpy(), tensor.numpy())

        # tf.string is excluded: 1-D string tensors are not covered here.
        .parametrize('dtype', [dtype.name for dtype in DTYPE_TO_PROTO_DTYPE.keys() if (dtype is not tf.string)])
        def test_encode_decode_1d_tensor_with_eager_execution(dtype, make_tensor_for_dtype_shape):
            """Round-trip a 1-D tensor of every supported non-string dtype."""
            shape = [5]
            tensor = make_tensor_for_dtype_shape(shape, dtype)
            sample = {'feature': tensor}
            (encoder, decoder) = build_tfrecord_encoder_decoder_from_spec(sample)
            np.testing.assert_array_equal(decoder(encoder(sample))['feature'].numpy(), tensor.numpy())

        def test_encode_decode_multiple_tensors_with_eager_execution(make_multi_features_sample):
            """Round-trip a sample containing several differently-typed features."""
            sample = make_multi_features_sample()
            (encoder, decoder) = build_tfrecord_encoder_decoder_from_spec(sample)
            result = decoder(encoder(sample))
            assert (result.keys() == sample.keys())
            for (key, tensor) in sample.items():
                np.testing.assert_array_equal(result[key].numpy(), tensor.numpy())

        # Fixture: 5-row frame with image metadata and a composed crop_window.
        def dataframe():
            return pd.DataFrame({'image_name': [f'data/im_{i}.jpg' for i in range(5)], 'label': ['DOG', 'CAT', 'FISH', 'FISH', 'DOG'], 'split': 'val', **{column: np.random.randint(100, 600, 5) for column in ['crop_x', 'crop_y', 'crop_height', 'crop_width']}}).assign(crop_window=(lambda df: df[['crop_y', 'crop_x', 'crop_height', 'crop_width']].values.tolist()))

        def test_infer_tfrecord_encoder_decoder_generalize_from_sample_to_sample(dataframe, tmp_path):
            """Encoder/decoder inferred from the first sample must handle all rows."""
            filename = (tmp_path / 'example.tfrecord')
            original_dataset = tf.data.Dataset.from_tensor_slices(dataframe.to_dict('list'))
            first_sample = next(iter(original_dataset))
            (encoder, decoder) = build_tfrecord_encoder_decoder_from_spec(first_sample)
            with tf.io.TFRecordWriter(str(filename)) as writer:
                for sample in original_dataset:
                    writer.write(encoder(sample))
            parsed_dataset = tf.data.TFRecordDataset(str(filename), num_parallel_reads=tf.data.experimental.AUTOTUNE).map(decoder, num_parallel_calls=tf.data.experimental.AUTOTUNE)
            for (i, (original_sample, parsed_sample)) in enumerate(zip(original_dataset, parsed_dataset)):
                assert (parsed_sample.keys() == original_sample.keys())
                for key in original_sample:
                    np.testing.assert_array_equal(parsed_sample[key].numpy(), original_sample[key].numpy())
def download(url, dst=None):
    """Download `url` to a local file and return the resulting filename.

    Args:
        url: source URL.
        dst: target filename, an existing directory to download into, or
            None to derive the name from the URL / response headers.

    Returns:
        The path of the downloaded file.
    """
    dst_ = None
    # When dst is an existing directory, remember it and derive the
    # filename separately from the URL/headers.
    if (dst and os.path.isdir(dst)):
        dst_ = dst
        dst = None
    prefix = FileName.from_any(dst=dst, url=url)
    # Reserve a unique temp name in the current directory, then free it so
    # urlretrieve can recreate it.
    # NOTE(review): unlinking before reuse is a TOCTOU race — another process
    # could claim the name in between; confirm this is acceptable here.
    (fd, tmpfile) = tempfile.mkstemp('.tmp', prefix=prefix, dir='.')
    os.close(fd)
    os.unlink(tmpfile)
    if PY2:
        binurl = url
    else:
        # Py3: percent-encode the path component so non-ASCII URLs work.
        binurl = list(urlparse.urlsplit(url))
        binurl[2] = urlparse.quote(binurl[2])
        binurl = urlparse.urlunsplit(binurl)
    # NOTE(review): `urlib` is presumably a py2/py3 compat alias defined
    # elsewhere in this module (urllib / urllib.request) — confirm.
    (tmpfile, headers) = urlib.urlretrieve(binurl, tmpfile)
    filename = FileName.from_any(dst=dst, headers=headers, url=url)
    if dst_:
        filename = os.path.join(dst_, filename)
    # Replace any pre-existing file at the destination.
    if os.path.exists(filename):
        os.unlink(filename)
    shutil.move(tmpfile, filename)
    return filename
class _AssertTraitChangesContext(object):
def __init__(self, obj, xname, count, test_case):
self.obj = obj
self.xname = xname
self.count = count
self.event = None
self.events = []
self.failureException = test_case.failureException
def _listener(self, obj, name, old, new):
self.event = (obj, name, old, new)
self.events.append(self.event)
def __enter__(self):
self.obj.on_trait_change(self._listener, self.xname)
return self
def __exit__(self, exc_type, exc_value, tb):
if (exc_type is not None):
return False
self.obj.on_trait_change(self._listener, self.xname, remove=True)
if ((self.count is not None) and (len(self.events) != self.count)):
msg = 'Change event for {0} was fired {1} times instead of {2}'
items = (self.xname, len(self.events), self.count)
raise self.failureException(msg.format(*items))
elif ((self.count is None) and (not self.events)):
msg = 'A change event was not fired for: {0}'.format(self.xname)
raise self.failureException(msg)
return False |
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.is_power_of_two."""

    def test_is_power_of_two(self):
        solution = Solution()
        # None is rejected outright.
        self.assertRaises(TypeError, solution.is_power_of_two, None)
        # Value/expected pairs, in the same order as the original checks.
        for value, expected in ((0, False), (1, True), (2, True), (15, False), (16, True)):
            self.assertEqual(solution.is_power_of_two(value), expected)
        print('Success: test_is_power_of_two')
class GenericMenu(menus.MenuPages, inherit_buttons=False):
    """Reaction-driven paginated menu with wrap-around navigation.

    NOTE(review): the bare `('', position=..., skip_if=...)` lines below look
    like `@menus.button(<emoji>, ...)` decorators whose "@menus.button" prefix
    (and emoji literal) was lost in extraction; as written this block is not
    valid standalone Python.  Confirm against the original file.
    """

    def __init__(self, source: menus.PageSource, cog: Optional[commands.Cog]=None, ctx=None, clear_reactions_after: bool=True, delete_message_after: bool=False, add_reactions: bool=True, using_custom_emoji: bool=False, using_embeds: bool=False, keyword_to_reaction_mapping: Dict[(str, str)]=None, timeout: int=180, message: discord.Message=None, **kwargs: Any) -> None:
        self.cog = cog
        self.ctx = ctx
        super().__init__(source, clear_reactions_after=clear_reactions_after, delete_message_after=delete_message_after, check_embeds=using_embeds, timeout=timeout, message=message, **kwargs)

    def reaction_check(self, payload):
        """Accept reactions only on this menu's message, from the invoking
        author or a bot owner, and only for known buttons.

        NOTE(review): self.bot / self._author_id are presumably set by the
        menus base class when the menu starts — confirm.
        """
        if (payload.message_id != self.message.id):
            return False
        if (payload.user_id not in (*self.bot.owner_ids, self._author_id)):
            return False
        return (payload.emoji in self.buttons)

    def _skip_single_arrows(self):
        """Hide prev/next arrows when there is at most one page."""
        max_pages = self._source.get_max_pages()
        if (max_pages is None):
            return True
        return (max_pages == 1)

    def _skip_double_triangle_buttons(self):
        """Hide first/last-page buttons when there are at most two pages."""
        max_pages = self._source.get_max_pages()
        if (max_pages is None):
            return True
        return (max_pages <= 2)

    ('', position=menus.First(1), skip_if=_skip_single_arrows)
    async def prev(self, payload: discord.RawReactionActionEvent):
        """Go to the previous page, wrapping to the last page from page 0."""
        if (self.current_page == 0):
            (await self.show_page((self._source.get_max_pages() - 1)))
        else:
            (await self.show_checked_page((self.current_page - 1)))

    ('', position=menus.First(2))
    async def stop_pages_default(self, payload: discord.RawReactionActionEvent) -> None:
        """Stop the menu and delete its message (ignoring if already gone)."""
        self.stop()
        with contextlib.suppress(discord.NotFound):
            (await self.message.delete())

    ('', position=menus.First(2), skip_if=_skip_single_arrows)
    async def next(self, payload: discord.RawReactionActionEvent):
        """Go to the next page, wrapping to page 0 from the last page."""
        if (self.current_page == (self._source.get_max_pages() - 1)):
            (await self.show_page(0))
        else:
            (await self.show_checked_page((self.current_page + 1)))

    ('', position=menus.First(0), skip_if=_skip_double_triangle_buttons)
    async def go_to_first_page(self, payload: discord.RawReactionActionEvent):
        """Jump to the first page."""
        (await self.show_page(0))

    ('', position=menus.Last(1), skip_if=_skip_double_triangle_buttons)
    async def go_to_last_page(self, payload: discord.RawReactionActionEvent):
        """Jump to the last page."""
        (await self.show_page((self._source.get_max_pages() - 1)))
def _verify_egress(azure_storage_account_url, azure_storage_account_key, azure_storage_container, wait_time, retries, ASSET):
    """Poll the Azure storage container until data arrives, then assert that
    ASSET appears among the collected asset names.

    Args:
        azure_storage_account_url/key/container: Azure storage coordinates.
        wait_time: seconds to sleep between attempts.
        retries: maximum number of failed read attempts.
        ASSET: substring expected in at least one collected asset name.

    Fails the calling test (pytest assert) when no data is read within the
    retry budget or the asset is absent.
    """
    retry_count = 0
    data_from_azure = None
    while ((data_from_azure is None or len(data_from_azure) == 0) and retry_count < retries):
        data_from_azure = read_data_from_azure_storage_container(azure_storage_account_url, azure_storage_account_key, azure_storage_container)
        # BUGFIX: count empty results as failed attempts too.  The original
        # only incremented on None, so a persistent empty list looped forever.
        if not data_from_azure:
            retry_count += 1
            time.sleep(wait_time)
    if not data_from_azure:
        assert False, 'Failed to read data from Azure IoT Hub'
    # Each entry's JSON 'Body' is a list of records carrying an 'asset' name.
    asset_collected = []
    for entry in data_from_azure:
        asset_collected.extend(record['asset'] for record in json.loads(entry)['Body'])
    assert any(ASSET in name for name in asset_collected)
def document_function(func, func_name=None, example_code=None, image_file=None):
    """Render reST documentation for `func` as a string.

    Args:
        func: the callable to document.
        func_name: display name; defaults to func.__name__.
        example_code: optional example snippet appended as a literal block.
        image_file: optional image path appended as an image directive.

    Returns:
        The assembled reST fragment.

    NOTE(review): relies on `inspect.getargspec`/`formatargspec`, which were
    removed in modern Python (3.11/3.13) — presumably this module pins an
    older interpreter or shims them; confirm before upgrading.
    """
    if (func_name is None):
        func_name = func.__name__
    func_doc = func.__doc__
    if (func_doc is None):
        print(('function %s is undocumented' % func_name))
        func_doc = '\n\n'
    elif is_valid_rst(func_doc):
        func_doc = dedent(func_doc)
    else:
        # Non-reST docstrings are wrapped in a literal block so they render.
        func_doc = ('\n::\n' + func_doc)
    func_signature = formatargspec(*getargspec(func))
    documentation = ('\n%(func_name)s\n%(title_line)s\n\n.. function:: %(func_name)s%(func_signature)s\n\n%(func_doc)s\n\n ' % {'func_name': func_name, 'title_line': ('~' * len(func_name)), 'func_signature': func_signature, 'func_doc': indent(dedent(func_doc))})
    if (image_file is not None):
        documentation += ('\n\n.. image:: %s\n\n' % image_file)
    if (example_code is not None):
        documentation += ('\n**Example** (run in ``ipython --gui=qt``, or in the mayavi2 interactive shell,\nsee :ref:`running-mlab-scripts` for more info)::\n\n%s\n' % indent(example_code))
    return documentation
((detect_target().name() == 'rocm'), 'Not supported by ROCM.')
class BMMAddTestCase(unittest.TestCase):
    # NOTE(review): receives `cls` — presumably decorated with @classmethod in
    # the original; the decorator appears stripped in this extract.
    def setUpClass(cls) -> None:
        """Seed the torch RNG once so random test tensors are reproducible."""
        torch.manual_seed(0)
    def __init__(self, *args, **kwargs):
        """Initialise the TestCase plus a per-instance compiled-module counter."""
        super(BMMAddTestCase, self).__init__(*args, **kwargs)
        # Gives each compiled test module a unique dll name.
        self.test_count = 0
    def _test_rrr(self, B, M, K, N, test_name, dtype='float16'):
        """Compile bmm_rrr_add (row-major A, B and output) and compare the
        compiled result against a torch.bmm + add reference."""
        target = detect_target()
        X = Tensor(shape=[B, M, K], dtype=dtype, name='input_0', is_input=True)
        W = Tensor(shape=[B, K, N], dtype=dtype, name='input_1', is_input=True)
        D = Tensor(shape=[B, M, N], dtype=dtype, name='input_2', is_input=True)
        OP = ops.bmm_rrr_add()
        Y = OP(X, W, D)
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        # Unique dll per compiled test to avoid clashes across invocations.
        dll_name = f'test_{self.test_count}.so'
        module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
        X_pt = get_random_torch_tensor([B, M, K], dtype)
        W_pt = get_random_torch_tensor([B, K, N], dtype)
        D_pt = get_random_torch_tensor([B, M, N], dtype)
        Y_pt = torch.bmm(X_pt, W_pt)
        Y_pt = (Y_pt + D_pt)
        y = get_torch_empty_tensor([B, M, N], dtype)
        module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
        self.assertTrue(torch.allclose(Y_pt, y, atol=0.1, rtol=0.1))
        self.test_count += 1
def _test_ccr(self, B, M, N, K, test_name, dtype='float16'):
target = detect_target()
X = Tensor(shape=[B, K, M], dtype=dtype, name='input_0', is_input=True)
W = Tensor(shape=[B, N, K], dtype=dtype, name='input_1', is_input=True)
D = Tensor(shape=[B, M, N], dtype=dtype, name='input_2', is_input=True)
OP = ops.bmm_ccr_add()
Y = OP(X, W, D)
Y._attrs['name'] = 'output_0'
Y._attrs['is_output'] = True
dll_name = f'test_{self.test_count}.so'
module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
X_pt = get_random_torch_tensor([B, K, M], dtype)
W_pt = get_random_torch_tensor([B, N, K], dtype)
D_pt = get_random_torch_tensor([B, M, N], dtype)
XT = torch.transpose(X_pt, 2, 1)
Y_pt = torch.bmm(XT, W_pt.transpose(2, 1))
Y_pt = (Y_pt + D_pt)
y = get_torch_empty_tensor([B, M, N], dtype)
module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
if ((X_pt.nelement() == 0) or (W_pt.nelement == 0)):
pass
else:
self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
self.test_count += 1
def _test_rcr(self, B, M, N, K, test_name, dtype='float16'):
target = detect_target()
X = Tensor(shape=[B, M, K], dtype=dtype, name='input_0', is_input=True)
W = Tensor(shape=[B, N, K], dtype=dtype, name='input_1', is_input=True)
D = Tensor(shape=[B, M, N], dtype=dtype, name='input_2', is_input=True)
OP = ops.bmm_rcr_add()
Y = OP(X, W, D)
Y._attrs['name'] = 'output_0'
Y._attrs['is_output'] = True
dll_name = f'test_{self.test_count}.so'
module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
X_pt = get_random_torch_tensor([B, M, K], dtype)
W_pt = get_random_torch_tensor([B, N, K], dtype)
D_pt = get_random_torch_tensor([B, M, N], dtype)
Y_pt = torch.bmm(X_pt, W_pt.transpose(2, 1))
Y_pt = (Y_pt + D_pt)
y = get_torch_empty_tensor([B, M, N], dtype)
module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
if ((X_pt.nelement() == 0) or (W_pt.nelement == 0)):
pass
else:
self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
self.test_count += 1
    def _test_crr(self, B, M, K, N, test_name, dtype='float16'):
        """Compile bmm_crr_add (column-major A, row-major B and output) and
        compare the compiled result against a torch reference."""
        target = detect_target()
        X = Tensor(shape=[B, K, M], dtype=dtype, name='input_0', is_input=True)
        W = Tensor(shape=[B, K, N], dtype=dtype, name='input_1', is_input=True)
        D = Tensor(shape=[B, M, N], dtype=dtype, name='input_2', is_input=True)
        OP = ops.bmm_crr_add()
        Y = OP(X, W, D)
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        dll_name = f'test_{self.test_count}.so'
        module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
        X_pt = get_random_torch_tensor([B, K, M], dtype)
        W_pt = get_random_torch_tensor([B, K, N], dtype)
        D_pt = get_random_torch_tensor([B, M, N], dtype)
        # Reference: A is stored transposed (column-major).
        XT = torch.transpose(X_pt, 2, 1)
        Y_pt = torch.bmm(XT, W_pt)
        Y_pt = (Y_pt + D_pt)
        y = get_torch_empty_tensor([B, M, N], dtype)
        module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
        self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
        self.test_count += 1
def _test_rcc(self, B, M, K, N, test_name, dtype='float16'):
target = detect_target()
X = Tensor(shape=[B, M, K], dtype=dtype, name='input_0', is_input=True)
W = Tensor(shape=[B, N, K], dtype=dtype, name='input_1', is_input=True)
D = Tensor(shape=[B, N, M], dtype=dtype, name='input_2', is_input=True)
OP = ops.bmm_rcc_add()
Y = OP(X, W, D)
Y._attrs['name'] = 'output_0'
Y._attrs['is_output'] = True
dll_name = f'test_{self.test_count}.so'
module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
X_pt = get_random_torch_tensor([B, M, K], dtype)
W_pt = get_random_torch_tensor([B, N, K], dtype)
D_pt = get_random_torch_tensor([B, N, M], dtype)
WT = W_pt.transpose(2, 1)
Y_pt = torch.bmm(X_pt, WT)
Y_pt = (Y_pt.transpose(2, 1) + D_pt)
y = get_torch_empty_tensor([B, N, M], dtype)
module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
if ((X_pt.nelement() == 0) or (W_pt.nelement == 0)):
pass
else:
self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
self.test_count += 1
    def _test_rrc(self, B, M, K, N, test_name, dtype='float16'):
        """Compile bmm_rrc_add (row-major A and B, column-major output) and
        compare the compiled result against a torch reference."""
        target = detect_target()
        X = Tensor(shape=[B, M, K], dtype=dtype, name='input_0', is_input=True)
        W = Tensor(shape=[B, K, N], dtype=dtype, name='input_1', is_input=True)
        D = Tensor(shape=[B, N, M], dtype=dtype, name='input_2', is_input=True)
        OP = ops.bmm_rrc_add()
        Y = OP(X, W, D)
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        dll_name = f'test_{self.test_count}.so'
        module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
        X_pt = get_random_torch_tensor([B, M, K], dtype)
        W_pt = get_random_torch_tensor([B, K, N], dtype)
        D_pt = get_random_torch_tensor([B, N, M], dtype)
        Y_pt = torch.bmm(X_pt, W_pt)
        # Output is column-major, hence the transpose before the add.
        Y_pt = (Y_pt.transpose(2, 1) + D_pt)
        y = get_torch_empty_tensor([B, N, M], dtype)
        module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
        self.assertTrue(torch.allclose(Y_pt, y, atol=0.1, rtol=0.1))
        self.test_count += 1
    def _test_crc(self, B, M, K, N, test_name, dtype='float16'):
        """Compile bmm_crc_add (column-major A and output, row-major B) and
        compare the compiled result against a torch reference."""
        target = detect_target()
        X = Tensor(shape=[B, K, M], dtype=dtype, name='input_0', is_input=True)
        W = Tensor(shape=[B, K, N], dtype=dtype, name='input_1', is_input=True)
        D = Tensor(shape=[B, N, M], dtype=dtype, name='input_2', is_input=True)
        OP = ops.bmm_crc_add()
        Y = OP(X, W, D)
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        dll_name = f'test_{self.test_count}.so'
        module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
        X_pt = get_random_torch_tensor([B, K, M], dtype)
        W_pt = get_random_torch_tensor([B, K, N], dtype)
        D_pt = get_random_torch_tensor([B, N, M], dtype)
        # Reference: A is stored transposed; output is column-major.
        XT = torch.transpose(X_pt, 2, 1)
        Y_pt = torch.bmm(XT, W_pt)
        Y_pt = (Y_pt.transpose(2, 1) + D_pt)
        y = get_torch_empty_tensor([B, N, M], dtype)
        module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
        self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
        self.test_count += 1
def _test_ccc(self, B, M, N, K, test_name, dtype='float16'):
target = detect_target()
X = Tensor(shape=[B, K, M], dtype=dtype, name='input_0', is_input=True)
W = Tensor(shape=[B, N, K], dtype=dtype, name='input_1', is_input=True)
D = Tensor(shape=[B, N, M], dtype=dtype, name='input_2', is_input=True)
OP = ops.bmm_ccc_add()
Y = OP(X, W, D)
Y._attrs['name'] = 'output_0'
Y._attrs['is_output'] = True
dll_name = f'test_{self.test_count}.so'
module = compile_model(Y, target, './tmp', test_name, dll_name=dll_name)
X_pt = get_random_torch_tensor([B, K, M], dtype)
W_pt = get_random_torch_tensor([B, N, K], dtype)
D_pt = get_random_torch_tensor([B, N, M], dtype)
XT = torch.transpose(X_pt, 2, 1)
Y_pt = torch.bmm(XT, W_pt.transpose(2, 1))
Y_pt = (Y_pt.transpose(2, 1) + D_pt)
y = get_torch_empty_tensor([B, N, M], dtype)
module.run_with_tensors({'input_0': X_pt, 'input_1': W_pt, 'input_2': D_pt}, [y])
if ((X_pt.nelement() == 0) or (W_pt.nelement == 0)):
pass
else:
self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))
self.test_count += 1
    def test_rrr(self):
        """Smoke-test bmm_rrr_add with one representative shape."""
        self._test_rrr(B=32, M=256, K=256, N=512, test_name='bmm_rrr_add')
def test_ccr(self):
self._test_ccr(B=32, M=256, N=256, K=512, test_name='bmm_ccr_add')
self._test_ccr(B=0, M=256, N=256, K=512, test_name='bmm_ccr_zero_batch')
self._test_ccr(B=1, M=0, N=256, K=512, test_name='bmm_ccr_zero_m')
self._test_ccr(B=1, M=256, N=256, K=0, test_name='bmm_ccr_zero_k')
def test_rcr(self):
self._test_rcr(B=32, M=256, N=256, K=512, test_name='bmm_rcr_add')
self._test_rcr(B=0, M=256, N=256, K=512, test_name='bmm_rcr_zero_batch')
self._test_rcr(B=1, M=0, N=256, K=512, test_name='bmm_rcr_zero_m')
self._test_rcr(B=1, M=256, N=256, K=0, test_name='bmm_rcr_zero_k')
    def test_crr(self):
        """Smoke-test bmm_crr_add with one representative shape."""
        self._test_crr(B=32, M=256, K=256, N=512, test_name='bmm_crr_add')
def test_ccc(self):
self._test_ccc(B=32, M=256, N=256, K=512, test_name='bmm_ccc_add')
self._test_ccc(B=0, M=256, N=256, K=512, test_name='bmm_ccc_zero_batch')
self._test_ccc(B=1, M=0, N=256, K=512, test_name='bmm_ccc_zero_m')
self._test_ccc(B=1, M=256, N=256, K=0, test_name='bmm_ccc_zero_k')
def test_rcc(self):
self._test_rcc(B=32, M=256, N=256, K=512, test_name='bmm_rcc_add')
self._test_rcc(B=0, M=256, N=256, K=512, test_name='bmm_rcc_zero_batch')
self._test_rcc(B=1, M=0, N=256, K=512, test_name='bmm_rcc_zero_m')
self._test_rcc(B=1, M=256, N=256, K=0, test_name='bmm_rcc_zero_k')
    def test_rrc(self):
        """Smoke-test bmm_rrc_add with one representative shape."""
        self._test_rrc(B=32, M=256, K=256, N=512, test_name='bmm_rrc_add')
    def test_crc(self):
        """Smoke-test bmm_crc_add with one representative shape."""
        self._test_crc(B=32, M=256, K=256, N=512, test_name='bmm_crc_add')
def test_bmm_add_0_fp32_sm80(self, dtype='float32'):
self._test_rrr(B=8, M=32, K=8, N=64, test_name=f'bmm_rrr_add_{dtype}', dtype=dtype)
self._test_ccr(B=8, M=32, N=64, K=16, test_name=f'bmm_ccr_add_{dtype}', dtype=dtype)
self._test_crr(B=8, M=32, K=16, N=64, test_name=f'bmm_crr_add_{dtype}', dtype=dtype)
self._test_rcr(B=8, M=32, N=64, K=16, test_name=f'bmm_rcr_add_{dtype}', dtype=dtype)
def test_bmm_add_0_bf16(self, dtype='bfloat16'):
self._test_rrr(B=8, M=32, K=8, N=64, test_name=f'bmm_rrr_add_{dtype}', dtype=dtype)
self._test_ccr(B=8, M=32, N=64, K=16, test_name=f'bmm_ccr_add_{dtype}', dtype=dtype)
self._test_crr(B=8, M=32, K=16, N=64, test_name=f'bmm_crr_add_{dtype}', dtype=dtype)
self._test_rcr(B=8, M=32, N=64, K=16, test_name=f'bmm_rcr_add_{dtype}', dtype=dtype)
def test_bmm_add_1_fp32_sm80(self, dtype='float32'):
self._test_rrc(B=8, M=32, K=8, N=64, test_name=f'bmm_rrc_add_{dtype}', dtype=dtype)
self._test_ccc(B=8, M=32, N=64, K=16, test_name=f'bmm_ccc_add_{dtype}', dtype=dtype)
self._test_crc(B=8, M=32, K=16, N=64, test_name=f'bmm_crc_add_{dtype}', dtype=dtype)
self._test_rcc(B=8, M=32, N=64, K=16, test_name=f'bmm_rcc_add_{dtype}', dtype=dtype)
def test_bmm_add_1_bf16(self, dtype='bfloat16'):
self._test_rrc(B=8, M=32, K=8, N=64, test_name=f'bmm_rrc_add_{dtype}', dtype=dtype)
self._test_ccc(B=8, M=32, N=64, K=16, test_name=f'bmm_ccc_add_{dtype}', dtype=dtype)
self._test_crc(B=8, M=32, K=16, N=64, test_name=f'bmm_crc_add_{dtype}', dtype=dtype)
self._test_rcc(B=8, M=32, N=64, K=16, test_name=f'bmm_rcc_add_{dtype}', dtype=dtype)
    def test_rrr_sm90(self) -> None:
        """bmm_rrr_add with forced SM90 kernels: a misaligned fp16 shape must
        fail op filtering; aligned/permitted shapes must compile and pass."""
        with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
            with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
                self._test_rrr(B=5, M=7, K=60, N=28, test_name='bmm_rrr_add_wrong_alignment_force_sm90', dtype='float16')
            self._test_rrr(B=5, M=7, K=64, N=32, test_name='bmm_rrr_add_fp16_force_sm90', dtype='float16')
            self._test_rrr(B=5, M=7, K=60, N=28, test_name='bmm_rrr_add_fp32_force_sm90', dtype='float32')
            self._test_rrr(B=5, M=7, K=64, N=32, test_name='bmm_rrr_add_bf16_force_sm90', dtype='bfloat16')
    def test_rcr_sm90(self) -> None:
        """bmm_rcr_add with forced SM90 kernels: a misaligned fp16 shape must
        fail op filtering; aligned/permitted shapes must compile and pass."""
        with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
            with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
                self._test_rcr(B=5, M=7, N=60, K=28, test_name='bmm_rcr_add_wrong_alignment_force_sm90', dtype='float16')
            self._test_rcr(B=5, M=7, N=64, K=32, test_name='bmm_rcr_add_fp16_force_sm90', dtype='float16')
            self._test_rcr(B=5, M=7, N=60, K=28, test_name='bmm_rcr_add_fp32_force_sm90', dtype='float32')
            self._test_rcr(B=5, M=7, N=64, K=32, test_name='bmm_rcr_add_bf16_force_sm90', dtype='bfloat16')
def test_ccr_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass.

    Fixes the test-name typos ('forse' -> 'force') so generated artifact names
    stay consistent with the other *_sm90 tests in this file.
    """
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_ccr(B=5, M=60, N=7, K=28, test_name='bmm_ccr_add_wrong_alignment_force_sm90', dtype='float16')
        for m_dim, k_dim, tag, dtype in ((64, 32, 'fp16', 'float16'), (60, 28, 'fp32', 'float32'), (64, 32, 'bf16', 'bfloat16')):
            self._test_ccr(B=5, M=m_dim, N=7, K=k_dim, test_name=f'bmm_ccr_add_{tag}_force_sm90', dtype=dtype)
def test_crr_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass.

    Fixes the test-name typos ('forse' -> 'force') and the stray 'bk_' in the
    bf16 test name so artifact names stay consistent with the other *_sm90 tests.
    """
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_crr(B=5, K=7, M=28, N=60, test_name='bmm_crr_add_wrong_alignment_force_sm90', dtype='float16')
        for m_dim, n_dim, tag, dtype in ((32, 64, 'fp16', 'float16'), (28, 60, 'fp32', 'float32'), (32, 64, 'bf16', 'bfloat16')):
            self._test_crr(B=5, K=7, M=m_dim, N=n_dim, test_name=f'bmm_crr_add_{tag}_force_sm90', dtype=dtype)
def test_rrc_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass."""
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_rrc(B=5, M=7, K=60, N=28, test_name='bmm_rrc_add_wrong_alignment_force_sm90', dtype='float16')
        for k_dim, n_dim, tag, dtype in ((64, 32, 'fp16', 'float16'), (60, 28, 'fp32', 'float32'), (64, 32, 'bf16', 'bfloat16')):
            self._test_rrc(B=5, M=7, K=k_dim, N=n_dim, test_name=f'bmm_rrc_add_{tag}_force_sm90', dtype=dtype)
def test_rcc_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass."""
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_rcc(B=5, M=7, N=60, K=28, test_name='bmm_rcc_add_wrong_alignment_force_sm90', dtype='float16')
        for n_dim, k_dim, tag, dtype in ((64, 32, 'fp16', 'float16'), (60, 28, 'fp32', 'float32'), (64, 32, 'bf16', 'bfloat16')):
            self._test_rcc(B=5, M=7, N=n_dim, K=k_dim, test_name=f'bmm_rcc_add_{tag}_force_sm90', dtype=dtype)
def test_ccc_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass.

    Fixes the test-name typos ('forse' -> 'force') so generated artifact names
    stay consistent with the other *_sm90 tests in this file.
    """
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_ccc(B=5, M=60, N=7, K=28, test_name='bmm_ccc_add_wrong_alignment_force_sm90', dtype='float16')
        for m_dim, k_dim, tag, dtype in ((64, 32, 'fp16', 'float16'), (60, 28, 'fp32', 'float32'), (64, 32, 'bf16', 'bfloat16')):
            self._test_ccc(B=5, M=m_dim, N=7, K=k_dim, test_name=f'bmm_ccc_add_{tag}_force_sm90', dtype=dtype)
def test_crc_sm90(self) -> None:
    """Force SM90 kernels: misaligned shapes must raise; aligned shapes must pass.

    Fixes the test-name typos ('forse' -> 'force') and the stray 'bk_' in the
    bf16 test name so artifact names stay consistent with the other *_sm90 tests.
    """
    with env_variables(AIT_FORCE_CUTLASS_SM90_KERNELS='1', INSIDE_RE_WORKER='1'):
        with self.assertRaisesRegex(expected_exception=RuntimeError, expected_regex='No GEMM op instances are left after filtering'):
            self._test_crc(B=5, K=7, M=28, N=60, test_name='bmm_crc_add_wrong_alignment_force_sm90', dtype='float16')
        for m_dim, n_dim, tag, dtype in ((32, 64, 'fp16', 'float16'), (28, 60, 'fp32', 'float32'), (32, 64, 'bf16', 'bfloat16')):
            self._test_crc(B=5, K=7, M=m_dim, N=n_dim, test_name=f'bmm_crc_add_{tag}_force_sm90', dtype=dtype)
.parametrize('alg,expected_hash', list(TINY_DATA_HASHES.items()), ids=list(TINY_DATA_HASHES.keys()))
def test_hash_matches(alg, expected_hash):
if alg.startswith('xxh'):
if (xxhash is None):
pytest.skip('requires xxhash')
if ((alg not in ['xxh64', 'xxh32']) and (XXHASH_MAJOR_VERSION < 2)):
pytest.skip('requires xxhash > 2.0')
fname = os.path.join(DATA_DIR, 'tiny-data.txt')
check_tiny_data(fname)
known_hash = f'{alg}:{expected_hash}'
assert hash_matches(fname, known_hash)
known_hash = f'{alg}:blablablabla'
assert (not hash_matches(fname, known_hash)) |
def output_json(data, code, headers=None):
    """Serialize *data* to JSON and wrap it in a Flask response.

    :param data: object to serialize
    :param code: HTTP status code for the response
    :param headers: optional extra headers merged into the response
    :returns: a Flask response with the JSON body (newline-terminated)
    """
    # Copy the settings dict: setdefault() below would otherwise mutate the
    # dict stored in current_app.config['RESTFUL_JSON'] in place, making the
    # debug-only defaults leak into the shared application config.
    settings = dict(current_app.config.get('RESTFUL_JSON', {}))
    if current_app.debug:
        # Pretty-print (and sort keys on py2) for easier debugging.
        settings.setdefault('indent', 4)
        settings.setdefault('sort_keys', (not PY3))
    dumped = (dumps(data, **settings) + '\n')
    resp = make_response(dumped, code)
    resp.headers.extend((headers or {}))
    return resp
def fetch_production(zone_key='AM', session=None, target_datetime=None, logger: logging.Logger=logging.getLogger(__name__)) -> dict:
    """Fetch real-time electricity production for Armenia by scraping SOURCE.

    Returns a production dict in the electricitymap parser format.
    Raises NotImplementedError for historical dates and ValueError when
    the expected data block cannot be found in the page.
    """
    if (target_datetime is not None):
        raise NotImplementedError('This parser is not yet able to parse past dates')
    r = (session or requests.session())
    response = r.get(SOURCE)
    response.encoding = 'utf-8'
    html_doc = response.text
    # The live figures are embedded in an inline <script> block; slice the
    # document between the script opening tag and the 'left:' marker.
    start_string = "<script type='text/javascript'>"
    start_index = (html_doc.find(start_string) + len(start_string))
    stop_index = html_doc.find('left:')
    soup = BeautifulSoup(html_doc[start_index:stop_index], 'html.parser')
    data_string = soup.find(text=re.compile('var'))
    if (data_string is None):
        logger.warning(f'Could not parse {html_doc}')
        raise ValueError('Empty data object scraped, cannot be parsed.')
    data_split = data_string.split('\r\n')
    # Fixed line offsets into the script block (10 = gas/thermal, 11 = hydro,
    # 8 = small hydro/alternative, 9 = nuclear). This assumes the page layout
    # is stable -- TODO confirm against the live page.
    gas_tes = re.findall(REGEX, data_split[10])
    gas_total = float(gas_tes[0])
    hydro_ges = re.findall(REGEX, data_split[11])
    hydro_altern = re.findall(REGEX, data_split[8])
    hydro_total = (float(hydro_ges[0]) + float(hydro_altern[0]))
    nuclear_atom = re.findall(REGEX, data_split[9])
    nuclear_total = float(nuclear_atom[0])
    # Timestamp lives on the line containing 'time2'; parse it fuzzily with
    # 'now' in Yerevan local time filling any missing fields.
    time_data = [s for s in data_split if ('time2' in s)][0]
    yerevan = tz.gettz(TZ)
    date_time = dparser.parse(time_data.split()[3], default=datetime.now(yerevan), fuzzy=True)
    return {'zoneKey': zone_key, 'datetime': date_time, 'production': {'gas': gas_total, 'hydro': hydro_total, 'nuclear': nuclear_total, 'biomass': None, 'coal': 0, 'geothermal': 0, 'oil': 0, 'solar': None, 'wind': None}, 'storage': {'hydro': 0, 'battery': 0}, 'source': SOURCE}
def __create_collection_panel_context_menu():
    """Register the context-menu entries shown in the collection panel."""
    def collection_delete_tracks_func(panel, context, tracks):
        panel.collection.delete_tracks(tracks)
    # Each entry is ordered after the previous one via its 'after' anchor.
    separator = menu.simple_separator('cp-sep', after=['properties'])
    open_dir = menuitems.OpenDirectoryMenuItem('open-directory', after=[separator.name])
    trash = menuitems.TrashMenuItem('trash-tracks', after=[open_dir.name], delete_tracks_func=collection_delete_tracks_func)
    for entry in (separator, open_dir, trash):
        entry.register('collection-panel-context-menu')
def nested_get(_dict, dot_key, default=None):
    """Look up a dotted key path in a nested dict.

    ``nested_get({'a': {'b': 1}}, 'a.b')`` returns ``1``. Returns *default*
    when the dict or key is None, when any key along the path is missing, or
    when an intermediate value is not dict-like (previously this last case
    raised AttributeError).
    """
    if ((_dict is None) or (dot_key is None)):
        return default
    elif (('.' in dot_key) and isinstance(_dict, dict)):
        this_key, _, rest = dot_key.partition('.')
        return nested_get(_dict.get(this_key, default), rest, default)
    else:
        try:
            return _dict.get(dot_key, default)
        except AttributeError:
            # A leaf (int, list, str, ...) reached while path remains: the
            # requested path does not exist, so fall back to the default.
            return default
def derive_master_key(seed_bytes: bytes) -> Tuple[(bytes, bytes)]:
    """Derive the BIP32 master key pair from a seed.

    Repeatedly HMAC-SHA512s with key b'Bitcoin seed' until the left half is a
    valid private key, then splits the digest via split_hmac.
    Raises ValueError when the seed is shorter than SEED_MIN_BYTE_LEN.
    """
    if len(seed_bytes) < SEED_MIN_BYTE_LEN:
        raise ValueError(f'Invalid seed length ({len(seed_bytes)})')
    hmac_data = seed_bytes
    while True:
        hmac_out = hmac.digest(b'Bitcoin seed', hmac_data, 'sha512')
        if validate_private_key(hmac_out[:HMAC_HALF_LEN]):
            break
        # Invalid candidate: feed the digest back in and try again.
        hmac_data = hmac_out
    return split_hmac(hmac_out)
def run(fips_dir, proj_dir, args):
    """Execute the 'build' verb: build (or clean) the selected config.

    Everything after a '--' separator is forwarded to the underlying
    build tool.
    """
    if not util.is_valid_project_dir(proj_dir):
        log.error('must be run in a project directory')
    build_tool_args = None
    if '--' in args:
        sep_idx = args.index('--')
        build_tool_args = args[sep_idx + 1:]
        args = args[:sep_idx]
    # Config comes from the first positional arg, else the project settings.
    cfg_name = args[0] if args else None
    if not cfg_name:
        cfg_name = settings.get(proj_dir, 'config')
    if cfg_name == 'clean':
        # 'fips build clean [cfg]' cleans instead of building.
        cfg_name = args[1] if len(args) > 1 else settings.get(proj_dir, 'config')
        project.make_clean(fips_dir, proj_dir, cfg_name)
    else:
        project.build(fips_dir, proj_dir, cfg_name, None, build_tool_args)
class Gpu_interface(Peripherical_interface, metaclass=ABCMeta):
    """Abstract base for GPU peripherals exposing vendor, model and temperature.

    NOTE(review): this reads like decompiled source -- the paired getter/setter
    defs below were presumably @property/@<name>.setter pairs whose decorators
    were stripped; as written, each later def simply shadows the earlier one.
    Confirm against the original source.
    """
    _vendor: str
    _model: str
    def vendor(self) -> str:
        # Getter (presumably @property in the original).
        return self._vendor
    def vendor(self, value: str):
        # Setter (presumably @vendor.setter in the original).
        self._vendor = value
    def model(self) -> str:
        return self._model
    def model(self, value: str):
        # Model is not settable on the base class.
        raise NotImplementedError
    def temp(self) -> float:
        # Refresh the cached temperature via the subclass hook. The
        # try/finally/del dance around 'e' is a decompiler artifact that
        # re-raises NotImplementedError while clearing the local reference.
        try:
            self._temp = self.get_temp()
        except NotImplementedError as e:
            try:
                raise e
            finally:
                e = None
                del e
        else:
            return self._temp
    def temp(self, value: float):
        self._temp = value
    def __init__(self, os, vendor, model):
        super().__init__(os)
        self.vendor = vendor
        self.model = model
    def get_temp(self) -> float:
        # Subclasses must implement the actual temperature readout.
        raise NotImplementedError
def test_exponential_increment():
    """Successive gas-price steps must each grow by more than the previous step."""
    generator = ExponentialScalingStrategy(100, ).get_gas_price()
    values = [next(generator) for _ in range(20)]
    prev_step = values[1] - values[0]
    for earlier, later in zip(values[1:], values[2:]):
        step = later - earlier
        assert step > prev_step
        prev_step = step
def test_geometryoperations():
    """'+' and '|' build flattened GeometryGroups; *, &, -, ^ build ClipOperations."""
    # Union via '+' first, then via '|' -- both flatten existing unions/groups.
    for combine in (lambda a, b: a + b, lambda a, b: a | b):
        assert combine(BOX, CYLINDER) == td.GeometryGroup(geometries=(BOX, CYLINDER))
        assert combine(BOX, UNION) == td.GeometryGroup(geometries=(BOX, UNION.geometry_a, UNION.geometry_b))
        assert combine(UNION, CYLINDER) == td.GeometryGroup(geometries=(UNION.geometry_a, UNION.geometry_b, CYLINDER))
        assert combine(BOX, GROUP) == td.GeometryGroup(geometries=((BOX,) + GROUP.geometries))
        assert combine(GROUP, CYLINDER) == td.GeometryGroup(geometries=(GROUP.geometries + (CYLINDER,)))
    assert (BOX * SPHERE) == td.ClipOperation(operation='intersection', geometry_a=BOX, geometry_b=SPHERE)
    assert (BOX & SPHERE) == td.ClipOperation(operation='intersection', geometry_a=BOX, geometry_b=SPHERE)
    assert (BOX - SPHERE) == td.ClipOperation(operation='difference', geometry_a=BOX, geometry_b=SPHERE)
    assert (BOX ^ SPHERE) == td.ClipOperation(operation='symmetric_difference', geometry_a=BOX, geometry_b=SPHERE)
def get_eval_search(expression):
    """Compile a user-supplied search expression and return an evaluator.

    The returned callable evaluates the expression against a tree node with a
    restricted namespace of node attributes and safe helpers. Aborts with
    HTTP 400 when the expression does not compile.
    """
    try:
        code = compile(expression, '<string>', 'eval')
    except SyntaxError as e:
        abort(400, f'compiling expression: {e}')

    def evaluate(node):
        context = {'node': node, 'parent': node.up, 'up': node.up, 'name': node.name, 'is_leaf': node.is_leaf, 'length': node.dist, 'dist': node.dist, 'd': node.dist, 'props': node.props, 'p': node.props, 'get': dict.get, 'children': node.children, 'ch': node.children, 'size': node.size, 'dx': node.size[0], 'dy': node.size[1], 'regex': re.search, 'startswith': str.startswith, 'endswith': str.endswith, 'upper': str.upper, 'lower': str.lower, 'split': str.split, 'any': any, 'all': all, 'len': len, 'sum': sum, 'abs': abs, 'float': float, 'pi': pi}
        return safer_eval(code, context)
    return evaluate
def upgrade():
    """Widen token, user-name and permission-name columns."""
    # External-identity token columns: 255 -> 512.
    for token_column in ('access_token', 'alt_token', 'token_secret'):
        op.alter_column('external_identities', token_column, type_=sa.Unicode(512), existing_type=sa.Unicode(255))
    # User names: 32 -> 128.
    op.alter_column('users', 'user_name', type_=sa.Unicode(128), existing_type=sa.Unicode(32))
    # Permission names across all permission tables: -> 64, NOT NULL.
    for table, old_size in (('groups_permissions', 30), ('groups_resources_permissions', 50), ('users_permissions', 30), ('users_resources_permissions', 50)):
        op.alter_column(table, 'perm_name', type_=sa.Unicode(64), existing_type=sa.Unicode(old_size), nullable=False)
class ModelRunSchema(BaseModel):
    """Pydantic schema describing one model run (id, timing, status)."""
    id: str
    time_utc: str
    status: str
    full_refresh: Optional[bool]
    materialization: Optional[str]
    execution_time: float
    # NOTE(review): the line below lost its decorator prefix during extraction
    # (it reads as a bare tuple); it was presumably @validator('time_utc', pre=True).
    ('time_utc', pre=True)
    def format_time_utc(cls, time_utc):
        # Normalize partial ISO timestamps to full ISO format before validation.
        return convert_partial_iso_format_to_full_iso_format(time_utc)
.parametrize('left,right,expected', (([], [], ([], [], [])), ([], [1], ([], [], [1])), ([1], [1], ([1], [], [])), ([1], [1, 1], ([1], [], [1])), ([1, 2], [1, 1], ([1], [2], [1])), ([1, 2, 3, 4, 5, 6], [1, 2, 3, 5, 6], ([1, 2, 3], [4, 5, 6], [5, 6]))))
def test_consume_common_prefix(left, right, expected):
actual_a = consume_common_prefix(left, right)
actual_b = consume_common_prefix(right, left)
expected_b = (expected[0], expected[2], expected[1])
assert (actual_a == expected)
assert (actual_b == expected_b) |
class AdCreativePhotoDataMediaElements(AbstractObject):
    """Generated Graph API wrapper for photo-data media elements.

    SDK-generated code -- do not edit by hand.
    """
    def __init__(self, api=None):
        super(AdCreativePhotoDataMediaElements, self).__init__()
        self._isAdCreativePhotoDataMediaElements = True
        self._api = api
    class Field(AbstractObject.Field):
        # API field names exposed by this object.
        element_id = 'element_id'
        element_type = 'element_type'
        x = 'x'
        y = 'y'
    # Field name -> wire type used by the SDK's (de)serializer.
    _field_types = {'element_id': 'string', 'element_type': 'string', 'x': 'float', 'y': 'float'}
    # NOTE(review): likely a @classmethod in the original (decorator stripped).
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
class OnServerNotConfigured(Exception):
    """Raised when the ON server has not been configured.

    Carries an optional message (first positional argument) and an HTTP-style
    status_code of 404 for callers that map exceptions to responses.
    """
    def __init__(self, *args):
        # Forward args to Exception so e.args, repr() and pickling behave
        # correctly (the original skipped super().__init__ entirely).
        super().__init__(*args)
        self.message = args[0] if args else None
        self.status_code = 404
    def __str__(self):
        if self.message:
            return 'OnServerNotConfigured, {0} '.format(self.message)
        return 'OnServerNotConfigured has been raised'
.parametrize('tz', (('UTC', 0), ('PST', (- 8)), ('KST', 9)))
def test_jsonify_aware_datetimes(tz):
tzinfo = FixedOffset(hours=tz[1], name=tz[0])
dt = datetime.datetime(2017, 1, 1, 12, 34, 56, tzinfo=tzinfo)
gmt = FixedOffset(hours=0, name='GMT')
expected = dt.astimezone(gmt).strftime('"%a, %d %b %Y %H:%M:%S %Z"')
assert (flask.json.JSONEncoder().encode(dt) == expected) |
class DAGMixinTestCase(unittest.TestCase):
    """Tests for the DAG (parent/children) mixin behaviour.

    Exercises parent/children validation, cycle detection, and the derived
    read-only attributes (is_leaf, is_root, is_container, parents,
    walk_hierarchy) through DAGMixinFooMixedInClass.
    """
    def setUp(self):
        # Base kwargs reused (copied) by every test.
        self.kwargs = {'name': 'Test DAG Mixin'}
    def test_parent_argument_is_skipped(self):
        kwargs = copy.copy(self.kwargs)
        d = DAGMixinFooMixedInClass(**kwargs)
        assert (d.parent is None)
    def test_parent_argument_is_None(self):
        kwargs = copy.copy(self.kwargs)
        kwargs['parent'] = None
        d = DAGMixinFooMixedInClass(**kwargs)
        assert (d.parent is None)
    def test_parent_argument_is_not_a_correct_class_instance(self):
        kwargs = copy.copy(self.kwargs)
        kwargs['parent'] = 'not a correct type'
        with pytest.raises(TypeError) as cm:
            d = DAGMixinFooMixedInClass(**kwargs)
        assert (str(cm.value) == 'DAGMixinFooMixedInClass.parent should be an instance of DAGMixinFooMixedInClass class or derivative, not str')
    def test_parent_attribute_is_not_a_correct_class_instance(self):
        kwargs = copy.copy(self.kwargs)
        d = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(TypeError) as cm:
            d.parent = 'not a correct type'
        assert (str(cm.value) == 'DAGMixinFooMixedInClass.parent should be an instance of DAGMixinFooMixedInClass class or derivative, not str')
    def test_parent_attribute_creates_a_cycle(self):
        # d2 is a child of d1; making d2 the parent of d1 must be rejected.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        kwargs = copy.copy(self.kwargs)
        kwargs['parent'] = d1
        d2 = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(CircularDependencyError) as cm:
            d1.parent = d2
        assert (str(cm.value) == '<Test DAG Mixin (DAGMixinFooMixedInClass)> (DAGMixinFooMixedInClass) and <Test DAG Mixin (DAGMixinFooMixedInClass)> (DAGMixinFooMixedInClass) creates a circular dependency in their "children" attribute')
    def test_parent_argument_is_working_properly(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        kwargs = copy.copy(self.kwargs)
        kwargs['parent'] = d1
        d2 = DAGMixinFooMixedInClass(**kwargs)
        assert (d1 == d2.parent)
    def test_parent_attribute_is_working_properly(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        assert (d2.parent != d1)
        d2.parent = d1
        assert (d2.parent == d1)
    def test_children_attribute_is_an_empty_list_by_default(self):
        kwargs = copy.copy(self.kwargs)
        d = DAGMixinFooMixedInClass(**kwargs)
        assert (d.children == [])
    def test_children_attribute_is_set_to_None(self):
        kwargs = copy.copy(self.kwargs)
        d = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(TypeError) as cm:
            d.children = None
        assert (str(cm.value) == 'Incompatible collection type: None is not list-like')
    def test_children_attribute_accepts_correct_class_instances_only(self):
        kwargs = copy.copy(self.kwargs)
        d = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(TypeError) as cm:
            d.children = ['not', 1, '', 'of', 'correct', 'instances']
        assert (str(cm.value) == 'DAGMixinFooMixedInClass.children should be a list of DAGMixinFooMixedInClass (or derivative) instances, not str')
    def test_children_attribute_is_working_properly(self):
        kwargs = copy.copy(self.kwargs)
        kwargs['name'] = 'Test DAG Mixin 1'
        d1 = DAGMixinFooMixedInClass(**kwargs)
        kwargs['name'] = 'Test DAG Mixin 2'
        d2 = DAGMixinFooMixedInClass(**kwargs)
        kwargs['name'] = 'Test DAG Mixin 3'
        d3 = DAGMixinFooMixedInClass(**kwargs)
        assert (d1.children == [])
        d1.children.append(d2)
        assert (d1.children == [d2])
        d1.children = [d3]
        assert (d1.children == [d3])
    def test_is_leaf_attribute_is_read_only(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(AttributeError) as cm:
            setattr(d1, 'is_leaf', 'this will not work')
        assert (str(cm.value) == "can't set attribute")
    def test_is_leaf_attribute_is_working_properly(self):
        # A node with children is not a leaf; childless nodes are.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        d3 = DAGMixinFooMixedInClass(**kwargs)
        d1.children = [d2, d3]
        assert (d1.is_leaf is False)
        assert (d2.is_leaf is True)
        assert (d3.is_leaf is True)
    def test_is_root_attribute_is_read_only(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(AttributeError) as cm:
            setattr(d1, 'is_root', 'this will not work')
        assert (str(cm.value) == "can't set attribute")
    def test_is_root_attribute_is_working_properly(self):
        # Only the parentless node is a root.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        d3 = DAGMixinFooMixedInClass(**kwargs)
        d1.children = [d2, d3]
        assert (d1.is_root is True)
        assert (d2.is_root is False)
        assert (d3.is_root is False)
    def test_is_container_attribute_is_read_only(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(AttributeError) as cm:
            setattr(d1, 'is_container', 'this will not work')
        assert (str(cm.value) == "can't set attribute")
    def test_is_container_attribute_working_properly(self):
        # Nodes with at least one child are containers.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        d3 = DAGMixinFooMixedInClass(**kwargs)
        d4 = DAGMixinFooMixedInClass(**kwargs)
        d1.children = [d2, d3]
        d2.children = [d4]
        assert (d1.is_container is True)
        assert (d2.is_container is True)
        assert (d3.is_container is False)
        assert (d4.is_container is False)
    def test_parents_property_is_read_only(self):
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        with pytest.raises(AttributeError) as cm:
            setattr(d1, 'parents', 'this will not work')
        assert (str(cm.value) == "can't set attribute")
    def test_parents_property_is_working_properly(self):
        # 'parents' lists ancestors from the root down to the direct parent.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        d3 = DAGMixinFooMixedInClass(**kwargs)
        d4 = DAGMixinFooMixedInClass(**kwargs)
        d1.children = [d2, d3]
        d2.children = [d4]
        assert (d1.parents == [])
        assert (d2.parents == [d1])
        assert (d3.parents == [d1])
        assert (d4.parents == [d1, d2])
    def test_walk_hierarchy_is_working_properly(self):
        # Default walk is depth-first; method=1 walks breadth-first.
        kwargs = copy.copy(self.kwargs)
        d1 = DAGMixinFooMixedInClass(**kwargs)
        d2 = DAGMixinFooMixedInClass(**kwargs)
        d3 = DAGMixinFooMixedInClass(**kwargs)
        d4 = DAGMixinFooMixedInClass(**kwargs)
        d1.children = [d2, d3]
        d2.children = [d4]
        entities_walked = []
        for e in d1.walk_hierarchy():
            entities_walked.append(e)
        assert (entities_walked == [d1, d2, d4, d3])
        entities_walked = []
        for e in d1.walk_hierarchy(method=1):
            entities_walked.append(e)
        assert (entities_walked == [d1, d2, d3, d4])
        entities_walked = []
        for e in d2.walk_hierarchy():
            entities_walked.append(e)
        assert (entities_walked == [d2, d4])
        entities_walked = []
        for e in d3.walk_hierarchy():
            entities_walked.append(e)
        assert (entities_walked == [d3])
        entities_walked = []
        for e in d4.walk_hierarchy():
            entities_walked.append(e)
        assert (entities_walked == [d4])
class OptionSeriesSunburstSonificationContexttracksMappingPitch(Options):
    """Generated Highcharts options wrapper: sunburst sonification pitch mapping.

    NOTE(review): each getter/setter pair below was presumably decorated with
    @property/@<name>.setter in the generator's output; the decorators appear
    to have been stripped, so as written later defs shadow earlier ones.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        # Default: map pitch to the 'y' value.
        return self._config_get('y')
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Default upper pitch bound: note c6.
        return self._config_get('c6')
    def max(self, text: str):
        self._config(text, js_type=False)
    def min(self):
        # Default lower pitch bound: note c2.
        return self._config_get('c2')
    def min(self, text: str):
        self._config(text, js_type=False)
    def scale(self):
        return self._config_get(None)
    def scale(self, value: Any):
        self._config(value, js_type=False)
    def within(self):
        # Default: pitch range computed within the yAxis extremes.
        return self._config_get('yAxis')
    def within(self, text: str):
        self._config(text, js_type=False)
.django_db
def test_child_recipient_with_id_and_no_name(recipient_lookup):
recipient_parameters = {'recipient_name': None, 'recipient_uei': '456', 'parent_recipient_uei': '123', 'recipient_unique_id': None, 'parent_recipient_unique_id': None}
expected_result = 'ede3440b-e344-a923-035e-feed34773b57-C'
assert (obtain_recipient_uri(**recipient_parameters) == expected_result) |
def parse_mongo_uri(db_uri):
    """Split a MongoDB URI into (user, pwd, host, port, db).

    Accepts 'mongodb://host:port/db' and 'mongodb://user:pwd@host:port/db';
    user and pwd are None when credentials are absent.
    Raises Exception when the URI does not match.
    """
    # The previous pattern had no '@' separator after the credentials, so a
    # URI with 'user:pwd@host' was split into garbage groups (the password
    # matched a single character and the rest leaked into the host).
    regex_str = r'^(?P<schema>(mongodb:(?:\/{2})?))((?P<user>[^:@\/]+?):(?P<pwd>[^@]+?)@|:?)(?P<host>(\S+?)):(?P<port>(\d+))(\/(?P<db>(\S+?)))$'
    pattern = re.compile(regex_str)
    m = pattern.match(db_uri)
    if (m is None):
        raise Exception('The URI of MongoDB is invalid')
    return (m.group('user'), m.group('pwd'), m.group('host'), m.group('port'), m.group('db'))
def _try_to_count_token(prompt: str, tokenizer, model) -> int:
    """Best-effort token count for *prompt*; returns -1 on any failure.

    Proxy models count their own tokens; otherwise the local tokenizer is used.
    """
    try:
        from dbgpt.model.proxy.llms.proxy_model import ProxyModel
        if isinstance(model, ProxyModel):
            return model.count_token(prompt)
        token_ids = tokenizer(prompt).input_ids[0]
        return len(token_ids)
    except Exception as e:
        logger.warning(f'Count token error, detail: {e}, return -1')
        return -1
def get_wrapper(data, *args, **kwargs):
    """Return *data* wrapped: already-wrapped objects pass through; otherwise
    the first registered handler that accepts the data produces the wrapper.

    Raises ValueError when no registered handler accepts the data.
    """
    if isinstance(data, Base):
        return data
    for handler in _wrappers().values():
        candidate = handler(data, *args, **kwargs)
        if candidate is not None:
            return candidate.mutate()
    cls = data.__class__
    fullname = f'{cls.__module__}.{cls.__qualname__}'
    raise ValueError(f'Cannot find a wrapper for class {fullname}')
class RegisteredNexthop(stringify.StringifyMixin):
    """A Zebra registered-nexthop entry: (connected flag, family, IP prefix).

    Wire format is '!?H' (1-byte bool + 2-byte address family) followed by a
    variable-length IP prefix.
    """
    _HEADER_FMT = '!?H'
    HEADER_SIZE = struct.calcsize(_HEADER_FMT)
    def __init__(self, connected, family, prefix):
        super(RegisteredNexthop, self).__init__()
        self.connected = connected
        self.family = family
        # Accept a prefix object or its string form; store the string.
        if isinstance(prefix, (IPv4Prefix, IPv6Prefix)):
            prefix = prefix.prefix
        self.prefix = prefix
    # NOTE(review): 'flags' looks like a @property/@flags.setter pair whose
    # decorators were stripped; it aliases the 'connected' attribute.
    def flags(self):
        return self.connected
    def flags(self, v):
        self.connected = v
    # NOTE(review): presumably a @classmethod in the original.
    def parse(cls, buf):
        # Parse one entry from buf; returns (instance, remaining bytes).
        (connected, family) = struct.unpack_from(cls._HEADER_FMT, buf)
        rest = buf[cls.HEADER_SIZE:]
        (prefix, rest) = _parse_ip_prefix(family, rest)
        return (cls(connected, family, prefix), rest)
    def serialize(self):
        # Serialize back to the Zebra wire format (header + prefix).
        buf = struct.pack(self._HEADER_FMT, self.connected, self.family)
        return (buf + _serialize_ip_prefix(self.prefix))
class barrier_reply(message):
    """OpenFlow 1.5 (wire version 6) BARRIER_REPLY message.

    Loxigen-generated code; NOTE(review): ''.join over struct.pack results is
    Python-2 era (struct.pack returns bytes on py3), and unpack() was
    presumably a @staticmethod whose decorator has been stripped.
    """
    version = 6
    type = 21
    def __init__(self, xid=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize the message; the length field is back-patched after packing."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Deserialize from a reader, asserting version/type and bounding by length."""
        obj = barrier_reply()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 21)
        _length = reader.read('!H')[0]
        # Restrict further reads to this message's declared length
        # (4 bytes of header already consumed).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        return obj
    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        return True
    def pretty_print(self, q):
        q.text('barrier_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
            q.breakable()
        q.text('}')
class DataSim(Module, AutoCSR):
    """Migen model of the DRAM data path: 8 byte-lane memories driven by
    DQ/DQS read and write submodules, triggered off the command stream.

    NOTE(review): timing assumes write data is launched CWL-1 cycles and read
    data captured CL-1 (+1 cycle skew) after the command -- confirm against
    the rest of the simulator.
    """
    def __init__(self, pads, cmds_sim, *, cd_dq_wr, cd_dq_rd, cd_dqs_wr, cd_dqs_rd, cl, cwl, clk_freq, log_level):
        self.submodules.log = log = SimLogger(log_level=log_level, clk_freq=clk_freq)
        self.log.add_csrs()
        # Burst length and memory geometry (one Memory per byte lane).
        bl = 16
        (nrows, ncols) = (32768, 1024)
        mems = [Memory(len(pads.dq), depth=(nrows * ncols)) for _ in range(8)]
        ports = [mem.get_port(write_capable=True, we_granularity=8, async_read=True) for mem in mems]
        self.specials += (mems + ports)
        ports = Array(ports)
        # Current address latched from the command stream.
        bank = Signal(3)
        row = Signal(17)
        col = Signal(10)
        dq_kwargs = dict(bank=bank, row=row, col=col, bl=bl, nrows=nrows, ncols=ncols, log_level=log_level, clk_freq=clk_freq)
        dqs_kwargs = dict(bl=bl, log_level=log_level, clk_freq=clk_freq)
        # DQ/DQS handlers, each in its own clock domain.
        self.submodules.dq_wr = ClockDomainsRenamer(cd_dq_wr)(DQWrite(dq=pads.dq, dmi=pads.dmi, ports=ports, **dq_kwargs))
        self.submodules.dq_rd = ClockDomainsRenamer(cd_dq_rd)(DQRead(dq=pads.dq_i, ports=ports, **dq_kwargs))
        self.submodules.dqs_wr = ClockDomainsRenamer(cd_dqs_wr)(DQSWrite(dqs=pads.dqs, **dqs_kwargs))
        self.submodules.dqs_rd = ClockDomainsRenamer(cd_dqs_rd)(DQSRead(dqs=pads.dqs_i, **dqs_kwargs))
        write = Signal()
        read = Signal()
        read_skew = 1
        # Fire write/read strobes at the CWL/CL-delayed taps of the command
        # data-enable line, and latch the address when data is accepted.
        self.comb += [write.eq(((cmds_sim.data_en.taps[(cwl - 1)] & cmds_sim.data.source.valid) & cmds_sim.data.source.we)), read.eq(((cmds_sim.data_en.taps[((cl - 1) + read_skew)] & cmds_sim.data.source.valid) & (~ cmds_sim.data.source.we))), cmds_sim.data.source.ready.eq((write | read)), self.dq_wr.masked.eq((write & cmds_sim.data.source.masked)), self.dq_wr.trigger.eq(write), self.dq_rd.trigger.eq(read), self.dqs_wr.trigger.eq(write), self.dqs_rd.trigger.eq(read)]
        self.sync += [If(cmds_sim.data.source.ready, bank.eq(cmds_sim.data.source.bank), row.eq(cmds_sim.data.source.row), col.eq(cmds_sim.data.source.col))]
def fxp_sum(x, sizes='best_sizes', axis=None, dtype=None, out=None, vdtype=None):
    """Sum a Fxp (or array-like) and return the result as a Fxp.

    Output sizing is resolved in priority order: explicit *dtype*, then *out*
    (must be a Fxp), then the *sizes* strategy ('best_sizes', 'tight_sizes'
    or 'same_sizes'). Raises TypeError/ValueError on invalid arguments.
    """
    raw_vals = x.get_val() if isinstance(x, Fxp) else x
    total = np.sum(raw_vals, axis=axis, dtype=vdtype)
    if dtype is not None:
        signed, n_word, n_frac = utils.get_sizes_from_dtype(dtype)
        return Fxp(total, signed=signed, n_word=n_word, n_frac=n_frac)
    if out is not None:
        if not isinstance(out, Fxp):
            raise TypeError('out argument must be a Fxp object!')
        return out(total)
    if sizes == 'best_sizes':
        # Grow the word by log2(element count) so the sum cannot overflow.
        n_word = np.ceil(np.log2(x().size)).astype(int) + x.n_word
        return Fxp(total, signed=x.signed, n_word=n_word, n_frac=x.n_frac)
    if sizes == 'tight_sizes':
        return Fxp(total, signed=x.signed)
    if sizes == 'same_sizes':
        return Fxp(total, like=x)
    raise ValueError('Could not resolve output size!')
def test_traverse_overridden():
    """traverse() must yield the whole 'provided' chain plus the override."""
    base = providers.Provider()
    provided = base.provided
    method = provided.method
    override = providers.Provider()
    provider = method.call()
    provider.override(override)
    visited = list(provider.traverse())
    assert len(visited) == 4
    for expected in (base, override, provided, method):
        assert expected in visited
class Project(BaseModel):
    """A UI project: metadata plus snapshot and dashboard operations, all
    delegated to a bound ProjectManager acting as the bound user."""
    class Config():
        underscore_attrs_are_private = True
    id: ProjectID = Field(default_factory=uuid.uuid4)
    name: str
    description: Optional[str] = None
    dashboard: 'DashboardConfig' = Field(default_factory=_default_dashboard)
    team_id: Optional[TeamID]
    date_from: Optional[datetime.datetime] = None
    date_to: Optional[datetime.datetime] = None
    _project_manager: 'ProjectManager' = PrivateAttr(None)
    _user_id: UserID = PrivateAttr(None)
    def bind(self, project_manager: Optional['ProjectManager'], user_id: Optional[UserID]):
        # Attach the backing manager and acting user; returns self for chaining.
        self._project_manager = project_manager
        self._user_id = user_id
        return self
    # NOTE(review): presumably @property in the original (decorator stripped) --
    # the methods below access it as an attribute (self.project_manager.*).
    def project_manager(self) -> 'ProjectManager':
        if (self._project_manager is None):
            raise ValueError('Project is not binded')
        return self._project_manager
    def save(self):
        self.project_manager.update_project(self._user_id, self)
        return self
    def load_snapshot(self, snapshot_id: uuid.UUID) -> Snapshot:
        return self.project_manager.load_snapshot(self._user_id, self.id, snapshot_id)
    def add_snapshot(self, snapshot: Snapshot):
        self.project_manager.add_snapshot(self._user_id, self.id, snapshot)
    def delete_snapshot(self, snapshot_id: Union[(str, uuid.UUID)]):
        # Accept string UUIDs for convenience.
        if isinstance(snapshot_id, str):
            snapshot_id = uuid.UUID(snapshot_id)
        self.project_manager.delete_snapshot(self._user_id, self.id, snapshot_id)
    def list_snapshots(self, include_reports: bool=True, include_test_suites: bool=True) -> List[SnapshotMetadata]:
        return self.project_manager.list_snapshots(self._user_id, self.id, include_reports, include_test_suites)
    def get_snapshot_metadata(self, id: uuid.UUID) -> SnapshotMetadata:
        return self.project_manager.get_snapshot_metadata(self._user_id, self.id, id)
    def build_dashboard_info(self, timestamp_start: Optional[datetime.datetime], timestamp_end: Optional[datetime.datetime]) -> DashboardInfo:
        return self.dashboard.build(self.project_manager.data, self.id, timestamp_start, timestamp_end)
    def show_dashboard(self, timestamp_start: Optional[datetime.datetime]=None, timestamp_end: Optional[datetime.datetime]=None):
        # Render the dashboard inline; only works inside IPython/Jupyter.
        dashboard_info = self.build_dashboard_info(timestamp_start, timestamp_end)
        template_params = TemplateParams(dashboard_id=('pd_' + str(uuid.uuid4()).replace('-', '')), dashboard_info=dashboard_info, additional_graphs={})
        try:
            from IPython.display import HTML
            return HTML(determine_template('inline')(params=template_params))
        except ImportError as err:
            raise Exception('Cannot import HTML from IPython.display, no way to show html') from err
    def reload(self, reload_snapshots: bool=False):
        # Re-fetch this project's state from the manager in place.
        project = self.project_manager.get_project(self._user_id, self.id)
        self.__dict__.update(project.__dict__)
        if reload_snapshots:
            self.project_manager.reload_snapshots(self._user_id, self.id)
def get_unit_max_levels(is_jp: bool) -> Optional[tuple[(list[int], list[int])]]:
    """Return (max base levels, max plus levels) from unitbuy.csv, or None
    when the file cannot be fetched."""
    raw = game_data_getter.get_file_latest('DataLocal', 'unitbuy.csv', is_jp)
    if raw is None:
        helper.error_text('Could not get unitbuy.csv')
        return None
    parsed = helper.parse_int_list_list(csv_handler.parse_csv(raw.decode('utf-8')))
    # Column 50 holds the max base level, column 51 the max plus level.
    return (helper.copy_first_n(parsed, 50), helper.copy_first_n(parsed, 51))
class recursive_generator(_coconut_base_callable):
    """Decorator that memoizes a generator function.

    Each distinct argument tuple gets one reiterable wrapping the generator,
    so repeated calls (including recursive ones) share a single evaluation.
    Part of the Coconut runtime header.
    """
    __slots__ = ('func', 'reit_store')
    def __init__(self, func):
        self.func = func
        self.reit_store = _coconut.dict()
    def __call__(self, *args, **kwargs):
        # Key on (tag, args, kwargs); when the arguments are unhashable,
        # fall back to a pickled form of the key (tag 1).
        key = (0, args, _coconut.frozenset(kwargs.items()))
        try:
            _coconut.hash(key)
        except _coconut.TypeError:
            try:
                key = (1, _coconut.pickle.dumps(key, (- 1)))
            except _coconut.Exception:
                raise _coconut.TypeError('recursive_generator() requires function arguments to be hashable or pickleable')
        reit = self.reit_store.get(key)
        if (reit is None):
            reit = _coconut_reiterable(self.func(*args, **kwargs))
            self.reit_store[key] = reit
        return reit
    def __repr__(self):
        return ('recursive_generator(%r)' % (self.func,))
    def __reduce__(self):
        # Pickle as (class, (func,)) -- the memo store is rebuilt empty.
        return (self.__class__, (self.func,))
def test_sse_broadcast(browser, clear_log):
    """Broadcast messages via /all and verify the SSE log, then disconnect."""
    # Connect WS1 and confirm the SSE stream saw the connection.
    browser.slow_click('#button1')
    browser.assert_text('WS1 CONNECTED', 'div.sse', timeout=5)

    # Each /all broadcast should be echoed into the SSE log with a [WS1] tag.
    for message in ('Zombie alert!', 'Zombie apocalypse averted (for now)'):
        clear_log()
        browser.type('#input1', '/all ' + message)
        browser.slow_click('#button1')
        browser.assert_text('[WS1] ' + message, 'div.sse', timeout=5)

    # /quit disconnects: goodbye in the WS pane, disconnect notice on SSE.
    clear_log()
    browser.type('#input1', '/quit')
    browser.slow_click('#button1')
    browser.assert_text('Bye, WS1!', 'div.ws1', timeout=5)
    browser.assert_text('WS1 DISCONNECTED', 'div.sse', timeout=5)
class OptionalA(ArgumentProcessor):
    """Argument processor that passes None through and delegates otherwise.

    Wraps another processor (given as an instance or as a processor class,
    which is instantiated with no arguments) and applies it only to non-None
    arguments.
    """

    def __init__(self, arg_proc):
        # Accept a processor class or an already-built instance.
        if is_subclass_obj(arg_proc, ArgumentProcessor):
            arg_proc = arg_proc()
        self.arg_proc = arg_proc

    def setdata(self, i, arg_name, f_name):
        # Keep the wrapped processor's metadata in sync with our own.
        super().setdata(i, arg_name, f_name)
        self.arg_proc.setdata(i, arg_name, f_name)

    def __call__(self, opt_arg, all_args):
        # None is treated as "absent": returned untouched, never delegated.
        return opt_arg if opt_arg is None else self.arg_proc(opt_arg, all_args)
def test_log_dir(tmp_project):
    """Verify simple_deploy wrote exactly one log file with the expected entries.

    Checks that the ``simple_deploy_logs`` directory exists under the project,
    contains a single log file, and that the log records each key stage of the
    configuration run (startup, CLI args, git status, final instructions).
    """
    log_path = Path(tmp_project / 'simple_deploy_logs')
    assert log_path.exists()

    log_files = sorted(log_path.glob('*'))
    # Exactly one run was logged.
    assert len(log_files) == 1

    log_file_text = log_files[0].read_text()
    # Each entry a successful run is expected to write, in no particular order.
    expected_entries = [
        'INFO: Logging run of `manage.py simple_deploy`...',
        'INFO: Configuring project for deployment to Platform.sh...',
        'INFO: CLI args: {',
        'INFO: Deployment target: platform_sh',
        'INFO: Project name: blog',
        'INFO: git status:',
        'INFO: Untracked files:',
        'INFO: (use "git add <file>..." to include in what will be committed)',
        'INFO: \tsimple_deploy_logs/',
        'INFO: --- Your project is now configured for deployment on Platform.sh. ---',
        'INFO: To deploy your project, you will need to:',
        'INFO: - You can find a full record of this configuration in the simple_deploy_logs directory.',
    ]
    for entry in expected_entries:
        assert entry in log_file_text
class Migration(migrations.Migration):
initial = True
dependencies = [('references', '0001_initial'), ('submissions', '0001_initial'), ('accounts', '0001_initial')]
operations = [migrations.CreateModel(name='Award', fields=[('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)), ('id', models.BigAutoField(primary_key=True, serialize=False)), ('type', models.TextField(db_index=True, help_text='The mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments.', null=True, verbose_name='Award Type')), ('type_description', models.TextField(blank=True, help_text='The plain text description of the type of the award', null=True, verbose_name='Award Type Description')), ('category', models.TextField(db_index=True, help_text="A field that generalizes the award's type.", null=True, verbose_name='Category')), ('piid', models.TextField(blank=True, db_index=True, help_text='Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers.', null=True)), ('fpds_agency_id', models.TextField(blank=True, null=True)), ('fpds_parent_agency_id', models.TextField(blank=True, null=True)), ('fain', models.TextField(blank=True, db_index=True, help_text="An identification code assigned to each financial assistance award tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award's life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. 
FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.", null=True)), ('uri', models.TextField(blank=True, db_index=True, help_text='The uri of the award', null=True)), ('total_obligation', models.DecimalField(db_index=True, decimal_places=2, help_text='The amount of money the government is obligated to pay for the award', max_digits=23, null=True, verbose_name='Total Obligated')), ('total_subsidy_cost', models.DecimalField(blank=True, decimal_places=2, help_text='The total of the original_loan_subsidy_cost from associated transactions', max_digits=23, null=True)), ('total_loan_value', models.DecimalField(blank=True, decimal_places=2, help_text='The total of the face_value_loan_guarantee from associated transactions', max_digits=23, null=True)), ('date_signed', models.DateField(help_text='The date the award was signed', null=True, verbose_name='Award Date')), ('description', models.TextField(help_text='A description of the award', null=True, verbose_name='Award Description')), ('period_of_performance_start_date', models.DateField(db_index=True, help_text='The start date for the period of performance', null=True, verbose_name='Start Date')), ('period_of_performance_current_end_date', models.DateField(db_index=True, help_text='The current, not original, period of performance end date', null=True, verbose_name='End Date')), ('base_and_all_options_value', models.DecimalField(blank=True, decimal_places=2, help_text='The sum of the base_and_all_options_value from associated transactions', max_digits=23, null=True, verbose_name='Base and All Options Value')), ('base_exercised_options_val', models.DecimalField(blank=True, decimal_places=2, help_text='The sum of the base_exercised_options_val from associated transactions', max_digits=23, null=True, verbose_name='Combined Base and Exercised Options')), ('last_modified_date', models.DateField(blank=True, help_text='The date this award was last modified', null=True)), 
('certified_date', models.DateField(blank=True, help_text='The date this record was certified', null=True)), ('create_date', models.DateTimeField(auto_now_add=True, help_text='The date this record was created in the API', null=True)), ('update_date', models.DateTimeField(auto_now=True, help_text='The last time this record was updated in the API', null=True)), ('parent_award_piid', models.TextField(db_index=True, help_text="The piid of the Award's parent Award", null=True, verbose_name='Parent Award Piid')), ('generated_unique_award_id', models.TextField(default='NONE', verbose_name='Generated Unique Award ID')), ('is_fpds', models.BooleanField(default=False, verbose_name='Is FPDS')), ('transaction_unique_id', models.TextField(default='NONE', verbose_name='Transaction Unique ID')), ('total_funding_amount', models.DecimalField(blank=True, decimal_places=2, help_text="A summation of this award's transactions' funding amount", max_digits=23, null=True)), ('non_federal_funding_amount', models.DecimalField(blank=True, decimal_places=2, help_text="A summation of this award's transactions' non-federal funding amount", max_digits=23, null=True)), ('fiscal_year', models.IntegerField(blank=True, help_text='Fiscal Year calculated based on Action Date', null=True)), ('total_subaward_amount', models.DecimalField(decimal_places=2, max_digits=23, null=True)), ('subaward_count', models.IntegerField(default=0)), ('officer_1_name', models.TextField(blank=True, help_text='Executive Compensation Officer 1 Name', null=True)), ('officer_1_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Executive Compensation Officer 1 Amount', max_digits=23, null=True)), ('officer_2_name', models.TextField(blank=True, help_text='Executive Compensation Officer 2 Name', null=True)), ('officer_2_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Executive Compensation Officer 2 Amount', max_digits=23, null=True)), ('officer_3_name', models.TextField(blank=True, 
help_text='Executive Compensation Officer 3 Name', null=True)), ('officer_3_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Executive Compensation Officer 3 Amount', max_digits=23, null=True)), ('officer_4_name', models.TextField(blank=True, help_text='Executive Compensation Officer 4 Name', null=True)), ('officer_4_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Executive Compensation Officer 4 Amount', max_digits=23, null=True)), ('officer_5_name', models.TextField(blank=True, help_text='Executive Compensation Officer 5 Name', null=True)), ('officer_5_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Executive Compensation Officer 5 Amount', max_digits=23, null=True))], options={'db_table': 'awards'}), migrations.CreateModel(name='BrokerSubaward', fields=[('created_at', models.DateTimeField(blank=True, db_index=True, null=True)), ('updated_at', models.DateTimeField(blank=True, db_index=True, null=True)), ('id', models.IntegerField(db_index=True, primary_key=True, serialize=False)), ('unique_award_key', models.TextField(blank=True, null=True)), ('award_id', models.TextField(blank=True, null=True)), ('parent_award_id', models.TextField(blank=True, null=True)), ('award_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('action_date', models.DateField(blank=True, null=True)), ('fy', models.TextField(blank=True, null=True)), ('awarding_agency_code', models.TextField(blank=True, null=True)), ('awarding_agency_name', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_c', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_n', models.TextField(blank=True, null=True)), ('awarding_office_code', models.TextField(blank=True, null=True)), ('awarding_office_name', models.TextField(blank=True, null=True)), ('funding_agency_code', models.TextField(blank=True, null=True)), ('funding_agency_name', models.TextField(blank=True, null=True)), 
('funding_sub_tier_agency_co', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_na', models.TextField(blank=True, null=True)), ('funding_office_code', models.TextField(blank=True, null=True)), ('funding_office_name', models.TextField(blank=True, null=True)), ('awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('dba_name', models.TextField(blank=True, null=True)), ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)), ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('legal_entity_country_code', models.TextField(blank=True, null=True)), ('legal_entity_country_name', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_city_name', models.TextField(blank=True, null=True)), ('legal_entity_state_code', models.TextField(blank=True, null=True)), ('legal_entity_state_name', models.TextField(blank=True, null=True)), ('legal_entity_zip', models.TextField(blank=True, null=True)), ('legal_entity_congressional', models.TextField(blank=True, null=True)), ('legal_entity_foreign_posta', models.TextField(blank=True, null=True)), ('business_types', models.TextField(blank=True, null=True)), ('place_of_perform_city_name', models.TextField(blank=True, null=True)), ('place_of_perform_state_code', models.TextField(blank=True, null=True)), ('place_of_perform_state_name', models.TextField(blank=True, null=True)), ('place_of_performance_zip', models.TextField(blank=True, null=True)), ('place_of_perform_congressio', models.TextField(blank=True, null=True)), ('place_of_perform_country_co', models.TextField(blank=True, null=True)), ('place_of_perform_country_na', models.TextField(blank=True, null=True)), ('award_description', models.TextField(blank=True, null=True)), ('naics', models.TextField(blank=True, null=True)), ('naics_description', 
models.TextField(blank=True, null=True)), ('cfda_numbers', models.TextField(blank=True, null=True)), ('cfda_titles', models.TextField(blank=True, null=True)), ('subaward_type', models.TextField(blank=True, null=True)), ('subaward_report_year', models.SmallIntegerField()), ('subaward_report_month', models.SmallIntegerField()), ('subaward_number', models.TextField(blank=True, null=True)), ('subaward_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_action_date', models.DateField(blank=True, null=True)), ('sub_awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('sub_awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('sub_dba_name', models.TextField(blank=True, null=True)), ('sub_ultimate_parent_unique_ide', models.TextField(blank=True, null=True)), ('sub_ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('sub_legal_entity_country_code', models.TextField(blank=True, null=True)), ('sub_legal_entity_country_name', models.TextField(blank=True, null=True)), ('sub_legal_entity_address_line1', models.TextField(blank=True, null=True)), ('sub_legal_entity_city_name', models.TextField(blank=True, null=True)), ('sub_legal_entity_state_code', models.TextField(blank=True, null=True)), ('sub_legal_entity_state_name', models.TextField(blank=True, null=True)), ('sub_legal_entity_zip', models.TextField(blank=True, null=True)), ('sub_legal_entity_congressional', models.TextField(blank=True, null=True)), ('sub_legal_entity_foreign_posta', models.TextField(blank=True, null=True)), ('sub_business_types', models.TextField(blank=True, null=True)), ('sub_place_of_perform_city_name', models.TextField(blank=True, null=True)), ('sub_place_of_perform_state_code', models.TextField(blank=True, null=True)), ('sub_place_of_perform_state_name', models.TextField(blank=True, null=True)), ('sub_place_of_performance_zip', models.TextField(blank=True, null=True)), ('sub_place_of_perform_congressio', 
models.TextField(blank=True, null=True)), ('sub_place_of_perform_country_co', models.TextField(blank=True, null=True)), ('sub_place_of_perform_country_na', models.TextField(blank=True, null=True)), ('subaward_description', models.TextField(blank=True, null=True)), ('sub_high_comp_officer1_full_na', models.TextField(blank=True, null=True)), ('sub_high_comp_officer1_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_high_comp_officer2_full_na', models.TextField(blank=True, null=True)), ('sub_high_comp_officer2_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_high_comp_officer3_full_na', models.TextField(blank=True, null=True)), ('sub_high_comp_officer3_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_high_comp_officer4_full_na', models.TextField(blank=True, null=True)), ('sub_high_comp_officer4_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_high_comp_officer5_full_na', models.TextField(blank=True, null=True)), ('sub_high_comp_officer5_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('prime_id', models.IntegerField(blank=True, null=True)), ('internal_id', models.TextField(blank=True, null=True)), ('date_submitted', models.DateTimeField(blank=True, null=True)), ('report_type', models.TextField(blank=True, null=True)), ('transaction_type', models.TextField(blank=True, null=True)), ('program_title', models.TextField(blank=True, null=True)), ('contract_agency_code', models.TextField(blank=True, null=True)), ('contract_idv_agency_code', models.TextField(blank=True, null=True)), ('grant_funding_agency_id', models.TextField(blank=True, null=True)), ('grant_funding_agency_name', models.TextField(blank=True, null=True)), ('federal_agency_name', models.TextField(blank=True, null=True)), ('treasury_symbol', models.TextField(blank=True, null=True)), ('dunsplus4', 
models.TextField(blank=True, null=True)), ('recovery_model_q1', models.BooleanField(null=True, blank=True)), ('recovery_model_q2', models.BooleanField(null=True, blank=True)), ('compensation_q1', models.BooleanField(null=True, blank=True)), ('compensation_q2', models.BooleanField(null=True, blank=True)), ('high_comp_officer1_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer1_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('high_comp_officer2_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer2_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('high_comp_officer3_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer3_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('high_comp_officer4_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer4_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('high_comp_officer5_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer5_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('sub_id', models.IntegerField(blank=True, null=True)), ('sub_parent_id', models.IntegerField(blank=True, null=True)), ('sub_federal_agency_id', models.TextField(blank=True, null=True)), ('sub_federal_agency_name', models.TextField(blank=True, null=True)), ('sub_funding_agency_id', models.TextField(blank=True, null=True)), ('sub_funding_agency_name', models.TextField(blank=True, null=True)), ('sub_funding_office_id', models.TextField(blank=True, null=True)), ('sub_funding_office_name', models.TextField(blank=True, null=True)), ('sub_naics', models.TextField(blank=True, null=True)), ('sub_cfda_numbers', models.TextField(blank=True, null=True)), ('sub_dunsplus4', models.TextField(blank=True, null=True)), ('sub_recovery_subcontract_amt', models.TextField(blank=True, null=True)), 
('sub_recovery_model_q1', models.BooleanField(null=True, blank=True)), ('sub_recovery_model_q2', models.BooleanField(null=True, blank=True)), ('sub_compensation_q1', models.BooleanField(null=True, blank=True)), ('sub_compensation_q2', models.BooleanField(null=True, blank=True)), ('place_of_perform_street', models.TextField(blank=True, null=True)), ('sub_place_of_perform_street', models.TextField(blank=True, null=True))], options={'db_table': 'broker_subaward', 'managed': True}), migrations.CreateModel(name='CovidFinancialAccountMatview', fields=[], options={'db_table': 'mv_covid_financial_account', 'managed': False}), migrations.CreateModel(name='FinancialAccountsByAwards', fields=[('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)), ('financial_accounts_by_awards_id', models.AutoField(primary_key=True, serialize=False)), ('piid', models.TextField(blank=True, null=True)), ('parent_award_id', models.TextField(blank=True, null=True)), ('fain', models.TextField(blank=True, null=True)), ('uri', models.TextField(blank=True, null=True)), ('ussgl480100_undelivered_orders_obligations_unpaid_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl480100_undelivered_orders_obligations_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl483100_undelivered_orders_oblig_transferred_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl488100_upward_adjust_pri_undeliv_order_oblig_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl490100_delivered_orders_obligations_unpaid_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl490100_delivered_orders_obligations_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, 
null=True)), ('ussgl493100_delivered_orders_oblig_transferred_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl498100_upward_adjust_pri_deliv_orders_oblig_unpaid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl480200_undelivered_orders_oblig_prepaid_advanced_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl480200_undelivered_orders_oblig_prepaid_advanced_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl483200_undeliv_orders_oblig_transferred_prepaid_adv_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl488200_up_adjust_pri_undeliv_order_oblig_ppaid_adv_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl490200_delivered_orders_obligations_paid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl490800_authority_outlayed_not_yet_disbursed_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl490800_authority_outlayed_not_yet_disbursed_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl498200_upward_adjust_pri_deliv_orders_oblig_paid_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('obligations_undelivered_orders_unpaid_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('obligations_delivered_orders_unpaid_total_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('obligations_delivered_orders_unpaid_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlays_undelivered_orders_prepaid_total_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlays_undelivered_orders_prepaid_total_cpe', 
models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlays_delivered_orders_paid_total_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlay_amount_by_award_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlay_amount_by_award_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('obligations_incurred_total_by_award_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl487100_down_adj_pri_unpaid_undel_orders_oblig_recov_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl497100_down_adj_pri_unpaid_deliv_orders_oblig_recov_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('deobligations_recoveries_refunds_of_prior_year_by_award_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('obligations_undelivered_orders_unpaid_total_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('gross_outlays_delivered_orders_paid_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('drv_award_id_field_type', models.TextField(blank=True, null=True)), ('drv_obligations_incurred_total_by_award', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('transaction_obligated_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('reporting_period_start', models.DateField(blank=True, null=True)), ('reporting_period_end', models.DateField(blank=True, null=True)), ('last_modified_date', 
models.DateField(blank=True, null=True)), ('certified_date', models.DateField(blank=True, null=True)), ('create_date', models.DateTimeField(auto_now_add=True, null=True)), ('update_date', models.DateTimeField(auto_now=True, null=True))], options={'db_table': 'financial_accounts_by_awards', 'managed': True}), migrations.CreateModel(name='Subaward', fields=[('id', models.IntegerField(primary_key=True, serialize=False)), ('subaward_number', models.TextField(db_index=True)), ('amount', models.DecimalField(decimal_places=2, max_digits=23)), ('description', models.TextField(blank=True, null=True)), ('recovery_model_question1', models.TextField(blank=True, null=True)), ('recovery_model_question2', models.TextField(blank=True, null=True)), ('action_date', models.DateField(blank=True, null=True)), ('award_report_fy_month', models.IntegerField()), ('award_report_fy_year', models.IntegerField()), ('broker_award_id', models.IntegerField(db_index=True, default=0, help_text='The ID of the parent award in broker', verbose_name='FSRS Award ID in the Broker')), ('internal_id', models.TextField(db_index=True, default='', help_text='The internal of the parent award in broker from FSRS', verbose_name='Internal ID of the parent Award')), ('award_type', models.TextField(db_index=True, default='unknown', help_text='Whether the parent Award is a Procurement or a Grant', verbose_name='Award Type')), ('unique_award_key', models.TextField(blank=True, db_index=True, null=True)), ('latest_transaction_id', models.BigIntegerField(blank=True, null=True)), ('last_modified_date', models.DateField(blank=True, null=True)), ('awarding_toptier_agency_name', models.TextField(blank=True, null=True)), ('awarding_subtier_agency_name', models.TextField(blank=True, null=True)), ('funding_toptier_agency_name', models.TextField(blank=True, null=True)), ('funding_subtier_agency_name', models.TextField(blank=True, null=True)), ('awarding_toptier_agency_abbreviation', models.TextField(blank=True, null=True)), 
('funding_toptier_agency_abbreviation', models.TextField(blank=True, null=True)), ('awarding_subtier_agency_abbreviation', models.TextField(blank=True, null=True)), ('funding_subtier_agency_abbreviation', models.TextField(blank=True, null=True)), ('prime_award_type', models.TextField(blank=True, null=True)), ('piid', models.TextField(blank=True, null=True)), ('fain', models.TextField(blank=True, null=True)), ('recipient_unique_id', models.TextField(blank=True, null=True)), ('recipient_name', models.TextField(blank=True, null=True)), ('dba_name', models.TextField(blank=True, null=True)), ('parent_recipient_unique_id', models.TextField(blank=True, null=True)), ('parent_recipient_name', models.TextField(blank=True, null=True)), ('business_type_code', models.TextField(blank=True, null=True)), ('business_type_description', models.TextField(blank=True, null=True)), ('business_categories', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)), ('prime_recipient_name', models.TextField(blank=True, null=True)), ('pulled_from', models.TextField(blank=True, null=True)), ('type_of_contract_pricing', models.TextField(blank=True, null=True)), ('type_set_aside', models.TextField(blank=True, null=True)), ('extent_competed', models.TextField(blank=True, null=True)), ('product_or_service_code', models.TextField(blank=True, null=True)), ('product_or_service_description', models.TextField(blank=True, null=True)), ('cfda_number', models.TextField(blank=True, null=True)), ('cfda_title', models.TextField(blank=True, null=True)), ('officer_1_name', models.TextField(blank=True, null=True)), ('officer_1_amount', models.TextField(blank=True, null=True)), ('officer_2_name', models.TextField(blank=True, null=True)), ('officer_2_amount', models.TextField(blank=True, null=True)), ('officer_3_name', models.TextField(blank=True, null=True)), ('officer_3_amount', models.TextField(blank=True, null=True)), ('officer_4_name', models.TextField(blank=True, 
null=True)), ('officer_4_amount', models.TextField(blank=True, null=True)), ('officer_5_name', models.TextField(blank=True, null=True)), ('officer_5_amount', models.TextField(blank=True, null=True)), ('recipient_location_country_code', models.TextField(blank=True, null=True)), ('recipient_location_country_name', models.TextField(blank=True, null=True)), ('recipient_location_state_code', models.TextField(blank=True, null=True)), ('recipient_location_state_name', models.TextField(blank=True, null=True)), ('recipient_location_county_code', models.TextField(blank=True, null=True)), ('recipient_location_county_name', models.TextField(blank=True, null=True)), ('recipient_location_city_code', models.TextField(blank=True, null=True)), ('recipient_location_city_name', models.TextField(blank=True, null=True)), ('recipient_location_zip4', models.TextField(blank=True, null=True)), ('recipient_location_zip5', models.TextField(blank=True, null=True)), ('recipient_location_street_address', models.TextField(blank=True, null=True)), ('recipient_location_congressional_code', models.TextField(blank=True, null=True)), ('recipient_location_foreign_postal_code', models.TextField(blank=True, null=True)), ('pop_country_code', models.TextField(blank=True, null=True)), ('pop_country_name', models.TextField(blank=True, null=True)), ('pop_state_code', models.TextField(blank=True, null=True)), ('pop_state_name', models.TextField(blank=True, null=True)), ('pop_county_code', models.TextField(blank=True, null=True)), ('pop_county_name', models.TextField(blank=True, null=True)), ('pop_city_code', models.TextField(blank=True, null=True)), ('pop_city_name', models.TextField(blank=True, null=True)), ('pop_zip4', models.TextField(blank=True, null=True)), ('pop_street_address', models.TextField(blank=True, null=True)), ('pop_congressional_code', models.TextField(blank=True, null=True)), ('updated_at', models.DateTimeField(blank=True, null=True))], options={'db_table': 'subaward', 'managed': True}), 
migrations.CreateModel(name='TransactionNormalized', fields=[('id', models.BigAutoField(primary_key=True, serialize=False)), ('usaspending_unique_transaction_id', models.TextField(blank=True, help_text='If this record is legacy USASpending data, this is the unique transaction identifier from that system', null=True)), ('type', models.TextField(help_text='The type for this transaction. For example, A, B, C, D', null=True, verbose_name='Action Type')), ('type_description', models.TextField(blank=True, help_text='The plain text description of the transaction type', null=True, verbose_name='Action Type Description')), ('period_of_performance_start_date', models.DateField(help_text='The period of performance start date', null=True, verbose_name='Period of Performance Start Date')), ('period_of_performance_current_end_date', models.DateField(help_text='The current end date of the period of performance', null=True, verbose_name='Period of Performance Current End Date')), ('action_date', models.DateField(db_index=True, help_text='The date this transaction was actioned', verbose_name='Transaction Date')), ('action_type', models.TextField(blank=True, help_text='The type of transaction. 
For example, A, B, C, D', null=True)), ('action_type_description', models.TextField(blank=True, null=True)), ('federal_action_obligation', models.DecimalField(blank=True, decimal_places=2, help_text='The obligation of the federal government for this transaction', max_digits=23, null=True)), ('original_loan_subsidy_cost', models.DecimalField(blank=True, decimal_places=2, help_text='The original_loan_subsidy_cost for loan type transactions', max_digits=23, null=True)), ('face_value_loan_guarantee', models.DecimalField(blank=True, decimal_places=2, help_text='The face_value_loan_guarantee for loan type transactions', max_digits=23, null=True)), ('modification_number', models.TextField(blank=True, help_text='The modification number for this transaction', null=True, verbose_name='Modification Number')), ('description', models.TextField(help_text='The description of this transaction', null=True)), ('drv_award_transaction_usaspend', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('drv_current_total_award_value_amount_adjustment', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('drv_potential_total_award_value_amount_adjustment', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('last_modified_date', models.DateField(blank=True, help_text='The date this transaction was last modified', null=True)), ('certified_date', models.DateField(blank=True, help_text='The date this transaction was certified', null=True)), ('create_date', models.DateTimeField(auto_now_add=True, help_text='The date this transaction was created in the API', null=True)), ('update_date', models.DateTimeField(auto_now=True, db_index=True, help_text='The last time this transaction was updated in the API', null=True)), ('fiscal_year', models.IntegerField(blank=True, help_text='Fiscal Year calculated based on Action Date', null=True)), ('transaction_unique_id', models.TextField(default='NONE', verbose_name='Transaction 
Unique ID')), ('is_fpds', models.BooleanField(default=False, verbose_name='Is FPDS')), ('funding_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Assistance data variable. non_federal_funding_amount + federal_action_obligation', max_digits=23, null=True)), ('non_federal_funding_amount', models.DecimalField(blank=True, decimal_places=2, help_text='Assistance Data variable.', max_digits=23, null=True)), ('unique_award_key', models.TextField(db_index=True, null=True)), ('business_categories', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None))], options={'db_table': 'transaction_normalized'}), migrations.CreateModel(name='ParentAward', fields=[('award', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='awards.Award')), ('generated_unique_award_id', models.TextField(unique=True)), ('direct_idv_count', models.IntegerField()), ('direct_contract_count', models.IntegerField()), ('direct_total_obligation', models.DecimalField(decimal_places=2, max_digits=23)), ('direct_base_and_all_options_value', models.DecimalField(decimal_places=2, max_digits=23)), ('direct_base_exercised_options_val', models.DecimalField(decimal_places=2, max_digits=23)), ('rollup_idv_count', models.IntegerField()), ('rollup_contract_count', models.IntegerField()), ('rollup_total_obligation', models.DecimalField(decimal_places=2, max_digits=23)), ('rollup_base_and_all_options_value', models.DecimalField(decimal_places=2, max_digits=23)), ('rollup_base_exercised_options_val', models.DecimalField(decimal_places=2, max_digits=23)), ('parent_award', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='awards.ParentAward'))], options={'db_table': 'parent_award', 'managed': True}), migrations.CreateModel(name='TransactionDelta', fields=[('transaction', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, 
to='awards.TransactionNormalized')), ('created_at', models.DateTimeField())], options={'db_table': 'transaction_delta'}), migrations.CreateModel(name='TransactionFABS', fields=[('transaction', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='assistance_data', serialize=False, to='awards.TransactionNormalized')), ('published_award_financial_assistance_id', models.IntegerField(blank=True, db_index=True, null=True)), ('afa_generated_unique', models.TextField(db_index=True, unique=True)), ('action_date', models.TextField(blank=True, null=True)), ('action_type', models.TextField(blank=True, null=True)), ('action_type_description', models.TextField(blank=True, null=True)), ('assistance_type', models.TextField(blank=True, null=True)), ('assistance_type_desc', models.TextField(blank=True, null=True)), ('award_description', models.TextField(blank=True, null=True)), ('awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('awarding_agency_code', models.TextField(blank=True, null=True)), ('awarding_agency_name', models.TextField(blank=True, null=True)), ('awarding_office_code', models.TextField(blank=True, null=True)), ('awarding_office_name', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_c', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_n', models.TextField(blank=True, null=True)), ('award_modification_amendme', models.TextField(blank=True, null=True)), ('business_funds_indicator', models.TextField(blank=True, null=True)), ('business_funds_ind_desc', models.TextField(blank=True, null=True)), ('business_types', models.TextField(blank=True, null=True)), ('business_types_desc', models.TextField(blank=True, null=True)), ('cfda_number', models.TextField(blank=True, null=True)), ('cfda_title', models.TextField(blank=True, null=True)), ('correction_delete_indicatr', models.TextField(blank=True, 
null=True)), ('correction_delete_ind_desc', models.TextField(blank=True, null=True)), ('face_value_loan_guarantee', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('fain', models.TextField(blank=True, db_index=True, null=True)), ('federal_action_obligation', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('fiscal_year_and_quarter_co', models.TextField(blank=True, null=True)), ('funding_agency_code', models.TextField(blank=True, null=True)), ('funding_agency_name', models.TextField(blank=True, null=True)), ('funding_office_code', models.TextField(blank=True, null=True)), ('funding_office_name', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_co', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_na', models.TextField(blank=True, null=True)), ('is_active', models.BooleanField(default=False)), ('is_historical', models.BooleanField(null=True, blank=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_line3', models.TextField(blank=True, null=True)), ('legal_entity_city_name', models.TextField(blank=True, null=True)), ('legal_entity_city_code', models.TextField(blank=True, null=True)), ('legal_entity_foreign_descr', models.TextField(blank=True, null=True)), ('legal_entity_congressional', models.TextField(blank=True, null=True)), ('legal_entity_country_code', models.TextField(blank=True, null=True)), ('legal_entity_country_name', models.TextField(blank=True, null=True)), ('legal_entity_county_code', models.TextField(blank=True, null=True)), ('legal_entity_county_name', models.TextField(blank=True, null=True)), ('legal_entity_foreign_city', models.TextField(blank=True, null=True)), ('legal_entity_foreign_posta', models.TextField(blank=True, null=True)), ('legal_entity_foreign_provi', models.TextField(blank=True, null=True)), ('legal_entity_state_code', 
models.TextField(blank=True, null=True)), ('legal_entity_state_name', models.TextField(blank=True, null=True)), ('legal_entity_zip5', models.TextField(blank=True, null=True)), ('legal_entity_zip_last4', models.TextField(blank=True, null=True)), ('modified_at', models.DateTimeField(blank=True, null=True)), ('non_federal_funding_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('original_loan_subsidy_cost', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('period_of_performance_curr', models.TextField(blank=True, null=True)), ('period_of_performance_star', models.TextField(blank=True, null=True)), ('place_of_performance_city', models.TextField(blank=True, null=True)), ('place_of_performance_code', models.TextField(blank=True, null=True)), ('place_of_performance_congr', models.TextField(blank=True, null=True)), ('place_of_perform_country_c', models.TextField(blank=True, null=True)), ('place_of_perform_country_n', models.TextField(blank=True, null=True)), ('place_of_perform_county_co', models.TextField(blank=True, null=True)), ('place_of_perform_county_na', models.TextField(blank=True, null=True)), ('place_of_performance_forei', models.TextField(blank=True, null=True)), ('place_of_perform_state_nam', models.TextField(blank=True, null=True)), ('place_of_perfor_state_code', models.TextField(blank=True, null=True)), ('place_of_performance_zip4a', models.TextField(blank=True, null=True)), ('place_of_performance_zip5', models.TextField(blank=True, null=True)), ('place_of_perform_zip_last4', models.TextField(blank=True, null=True)), ('record_type', models.IntegerField(blank=True, null=True)), ('record_type_description', models.TextField(blank=True, null=True)), ('sai_number', models.TextField(blank=True, null=True)), ('total_funding_amount', models.TextField(blank=True, null=True)), ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('ultimate_parent_unique_ide', 
models.TextField(blank=True, null=True)), ('uri', models.TextField(blank=True, db_index=True, null=True)), ('submission_id', models.IntegerField(blank=True, null=True)), ('unique_award_key', models.TextField(db_index=True, null=True)), ('officer_1_name', models.TextField(blank=True, null=True)), ('officer_1_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_2_name', models.TextField(blank=True, null=True)), ('officer_2_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_3_name', models.TextField(blank=True, null=True)), ('officer_3_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_4_name', models.TextField(blank=True, null=True)), ('officer_4_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_5_name', models.TextField(blank=True, null=True)), ('officer_5_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('created_at', models.DateTimeField(blank=True, null=True)), ('updated_at', models.DateTimeField(blank=True, db_index=True, null=True))], options={'db_table': 'transaction_fabs'}), migrations.CreateModel(name='TransactionFPDS', fields=[('transaction', models.OneToOneField(help_text='Non-specific transaction data, fields shared among both assistance and contract transactions', on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='contract_data', serialize=False, to='awards.TransactionNormalized')), ('detached_award_procurement_id', models.IntegerField(blank=True, db_index=True, null=True)), ('detached_award_proc_unique', models.TextField(null=True, unique=True)), ('piid', models.TextField(blank=True, db_index=True, null=True)), ('agency_id', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_c', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_n', models.TextField(blank=True, null=True)), 
('awarding_agency_code', models.TextField(blank=True, null=True)), ('awarding_agency_name', models.TextField(blank=True, null=True)), ('parent_award_id', models.TextField(blank=True, db_index=True, null=True)), ('award_modification_amendme', models.TextField(blank=True, null=True)), ('type_of_contract_pricing', models.TextField(blank=True, db_index=True, null=True)), ('type_of_contract_pric_desc', models.TextField(blank=True, null=True)), ('contract_award_type', models.TextField(blank=True, null=True)), ('contract_award_type_desc', models.TextField(blank=True, null=True)), ('naics', models.TextField(blank=True, db_index=True, null=True)), ('naics_description', models.TextField(blank=True, null=True)), ('awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)), ('award_description', models.TextField(blank=True, null=True)), ('place_of_performance_zip4a', models.TextField(blank=True, null=True)), ('place_of_performance_zip5', models.TextField(blank=True, null=True)), ('place_of_perform_zip_last4', models.TextField(blank=True, null=True)), ('place_of_perform_city_name', models.TextField(blank=True, null=True)), ('place_of_perform_county_na', models.TextField(blank=True, null=True)), ('place_of_perform_county_co', models.TextField(blank=True, null=True)), ('place_of_performance_congr', models.TextField(blank=True, null=True)), ('awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('legal_entity_city_name', models.TextField(blank=True, null=True)), ('legal_entity_state_code', models.TextField(blank=True, null=True)), ('legal_entity_state_descrip', models.TextField(blank=True, null=True)), ('legal_entity_county_code', models.TextField(blank=True, null=True)), ('legal_entity_county_name', models.TextField(blank=True, null=True)), ('legal_entity_zip4', models.TextField(blank=True, null=True)), 
('legal_entity_zip5', models.TextField(blank=True, null=True)), ('legal_entity_zip_last4', models.TextField(blank=True, null=True)), ('legal_entity_congressional', models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_line3', models.TextField(blank=True, null=True)), ('legal_entity_country_code', models.TextField(blank=True, null=True)), ('legal_entity_country_name', models.TextField(blank=True, null=True)), ('period_of_performance_star', models.TextField(blank=True, null=True)), ('period_of_performance_curr', models.TextField(blank=True, null=True)), ('period_of_perf_potential_e', models.TextField(blank=True, null=True)), ('ordering_period_end_date', models.TextField(blank=True, null=True)), ('action_date', models.TextField(blank=True, null=True)), ('action_type', models.TextField(blank=True, null=True)), ('action_type_description', models.TextField(blank=True, null=True)), ('federal_action_obligation', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('current_total_value_award', models.TextField(blank=True, null=True)), ('potential_total_value_awar', models.TextField(blank=True, null=True)), ('total_obligated_amount', models.TextField(blank=True, null=True)), ('base_exercised_options_val', models.TextField(blank=True, null=True)), ('base_and_all_options_value', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_co', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_na', models.TextField(blank=True, null=True)), ('funding_office_code', models.TextField(blank=True, null=True)), ('funding_office_name', models.TextField(blank=True, null=True)), ('awarding_office_code', models.TextField(blank=True, null=True)), ('awarding_office_name', models.TextField(blank=True, null=True)), ('referenced_idv_agency_iden', models.TextField(blank=True, null=True)), 
('referenced_idv_agency_desc', models.TextField(blank=True, null=True)), ('funding_agency_code', models.TextField(blank=True, null=True)), ('funding_agency_name', models.TextField(blank=True, null=True)), ('place_of_performance_locat', models.TextField(blank=True, null=True)), ('place_of_performance_state', models.TextField(blank=True, null=True)), ('place_of_perfor_state_desc', models.TextField(blank=True, null=True)), ('place_of_perform_country_c', models.TextField(blank=True, null=True)), ('place_of_perf_country_desc', models.TextField(blank=True, null=True)), ('idv_type', models.TextField(blank=True, null=True)), ('idv_type_description', models.TextField(blank=True, null=True)), ('award_or_idv_flag', models.TextField(blank=True, null=True)), ('referenced_idv_type', models.TextField(blank=True, null=True)), ('referenced_idv_type_desc', models.TextField(blank=True, null=True)), ('vendor_doing_as_business_n', models.TextField(blank=True, null=True)), ('vendor_phone_number', models.TextField(blank=True, null=True)), ('vendor_fax_number', models.TextField(blank=True, null=True)), ('multiple_or_single_award_i', models.TextField(blank=True, null=True)), ('multiple_or_single_aw_desc', models.TextField(blank=True, null=True)), ('referenced_mult_or_single', models.TextField(blank=True, null=True)), ('referenced_mult_or_si_desc', models.TextField(blank=True, null=True)), ('type_of_idc', models.TextField(blank=True, null=True)), ('type_of_idc_description', models.TextField(blank=True, null=True)), ('a_76_fair_act_action', models.TextField(blank=True, null=True)), ('a_76_fair_act_action_desc', models.TextField(blank=True, null=True)), ('dod_claimant_program_code', models.TextField(blank=True, null=True)), ('dod_claimant_prog_cod_desc', models.TextField(blank=True, null=True)), ('clinger_cohen_act_planning', models.TextField(blank=True, null=True)), ('clinger_cohen_act_pla_desc', models.TextField(blank=True, null=True)), ('commercial_item_acquisitio', 
models.TextField(blank=True, null=True)), ('commercial_item_acqui_desc', models.TextField(blank=True, null=True)), ('commercial_item_test_progr', models.TextField(blank=True, null=True)), ('commercial_item_test_desc', models.TextField(blank=True, null=True)), ('consolidated_contract', models.TextField(blank=True, null=True)), ('consolidated_contract_desc', models.TextField(blank=True, null=True)), ('contingency_humanitarian_o', models.TextField(blank=True, null=True)), ('contingency_humanitar_desc', models.TextField(blank=True, null=True)), ('contract_bundling', models.TextField(blank=True, null=True)), ('contract_bundling_descrip', models.TextField(blank=True, null=True)), ('contract_financing', models.TextField(blank=True, null=True)), ('contract_financing_descrip', models.TextField(blank=True, null=True)), ('contracting_officers_deter', models.TextField(blank=True, null=True)), ('contracting_officers_desc', models.TextField(blank=True, null=True)), ('cost_accounting_standards', models.TextField(blank=True, null=True)), ('cost_accounting_stand_desc', models.TextField(blank=True, null=True)), ('cost_or_pricing_data', models.TextField(blank=True, null=True)), ('cost_or_pricing_data_desc', models.TextField(blank=True, null=True)), ('country_of_product_or_serv', models.TextField(blank=True, null=True)), ('country_of_product_or_desc', models.TextField(blank=True, null=True)), ('construction_wage_rate_req', models.TextField(blank=True, null=True)), ('construction_wage_rat_desc', models.TextField(blank=True, null=True)), ('evaluated_preference', models.TextField(blank=True, null=True)), ('evaluated_preference_desc', models.TextField(blank=True, null=True)), ('extent_competed', models.TextField(blank=True, db_index=True, null=True)), ('extent_compete_description', models.TextField(blank=True, null=True)), ('fed_biz_opps', models.TextField(blank=True, null=True)), ('fed_biz_opps_description', models.TextField(blank=True, null=True)), ('foreign_funding', 
models.TextField(blank=True, null=True)), ('foreign_funding_desc', models.TextField(blank=True, null=True)), ('government_furnished_prope', models.TextField(blank=True, null=True)), ('government_furnished_desc', models.TextField(blank=True, null=True)), ('information_technology_com', models.TextField(blank=True, null=True)), ('information_technolog_desc', models.TextField(blank=True, null=True)), ('interagency_contracting_au', models.TextField(blank=True, null=True)), ('interagency_contract_desc', models.TextField(blank=True, null=True)), ('local_area_set_aside', models.TextField(blank=True, null=True)), ('local_area_set_aside_desc', models.TextField(blank=True, null=True)), ('major_program', models.TextField(blank=True, null=True)), ('purchase_card_as_payment_m', models.TextField(blank=True, null=True)), ('purchase_card_as_paym_desc', models.TextField(blank=True, null=True)), ('multi_year_contract', models.TextField(blank=True, null=True)), ('multi_year_contract_desc', models.TextField(blank=True, null=True)), ('national_interest_action', models.TextField(blank=True, null=True)), ('national_interest_desc', models.TextField(blank=True, null=True)), ('number_of_actions', models.TextField(blank=True, null=True)), ('number_of_offers_received', models.TextField(blank=True, null=True)), ('other_statutory_authority', models.TextField(blank=True, null=True)), ('performance_based_service', models.TextField(blank=True, null=True)), ('performance_based_se_desc', models.TextField(blank=True, null=True)), ('place_of_manufacture', models.TextField(blank=True, null=True)), ('place_of_manufacture_desc', models.TextField(blank=True, null=True)), ('price_evaluation_adjustmen', models.TextField(blank=True, null=True)), ('product_or_service_code', models.TextField(blank=True, db_index=True, null=True)), ('product_or_service_co_desc', models.TextField(blank=True, null=True)), ('program_acronym', models.TextField(blank=True, null=True)), ('other_than_full_and_open_c', 
models.TextField(blank=True, null=True)), ('other_than_full_and_o_desc', models.TextField(blank=True, null=True)), ('recovered_materials_sustai', models.TextField(blank=True, null=True)), ('recovered_materials_s_desc', models.TextField(blank=True, null=True)), ('research', models.TextField(blank=True, null=True)), ('research_description', models.TextField(blank=True, null=True)), ('sea_transportation', models.TextField(blank=True, null=True)), ('sea_transportation_desc', models.TextField(blank=True, null=True)), ('labor_standards', models.TextField(blank=True, null=True)), ('labor_standards_descrip', models.TextField(blank=True, null=True)), ('small_business_competitive', models.BooleanField(null=True, blank=True)), ('solicitation_identifier', models.TextField(blank=True, null=True)), ('solicitation_procedures', models.TextField(blank=True, null=True)), ('solicitation_procedur_desc', models.TextField(blank=True, null=True)), ('fair_opportunity_limited_s', models.TextField(blank=True, null=True)), ('fair_opportunity_limi_desc', models.TextField(blank=True, null=True)), ('subcontracting_plan', models.TextField(blank=True, null=True)), ('subcontracting_plan_desc', models.TextField(blank=True, null=True)), ('program_system_or_equipmen', models.TextField(blank=True, null=True)), ('program_system_or_equ_desc', models.TextField(blank=True, null=True)), ('type_set_aside', models.TextField(blank=True, db_index=True, null=True)), ('type_set_aside_description', models.TextField(blank=True, null=True)), ('epa_designated_product', models.TextField(blank=True, null=True)), ('epa_designated_produc_desc', models.TextField(blank=True, null=True)), ('materials_supplies_article', models.TextField(blank=True, null=True)), ('materials_supplies_descrip', models.TextField(blank=True, null=True)), ('transaction_number', models.TextField(blank=True, null=True)), ('sam_exception', models.TextField(blank=True, null=True)), ('sam_exception_description', models.TextField(blank=True, 
null=True)), ('city_local_government', models.BooleanField(null=True, blank=True)), ('county_local_government', models.BooleanField(null=True, blank=True)), ('inter_municipal_local_gove', models.BooleanField(null=True, blank=True)), ('local_government_owned', models.BooleanField(null=True, blank=True)), ('municipality_local_governm', models.BooleanField(null=True, blank=True)), ('school_district_local_gove', models.BooleanField(null=True, blank=True)), ('township_local_government', models.BooleanField(null=True, blank=True)), ('us_state_government', models.BooleanField(null=True, blank=True)), ('us_federal_government', models.BooleanField(null=True, blank=True)), ('federal_agency', models.BooleanField(null=True, blank=True)), ('federally_funded_research', models.BooleanField(null=True, blank=True)), ('us_tribal_government', models.BooleanField(null=True, blank=True)), ('foreign_government', models.BooleanField(null=True, blank=True)), ('community_developed_corpor', models.BooleanField(null=True, blank=True)), ('labor_surplus_area_firm', models.BooleanField(null=True, blank=True)), ('corporate_entity_not_tax_e', models.BooleanField(null=True, blank=True)), ('corporate_entity_tax_exemp', models.BooleanField(null=True, blank=True)), ('partnership_or_limited_lia', models.BooleanField(null=True, blank=True)), ('sole_proprietorship', models.BooleanField(null=True, blank=True)), ('small_agricultural_coopera', models.BooleanField(null=True, blank=True)), ('international_organization', models.BooleanField(null=True, blank=True)), ('us_government_entity', models.BooleanField(null=True, blank=True)), ('emerging_small_business', models.BooleanField(null=True, blank=True)), ('c8a_program_participant', models.BooleanField(null=True, blank=True)), ('sba_certified_8_a_joint_ve', models.BooleanField(null=True, blank=True)), ('dot_certified_disadvantage', models.BooleanField(null=True, blank=True)), ('self_certified_small_disad', models.BooleanField(null=True, blank=True)), 
('historically_underutilized', models.BooleanField(null=True, blank=True)), ('small_disadvantaged_busine', models.BooleanField(null=True, blank=True)), ('the_ability_one_program', models.BooleanField(null=True, blank=True)), ('historically_black_college', models.BooleanField(null=True, blank=True)), ('c1862_land_grant_college', models.BooleanField(null=True, blank=True)), ('c1890_land_grant_college', models.BooleanField(null=True, blank=True)), ('c1994_land_grant_college', models.BooleanField(null=True, blank=True)), ('minority_institution', models.BooleanField(null=True, blank=True)), ('private_university_or_coll', models.BooleanField(null=True, blank=True)), ('school_of_forestry', models.BooleanField(null=True, blank=True)), ('state_controlled_instituti', models.BooleanField(null=True, blank=True)), ('tribal_college', models.BooleanField(null=True, blank=True)), ('veterinary_college', models.BooleanField(null=True, blank=True)), ('educational_institution', models.BooleanField(null=True, blank=True)), ('alaskan_native_servicing_i', models.BooleanField(null=True, blank=True)), ('community_development_corp', models.BooleanField(null=True, blank=True)), ('native_hawaiian_servicing', models.BooleanField(null=True, blank=True)), ('domestic_shelter', models.BooleanField(null=True, blank=True)), ('manufacturer_of_goods', models.BooleanField(null=True, blank=True)), ('hospital_flag', models.BooleanField(null=True, blank=True)), ('veterinary_hospital', models.BooleanField(null=True, blank=True)), ('hispanic_servicing_institu', models.BooleanField(null=True, blank=True)), ('foundation', models.BooleanField(null=True, blank=True)), ('woman_owned_business', models.BooleanField(null=True, blank=True)), ('minority_owned_business', models.BooleanField(null=True, blank=True)), ('women_owned_small_business', models.BooleanField(null=True, blank=True)), ('economically_disadvantaged', models.BooleanField(null=True, blank=True)), ('joint_venture_women_owned', 
models.BooleanField(null=True, blank=True)), ('joint_venture_economically', models.BooleanField(null=True, blank=True)), ('veteran_owned_business', models.BooleanField(null=True, blank=True)), ('service_disabled_veteran_o', models.BooleanField(null=True, blank=True)), ('contracts', models.BooleanField(null=True, blank=True)), ('grants', models.BooleanField(null=True, blank=True)), ('receives_contracts_and_gra', models.BooleanField(null=True, blank=True)), ('airport_authority', models.BooleanField(null=True, blank=True)), ('council_of_governments', models.BooleanField(null=True, blank=True)), ('housing_authorities_public', models.BooleanField(null=True, blank=True)), ('interstate_entity', models.BooleanField(null=True, blank=True)), ('planning_commission', models.BooleanField(null=True, blank=True)), ('port_authority', models.BooleanField(null=True, blank=True)), ('transit_authority', models.BooleanField(null=True, blank=True)), ('subchapter_s_corporation', models.BooleanField(null=True, blank=True)), ('limited_liability_corporat', models.BooleanField(null=True, blank=True)), ('foreign_owned_and_located', models.BooleanField(null=True, blank=True)), ('american_indian_owned_busi', models.BooleanField(null=True, blank=True)), ('alaskan_native_owned_corpo', models.BooleanField(null=True, blank=True)), ('indian_tribe_federally_rec', models.BooleanField(null=True, blank=True)), ('native_hawaiian_owned_busi', models.BooleanField(null=True, blank=True)), ('tribally_owned_business', models.BooleanField(null=True, blank=True)), ('asian_pacific_american_own', models.BooleanField(null=True, blank=True)), ('black_american_owned_busin', models.BooleanField(null=True, blank=True)), ('hispanic_american_owned_bu', models.BooleanField(null=True, blank=True)), ('native_american_owned_busi', models.BooleanField(null=True, blank=True)), ('subcontinent_asian_asian_i', models.BooleanField(null=True, blank=True)), ('other_minority_owned_busin', models.BooleanField(null=True, blank=True)), 
('for_profit_organization', models.BooleanField(null=True, blank=True)), ('nonprofit_organization', models.BooleanField(null=True, blank=True)), ('other_not_for_profit_organ', models.BooleanField(null=True, blank=True)), ('us_local_government', models.BooleanField(null=True, blank=True)), ('referenced_idv_modificatio', models.TextField(blank=True, null=True)), ('undefinitized_action', models.TextField(blank=True, null=True)), ('undefinitized_action_desc', models.TextField(blank=True, null=True)), ('domestic_or_foreign_entity', models.TextField(blank=True, null=True)), ('domestic_or_foreign_e_desc', models.TextField(blank=True, null=True)), ('annual_revenue', models.TextField(blank=True, null=True)), ('division_name', models.TextField(blank=True, null=True)), ('division_number_or_office', models.TextField(blank=True, null=True)), ('number_of_employees', models.TextField(blank=True, null=True)), ('vendor_alternate_name', models.TextField(blank=True, null=True)), ('vendor_alternate_site_code', models.TextField(blank=True, null=True)), ('vendor_enabled', models.TextField(blank=True, null=True)), ('vendor_legal_org_name', models.TextField(blank=True, null=True)), ('vendor_location_disabled_f', models.TextField(blank=True, null=True)), ('vendor_site_code', models.TextField(blank=True, null=True)), ('pulled_from', models.TextField(blank=True, null=True)), ('last_modified', models.TextField(blank=True, null=True)), ('initial_report_date', models.TextField(blank=True, null=True)), ('cage_code', models.TextField(blank=True, null=True)), ('inherently_government_func', models.TextField(blank=True, null=True)), ('inherently_government_desc', models.TextField(blank=True, null=True)), ('organizational_type', models.TextField(blank=True, null=True)), ('referenced_idv_agency_name', models.TextField(blank=True, null=True)), ('referenced_multi_or_single', models.TextField(blank=True, null=True)), ('place_of_perform_country_n', models.TextField(blank=True, null=True)), 
('place_of_perform_state_nam', models.TextField(blank=True, null=True)), ('unique_award_key', models.TextField(db_index=True, null=True)), ('solicitation_date', models.DateField(blank=True, null=True)), ('officer_1_name', models.TextField(blank=True, null=True)), ('officer_1_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_2_name', models.TextField(blank=True, null=True)), ('officer_2_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_3_name', models.TextField(blank=True, null=True)), ('officer_3_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_4_name', models.TextField(blank=True, null=True)), ('officer_4_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('officer_5_name', models.TextField(blank=True, null=True)), ('officer_5_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)), ('created_at', models.DateTimeField(blank=True, null=True)), ('updated_at', models.DateTimeField(blank=True, db_index=True, null=True))], options={'db_table': 'transaction_fpds'}), migrations.AddField(model_name='transactionnormalized', name='award', field=models.ForeignKey(help_text='The award which this transaction is contained in', on_delete=django.db.models.deletion.DO_NOTHING, to='awards.Award')), migrations.AddField(model_name='transactionnormalized', name='awarding_agency', field=models.ForeignKey(help_text='The agency which awarded this transaction', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='awards_transactionnormalized_awarding_agency', to='references.Agency')), migrations.AddField(model_name='transactionnormalized', name='funding_agency', field=models.ForeignKey(help_text='The agency which is funding this transaction', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='awards_transactionnormalized_funding_agency', 
to='references.Agency')), migrations.AddField(model_name='transactionnormalized', name='place_of_performance', field=models.ForeignKey(help_text='The location where the work on this transaction was performed', null=True, on_delete=django.db.models.deletion.CASCADE, to='references.Location')), migrations.AddField(model_name='transactionnormalized', name='recipient', field=models.ForeignKey(help_text='The recipient for this transaction', null=True, on_delete=django.db.models.deletion.CASCADE, to='references.LegalEntity')), migrations.AddField(model_name='subaward', name='award', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subawards', to='awards.Award')), migrations.AddField(model_name='subaward', name='awarding_agency', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='awarding_subawards', to='references.Agency')), migrations.AddField(model_name='subaward', name='cfda', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='related_subawards', to='references.Cfda')), migrations.AddField(model_name='subaward', name='funding_agency', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='funding_subawards', to='references.Agency')), migrations.AddField(model_name='financialaccountsbyawards', name='award', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='financial_set', to='awards.Award')), migrations.AddField(model_name='financialaccountsbyawards', name='object_class', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='references.ObjectClass')), migrations.AddField(model_name='financialaccountsbyawards', name='program_activity', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='references.RefProgramActivity')), migrations.AddField(model_name='financialaccountsbyawards', 
name='submission', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')), migrations.AddField(model_name='financialaccountsbyawards', name='treasury_account', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.TreasuryAppropriationAccount')), migrations.AddField(model_name='award', name='awarding_agency', field=models.ForeignKey(help_text='The awarding agency for the award', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='references.Agency')), migrations.AddField(model_name='award', name='earliest_transaction', field=models.ForeignKey(help_text='The earliest transaction by action_date and mod associated with this award', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='earliest_for_award', to='awards.TransactionNormalized')), migrations.AddField(model_name='award', name='funding_agency', field=models.ForeignKey(help_text='The funding agency for the award', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='references.Agency')), migrations.AddField(model_name='award', name='latest_transaction', field=models.ForeignKey(help_text='The latest transaction by action_date and mod associated with this award', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='latest_for_award', to='awards.TransactionNormalized')), migrations.AddField(model_name='award', name='place_of_performance', field=models.ForeignKey(help_text='The principal place of business, where the majority of the work is performed. 
For example, in a manufacturing contract, this would be the main plant where items are produced.', null=True, on_delete=django.db.models.deletion.CASCADE, to='references.Location')), migrations.AddField(model_name='award', name='recipient', field=models.ForeignKey(help_text='The recipient of the award', null=True, on_delete=django.db.models.deletion.CASCADE, to='references.LegalEntity')), migrations.AlterIndexTogether(name='transactionnormalized', index_together=set([('award', 'action_date')])), migrations.AlterUniqueTogether(name='transactionfabs', unique_together=set([('awarding_sub_tier_agency_c', 'award_modification_amendme', 'fain', 'uri', 'cfda_number')])), migrations.AddIndex(model_name='award', index=models.Index(fields=['-update_date'], name='awards_update_date_desc_idx')), migrations.AddIndex(model_name='award', index=models.Index(fields=['generated_unique_award_id'], name='award_unique_id')), migrations.RunSQL(sql=['CREATE INDEX awards_piid_uppr_idx ON awards (UPPER(piid))', 'CREATE INDEX awards_fain_uppr_idx ON awards (UPPER(fain))', 'CREATE INDEX awards_uri_uppr_idx ON awards (UPPER(uri))', "CREATE INDEX modified_generated_unique_award_id_awards_idx ON awards (REPLACE(generated_unique_award_id, '-', ''))", "CREATE INDEX modified_fain_awards_idx ON awards (REPLACE(fain, '-', ''))", 'CREATE INDEX faba_fain_and_uri ON financial_accounts_by_awards(fain, uri, award_id) WHERE fain IS NOT NULL AND uri IS NOT NULL AND award_id IS NULL', 'CREATE INDEX faba_fain ON financial_accounts_by_awards(fain, uri, award_id) WHERE fain IS NOT NULL AND uri IS NULL AND award_id IS NULL', 'CREATE INDEX faba_uri ON financial_accounts_by_awards(fain, uri, award_id) WHERE fain IS NULL AND uri IS NOT NULL AND award_id IS NULL', 'CREATE INDEX faba_piid ON financial_accounts_by_awards(piid, award_id) WHERE piid IS NOT NULL AND award_id IS NULL', 'DROP TABLE IF EXISTS award_category'])] |
class OptionPlotoptionsStreamgraphSonificationTracksMappingVolume(Options):
    # Generated option wrapper for the Highcharts path
    # `plotOptions.streamgraph.sonification.tracks.mapping.volume`.
    # NOTE(review): every option below appears twice with the same name
    # (getter form, then setter form). The upstream generator presumably
    # emitted `@property` / `@<name>.setter` decorators that were stripped
    # from this copy — confirm against the original source; as written, the
    # second def shadows the first.
    def mapFunction(self):
        # Getter: current `mapFunction` value (None when unset).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: store `value` as a plain (non-JS) config entry.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter: current `mapTo` value (None when unset).
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter: data property or path the volume is mapped to.
        self._config(text, js_type=False)
    def max(self):
        # Getter: current `max` value (None when unset).
        return self._config_get(None)
    def max(self, num: float):
        # Setter: upper bound of the mapped volume range.
        self._config(num, js_type=False)
    def min(self):
        # Getter: current `min` value (None when unset).
        return self._config_get(None)
    def min(self, num: float):
        # Setter: lower bound of the mapped volume range.
        self._config(num, js_type=False)
    def within(self):
        # Getter: current `within` value (None when unset).
        return self._config_get(None)
    def within(self, value: Any):
        # Setter: range the mapping operates within.
        self._config(value, js_type=False)
class DecoratedGraph():
    """Wrapper around a `DiGraph` offering DOT, ASCII-art and image export.

    Rendering relies on two external tools being installed:
    `graph-easy` (ASCII output) and Graphviz `dot` (image output).
    """

    def __init__(self, graph: DiGraph=None):
        # Fall back to a fresh empty graph when none is supplied.
        self._graph = (graph if graph else DiGraph())

    def graph(self) -> DiGraph:
        """Return the wrapped `DiGraph` instance."""
        return self._graph

    def _write_dot(self, handle: TextIO):
        # Serialize the wrapped graph in DOT format into the given handle.
        handle.write(ToDotConverter.write(self._graph))

    def export_ascii(self) -> str:
        """Render the graph as ASCII art via the external `graph-easy` tool."""
        if (not GRAPH_EASY_INSTALLED):
            # Plain string: the original used an f-string with no placeholders.
            warning('Invoking graph-easy although it seems like it is not installed on the system.')
        with CloseableNamedTemporaryFile(mode='w', encoding='utf-8') as file:
            self._write_dot(file)
            file.close()  # flush and release so the subprocess can read the file
            result: CompletedProcess = run(['graph-easy', '--as=ascii', file.name], capture_output=True)
            # NOTE(review): unlike export_plot, the return code is not checked
            # here — a failing graph-easy run silently yields an empty string.
            return result.stdout.decode('utf-8')

    def export_dot(self, path: str):
        """Write the graph in DOT format to `path`."""
        with open(path, 'w', encoding='utf-8') as outfile:
            self._write_dot(outfile)

    def export_plot(self, path: str, type='png'):
        """Render the graph to an image file at `path` using Graphviz `dot`.

        Raises ValueError when `dot` exits with a non-zero status.
        """
        with CloseableNamedTemporaryFile(mode='w', encoding='utf-8') as file:
            self._write_dot(file)
            file.close()  # flush and release so the subprocess can read the file
            result = run(['dot', f'-T{type}', f'-o{path}', f'{file.name}'], capture_output=True)
            if result.returncode:
                # Bug fix: the original message was missing the closing
                # parenthesis after the captured stderr text.
                raise ValueError(f"Could not plot graph! ({result.stderr.decode('utf-8')})")
class OptionPlotoptionsColumnrangePointEvents(Options):
    # Generated option wrapper for the Highcharts path
    # `plotOptions.columnrange.point.events` — JavaScript callbacks fired on
    # point interactions.
    # NOTE(review): every event below appears twice with the same name
    # (getter form, then setter form). The upstream generator presumably
    # emitted `@property` / `@<name>.setter` decorators that were stripped
    # from this copy — confirm against the original source; as written, the
    # second def shadows the first.
    def click(self):
        # Getter: handler for point click (None when unset).
        return self._config_get(None)
    def click(self, value: Any):
        # Setter: callback fired when the point is clicked.
        self._config(value, js_type=False)
    def drag(self):
        # Getter: handler for point drag (None when unset).
        return self._config_get(None)
    def drag(self, value: Any):
        # Setter: callback fired while the point is dragged.
        self._config(value, js_type=False)
    def dragStart(self):
        # Getter: handler for drag start (None when unset).
        return self._config_get(None)
    def dragStart(self, value: Any):
        # Setter: callback fired when dragging of the point starts.
        self._config(value, js_type=False)
    def drop(self):
        # Getter: handler for drop (None when unset).
        return self._config_get(None)
    def drop(self, value: Any):
        # Setter: callback fired when the dragged point is dropped.
        self._config(value, js_type=False)
    def mouseOut(self):
        # Getter: handler for mouse-out (None when unset).
        return self._config_get(None)
    def mouseOut(self, value: Any):
        # Setter: callback fired when the pointer leaves the point.
        self._config(value, js_type=False)
    def mouseOver(self):
        # Getter: handler for mouse-over (None when unset).
        return self._config_get(None)
    def mouseOver(self, value: Any):
        # Setter: callback fired when the pointer enters the point.
        self._config(value, js_type=False)
    def remove(self):
        # Getter: handler for point removal (None when unset).
        return self._config_get(None)
    def remove(self, value: Any):
        # Setter: callback fired when the point is removed.
        self._config(value, js_type=False)
    def select(self):
        # Getter: handler for point selection (None when unset).
        return self._config_get(None)
    def select(self, value: Any):
        # Setter: callback fired when the point is selected.
        self._config(value, js_type=False)
    def unselect(self):
        # Getter: handler for point unselection (None when unset).
        return self._config_get(None)
    def unselect(self, value: Any):
        # Setter: callback fired when the point is unselected.
        self._config(value, js_type=False)
    def update(self):
        # Getter: handler for point update (None when unset).
        return self._config_get(None)
    def update(self, value: Any):
        # Setter: callback fired when the point is updated.
        self._config(value, js_type=False)
class MessageFormatError(Exception):
    """Raised when a received message does not match the expected format.

    The human-readable message is assembled from the optional pieces:

    attribute -- name of the offending message attribute
    expected / found -- reported together as a value mismatch
                        (both must be non-None to appear)
    desc -- free-form description appended at the end
    """
    def __init__(self, attribute=None, desc=None, expected=None, found=None):
        message = 'Invalid message received.'
        if attribute:
            message += ' Invalid message attribute {}.'.format(attribute)
        if ((expected is not None) and (found is not None)):
            message += " '{}' expected, found '{}'.".format(expected, found)
        if desc:
            message += (' ' + desc)
        # super() instead of the explicit `Exception.__init__(self, ...)` call:
        # same behavior, but cooperates correctly with multiple inheritance.
        super().__init__(message)
def validate_message_type(message, expected):
    """Check that `message` carries a `message_type` equal to `expected`.

    Raises MessageFormatError when the attribute is missing, when the
    message reports an error, or when the type differs from `expected`.
    Returns None on success.
    """
    if ('message_type' not in message):
        raise MessageFormatError(attribute='message_type', desc='Attribute is missing')
    received_type = message['message_type']
    if (received_type == 'error'):
        # The remote side signalled an error; surface its detail if present.
        detail = ("Error received from the remote host: '{}'".format(message['error'])
                  if ('error' in message)
                  else 'Unknown error received from the remote host')
        raise MessageFormatError(desc=detail)
    if (received_type != expected):
        raise MessageFormatError(attribute='message_type', expected=expected, found=received_type)
def selectionKDE(subtitlesResultList):
    """Let the user pick a subtitle via a KDE `kdialog` menu.

    Builds a kdialog --menu command listing every subtitle search result and
    returns (selected file name, selected index); index is -1 and name is ''
    when the user cancels.

    Relies on module-level globals: videoTitle, videoFileName,
    opt_gui_width, opt_gui_height.
    """
    subtitlesSelectedName = u''
    subtitlesSelectedIndex = (- 1)
    subtitlesItems = u''
    subtitlesMatchedByHash = 0
    subtitlesMatchedByName = 0
    index = 0
    # Count hash vs name matches and build the "<index> \"<name>\"" menu items.
    for item in subtitlesResultList['data']:
        # NOTE(review): compares against the *string* 'True' — presumably the
        # API returns the flag as a string; confirm against the response schema.
        if (item['attributes']['moviehash_match'] == 'True'):
            subtitlesMatchedByHash += 1
        else:
            subtitlesMatchedByName += 1
        subtitlesItems += (((str(index) + ' "') + item['attributes']['files'][0]['file_name']) + '" ')
        index += 1
    # Pick title/menu text depending on how the results were matched:
    # all-by-hash, all-by-name, or a mix of both.
    if (subtitlesMatchedByName == 0):
        tilestr = ((' --title="Subtitles for ' + videoTitle) + '"')
        menustr = ((((' --menu="<b>Video title:</b> ' + videoTitle) + '<br><b>File name:</b> ') + videoFileName) + '" ')
    elif (subtitlesMatchedByHash == 0):
        tilestr = ((' --title="Subtitles for ' + videoFileName) + '"')
        menustr = ((' --menu="Search results using file name, NOT video detection. <b>May be unreliable...</b><br><b>File name:</b> ' + videoFileName) + '" ')
    else:
        tilestr = ((' --title="Subtitles for ' + videoTitle) + '" ')
        menustr = ((((' --menu="Search results using file name AND video detection.<br><b>Video title:</b> ' + videoTitle) + '<br><b>File name:</b> ') + videoFileName) + '" ')
    # NOTE(review): the command is assembled by string concatenation and run
    # with shell=True; titles/file names containing quotes or shell
    # metacharacters would break (or inject into) the command line.
    process_subtitlesSelection = subprocess.Popen((((((('kdialog --geometry=' + str(opt_gui_width)) + 'x') + str(opt_gui_height)) + tilestr) + menustr) + subtitlesItems), shell=True, stdout=subprocess.PIPE)
    result_subtitlesSelection = process_subtitlesSelection.communicate()
    # kdialog prints the chosen index on stdout; empty output means "cancelled".
    if result_subtitlesSelection[0]:
        subtitlesSelectedIndex = int(str(result_subtitlesSelection[0], 'utf-8', 'replace').strip('\n'))
        subtitlesSelectedName = subtitlesResultList['data'][subtitlesSelectedIndex]['attributes']['files'][0]['file_name']
    return (subtitlesSelectedName, subtitlesSelectedIndex)
def test_branch_condition_not_replaced():
    """Common-subexpression elimination must leave the repeated branch
    condition and all other instructions of this diamond-shaped CFG untouched."""
    graph = ControlFlowGraph()
    cond = Condition(OperationType.less, [Variable('x', ssa_label=0), Constant(12)])
    expected = [
        Assignment(Variable('x', ssa_label=0), Call(function_symbol('foo'), [])),
        Branch(cond.copy()),
        Assignment(Variable('u', ssa_label=1), Call(function_symbol('bar'), [])),
        Branch(cond.copy()),
        Assignment(Variable('v', ssa_label=1), Call(function_symbol('bar'), [])),
        Branch(cond.copy()),
        Return([Variable('u', ssa_label=1)]),
        Return([Variable('v', ssa_label=1)]),
    ]
    # Block 0 holds the first two instructions; every later block holds one.
    blocks = [
        BasicBlock(0, instructions=[expected[0].copy(), expected[1].copy()]),
        BasicBlock(1, instructions=[expected[2].copy()]),
        BasicBlock(2, instructions=[expected[3].copy()]),
        BasicBlock(3, instructions=[expected[4].copy()]),
        BasicBlock(4, instructions=[expected[5].copy()]),
        BasicBlock(5, instructions=[expected[6].copy()]),
        BasicBlock(6, instructions=[expected[7].copy()]),
    ]
    graph.add_nodes_from(blocks)
    graph.add_edges_from([
        TrueCase(blocks[0], blocks[1]),
        FalseCase(blocks[0], blocks[2]),
        UnconditionalEdge(blocks[1], blocks[2]),
        TrueCase(blocks[2], blocks[3]),
        FalseCase(blocks[2], blocks[4]),
        UnconditionalEdge(blocks[3], blocks[4]),
        TrueCase(blocks[4], blocks[5]),
        FalseCase(blocks[4], blocks[6]),
    ])
    _run_cse(graph, _generate_options(threshold=3))
    assert (list(graph.instructions) == expected)
.parametrize('order', [1, 2, 0.5])
def test_norm_param(some_thr, rc_dtype, order):
    """Check `tr.norm_param`: the computation must produce |x|**order elementwise.

    NOTE(review): the leading `.parametrize(...)` line looks like a truncated
    `@pytest.mark.parametrize` decorator — confirm against the original source.
    `some_thr` and `rc_dtype` are fixtures supplied by the test harness.
    """
    input_ = get_test_array((1000,), rc_dtype)
    input_dev = some_thr.to_device(input_)
    norm = tr.norm_param(input_dev)
    output_dev = some_thr.empty_like(norm.output)
    # Wrap the transformation in a computation and connect its parameters.
    test = get_test_computation(output_dev)
    test.parameter.input.connect(norm, norm.output, input_prime=norm.input, order=norm.order)
    testc = test.compile(some_thr)
    testc(output_dev, input_dev, order)
    # Compare device result against the NumPy reference |x|**order.
    assert diff_is_negligible(output_dev.get(), (numpy.abs(input_) ** order))
_event
class ReactionEvent(ThreadEvent):
    """Somebody reacted to a message (or removed their reaction).

    NOTE(review): the bare `_event` line above and the undecorated `_parse`
    classmethod look like truncated decorators (presumably an attrs event
    decorator and `@classmethod`) — confirm against the original source.
    """
    # The message the reaction was added to / removed from.
    message = attr.ib(type='_models.Message')
    # The reaction emoji, or None when the reaction was removed (action != 0).
    reaction = attr.ib(type=Optional[str])
    def _parse(cls, session, data):
        # Build the event from raw payload: `action == 0` means "added",
        # anything else clears the reaction.
        thread = cls._get_thread(session, data)
        return cls(author=_threads.User(session=session, id=str(data['userId'])), thread=thread, message=_models.Message(thread=thread, id=data['messageId']), reaction=(data['reaction'] if (data['action'] == 0) else None))
.usefixtures('use_tmpdir')
.parametrize('ecl_base, expected_file', (('MY_ECL_BASE', 'MY_ECL_BASE.DATA'), ('relative/path/MY_ECL_BASE', 'relative/path/MY_ECL_BASE.DATA'), ('MY_ECL_BASE%d', 'MY_ECL_BASE0.DATA'), ('MY_ECL_BASE<IENS>', 'MY_ECL_BASE0.DATA')))
def test_run_template_replace_in_ecl(ecl_base, expected_file, prior_ensemble):
    """RUN_TEMPLATE must substitute <ECLBASE> (including %d / <IENS> forms)
    in the target path and replace <NUM_CPU> inside the template contents.

    NOTE(review): the two leading `.usefixtures(...)` / `.parametrize(...)`
    lines look like truncated `@pytest.mark.*` decorators — confirm against
    the original source.
    """
    config_text = dedent(f'''
    NUM_REALIZATIONS 1
    ECLBASE {ecl_base}
    RUN_TEMPLATE BASE_ECL_FILE.DATA <ECLBASE>.DATA
    ''')
    Path('BASE_ECL_FILE.DATA').write_text('I WANT TO REPLACE:<NUM_CPU>', encoding='utf-8')
    Path('config.ert').write_text(config_text, encoding='utf-8')
    ert_config = ErtConfig.from_file('config.ert')
    # Create the run path for realization 0 and check the rendered template.
    run_context = ensemble_context(prior_ensemble, [True], 0, None, '', 'name_%', 'name')
    create_run_path(run_context, ert_config.substitution_list, ert_config)
    # NUM_CPU defaults to 1 here — the magic-string substitution must have run.
    assert ((Path(run_context[0].runpath) / expected_file).read_text() == 'I WANT TO REPLACE:1')
(name='flowspec.add_local', req_args=[FLOWSPEC_FAMILY, ROUTE_DISTINGUISHER, FLOWSPEC_RULES], opt_args=[FLOWSPEC_ACTIONS])
def add_flowspec_local(flowspec_family, route_dist, rules, **kwargs):
try:
tm = CORE_MANAGER.get_core_service().table_manager
tm.update_flowspec_vrf_table(flowspec_family=flowspec_family, route_dist=route_dist, rules=rules, **kwargs)
return [{FLOWSPEC_FAMILY: flowspec_family, ROUTE_DISTINGUISHER: route_dist, FLOWSPEC_RULES: rules}.update(kwargs)]
except BgpCoreError as e:
raise PrefixError(desc=e) |
class CacheObject():
    """Dict-like facade over the Redis-backed key/value store in `db`.

    Nothing is cached locally: every read and write goes straight to the
    backing store, so instances are cheap and share state.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = logging.getLogger('Main.RedisCache')
    def __setitem__(self, key, item):
        self.log.info('CacheObject setitem: %s -> %s', key, item)
        db.set_in_db_key_value_store(key, item)
    def __getitem__(self, key):
        self.log.info('CacheObject getitem: %s', key)
        return db.get_from_db_key_value_store(key)
    def __delitem__(self, key):
        # Deletion is emulated by storing an empty dict — presumably the
        # backing store has no true delete; `has_key` relies on this
        # convention. Confirm against the `db` module.
        db.set_in_db_key_value_store(key, {})
    def get(self, key, default='super_sekrit_not_specified_value'):
        """Return the stored value, or `default` when missing and a default
        was given; raise KeyError otherwise.

        NOTE(review): a magic string is used as the "no default supplied"
        sentinel; a caller passing that exact string would be
        indistinguishable from no default. Also note that falsy stored
        values ({}, 0, '') fall through to the default/KeyError path.
        """
        self.log.info('Cache get for key %s', key)
        ret = db.get_from_db_key_value_store(key)
        if ret:
            return ret
        if (default != 'super_sekrit_not_specified_value'):
            return default
        raise KeyError(('Key %s not found in CacheObject backing store!' % (key,)))
    def clear(self):
        # Unsupported: wiping the shared backing store wholesale is forbidden.
        raise ValueError('Cannot clear a CacheObject')
    def copy(self):
        # Unsupported: there is no meaningful local copy of the remote store.
        raise ValueError('Cannot copy a CacheObject')
    def has_key(self, key):
        # A key maps to {} when absent/deleted (see __delitem__).
        self.log.info('Cache has_key for key %s', key)
        return (db.get_from_db_key_value_store(key) != {})
class VRRPV2StateBackup(VRRPState):
    """VRRP version 2 BACKUP state event handlers.

    Each method handles one router event; state transitions go through
    `vrrp_router.state_change(...)`.
    """
    def _master_down(self):
        # Take over as MASTER: advertise immediately, stop any pending
        # preemption delay, switch state, and start the advertisement timer.
        vrrp_router = self.vrrp_router
        vrrp_router.send_advertisement()
        vrrp_router.preempt_delay_timer.cancel()
        vrrp_router.state_change(vrrp_event.VRRP_STATE_MASTER)
        vrrp_router.adver_timer.start(vrrp_router.config.advertisement_interval)
    def master_down(self, ev):
        # Master-down timer expired: promote this router to master.
        self.vrrp_router.logger.debug('%s master_down', self.__class__.__name__)
        self._master_down()
    def adver(self, ev):
        # Advertisement timer events are only logged while in BACKUP state.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s adver %s %s', self.__class__.__name__, ev.__class__.__name__, vrrp_router.state)
    def preempt_delay(self, ev):
        # Preempt-delay timer expired: preempt the current (lower-priority) master.
        self.vrrp_router.logger.warning('%s preempt_delay', self.__class__.__name__)
        self._master_down()
    def vrrp_received(self, ev):
        """Handle a VRRP advertisement received while in BACKUP state."""
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_received', self.__class__.__name__)
        (_ip, vrrp_) = vrrp.vrrp.get_payload(ev.packet)
        if (vrrp_.priority == 0):
            # Priority 0 means the master is resigning: expect to take over
            # after skew_time.
            vrrp_router.master_down_timer.start(vrrp_router.params.skew_time)
        else:
            config = vrrp_router.config
            params = vrrp_router.params
            if ((not config.preempt_mode) or (config.priority <= vrrp_.priority)):
                # Defer to the advertised master: drop any pending preemption
                # and restart the master-down timer.
                vrrp_router.preempt_delay_timer.cancel()
                vrrp_router.master_down_timer.start(params.master_down_interval)
            elif (config.preempt_mode and (config.preempt_delay > 0) and (config.priority > vrrp_.priority)):
                # Higher local priority with a configured preemption delay:
                # arm the delay timer once and keep monitoring the master.
                if (not vrrp_router.preempt_delay_timer.is_running()):
                    vrrp_router.preempt_delay_timer.start(config.preempt_delay)
                vrrp_router.master_down_timer.start(params.master_down_interval)
    def vrrp_shutdown_request(self, ev):
        # Administrative shutdown: cancel all timers and return to INITIALIZE.
        vrrp_router = self.vrrp_router
        vrrp_router.logger.debug('%s vrrp_shutdown_request', self.__class__.__name__)
        vrrp_router.master_down_timer.cancel()
        vrrp_router.preempt_delay_timer.cancel()
        vrrp_router.state_change(vrrp_event.VRRP_STATE_INITIALIZE)
    def vrrp_config_change_request(self, ev):
        """React to runtime config changes: an address owner with a new
        priority becomes master immediately; preemption changes reset the
        preempt-delay timer."""
        vrrp_router = self.vrrp_router
        vrrp_router.logger.warning('%s vrrp_config_change_request', self.__class__.__name__)
        if ((ev.priority is not None) and vrrp_router.config.address_owner):
            vrrp_router.master_down_timer.cancel()
            self._master_down()
        if ((ev.preempt_mode is not None) or (ev.preempt_delay is not None)):
            vrrp_router.preempt_delay_timer.cancel()
_routes.route('/resend-verification-email', methods=['POST'])
def resend_verification_email():
    """Resend the account-verification email for the address in the POST body.

    Expects a JSON body of the form {"data": {"email": ...}}.
    Raises BadRequestError for a malformed body and UnprocessableEntityError
    when no user exists for the given address.
    """
    try:
        email = request.json['data']['email']
    except (TypeError, KeyError):
        # TypeError: body is not JSON (request.json is None).
        # KeyError: 'data' or 'email' missing — the original caught only
        # TypeError, so a missing key escaped as an unhandled 500 error.
        logging.error('Bad Request')
        raise BadRequestError({'source': ''}, 'Bad Request Error')
    try:
        user = User.query.filter_by(email=email).one()
    except NoResultFound:
        logging.info((('User with email: ' + email) + ' not found.'))
        raise UnprocessableEntityError({'source': ''}, (('User with email: ' + email) + ' not found.'))
    else:
        # Token: serializer-signed [email, nonce], base64-encoded for the URL.
        serializer = get_serializer()
        hash_ = str(base64.b64encode(str(serializer.dumps([user.email, str_generator()])).encode()), 'utf-8')
        link = make_frontend_url('/verify', {'token': hash_})
        send_email_confirmation(user.email, link)
        logging.info('Verification email resent')
        return make_response(jsonify(message='Verification email resent'), 200)
def delighted_dataset_config(db: Session, delighted_connection_config: ConnectionConfig, delighted_dataset: Dict[(str, Any)]) -> Generator:
    """Fixture: create a Delighted DatasetConfig (plus backing CtlDataset)
    bound to the connection config, yield it, then clean both up.

    NOTE(review): presumably decorated with @pytest.fixture upstream —
    the decorator does not appear in this copy.
    """
    # Rename the connection config after the dataset's fides_key so the two
    # are linked consistently.
    fides_key = delighted_dataset['fides_key']
    delighted_connection_config.name = fides_key
    delighted_connection_config.key = fides_key
    delighted_connection_config.save(db=db)
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, delighted_dataset)
    dataset = DatasetConfig.create(db=db, data={'connection_config_id': delighted_connection_config.id, 'fides_key': fides_key, 'ctl_dataset_id': ctl_dataset.id})
    (yield dataset)
    # Teardown: remove the records created above.
    dataset.delete(db=db)
    ctl_dataset.delete(db=db)
class CDDevice(KeyedDevice):
    """Audio CD drive device: exposes the disc as a single CDPlaylist."""
    # Auto-connect devices of this class when discovered.
    class_autoconnect = True
    def __init__(self, dev):
        # NOTE(review): calls Device.__init__ directly rather than
        # super().__init__ — presumably intentional in this hierarchy
        # (KeyedDevice may customize construction); confirm upstream.
        Device.__init__(self, dev)
        self.name = _('Audio Disc')
        self.dev = dev
    # GUI panel class used to display this device.
    panel_type = _cdguipanel.CDPanel
    def __on_cd_info_retrieved(self, _event_type, cd_playlist, _disc_title):
        # Async callback: the CD track listing arrived; expose the playlist
        # and mark the device connected.
        self.playlists.append(cd_playlist)
        self.connected = True
    def connect(self):
        """Start reading the disc; completion is signalled via the
        'cd_info_retrieved' event (connection is asynchronous)."""
        if self.connected:
            return
        event.add_ui_callback(self.__on_cd_info_retrieved, 'cd_info_retrieved')
        CDPlaylist(device=self.dev)
    def disconnect(self):
        """Drop the playlists and destroy this (keyed) device instance."""
        if (not self.connected):
            return
        self.playlists = []
        self.connected = False
        CDDevice.destroy(self)
def init(model: Model[(InT, OutT)], X: Optional[InT]=None, Y: Optional[OutT]=None) -> None:
    """Initialize the model: infer `nO` from sample data when available,
    seed the Q parameter with small uniform noise, and initialize the
    key-transform sublayer."""
    key_transform = model.get_ref(KEY_TRANSFORM_REF)
    inferred_width = None
    if X is not None:
        inferred_width = get_width(X)
    # Only propagate a truthy (non-zero) width into the dimensions.
    if inferred_width:
        model.set_dim('nO', inferred_width)
        if key_transform.has_dim('nO'):
            key_transform.set_dim('nO', inferred_width)
    # Seed Q ~ Uniform(-0.1, 0.1) with shape (nO,).
    q = model.ops.alloc1f(model.get_dim('nO'))
    q += model.ops.xp.random.uniform((- 0.1), 0.1, q.shape)
    model.set_param('Q', q)
    x_data = X.dataXd if X is not None else None
    y_data = Y.dataXd if Y is not None else None
    key_transform.initialize(x_data, y_data)
.object(packages.fetchai.protocols.tac.message, 'enforce', side_effect=AEAEnforceError('some error'))
def test_incorrect_message(mocked_enforce):
    """A message failing validation must log the enforcement error.

    NOTE(review): the leading `.object(...)` line looks like a truncated
    `@mock.patch.object` decorator (it supplies `mocked_enforce`) — confirm
    against the original source.
    """
    with mock.patch.object(tac_message_logger, 'error') as mock_logger:
        # Constructing the message triggers the patched `enforce`, which raises.
        TacMessage(performative=TacMessage.Performative.CANCELLED)
        mock_logger.assert_any_call('some error')
def test_pager_duty_notification():
    """A PagerDuty notification must serialize to the protobuf form with the
    pager_duty field set and email/slack left empty."""
    pager_duty_notif = notification.PagerDuty(phases=[_workflow_execution_succeeded], recipients_email=['my-'])
    assert (pager_duty_notif.to_flyte_idl() == _common_pb2.Notification(phases=[_workflow_execution_succeeded], email=None, pager_duty=_common_model.PagerDutyNotification(['my-']).to_flyte_idl(), slack=None))
def get_highest_acc(models_dir='models/'):
    """Return the best validation accuracy recorded in saved checkpoints.

    Scans `models_dir` (default 'models/', preserving the original
    hard-coded behavior) for files named like
    ``model.<epoch>-<val_acc>.hdf5`` — the Keras checkpoint-callback
    naming scheme — and returns the highest ``val_acc`` found, or
    ``sys.float_info.min`` when no checkpoint matches so that any real
    accuracy compares higher.
    """
    import re
    import sys
    pattern = 'model.(?P<epoch>\\d+)-(?P<val_acc>[0-9]*\\.?[0-9]*).hdf5'
    p = re.compile(pattern)
    acces = [float(p.match(f).groups()[1]) for f in os.listdir(models_dir) if p.match(f)]
    if not acces:
        # No checkpoints yet.
        return sys.float_info.min
    # Builtin max suffices here; the original pulled in numpy (np.max) for a
    # plain list of floats.
    return max(acces)
def get_icon(name, forecolor, backcolor, higopacity, lowopacity, temp_dir, default):
    """Build (and cache to `temp_dir`) a themed SVG icon and return it as a QIcon.

    The SVG body is chosen by `name`, colored with `forecolor`/`backcolor`
    and the two opacities, then written to `<temp_dir>/<name>.svg`.
    Returns `default` when `temp_dir` is missing or `name` is unknown.
    """
    name_dict = {'sel_wheel': __SVG_WHEEL__, 'sel_image': __SVG_IMAGE__, 'sel_board': __SVG_BOARD__, 'sel_depot': __SVG_DEPOT__, 'wheel': __SVG_WHEEL__, 'image': __SVG_IMAGE__, 'board': __SVG_BOARD__, 'depot': __SVG_DEPOT__, 'about': __SVG_ABOUT__, 'info': __SVG_INFO__, 'settings': __SVG_SETTINGS__, 'save': __SVG_SAVE__, 'open': __SVG_OPEN__, 'quit': __SVG_QUIT__, 'home': __SVG_HOME__, 'update': __SVG_UPDATE__, 'forum': __SVG_FORUM__, 'sel_up': __SVG_UP__, 'sel_down': __SVG_DOWN__, 'box_up': __SVG_UP__, 'box_down': __SVG_DOWN__, 'box_left': __SVG_LEFT__, 'box_right': __SVG_RIGHT__, 'up': __SVG_UP__, 'down': __SVG_DOWN__, 'left': __SVG_LEFT__, 'right': __SVG_RIGHT__, 'reset': __SVG_RESET__, 'zoom_in': __SVG_ZOOM_IN__, 'zoom_out': __SVG_ZOOM_OUT__, 'layout_l': __SVG_LAYOUT_L__, 'layout_r': __SVG_LAYOUT_R__, 'float': __SVG_FLOAT__, 'close': __SVG_CLOSE__}
    # Canvas size (wid/hig) and rendered size (swid/shig) defaults.
    wid = hig = 100
    swid = shig = 24
    if (name in ('sel_wheel', 'sel_image', 'sel_board', 'sel_depot', 'wheel', 'image', 'board', 'depot', 'about', 'settings')):
        # NOTE(review): these are assigned as the *string* '100' while the
        # defaults are ints — presumably both render the same once formatted
        # into the SVG template; confirm against __SVG_FMT__.
        swid = shig = '100'
    elif (name in ('sel_up', 'sel_down', 'box_up', 'box_down', 'up', 'down')):
        # Vertical arrows use a half-height canvas.
        hig = (hig / 2)
        shig = (shig / 2)
    elif (name in ('box_left', 'box_right', 'left', 'right')):
        # Horizontal arrows use a half-width canvas.
        wid = (wid / 2)
        swid = (swid / 2)
    if (os.path.isdir(temp_dir) and (name in name_dict)):
        with open(os.sep.join([temp_dir, '{}.svg'.format(name)]), 'w') as f:
            context = name_dict[name].format(forecolor=forecolor, backcolor=backcolor, stroke=6, higopacity=higopacity, lowopacity=lowopacity)
            # [1:] drops the template's leading newline.
            f.write(__SVG_FMT__.format(context=context, wid=wid, hig=hig, swid=swid, shig=shig)[1:])
        return QIcon(os.sep.join([temp_dir, '{}.svg'.format(name)]))
    return default
class ConvertHscicPrescribingTests(TestCase):
    """Integration test for the `convert_hscic_prescribing` management command.

    Uploads a raw EPD fixture to Google Cloud Storage, runs the command, and
    verifies the aggregated rows in the BigQuery `prescribing_v2` table.
    Requires live GCS/BigQuery credentials.
    """
    def test_data_is_aggregated(self):
        # Stage the raw CSV fixture in the expected GCS location.
        raw_data_path = (('frontend/tests/fixtures/commands/' + 'convert_hscic_prescribing/2016_01/') + 'EPD_201601.csv')
        gcs_path = 'hscic/prescribing_v2/2016_01/EPD_201601.csv'
        client = StorageClient()
        bucket = client.get_bucket()
        blob = bucket.blob(gcs_path)
        with open(raw_data_path, 'rb') as f:
            blob.upload_from_file(f)
        call_command('convert_hscic_prescribing', filename=raw_data_path)
        # The raw file contains duplicate rows that must be summed: 15 input
        # rows collapse to 9, with one known (practice, bnf_code) quantity.
        client = BQClient()
        sql = "SELECT *\n FROM {hscic}.prescribing_v2\n WHERE month = TIMESTAMP('2016-01-01')"
        rows = list(results_to_dicts(client.query(sql)))
        self.assertEqual(len(rows), 9)
        for row in rows:
            if ((row['practice'] == 'P92042') and (row['bnf_code'] == '0202010B0AAABAB')):
                self.assertEqual(row['quantity'], 1288)
    def setUp(self):
        # Ensure the destination table exists before the command runs.
        client = BQClient('hscic')
        client.get_or_create_table('prescribing_v2', PRESCRIBING_SCHEMA)
    def tearDown(self):
        """Best-effort cleanup of the temp table, destination rows, and the
        intermediate CSV the command writes next to the fixture."""
        table_name = 'raw_prescribing_data_2016_01'
        try:
            BQClient('tmp_eu').delete_table(table_name)
        except NotFound:
            pass
        table = BQClient('hscic').get_table('prescribing_v2')
        table.delete_all_rows()
        try:
            os.remove((('frontend/tests/fixtures/commands/' + 'convert_hscic_prescribing/2016_01/') + 'Detailed_Prescribing_Information_formatted.CSV'))
        except OSError:
            pass
class SingleSiteRealSpaceNewtonianMonteCarloProposerTest(unittest.TestCase):
    """Tests for SingleSiteRealSpaceNewtonianMonteCarloProposer: proposal
    mean/scale_tril computation and adaptive Beta-prior updates.

    NOTE(review): the bare `_variable` lines in the inner model classes look
    like truncated decorators (presumably `@bm.random_variable`) — confirm
    against the original source.
    """
    class SampleNormalModel():
        # Toy model: foo ~ MVN(0, I), bar ~ MVN(foo, I) in 2 dimensions.
        _variable
        def foo(self):
            return dist.MultivariateNormal(torch.zeros(2), torch.eye(2))
        _variable
        def bar(self):
            return dist.MultivariateNormal(self.foo(), torch.eye(2))
    class SampleLogisticRegressionModel():
        # Toy logistic regression: y(i) ~ Bernoulli(sigmoid(theta_1*x(i)+theta_0)).
        _variable
        def theta_0(self):
            return dist.Normal(tensor(0.0), tensor(1.0))
        _variable
        def theta_1(self):
            return dist.Normal(tensor(0.0), tensor(1.0))
        _variable
        def x(self, i):
            return dist.Normal(tensor(0.0), tensor(1.0))
        _variable
        def y(self, i):
            y = ((self.theta_1() * self.x(i)) + self.theta_0())
            probs = (1 / (1 + (y * (- 1)).exp()))
            return dist.Bernoulli(probs)
    def test_mean_scale_tril_for_node_with_child(self):
        """With an observed MVN child, the proposal for foo must be the
        conjugate posterior: mean (prior+obs)/2, covariance halved."""
        foo_key = bm.random_variable((lambda : dist.MultivariateNormal(tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]]))))
        bar_key = bm.random_variable((lambda : dist.MultivariateNormal(foo_key(), tensor([[1.0, 0.8], [0.8, 1.0]]))))
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key())
        val = tensor([2.0, 2.0])
        queries = [foo_key(), bar_key()]
        observed_val = tensor([2.0, 2.0])
        observations = {bar_key(): observed_val}
        world = World.initialize_world(queries, observations)
        world_vars = world._variables
        world_vars[foo_key] = val
        nw_proposer.learning_rate_ = 1.0
        prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
        (mean, scale_tril) = (prop_dist.mean, prop_dist.scale_tril)
        expected_mean = tensor([1.5, 1.5])
        expected_scale_tril = torch.linalg.cholesky(tensor([[0.5, 0.4], [0.4, 0.5]]))
        self.assertTrue(torch.isclose(mean, expected_mean).all())
        self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
    def test_mean_scale_tril(self):
        """Without children, the proposal must recover the prior's mean and
        Cholesky factor."""
        model = self.SampleNormalModel()
        foo_key = model.foo()
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key)
        val = tensor([2.0, 2.0])
        val.requires_grad_(True)
        distribution = dist.MultivariateNormal(tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]]))
        queries = [foo_key]
        observations = {}
        world = World.initialize_world(queries, observations)
        world_vars = world._variables
        world_vars[foo_key] = Variable(value=val, distribution=distribution)
        nw_proposer.learning_rate_ = 1.0
        prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
        (mean, scale_tril) = (prop_dist.mean, prop_dist.scale_tril)
        expected_mean = tensor([1.0, 1.0])
        expected_scale_tril = torch.linalg.cholesky(tensor([[1.0, 0.8], [0.8, 1]]))
        self.assertTrue(torch.isclose(mean, expected_mean).all())
        self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
    def test_mean_scale_tril_for_iids(self):
        """An iid (2x2) Normal value must be flattened: 4-dim mean and an
        identity Cholesky factor."""
        model = self.SampleNormalModel()
        foo_key = model.foo()
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key)
        val = tensor([[2.0, 2.0], [2.0, 2.0]])
        val.requires_grad_(True)
        distribution = dist.Normal(tensor([[1.0, 1.0], [1.0, 1.0]]), tensor([[1.0, 1.0], [1.0, 1.0]]))
        queries = [foo_key]
        observations = {}
        world = World.initialize_world(queries, observations)
        world_vars = world._variables
        world_vars[foo_key] = Variable(value=val, distribution=distribution)
        nw_proposer.learning_rate_ = 1.0
        prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
        (mean, scale_tril) = (prop_dist.mean, prop_dist.scale_tril)
        expected_mean = tensor([1.0, 1.0, 1.0, 1.0])
        expected_scale_tril = torch.eye(4)
        self.assertTrue(torch.isclose(mean, expected_mean).all())
        self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
    def test_multi_mean_scale_tril_computation_in_inference(self):
        """The proposal for theta_0 must match the Newton step computed by
        hand via autograd: mean = value + grad * (-Hessian)^-1, and
        scale_tril = chol((-Hessian)^-1). Checked twice: once at the initial
        value and once after resampling from the first proposal."""
        model = self.SampleLogisticRegressionModel()
        theta_0_key = model.theta_0()
        theta_1_key = model.theta_1()
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
        x_0_key = model.x(0)
        x_1_key = model.x(1)
        y_0_key = model.y(0)
        y_1_key = model.y(1)
        # Hand-pick values for the latent variables; only theta_0 needs grads.
        theta_0_value = tensor(1.5708)
        theta_0_value.requires_grad_(True)
        x_0_value = tensor(0.7654)
        x_1_value = tensor((- 6.6737))
        theta_1_value = tensor((- 0.4459))
        theta_0_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
        queries = [theta_0_key, theta_1_key]
        observations = {}
        world = World.initialize_world(queries, observations)
        world_vars = world._variables
        # Wire the world graph by hand: thetas and xs feed both y nodes.
        world_vars[theta_0_key] = Variable(value=theta_0_value, distribution=theta_0_distribution, children=set({y_0_key, y_1_key}))
        world_vars[theta_1_key] = Variable(value=theta_1_value, distribution=theta_0_distribution, children=set({y_0_key, y_1_key}))
        x_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(5.0))
        world_vars[x_0_key] = Variable(value=x_0_value, distribution=x_distribution, children=set({y_0_key, y_1_key}))
        world_vars[x_1_key] = Variable(value=x_1_value, distribution=x_distribution, children=set({y_0_key, y_1_key}))
        y = (theta_0_value + (theta_1_value * x_0_value))
        probs_0 = (1 / (1 + (y * (- 1)).exp()))
        y_0_distribution = dist.Bernoulli(probs_0)
        world_vars[y_0_key] = Variable(value=tensor(1.0), distribution=y_0_distribution, parents=set({theta_0_key, theta_1_key, x_0_key}))
        y = (theta_0_value + (theta_1_value * x_1_value))
        probs_1 = (1 / (1 + (y * (- 1)).exp()))
        y_1_distribution = dist.Bernoulli(probs_1)
        world_vars[y_1_key] = Variable(value=tensor(1.0), distribution=y_1_distribution, parents=set({theta_0_key, theta_1_key, x_1_key}))
        nw_proposer.learning_rate_ = 1.0
        prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
        (mean, scale_tril) = (prop_dist.mean, prop_dist.scale_tril)
        # Reference: log joint of theta_0 given both y = 1 observations.
        score = theta_0_distribution.log_prob(theta_0_value)
        score += (1 / (1 + ((- 1) * (theta_0_value + (theta_1_value * x_0_value))).exp())).log()
        score += (1 / (1 + ((- 1) * (theta_0_value + (theta_1_value * x_1_value))).exp())).log()
        expected_first_gradient = torch.autograd.grad(score, theta_0_value, create_graph=True)[0]
        expected_second_gradient = torch.autograd.grad(expected_first_gradient, theta_0_value)[0]
        expected_covar = (expected_second_gradient.reshape(1, 1).inverse() * (- 1))
        expected_scale_tril = torch.linalg.cholesky(expected_covar)
        self.assertAlmostEqual(expected_scale_tril.item(), scale_tril.item(), delta=0.001)
        expected_first_gradient = expected_first_gradient.unsqueeze(0)
        expected_mean = (theta_0_value.unsqueeze(0) + expected_first_gradient.unsqueeze(0).mm(expected_covar)).squeeze(0)
        self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001)
        # Second round: resample theta_0 from the proposal and re-check.
        proposal_value = dist.MultivariateNormal(mean, scale_tril=scale_tril).sample().reshape(theta_0_value.shape)
        proposal_value.requires_grad_(True)
        world_vars[theta_0_key].value = proposal_value
        y = (proposal_value + (theta_1_value * x_0_value))
        probs_0 = (1 / (1 + (y * (- 1)).exp()))
        y_0_distribution = dist.Bernoulli(probs_0)
        world_vars[y_0_key].distribution = y_0_distribution
        world_vars[y_0_key].log_prob = y_0_distribution.log_prob(tensor(1.0))
        y = (proposal_value + (theta_1_value * x_1_value))
        probs_1 = (1 / (1 + (y * (- 1)).exp()))
        y_1_distribution = dist.Bernoulli(probs_1)
        world_vars[y_1_key].distribution = y_1_distribution
        nw_proposer.learning_rate_ = 1.0
        prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
        (mean, scale_tril) = (prop_dist.mean, prop_dist.scale_tril)
        score = tensor(0.0)
        score = theta_0_distribution.log_prob(proposal_value)
        score += (1 / (1 + ((- 1) * (proposal_value + (theta_1_value * x_0_value))).exp())).log()
        score += (1 / (1 + ((- 1) * (proposal_value + (theta_1_value * x_1_value))).exp())).log()
        expected_first_gradient = torch.autograd.grad(score, proposal_value, create_graph=True)[0]
        expected_second_gradient = torch.autograd.grad(expected_first_gradient, proposal_value)[0]
        expected_covar = (expected_second_gradient.reshape(1, 1).inverse() * (- 1))
        expected_scale_tril = torch.linalg.cholesky(expected_covar)
        self.assertAlmostEqual(expected_scale_tril.item(), scale_tril.item(), delta=0.001)
        expected_first_gradient = expected_first_gradient.unsqueeze(0)
        expected_mean = (proposal_value.unsqueeze(0) + expected_first_gradient.unsqueeze(0).mm(expected_covar)).squeeze(0)
        self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001)
        self.assertAlmostEqual(scale_tril.item(), expected_scale_tril.item(), delta=0.001)
    def test_adaptive_alpha_beta_computation(self):
        """Scalar case: Beta-prior (alpha, beta) derived from the running
        statistics of accepted learning rates must match known values."""
        model = self.SampleLogisticRegressionModel()
        theta_0_key = model.theta_0()
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
        nw_proposer.learning_rate_ = tensor(0.0416, dtype=torch.float64)
        (nw_proposer.running_mean_, nw_proposer.running_var_) = (tensor(0.079658), tensor(0.0039118))
        nw_proposer.accepted_samples_ = 37
        (alpha, beta) = nw_proposer.compute_beta_priors_from_accepted_lr()
        self.assertAlmostEqual(nw_proposer.running_mean_.item(), 0.0786, delta=0.0001)
        self.assertAlmostEqual(nw_proposer.running_var_.item(), 0.00384, delta=1e-05)
        self.assertAlmostEqual(alpha.item(), 1.4032, delta=0.001)
        self.assertAlmostEqual(beta.item(), 16.4427, delta=0.001)
    def test_adaptive_vectorized_alpha_beta_computation(self):
        """Vectorized case: the same Beta-prior computation must work
        elementwise on a 2-element learning-rate tensor."""
        model = self.SampleLogisticRegressionModel()
        theta_0_key = model.theta_0()
        nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
        nw_proposer.learning_rate_ = tensor([0.0416, 0.0583], dtype=torch.float64)
        (nw_proposer.running_mean_, nw_proposer.running_var_) = (tensor([0.079658, 0.089861]), tensor([0.0039118, 0.0041231]))
        nw_proposer.accepted_samples_ = 37
        (alpha, beta) = nw_proposer.compute_beta_priors_from_accepted_lr()
        self.assertListEqual([round(x.item(), 4) for x in list(nw_proposer.running_mean_)], [0.0786, 0.089])
        self.assertListEqual([round(x.item(), 4) for x in list(nw_proposer.running_var_)], [0.0038, 0.004])
        self.assertListEqual([round(x.item(), 4) for x in list(alpha)], [1.4032, 1.6984])
        self.assertListEqual([round(x.item(), 4) for x in list(beta)], [16.4427, 17.3829])
(unsafe_hash=False, eq=True)
class CfgSimple():
graph: Graph
terminals: Set[Any]
def successor(self, element, or_else=...):
return single_element(self.graph.successors(element), or_else)
def predecessor(self, element, or_else=...):
return single_element(self.graph.predecessors(element), or_else)
def first(self):
return single_element(self.graph.sources)
def last_appendable(self):
return single_element(self.sinks_appendable)
def sinks_appendable(self):
return (self.graph.sinks - self.terminals)
def append(self, other):
new_graph = self.graph.copy()
new_terminals = self.terminals.copy()
if ((not self.sinks_appendable) and (not self.graph.is_empty)):
return self
for other in (other if isinstance(other, Iterable) else [other]):
other = self.__make_cfg(other)
edges = itertools.product(self.sinks_appendable, other.graph.sources)
new_graph.union(other.graph, inplace=True, assert_disjoint=True)
new_graph.add_edges(*edges, inplace=True)
new_terminals.update(other.terminals)
return CfgSimple(new_graph, new_terminals)
def replace(self, node, new_node):
new_graph = self.graph.replace(node, (new_node.graph if isinstance(new_node, CfgSimple) else new_node))
new_terminals = self.terminals
if (node in new_terminals):
new_terminals -= {node}
new_terminals |= ({new_node} if (not isinstance(new_node, CfgSimple)) else new_node.sinks)
return self.updated(graph=new_graph, terminals=new_terminals)
def remove(self, node):
return self.updated(graph=self.graph.remove_node(node), terminals=(self.terminals - {node}))
def remove_with_connection(self, node):
p = self.graph.predecessors(node)
s = self.graph.successors(node)
edges = itertools.product(p, s)
return self.updated(graph=self.graph.remove_node(node).add_edges(*edges), terminals=(self.terminals - {node}))
def without_appendable(self, end):
assert (end in self.graph.sinks)
return self.updated(terminals=(self.terminals | {end}))
def updated(self, **kwargs):
return dataclasses.replace(self, **kwargs)
def __make_cfg(element):
return (element if isinstance(element, CfgSimple) else CfgSimple.statement(element))
def empty():
return CfgSimple(Graph.empty(), set())
def statement(element):
return CfgSimple(Graph.single_node(element), terminals=set())
def statement_terminal(element):
return CfgSimple(Graph.single_node(element), terminals={element})
def statements(*elements):
return CfgSimple(Graph.linear(*elements), terminals=set())
def concatenate(*elements):
cfg = CfgSimple.empty()
for e in elements:
cfg >>= e
return cfg
def __add__(self, element):
if isinstance(element, CfgSimple):
return self.updated(graph=self.graph.union(element.graph), terminals=(self.terminals | element.terminals))
else:
return self.updated(graph=self.graph.add_edges(element, create_nodes=False))
def __sub__(self, element):
if isinstance(element, CfgSimple):
return self.updated(graph=self.graph.remove_nodes(*element.graph.nodes, inplace=False), terminals=(self.terminals - element.graph.nodes))
else:
raise Exception()
def __rrshift__(self, other):
return self.__lshift__(other)
def __lshift__(self, other):
return (self.__make_cfg(other) >> self)
def __rshift__(self, other):
return self.append(other)
def visualize(self):
    """Build a graphviz Digraph of this CFG; nodes are keyed by ``id()``.

    Block nodes render as hexagons, everything else as boxes.
    """
    dot = Digraph('production_hierarchy')
    for node in self.graph.nodes:
        node_shape = 'hexagon' if isinstance(node, cfg_ir.Block) else 'box'
        node_label = type(node).__name__ + ': ' + str(node)
        dot.node(name=str(id(node)), label=node_label, shape=node_shape)
    for source, target in self.graph.edges:
        dot.edge(str(id(source)), str(id(target)))
    return dot
def visualize_and_display(self, name='cfg', format='svg'):
    """Render the CFG to ``<name>.<format>`` on disk and return the Digraph."""
    rendered = self.visualize()
    rendered.format = format
    rendered.render(name, cleanup=True)
    return rendered
class FaucetUntaggedLoopTest(FaucetTest):
    """Integration test: ``loop_protect`` on ports 3/4 must trigger learn
    bans when a wiring loop is created through the second host."""
    # One DP, two untagged hosts, two links per host (so host 2 can be
    # bridged into a loop across its two interfaces).
    NUM_DPS = 1
    N_TAGGED = 0
    N_UNTAGGED = 2
    LINKS_PER_HOST = 2
    CONFIG_GLOBAL = '\nvlans:\n    100:\n        description: "untagged"\n'
    CONFIG = '\n        interfaces:\n            %(port_1)d:\n                native_vlan: 100\n            %(port_2)d:\n                native_vlan: 100\n            %(port_3)d:\n                native_vlan: 100\n                loop_protect: True\n            %(port_4)d:\n                native_vlan: 100\n                loop_protect: True\n'
    def setUp(self):
        """Build the loop-capable topology and start the network."""
        super().setUp()
        self.topo = self.topo_class(self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid], n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED, links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
    def total_port_bans(self):
        """Sum the ``port_learn_bans`` Prometheus counter over all host ports."""
        total_bans = 0
        for i in range((self.LINKS_PER_HOST * self.N_UNTAGGED)):
            port_labels = self.port_labels(self.port_map[('port_%u' % (i + 1))])
            total_bans += self.scrape_prometheus_var('port_learn_bans', port_labels, dpid=True, default=0)
        return total_bans
    def test_untagged(self):
        """Bridge host 2's two interfaces together (rate-limited veth pair)
        to form a loop, verify learn bans increase, then break the loop and
        confirm connectivity is restored."""
        (first_host, second_host) = self.hosts_name_ordered()
        self.one_ipv4_ping(first_host, second_host.IP())
        start_bans = self.total_port_bans()
        self.quiet_commands(second_host, ('ip link add name veth-loop1 type veth peer name veth-loop2', 'ip link set veth-loop1 up', 'ip link set veth-loop2 up', 'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000', 'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000', 'ip link add dev br-loop1 type bridge', 'ip link set br-loop1 type bridge forward_delay 0', 'ip link set br-loop1 up', 'ip link set dev veth-loop1 master br-loop1', ('ip link set dev %s-eth0 master br-loop1' % second_host.name), 'ip link add dev br-loop2 type bridge', 'ip link set br-loop2 type bridge forward_delay 0', 'ip link set br-loop2 up', 'ip link set dev veth-loop2 master br-loop2', ('ip link set dev %s-eth1 master br-loop2' % second_host.name)))
        # Poll up to three times: the ban counter may take a moment to move.
        for _ in range(3):
            first_host.cmd(('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT))
            end_bans = self.total_port_bans()
            if (end_bans > start_bans):
                return
            time.sleep(1)
        self.assertGreater(end_bans, start_bans)
        # Break the loop and verify hosts can still reach each other.
        second_host.run_ip_batch(['link set veth-loop1 down', 'link set veth-loop2 down'])
        self.one_ipv4_ping(first_host, second_host.IP())
class Filter(Source):
    """Base class for pipeline filter objects.

    A Filter consumes one or more upstream ``inputs`` (PipelineBase
    objects), listens to their ``pipeline_changed``/``data_changed``
    events, and exposes transformed outputs downstream.  Subclasses
    implement ``setup_pipeline``/``update_pipeline``.
    """
    # Version of this class (used by the persistence machinery).
    __version__ = 0
    # Upstream pipeline objects; not recorded by the script recorder.
    inputs = List(PipelineBase, record=False)
    icon = Str('filter.ico')
    type = Str(' filter')
    # This filter accepts any dataset kind as input.
    input_info = PipelineInfo(datasets=['any'])
    def __init__(self, **traits):
        super(Filter, self).__init__(**traits)
        self.setup_pipeline()
    def __get_pure_state__(self):
        # 'inputs' is reconstructed by the parent pipeline on restore,
        # so it is excluded from the pickled state.
        d = super(Filter, self).__get_pure_state__()
        d.pop('inputs', None)
        return d
    def setup_pipeline(self):
        """Hook for subclasses: create the underlying VTK-side objects."""
        pass
    def update_pipeline(self):
        """Hook for subclasses: rebuild outputs when inputs change."""
        raise NotImplementedError
    def update_data(self):
        """Propagate an upstream data change: re-render and notify listeners."""
        self.render()
        self.data_changed = True
    def start(self):
        """Wire up event handlers and build the pipeline (idempotent)."""
        if self.running:
            return
        self._setup_event_handlers()
        self.update_pipeline()
        super(Filter, self).start()
    def stop(self):
        """Unhook event handlers (idempotent)."""
        if (not self.running):
            return
        self._teardown_event_handlers()
        super(Filter, self).stop()
    def _set_outputs(self, new_outputs):
        """Set outputs, update advertised dataset kind, and signal a data
        change when the output list is unchanged (contents may differ)."""
        old_outputs = self.outputs
        self.outputs = new_outputs
        if (len(new_outputs) > 0):
            self.output_info.datasets = [get_tvtk_dataset_name(new_outputs[0])]
        if (old_outputs == self.outputs):
            self.data_changed = True
    def _inputs_changed(self, old, new):
        # Whole-list replacement: rebuild and re-subscribe.
        if self.running:
            self.update_pipeline()
            self._setup_input_events(old, new)
    def _inputs_items_changed(self, list_event):
        # In-place list edit: rebuild and re-subscribe only the delta.
        if self.running:
            self.update_pipeline()
            self._setup_input_events(list_event.removed, list_event.added)
    def _setup_event_handlers(self):
        self._setup_input_events([], self.inputs)
    def _teardown_event_handlers(self):
        self._setup_input_events(self.inputs, [])
    def _setup_input_events(self, removed, added):
        """Unsubscribe from *removed* inputs and subscribe to *added* ones."""
        for input in removed:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed', remove=True)
            input.on_trait_event(self.update_data, 'data_changed', remove=True)
        for input in added:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed')
            input.on_trait_event(self.update_data, 'data_changed')
def extractIndonesianoveltranslatorBlogspotCom(item):
    """Map a release-feed item to a release message.

    Returns None for previews/items without chapter info, False when no
    known tag matches, otherwise the built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip preview posts and items lacking both chapter and volume numbers.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    tagmap = [
        ('the telltaler', 'The Telltaler', 'translated'),
        ('i am no king', 'I am No King', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the line below looks like a route decorator whose
# '@app.route'-style prefix was lost in extraction -- confirm upstream.
('/model-version')
def model_version_metadata():
    """Paginated JSON listing of model versions, filtered/ordered per request args."""
    page_no = int(request.args.get('pageNo'))
    page_size = int(request.args.get('pageSize'))
    total = store.count_model_versions(filters=build_filters(request))
    versions = store.list_model_versions(
        page_size=page_size,
        offset=(page_no - 1) * page_size,
        filters=build_filters(request),
        orders=build_orders(request),
    )
    payload = [version.__dict__ for version in versions] if versions else []
    return json_pagination_response(page_no=page_no, total_count=total, data=payload)
# NOTE(review): the three leading lines look like pytest marks whose
# '@pytest.mark' prefix was lost in extraction -- restore before running.
.skipif((pymongo.version_tuple < (3, 7)), reason='New in 3.7')
.parametrize('elasticapm_client', [{'span_compression_enabled': True, 'span_compression_same_kind_max_duration': '5ms', 'span_compression_exact_match_max_duration': '50ms'}], indirect=True)
.integrationtest
def test_mongodb_span_compression(instrument, elasticapm_client, mongo_database):
    """Five fast identical insert_one spans should be compressed into one span."""
    elasticapm_client.begin_transaction('transaction.test')
    for i in range(5):
        blogpost = {'author': ('Tom%d' % i), 'text': 'Foo', 'date': datetime.datetime.utcnow()}
        mongo_database.blogposts.insert_one(blogpost)
    elasticapm_client.end_transaction('transaction.test')
    transactions = elasticapm_client.events[TRANSACTION]
    spans = elasticapm_client.spans_for_transaction(transactions[0])
    # Span compression folds the five similar spans into a single one.
    assert (len(spans) == 1)
def test_ipv6_parsing():
    """find_iocs should report full, compressed and loopback IPv6 forms,
    but not the ambiguous short form '1:1'."""
    text = ('2001:0db8:0000:0000:0000:ff00:0042:8329 testing '
            '2001:db8:0:0:0:ff00:42:8329 shfaldkafsdfa '
            '2001:db8::ff00:42:8329 asdfadfas afkj;fl ::1 kljfkadf 1:1')
    found = find_iocs(text)['ipv6s']
    assert len(found) == 4
    for expected in ('2001:0db8:0000:0000:0000:ff00:0042:8329',
                     '2001:db8:0:0:0:ff00:42:8329',
                     '2001:db8::ff00:42:8329',
                     '::1'):
        assert expected in found
    assert '1:1' not in found
class TreeNodeForeignKey(models.ForeignKey):
    """ForeignKey that renders with a tree-aware choice field in forms
    while deconstructing as a plain ForeignKey for migrations."""
    def deconstruct(self):
        (name, path, args, kwargs) = super().deconstruct()
        # Report the stock ForeignKey path so migrations don't depend on
        # this subclass.
        return (name, 'django.db.models.ForeignKey', args, kwargs)
    def formfield(self, **kwargs):
        # Default (but don't force) the tree-aware form field.
        kwargs.setdefault('form_class', TreeNodeChoiceField)
        return super().formfield(**kwargs)
def logout_validator_via_unsigned_msg(casper, concise_casper, mk_logout_msg_unsigned):
    """Factory fixture: return a helper that logs out a validator by
    submitting an unsigned logout message from *tx_sender_addr*."""
    def _logout(validator_index, tx_sender_addr):
        current_epoch = concise_casper.current_epoch()
        logout_tx = mk_logout_msg_unsigned(validator_index, current_epoch)
        casper.functions.logout(logout_tx).transact({'from': tx_sender_addr})
    return _logout
def test_index_inheritance():
    """MyMultiSubDoc must inherit from both parents and merge their mappings."""
    for parent in (MySubDoc, MyDoc2, document.Document):
        assert issubclass(MyMultiSubDoc, parent)
    for attr in ('_doc_type', '_index'):
        assert hasattr(MyMultiSubDoc, attr)
    expected_mapping = {'properties': {'created_at': {'type': 'date'}, 'name': {'type': 'keyword'}, 'title': {'type': 'keyword'}, 'inner': {'type': 'object', 'properties': {'old_field': {'type': 'text'}}}, 'extra': {'type': 'long'}}}
    assert expected_mapping == MyMultiSubDoc._doc_type.mapping.to_dict()
def test_parse_unsent_message():
    """A 'globally_deleted_message_placeholder' story attachment should
    parse into an UnsentMessage carrying the legacy attachment id."""
    data = {'legacy_attachment_id': 'ee.mid.$xyz', 'story_attachment': {'description': {'text': 'You removed a message'}, 'media': None, 'source': None, 'style_list': ['globally_deleted_message_placeholder', 'fallback'], 'title_with_entities': {'text': ''}, 'properties': [], 'url': None, 'deduplication_key': 'deadbeef123', 'action_links': [], 'messaging_attribution': None, 'messenger_call_to_actions': [], 'xma_layout_info': None, 'target': None, 'subattachments': []}, 'genie_attachment': {'genie_message': None}}
    assert (UnsentMessage(id='ee.mid.$xyz') == graphql_to_extensible_attachment(data))
def test_matcher_start_zero_plus(matcher):
    """A pattern of zero-or-more 'b' tokens followed by 'c' must match
    exactly once in each of these token sequences."""
    pattern = [{'ORTH': 'b', 'OP': '*'}, {'ORTH': 'c'}]
    matcher.add('TSTEND', [pattern])
    def make_doc(text):
        return Doc(matcher.vocab, words=text.split())
    for text in ('c', 'b c', 'a c', 'a b c', 'a b b c', 'b b c'):
        assert len(matcher(make_doc(text))) == 1
class rate_unit(bsn_tlv):
    """BSN rate-unit TLV (type 89): a single uint8 selecting the rate
    unit (0 = packets/s, 1 = kbit/s).

    Loxigen-generated-style serialization: wire format is
    uint16 type, uint16 length, uint8 value.
    """
    # TLV type discriminator on the wire.
    type = 89
    def __init__(self, value=None):
        # Default payload is 0 (OFP_BSN_RATE_UNIT_PPS).
        if (value != None):
            self.value = value
        else:
            self.value = 0
        return
    def pack(self):
        """Serialize to bytes; the length field is back-patched once the
        total size is known.
        NOTE(review): ''.join over struct.pack output is Python-2-era
        (str == bytes); on Python 3 this would need b''.join -- confirm
        the target interpreter."""
        packed = []
        packed.append(struct.pack('!H', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!B', self.value))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Deserialize from *reader*.  Presumably used as a staticmethod
        in the generated original -- TODO confirm a @staticmethod
        decorator was not lost upstream."""
        obj = rate_unit()
        _type = reader.read('!H')[0]
        assert (_type == 89)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict reads to this TLV's payload (length includes 4 header bytes).
        reader = orig_reader.slice(_length, 4)
        obj.value = reader.read('!B')[0]
        return obj
    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.value != other.value):
            return False
        return True
    def pretty_print(self, q):
        """Write a human-readable dump to pretty-printer *q*."""
        q.text('rate_unit {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('value = ')
                value_name_map = {0: 'OFP_BSN_RATE_UNIT_PPS', 1: 'OFP_BSN_RATE_UNIT_KBITPS'}
                if (self.value in value_name_map):
                    q.text(('%s(%d)' % (value_name_map[self.value], self.value)))
                else:
                    q.text(('%#x' % self.value))
        q.breakable()
        q.text('}')
def parseSongs(page):
    """Parse an AZLyrics artist page into ``{song title: metadata}``.

    Handles both the legacy ``#listAlbum`` layout (album headers as divs,
    songs as anchors) and the newer ``.listalbum-item`` layout.  Each
    entry carries year/album/type and an absolute URL.

    NOTE(review): the base-URL string literal was corrupted in this file
    (a dangling ``('`` where a prefix string used to be); restored as the
    AZLyrics host below -- confirm scheme/host against the original.
    """
    base_url = 'https://www.azlyrics.com'
    songs = {}
    parent = htmlFind(page)('div', {'id': 'listAlbum'})
    if parent:
        raw_data = parent.findChildren()
        cur_type, cur_name, cur_year = '', '', ''
        for elmnt in raw_data:
            if elmnt.name == 'div':
                if elmnt.text == 'other songs:':
                    # Trailing section without album metadata.
                    cur_type, cur_name, cur_year = 'Others', '', ''
                else:
                    # Album headers look like:  album: "Name" (1999)
                    rgx = re.findall(r'(.*):\s"(.*)"\s\(([0-9]+)\)', elmnt.text)
                    if rgx:
                        cur_type, cur_name, cur_year = rgx[0]
            if elmnt.name == 'a':
                href = elmnt['href']
                # Site-relative links get the absolute prefix.
                url = (base_url + href.strip('.')) if href.startswith('/lyrics/') else href
                songs[elmnt.text] = {'year': cur_year, 'album': cur_name, 'type': cur_type, 'url': url}
    else:
        for div in htmlFindAll(page)('div', {'class': 'listalbum-item'}):
            a = div.find('a')
            href = a['href']
            # '../lyrics/...' links become absolute; others pass through.
            url = (base_url + href[2:]) if href[:2] == '..' else href
            songs[a.text] = {'year': '', 'album': '', 'type': '', 'url': url}
    return songs
def package_metadata(app, repodir):
    """Build the index 'metadata' dict for *app*.

    Copies selected fields (first letter lower-cased), localizes
    name/summary/description/video under DEFAULT_LOCALE or from
    app['localized'], and resolves icon and donation entries.
    *repodir* locates the icon file on disk.
    """
    meta = {}
    for element in ('added', 'Categories', 'Changelog', 'IssueTracker', 'lastUpdated', 'License', 'SourceCode', 'Translation', 'WebSite', 'featureGraphic', 'promoGraphic', 'tvBanner', 'screenshots', 'AuthorEmail', 'AuthorName', 'AuthorPhone', 'AuthorWebSite', 'Bitcoin', 'FlattrID', 'Liberapay', 'Litecoin', 'OpenCollective'):
        if ((element in app) and app[element]):
            element_new = (element[:1].lower() + element[1:])
            meta[element_new] = convert_datetime(app[element])
    for element in ('Name', 'Summary', 'Description', 'video'):
        element_new = (element[:1].lower() + element[1:])
        if ((element in app) and app[element]):
            # A plain value becomes a one-entry localized dict.
            meta[element_new] = {DEFAULT_LOCALE: convert_datetime(app[element])}
        elif ('localized' in app):
            localized = {k: v[element_new] for (k, v) in app['localized'].items() if (element_new in v)}
            if localized:
                meta[element_new] = localized
    # assumes app always carries 'AutoName'/'Donate' keys (App defaults) -- TODO confirm
    if (('name' not in meta) and app['AutoName']):
        meta['name'] = {DEFAULT_LOCALE: app['AutoName']}
    # FIX: use .get() -- 'license' is absent when the app has no License
    # entry, and plain indexing raised KeyError here.
    if (meta.get('license') == 'Unknown'):
        del meta['license']
    if app['Donate']:
        meta['donate'] = [app['Donate']]
    if app.get('icon'):
        icon_path = os.path.join(repodir, 'icons', app['icon'])
        meta['icon'] = {DEFAULT_LOCALE: common.file_entry(icon_path)}
    if ('iconv2' in app):
        meta['icon'] = app['iconv2']
    return meta
def create_delivery_note(shopify_order, setting, so):
    """Create and submit a Delivery Note for each new Shopify fulfillment
    of sales order *so*, when delivery-note sync is enabled in *setting*.

    Fulfillments that already have a Delivery Note, or whose sales order
    is not submitted (docstatus != 1), are skipped.
    """
    if not cint(setting.sync_delivery_note):
        return
    for fulfillment in shopify_order.get('fulfillments'):
        already_synced = frappe.db.get_value('Delivery Note', {FULLFILLMENT_ID_FIELD: fulfillment.get('id')}, 'name')
        if already_synced or (so.docstatus != 1):
            continue
        dn = make_delivery_note(so.name)
        setattr(dn, ORDER_ID_FIELD, fulfillment.get('order_id'))
        setattr(dn, ORDER_NUMBER_FIELD, shopify_order.get('name'))
        setattr(dn, FULLFILLMENT_ID_FIELD, fulfillment.get('id'))
        dn.set_posting_time = 1
        dn.posting_date = getdate(fulfillment.get('created_at'))
        dn.naming_series = (setting.delivery_note_series or 'DN-Shopify-')
        dn.items = get_fulfillment_items(dn.items, fulfillment.get('line_items'), fulfillment.get('location_id'))
        dn.flags.ignore_mandatory = True
        dn.save()
        dn.submit()
        # FIX: this comment must be added inside the creation branch.  It
        # previously sat at loop level, where ``dn`` could be unbound
        # (NameError) if the first fulfillment was skipped.
        if shopify_order.get('note'):
            dn.add_comment(text=f"Order Note: {shopify_order.get('note')}")
class OptionPlotoptionsPolygonSonificationTracksMappingHighpass(Options):
    """Generated Highcharts options wrapper for the sonification highpass
    filter of polygon track mappings.
    NOTE(review): these accessors look like they were @property methods
    whose decorators were lost in extraction -- confirm upstream."""
    def frequency(self) -> 'OptionPlotoptionsPolygonSonificationTracksMappingHighpassFrequency':
        # Lazily-created sub-config for the highpass cutoff frequency.
        return self._config_sub_data('frequency', OptionPlotoptionsPolygonSonificationTracksMappingHighpassFrequency)
    def resonance(self) -> 'OptionPlotoptionsPolygonSonificationTracksMappingHighpassResonance':
        # Lazily-created sub-config for the highpass resonance.
        return self._config_sub_data('resonance', OptionPlotoptionsPolygonSonificationTracksMappingHighpassResonance)
def upgrade():
    """Alembic upgrade: add review support (Reviews table, task resource
    tables, dependency gap/target columns, new statuses) and drop the
    per-entity ``timing_resolution`` columns.

    The enum types are assumed to already exist (create_type=False).
    """
    time_unit_enum = postgresql.ENUM('min', 'h', 'd', 'w', 'm', 'y', name='TimeUnit', create_type=False)
    review_schedule_model_enum = postgresql.ENUM('effort', 'length', 'duration', name='ReviewScheduleModel', create_type=False)
    task_dependency_target_enum = postgresql.ENUM('onend', 'onstart', name='TaskDependencyTarget', create_type=False)
    task_dependency_gap_model = postgresql.ENUM('length', 'duration', name='TaskDependencyGapModel', create_type=False)
    resource_allocation_strategy_enum = postgresql.ENUM('minallocated', 'maxloaded', 'minloaded', 'order', 'random', name='ResourceAllocationStrategy', create_type=False)
    op.create_table('Reviews', sa.Column('id', sa.Integer(), nullable=False), sa.Column('task_id', sa.Integer(), nullable=False), sa.Column('reviewer_id', sa.Integer(), nullable=False), sa.Column('review_number', sa.Integer(), nullable=True), sa.Column('schedule_timing', sa.Float(), nullable=True), sa.Column('schedule_unit', time_unit_enum, nullable=False), sa.Column('schedule_constraint', sa.Integer(), nullable=False), sa.Column('schedule_model', review_schedule_model_enum, nullable=False), sa.Column('status_id', sa.Integer(), nullable=False), sa.Column('status_list_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['id'], ['SimpleEntities.id']), sa.ForeignKeyConstraint(['reviewer_id'], ['Users.id']), sa.ForeignKeyConstraint(['status_id'], ['Statuses.id']), sa.ForeignKeyConstraint(['status_list_id'], ['StatusLists.id']), sa.ForeignKeyConstraint(['task_id'], ['Tasks.id']), sa.PrimaryKeyConstraint('id'))
    op.create_table('Task_Responsible', sa.Column('task_id', sa.Integer(), nullable=False), sa.Column('responsible_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['responsible_id'], ['Users.id']), sa.ForeignKeyConstraint(['task_id'], ['Tasks.id']), sa.PrimaryKeyConstraint('task_id', 'responsible_id'))
    op.create_table('Task_Alternative_Resources', sa.Column('task_id', sa.Integer(), nullable=False), sa.Column('resource_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['resource_id'], ['Users.id']), sa.ForeignKeyConstraint(['task_id'], ['Tasks.id']), sa.PrimaryKeyConstraint('task_id', 'resource_id'))
    op.create_table('Task_Computed_Resources', sa.Column('task_id', sa.Integer(), nullable=False), sa.Column('resource_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['resource_id'], ['Users.id']), sa.ForeignKeyConstraint(['task_id'], ['Tasks.id']), sa.PrimaryKeyConstraint('task_id', 'resource_id'))
    op.add_column('EntityTypes', sa.Column('dateable', sa.Boolean(), nullable=True))
    op.drop_column('Projects', 'timing_resolution')
    op.add_column('Studios', sa.Column('is_scheduling', sa.Boolean(), nullable=True))
    op.add_column('Studios', sa.Column('is_scheduling_by_id', sa.Integer(), nullable=True))
    op.add_column('Studios', sa.Column('last_schedule_message', sa.PickleType(), nullable=True))
    op.add_column('Studios', sa.Column('last_scheduled_at', sa.DateTime(), nullable=True))
    op.add_column('Studios', sa.Column('last_scheduled_by_id', sa.Integer(), nullable=True))
    op.add_column('Studios', sa.Column('scheduling_started_at', sa.DateTime(), nullable=True))
    op.drop_column('Studios', 'daily_working_hours')
    # New columns are added nullable, backfilled, then made NOT NULL so the
    # migration works on populated tables.
    op.add_column('Task_Dependencies', sa.Column('dependency_target', task_dependency_target_enum, nullable=True))
    op.execute('\n    UPDATE\n        "Task_Dependencies"\n    SET\n        dependency_target = \'onend\'\n    ')
    op.alter_column('Task_Dependencies', 'dependency_target', existing_nullable=True, nullable=False)
    op.alter_column('Task_Dependencies', 'depends_to_task_id', new_column_name='depends_to_id')
    op.add_column('Task_Dependencies', sa.Column('gap_constraint', sa.Integer(), nullable=True))
    op.execute('\n    UPDATE\n        "Task_Dependencies"\n    SET\n        gap_constraint = 0\n    ')
    op.alter_column('Task_Dependencies', 'gap_constraint', existing_nullable=True, nullable=False)
    op.add_column('Task_Dependencies', sa.Column('gap_model', task_dependency_gap_model, nullable=True))
    op.execute('\n    UPDATE\n        "Task_Dependencies"\n    SET\n        gap_model = \'length\'\n    ')
    op.alter_column('Task_Dependencies', 'gap_model', existing_nullable=True, nullable=False)
    op.add_column('Task_Dependencies', sa.Column('gap_timing', sa.Float(), nullable=True))
    op.add_column('Task_Dependencies', sa.Column('gap_unit', time_unit_enum, nullable=True))
    op.execute('UPDATE "Task_Dependencies" SET gap_timing = 0')
    op.alter_column('Task_Dependencies', 'gap_timing', existing_nullable=True, nullable=False)
    op.add_column('Tasks', sa.Column('review_number', sa.Integer(), nullable=True))
    op.add_column('Tasks', sa.Column('allocation_strategy', resource_allocation_strategy_enum, nullable=True))
    op.execute('UPDATE "Tasks" SET allocation_strategy = \'minallocated\'')
    op.alter_column('Tasks', 'allocation_strategy', existing_nullable=True, nullable=False)
    op.add_column('Tasks', sa.Column('persistent_allocation', sa.Boolean(), nullable=True))
    op.execute('UPDATE "Tasks" SET persistent_allocation = TRUE')
    op.alter_column('Tasks', 'persistent_allocation', existing_nullable=True, nullable=False)
    op.drop_column('Tasks', 'timing_resolution')
    op.drop_column('TimeLogs', 'timing_resolution')
    op.create_unique_constraint(None, 'Users', ['login'])
    op.drop_column('Vacations', 'timing_resolution')
    # Migrate single responsible_id values into the new association table.
    op.execute('insert into "Task_Responsible" select id, responsible_id from "Tasks" where responsible_id is not NULL')
    op.drop_column('Tasks', 'responsible_id')
    def create_status(name, code):
        """Insert one Status (SimpleEntities + Entities + Statuses rows)."""
        op.execute(('INSERT INTO "SimpleEntities" (entity_type, name, description,\ncreated_by_id, updated_by_id, date_created, date_updated, type_id,\nthumbnail_id, html_style, html_class, stalker_version)\nVALUES (\'Status\', \'%(name)s\', \'\', NULL, NULL,\n(SELECT CAST(NOW() at time zone \'utc\' AS timestamp)), (SELECT CAST(NOW() at time zone \'utc\' AS timestamp)), NULL, NULL,\n\'\', \'\', \'%(stalker_version)s\')' % {'stalker_version': stalker.__version__, 'name': name}))
        op.execute(('INSERT INTO "Entities" (id)\n    VALUES ((\n        SELECT id\n        FROM "SimpleEntities"\n        WHERE "SimpleEntities".name = \'%(name)s\'\n    ));\n    INSERT INTO "Statuses" (id, code)\n    VALUES ((\n        SELECT id\n        FROM "SimpleEntities"\n        WHERE "SimpleEntities".name = \'%(name)s\'), \'%(code)s\');' % {'name': name, 'code': code}))
    create_status('Waiting For Dependency', 'WFD')
    create_status('Dependency Has Revision', 'DREV')
    create_status('On Hold', 'OH')
    create_status('Stopped', 'STOP')
    create_status('Requested Revision', 'RREV')
    create_status('Approved', 'APP')
    def update_status_lists(entity_type, status_code):
        """Idempotently attach a status to an entity type's status list."""
        op.execute(('CREATE OR REPLACE FUNCTION add_status_to_status_list(status_list_id INT, status_id INT) RETURNS VOID AS $$\n    BEGIN\n        INSERT INTO "StatusList_Statuses" (status_list_id, status_id)\n        VALUES (status_list_id, status_id);\n    EXCEPTION WHEN OTHERS THEN\n        -- do nothning\n    END;\n    $$\n    LANGUAGE \'plpgsql\';\n\n    select NULL from add_status_to_status_list(\n        (SELECT id FROM "StatusLists" WHERE target_entity_type = \'%(entity_type)s\'),\n        (SELECT id FROM "Statuses" WHERE code = \'%(status_code)s\')\n    );' % {'entity_type': entity_type, 'status_code': status_code}))
    for t in ['Task', 'Asset', 'Shot', 'Sequence']:
        for s in ['WFD', 'RTS', 'WIP', 'OH', 'STOP', 'PREV', 'HREV', 'DREV', 'CMPL']:
            update_status_lists(t, s)
    op.execute('drop function add_status_to_status_list(integer, integer);')
    op.execute('DELETE FROM "StatusList_Statuses"\nWHERE status_list_id = (SELECT id FROM "StatusLists"\n    WHERE target_entity_type = \'Task\')\nAND status_id = (SELECT id FROM "Statuses" WHERE "Statuses".code = \'NEW\')\n')
    op.execute('DELETE FROM "StatusList_Statuses"\nWHERE status_list_id = (SELECT id FROM "StatusLists"\nWHERE target_entity_type = \'Asset\')\nAND status_id = (SELECT id FROM "Statuses" WHERE "Statuses".code = \'NEW\')\n')
    op.execute('DELETE FROM "StatusList_Statuses"\nWHERE status_list_id = (SELECT id FROM "StatusLists"\nWHERE target_entity_type = \'Shot\')\nAND status_id = (SELECT id FROM "Statuses" WHERE "Statuses".code = \'NEW\')\n')
    op.execute('DELETE FROM "StatusList_Statuses"\nWHERE status_list_id = (SELECT id FROM "StatusLists"\nWHERE target_entity_type = \'Sequence\')\nAND status_id = (SELECT id FROM "Statuses" WHERE "Statuses".code = \'NEW\')\n')
    op.execute(('INSERT INTO "SimpleEntities" (entity_type, name, description,\ncreated_by_id, updated_by_id, date_created, date_updated, type_id,\nthumbnail_id, html_style, html_class, stalker_version)\nVALUES (\'StatusList\', \'Review Status List\', \'\', NULL, NULL,\n(SELECT CAST(NOW() at time zone \'utc\' AS timestamp)),\n(SELECT CAST(NOW() at time zone \'utc\' AS timestamp)), NULL, NULL,\n\'\', \'\', \'%(stalker_version)s\')' % {'stalker_version': stalker.__version__}))
    op.execute('INSERT INTO "Entities" (id)\nVALUES ((\n    SELECT id\n    FROM "SimpleEntities"\n    WHERE "SimpleEntities".name = \'Review Status List\'\n));\nINSERT INTO "StatusLists" (id, target_entity_type)\nVALUES ((\n    SELECT id\n    FROM "SimpleEntities"\n    WHERE "SimpleEntities".name = \'Review Status List\'), \'Review\');')
    op.execute('INSERT INTO "StatusList_Statuses" (status_list_id, status_id)\nVALUES\n    ((SELECT id FROM "StatusLists" WHERE target_entity_type = \'Review\'),\n     (SELECT id FROM "Statuses" WHERE code = \'NEW\')),\n    ((SELECT id FROM "StatusLists" WHERE target_entity_type = \'Review\'),\n     (SELECT id FROM "Statuses" WHERE code = \'RREV\')),\n    ((SELECT id FROM "StatusLists" WHERE target_entity_type = \'Review\'),\n     (SELECT id FROM "Statuses" WHERE code = \'APP\'))\n')
    op.execute('update "Tasks"\nset status_id = (select id from "Statuses" where code=\'WFD\')\nwhere status_id = (select id from "Statuses" where code=\'NEW\')')
    op.execute('update "Tasks"\nset status_id = (select id from "Statuses" where code=\'WIP\')\nwhere status_id = (select id from "Statuses" where code=\'PREV\')')
    # FIX: this previously used map() for its side effects.  map() is lazy
    # under Python 3, so these DELETE statements never executed; iterate
    # explicitly to force evaluation.
    for entity_type in ['Task', 'Asset', 'Shot', 'Sequence']:
        op.execute(('DELETE FROM "StatusList_Statuses"\n    WHERE status_list_id=(\n        SELECT id\n        FROM "StatusLists"\n        WHERE target_entity_type=\'%s\')\n    AND status_id in (\n        SELECT id\n        FROM "Statuses"\n        WHERE code NOT IN\n            (\'WFD\', \'RTS\', \'WIP\', \'OH\', \'STOP\', \'PREV\', \'HREV\', \'DREV\', \'CMPL\')\n    );' % entity_type))
    op.execute('update "Tasks" set review_number = 0')
    op.alter_column('Shots', '_cut_in', new_column_name='cut_in')
    op.alter_column('Shots', '_cut_out', new_column_name='cut_out')
    op.alter_column('Tasks', '_schedule_seconds', new_column_name='schedule_seconds')
    op.alter_column('Tasks', '_total_logged_seconds', new_column_name='total_logged_seconds')
# NOTE(review): the leading line looks like a '@pytest.mark.parametrize'
# whose prefix was lost in extraction -- restore before running.
.parametrize('mesh_thunk', [(lambda : UnitSquareMesh(5, 5, quadrilateral=False)), (lambda : UnitSquareMesh(5, 5, quadrilateral=True)), (lambda : UnitIcosahedralSphereMesh(2)), (lambda : UnitCubedSphereMesh(3)), (lambda : Mesh(join(cwd, '..', 'meshes', 'unitsquare_unstructured_quadrilaterals.msh')))])
def test_consistent_facet_orientation(mesh_thunk):
    """Interpolating the same expression into equispaced CG and DG spaces
    must give identical nodal values, i.e. facet orientation is consistent."""
    mesh = mesh_thunk()
    x = SpatialCoordinate(mesh)
    degree = 3
    fe_cg = FiniteElement('CG', mesh.ufl_cell(), degree, variant='equispaced')
    V = FunctionSpace(mesh, fe_cg)
    fe_dg = FiniteElement('DG', mesh.ufl_cell(), degree, variant='equispaced')
    W = FunctionSpace(mesh, fe_dg)
    Q = FunctionSpace(mesh, 'DG', 0)
    expression = ((x[0] * (x[0] + sqrt(2.0))) + x[1])
    f = Function(V).interpolate(expression)
    g = Function(W).interpolate(expression)
    q = Function(Q).interpolate(Constant(0.0))
    # Per-cell kernel: record the max absolute CG/DG dof difference in q.
    domain = '{[i]: 0 <= i < C.dofs}'
    instructions = '\n    for i\n        R[0, 0] = fmax(real(R[0, 0]), abs(C[i, 0] - D[i, 0]))\n    end\n    '
    par_loop((domain, instructions), dx, {'C': (f, READ), 'D': (g, READ), 'R': (q, RW)})
    assert np.allclose(q.dat.data, 0.0)
class UnicommerceShipmentManifest(Document):
    """Frappe doctype controller for a Unicommerce shipment manifest.

    On validate it enriches manifest items from their Sales Invoices; on
    submit it creates+closes the manifest on Unicommerce, attaches the
    manifest PDF, and flags the invoices as manifested.
    """
    def validate(self):
        self.set_shipping_method()
        self.set_unicommerce_details()
    def before_submit(self):
        self.create_and_close_manifest_on_unicommerce()
        self.update_manifest_status()
    def set_shipping_method(self):
        """Flag whether the marketplace (channel) handles shipping."""
        self.third_party_shipping = cint(frappe.db.get_value('Unicommerce Channel', self.channel_id, 'shipping_handled_by_marketplace'))
    def set_unicommerce_details(self):
        """Copy invoice details onto each manifest row, rejecting rows from
        other channels or already-manifested packages."""
        for package in self.manifest_items:
            package_info = get_sales_invoice_details(package.sales_invoice)
            if (self.channel_id != package_info.get(CHANNEL_ID_FIELD)):
                frappe.throw(_('Row #{} : Only {} channel packages can be added in this manifest').format(package.idx, self.channel_id))
            if cint(package_info.get(MANIFEST_GENERATED_CHECK)):
                frappe.throw(_('Row #{}: Manifest is already generated, please remove package.').format(package.idx))
            for (invoice_field, manifest_field) in FIELD_MAPPING.items():
                package.set(manifest_field, package_info[invoice_field])
            package.awb_barcode = package.awb_no
    def get_facility_code(self) -> str:
        """Return the single facility code shared by all rows, or throw."""
        facility_codes = {package.facility_code for package in self.manifest_items}
        if (len(facility_codes) != 1):
            frappe.throw(_('Shipping manifest should only have one facility code, found: {}').format(','.join(facility_codes)))
        return list(facility_codes)[0]
    def create_and_close_manifest_on_unicommerce(self):
        """Create and close the manifest via the Unicommerce API, then store
        its code/id and attach the generated PDF."""
        shipping_packages = [d.shipping_package_code for d in self.manifest_items]
        facility_code = self.get_facility_code()
        client = UnicommerceAPIClient()
        response = client.create_and_close_shipping_manifest(channel=self.channel_id, shipping_provider_code=self.shipping_provider_code, shipping_method_code=self.shipping_method_code, shipping_packages=shipping_packages, facility_code=facility_code, third_party_shipping=self.third_party_shipping)
        if (not response):
            frappe.throw(_('Failed to Generate Manifest on Unicommerce'))
        status = response.get('shippingManifestStatus')
        pdf_link = status.get('shippingManifestLink')
        manifest_code = status.get('shippingManifestCode')
        manifest_id = status.get('id')
        self.unicommerce_manifest_code = manifest_code
        self.unicommerce_manifest_id = manifest_id
        self.attach_unicommerce_manifest_pdf(pdf_link, manifest_code)
    def attach_unicommerce_manifest_pdf(self, link, manifest_code):
        """Best-effort: fetch the manifest PDF and attach it privately;
        silently skips when the link or download is unavailable."""
        if (not link):
            return
        pdf_b64 = fetch_pdf_as_base64(link)
        if (not pdf_b64):
            return
        manifest_code = remove_non_alphanumeric_chars(manifest_code)
        save_file(f'unicommerce-manifest-{manifest_code}.pdf', pdf_b64, self.doctype, self.name, decode=True, is_private=1)
    def update_manifest_status(self):
        """Mark all linked Sales Invoices as manifest-generated."""
        si_codes = [package.sales_invoice for package in self.manifest_items]
        frappe.db.set_value('Sales Invoice', {'name': ('in', si_codes)}, MANIFEST_GENERATED_CHECK, 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.