code stringlengths 281 23.7M |
|---|
def _read_from_stream(stream: (BytesIO | StringIO), size: (int | None), seek: (int | None)) -> Generator[(bytes, None, None)]:
was_at = stream.tell()
if (seek is not None):
stream.seek(seek)
try:
data = stream.read(size)
(yield (data if isinstance(data, bytes) else data.encode()))
finally:
stream.seek(was_at) |
def test_optional_list_of_ints():
    """'--a [...]' parses into an Optional[List[int]] field; default is []."""
    class Bob(TestSetup):
        a: Optional[List[int]] = field(default_factory=list)

    assert Bob.setup('--a [1]') == Bob(a=[1])
    assert Bob.setup('--a [1,2,3]') == Bob(a=[1, 2, 3])
    # An explicit empty list and no argument at all both yield [].
    assert Bob.setup('--a []') == Bob(a=[])
    assert Bob.setup('') == Bob(a=[])
class ObjectNode(MultiValueTreeNodeObject):
    """Tree node adapter that renders an arbitrary Python object and exposes
    the object's instance attributes (``__dict__``) as child nodes."""

    def format_value(self, value):
        """Return a display label like ``ClassName(0x0000ABCD)`` for *value*."""
        try:
            klass = value.__class__.__name__
        except Exception:  # narrowed from bare except: some proxies lack __class__
            klass = '???'
        return '%s(0x%08X)' % (klass, id(value))

    def tno_has_children(self, node):
        """Return True when the wrapped object has at least one instance attribute."""
        try:
            return len(self.value.__dict__) > 0
        except Exception:  # narrowed from bare except: __slots__ objects have no __dict__
            return False

    def tno_get_children(self, node):
        """Return child nodes for each instance attribute, sorted by name."""
        items = sorted(self.value.__dict__.items(), key=itemgetter(0))
        return [self.node_for('.' + k, v) for (k, v) in items]
class Vk(IntervalModule):
    """Status-bar module showing unread/total VK dialog counts.

    NOTE(review): this block was damaged during extraction -- the API_LINK
    URL literal is truncated (unterminated string) and the bare
    ``(internet)`` lines below look like stripped ``@require(internet)``
    decorators.  Restore both from the original source before use.
    """
    API_LINK = '
    app_id = 5160484        # VK application id used for the OAuth request
    access_token = None     # user token; needs `messages` and `offline` scopes
    session = None          # vk.AuthSession, created in init() when a token exists
    token_error = 'Vk: token error'
    format = '{unread}/{total}'   # output template
    interval = 1
    color = '#ffffff'
    color_unread = '#ffffff'
    color_bad = '#ff0000'
    settings = (('app_id', 'Id of your VK API app'), ('access_token', 'Your access token. You must have `messages` and `offline` access permissions'), ('token_error', "Message to be shown if there's some problem with your token"), ('color', 'General color of the output'), ('color_bad', 'Color of the output in case of access token error'), ('color_unread', 'Color of the output if there are unread messages'))

    # NOTE(review): presumably a stripped @require(internet) decorator.
    (internet)
    def token_request(self, func):
        # Open the OAuth page in the user's browser and swap run() for *func*
        # so the module idles (or shows an error) until a token is supplied.
        user_open(self.API_LINK.format(id=self.app_id))
        self.run = func

    (internet)
    def init(self):
        if self.access_token:
            self.session = vk.AuthSession(app_id=self.app_id, access_token=self.access_token)
            self.api = vk.API(self.session, v='5.40', lang='en', timeout=10)
            try:
                # Bit checks against VK's permission mask (4096 and 65536 --
                # presumably the `messages` and `offline` scopes; confirm
                # against the VK permissions documentation).
                permissions = int(self.api.account.getAppPermissions())
                assert (((permissions & 65536) == 65536) and ((permissions & 4096) == 4096))
            except:  # bare except also catches the AssertionError above by design
                self.token_request(self.error)
        else:
            self.token_request((lambda : None))

    (internet)
    def run(self):
        # Two API calls: total dialog count, then unread-only count.
        total = self.api.messages.getDialogs()['count']
        unread = self.api.messages.getDialogs(unread=1)['count']
        if (unread > 0):
            color = self.color_unread
        else:
            color = self.color
        self.output = {'full_text': self.format.format(total=total, unread=unread), 'color': color}

    def error(self):
        # Shown when the token is missing or rejected.
        self.output = {'full_text': self.token_error, 'color': self.color_bad}
class Article(models.Model):
    # Blog article model.  NOTE(review): every verbose_name below is an
    # empty string -- the original (likely non-ASCII) labels appear to have
    # been stripped during extraction; restore them from the source repo.

    # UUID primary key instead of the default auto-increment integer.
    id = models.UUIDField(default=uuid.uuid4, primary_key=True)
    authors = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='')
    category = models.ForeignKey(Category_Article, on_delete=models.CASCADE, verbose_name='')
    title = models.CharField(max_length=100)
    keywords = models.CharField(max_length=200, blank=True, null=True)
    desc = models.CharField(max_length=256, blank=True, null=True)
    # Thumbnail shown in article lists, stored under a per-day directory.
    list_pic = models.ImageField(upload_to='article/%Y%m%d', blank=True, null=True)
    content = models.TextField()
    click_nums = models.IntegerField(default=0, verbose_name='')
    is_show = models.BooleanField(default=True, verbose_name='')
    add_time = models.DateTimeField(auto_now_add=True)

    def get_number(self):
        # Total discussion count: direct comments plus every reply under them.
        n = self.article_comment_set.all()
        num = self.article_comment_set.count()
        for i in n:
            num += i.articlecommentreply_set.count()
        return num

    def __str__(self):
        return self.title

    class Meta():
        verbose_name = ''
        verbose_name_plural = verbose_name
        ordering = ('-add_time',)  # newest articles first
def getSpecialCase(specialcase):
    """Poll the rate-limited fetch queues and return the first deferred job
    whose per-netloc delay has elapsed.

    Returns a (rid, joburl, netloc) tuple, or (None, None, None) when no
    deferred item is ready.
    """
    log.info('Special case handler checking for deferred fetches.')
    with FETCH_LOCK:
        for key, entry in RATE_LIMIT_ITEMS.items():
            if entry['ntime'] >= time.time():
                log.info("Not yet ready to fetch for '%s' (%s < %s)", key, entry['ntime'], time.time())
                continue
            try:
                rid, joburl, netloc = entry['queue'].get_nowait()
            except queue.Empty:
                # Queue drained: disarm the timer for this key.
                entry['ntime'] = -1
            else:
                # Push the next-allowed time forward by this netloc's rate.
                entry['ntime'] += specialcase[netloc][1]
                log.info("Deferred special case item for url '%s' ready. Returning.", joburl)
                return (rid, joburl, netloc)
    return (None, None, None)
class HistogramListStatsRequest(base_tests.SimpleProtocol):
    """Verify a bsn_generic_stats_request for 'histograms' returns entries
    whose single name TLV set includes the 'test' histogram."""

    def runTest(self):
        request = ofp.message.bsn_generic_stats_request(name='histograms')
        entries = get_stats(self, request)
        names = []
        for entry in entries:
            # assertEqual replaces the deprecated assertEquals alias
            # (removed in Python 3.12).
            self.assertEqual(1, len(entry.tlvs))
            self.assertIsInstance(entry.tlvs[0], ofp.bsn_tlv.name)
            names.append(entry.tlvs[0].value)
        logging.debug('Histograms: %r', names)
        self.assertIn('test', names)
class Vortex_phi(object):
    """Level-set / smoothed-Heaviside profile for a circular bubble, used by
    the vortex advection benchmark."""

    def __init__(self, center=(0.5, 0.75, 0.5), radius=0.15):
        # A tuple default replaces the original mutable-list default, which
        # was a single shared object across all instances relying on it.
        self.radius = radius
        self.center = center

    def uOfX(self, X):
        """Smoothed Heaviside of the signed distance from X to the bubble
        rim (positive inside).  Only the first two coordinates are used."""
        dx = X[0] - self.center[0]
        dy = X[1] - self.center[1]
        dBubble = self.radius - sqrt(dx ** 2 + dy ** 2)
        # epsFactHeaviside and he are module-level smoothing parameters
        # defined elsewhere in the file.
        return smoothedHeaviside(epsFactHeaviside * he, dBubble)

    def uOfXT(self, X, t):
        # The field is stationary: time is ignored.
        return self.uOfX(X)
class OptionSeriesLineSonificationContexttracksMappingPan(Options):
    # Getter/setter pairs delegating to the shared Options config store.
    # NOTE(review): each name is defined twice (getter, then setter); the
    # @property / @<name>.setter decorators were evidently stripped during
    # extraction -- as written each second def simply shadows the first.
    # Restore the decorators from the original source.
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class CallResFrame(FrameWithArgs, CallFlags):
    """'Call response' frame (type 4): flags, response code, 25-byte tracing
    blob, key/value transport headers, checksum type, then the args."""

    TYPE = 4

    def __init__(self):
        FrameWithArgs.__init__(self)
        CallFlags.__init__(self)
        self.code = 0                    # response code byte
        self.tracing = bytes(25)         # fixed-size tracing span
        self.headers = KVHeaders({}, 1)  # headers with 1-byte length prefixes
        self.csumtype = 0                # only checksum type 0 is supported

    def read_payload(self, fp: IOWrapper, size: int):
        """Parse the payload from *fp*; *size* is the payload byte length."""
        offset = 0
        self.flags = fp.read_byte('callres.flags')
        offset += 1
        self.code = fp.read_byte('callres.code')
        offset += 1
        self.tracing = fp.read_bytes(25, 'callres.tracing')
        offset += 25
        self.headers = KVHeaders.read_kv_headers(fp, 1, 'callreq.headers')
        offset += self.headers.size()
        self.csumtype = fp.read_byte('callres.csumtype')
        offset += 1
        if self.csumtype != 0:
            raise NotImplementedError('Checksum type not supported')
        self.args = []
        arg_count = 1
        # Remaining bytes are args; fragmentation is decided by the flags.
        while offset < size:
            arg = Arg.read_arg(fp, offset, size, self.is_more_fragments_follow(), 'callres.args[%d]' % arg_count)
            self.args.append(arg)
            offset += arg.size()
            arg_count += 1

    def get_payload_size(self):
        """Total payload size in bytes.

        Fix: the original expression referenced an undefined ``_`` helper
        (``_.flag(1) + _.code(1) + ...``) and raised NameError when called;
        the fixed field widths match read_payload/write_payload.
        """
        return (1                        # flags
                + 1                      # code
                + 25                     # tracing
                + self.headers.size()
                + 1                      # csumtype
                + sum(arg.size() for arg in self.args))

    def write_payload(self, fp: IOWrapper):
        """Serialize the payload; asserts the byte count matches get_payload_size()."""
        offset = 0
        fp.write_byte(self.flags)
        offset += 1
        fp.write_byte(self.code)
        offset += 1
        fp.write_bytes(self.tracing)
        offset += 25
        self.headers.write_headers(fp)
        offset += self.headers.size()
        fp.write_byte(self.csumtype)
        offset += 1
        for arg in self.args:
            arg.write_arg(fp)
            offset += arg.size()
        assert offset == self.get_payload_size()
class TestCTraitNotifiers(unittest.TestCase):
    """Tests for the CTrait._notifiers accessor."""

    def test_notifiers_empty(self):
        class Foo(HasTraits):
            x = Int()

        ctrait = Foo(x=1).trait('x')
        # No static change handler defined, so the notifier list is empty.
        self.assertEqual(ctrait._notifiers(True), [])

    def test_notifiers_on_trait(self):
        class Foo(HasTraits):
            x = Int()

            def _x_changed(self):
                pass

        ctrait = Foo(x=1).trait('x')
        # The statically-defined _x_changed handler produces one notifier.
        notifiers = ctrait._notifiers(True)
        self.assertEqual(len(notifiers), 1)
        self.assertEqual(notifiers[0].handler, Foo._x_changed)
class UnionMeta(Meta):
    """Metaclass implementing ``Union[...]`` subscription for the template
    type system: subscription builds a concrete Union subclass carrying its
    member types and a fresh template variable."""

    def __getitem__(self, types):
        """Create a Union subclass for *types* (a single type, a type-name
        string, or a tuple of either)."""
        types_in = types
        if (not isinstance(types_in, tuple)):
            types_in = (types_in,)
        types = []
        for type_ in types_in:
            # String entries are resolved to real types by name.
            if isinstance(type_, str):
                type_ = str2type(type_)
            types.append(type_)
        types = tuple(types)
        # The template variable records which module wrote the Union[...].
        name_calling_module = get_name_calling_module()
        template_var = UnionVar(*types, name_calling_module=name_calling_module)
        # Build a readable generated class name such as Union_int_float.
        short_repr = []
        for value in types:
            if hasattr(value, 'short_repr'):
                short_repr.append(value.short_repr())
            elif hasattr(value, '__name__'):
                short_repr.append(value.__name__)
            else:
                short_repr.append(repr(value))
        return type(f"Union{'_'.join(short_repr)}", (Union,), {'types': types, 'template_var': template_var})

    def get_template_parameters(self):
        """Collect template parameters of all member types, then append this
        union's own template variable last."""
        template_params = []
        for type_ in self.types:
            if hasattr(type_, 'get_template_parameters'):
                template_params.extend(type_.get_template_parameters())
        template_params.append(self.template_var)
        return tuple(template_params)

    def __repr__(self):
        """Human-readable ``Union[A, B]`` form; falls back to the default
        repr before ``types`` is populated (i.e. on the base class)."""
        strings = []
        if (not hasattr(self, 'types')):
            return super().__repr__()
        for p in self.types:
            if isinstance(p, Meta):
                string = repr(p)
            elif isinstance(p, type):
                string = p.__name__
            else:
                string = repr(p)
            strings.append(string)
        return (('Union[' + ', '.join(strings)) + ']')

    def format_as_backend_type(self, backend_type_formatter, **kwargs):
        """Pop the concrete type bound to this union's template variable from
        kwargs and format it for the backend."""
        type_ = kwargs.pop(self.template_var.__name__)
        return format_type_as_backend_type(type_, backend_type_formatter, **kwargs)

    def short_repr(self):
        # The generated class name (e.g. Union_int_float) is already short.
        return self.__name__
def _tree_to_dict(config_file: str, pre_defines: Defines, tree: Tree[Instruction], schema: SchemaItemDict, site_config: Optional[ConfigDict]=None) -> ConfigDict:
    """Fold a parsed config instruction tree into a ConfigDict.

    Starts from *site_config* when given, seeds DEFINE with *pre_defines*,
    applies each keyword's schema constraints, and raises a single collected
    ConfigValidationError at the end if any keyword failed validation.
    """
    config_dict = (site_config if site_config else {})
    defines = pre_defines.copy()
    config_dict['DEFINE'] = defines
    errors = []
    # Relative paths in arguments are resolved against the config file's dir.
    cwd = os.path.dirname(os.path.abspath(config_file))
    for node in tree.children:
        args: List[FileContextToken]
        kw: FileContextToken
        (kw, *args) = node
        if (kw not in schema):
            # Unknown keywords produce a contextual warning and are skipped.
            ConfigWarning.ert_context_warn(f'Unknown keyword {kw!r}', kw)
            continue
        constraints = schema[kw]
        if (kw != constraints.kw):
            # Normalize aliases/casing to the canonical schema keyword.
            kw.value = constraints.kw
        try:
            args = constraints.join_args(args)
            args = _substitute_args(args, constraints, defines)
            value_list = constraints.apply_constraints(args, kw, cwd)
            arglist = config_dict.get(kw, [])
            if (kw == 'DEFINE'):
                (define_key, *define_args) = value_list
                # Re-DEFINE of an existing key updates its value in place.
                existing_define = next((define for define in arglist if (define[0] == define_key)), None)
                if existing_define:
                    existing_define[1:] = define_args
                else:
                    arglist.append(value_list)
            elif constraints.multi_occurrence:
                # Repeatable keywords accumulate one value_list per occurrence.
                arglist.append(value_list)
                config_dict[kw] = arglist
            else:
                # Single-occurrence keywords: last occurrence wins.
                config_dict[kw] = value_list
        except ConfigValidationError as e:
            if (not constraints.multi_occurrence):
                # Presumably marks the keyword present-but-invalid so the
                # required-keyword check below does not double-report it.
                config_dict[kw] = None
            errors.append(e)
    try:
        schema.check_required(config_dict, filename=config_file)
    except ConfigValidationError as e:
        errors.append(e)
    if (len(errors) > 0):
        raise ConfigValidationError.from_collected(errors)
    return config_dict
def test_pytorch_task(serialization_settings: SerializationSettings):
    """PyTorch task plugin: config propagation, custom serialization, resources."""

    # NOTE(review): this decorator was stripped to a bare keyword-argument
    # tuple in the extracted source (a syntax error); @task is the flytekit
    # decorator these arguments belong to -- confirm against the original.
    @task(task_config=PyTorch(num_workers=10), cache=True, cache_version='1', requests=Resources(cpu='1'))
    def my_pytorch_task(x: int, y: str) -> int:
        return x

    assert my_pytorch_task(x=10, y='hello') == 10
    assert my_pytorch_task.task_config is not None
    assert my_pytorch_task.get_custom(serialization_settings) == {
        'workerReplicas': {'replicas': 10, 'resources': {}},
        'masterReplicas': {'replicas': 1, 'resources': {}},
    }
    assert my_pytorch_task.resources.limits == Resources()
    assert my_pytorch_task.resources.requests == Resources(cpu='1')
    assert my_pytorch_task.task_type == 'pytorch'
    assert my_pytorch_task.task_type_version == 1
class OptionsBasic(OptionsWithTemplates):
    # Bootstrap table options: each pair below is a getter plus a setter
    # that toggles the matching 'table-*' CSS class on the component.
    # NOTE(review): the @property / @<name>.setter decorators appear to have
    # been stripped during extraction (see also the bare '_header.setter' /
    # '_hover.setter' remnants below) -- restore them from the original.
    def bordered(self):
        return self._config_get()
    def bordered(self, flag):
        # True adds table-bordered; False adds table-borderless.
        if flag:
            self.component.attr['class'].add('table-bordered')
        else:
            self.component.attr['class'].add('table-borderless')
    def hover(self):
        return self._config_get()
    def hover(self, flag):
        if flag:
            self.component.attr['class'].add('table-hover')
    def size(self, alias):
        # e.g. alias='sm' -> 'table-sm'.
        self.component.attr['class'].add(('table-%s' % alias))
    def striped(self):
        return self._config_get()
    def striped(self, flag):
        if flag:
            self.component.attr['class'].add('table-striped')
    def responsive(self):
        return self._config_get()
    def responsive(self, flag):
        if flag:
            self.component.attr['class'].add('table-responsive')
    def rowCssClasses(self):
        return self._config_get([])
    def rowCssClasses(self, values):
        self._config(values)
    def colCssClasses(self):
        return self._config_get([])
    def colCssClasses(self, values):
        self._config(values)
    def with_header(self):
        return self._config_get(True)
    # NOTE(review): truncated decorator remnant -- likely '@with_header.setter'.
    _header.setter
    def with_header(self, flag):
        self._config(flag)
    def with_hover(self):
        return self._config_get(True)
    # NOTE(review): truncated decorator remnant -- likely '@with_hover.setter'.
    _hover.setter
    def with_hover(self, flag):
        self._config(flag)
class OptionPlotoptionsOrganizationSonificationContexttracksPointgrouping(Options):
    # Getter/setter pairs delegating to the shared Options config store;
    # getter defaults: algorithm='minmax', enabled=True, groupTimespan=15,
    # prop='y'.
    # NOTE(review): each name is defined twice (getter, then setter); the
    # @property / @<name>.setter decorators appear stripped -- as written
    # each second def shadows the first.  Restore from the original source.
    def algorithm(self):
        return self._config_get('minmax')
    def algorithm(self, text: str):
        self._config(text, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def groupTimespan(self):
        return self._config_get(15)
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)
    def prop(self):
        return self._config_get('y')
    def prop(self, text: str):
        self._config(text, js_type=False)
# NOTE(review): these two bare calls look like stripped class decorators
# (the '@' in '@_set_stats_type(...)' / '@_set_msg_type(...)' lost in
# extraction); they register the stats/message type for the class below.
# Confirm against the original source.
_set_stats_type(ofproto.OFPMP_GROUP_STATS, OFPGroupStats)
_set_msg_type(ofproto.OFPT_MULTIPART_REQUEST)
class OFPGroupStatsRequest(OFPMultipartRequest):
    """OpenFlow group-statistics multipart request (defaults to all groups)."""

    def __init__(self, datapath, flags=0, group_id=ofproto.OFPG_ALL, type_=None):
        # type_ is accepted for signature symmetry with the other stats
        # requests but is unused here.
        super(OFPGroupStatsRequest, self).__init__(datapath, flags)
        self.group_id = group_id

    def _serialize_stats_body(self):
        # Pack the group id into the body right after the multipart header.
        msg_pack_into(ofproto.OFP_GROUP_MULTIPART_REQUEST_PACK_STR, self.buf, ofproto.OFP_MULTIPART_REQUEST_SIZE, self.group_id)
def search_get_image_arguments(provider_name: str) -> Dict:
    """Return the test-fixture arguments for an image-search call.

    Each supported provider maps to the project id of its test workspace;
    unknown providers raise NotImplementedError.
    """
    project_ids = {
        'sentisight': '42874',
        'nyckel': 'yiilyy1cm0sxiw7n',
    }
    if provider_name not in project_ids:
        raise NotImplementedError(f'Please add a project id for test arguments of provider: {provider_name}')
    return {'image_name': 'test.jpg', 'project_id': project_ids[provider_name]}
# NOTE(review): the route decorator was stripped to a bare tuple in the
# extracted source; '@app.route' is the usual Flask form (send_file below
# implies Flask) -- confirm the decorator target (app vs. a blueprint).
@app.route('/reports/<scanid>', methods=['GET'])
def report_alerts(scanid):
    """Build a PDF vulnerability report for *scanid* and send it as an
    attachment.

    The PDF is written to '<scanid>.pdf' by SimpleDocTemplate and streamed
    back with send_file.
    """
    result = fetch_records(scanid)
    styles = getSampleStyleSheet()
    story = []
    styleT = styles['Title']
    styleB = styles['BodyText']
    styleB.alignment = TA_LEFT
    pTitle = Paragraph('<font size="18" color="darkblue">API Vulnerabilities Report</font>', styleT)
    story.append(pTitle)
    story.append(Spacer(0.5, (0.25 * inch)))
    story.append(Paragraph("<font size='14' color='darkblue'><b>Vulnerability Details</b></font>", styleB))
    story.append(Spacer(1, (0.5 * inch)))
    fileName = str(scanid) + '.pdf'
    pdf = SimpleDocTemplate(fileName, title='API Security Vulnerabilities', pagesize=letter)
    # One table per recorded vulnerability.
    for i in result:
        fdata = format_data(i, styleB)
        vtab = make_table(fdata)
        story.append(vtab)
        story.append(Spacer(1, (0.5 * inch)))
    pdf.build(story)
    # Dead code removed: the original created a StringIO, read it into an
    # unused pdf_out variable and closed it; SimpleDocTemplate already
    # writes the PDF directly to fileName.
    return send_file(fileName, as_attachment=True)
def split_sequence(sequence, max_len):
    """Split a Fasta *sequence* at every run of at least *max_len* 'N's.

    Returns a list of new Fasta records named ``<name>_split_<i>`` (1-based),
    one per section between N-stretches.
    """
    seq = sequence.sequence
    name = sequence.name
    # Longest N-stretches replaced first so shorter stretches embedded in a
    # longer one are not substituted twice.  key=len replaces the original
    # Python-2-only cmp=lambda x, y: cmp(len(x), len(y)) argument, which
    # raises TypeError on Python 3 (sorted() no longer accepts cmp).
    n_stretches = sorted(re.findall('N{' + str(max_len) + ',}', seq), key=len, reverse=True)
    for stretch in n_stretches:
        seq = seq.replace(stretch, '_SPLIT-HERE_')
    sequences = []
    for counter, section in enumerate(seq.split('_SPLIT-HERE_'), start=1):
        sequences.append(Fasta(name + '_split_' + str(counter), section))
    return sequences
class RMTTestReqClass(object):
    """Tests for the ReqEffortEst requirement-tag rewriter."""

    def rmttest_positive_01(self):
        # Without an 'Effort estimation' record the rewrite yields None.
        config, req = create_parameters()
        rewriter = ReqEffortEst(config)
        name, value = rewriter.rewrite('EffortEstimation-test', req)
        assert name == 'Effort estimation'
        assert value is None

    def rmttest_positive_02(self):
        # Every valid effort value round-trips through the rewriter.
        config, req = create_parameters()
        for valid in ReqEffortEst.valid_values:
            req['Effort estimation'] = RecordEntry('Effort estimation', Encoding.to_unicode(valid))
            name, value = ReqEffortEst(config).rewrite('EffortEstimation-test', req)
            assert name == 'Effort estimation'
            assert value == valid

    def rmttest_negative_01(self):
        # Any value outside valid_values raises RMTException with id 4.
        config, req = create_parameters()
        invalid_values = [4, 6, 7, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19, 20,
                          22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]
        for bad in invalid_values:
            req['Effort estimation'] = RecordEntry('Effort estimation', Encoding.to_unicode(bad))
            rewriter = ReqEffortEst(config)
            with pytest.raises(RMTException) as rmte:
                rewriter.rewrite('EffortEstimation-test', req)
            assert rmte.id() == 4
def _cast_ctx(context: Union[(Context, click.core.Context)]) -> Context:
    """Normalize a native Context or a click Context to a Context.

    A click context is expected to carry the native Context in its ``obj``
    slot.  Anything else raises AEAException.
    """
    # Checked in the same order as before: native Context first.
    converters = (
        (Context, lambda ctx: ctx),
        (click.core.Context, lambda ctx: cast(Context, ctx.obj)),
    )
    for kind, convert in converters:
        if isinstance(context, kind):
            return convert(context)
    raise AEAException('clean_after decorator should be used only on methods with Context or click.core.Context object as a first argument.')
class OptionSeriesPictorialDataEvents(Options):
    # Event-handler options (all default None) delegating to the shared
    # Options config store.
    # NOTE(review): each name is defined twice (getter, then setter); the
    # @property / @<name>.setter decorators were evidently stripped during
    # extraction -- as written each second def shadows the first.  Restore
    # the decorators from the original source.
    def click(self):
        return self._config_get(None)
    def click(self, value: Any):
        self._config(value, js_type=False)
    def drag(self):
        return self._config_get(None)
    def drag(self, value: Any):
        self._config(value, js_type=False)
    def dragStart(self):
        return self._config_get(None)
    def dragStart(self, value: Any):
        self._config(value, js_type=False)
    def drop(self):
        return self._config_get(None)
    def drop(self, value: Any):
        self._config(value, js_type=False)
    def mouseOut(self):
        return self._config_get(None)
    def mouseOut(self, value: Any):
        self._config(value, js_type=False)
    def mouseOver(self):
        return self._config_get(None)
    def mouseOver(self, value: Any):
        self._config(value, js_type=False)
    def remove(self):
        return self._config_get(None)
    def remove(self, value: Any):
        self._config(value, js_type=False)
    def select(self):
        return self._config_get(None)
    def select(self, value: Any):
        self._config(value, js_type=False)
    def unselect(self):
        return self._config_get(None)
    def unselect(self, value: Any):
        self._config(value, js_type=False)
    def update(self):
        return self._config_get(None)
    def update(self, value: Any):
        self._config(value, js_type=False)
def test_invalid_geographic_region():
    """longitude_continuity rejects out-of-range region bounds."""
    # Latitude outside [-90, 90].
    w, e = -10, 10
    for s, n in ([-200, 90], [-90, 200]):
        with pytest.raises(ValueError):
            longitude_continuity(None, [w, e, s, n])
    # Longitude bounds outside the accepted range.
    s, n = -10, 10
    for w, e in ([-200, 0], [0, 380]):
        with pytest.raises(ValueError):
            longitude_continuity(None, [w, e, s, n])
    # Region spanning more than 360 degrees of longitude.
    w, e, s, n = -180, 200, -10, 10
    with pytest.raises(ValueError):
        longitude_continuity(None, [w, e, s, n])
def cli(args=None):
    """Command-line entry point: parse evaluation flags and dispatch.

    When *args* is None the process arguments (sys.argv[1:]) are used.
    """
    parser = basic_argument_parser()
    parser.add_argument('--predictor-path', type=str,
                        help='Path (a directory) to the exported model that will be evaluated')
    parser.add_argument('--num-threads', type=int, default=None,
                        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit")
    parser.add_argument('--caffe2-engine', type=str, default=None,
                        help='If set, engine of all ops will be set by this value')
    parser.add_argument('--caffe2_logging_print_net_summary', type=int, default=0,
                        help='Control the --caffe2_logging_print_net_summary in GlobalInit')
    if args is None:
        args = sys.argv[1:]
    run_with_cmdline_args(parser.parse_args(args))
class InspectVisitor():
    """Dispatches a traversable instance to the matching visit_* method."""

    def process(self, instance: _Traversable):
        """Invoke ``self.visit_<instance.__visit_name__>(instance)``.

        Any AttributeError raised while resolving or running the visit
        method is re-raised as RuntimeError (with the original chained).
        """
        try:
            visit = getattr(self, 'visit_{}'.format(instance.__visit_name__))
            return visit(instance)
        except AttributeError as e:
            raise RuntimeError('This visitor does not support {}'.format(type(instance))) from e
def fuse_duplicate_fused_elementwise(sorted_graph: List[Tensor], _workdir: str) -> List[Tensor]:
    """Deduplicate identical fused-elementwise ops in *sorted_graph*.

    For each duplicate group, the first ("primary") op absorbs the
    duplicates' outputs and output accessors, tensor src/dst links are
    repointed at the primary op, and the graph is re-sanitized.
    *_workdir* is unused (kept for pass-signature compatibility).
    """
    fusion_groups = find_duplicate_fused_elementwise(sorted_graph)
    for (primary_op, duplicate_ops) in fusion_groups.items():
        for key in ('outputs', 'output_accessors'):
            # Move the duplicates' outputs / output accessors onto the primary op.
            duplicate_ops_outputs = [output for op in duplicate_ops for output in op._attrs[key]]
            primary_op._attrs[key] += duplicate_ops_outputs
            if (key != 'outputs'):
                continue
            # For the output tensors only: replace the duplicate producers
            # with the primary op in each tensor's src_ops set.
            for output_tensor in duplicate_ops_outputs:
                old_src_ops = output_tensor._attrs['src_ops']
                output_tensor._attrs['src_ops'] = ((set(old_src_ops) - set(duplicate_ops)) | {primary_op})
        # Inputs must no longer list the removed duplicates as consumers.
        for input_tensor in primary_op._attrs['inputs']:
            input_tensor._attrs['dst_ops'] = (set(input_tensor._attrs['dst_ops']) - set(duplicate_ops))
        # Sanity check: all merged output accessors must agree on shape.
        prev_shape = primary_op._attrs['output_accessors'][0].original_shapes
        for output_accessor in primary_op._attrs['output_accessors']:
            shape = output_accessor.original_shapes
            assert (prev_shape == shape), 'Output shapes mismatch in fuse_duplicate_fused_elementwise: {}, {}'.format(prev_shape, shape)
            prev_shape = shape
        _LOGGER.info('Fusing {} with {}'.format(primary_op._attrs['name'], ', '.join([dup_op._attrs['name'] for dup_op in duplicate_ops])))
    return transform_utils.sanitize_sorted_graph(sorted_graph)
# NOTE(review): this bare call is presumably a stripped decorator
# ('@..._converter(acc_ops.full_like)') registering the function below as
# the converter for acc_ops.full_like -- the decorator name is truncated;
# confirm against the original source.
_converter(acc_ops.full_like)
def acc_ops_full_like(target: Target, args: Tuple[(Argument, ...)], kwargs: Dict[(str, Argument)], name: str) -> ConverterOutput:
    """full_like converter: a tensor shaped and typed like kwargs['input'],
    filled with kwargs['fill_value'].

    Raises RuntimeError when the input is not an AITTensor.
    """
    input_val = kwargs['input']
    if (not isinstance(input_val, AITTensor)):
        raise RuntimeError(f'Non-tensor inputs for {name}: {input_val}')
    fill_value = kwargs['fill_value']
    return full()(input_val.shape(), fill_value=fill_value, dtype=input_val.dtype())
class OptionSeriesItemSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    # Getter/setter pairs (all default None) delegating to the shared
    # Options config store.
    # NOTE(review): each name is defined twice (getter, then setter); the
    # @property / @<name>.setter decorators appear stripped -- as written
    # each second def shadows the first.  Restore from the original source.
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the leading '@pytest.mark' was stripped in extraction; the
# '.parametrize(...)' remnant matches pytest's parametrize decorator and is
# restored here -- confirm against the original source.
@pytest.mark.parametrize('model_meta,test_meta,expected', [
    (dict(attr='a'), None, ['a']),
    (dict(attr='a, b'), None, ['a', 'b']),
    (dict(), None, []),
    (dict(attr=['a']), None, ['a']),
    (dict(attr=['a', 'b']), None, ['a', 'b']),
    (dict(attr=['a', 'b', 'b']), None, ['a', 'b']),
    (dict(attr='a'), dict(attr='b'), ['a', 'b']),
])
def test_get_alert_meta_attrs(model_meta, test_meta, expected):
    """_get_alert_meta_attrs collects the unique attr values from the model
    meta and (for test alerts) the test meta."""
    if test_meta is None:
        alert = BasePendingAlertSchema(**{**BASE_ALERT, 'model_meta': model_meta})
    else:
        alert = PendingTestAlertSchema(**{**TEST_ALERT, 'model_meta': model_meta, 'test_meta': test_meta})
    assert sorted(alert._get_alert_meta_attrs('attr')) == sorted(expected)
class RWLockTestCase(unittest.TestCase):
    """Reader/writer locking semantics of hamDb.BkHammingTree.

    Fixes: the deprecated unittest alias ``assert_`` (removed in Python
    3.12) is replaced with ``assertTrue``, and the two zero-argument helpers
    at the bottom are declared @staticmethod -- they take no ``self`` yet
    are invoked as ``self.__init_variables()`` / ``self.__start_and_join_threads(...)``,
    which raised TypeError as written (the decorators were presumably
    stripped during extraction).
    """

    def test_overrelease_read(self):
        testLock = hamDb.BkHammingTree()
        testLock.get_read_lock()
        testLock.free_read_lock()
        # Releasing more times than acquired must fail.
        self.assertRaises(RuntimeError, testLock.free_read_lock)

    def test_overrelease_write(self):
        testLock = hamDb.BkHammingTree()
        testLock.get_write_lock()
        testLock.free_write_lock()
        self.assertRaises(RuntimeError, testLock.free_write_lock)

    def test_reentrant_read(self):
        # The read lock may be acquired twice and released twice.
        testLock = hamDb.BkHammingTree()
        testLock.get_read_lock()
        testLock.get_read_lock()
        testLock.free_read_lock()
        testLock.free_read_lock()

    def test_non_reentrant_write(self):
        # A second (non-blocking) write acquisition must fail.
        testLock = hamDb.BkHammingTree()
        testLock.get_write_lock()
        self.assertRaises(RuntimeError, testLock.get_write_lock, blocking=False)
        testLock.free_write_lock()

    def test_readers_nonexclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Reader(buffer_, rw_lock, 0, 0))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.4, 1))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0.3))
        threads.append(Reader(buffer_, rw_lock, 0.5, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([], threads[0].buffer_read)
        self.assertEqual([1], threads[2].buffer_read)
        self.assertEqual([1], threads[3].buffer_read)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
        # Readers overlap: the later reader finishes before the earlier one.
        self.assertTrue(threads[3].exit_time < threads[2].exit_time)

    def test_writers_exclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0.4, 1))
        threads.append(Writer(buffer_, rw_lock, 0.1, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.2, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([1, 2], threads[2].buffer_read)
        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)

    def test_writer_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.4))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2], threads[3].buffer_read)
        self.assertEqual([1, 2], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)

    def test_many_writers_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.6))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.1, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.4, 0))
        threads.append(Writer(buffer_, rw_lock, 0.5, 0.1, 3))
        self.__start_and_join_threads(threads)
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2, 3], threads[3].buffer_read)
        self.assertEqual([1, 2, 3], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)

    def test_context_readers_nonexclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(ReaderContext(buffer_, rw_lock, 0, 0))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0.4, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0.3))
        threads.append(ReaderContext(buffer_, rw_lock, 0.5, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([], threads[0].buffer_read)
        self.assertEqual([1], threads[2].buffer_read)
        self.assertEqual([1], threads[3].buffer_read)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
        self.assertTrue(threads[3].exit_time < threads[2].exit_time)

    def test_context_writers_exclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0.4, 1))
        threads.append(WriterContext(buffer_, rw_lock, 0.1, 0, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.2, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([1, 2], threads[2].buffer_read)
        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)

    def test_context_writer_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.1, 0.4))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        self.__start_and_join_threads(threads)
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2], threads[3].buffer_read)
        self.assertEqual([1, 2], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)

    def test_context_many_writers_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.1, 0.6))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0.1, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        threads.append(ReaderContext(buffer_, rw_lock, 0.4, 0))
        threads.append(WriterContext(buffer_, rw_lock, 0.5, 0.1, 3))
        self.__start_and_join_threads(threads)
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2, 3], threads[3].buffer_read)
        self.assertEqual([1, 2, 3], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)

    @staticmethod
    def __init_variables():
        # Fresh shared buffer, lock and thread list for each scenario.
        buffer_ = []
        rw_lock = hamDb.BkHammingTree()
        threads = []
        return (buffer_, rw_lock, threads)

    @staticmethod
    def __start_and_join_threads(threads):
        for t in threads:
            t.start()
        for t in threads:
            t.join()
class Object(AbstractSchemaNode):
    """Schema node describing an object with nested attribute nodes and
    (input text, expected extraction) example pairs."""

    # Nested attribute schema nodes.
    attributes: Sequence[Union[(ExtractionSchemaNode, Selection, Object)]]
    # Examples as (text, extraction(s)) tuples; empty by default.
    examples: Sequence[Tuple[(str, Union[(Sequence[Mapping[(str, Any)]], Mapping[(str, Any)])])]] = tuple()

    def accept(self, visitor: AbstractVisitor[T], **kwargs: Any) -> T:
        """Double-dispatch hook for schema visitors."""
        return visitor.visit_object(self, **kwargs)

    # NOTE(review): parse_raw/parse_obj take `cls` as first argument; the
    # @classmethod decorators were evidently stripped during extraction and
    # are restored here.
    @classmethod
    def parse_raw(cls, *args: Any, **kwargs: Any) -> Object:
        """Pydantic-v1-only raw parser; other major versions are rejected."""
        if PYDANTIC_MAJOR_VERSION != 1:
            raise NotImplementedError(f'parse_raw is not supported for pydantic {PYDANTIC_MAJOR_VERSION}')
        return super().parse_raw(*args, **kwargs)

    @classmethod
    def parse_obj(cls, *args: Any, **kwargs: Any) -> Object:
        """Pydantic-v1-only object parser; other major versions are rejected."""
        if PYDANTIC_MAJOR_VERSION != 1:
            raise NotImplementedError(f'parse_obj is not supported for pydantic {PYDANTIC_MAJOR_VERSION}')
        return super().parse_obj(*args, **kwargs)
def extractNextlevelforthePLOT(item):
    """Map a feed *item* to a release message.

    Returns None for previews / untagged chapters / scanlation posts, a
    release message for recognized series, and False when nothing matched.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    # Skip previews and items carrying no chapter/volume/fragment info.
    if ('preview' in title_lower) or not any(x is not None for x in (chp, vol, frag)):
        return None
    if 'scan-trad' in item['tags']:
        return None
    tag_series = [('Ore Ga Heroine', 'Ore ga Heroine wo Tasukesugite Sekai ga Little Mushiroku', 'translated')]
    for tagname, name, tl_type in tag_series:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    title_series = [('For my daughter, I might defeat even the archenemy Chapter', 'For my daughter, I might defeat even the archenemy', 'translated')]
    for titlecomponent, name, tl_type in title_series:
        if titlecomponent.lower() in title_lower:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def text_to_image_with_back(text, page_text, title):
    """Render *text* (one line per '\\n'), a *title* header and a *page_text*
    footer onto a crop of the module-level `background` image; returns the
    composed PIL image.

    NOTE(review): relies on Pillow APIs removed in Pillow 10
    (font.getsize, Image.ANTIALIAS); pin Pillow < 10 or port to
    getbbox/getlength and Image.LANCZOS.
    """
    font = ImageFont.truetype(fontpath, 28)
    font_title = ImageFont.truetype('src/static/qmzl.ttf', 50)
    font_page = ImageFont.truetype(fontpath, 30)
    padding = 30
    margin = 33
    text_list = text.split('\n')
    max_width = 0
    (title_w, title_h) = font_title.getsize(title)
    # Widest of: any body line, the page footer, the title.
    for text in text_list:
        (w, h) = font.getsize(text)
        max_width = max(max_width, w)
    max_width = max(max_width, font.getsize(page_text)[0])
    max_width = max(max_width, title_w)
    # Canvas size; h here is the height of the last measured body line.
    wa = ((max_width + (padding * 2)) + 100)
    ha = ((((((h * len(text_list)) + h) + (margin * len(text_list))) + (padding * 2)) + title_h) + int((title_h * 0.8)))
    i = background
    # Scale the background up so it covers the canvas, then center-crop.
    change = max((ha / i.height), (wa / i.width))
    i = i.resize((int((i.width * change)), int((i.height * change))), Image.ANTIALIAS)
    i = i.crop((int(((i.width / 2) - (wa / 2))), int(((i.height / 2) - (ha / 2))), int(((i.width / 2) + (wa / 2))), int(((i.height / 2) + (ha / 2)))))
    draw = ImageDraw.Draw(i)
    # Title centered at the top.
    draw.text((int(((i.width / 2) - (title_w / 2))), padding), title, font=font_title, fill=(0, 0, 0))
    for j in range(len(text_list)):
        text = text_list[j]
        draw.text((padding, (((padding + (j * (margin + h))) + margin) + title_h)), text, font=font, fill=(0, 0, 0))
        # NOTE(review): page_text is drawn at the same coordinates as each
        # body line, overprinting it -- the coordinate expression (or the
        # loop boundary) looks corrupted in extraction; the footer was
        # presumably drawn once, below the body.  TODO confirm.
        draw.text((padding, (((padding + (j * (margin + h))) + margin) + title_h)), page_text, font=font_page, fill=(0, 0, 0))
    return i
# NOTE(review): the bare '(params=[...])' line was a stripped decorator; it
# matches pytest's fixture parametrization (the function below reads
# request.param), so @pytest.fixture is restored -- confirm against source.
@pytest.fixture(params=[
    {'path_prefix': '', 'tenant_alias': 'default', 'client_alias': 'default_tenant', 'user_alias': 'regular', 'login_session_alias': 'default', 'registration_session_password_alias': 'default_password', 'registration_session_oauth_alias': 'default_oauth', 'session_token_alias': 'regular'},
    {'path_prefix': '/secondary', 'tenant_alias': 'secondary', 'client_alias': 'secondary_tenant', 'user_alias': 'regular_secondary', 'login_session_alias': 'secondary', 'registration_session_password_alias': 'secondary_password', 'registration_session_oauth_alias': 'secondary_oauth', 'session_token_alias': 'regular_secondary'},
])
def tenant_params(request, test_data: TestData) -> TenantParams:
    """Parametrized fixture yielding the per-tenant test objects for both
    the default and the secondary tenant."""
    params = request.param
    return TenantParams(
        path_prefix=params['path_prefix'],
        tenant=test_data['tenants'][params['tenant_alias']],
        client=test_data['clients'][params['client_alias']],
        user=test_data['users'][params['user_alias']],
        login_session=test_data['login_sessions'][params['login_session_alias']],
        registration_session_password=test_data['registration_sessions'][params['registration_session_password_alias']],
        registration_session_oauth=test_data['registration_sessions'][params['registration_session_oauth_alias']],
        session_token=test_data['session_tokens'][params['session_token_alias']],
        session_token_token=session_token_tokens[params['session_token_alias']],
    )
def test_serializable_encoding_rlp_caching(rlp_obj):
    """encode() must only read/populate _cached_rlp when cache=True, and
    decode() must repopulate the cache on the decoded object."""
    assert rlp_obj._cached_rlp is None
    uncached = encode(rlp_obj, cache=False)
    # cache=False leaves the cache untouched
    assert rlp_obj._cached_rlp is None
    # cache=True yields the same bytes and stores them
    assert encode(rlp_obj, cache=True) == uncached
    assert rlp_obj._cached_rlp == uncached
    # a poisoned cache is returned verbatim, proving the cache is consulted
    rlp_obj._cached_rlp = b'test-uses-cache'
    assert encode(rlp_obj, cache=True) == b'test-uses-cache'
    # round-trip restores equality and fills the new object's cache
    decoded = decode(uncached, sedes=rlp_obj.__class__)
    assert decoded == rlp_obj
    assert decoded._cached_rlp == uncached
_os(*metadata.platforms)  # NOTE(review): residue of a stripped OS-gating decorator (likely @common.requires_os) — restore from upstream
(MS_BUILD)  # NOTE(review): residue of a stripped dependency decorator — restore from upstream
def main():
    """Emulate an MSBuild inline-task beacon: serve a payload over HTTP, patch
    the callback URL into a temporary .csproj, run msbuild against it, then
    clean up and shut the server down."""
    common.log('MsBuild Beacon')
    (server, ip, port) = common.serve_web()
    common.clear_web_cache()
    # BUG FIX: this line contained an unterminated string literal (the callback
    # URL text was stripped from the source); message reconstructed — TODO
    # confirm the original wording.
    common.log('Updating the callback to %s:%d' % (ip, port))
    target_task = 'tmp-file.csproj'
    common.copy_file(common.get_path('bin', 'BadTasks.csproj'), target_task)
    # BUG FIX: unterminated string literal; callback URL reconstructed — TODO
    # confirm the exact URL/path expected by common.CALLBACK_REGEX.
    new_callback = 'http://%s:%d' % (ip, port)
    common.patch_regex(target_task, common.CALLBACK_REGEX, new_callback)
    common.execute([MS_BUILD, target_task], timeout=30, kill=True)
    common.remove_file(target_task)
    server.shutdown()
class PoetryDependencyGetter(DependencyGetter):
    """Extract regular and development dependencies from the [tool.poetry]
    tables of pyproject.toml."""

    def get(self) -> DependenciesExtract:
        """Return both dependency sets, logging each as it is collected."""
        dependencies = self._get_poetry_dependencies()
        self._log_dependencies(dependencies)
        dev_dependencies = self._get_poetry_dev_dependencies()
        self._log_dependencies(dev_dependencies, is_dev=True)
        return DependenciesExtract(dependencies, dev_dependencies)

    def _get_poetry_dependencies(self) -> list[Dependency]:
        """Dependencies declared in [tool.poetry.dependencies]."""
        pyproject_data = load_pyproject_toml(self.config)
        dependencies: dict[(str, Any)] = pyproject_data['tool']['poetry']['dependencies']
        return self._get_dependencies(dependencies, self.package_module_name_map)

    def _get_poetry_dev_dependencies(self) -> list[Dependency]:
        """Dev dependencies: the legacy [tool.poetry.dev-dependencies] table
        merged with every [tool.poetry.group.*].dependencies table."""
        dependencies: dict[(str, str)] = {}
        pyproject_data = load_pyproject_toml(self.config)
        with contextlib.suppress(KeyError):
            dependencies = {**dependencies, **pyproject_data['tool']['poetry']['dev-dependencies']}
        try:
            dependency_groups = pyproject_data['tool']['poetry']['group']
        except KeyError:
            dependency_groups = {}
        for group_values in dependency_groups.values():
            with contextlib.suppress(KeyError):
                dependencies = {**dependencies, **group_values['dependencies']}
        return self._get_dependencies(dependencies, self.package_module_name_map)

    def _get_dependencies(self, poetry_dependencies: dict[(str, Any)], package_module_name_map: Mapping[(str, Sequence[str])]) -> list[Dependency]:
        """Convert poetry spec entries into Dependency objects, skipping the
        interpreter constraint ('python')."""
        dependencies = []
        for (dep, spec) in poetry_dependencies.items():
            if (dep != 'python'):
                optional = self._is_optional(spec)
                conditional = self._is_conditional(spec)
                dependencies.append(Dependency(dep, self.config, conditional=conditional, optional=optional, module_names=package_module_name_map.get(dep)))
        return dependencies

    @staticmethod
    def _is_optional(spec: (str | dict[(str, Any)])) -> bool:
        """True when the spec dict marks the dependency optional.

        BUG FIX: restored @staticmethod — without it, the self._is_optional(spec)
        call sites pass two arguments to a one-parameter function (TypeError).
        """
        return bool((isinstance(spec, dict) and spec.get('optional')))

    @staticmethod
    def _is_conditional(spec: (str | dict[(str, Any)])) -> bool:
        """True when the spec constrains both python version and package version.

        BUG FIX: restored @staticmethod (see _is_optional).
        """
        return (isinstance(spec, dict) and ('python' in spec) and ('version' in spec))
class OptionPlotoptionsTreemapLevelsDatalabelsFilter(Options):
    """Data-label filter (operator/property pair) for treemap level labels.

    BUG FIX(review): the original had duplicate method names with no
    decorators, so each setter silently shadowed its getter; the
    @property/@*.setter decorators have been restored — confirm against the
    generator that produced this file.
    """

    @property
    def operator(self):
        """Comparison operator applied by the filter; no default."""
        return self._config_get(None)

    @operator.setter
    def operator(self, value: Any):
        self._config(value, js_type=False)

    @property
    def property(self):
        """Point property the filter compares (name shadows the builtin by API design)."""
        return self._config_get(None)

    @property.setter
    def property(self, text: str):
        # `property` here resolves to the property object defined just above
        self._config(text, js_type=False)
def test_hic_transfer_pearson():
    """Run hicTransform with --method pearson and compare the produced matrix
    against the stored reference, then clean up the temp output."""
    result_file = NamedTemporaryFile(suffix='pearson_.h5', delete=False)
    result_file.close()
    cli_args = '--matrix {} --outFileName {} --method pearson'.format(original_matrix, result_file.name).split()
    compute(hicTransform.main, cli_args, 5)
    expected = hm.hiCMatrix(ROOT + 'hicTransform/pearson.h5')
    produced = hm.hiCMatrix(result_file.name)
    nt.assert_array_almost_equal(expected.matrix.data, produced.matrix.data, decimal=DELTA_DECIMAL)
    os.unlink(result_file.name)
def test_detail(app):
    """Detail page of an existing row renders the dashboard template with one
    edit and two delete affordances; a missing row yields 404."""
    client = TestClient(app=app)
    ok_response = client.get('/admin/users/1')
    assert ok_response.status_code == 200
    assert ok_response.template.name == 'dashboard/detail.html'
    assert ok_response.text.count('Edit Row') == 1
    assert ok_response.text.count('Delete Row') == 2
    missing_response = client.get('/admin/users/1000')
    assert missing_response.status_code == 404
class DjangoTemplateSourceInstrumentation(AbstractInstrumentedModule):
    """Wraps Parser.extend_nodelist so each template node remembers the token
    it came from (used later to report template source locations)."""

    name = 'django_template_source'
    instrument_list = [('django.template.base', 'Parser.extend_nodelist')]

    def call(self, module, method, wrapped, instance, args, kwargs):
        result = wrapped(*args, **kwargs)
        _missing = object()
        # node / token may arrive positionally or as keywords; bail out when absent
        node = args[1] if len(args) > 1 else kwargs.get('node', _missing)
        if node is _missing:
            return result
        token = args[2] if len(args) > 2 else kwargs.get('token', _missing)
        if token is _missing:
            return result
        # only tag nodes that don't already carry a token with line info
        if not hasattr(node, 'token') and hasattr(token, 'lineno'):
            node.token = token
        return result
@event.listens_for(Repository, 'after_insert')
def receive_after_insert(mapper, connection, repo):
    """SQLAlchemy after_insert hook: export the freshly inserted Repository's
    path under its templated environment variable names (code- and legacy
    id-based).

    NOTE(review): the decorator line was mangled to `_for(Repository,
    'after_insert')`; reconstructed as sqlalchemy `event.listens_for` —
    confirm `event` is imported at module level.
    """
    logger.debug(('auto creating env var for Repository: %s' % repo.name))
    from stalker import defaults
    os.environ[(defaults.repo_env_var_template % {'code': repo.code})] = repo.path
    os.environ[(defaults.repo_env_var_template_old % {'id': repo.id})] = repo.path
@pytest.mark.django_db
def test_ignore_special_case():
    """Rows named 'MULTIPLE RECIPIENTS' must be excluded from recipient
    search results.

    NOTE(review): the marker line was mangled to a bare `.django_db`;
    reconstructed as @pytest.mark.django_db (pytest-django).
    """
    baker.make(RecipientProfile, recipient_level='R', recipient_hash='00077a9a-5a70-8919-fd19-330762af6b85', recipient_unique_id=None, recipient_name='MULTIPLE RECIPIENTS', last_12_months=(- .0))
    filters = {'limit': 10, 'page': 1, 'order': 'desc', 'sort': 'amount', 'award_type': 'all'}
    (results, meta) = get_recipients(filters=filters)
    assert (meta['total'] == 0)
class OptionPlotoptionsPackedbubbleSonificationContexttracksPointgrouping(Options):
    """Point-grouping options for packed-bubble sonification context tracks.

    BUG FIX(review): duplicate method names with no decorators meant each
    setter shadowed its getter; @property/@*.setter decorators restored —
    confirm against the generator that produced this file.
    """

    @property
    def algorithm(self):
        """Grouping algorithm; defaults to 'minmax'."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is enabled; defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """Timespan of each group (milliseconds); defaults to 15."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property used for grouping; defaults to 'y'."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class Dataframe(object):
    """Column-oriented container of parallel lists (keys, inputs, texts,
    values, features). `values` is coerced to a dense float32 numpy matrix,
    with cells that cannot be parsed becoming NaN."""

    def __init__(self, keys=None, inputs=None, texts=None, values=None, features=None):
        self.keys = keys
        self.inputs = inputs
        self.texts = texts
        self.values = values
        self.features = features
        self._homogenize()

    def _process(self, col, idx):
        """Return col[idx], or None when the column is absent."""
        if col is None:
            return None
        return col[idx]

    def iterrows(self):
        """Yield one dict per row with key/input/text/values fields (absent
        columns yield None)."""
        for i in range(len(self.keys)):
            yield {
                'key': self._process(self.keys, i),
                'input': self._process(self.inputs, i),
                'text': self._process(self.texts, i),
                'values': self._process(self.values, i),
            }

    def _float(self, x):
        """float(x), or NaN for values that cannot be converted.

        BUG FIX: narrowed the bare `except:` (which also swallowed
        KeyboardInterrupt/SystemExit) to the two exceptions float() raises.
        """
        try:
            return float(x)
        except (TypeError, ValueError):
            return np.nan

    def _homogenize(self):
        """Coerce self.values into a float32 matrix shaped like the input
        (assumes all rows have the same length as the first row)."""
        if self.values is None:
            return
        values = np.zeros((len(self.values), len(self.values[0])))
        for (i, v) in enumerate(self.values):
            for (j, x) in enumerate(v):
                values[(i, j)] = self._float(x)
        self.values = np.array(values, dtype=np.float32)

    def from_csv(self, filename):
        """Populate this Dataframe from a CSV file: first row is the header,
        column 0 = key, column 1 = input, remaining columns = feature values
        (feature names taken from the header)."""
        keys = []
        inputs = []
        values = []
        with open(filename, 'r') as f:
            reader = csv.reader(f)
            h = next(reader)
            for r in reader:
                keys += [r[0]]
                inputs += [r[1]]
                values += [r[2:]]
        features = h[2:]
        self.keys = keys
        self.inputs = inputs
        self.texts = None
        self.values = values
        self.features = features
        self._homogenize()
class OptionPlotoptionsBarStatesSelect(Options):
    """'select' state options for bar series.

    BUG FIX(review): duplicate method names with no decorators meant each
    setter shadowed its getter; @property/@*.setter decorators restored —
    confirm against the generator that produced this file.
    """

    @property
    def animation(self) -> 'OptionPlotoptionsBarStatesSelectAnimation':
        """Sub-options object controlling the state-change animation."""
        return self._config_sub_data('animation', OptionPlotoptionsBarStatesSelectAnimation)

    @property
    def borderColor(self):
        """Border colour of the selected point; defaults to '#000000'."""
        return self._config_get('#000000')

    @borderColor.setter
    def borderColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        """Fill colour of the selected point; defaults to '#cccccc'."""
        return self._config_get('#cccccc')

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether the select state is enabled; defaults to True."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
def group_qs_params(url):
    """Deduplicate query-string parameters in `url`, keeping the LAST
    occurrence of each scalar key while preserving every array-style
    ("key[]") parameter; relative order of the kept parameters is retained."""
    parts = urlsplit(url)
    kept = []
    seen = set()
    # walk back-to-front so the last occurrence of a scalar key wins
    for key, value in reversed(parse_qsl(parts.query)):
        if key.endswith('[]') or key not in seen:
            kept.append((key, value))
            seen.add(key)
    parts = parts._replace(query=join_qsl(reversed(kept)))
    return parts.geturl()
def _pad_input_tensor(op: Operator, tensor_idx: int, f_extract_var_name: Callable[([int, int], str)], alignment_var_to_padding_length: Dict[(str, int)], tensor_list: List[Tensor]) -> None:
    """Pad input `tensor_idx` of `op` along every dimension whose alignment
    variable has a registered padding length.

    For each such dim, a host zero tensor of the padding size is concatenated
    onto the current input and `op` is rewired to consume the padded tensor;
    both new tensors are appended to tensor_list. Mutates the graph in place
    and returns None.
    """
    original_shape = op._attrs['inputs'][tensor_idx]._attrs['shape']
    for (dim_idx, dim) in enumerate(original_shape):
        # re-read the current input each iteration: an earlier dim may already
        # have replaced it with a padded tensor
        tensor = op._attrs['inputs'][tensor_idx]
        original_tensor_debug_str = str(tensor)
        previous_shape = tensor._attrs['shape']
        padding_shape = list(previous_shape)
        # NOTE(review): new_shape is computed but never used below — dead code?
        new_shape = list(previous_shape)
        var_name = f_extract_var_name(tensor_idx, dim_idx)
        if ((var_name is None) or (var_name not in alignment_var_to_padding_length)):
            continue
        padding_length = alignment_var_to_padding_length.get(var_name)
        padding_shape[dim_idx] = IntImm(padding_length)
        new_shape[dim_idx] = IntImm((dim.value() + padding_length))
        # detach op from the unpadded tensor, then splice in tensor ++ zeros
        tensor._attrs['dst_ops'].remove(op)
        padding_tensor = _create_host_zero_tensor(shape=padding_shape, dtype=tensor.dtype())
        padded_tensor = ops.concatenate()([tensor, padding_tensor], dim=dim_idx)
        op._attrs['inputs'][tensor_idx] = padded_tensor
        padded_tensor._attrs['dst_ops'].add(op)
        tensor_list.append(padding_tensor)
        tensor_list.append(padded_tensor)
        _LOGGER.debug('**** Apply padding ****, replace input tensor \n {} \n with \n {} \n'.format(original_tensor_debug_str, padded_tensor))
    return
class Config(BaseModel):
    """Top-level service configuration (pydantic model)."""

    # ZeroMQ endpoints for outbound / inbound traffic
    zmq_output_address: str
    zmq_input_address: str
    # indices of all GPUs available to the runner
    gpu_all: List[int]
    health_check: HealthCheckerConfig
    load_analyzer: LoadAnalyzerConfig
    models: ModelsRunnerConfig
    max_running_instances: int = 10
    # Union branch is selected at runtime from the CLOUD_CLIENT env var
    # (default 'docker'): DockerConfig when 'docker', KubeConfig when 'kube'.
    # NOTE(review): `choose_function` is not a standard pydantic Field kwarg —
    # presumably consumed by a custom validator elsewhere; confirm.
    cloud_client: Union[(DockerConfig, KubeConfig)] = Field(choose_function=(lambda x: (((x['branch_name'] == 'DockerConfig') and (os.environ.get('CLOUD_CLIENT', 'docker') == 'docker')) or ((x['branch_name'] == 'KubeConfig') and (os.environ.get('CLOUD_CLIENT', 'docker') == 'kube')))))
def check(ip, domain, port, args, timeout, payload_map):
    """Probe a Zookeeper service: connect to ip:port, send `args` as a
    four-letter-word command, and return 'Zookeeper\\n' + the first 1024
    bytes of the response (decoded).

    `domain` and `payload_map` are unused but kept for signature
    compatibility with the other checkers. Raises on any socket error.
    """
    # NOTE: process-global timeout, preserved from the original behaviour
    socket.setdefaulttimeout(int(timeout))
    # BUG FIX: the socket was never closed (leak on both success and error);
    # the context manager guarantees closure. The original
    # `except Exception: raise` was a no-op and has been removed.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((ip, int(port)))
        s.send((args + '\r\n').encode())
        byte_result = s.recv(1024)
    return 'Zookeeper\n' + byte_result.decode()
class DistributedTrainingContext(object):
    """Hosts participating in a (SageMaker-style) distributed training job.

    NOTE(review): the attribute layout plus positional/keyword construction
    below strongly suggest this was originally a @dataclass whose decorator
    was lost; an explicit __init__ restores constructibility. The two
    `(exceptions=KeyError, delay=1, tries=10, backoff=1)` residue lines were
    syntax errors — they look like stripped retry decorators; restore them
    once the retry dependency is confirmed.
    """

    current_host: str
    hosts: typing.List[str]
    network_interface_name: str

    def __init__(self, current_host: str, hosts: typing.List[str], network_interface_name: str) -> None:
        self.current_host = current_host
        self.hosts = hosts
        self.network_interface_name = network_interface_name

    # NOTE(review): originally retried on KeyError (delay=1, tries=10, backoff=1)
    @classmethod
    def from_env(cls) -> 'DistributedTrainingContext':
        """Build the context from the SageMaker environment variables; raises
        KeyError when any of them is missing/empty."""
        curr_host = os.environ.get(SM_ENV_VAR_CURRENT_HOST)
        raw_hosts = os.environ.get(SM_ENV_VAR_HOSTS)
        nw_iface = os.environ.get(SM_ENV_VAR_NETWORK_INTERFACE_NAME)
        if not (curr_host and raw_hosts and nw_iface):
            raise KeyError('Unable to locate Sagemaker Environment variables!')
        hosts = json.loads(raw_hosts)
        return DistributedTrainingContext(curr_host, hosts, nw_iface)

    # NOTE(review): originally retried on FileNotFoundError (delay=1, tries=10, backoff=1)
    @classmethod
    def from_sagemaker_context_file(cls) -> 'DistributedTrainingContext':
        """Build the context from the SageMaker resource-config JSON file;
        raises KeyError when any required field is missing/empty."""
        with open(SM_RESOURCE_CONFIG_FILE, 'r') as rc_file:
            d = json.load(rc_file)
        curr_host = d['current_host']
        hosts = d['hosts']
        nw_iface = d['network_interface_name']
        if not (curr_host and hosts and nw_iface):
            raise KeyError
        return DistributedTrainingContext(curr_host, hosts, nw_iface)

    @classmethod
    def local_execute(cls) -> 'DistributedTrainingContext':
        """Single-host context for local (non-distributed) execution.

        BUG FIX: restored the missing @classmethod decorator (the method takes
        cls and is clearly meant as an alternate constructor).
        """
        return DistributedTrainingContext(hosts=['localhost'], current_host='localhost', network_interface_name='dummy')
def get_canon_types(is_jp: bool) -> Optional[list[str]]:
    """Return column 1 of every row of CastleRecipeDescriptions.csv (the
    cannon name/type column), or None when the file cannot be found."""
    raw = game_data_getter.get_file_latest('resLocal', 'CastleRecipeDescriptions.csv', is_jp)
    if raw is None:
        helper.error_text('Could not find CastleRecipeDescriptions.csv')
        return None
    rows = csv_handler.parse_csv(raw.decode('utf-8'), delimeter=helper.get_text_splitter(is_jp))
    return [row[1] for row in rows]
class OFPPortModPropOptical(OFPPortModProp):
    """Optical-port property carried in an OFPPortMod message; serializes to
    the OFP_PORT_MOD_PROP_OPTICAL wire format."""

    def __init__(self, type_=None, length=None, configure=None, freq_lmda=None, fl_offset=None, grid_span=None, tx_pwr=None):
        self.type = type_
        self.length = length
        self.configure = configure
        self.freq_lmda = freq_lmda
        self.fl_offset = fl_offset
        self.grid_span = grid_span
        self.tx_pwr = tx_pwr

    def serialize(self):
        """Pack this property into a fresh bytearray; self.length is set to
        the packed size as a side effect."""
        pack_str = ofproto.OFP_PORT_MOD_PROP_OPTICAL_PACK_STR
        self.length = struct.calcsize(pack_str)
        buf = bytearray()
        msg_pack_into(pack_str, buf, 0, self.type, self.length, self.configure, self.freq_lmda, self.fl_offset, self.grid_span, self.tx_pwr)
        return buf
def test_get_registrable_entities():
    """get_registrable_entities must serialize local entities and skip both
    plain values and remote entities."""
    ctx = context_manager.FlyteContextManager.current_context().with_serialization_settings(flytekit.configuration.SerializationSettings(project='p', domain='d', version='v', image_config=flytekit.configuration.ImageConfig(default_image=flytekit.configuration.Image('def', 'docker.io/def', 'latest'))))
    # a mix of registrable entities (foo, wf), a bare string, and a remote task
    context_manager.FlyteEntities.entities = [foo, wf, 'str', FlyteTask(id=Identifier(ResourceType.TASK, 'p', 'd', 'n', 'v'), type='t', metadata=TaskMetadata().to_taskmetadata_model(), interface=TypedInterface(inputs={}, outputs={}), custom=None)]
    entities = flytekit.tools.serialize_helpers.get_registrable_entities(ctx)
    assert entities
    # presumably: foo's task spec, wf's workflow spec, and wf's default launch plan
    assert (len(entities) == 3)
    for e in entities:
        if isinstance(e, RemoteEntity):
            assert False, 'found unexpected remote entity'
        if (isinstance(e, WorkflowSpec) or isinstance(e, TaskSpec) or isinstance(e, LaunchPlan)):
            continue
        assert False, f'found unknown entity {type(e)}'
class Event():
    """Thin wrapper around threading.Event whose wait() raises PoolTimeout on
    expiry instead of returning False, and treats an infinite timeout as
    "wait forever"."""

    def __init__(self) -> None:
        self._event = threading.Event()

    def set(self) -> None:
        """Signal the event, waking all waiters."""
        self._event.set()

    def wait(self, timeout: Optional[float]=None) -> None:
        """Block until set. float('inf') is mapped to "no timeout"; raises
        PoolTimeout when the timeout elapses without the event being set."""
        effective_timeout = None if timeout == float('inf') else timeout
        signalled = self._event.wait(timeout=effective_timeout)
        if not signalled:
            raise PoolTimeout()
def setup_to_pass_2():
    """Fixture: write an rsyslog omfwd forwarding action to
    /etc/rsyslog.d/pytest.conf for the duration of the test, then remove it.

    NOTE(review): writelines() adds no newlines, so the four fragments are
    emitted as one long line; that still concatenates into a single valid
    rsyslog action(...) statement, but confirm the one-line form is intended.
    """
    with open('/etc/rsyslog.d/pytest.conf', 'w') as f:
        f.writelines(['*.* action(type="omfwd" target="192.168.2.100" port="514" protocol="tcp"', ' action.resumeRetryCount="100"', ' queue.type="LinkedList" queue.size="1000")', ''])
    (yield None)
    os.remove('/etc/rsyslog.d/pytest.conf')
def write_sort_file(staging_dir, extension_priorities, sort_file):
    """Walk staging_dir and write one "relpath priority" line per file (files
    whose relative path contains a space are skipped).

    Priorities for matching suffixes are negative (idx - len - 1), so earlier
    entries in extension_priorities sort first; non-matching files get 0.
    """
    for (dirpath, _dirname, filenames) in os.walk(staging_dir):
        for filename in filenames:
            fn = os.path.join(dirpath, filename)
            # BUG FIX: `priority` was left unbound when no suffix matched,
            # raising NameError on the first such file (or silently reusing a
            # stale value from a previous file). Default 0 sorts after every
            # matched suffix, whose priorities are all negative.
            priority = 0
            for (idx, suffix) in enumerate(extension_priorities):
                if fn.endswith(suffix):
                    priority = ((idx - len(extension_priorities)) - 1)
                    break
            assert fn.startswith((staging_dir + '/'))
            fn = fn[(len(staging_dir) + 1):]
            if (' ' not in fn):
                sort_file.write(('%s %d\n' % (fn, priority)))
class TestTupleTypeMiss():
    """Spock: a tuple value whose length does not match the annotated tuple
    type must fail at config instantiation."""

    def test_tuple_len_set(self, monkeypatch):
        with monkeypatch.context() as m:
            # building/generating the config for TupleMixedTypeMiss must raise
            with pytest.raises(_SpockInstantiationError):
                m.setattr(sys, 'argv', [''])
                config = ConfigArgBuilder(TupleMixedTypeMiss, desc='Test Builder')
                config.generate()
class Solution():
    """Max stock profit with at most two buy/sell transactions (the
    `while ... > 2` cap below implies LeetCode "Best Time to Buy and Sell
    Stock III" semantics).

    Strategy: greedily extract every maximal rising run as a (buy, sell)
    pair, then repeatedly merge away the adjacent price pair with the
    smallest gap until at most two transactions remain.
    """

    def maxProfit(self, prices: List[int]) -> int:
        def get_transactions(prices):
            # Flat list [buy1, sell1, buy2, sell2, ...]: one pair per maximal
            # profitable rising run.
            if (len(prices) < 2):
                return []
            ret = []
            start = prices[0]
            for (i, price) in enumerate(prices):
                if (i == 0):
                    continue
                if (price < prices[(i - 1)]):
                    # price dropped: close the run if it made a profit
                    if (start < prices[(i - 1)]):
                        ret.append(start)
                        ret.append(prices[(i - 1)])
                    start = price
            # close a trailing rising run
            if (prices[(- 1)] > start):
                ret.append(start)
                ret.append(prices[(- 1)])
            return ret

        def remove_one_transaction(prices):
            # Delete the adjacent pair (prices[i-1], prices[i]) with the
            # smallest absolute gap — the cheapest boundary to collapse.
            # NOTE(review): depending on parity this merges two neighbouring
            # transactions or drops a low-profit one — confirm both cases.
            merge_pos = 0
            cost = None
            for (i, price) in enumerate(prices):
                if (i == 0):
                    continue
                if ((cost is None) or (cost > abs((prices[i] - prices[(i - 1)])))):
                    merge_pos = (i - 1)
                    cost = abs((prices[i] - prices[(i - 1)]))
            del prices[merge_pos:(merge_pos + 2)]

        def get_profit(prices):
            # Sum of (sell - buy) over consecutive pairs.
            ret = 0
            for i in range(0, len(prices), 2):
                ret = ((ret + prices[(i + 1)]) - prices[i])
            return ret

        prices = get_transactions(prices)
        # reduce until at most two transactions remain
        while ((len(prices) // 2) > 2):
            remove_one_transaction(prices)
        return get_profit(prices)
('cuda.gemm_rrr.gen_profiler')  # NOTE(review): residue of a stripped registry decorator (likely @registry.reg(...)); as written this bare string has no effect
def gen_profiler(func_attrs, workdir, profiler_filename, dim_info_dict):
    """Generate the profiler sources for the CUDA row-row-row GEMM backend;
    the output address calculator strides along N, and split-K is supported."""
    output_addr_calculator = common.DEFAULT_OUTPUT_ADDR_CALCULATOR.render(stride_dim='N')
    return common.gen_profiler(func_attrs=func_attrs, workdir=workdir, profiler_filename=profiler_filename, dim_info_dict=dim_info_dict, src_template=common.SRC_TEMPLATE, problem_args_template=PROBLEM_ARGS_TEMPLATE, problem_args_template_cutlass_3x=PROBLEM_ARGS_TEMPLATE_CUTLASS_3X, args_parser_template=ARGS_PARSER_TEMPLATE, support_split_k=True, output_addr_calculator=output_addr_calculator)
@pytest.mark.use_numba
def test_potential_symmetry_cartesian():
    """The gravitational potential of a point mass is identical at six points
    offset by the same distance along +/-x, +/-y and +/-z.

    NOTE(review): the marker line was mangled to a bare `.use_numba` (a syntax
    error); reconstructed as @pytest.mark.use_numba — confirm marker name.
    """
    point_mass = [1.1, 1.2, 1.3]
    masses = [2670]
    distance = 3.3
    easting = (point_mass[0] * np.ones(6))
    northing = (point_mass[1] * np.ones(6))
    upward = (point_mass[2] * np.ones(6))
    # one computation point per axis direction, all at the same distance
    easting[0] += distance
    easting[1] -= distance
    northing[2] += distance
    northing[3] -= distance
    upward[4] += distance
    upward[5] -= distance
    coordinates = [easting, northing, upward]
    results = point_gravity(coordinates, point_mass, masses, 'potential', 'cartesian')
    npt.assert_allclose(*results)
def get_optimizer_cfg(lr, weight_decay=None, weight_decay_norm=None, weight_decay_bias=None, lr_mult=None):
    """Build a default Detectron2Go config, overriding each SOLVER field only
    when the corresponding argument is not None."""
    cfg = default_runner.Detectron2GoRunner().get_default_cfg()
    solver_overrides = {
        'BASE_LR': lr,
        'WEIGHT_DECAY': weight_decay,
        'WEIGHT_DECAY_NORM': weight_decay_norm,
        'WEIGHT_DECAY_BIAS': weight_decay_bias,
    }
    for field, value in solver_overrides.items():
        if value is not None:
            setattr(cfg.SOLVER, field, value)
    if lr_mult is not None:
        # this override is expressed as a list of multipliers
        cfg.SOLVER.LR_MULTIPLIER_OVERWRITE = [lr_mult]
    return cfg
def test_contract_estimate_gas_with_arguments(w3, math_contract, estimate_gas, transact):
    """The gas estimate for add(5, 6) should be within 21000 of the gas a
    real transaction actually consumes."""
    call_args = [5, 6]
    estimated = estimate_gas(contract=math_contract, contract_function='add', func_args=call_args)
    tx_hash = transact(contract=math_contract, contract_function='add', func_args=call_args)
    receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
    actual = receipt.get('gasUsed')
    assert abs(estimated - actual) < 21000
def extractShubham068JainWordpressCom(item):
    """Map a feed item to a release message via its tags.

    Returns None for previews/untyped items, a release message for a known
    tag, or False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    known_tags = (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'))
    for tag_name, series_name, translation_type in known_tags:
        if tag_name in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=translation_type)
    return False
class Rectangle(SVGItem):
    """SVG <rect> element with position, size, fill and corner radii."""

    name = 'SVG Rectangle'
    tag = 'rect'

    def __init__(self, page, x, y, width, height, fill, rx, ry):
        super(Rectangle, self).__init__(page, '', css_attrs={'width': width, 'height': height})
        self.set_attrs({'x': x, 'y': y, 'fill': fill, 'rx': rx, 'ry': ry})
        self.css({'display': 'inline-block'})
        self.html_objs = []

    def __str__(self):
        """Render the element and all child objects to an SVG fragment."""
        rendered_children = []
        for child in self.html_objs:
            rendered_children.append(child.html() if hasattr(child, 'html') else str(child))
        inner = ''.join(rendered_children)
        return ('<%s %s>%s</%s>' % (self.tag, self.get_attrs(css_class_names=self.style.get_classes()), inner, self.tag))
def set_seeds_globally(seed: int, set_cudnn_determinism: bool, info_txt: str) -> None:
    """Seed the python, torch and numpy RNGs; optionally force deterministic
    algorithms and disable cuDNN autotuning/acceleration."""
    BColors.print_colored(f'Setting random seed globally to: {seed} - {info_txt}', BColors.OKGREEN)
    random.seed(seed)
    torch.random.manual_seed(seed)
    np.random.seed(seed)
    if not set_cudnn_determinism:
        return
    # torch renamed set_deterministic -> use_deterministic_algorithms; prefer the newer API
    if hasattr(torch, 'use_deterministic_algorithms'):
        torch.use_deterministic_algorithms(True)
    elif hasattr(torch, 'set_deterministic'):
        torch.set_deterministic(True)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
class Task(UserControl):
    """A single to-do row (flet UserControl) with a display view (checkbox +
    edit/delete buttons) and an inline edit view that swap visibility."""

    def __init__(self, task_name, task_delete):
        super().__init__()
        self.task_name = task_name
        # callback invoked with this Task instance when delete is pressed
        self.task_delete = task_delete

    def build(self):
        """Compose the two stacked views; only display_view starts visible."""
        self.display_task = Checkbox(value=False, label=self.task_name)
        self.edit_name = TextField(expand=1)
        self.display_view = Row(alignment='spaceBetween', vertical_alignment='center', controls=[self.display_task, Row(spacing=0, controls=[IconButton(icon=icons.CREATE_OUTLINED, tooltip='Edit To-Do', on_click=self.edit_clicked), IconButton(icons.DELETE_OUTLINE, tooltip='Delete To-Do', on_click=self.delete_clicked)])])
        self.edit_view = Row(visible=False, alignment='spaceBetween', vertical_alignment='center', controls=[self.edit_name, IconButton(icon=icons.DONE_OUTLINE_OUTLINED, icon_color=colors.GREEN, tooltip='Update To-Do', on_click=self.save_clicked)])
        return Column(controls=[self.display_view, self.edit_view])

    def edit_clicked(self, e):
        """Switch to the edit view, pre-filling the text field with the label."""
        self.edit_name.value = self.display_task.label
        self.display_view.visible = False
        self.edit_view.visible = True
        self.update()

    def save_clicked(self, e):
        """Commit the edited name and switch back to the display view."""
        self.display_task.label = self.edit_name.value
        self.display_view.visible = True
        self.edit_view.visible = False
        self.update()

    def delete_clicked(self, e):
        """Delegate removal of this task to the owner via the callback."""
        self.task_delete(self)
def _compute_workspace(sorted_graph: List[Tensor]) -> Workspace:
    """Scan every source op in the graph and size the two scratch areas:
    the max shared 'workspace' any op needs, and the running total of
    'unique_workspace' bytes (assigning each op its offset exactly once)."""
    total_unique = 0
    shared_max = 0
    for tensor in sorted_graph:
        for op in tensor._attrs['src_ops']:
            attrs = op._attrs
            if 'workspace' in attrs:
                shared_max = max(shared_max, attrs['workspace'])
            # assign each op's unique-workspace offset only on first sight
            if 'unique_workspace' in attrs and 'unique_workspace_offset' not in attrs:
                attrs['unique_workspace_offset'] = total_unique
                total_unique += attrs['unique_workspace']
    return Workspace(shared_max, total_unique)
def test_rf_better_than_dt(dummy_titanic):
    """A bagged RandomForest should beat a single DecisionTree on held-out
    accuracy and AUC-ROC."""
    X_train, y_train, X_test, y_test = dummy_titanic

    def _fit_and_score(model):
        # fit on train, return (accuracy, auc) measured on the test split
        model.fit(X_train, y_train)
        scores = model.predict(X_test)
        return accuracy_score(y_test, np.round(scores)), roc_auc_score(y_test, scores)

    acc_dt, auc_dt = _fit_and_score(DecisionTree(depth_limit=10))
    acc_rf, auc_rf = _fit_and_score(RandomForest(depth_limit=10, num_trees=7, col_subsampling=0.8, row_subsampling=0.8))
    assert acc_rf > acc_dt, 'RandomForest should have higher accuracy than DecisionTree on test set.'
    assert auc_rf > auc_dt, 'RandomForest should have higher AUC ROC than DecisionTree on test set.'
class WaterBuilder():
    """Builds explicit "W" water pseudoatom records around polar atoms of a
    PDBQT structure (AutoDock hydrated-docking preparation).

    Coordinates are parsed by fixed column slicing, so inputs must be
    column-aligned ATOM/HETATM records.
    """

    def __init__(self, space=3.0, bond_dist=1.85, pcharge=0.0, residue='WAT', ATYPE='W', verbose=False):
        # space: distance (Angstrom) at which waters are placed from the parent atom
        self.space = space
        # bond_dist: max distance treated as a covalent bond when finding neighbours
        self.bond_dist = bond_dist
        # pcharge: partial charge written on each generated water
        self.pcharge = pcharge
        # residue: residue name for generated waters
        self.residue = residue
        # ATYPE: AutoDock atom-type label for the water pseudoatom
        self.ATYPE = ATYPE
        self.verbose = verbose
def atom_coord(self, atom):
return list(map(float, atom[28:56].split()))
def dist(self, firstline, secondline, precision=4):
(coord1, coord2) = list(map(self.atom_coord, [firstline, secondline]))
return np.round(np.sqrt(((((coord1[0] - coord2[0]) ** 2) + ((coord1[1] - coord2[1]) ** 2)) + ((coord1[2] - coord2[2]) ** 2))), precision)
    def mean_pdb(self, firstline, secondline):
        """Return a synthetic ATOM record ('MEA' residue, chain 'Y') placed at
        the midpoint of the two given atoms.

        NOTE(review): also sets self.chain = 'Y' as a side effect — confirm
        nothing relies on the previous value of self.chain.
        """
        coord1 = self.atom_coord(firstline)
        coord2 = self.atom_coord(secondline)
        x = ((coord1[0] + coord2[0]) / 2)
        y = ((coord1[1] + coord2[1]) / 2)
        z = ((coord1[2] + coord2[2]) / 2)
        # atom name is copied from the first input record
        atype = firstline[12:16]
        residue = 'MEA'
        self.chain = 'Y'
        count = 1
        index = 1
        mean_atom = ('ATOM %5d %4s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1s' % (count, atype, residue, self.chain, index, x, y, z, self.ATYPE))
        return mean_atom
def closest(self, first_atom, atom_list, cutoff=99999):
best_self.distance = 999999
best_candidate = None
for second_atom in atom_list:
self.distance = self.dist(first_atom, second_atom)
if (self.distance < best_self.distance):
best_self.distance = self.distance
if (best_self.distance < cutoff):
best_candidate = second_atom
return (best_candidate, best_self.distance)
    def mean3(self, firstline, secondline, thirdline):
        """Return a synthetic ATOM record at the centroid of three atoms.

        NOTE(review): unlike mean_pdb, the format uses self.ATYPE for the atom
        name while the local `atype` (parsed from firstline) goes unused —
        confirm which was intended.
        """
        coord1 = self.atom_coord(firstline)
        coord2 = self.atom_coord(secondline)
        coord3 = self.atom_coord(thirdline)
        x = (((coord1[0] + coord2[0]) + coord3[0]) / 3)
        y = (((coord1[1] + coord2[1]) + coord3[1]) / 3)
        z = (((coord1[2] + coord2[2]) + coord3[2]) / 3)
        atype = firstline[12:16]
        residue = 'MEA'
        chain = 'Y'
        count = 1
        index = 1
        mean_atom = ('ATOM %5d %4s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1s' % (count, self.ATYPE, residue, chain, index, x, y, z, self.ATYPE))
        return mean_atom
    def hydro(self, atom1, atom2, atom3=None, spacing=3):
        """Place a water pseudoatom `spacing` Angstrom beyond atom2, along the
        direction from atom1 (or, when atom3 is given, from the atom1/atom3
        midpoint) towards atom2. Returns a formatted PDBQT record line
        (with trailing newline). Requires self.keyw to have been set.
        """
        # HD parent: shorten the placement distance by 1 Angstrom
        if (atom2.split()[(- 1)] == 'HD'):
            spacing -= 1
        index = 99
        coord2 = self.atom_coord(atom2)
        x = coord2[0]
        y = coord2[1]
        z = coord2[2]
        if self.verbose:
            print(x, y, z)
        chain = atom1[21]
        residue = atom1[17:20]
        if atom3:
            # origin of the direction vector: midpoint of atom1 and atom3
            atom4 = self.mean_pdb(atom1, atom3)
            vec_module = self.dist(atom2, atom4)
            coord1 = self.atom_coord(atom4)
        else:
            coord1 = self.atom_coord(atom1)
            vec_module = self.dist(atom1, atom2)
        # direction cosines of the origin->atom2 axis
        alpha = np.arccos(((coord2[0] - coord1[0]) / vec_module))
        beta = np.arccos(((coord2[1] - coord1[1]) / vec_module))
        gamma = np.arccos(((coord2[2] - coord1[2]) / vec_module))
        # extend `spacing` along that axis from atom2
        wat_x = ((spacing * np.cos(alpha)) + x)
        wat_y = ((spacing * np.cos(beta)) + y)
        wat_z = ((spacing * np.cos(gamma)) + z)
        # NOTE(review): `index` (99) is written as both serial and resSeq
        wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, index, self.ATYPE, residue, chain, index, wat_x, wat_y, wat_z, self.pcharge, self.ATYPE))
        return wet
def hydroH(self, atom1, atom2, atom3):
middle = self.hydro(atom1, atom2, spacing=0)
avg = self.mean_pdb(middle, atom3)
last = self.hydro(avg, atom2, spacing=self.space)
if self.verbose:
print('middle=', middle)
print('avg=', avg)
print('last=', last)
def vector(self, p1, p2=None):
if self.verbose:
print('received ', p1, p2)
if (type(p1) == type(str())):
p1 = self.atom_coord(p1)
(x1, y1, z1) = p1
if (type(p2) == type(str())):
p2 = self.atom_coord(p2)
if (not (p2 == None)):
(x2, y2, z2) = p2
vec_x = (x2 - x1)
vec_y = (y2 - y1)
vec_z = (z2 - z1)
vec = np.array([vec_x, vec_y, vec_z], 'f')
if self.verbose:
print('REAL VECTOR', vec)
else:
vec = np.array([p1[0], p1[1], p1[2]], 'f')
if self.verbose:
print('ATOM VECTOR', vec)
return vec
    def bound(self, atom, structure, exclude=None):
        """Return every ATOM/HETATM record of `structure` covalently bound to
        `atom`, judged purely by distance (self.bond_dist; 1.15 A when `atom`
        is an HD hydrogen; +0.35 A tolerance around sulphur types SA/S).

        NOTE(review): exits the whole process when no neighbour is found —
        consider raising an exception instead of exit(1).
        """
        bound_list = []
        tolerance = 0
        bond_dist = self.bond_dist
        if (atom.split()[(- 1)] == 'HD'):
            # hydrogens use a shorter covalent-bond cutoff
            bond_dist = 1.15
            if self.verbose:
                print('HD mode')
        for candidate in structure:
            if ((candidate == atom) or (candidate == exclude)):
                pass
            elif ((candidate[0:4] == 'ATOM') or (candidate[0:6] == 'HETATM')):
                # sulphur gets extra bond-length tolerance
                if ((candidate.split()[(- 1)] == 'SA') or (candidate.split()[(- 1)] == 'S')):
                    tolerance = 0.35
                else:
                    tolerance = 0
                if (self.dist(atom, candidate) <= (bond_dist + tolerance)):
                    if (not (candidate in bound_list)):
                        bound_list.append(candidate)
            else:
                pass
        if (len(bound_list) > 0):
            return bound_list
        else:
            print('ERROR: this atom seems to be disconnected:')
            print(atom)
            print('exit 3')
            exit(1)
    def furanbolic(self, atom, structure, max=2.35):
        """Heuristic furan/oxazole detection: count non-HD atoms within `max`
        Angstrom of `atom` (including itself); exactly 5 -> True (5-membered
        ring), more than 6 -> True with a warning, otherwise False.

        NOTE(review): the parameter name `max` shadows the builtin — renaming
        would change the keyword interface, so it is kept.
        """
        the_ring = [atom]
        for item in structure:
            if (item != atom):
                if (item.split()[(- 1)] != 'HD'):
                    if (self.dist(atom, item) < max):
                        the_ring.append(item)
                        if self.verbose:
                            print('APPEND')
        if (len(the_ring) == 5):
            print(' - possible furan/oxazole found...')
            return True
        if (len(the_ring) > 6):
            print('WARNING: multiple atoms match the furan/oxazole check...')
            return True
        else:
            return False
    def Osp2(self, oxygen, atom1, atom2):
        """Place two waters on the lone pairs of an sp2 oxygen: the atom2->
        oxygen direction is rotated +/-120 degrees around the oxygen->atom1
        axis and extended self.space Angstrom. Returns the formatted records.
        Requires self.keyw to have been set.
        """
        waters = []
        angles = [120, (- 120)]
        # rotation axis: normalized oxygen->atom1 direction
        oxyvector = self.vector(oxygen, atom1)
        oxyvector = geomutils.normalize(oxyvector)
        residue = atom1[17:20]
        chain = atom1[21]
        for a in angles:
            roto = [oxyvector[0], oxyvector[1], oxyvector[2], np.radians(a)]
            lone_pair_vector = self.vector(atom2, oxygen)
            lone_pair_vector = geomutils.normalize(lone_pair_vector)
            water = geomutils.rotate_around_axis(((- lone_pair_vector) * self.space), roto, self.atom_coord(oxygen))
            wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, 1, self.ATYPE, residue, chain, 1, water[0], water[1], water[2], self.pcharge, self.ATYPE))
            waters.append(wet)
        if self.verbose:
            print(('osp2 returning %d waters' % len(waters)))
        return waters
    def Osp2_NEW(self, oxygen, atom1, atom2):
        """Experimental variant of Osp2: sweep a full circle in 10-degree
        steps around the atom1->atom2 axis, extending the (atom1/atom2
        midpoint)->oxygen direction by self.space for each angle. Returns the
        36 formatted records. Requires self.keyw to have been set.
        """
        waters = []
        angles = list(range(0, 360, 10))
        # NOTE(review): this first assignment is dead — it is immediately
        # overwritten by the atom1->atom2 vector on the next line.
        oxyvector = self.vector(oxygen, atom1)
        oxyvector = self.vector(atom1, atom2)
        oxyvector = geomutils.normalize(oxyvector)
        mid = self.mean_pdb(atom1, atom2)
        residue = atom1[17:20]
        chain = atom1[21]
        for a in angles:
            roto = [oxyvector[0], oxyvector[1], oxyvector[2], np.radians(a)]
            lone_pair_vector = self.vector(mid, oxygen)
            lone_pair_vector = geomutils.normalize(lone_pair_vector)
            water = geomutils.rotate_around_axis(((+ lone_pair_vector) * self.space), roto, self.atom_coord(oxygen))
            wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, 1, self.ATYPE, residue, chain, 1, water[0], water[1], water[2], self.pcharge, self.ATYPE))
            waters.append(wet)
        if self.verbose:
            print(('Osp2_NEW returning %d waters' % len(waters)))
        return waters
def hydrate(self, mol, extended_atoms=False):
    """Add explicit hydration waters around the polar atoms of a PDBQT file.

    Scans *mol* (path to a PDBQT file) for polar atom types (OA, NA, HD),
    generates candidate water positions for each via the hydro/Osp2 helpers,
    splices the generated water records after their parent atom lines, then
    renumbers atom serials and BRANCH records to account for the insertions.

    :param mol: path of the input PDBQT file to hydrate.
    :param extended_atoms: when True, also place waters opposite each bound
        mate of an ether-like oxygen (flag stored on ``self.EXTENDED_ATOMS``).

    NOTE(review): this routine builds ``final`` but never returns it, and
    ``added_waters`` is never used — the tail of the original function may
    have been truncated upstream; confirm against the full source.
    """
    added_waters = []
    atoms_list = []
    hydrate_list = []
    numbering_stuff = []
    water_mates = []
    self.EXTENDED_ATOMS = extended_atoms
    # Bug fix: the original read from the undefined name ``pdbqt`` (always a
    # NameError) and leaked the file handle; the ``mol`` parameter is the
    # intended input path.
    with open(mol, 'r') as pdbqt_file:
        input = pdbqt_file.readlines()
    # Pass 1: collect atom records and the polar atoms to hydrate.
    for line in input:
        if ((line[0:4] == 'ATOM') or (line[0:6] == 'HETATM')):
            atype = line.split()[(- 1)]
            atoms_list.append(line)
            if ((atype == 'OA') or (atype == 'NA') or (atype == 'HD')):
                hydrate_list.append(line)
            if (line[0:4] == 'ATOM'):
                self.keyw = 'ATOM '
            if (line[0:6] == 'HETATM'):
                self.keyw = 'HETATM'
    # Pass 2: generate waters for each polar atom, keyed by atom type.
    for atom in hydrate_list:
        atype = atom.split()[(- 1)]
        if self.verbose:
            print('PROCESSING ATOM :', atom)
            print('atype = ', atype)
        HYDROXYL = False
        position = int(atom.split()[1])
        waters_generated = [atom]
        if self.verbose:
            print('processing ', atom)
            print('0: water_mates=', water_mates)
            print('1: len(waters_generated)=', len(waters_generated))
            print('2: len(water_mates=', len(water_mates))
        master = self.bound(atom, atoms_list)
        if (len(master) == 0):
            print('\n\nERROR: this atom is disconnected:\n', atom)
            print('exit 0')
            exit(1)
        # --- Polar hydrogen: one water along the heavy-atom->H direction. ---
        if (atype == 'HD'):
            if (len(master) > 1):
                print('\n\nERROR (HD) : there is a proximity error and the following hydrogen is in close contact with more than one atom')
                print(atom)
                print('Bound mates:')
                for m in master:
                    print(m[:(- 1)], ' ==>', self.dist(m, atom))
                print('exit 1')
                exit(1)
            else:
                wet = self.hydro(master[0], atom)
                if self.verbose:
                    print('after call to hydro, wet=', wet)
                waters_generated.append(wet)
                if self.verbose:
                    print(('HD: added wet: now %d waters_generated:' % len(waters_generated)))
                    for ind in waters_generated:
                        print(ind)
                    print(('HD: added w_g to w_m: previously %d water_mates:' % len(water_mates)))
                    for ind in water_mates:
                        print(ind)
                    print(('HD: 2: %d waters_generated:' % len(waters_generated)))
                    for ind in waters_generated:
                        print(ind)
                water_mates.append(waters_generated)
                if self.verbose:
                    print(('HD: 2: NOW %d water_mates:' % len(water_mates)))
                numbering_stuff.append([position, (len(waters_generated) - 1)])
        # --- Oxygen acceptor: placement depends on how many atoms bind it. ---
        if (atype == 'OA'):
            if (len(master) == 1):
                MASTER = master[0]
                residue = MASTER[17:20]
                chain = MASTER[21]
                mates = self.bound(MASTER, atoms_list, exclude=atom)
                if (len(mates) <= 2):
                    # Carbonyl-like oxygen: two waters at +/-50 degrees in the
                    # O=C plane.
                    v12 = self.vector(atom, MASTER)
                    v23 = self.vector(mates[0], MASTER)
                    (plane0, plane1, plane2) = geomutils.normalize(np.cross(v12, v23))
                    self.chain = 1
                    roto = [plane0, plane1, plane2, np.radians(50)]
                    wat = geomutils.rotate_around_axis((geomutils.normalize((- v12)) * self.space), roto, self.atom_coord(atom))
                    wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, 1, self.ATYPE, residue, chain, 1, wat[0], wat[1], wat[2], self.pcharge, self.ATYPE))
                    if self.verbose:
                        print('O1: wet:')
                        print(wet)
                    waters_generated.append(wet)
                    if self.verbose:
                        print('waters_generated:')
                        print(waters_generated)
                        print('O1: waters_generated=', end=' ')
                        for ind in waters_generated:
                            print(ind)
                    roto = [plane0, plane1, plane2, np.radians((- 50))]
                    if self.verbose:
                        print('calling rotate_around_axis with self.atom_coord(', atom, '), roto=', roto)
                    wat = geomutils.rotate_around_axis((geomutils.normalize((- v12)) * self.space), roto, self.atom_coord(atom))
                    if self.verbose:
                        print('wat =', wat)
                    wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, 1, self.ATYPE, residue, chain, 1, wat[0], wat[1], wat[2], self.pcharge, self.ATYPE))
                    waters_generated.append(wet)
                    if self.verbose:
                        print(('O2: %d wet: %s' % (len(wet), wet)))
                        print(('O2: %d waters_generated=' % len(waters_generated)), end=' ')
                        for ind in waters_generated:
                            print(ind)
                    water_mates.append(waters_generated)
                    if self.verbose:
                        print(('O2: %d water_mates=' % len(water_mates)), end=' ')
                        for ind in water_mates:
                            print(ind)
                    numbering_stuff.append([int(position), (len(waters_generated) - 1)])
                elif self.EXTENDED_ATOMS:
                    # Extended mode: one water pushed away from each mate.
                    directive = self.vector(master[0], atom)
                    directive = (geomutils.normalize(directive) * self.space)
                    for q in mates:
                        position_q = self.vector(q)
                        position_q = geomutils.vecSum(position_q, directive)
                        push = geomutils.normalize(self.vector(atom, position_q))
                        start = self.vector(atom)
                        lpair = geomutils.vecSum(start, (push * self.space))
                        wet = ('%s%5d %2s %3s %1s%4d %8.3f%8.3f%8.3f 1.00 10.00 %1.3f %1s\n' % (self.keyw, 1, self.ATYPE, residue, chain, 1, lpair[0], lpair[1], lpair[2], self.pcharge, self.ATYPE))
                        if self.verbose:
                            print('EA: wet=', end=' ')
                            for ind in wet:
                                print(ind)
                        waters_generated.append(wet)
                        if self.verbose:
                            print('EA: waters_generated=', end=' ')
                            for ind in waters_generated:
                                print(ind)
                        water_mates.append(waters_generated)
                        if self.verbose:
                            print(('661:EA: %d water_mates:' % len(water_mates)), end=' ')
                            for ind in water_mates:
                                for j in ind:
                                    print(j)
                        numbering_stuff.append([int(position), (len(waters_generated) - 1)])
            if (len(master) == 2):
                # Ether/hydroxyl oxygen: check for an attached HD first.
                for m in master:
                    if (m.split()[(- 1)] == 'HD'):
                        HYDROXYL = True
                if (not HYDROXYL):
                    O_plane = geomutils.calcPlane(atom, master[0], master[1])
                    coplanar_mates = geomutils.coplanar(O_plane, atoms_list, atom)
                    if ((len(coplanar_mates) >= 4) and self.furanbolic(atom, coplanar_mates)):
                        # Aromatic (furan/oxazole) oxygen: single in-plane water.
                        wet = self.hydro(master[0], atom, master[1])
                        if self.verbose:
                            print('notHYDROXYL: wet=', end=' ')
                            for ind in wet:
                                print(ind)
                        waters_generated.append(wet)
                        if self.verbose:
                            print('notHYDROXYL: waters_generated=', end=' ')
                            for ind in waters_generated:
                                print(ind)
                    else:
                        if self.verbose:
                            print('FURAN MODE FAILED')
                        lp_waters = self.Osp2(atom, master[0], master[1])
                        if self.verbose:
                            print('lp_waters:', end=' ')
                            for ind in lp_waters:
                                print(ind)
                        for w in lp_waters:
                            waters_generated.append(w)
                        if self.verbose:
                            print('else in not HYDROXYL: waters_generated=', end=' ')
                            for ind in waters_generated:
                                print(ind)
                            print('NOT_COPLANAR ELSE 1: waters_generated=', waters_generated)
                else:
                    # Hydroxyl oxygen: lone-pair waters from the sp2 helper.
                    lp_waters = self.Osp2(atom, master[0], master[1])
                    if self.verbose:
                        print('lp_waters:')
                    for w in lp_waters:
                        if self.verbose:
                            print(w)
                        waters_generated.append(w)
                    if self.verbose:
                        print('ELSE: waters_generated=', end=' ')
                        for ind in waters_generated:
                            print(ind)
                        print('before adding else of not HYDROXYL, water_mates=', end=' ')
                        for ind in water_mates:
                            print(ind)
                water_mates.append(waters_generated)
                if self.verbose:
                    print('LAST 1: waters_generated=', end=' ')
                    for ind in waters_generated:
                        print(ind)
                    print('LAST 1: water_mates =', end=' ')
                    for wm in water_mates:
                        print(wm)
                    print('LAST 1: waters_generated=', waters_generated)
                    print('LAST 1: water_mates =', water_mates)
                numbering_stuff.append([int(position), (len(waters_generated) - 1)])
        # --- Nitrogen acceptor: nitrile, imine, or tertiary amine. ---
        if (atype == 'NA'):
            if (len(master) == 1):
                wet = self.hydro(master[0], atom)
                waters_generated.append(wet)
                water_mates.append(waters_generated)
                if self.verbose:
                    print('NA 1: wet=', end=' ')
                    for ind in wet:
                        print(ind)
                    print('NA 1: waters_generated=', end=' ')
                    for ind in waters_generated:
                        print(ind)
                    print('NA 1: water_mates=', end=' ')
                    for ind in water_mates:
                        print(ind)
                numbering_stuff.append([int(position), (len(waters_generated) - 1)])
            if (len(master) == 2):
                wet = self.hydro(master[0], atom, master[1])
                if self.verbose:
                    print('nitrile mode: wet:', wet)
                waters_generated.append(wet)
                if self.verbose:
                    print('N: waters_generated:', end=' ')
                    for ind in waters_generated:
                        print(ind)
                water_mates.append(waters_generated)
                if self.verbose:
                    print('N: water_mates:', end=' ')
                    for ind in water_mates:
                        print(ind)
                    print('NA 2: wet=', wet)
                    print('NA 2: waters_generated=', waters_generated)
                    print('NA 2: water_mates=', water_mates)
                numbering_stuff.append([int(position), (len(waters_generated) - 1)])
                if self.verbose:
                    print('nitrile numbering_stuff:', end=' ')
                    for ind in numbering_stuff:
                        print(ind)
            if (len(master) == 3):
                # Tertiary amine: water opposite the centroid of the three mates.
                master_center = self.mean3(master[0], master[1], master[2])
                wet = self.hydro(master_center, atom)
                if self.verbose:
                    print('tertiary amine: wet=', end=' ')
                    for ind in wet:
                        print(ind)
                waters_generated.append(wet)
                if self.verbose:
                    print('tertiary amine: waters_generated=', end=' ')
                    for ind in waters_generated:
                        print(ind)
                water_mates.append(waters_generated)
                if self.verbose:
                    print('tertiary amine: water_mates=', end=' ')
                    for ind in water_mates:
                        print(ind)
                    print('NA 3: wet=', wet)
                    print('NA 3: waters_generated=', waters_generated)
                    print('NA 3: water_mates=', water_mates)
                numbering_stuff.append([int(position), (len(waters_generated) - 1)])
                if self.verbose:
                    print(numbering_stuff[(- 1)], ' added to numbering_stuff')
    # Pass 3: splice each parent atom line plus its waters back into place.
    ctr = 0
    for mates in water_mates:
        index = input.index(mates[0])
        if self.verbose:
            print('mates=', mates)
            print('mates[0]=', mates[0])
        line = ''
        for atom in mates:
            line += atom
        if self.verbose:
            print((' %d input line was = %s' % (index, input[index])))
        input[index] = line
        if self.verbose:
            print(('now %d input line set to %s' % (index, line)))
            print(('~~~numbering_stuff[%d]=%s' % (ctr, numbering_stuff[ctr])))
        ctr += 1
    # Pass 4: flatten the spliced lines and drop stale water placeholders.
    count = 1
    final = []
    if self.verbose:
        print('len(input)=', len(input), ' len(final)=', len(final))
    for line in input:
        line = line.split('\n')
        if self.verbose:
            print('ZZZZ: line=', end=' ')
            for YYYY in line:
                print(YYYY)
        if ((len(line) == 3) and (line[(- 1)] != '')):
            # Bug fix: the original did ``line = line.remove(line[1])``, which
            # rebinds ``line`` to None (list.remove returns None) and crashed
            # the loop below with a TypeError.
            line.remove(line[1])
        ct = 0
        for item in line:
            if ((not (item == '')) and (item.find('WAT') < 0) and (item.find('W 99 1') < 0)):
                final.append(item)
                if ((len(item) > 13) and (item[13] == 'W')):
                    if self.verbose:
                        print('after adding W ', item, ' len(final)=', len(final))
            ct += 1
    if self.verbose:
        print('len(input)=', len(input), ' len(final)=', len(final))
    # Pass 5: renumber atom serial fields (columns 7-10) sequentially.
    for line in final:
        if self.verbose:
            print('at top line=', line, ' contains WAT ', (line.find('WAT') > 0))
        if ((line.find('WAT') < 0) and ((line[0:4] == 'ATOM') or (line[0:6] == 'HETATM'))):
            value = ('%4s' % count)
            idx = final.index(line)
            final[idx] = ((line[0:7] + value) + line[11:])
            count += 1
            if self.verbose:
                print('in loop line=', line)
    # Pass 6: shift BRANCH record indices by the number of inserted waters.
    for line in final:
        idx = final.index(line)
        if ('BRANCH' in line):
            line = line.split()
            (value1, value2) = (int(line[1]), int(line[2]))
            addendum1 = 0
            addendum2 = 0
            for mark in numbering_stuff:
                if (value1 > mark[0]):
                    addendum1 += mark[1]
                if (value2 > mark[0]):
                    addendum2 += mark[1]
            value1 += addendum1
            value2 += addendum2
            final[idx] = ((((line[0] + ' ') + str(value1)) + ' ') + str(value2))
def test_current_case(qtbot, notifier, storage):
    """A CaseSelector created before the storage is attached starts empty,
    picks up the current ensemble when the notifier is updated, and a
    selector created afterwards sees the ensemble immediately."""
    ensemble = storage.create_experiment().create_ensemble(name='default', ensemble_size=1)

    # Selector built before any storage is known: no entries yet.
    early_selector = CaseSelector(notifier)
    qtbot.addWidget(early_selector)
    assert early_selector.count() == 0

    # Attaching storage + current case populates the existing selector.
    notifier.set_storage(storage)
    notifier.set_current_case(ensemble)
    assert early_selector.count() == 1
    assert early_selector.currentData() == ensemble

    # A selector built after the fact is populated right away.
    late_selector = CaseSelector(notifier)
    qtbot.addWidget(late_selector)
    assert late_selector.count() == 1
    assert late_selector.currentData() == ensemble
class OptionPlotoptionsBarSonificationTracksPointgrouping(Options):
    """Generated accessors for ``plotOptions.bar.sonification.tracks.pointGrouping``.

    Each option appears twice: a zero-argument reader returning the configured
    value (with the Highcharts default passed to ``_config_get``) and a
    one-argument writer storing a value via ``_config``.

    NOTE(review): as written, the second definition of each name shadows the
    first at class-creation time — ``@property``/``@name.setter`` decorators
    appear to have been stripped from this generated code; confirm against
    the generator's original output.
    """
    # Grouping algorithm; Highcharts default 'minmax'.
    def algorithm(self):
        return self._config_get('minmax')
    def algorithm(self, text: str):
        self._config(text, js_type=False)
    # Whether point grouping is enabled; default True.
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    # Time span (ms) of each group; default 15.
    def groupTimespan(self):
        return self._config_get(15)
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)
    # Point property used for grouping; default 'y'.
    def prop(self):
        return self._config_get('y')
    def prop(self, text: str):
        self._config(text, js_type=False)
class Encoder(hk.Module):
    """Convolutional image encoder producing a flat feature vector.

    Pixels are rescaled from [0, 255] to [0, 1] (the input is cast to
    float32 and divided by 255), passed through one stride-2 conv followed
    by ``num_layers - 1`` stride-1 convs (all 3x3, VALID padding,
    orthogonal init), each followed by the activation, and flattened.
    """

    def __init__(self, num_layers=4, num_filters=32, activation=jax.nn.relu):
        super().__init__()
        self.num_layers = num_layers
        self.num_filters = num_filters
        self._activation = activation

    def __call__(self, x):
        # Normalise raw pixel values to [0, 1].
        h = x.astype(jnp.float32) / 255.0
        # First layer downsamples with stride 2; the rest keep resolution.
        h = self._activation(
            hk.Conv2D(self.num_filters, kernel_shape=3, stride=2,
                      padding='VALID', w_init=orthogonal_init)(h))
        for _ in range(self.num_layers - 1):
            h = self._activation(
                hk.Conv2D(self.num_filters, kernel_shape=3, stride=1,
                          padding='VALID', w_init=orthogonal_init)(h))
        return hk.Flatten()(h)
def valid_port_range(user_input: str) -> range:
    """Parse a 'low-high' string into an inclusive ``range`` of ports.

    Intended as an argparse ``type=`` callable: every malformed input raises
    :class:`argparse.ArgumentTypeError` so argparse reports a clean usage
    error.  Individual port validation is delegated to ``convert_port``.

    :raises ArgumentTypeError: if the input does not contain exactly two
        '-'-separated fields, or if the range is inverted.
    """
    parts = user_input.split('-')
    # Bug fix: the original only checked "'-' not in input", so e.g. '1-2-3'
    # escaped as a raw ValueError from tuple unpacking instead of a clean
    # ArgumentTypeError.
    if len(parts) != 2:
        raise ArgumentTypeError("Port range must contain two integers separated by '-'")
    (a, b) = parts
    (port_a, port_b) = (convert_port(a), convert_port(b))
    if port_b < port_a:
        raise ArgumentTypeError(f'Invalid port range [{a},{b}], {b} is < {a}')
    # range() upper bound is exclusive, hence the +1 to include port_b.
    return range(port_a, (port_b + 1))
class SpacesRecordingWrapper(Wrapper[MazeEnv]):
    """Env wrapper that records per-step spaces (observation, action, reward,
    done, info) into a :class:`SpacesTrajectoryRecord` and pickles one file
    per episode into ``output_dir`` on the next reset.
    """

    def __init__(self, env: Union[(gym.Env, MazeEnv)], output_dir: str='space_records'):
        """:param env: the environment to wrap.
        :param output_dir: directory the per-episode pickle files go to."""
        super().__init__(env)
        self.episode_record: Optional[SpacesTrajectoryRecord] = None
        # Bug fix: the original line read
        #   self.last_observation = Optional[ObservationType]
        # which stored the typing construct itself on the instance instead of
        # declaring the attribute and initialising it to None.
        self.last_observation: Optional[ObservationType] = None
        self.last_env_time: Optional[int] = None
        self.output_dir = Path(output_dir)

    # NOTE(review): the bare ``(BaseEnv)`` / ``(Wrapper)`` expressions below
    # look like ``@override(...)`` decorators whose marker was lost in
    # transit; they are harmless no-op statements and are kept as-is.
    (BaseEnv)
    def reset(self) -> Any:
        """Flush the previous episode to disk and start a fresh record."""
        self.write_episode_record()
        self.last_observation = self.env.reset()
        self.last_env_time = None
        self.episode_record = SpacesTrajectoryRecord(id=self.env.get_episode_id())
        return self.last_observation

    (BaseEnv)
    def step(self, action: ActionType) -> Tuple[(ObservationType, Any, bool, Dict[(Any, Any)])]:
        """Step the env and append a SpacesRecord for the acting actor.

        A new StructuredSpacesRecord is opened whenever the env time advances,
        so all sub-steps of one flat step land in the same record.  Note the
        record stores the observation the agent acted on (``last_observation``),
        not the one returned by this step.
        """
        assert (self.episode_record is not None), 'Environment must be reset before stepping.'
        if (self.env.get_env_time() != self.last_env_time):
            self.episode_record.step_records.append(StructuredSpacesRecord())
            self.last_env_time = self.env.get_env_time()
        actor_id = self.env.actor_id()
        (observation, reward, done, info) = self.env.step(action)
        self.episode_record.step_records[(- 1)].append(SpacesRecord(actor_id=actor_id, observation=self.last_observation, action=action, reward=reward, done=done, info=info))
        self.last_observation = observation
        return (observation, reward, done, info)

    def write_episode_record(self) -> None:
        """Pickle the current episode record (if any steps were taken) to
        ``output_dir/<episode_id>.pkl``, creating the directory on demand."""
        if (self.episode_record and (len(self.episode_record.step_records) > 0)):
            output_path = (self.output_dir / f'{self.episode_record.id}.pkl')
            self.output_dir.mkdir(parents=True, exist_ok=True)
            with open(output_path, 'wb') as out_f:
                pickle.dump(self.episode_record, out_f)

    (Wrapper)
    def get_observation_and_action_dicts(self, maze_state: Optional[MazeStateType], maze_action: Optional[MazeActionType], first_step_in_episode: bool) -> Tuple[(Optional[Dict[(Union[(int, str)], Any)]], Optional[Dict[(Union[(int, str)], Any)]])]:
        """Delegate space conversion straight to the wrapped environment."""
        return self.env.get_observation_and_action_dicts(maze_state, maze_action, first_step_in_episode)
class SpanCoTExample(FewshotExample, abc.ABC):
    """Abstract chain-of-thought few-shot example over labelled text spans.

    Attributes:
        text: the raw example text.
        spans: the span-level reasoning entries for the example.
    """
    text: str
    spans: List[SpanReason]

    # Bug fix: this helper takes the spans iterable as its first argument (no
    # ``self``), so calling it on an instance would mis-bind; it is a static
    # utility and is marked as such.
    @staticmethod
    def _extract_span_reasons(spans: Iterable[Span]) -> List[SpanReason]:
        """Convert spaCy-style spans into SpanReason entries, each marked as
        an entity with a boilerplate 'is a <label>' justification."""
        span_reasons: List[SpanReason] = []
        for span in spans:
            span_reasons.append(SpanReason(text=span.text, is_entity=True, label=span.label_, reason=f'is a {span.label_}'))
        return span_reasons
class OptionPlotoptionsPolygonStatesSelectHalo(Options):
    """Generated accessors for ``plotOptions.polygon.states.select.halo``.

    Reader/writer pairs share a name; the reader returns the configured value
    with the Highcharts default, the writer stores via ``_config``.
    NOTE(review): the duplicate definitions shadow each other as written —
    ``@property``/setter decorators appear stripped from this generated code.
    """
    # SVG attributes of the halo; no default.
    def attributes(self):
        return self._config_get(None)
    def attributes(self, value: Any):
        self._config(value, js_type=False)
    # Halo opacity; default 0.25.
    def opacity(self):
        return self._config_get(0.25)
    def opacity(self, num: float):
        self._config(num, js_type=False)
    # Halo size in pixels; default 10.
    def size(self):
        return self._config_get(10)
    def size(self, num: float):
        self._config(num, js_type=False)
class TupleMeta(Meta):
    """Metaclass providing ``Tuple[...]`` subscription, repr, and backend
    formatting for parameterized tuple types."""

    def __getitem__(self, types):
        """Build a concrete Tuple subclass for the given element types.

        Accepts a single type or a tuple of types; string entries are
        converted with ``str2type``.
        """
        if (not isinstance(types, tuple)):
            types = (types,)
        trans_types = []
        for type_in in types:
            if isinstance(type_in, str):
                # Bug fix: the original line was ``type_in(str2type(type_in))``,
                # which *called* the string (TypeError) and discarded the
                # conversion; the converted type must be bound back.
                type_in = str2type(type_in)
            trans_types.append(type_in)
        return type('TupleBis', (Tuple,), {'types': trans_types})

    def get_template_parameters(self):
        """Collect template parameters from all element types that have any."""
        template_params = []
        for type_ in self.types:
            if hasattr(type_, 'get_template_parameters'):
                template_params.extend(type_.get_template_parameters())
        return tuple(template_params)

    def __repr__(self):
        # An unparameterized Tuple has no ``types`` yet; fall back to the
        # default metaclass repr.
        if (not hasattr(self, 'types')):
            return super().__repr__()
        strings = []
        for type_ in self.types:
            if isinstance(type_, Meta):
                name = repr(type_)
            elif isinstance(type_, type):
                name = type_.__name__
            else:
                name = repr(type_)
            strings.append(name)
        return f"Tuple[{', '.join(strings)}]"

    def format_as_backend_type(self, backend_type_formatter, **kwargs):
        """Delegate code generation for this tuple type to the formatter."""
        return backend_type_formatter.make_tuple_code(self.types, **kwargs)
class bsn_debug_counter_desc_stats_request(bsn_stats_request):
    """OpenFlow 1.5 BSN experimenter stats request: debug counter descriptions.

    Generated-style loxi message class; wire format is
    version/type/length/xid header, stats_type/flags, 4 pad bytes,
    experimenter id and subtype.
    """
    version = 6
    type = 18
    stats_type = 65535
    experimenter = 6035143
    subtype = 13

    def __init__(self, xid=None, flags=None):
        self.xid = xid if (xid != None) else None
        self.flags = flags if (flags != None) else 0

    def pack(self):
        """Serialize the message and back-patch the length field.

        Bug fix: the original appended a *str* padding chunk and joined with
        ``''``, which raises TypeError on Python 3 where struct.pack returns
        bytes; all chunks are now bytes and the result is a bytes object.
        """
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder, patched below
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(b'\x00' * 4)  # pad
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return b''.join(packed)

    # Bug fix: unpack takes no self and is invoked on the class; it was
    # missing the @staticmethod marker (standard in loxi-generated code).
    @staticmethod
    def unpack(reader):
        """Deserialize from a loxi reader, asserting all fixed fields."""
        obj = bsn_debug_counter_desc_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 6)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict the reader to this message's extent (4 bytes already read).
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 65535)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 13)
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        return True

    def pretty_print(self, q):
        """Write a human-readable dump of the message to pretty-printer q."""
        q.text('bsn_debug_counter_desc_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REQ_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
            q.breakable()
        q.text('}')
class RelationshipMemberWafRuleRevision(ModelNormal):
    """Generated OpenAPI model: a WAF-rule-revision relationship member with
    a ``type`` (TypeWafRuleRevision) and a read-only ``id``.

    NOTE(review): the bare ``_property`` and ``_js_args_to_python_args``
    statements below look like decorator markers whose leading '@' (and
    likely '@cached' / 'convert' prefixes) were stripped in transit; in the
    upstream generated code these methods are decorated properties /
    argument converters, and ``_from_openapi_data`` is a classmethod.
    """
    allowed_values = {}
    validations = {}
    _property
    def additional_properties_type():
        # Types accepted for properties not listed in attribute_map.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        # Declared attribute name -> (type,) mapping; lazy import avoids cycles.
        lazy_import()
        return {'type': (TypeWafRuleRevision,), 'id': (str,)}
    _property
    def discriminator():
        return None
    attribute_map = {'type': 'type', 'id': 'id'}
    read_only_vars = {'id'}
    _composed_schemas = {}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from server data; unlike __init__, read-only vars are
        allowed to be set here."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys the schema does not know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Client-side constructor; setting a read-only attribute raises."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): upstream generated code raises *before* the
            # setattr for read-only vars; here the attribute is set first.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class TestClass():
    """Tests for the Class complex type: declaration rendering, copying,
    member management, and offset-based lookup.  Fixtures (class_book, title,
    num_pages, author, book, record_id) are provided elsewhere in the file.
    """
    def test_declaration(self, class_book: Struct, record_id: Union):
        # Declaration before and after adding a new member.
        assert (class_book.declaration() == 'class ClassBook {\n\tchar * title;\n\tint num_pages;\n\tchar * author;\n}')
        class_book.add_member((m := ComplexTypeMember(size=64, name='id', offset=12, type=record_id)))
        # NOTE(review): the body lines of this f-string may have lost leading
        # tab characters in transit — compare with the literal above.
        result = f'''class ClassBook {{
char * title;
int num_pages;
char * author;
{m.declaration()};
}}'''
        assert (class_book.declaration() == result)
    def test_str(self, class_book: Struct):
        assert (str(class_book) == 'ClassBook')
    def test_copy(self, class_book: Struct):
        # A copy is a distinct object with equal (but not identical) members.
        new_class_book: Struct = class_book.copy()
        assert (id(new_class_book) != id(class_book))
        assert (new_class_book.size == class_book.size)
        assert (new_class_book.type_specifier == class_book.type_specifier == ComplexTypeSpecifier.CLASS)
        assert (id(new_class_book.members) != id(class_book.members))
        assert (new_class_book.get_member_by_offset(0) == class_book.get_member_by_offset(0))
        assert (id(new_class_book.get_member_by_offset(0)) != id(class_book.get_member_by_offset(0)))
        assert (len(new_class_book.members) == len(class_book.members))
    def test_add_members(self, class_book, title, num_pages, author):
        # Insertion order must not matter for equality.
        empty_class_book = Class(name='ClassBook', members={}, size=96)
        empty_class_book.add_member(title)
        empty_class_book.add_member(author)
        empty_class_book.add_member(num_pages)
        assert (empty_class_book == class_book)
    def test_get_member_by_offset(self, class_book, title, num_pages, author):
        assert (class_book.get_member_by_offset(0) == title)
        assert (class_book.get_member_by_offset(4) == num_pages)
        assert (class_book.get_member_by_offset(8) == author)
    def test_get_member_name_by_offset(self, class_book, title, num_pages, author):
        assert (class_book.get_member_name_by_offset(0) == title.name)
        assert (class_book.get_member_name_by_offset(4) == num_pages.name)
        assert (class_book.get_member_name_by_offset(8) == author.name)
        # Unknown offsets fall back to synthesized field names.
        assert (class_book.get_member_name_by_offset(256) == 'field_0x100')
        assert (class_book.get_member_name_by_offset((- 256)) == 'field_minus_0x100')
    def test_get_complex_type_name(self, class_book):
        assert (class_book.complex_type_name == ComplexTypeName(0, 'ClassBook'))
    def test_class_not_struct(self, class_book, book):
        # A Class and a Struct with the same layout must not compare equal.
        assert (book != class_book)
class OptionSeriesSunburstStatesInactive(Options):
    """Generated accessors for ``series.sunburst.states.inactive``.

    Reader/writer pairs share a name (reader returns the configured value
    with the Highcharts default, writer stores via ``_config``).
    NOTE(review): the duplicate definitions shadow each other as written —
    ``@property``/setter decorators appear stripped from this generated code.
    """
    # Sub-config object for the inactive-state animation.
    def animation(self) -> 'OptionSeriesSunburstStatesInactiveAnimation':
        return self._config_sub_data('animation', OptionSeriesSunburstStatesInactiveAnimation)
    # Whether the inactive state is enabled; default True.
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    # Opacity of inactive points; default 0.2.
    def opacity(self):
        return self._config_get(0.2)
    def opacity(self, num: float):
        self._config(num, js_type=False)
class OptionSeriesTimelineDatalabels(Options):
    """Generated accessors for ``series.timeline.dataLabels``.

    Every option is a reader/writer pair sharing one name: the reader returns
    the configured value (falling back to the Highcharts default passed to
    ``_config_get``), the writer stores a new value via ``_config``; the
    ``animation``/``filter``/``style``/``textPath`` accessors return nested
    sub-config objects instead.
    NOTE(review): the duplicate definitions shadow each other as written —
    ``@property``/setter decorators appear stripped from this generated code.
    """
    def align(self):
        return self._config_get('center')
    def align(self, text: str):
        self._config(text, js_type=False)
    def allowOverlap(self):
        return self._config_get(True)
    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)
    def alternate(self):
        return self._config_get(True)
    def alternate(self, flag: bool):
        self._config(flag, js_type=False)
    # Nested animation sub-config.
    def animation(self) -> 'OptionSeriesTimelineDatalabelsAnimation':
        return self._config_sub_data('animation', OptionSeriesTimelineDatalabelsAnimation)
    def backgroundColor(self):
        return self._config_get('#ffffff')
    def backgroundColor(self, text: str):
        self._config(text, js_type=False)
    def borderColor(self):
        return self._config_get('#999999')
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(3)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(1)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get('#333333')
    def color(self, text: str):
        self._config(text, js_type=False)
    def connectorColor(self):
        return self._config_get(None)
    def connectorColor(self, text: str):
        self._config(text, js_type=False)
    def connectorWidth(self):
        return self._config_get(1)
    def connectorWidth(self, num: float):
        self._config(num, js_type=False)
    def crop(self):
        return self._config_get(True)
    def crop(self, flag: bool):
        self._config(flag, js_type=False)
    def defer(self):
        return self._config_get(True)
    def defer(self, flag: bool):
        self._config(flag, js_type=False)
    def distance(self):
        return self._config_get('undefined')
    def distance(self, text: str):
        self._config(text, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    # Nested filter sub-config (conditional display of labels).
    def filter(self) -> 'OptionSeriesTimelineDatalabelsFilter':
        return self._config_sub_data('filter', OptionSeriesTimelineDatalabelsFilter)
    def format(self):
        return self._config_get('point.value')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get(None)
    def formatter(self, value: Any):
        self._config(value, js_type=False)
    def inside(self):
        return self._config_get(None)
    def inside(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormat(self):
        return self._config_get(None)
    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormatter(self):
        return self._config_get(None)
    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)
    def overflow(self):
        return self._config_get('justify')
    def overflow(self, text: str):
        self._config(text, js_type=False)
    def padding(self):
        return self._config_get(5)
    def padding(self, num: float):
        self._config(num, js_type=False)
    def position(self):
        return self._config_get('center')
    def position(self, text: str):
        self._config(text, js_type=False)
    def rotation(self):
        return self._config_get(0)
    def rotation(self, num: float):
        self._config(num, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def shape(self):
        return self._config_get('square')
    def shape(self, text: str):
        self._config(text, js_type=False)
    # Nested CSS-style sub-config.
    def style(self) -> 'OptionSeriesTimelineDatalabelsStyle':
        return self._config_sub_data('style', OptionSeriesTimelineDatalabelsStyle)
    # Nested text-path sub-config.
    def textPath(self) -> 'OptionSeriesTimelineDatalabelsTextpath':
        return self._config_sub_data('textPath', OptionSeriesTimelineDatalabelsTextpath)
    def useHTML(self):
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
    def verticalAlign(self):
        return self._config_get('bottom')
    def verticalAlign(self, text: str):
        self._config(text, js_type=False)
    def width(self):
        return self._config_get(None)
    def width(self, num: float):
        self._config(num, js_type=False)
    def x(self):
        return self._config_get(0)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get(0)
    def y(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(6)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
def monthly_download_data(db, monkeypatch):
    """Seed test data for monthly download tests.

    Creates all download job statuses, two toptier agencies (codes 001/002)
    with matching Agency rows, and — for each fiscal year 2001-2020 — one
    FPDS award/transaction pair (ids i / piid{i}) and one FABS pair (ids
    i+100 / fain{i}), then patches the monthly-download S3 bucket name.
    NOTE(review): presumably decorated as a pytest fixture upstream (the
    ``db``/``monkeypatch`` parameters are pytest fixtures).
    """
    # Reference rows: one JobStatus per known status.
    for js in JOB_STATUS:
        baker.make('download.JobStatus', job_status_id=js.id, name=js.name, description=js.desc)
    # Two agencies so agency-filtered downloads can be exercised.
    baker.make('references.ToptierAgency', toptier_agency_id=1, toptier_code='001', name='Test_Agency', _fill_optional=True)
    baker.make('references.Agency', pk=1, toptier_agency_id=1, _fill_optional=True)
    baker.make('references.ToptierAgency', toptier_agency_id=2, toptier_code='002', name='Test_Agency 2', _fill_optional=True)
    baker.make('references.Agency', pk=2, toptier_agency_id=2, _fill_optional=True)
    i = 1
    for fiscal_year in range(2001, 2021):
        # FPDS (contract) award + transaction for this fiscal year.
        baker.make('search.AwardSearch', award_id=i, generated_unique_award_id=f'CONT_AWD_{i}_0_0', is_fpds=True, type='B', type_description='Purchase Order', piid=f'piid{i}', awarding_agency_id=1, funding_agency_id=1, latest_transaction_id=i, fiscal_year=fiscal_year)
        baker.make('search.TransactionSearch', award_id=i, transaction_id=i, is_fpds=True, transaction_unique_id=i, usaspending_unique_transaction_id='', type='B', type_description='Purchase Order', period_of_performance_start_date=datetime.datetime(fiscal_year, 5, 7), period_of_performance_current_end_date=datetime.datetime(fiscal_year, 5, 7), action_date=datetime.datetime(fiscal_year, 5, 7), federal_action_obligation=100, modification_number='1', transaction_description='a description', last_modified_date=datetime.datetime(fiscal_year, 5, 7), award_certified_date=datetime.datetime(fiscal_year, 5, 7), create_date=datetime.datetime(fiscal_year, 5, 7), update_date=datetime.datetime(fiscal_year, 5, 7), fiscal_year=fiscal_year, awarding_agency_id=1, funding_agency_id=1, original_loan_subsidy_cost=100.0, face_value_loan_guarantee=100.0, funding_amount=100.0, non_federal_funding_amount=100.0, generated_unique_award_id=f'CONT_AWD_{i}_0_0', business_categories=[], detached_award_proc_unique=f'test{i}', piid=f'piid{i}', agency_id=1, awarding_sub_tier_agency_c='001', awarding_subtier_agency_abbreviation='Test_Agency', awarding_agency_code='001', awarding_toptier_agency_abbreviation='Test_Agency', parent_award_id=f'000{i}')
        # FABS (assistance) award + transaction, offset by 100 to avoid id clashes.
        baker.make('search.AwardSearch', award_id=(i + 100), generated_unique_award_id=f'ASST_NON_{i}_0_0', is_fpds=False, type='02', type_description='Block Grant', fain=f'fain{i}', awarding_agency_id=1, funding_agency_id=1, latest_transaction_id=(i + 100), fiscal_year=fiscal_year)
        baker.make('search.TransactionSearch', award_id=(i + 100), generated_unique_award_id=f'ASST_NON_{i}_0_0', transaction_id=(i + 100), is_fpds=False, transaction_unique_id=(i + 100), usaspending_unique_transaction_id='', type='02', type_description='Block Grant', period_of_performance_start_date=datetime.datetime(fiscal_year, 5, 7), period_of_performance_current_end_date=datetime.datetime(fiscal_year, 5, 7), action_date=datetime.datetime(fiscal_year, 5, 7), federal_action_obligation=100, modification_number=f'{(i + 100)}', transaction_description='a description', last_modified_date=datetime.datetime(fiscal_year, 5, 7), award_certified_date=datetime.datetime(fiscal_year, 5, 7), create_date=datetime.datetime(fiscal_year, 5, 7), update_date=datetime.datetime(fiscal_year, 5, 7), fiscal_year=fiscal_year, awarding_agency_id=1, funding_agency_id=1, original_loan_subsidy_cost=100.0, face_value_loan_guarantee=100.0, funding_amount=100.0, non_federal_funding_amount=100.0, fain=f'fain{i}', awarding_agency_code='001', awarding_sub_tier_agency_c=1, awarding_toptier_agency_abbreviation='Test_Agency', awarding_subtier_agency_abbreviation='Test_Agency')
        i += 1
    # Downloads must not hit a real bucket during tests.
    monkeypatch.setattr('usaspending_api.settings.MONTHLY_DOWNLOAD_S3_BUCKET_NAME', 'whatever')
def get_recognizer_image_generator(labels, height, width, alphabet, augmenter=None, shuffle=True):
    """Endless generator of (image, text) pairs for recognizer training.

    Warns once about labels containing characters outside *alphabet*, then
    cycles over a (locally copied) label list forever, optionally reshuffling
    at the start of each pass, cropping/fitting each image to
    (height, width), filtering the text down to *alphabet*, skipping entries
    whose filtered text is empty, and applying *augmenter* if given.
    """
    n_with_illegal_characters = sum(
        any(c not in alphabet for c in text) for _, _, text in labels
    )
    if n_with_illegal_characters > 0:
        print(f'{n_with_illegal_characters} / {len(labels)} instances have illegal characters.')
    # Copy so in-place shuffling never mutates the caller's list.
    labels = labels.copy()
    for index in itertools.cycle(range(len(labels))):
        # Reshuffle at the top of every pass (including the first).
        if shuffle and index == 0:
            random.shuffle(labels)
        filepath, box, text = labels[index]
        # Random RGB fill color for padded regions.
        cval = typing.cast(int, np.random.randint(low=0, high=255, size=3).astype('uint8'))
        if box is None:
            image = tools.read_and_fit(filepath_or_array=filepath, width=width, height=height, cval=cval)
        else:
            image = tools.warpBox(image=tools.read(filepath), box=box.astype('float32'), target_height=height, target_width=width, cval=cval)
        # Drop characters the recognizer cannot emit; skip empty results.
        text = ''.join(c for c in text if c in alphabet)
        if not text:
            continue
        if augmenter:
            image = augmenter.augment_image(image)
        yield image, text
def aggregate(aggregate_type, factory_a, factory_b):
    """Build an ``Aggregate`` provider in one of several configurations.

    Args:
        aggregate_type: One of 'empty', 'non-string-keys' or 'default'.
        factory_a: Provider registered for the first key.
        factory_b: Provider registered for the second key.

    Raises:
        ValueError: If ``aggregate_type`` is not a recognized configuration.
    """
    # Lazy builders: nothing is constructed until the matching key is chosen.
    builders = {
        'empty': lambda: providers.Aggregate(),
        'non-string-keys': lambda: providers.Aggregate({ExampleA: factory_a, ExampleB: factory_b}),
        'default': lambda: providers.Aggregate(example_a=factory_a, example_b=factory_b),
    }
    if aggregate_type not in builders:
        raise ValueError('Unknown factory type "{0}"'.format(aggregate_type))
    return builders[aggregate_type]()
class SCULPT_OT_push_undo(bpy.types.Operator):
    """Push an undo step in Sculpt mode by performing a zero-effect "null"
    brush stroke, then restoring the user's tool, brush size and
    unified-size setting exactly as they were."""
    bl_idname = 'sculpt.push_undo'
    bl_label = 'Push Undo'
    bl_description = 'Pushes an undo step in Sculpt mode'
    bl_options = {'INTERNAL', 'UNDO'}

    # Blender requires Operator.poll to be a classmethod; the decorator was
    # missing here (likely stripped) and without it poll is never honored.
    @classmethod
    def poll(cls, context):
        # Only available for a sculpted mesh with geometry, in a 3D viewport.
        sculpt_object = context.sculpt_object
        return (sculpt_object and (sculpt_object.type == 'MESH') and (sculpt_object.mode == 'SCULPT') and (len(sculpt_object.data.vertices) > 0) and (context.space_data.type == 'VIEW_3D'))

    def execute(self, context: bpy.types.Context):
        tool_settings = context.tool_settings
        # Remember the active tool so it can be restored after the stroke.
        tool_name = context.workspace.tools.from_space_view3d_mode(context.mode).idname
        bpy.ops.wm.tool_set_by_id(name='builtin_brush.Draw')
        # Temporarily disable unified size so the per-brush size below is used.
        use_unified_size = tool_settings.unified_paint_settings.use_unified_size
        tool_settings.unified_paint_settings.use_unified_size = False
        brush_size = tool_settings.sculpt.brush.size
        # FIX: the original line was an incomplete assignment
        # ("...brush.size ="). Use a zero-radius brush so the null stroke
        # cannot alter the mesh while still registering an undo step.
        # TODO(review): confirm 0 (vs 1) matches the original intent.
        tool_settings.sculpt.brush.size = 0
        stroke = [{'name': 'Null Stroke', 'location': (0, 0, 0), 'mouse': (0, 0), 'mouse_event': (0, 0), 'pressure': 0, 'size': 0, 'pen_flip': False, 'time': 0, 'is_start': True, 'x_tilt': 0, 'y_tilt': 0}]
        bpy.ops.sculpt.brush_stroke(stroke=stroke)
        # Restore the user's tool and paint settings.
        bpy.ops.wm.tool_set_by_id(name=tool_name)
        tool_settings.unified_paint_settings.use_unified_size = use_unified_size
        tool_settings.sculpt.brush.size = brush_size
        return {'FINISHED'}
def test_ngram_sentence_suggester():
    """The v1 ngram-sentence suggester should produce 16 candidate spans
    for this three-sentence document with n-gram size [1]."""
    nlp = spacy.blank('en')
    text = 'The first sentence. The second sentence. And the third sentence.'
    # One flag per token: True marks a sentence start.
    sent_starts = [True, False, False, False, True, False, False, False, True, False, False, False, False]
    reference = nlp(text)
    doc = Doc(
        reference.vocab,
        words=[token.text for token in reference],
        spaces=[bool(token.whitespace_) for token in reference],
        sent_starts=sent_starts,
    )
    make_suggester = registry.misc.get('spacy-experimental.ngram_sentence_suggester.v1')
    suggester = make_suggester([1])
    candidates = suggester([doc])
    assert len(candidates.data) == 16
class BlockProcessor():
    """Base class for block-level processors.

    Subclasses override ``test`` (should this processor handle the block?)
    and ``run`` (process the block). The helpers here deal with the parse
    tree and with stripping leading indentation from text blocks.
    """

    def __init__(self, parser):
        self.parser = parser
        # Cached from the Markdown instance for the detab helpers below.
        self.tab_length = parser.md.tab_length

    def lastChild(self, parent):
        """Return the last child element of *parent*, or None if it has none."""
        return parent[-1] if len(parent) else None

    def detab(self, text):
        """Remove one tab's indentation from the leading indented lines.

        Consumes lines as long as they are indented by ``tab_length`` (blank
        lines pass through as empty strings); stops at the first
        insufficiently-indented non-blank line. Returns a tuple of the
        detabbed prefix and the untouched remainder.
        """
        lines = text.split('\n')
        indent = ' ' * self.tab_length
        consumed = []
        for line in lines:
            if line.startswith(indent):
                consumed.append(line[self.tab_length:])
            elif not line.strip():
                consumed.append('')
            else:
                break
        return '\n'.join(consumed), '\n'.join(lines[len(consumed):])

    def looseDetab(self, text, level=1):
        """Remove *level* tabs of indentation from every line that has it.

        Unlike ``detab`` this never stops early; lines without the full
        indentation are kept unchanged.
        """
        prefix = ' ' * self.tab_length * level
        return '\n'.join(
            line[len(prefix):] if line.startswith(prefix) else line
            for line in text.split('\n')
        )

    def test(self, parent, block):
        """Return True if this processor should handle *block* (override)."""
        pass

    def run(self, parent, blocks):
        """Process the block list, attaching results to *parent* (override)."""
        pass
def extractAjrgWordpressCom(item):
    """Parse a release feed item from ajrg.wordpress.com.

    Returns a release message for recognized series tags, None for items
    without chapter/volume info (or preview posts), and False when no tag
    matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore items with no chapter/volume number, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type
            )
    return False
def arith(res_type, op, args):
    """Compile an arithmetic operator into backend-appropriate SQL.

    Papers over dialect differences: string concatenation, true division,
    floor division ('/~'), modulo and exponentiation each have their own
    spelling on some backends. Returns a FuncCall or BinOp node.
    """
    backend = get_db().target
    operands = list(args)
    if res_type == T.string:
        # Strings only support concatenation.
        assert op == '+'
        if backend is mysql:
            # MySQL's '||' is logical OR, so use concat() instead.
            return FuncCall(res_type, 'concat', operands)
        op = '||'
    elif op == '/':
        if backend != mysql:
            # Force float division on backends where '/' can truncate.
            operands[0] = Cast(T.float, operands[0])
    elif op == '/~':
        # Floor (integer) division — every backend spells it differently.
        if backend in (mysql, oracle):
            op = 'DIV'
        elif backend == bigquery:
            return FuncCall(res_type, 'div', operands)
        elif backend == snowflake:
            quotient = BinOp(res_type, '/', operands)
            return FuncCall(T.int, 'floor', [quotient])
        else:
            op = '/'
    elif op == '%':
        if backend is bigquery:
            return FuncCall(res_type, 'mod', operands)
    elif op == '**':
        return FuncCall(T.float, 'power', operands)
    return BinOp(res_type, op, operands)
def validate_provider_ids(provider_ids, required=False):
    """Validate a collection of provider IDs.

    Each ID is checked individually via ``validate_provider_id``. An empty
    or None collection yields ``[]`` unless *required* is True, in which
    case a ValueError is raised.

    Returns:
        The original collection, or [] when it was empty/None.

    Raises:
        ValueError: If *required* and no IDs were supplied (or any ID fails
            individual validation).
    """
    if provider_ids:
        for pid in provider_ids:
            validate_provider_id(pid, True)
        return provider_ids
    if required:
        raise ValueError('Invalid provider IDs. Provider ids should be provided')
    return []
class Test_utils(unittest.TestCase):
    """Tests for utils.hex_array and utils.binary_str across the bytes-like
    input types: byte string, bytearray, and (Python 3) bytes."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_hex_array_string(self):
        eq_('0x01 0x02 0x03 0x04', utils.hex_array(b'\x01\x02\x03\x04'))

    def test_hex_array_bytearray(self):
        eq_('0x01 0x02 0x03 0x04', utils.hex_array(bytearray(b'\x01\x02\x03\x04')))

    def test_hex_array_bytes(self):
        if six.PY2:
            # On Python 2 bytes is just an alias of str; nothing new to test.
            return
        eq_('0x01 0x02 0x03 0x04', utils.hex_array(bytes(b'\x01\x02\x03\x04')))

    def test_binary_str_string(self):
        eq_('\\x01\\x02\\x03\\x04', utils.binary_str(b'\x01\x02\x03\x04'))

    def test_binary_str_bytearray(self):
        eq_('\\x01\\x02\\x03\\x04', utils.binary_str(bytearray(b'\x01\x02\x03\x04')))

    def test_binary_str_bytes(self):
        if six.PY2:
            # On Python 2 bytes is just an alias of str; nothing new to test.
            return
        eq_('\\x01\\x02\\x03\\x04', utils.binary_str(bytes(b'\x01\x02\x03\x04')))
# NOTE(review): the two lines above this function were mangled decorators —
# a bare "(os.environ, ..., clear=True)" tuple and a bare ".unit" attribute
# are not valid Python. Their shapes match unittest.mock's patch.dict and
# pytest.mark.unit; restored accordingly — confirm against version control.
@patch.dict(os.environ, {'FIDES__CONFIG_PATH': 'tests/ctl/test_default_config.toml'}, clear=True)
@pytest.mark.unit
def test_get_config_default() -> None:
    """With only the default config file selected, get_config() should
    return the documented default values."""
    config = get_config()
    assert config.database.api_engine_pool_size == 50
    assert config.security.env == 'dev'
    assert config.security.app_encryption_key == ''
    assert config.logging.level == 'INFO'
# NOTE(review): the bare ".usefixtures('use_tmpdir')" line was a mangled
# decorator; restored as pytest.mark.usefixtures — confirm against VCS.
@pytest.mark.usefixtures('use_tmpdir')
def test_that_quotations_in_forward_model_arglist_are_handled_correctly():
    """Regression test: single-quoted, double-quoted and mixed-quoted
    FORWARD_MODEL argument values must all parse to the same private args."""
    test_config_file_name = 'test.ert'
    test_config_contents = dedent('\n NUM_REALIZATIONS 1\n FORWARD_MODEL COPY_FILE(<FROM>=\'some, thing\', <TO>="some stuff", <FILE>=file.txt)\n FORWARD_MODEL COPY_FILE(<FROM>=\'some, thing\', <TO>=\'some stuff\', <FILE>=file.txt)\n FORWARD_MODEL COPY_FILE(<FROM>="some, thing", <TO>="some stuff", <FILE>=file.txt)\n ')
    with open(test_config_file_name, 'w', encoding='utf-8') as fh:
        fh.write(test_config_contents)
    res_config = ErtConfig.from_file(test_config_file_name)
    # All three quoting styles must yield identical private arguments.
    for fm_step in res_config.forward_model_list[:3]:
        assert fm_step.private_args['<FROM>'] == 'some, thing'
        assert fm_step.private_args['<TO>'] == 'some stuff'
        assert fm_step.private_args['<FILE>'] == 'file.txt'
def test_reaction_as_decorator_of_other_cls():
    """A reaction defined inside one Component class can watch a property
    that lives on an instance of another class."""

    class C1(event.Component):
        foo = event.AnyProp(settable=True)

    c1 = C1()

    class C2(event.Component):
        # NOTE(review): the bare ".reaction('foo')" line was a mangled
        # decorator. Given the test name ("of other cls") and that C2 has no
        # 'foo' property of its own, it is restored as a reaction bound to
        # the c1 instance — confirm against version control.
        @c1.reaction('foo')
        def on_foo(self, *events):
            print('x')
            self.xx = events[-1].new_value

    c2 = C2()
    loop.iter()
    c1.set_foo(3)
    loop.iter()
    assert c2.xx == 3
def _extract_images(response):
    """Summarize every <img> element on *response* into one flat dict.

    Each recognized <img> attribute (per the module-level _IMG_ATTRS
    whitelist) becomes an 'img_'-prefixed key whose value is that
    attribute's values concatenated across all images; 'src' values are
    resolved against the page URL first. Returns {} when the page has no
    images.
    """
    img_nodes = response.xpath('//img')
    if not img_nodes:
        return {}
    attrs = pd.DataFrame([node.attrib for node in img_nodes])
    if 'src' in attrs:
        # Make every image URL absolute relative to the page.
        attrs['src'] = [response.urljoin(url) for url in attrs['src']]
    # Collapse each attribute column into a single concatenated-string row.
    attrs = attrs.apply(lambda col: col.fillna('').str.cat(sep='')).to_frame().T
    attrs = attrs[attrs.columns.intersection(_IMG_ATTRS)]
    attrs = attrs.add_prefix('img_')
    return attrs.to_dict('records')[0]
def test_llm_prompt_generations_response():
    """The Einstein LLM prompt-generations call should return the canned
    joke text from the recorded 'einstein_llm_prompt_generations_200'
    response."""
    testutil.add_response('login_response_200')
    testutil.add_response('api_version_response_v58_200')
    testutil.add_response('einstein_llm_prompt_generations_200')
    client = testutil.get_client()
    prompt_request_body = {
        'promptTextorId': "I'm writing code in the open source SalesforcePy Python client to test your API. Can you make a topical joke that I could assert against?",
        'provider': 'OpenAI',
        'additionalConfig': {'maxTokens': 512},
    }
    # FIX: this literal was single-quoted while containing an apostrophe
    # ("here's"), which is a SyntaxError; double-quote it instead.
    llm_dad_joke = "Sure, here's a joke: Why did the Salesforce developer go broke? Because he used up all his API calls!"
    generated = client.einstein.llm.prompt.generations(prompt_request_body)
    assert generated[0]['generations'][0]['text'] == llm_dad_joke
class DummyStageFlowStatus(Enum):
    """Status markers for a three-stage dummy flow.

    Each stage moves through INITIALIZED -> STARTED -> COMPLETED (or
    FAILED). Values are the explicit 1..12 sequence that auto() would
    assign in declaration order.
    """
    STAGE_1_INITIALIZED = 1
    STAGE_1_STARTED = 2
    STAGE_1_COMPLETED = 3
    STAGE_1_FAILED = 4
    STAGE_2_INITIALIZED = 5
    STAGE_2_STARTED = 6
    STAGE_2_COMPLETED = 7
    STAGE_2_FAILED = 8
    STAGE_3_INITIALIZED = 9
    STAGE_3_STARTED = 10
    STAGE_3_COMPLETED = 11
    STAGE_3_FAILED = 12
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.