code stringlengths 281 23.7M |
|---|
class TestCLIRollover(CuratorTestCase):
    """Integration test for the curator CLI ``rollover`` action."""

    def test_max_age_true(self):
        """Rolling over by ``max_age`` should move the alias onto a new index."""
        max_age = '1s'
        expected = {NEWINDEX: {'aliases': {ALIAS: {}}}}
        self.client.indices.create(index=OLDINDEX, aliases={ALIAS: {}})
        # Let the index age past the 1s threshold so the rollover condition fires.
        time.sleep(1)
        cli_args = self.get_runner_args()
        cli_args += [
            '--config', self.args['configfile'],
            'rollover', '--name', ALIAS, '--max_age', max_age,
        ]
        exit_code = self.run_subprocess(
            cli_args, logname='TestCLIRollover.test_max_age_true')
        assert 0 == exit_code
        assert expected == self.client.indices.get_alias(name=ALIAS)
class TPWR702N:
    """Parser/carver for TP-Link WR702N firmware images.

    Layout implied by the offsets below: a 4-byte container magic followed by
    a 16-byte MD5 checksum, an IMG0 header at offset 20, the bootloader at
    offset 26820, and the OS + filesystem blob starting at offset 262420.
    """

    MD5SIZE = 16
    IMG0_OFFSET = 20
    IMG0_HEADER_SIZE = 12
    BOOTLOADER_OFFSET = 26820
    OS_OFFSET = 262420

    def __init__(self, filename):
        self.img0 = None
        self.md5_checksum = None
        self.firmware_filepath = filename
        self.firmware = open(filename, 'rb')
        self._read_container_information()
        self.firmware.close()
        self.carver = Carver(self.firmware_filepath)

    def __str__(self):
        return 'MD5: {} \n Included Header:\n{}'.format(self.get_md5string(), str(self.img0))

    def get_remaining_blocks(self):
        """Return {offset: data} for every area the carver has not claimed."""
        non_carved_areas = self.carver.carved.non_carved_areas
        remaining = {}
        for area in non_carved_areas:
            remaining[area[0]] = self.carver.extract_data(area[0], area[1])
        return remaining

    def get_container_header(self):
        """Return the raw outer container header (magic + MD5)."""
        return self.carver.extract_data(0, 19)

    def get_md5string(self):
        """Return the stored MD5 checksum as a lowercase hex string."""
        return binascii.hexlify(self.md5_checksum).decode('ascii')

    def get_meta_dict(self):
        """Collect all parsed metadata into a plain dict."""
        meta_data = {}
        meta_data['bootloader_offset'] = self.BOOTLOADER_OFFSET
        meta_data['os_offset'] = self.OS_OFFSET
        meta_data['md5'] = self.get_md5string()
        meta_data['img0'] = self.img0.get_meta_dict()
        meta_data['uncarved_area'] = self.carver.carved.non_carved_areas
        return meta_data

    def _read_container_information(self):
        # '>4s16s': 4-byte container magic followed by the 16-byte MD5.
        header = unpack('>4s16s', self.firmware.read(4 + self.MD5SIZE))
        self.container_format = header[0]
        self.md5_checksum = header[1]
        self._read_img0()

    def _read_img0(self):
        self.img0 = TPIMG0(self.firmware_filepath, self.IMG0_OFFSET)

    def get_tpimg0_header(self):
        return self.carver.extract_data(self.IMG0_OFFSET, self.IMG0_OFFSET + self.IMG0_HEADER_SIZE)

    def get_bootloader(self):
        """Extract the bootloader blob and verify it looks like LZMA data."""
        bootloader_size = self._get_end_of_bootloader() - self.BOOTLOADER_OFFSET
        bootloader = self.carver.extract_data(self.BOOTLOADER_OFFSET, self.BOOTLOADER_OFFSET + bootloader_size)
        self._check_expected_lzma_property(bootloader)
        return bootloader

    def _get_end_of_bootloader(self):
        """The bootloader ends one byte before the sub-IMG0 header starts."""
        if self.img0 is None:
            raise Img0MissingException('Main IMG0 is missing')
        if self.img0.sub_header is None:
            raise Img0MissingException('Sub IMG0 is missing')
        return self.img0.sub_header.offset - 1

    @staticmethod
    def _check_expected_lzma_property(data_block):
        """Raise NotLZMAException unless *data_block* starts with the expected byte.

        Bug fixes: the method was missing ``@staticmethod`` (it takes no
        ``self`` but was called via ``self.`` -> TypeError), and compared the
        first byte with ``is not`` (int identity) instead of ``!=``.
        """
        lzma_first_byte = b'n'
        if data_block[0] != lzma_first_byte[0]:
            raise NotLZMAException

    def get_os_and_fs(self):
        """Return the combined OS + filesystem blob (everything past OS_OFFSET)."""
        os_and_fs = self.carver.extract_data(self.OS_OFFSET)
        self._check_expected_lzma_property(os_and_fs)
        return os_and_fs

    def get_os(self):
        """Return only the OS part (everything before the filesystem marker)."""
        os_and_fs = self.get_os_and_fs()
        end = self._find_fs_magic_string(os_and_fs)
        return os_and_fs[:end]

    def get_fs(self):
        """Return only the filesystem part (from the filesystem marker on)."""
        os_and_fs = self.get_os_and_fs()
        end = self._find_fs_magic_string(os_and_fs)
        return os_and_fs[end:]

    @staticmethod
    def _find_fs_magic_string(os_and_fs):
        """Return the offset of the filesystem padding marker (-1 if absent).

        Bug fix: missing ``@staticmethod`` -- the function takes no ``self``
        but was invoked via ``self.``, which raised TypeError.
        """
        search_pattern = b'owowowowowowowowowowowowowowowow'
        return os_and_fs.find(search_pattern)
class RDepOneComponent(Digraph.Node):
    """Requirement-set check: the requirements graph must form exactly one
    connected component, otherwise processing aborts with RMTException 69.
    """

    depends_on = ['RDepDependsOn', 'RDepSolvedBy']

    def __init__(self, config):
        Digraph.Node.__init__(self, 'RDepOneComponent')
        self.config = config

    @staticmethod
    def get_type_set():
        # Bug fix: missing @staticmethod -- the function takes no ``self``.
        return set([InputModuleTypes.reqdeps])

    @staticmethod
    def rewrite(reqset):
        """Return True iff *reqset* is one connected component; raise otherwise.

        Bug fix: missing @staticmethod -- ``reqset`` would have received the
        instance when called through ``self``.
        """
        tracer.debug('Called.')
        components = connected_components(reqset)
        if components.get_length() == 1:
            tracer.debug('Finished.')
            return True
        raise RMTException(69, ('Requirements graph has two or more connected components. Please fix the edges between the nodes.Found components: %s' % components.as_string()))
class OptionPlotoptionsWordcloudSonificationTracksMappingFrequency(Options):
    """Frequency-mapping options for wordcloud sonification tracks.

    Bug fix: each getter/setter pair shared a name, so the later ``def``
    silently shadowed the earlier one. Restored the ``@property`` /
    ``@<name>.setter`` decorators that make the pairs work.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def handle_dependency_not_found(e: DependencyNotFound) -> None:
    """Report which dependencies a package expected versus which are missing."""
    banner = '=' * 50
    # Sort the dependency objects first, then stringify for display.
    expected_names = [str(dep) for dep in sorted(e.expected_dependencies)]
    missing_names = [str(dep) for dep in sorted(e.missing_dependencies)]
    print(banner)
    print(f'Package {e.configuration_file}:')
    print(f'Expected: {pprint.pformat(expected_names)}')
    print(f'Missing: {pprint.pformat(missing_names)}')
    print(banner)
class OptionSeriesScatter3dSonificationContexttracksMappingTremoloSpeed(Options):
    """Tremolo-speed mapping options for scatter3d sonification context tracks.

    Bug fix: each getter/setter pair shared a name, so the later ``def``
    silently shadowed the earlier one. Restored the ``@property`` /
    ``@<name>.setter`` decorators that make the pairs work.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def hexdump(src, length=32, indent=0):
    """Render *src* (a str of single-byte chars) as a classic hex dump.

    Each output line shows the offset, ``length`` hex bytes, and the
    printable representation (non-printables and non-ASCII become '.').
    """
    # Map each of the 256 byte values to itself if printable, else '.'.
    # A printable char's repr is exactly 3 chars long: quote, char, quote.
    printable_map = ''.join(
        chr(code) if len(repr(chr(code))) == 3 else '.' for code in range(256)
    )
    pad = indent * ' '
    out = []
    for offset in range(0, len(src), length):
        row = src[offset:offset + length]
        hex_part = ' '.join('%02x' % ord(ch) for ch in row)
        text_part = ''.join(
            printable_map[ord(ch)] if ord(ch) <= 127 else '.' for ch in row
        )
        out.append('%s%04x %-*s %s\n' % (pad, offset, length * 3, hex_part, text_part))
    return ''.join(out)
class AppSettings:
    """Lazy accessor for Django settings sharing a common *prefix*.

    Every property reads ``settings.<prefix><NAME>`` at access time, so an
    instance can be created before Django settings are configured.

    Bug fix: the bare ``_property`` lines before each accessor were mangled
    ``@property`` decorators (as written they raised NameError); restored.
    """

    def __init__(self, prefix):
        self.prefix = prefix

    def import_from_str(self, name):
        """Import and return the object at dotted path *name* (``pkg.mod.attr``)."""
        from importlib import import_module
        (path, prop) = name.rsplit('.', 1)
        mod = import_module(path)
        return getattr(mod, prop)

    def _setting(self, name, dflt):
        """Return ``settings.<prefix><name>``, falling back to *dflt*."""
        from django.conf import settings
        return getattr(settings, (self.prefix + name), dflt)

    @property
    def REDIS_URI(self) -> t.Optional[str]:
        return self._setting('REDIS_URI', None)

    @property
    def API_PERMISSIONS_READ(self):
        """Resolve configured read-permission classes from dotted paths."""
        perms = self._setting('API_PERMISSIONS_READ', tuple())
        return [self.import_from_str(perm) for perm in perms]

    @property
    def API_PERMISSIONS_WRITE(self):
        """Resolve configured write-permission classes from dotted paths."""
        perms = self._setting('API_PERMISSIONS_WRITE', tuple())
        return [self.import_from_str(perm) for perm in perms]

    @property
    def GET_USER_QUERYSET_FUNC(self):
        get_user_queryset = self._setting('GET_USER_QUERYSET_FUNC', None)
        if get_user_queryset is not None:
            return self.import_from_str(get_user_queryset)
        return None

    @property
    def CREATE_USER_FUNC(self):
        func = self._setting('CREATE_USER_FUNC', None)
        if func is not None:
            return self.import_from_str(func)
        return None

    @property
    def DASHBOARD_URL_FUNC(self):
        func = self._setting('DASHBOARD_URL_FUNC', None)
        if func is not None:
            return self.import_from_str(func)
        return None

    @property
    def CHUNK_PATH_FUNC(self):
        func = self._setting('CHUNK_PATH_FUNC', None)
        if func is not None:
            return self.import_from_str(func)
        return None

    @property
    def CHALLENGE_VALID_SECONDS(self):
        # Default challenge lifetime: 60 seconds.
        return self._setting('CHALLENGE_VALID_SECONDS', 60)
class WhileLoopNodeSerializer(LoopNodeSerializer):
    """(De)serializer for while-loop AST nodes."""

    def serialize(self, node: WhileLoopNode) -> Dict:
        """Serialization is identical to the generic loop-node form."""
        return super().serialize(node)

    def deserialize(self, data: dict) -> WhileLoopNode:
        """Rebuild a WhileLoopNode from its serialized dict ('condition', 'rc')."""
        condition = LogicCondition.deserialize(data['condition'], self._group.new_context)
        reaching = LogicCondition.deserialize(data['rc'], self._group.new_context)
        return WhileLoopNode(condition=condition, reaching_condition=reaching)
def get_test_array(shape, dtype, strides=None, offset=0, no_zeros=False, high=None):
    """Create a random array for testing.

    Structured dtypes are filled field by field; integer dtypes draw from
    ``randint(low, high)`` (high defaults to 100); float dtypes draw from
    ``uniform(low, high)`` (high defaults to 1.0); complex dtypes combine two
    independent draws. ``no_zeros`` raises the lower bound above zero.
    A nonzero ``offset`` is not supported.
    """
    shape = wrap_in_tuple(shape)
    dtype = dtypes.normalize_type(dtype)
    if offset != 0:
        raise NotImplementedError()

    if dtype.names is not None:
        # Structured dtype: fill every named field recursively.
        result = numpy.empty(shape, dtype)
        for field in dtype.names:
            result[field] = get_test_array(shape, dtype[field], no_zeros=no_zeros, high=high)
    else:
        if dtypes.is_integer(dtype):
            low = 1 if no_zeros else 0
            upper = 100 if high is None else high

            def draw():
                return numpy.random.randint(low, upper, shape).astype(dtype)
        else:
            low = 0.01 if no_zeros else 0
            upper = 1.0 if high is None else high

            def draw():
                return numpy.random.uniform(low, upper, shape).astype(dtype)

        if dtypes.is_complex(dtype):
            result = draw() + (1j * draw())
        else:
            result = draw()

    if strides is not None:
        result = as_strided(result, result.shape, strides)
    return result
def react_speed_in_loop(benchmark: BenchmarkControl, agents_num: int=1, skills_num: int=1, inbox_num: int=5000, agent_loop_timeout: float=0.01) -> None:
    """Benchmark how fast agent loops drain a pre-filled fake inbox.

    Spins up ``agents_num`` wrapped agents, each with ``skills_num`` skills and
    ``inbox_num`` copies of a dummy envelope, then waits until every fake
    connection and inbox is empty. Loops are always stopped on exit.
    """
    envelope = AEATestWrapper.dummy_envelope()
    wrappers = []
    for idx in range(agents_num):
        wrapper = AEATestWrapper(**_make_custom_config(f'agent{idx}', skills_num))
        wrapper.set_loop_timeout(agent_loop_timeout)
        wrapper.set_fake_connection(inbox_num, envelope)
        wrappers.append(wrapper)

    benchmark.start()
    for wrapper in wrappers:
        wrapper.start_loop()
    try:
        # Poll until all fake connections have delivered their messages...
        while sum(w.is_messages_in_fake_connection() for w in wrappers):
            time.sleep(0.01)
        # ...and every agent has processed its inbox.
        while sum((not w.is_inbox_empty()) for w in wrappers):
            time.sleep(0.01)
    finally:
        for wrapper in wrappers:
            wrapper.stop_loop()
class BC_Base:
    """Base class for a boundary condition attached to a shape facet.

    Parameters
    ----------
    shape: the Shape this BC belongs to (supplies the domain dimension)
    name: optional name for the BC
    b_or: sequence of boundary orientations; ``b_i`` selects this BC's entry
    b_i: index into ``b_or``
    nd: number of space dimensions, used when no ``shape`` is given
    """

    def __init__(self, shape=None, name=None, b_or=None, b_i=0, nd=None):
        self.Shape = shape
        self.name = name
        self.BC_type = 'None'
        if shape is not None:
            self.nd = self.Shape.Domain.nd
        elif nd is not None:
            self.nd = nd
        else:
            # Reaching this branch means both shape and nd are None.
            assert nd is not None, 'Shape or nd must be passed to BC'
        if b_or is not None:
            self._b_or = b_or[b_i]
        else:
            self._b_or = None

    def getContext(self, context=None):
        """Attach a Context to the BC.

        Uses *context* when given, otherwise fetches the global proteus
        Context. Bug fix: the original branches were inverted -- a passed-in
        context was discarded (replaced by ``Context.get()``) while a missing
        context left ``self.ct`` set to None.
        """
        if context:
            self.ct = context
        else:
            from proteus import Context
            self.ct = Context.get()
def run(args):
    """CLI entry point for the nubia shell-completion utility.

    *args* is the argv list to parse (``None`` falls back to ``sys.argv``).
    Returns the sub-command's exit status, or 2 when no sub-command is given.

    Bug fix: the ``args`` parameter was ignored -- ``parse_args()`` was called
    without arguments and re-read ``sys.argv``; it now parses *args*.
    """
    opts_parser = argparse.ArgumentParser(description='A shell completion utility for nubia programs')
    subparsers = opts_parser.add_subparsers(help='sub-command help', dest='mode')
    opts_parser.add_argument('--loglevel', type=str, default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], help='logging level')
    generate_parser = subparsers.add_parser('generate-shell-setup', help='Generates a bash/zsh setup script that you can source')
    complete_parser = subparsers.add_parser('complete', help='Triggers completions')
    generate_parser.add_argument('--target-binary-name', type=str, required=True, help='The name of the nubia program we want to generate a completer for')
    generate_parser.add_argument('--command-model-path', type=str, required=True, help='The location on which to find the command model')
    complete_parser.add_argument('--command-model-path', type=str, required=True, help='The location on which to find the command model')
    args = opts_parser.parse_args(args)
    log_level = logging.getLevelName(args.loglevel)
    logging.basicConfig(level=log_level)
    if args.mode == 'generate-shell-setup':
        return generate_shell_setup(args.target_binary_name, args.command_model_path)
    elif args.mode == 'complete':
        return run_complete(args)
    else:
        print('Not Implemented!')
        return 2
class ArrayWidget(_DefaultAttrsWidget, widgets.TextInput):
    """Text input that renders a list value as an escaped, comma-separated
    string, optionally exposing tag choices for a tag-style UI."""

    tag_choices = None

    def __init__(self, *args, **kwargs):
        # Pull our widget-specific options out before delegating upward.
        self.tags = kwargs.pop('tags', False)
        self.escape_space = kwargs.pop('escape_space', True)
        self.escape_comma = kwargs.pop('escape_comma', True)
        super(ArrayWidget, self).__init__(*args, **kwargs)

    def build_attrs(self, *args, **kwargs):
        choices = self.tag_choices
        if choices:
            encoded = json.dumps(choices, indent=None, cls=JSONEncoder)
            kwargs['data-tags-choices'] = mark_safe(conditional_escape(encoded))
        return super(ArrayWidget, self).build_attrs(*args, **kwargs)

    def render(self, name, value, attrs=None):
        # Non-string values (lists) are flattened to an editable string form.
        if value is not None and not isinstance(value, six.string_types):
            value = edit_string_for_items(
                value,
                escape_space=self.escape_space,
                escape_comma=self.escape_comma,
                sort=self.tags,
            )
        return super(ArrayWidget, self).render(name, value, attrs=attrs)
class InterComBackEndBinding:
    """Backend side of the intercom: spawns one listener process per task
    queue and dispatches incoming tasks to the matching service."""

    def __init__(self, analysis_service=None, compare_service=None, unpacking_service=None, unpacking_locks=None, testing=False):
        self.analysis_service = analysis_service
        self.compare_service = compare_service
        self.unpacking_service = unpacking_service
        self.unpacking_locks = unpacking_locks
        self.poll_delay = config.backend.intercom_poll_delay
        # Shared flag all listener processes poll to know when to exit.
        self.stop_condition = Value('i', 0)
        self.process_list = []

    def start(self):
        """Publish the plugin list and start every listener process."""
        InterComBackEndAnalysisPlugInsPublisher(analysis_service=self.analysis_service)
        # (listener class, callback run per task, extra listener kwargs)
        registrations = [
            (InterComBackEndAnalysisTask, self.unpacking_service.add_task, {}),
            (InterComBackEndReAnalyzeTask, self.unpacking_service.add_task, {}),
            (InterComBackEndCompareTask, self.compare_service.add_task, {}),
            (InterComBackEndRawDownloadTask, None, {}),
            (InterComBackEndFileDiffTask, None, {}),
            (InterComBackEndTarRepackTask, None, {}),
            (InterComBackEndBinarySearchTask, None, {}),
            (InterComBackEndUpdateTask, self.analysis_service.update_analysis_of_object_and_children, {}),
            (
                InterComBackEndDeleteFile,
                None,
                {'unpacking_locks': self.unpacking_locks, 'db_interface': DbInterfaceCommon()},
            ),
            (InterComBackEndSingleFileTask, self.analysis_service.update_analysis_of_single_object, {}),
            (InterComBackEndPeekBinaryTask, None, {}),
            (InterComBackEndLogsTask, None, {}),
        ]
        for listener, callback, extra_kwargs in registrations:
            self._start_listener(listener, callback, **extra_kwargs)
        logging.info('Intercom online')

    def shutdown(self):
        """Signal all listeners to stop and wait for them to terminate."""
        self.stop_condition.value = 1
        stop_processes(self.process_list, config.backend.intercom_poll_delay + 1)
        logging.info('Intercom offline')

    def _start_listener(self, listener: type[InterComListener], do_after_function: (Callable | None)=None, **kwargs):
        worker = Process(target=self._backend_worker, args=(listener, do_after_function, kwargs))
        worker.start()
        self.process_list.append(worker)

    def _backend_worker(self, listener: type[InterComListener], do_after_function: (Callable | None), additional_args):
        """Poll the listener's queue until the shared stop flag is set."""
        interface = listener(**additional_args)
        logging.debug(f'{listener.__name__} listener started (pid={os.getpid()})')
        while self.stop_condition.value == 0:
            task = interface.get_next_task()
            if task is None:
                sleep(self.poll_delay)
                continue
            if do_after_function is not None:
                do_after_function(task)
        logging.debug(f'{listener.__name__} listener stopped')
class OptionTitle(Options):
    """Chart title options.

    Bug fix: getter/setter pairs shared a name, so each later ``def``
    silently shadowed the earlier one. Restored the ``@property`` /
    ``@<name>.setter`` decorators that make the pairs work.
    """

    @property
    def align(self):
        return self._config_get('center')

    @align.setter
    def align(self, text: str):
        self._config(text)

    @property
    def display(self):
        return self._config_get(False)

    @display.setter
    def display(self, flag: bool):
        self._config(flag)

    @property
    def fullSize(self):
        return self._config_get(True)

    @fullSize.setter
    def fullSize(self, flag: bool):
        self._config(flag)

    @property
    def text(self):
        return self._config_get('')

    @text.setter
    def text(self, val: Union[(str, List[str])]):
        self._config(val)

    @property
    def color(self):
        return self._config_get()

    @color.setter
    def color(self, val: str):
        self._config(val)

    @property
    def position(self):
        return self._config_get('top')

    @position.setter
    def position(self, val: str):
        self._config(val)

    @property
    def fontSize(self):
        return self._config_get()

    @fontSize.setter
    def fontSize(self, val):
        self._config(val)

    @property
    def fontFamily(self):
        return self._config_get()

    @fontFamily.setter
    def fontFamily(self, val):
        self._config(val)

    @property
    def fontColor(self):
        return self._config_get()

    @fontColor.setter
    def fontColor(self, val):
        self._config(val)

    @property
    def fontStyle(self):
        return self._config_get()

    @fontStyle.setter
    def fontStyle(self, val):
        self._config(val)

    @property
    def padding(self):
        return self._config_get(10)

    @padding.setter
    def padding(self, val):
        self._config(val)

    @property
    def lineHeight(self):
        return self._config_get()

    @lineHeight.setter
    def lineHeight(self, val):
        self._config(val)

    @property
    def font(self) -> OptionLabelFont:
        # Read-only sub-options object; no setter.
        return self._config_sub_data('font', OptionLabelFont)
class TestAddProtocolFromRemoteRegistry(AEATestCaseEmptyFlaky):
    """Test `aea add protocol` when fetching from the remote registry.

    Bug fix: the bare ``.integration`` / ``.flaky(...)`` lines were mangled
    pytest mark decorators (a SyntaxError as written); restored them.
    """

    IS_LOCAL = False
    IS_EMPTY = True

    @pytest.mark.integration
    @pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
    def test_add_protocol_from_remote_registry_positive(self):
        """Adding the fipa protocol should vendor it under fetchai/protocols."""
        self.add_item('protocol', str(FipaMessage.protocol_id.to_latest()), local=self.IS_LOCAL)
        items_path = os.path.join(self.agent_name, 'vendor', 'fetchai', 'protocols')
        items_folders = os.listdir(items_path)
        item_name = 'fipa'
        assert item_name in items_folders
def main():
    """CLI entry point: render a jinja2 template using shell env variables."""
    parser = argparse.ArgumentParser(description='jinja2 template rendering with shell environment variables')
    parser.add_argument('input_file', nargs='?', help='Input filename. Defaults to stdin.')
    parser.add_argument('-o', '--output-file', help=('Output filename. If none is given, and the input file ends with "%s", the output filename is the same as the input filename, sans the %s extension. Otherwise, defaults to stdout.' % (EXTENSION, EXTENSION)))
    parser.add_argument('--allow-missing', action='store_true', help='Allow missing variables. By default, envtpl will die with exit code 1 if an environment variable is missing')
    parser.add_argument('--keep-template', action='store_true', help='Keep original template file. By default, envtpl will delete the template file')
    args = parser.parse_args()
    # Every environment variable becomes a template variable (unicode-safe).
    variables = {key: _unicodify(val) for key, val in os.environ.items()}
    try:
        process_file(
            args.input_file,
            args.output_file,
            variables,
            not args.allow_missing,
            not args.keep_template,
        )
    except (Fatal, IOError) as err:
        sys.stderr.write('Error: %s\n' % str(err))
        sys.exit(1)
    sys.exit(0)
def mock_request(data, loop):
    """Build a mocked aiohttp POST request to /sensor-reading whose JSON body
    is *data* (a str).

    Bug fix: the StreamReader construction was garbled into an unterminated
    string literal ("StreamReader(' loop=loop, limit=1024)"); rebuilt as a
    proper keyword call. NOTE(review): newer aiohttp versions also require a
    ``protocol`` first argument -- confirm against the pinned aiohttp.
    """
    payload = StreamReader(loop=loop, limit=1024)
    payload.feed_data(data.encode())
    payload.feed_eof()
    protocol = mock.Mock()
    app = mock.Mock()
    headers = CIMultiDict([('CONTENT-TYPE', 'application/json')])
    req = make_mocked_request('POST', '/sensor-reading', headers=headers, protocol=protocol, payload=payload, app=app, loop=loop)
    return req
def download_gh_asset(url: str, path: str, overwrite=False):
    """Download a zipped GitHub asset from *url* and extract it into *path*.

    When *overwrite* is True, any existing contents of *path* are removed
    first; otherwise the archive is extracted on top of what is there.

    Bug fixes: the target directory was created and then immediately deleted
    again when overwriting (mkdir before rmtree) -- reordered; the ZipFile is
    now closed via a context manager even if extraction fails.
    """
    zipped = requests.get(url)
    if overwrite:
        shutil.rmtree(path, ignore_errors=True)
    Path(path).mkdir(exist_ok=True)
    with ZipFile(io.BytesIO(zipped.content)) as archive:
        archive.extractall(path)
    click.echo(f'files saved to {path}')
# Bug fix: the bare ``.parametrize`` line was a mangled
# ``@pytest.mark.parametrize`` decorator (a SyntaxError as written).
@pytest.mark.parametrize('value,expected', (('', False), ('a', False), (1, False), (True, False), ({'a': 1, 'b': 2}, False), (tuple(), False), (list(), False), (('a', 'b'), False), (['a', 'b'], False), (b'', False), (b'arst', False), (None, True), (TOPIC_A, True), (TOPIC_B, True)))
def test_is_topic(value, expected):
    """is_topic() accepts only None and the known topic sentinels."""
    actual = is_topic(value)
    assert actual is expected
class OptionPlotoptionsGaugeSonificationTracksMappingHighpass(Options):
    """Highpass-filter mapping options for gauge sonification tracks.

    Consistency fix: sibling generated Options classes in this file lost
    their ``@property`` decorators in the same mangling; restored here so
    the sub-option objects are reachable as attributes.
    """

    @property
    def frequency(self) -> 'OptionPlotoptionsGaugeSonificationTracksMappingHighpassFrequency':
        """Sub-options controlling the highpass cutoff frequency."""
        return self._config_sub_data('frequency', OptionPlotoptionsGaugeSonificationTracksMappingHighpassFrequency)

    @property
    def resonance(self) -> 'OptionPlotoptionsGaugeSonificationTracksMappingHighpassResonance':
        """Sub-options controlling the highpass resonance."""
        return self._config_sub_data('resonance', OptionPlotoptionsGaugeSonificationTracksMappingHighpassResonance)
class HttpResponseFormat(ModelNormal):
    """Auto-generated OpenAPI model describing an HTTP response
    (code, reason, headers, body, body-bin).

    Bug fix: the bare ``_property`` / ``_js_args_to_python_args`` lines were
    mangled decorators (NameError as written). Restored as ``@cached_property``
    and ``@convert_js_args_to_python_args`` -- the names used by this OpenAPI
    generator's model_utils; confirm against the project's model_utils module.
    """

    allowed_values = {}
    validations = {}

    @cached_property
    def additional_properties_type():
        # Any type is allowed for undeclared properties.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    @cached_property
    def openapi_types():
        # attribute name -> (accepted types,) for validation.
        return {'code': (int,), 'reason': (str,), 'headers': ({str: (str,)},), 'body': (str,), 'body_bin': (str,)}

    @cached_property
    def discriminator():
        return None

    # python attribute -> JSON key (note 'body_bin' -> 'body-bin').
    attribute_map = {'code': 'code', 'reason': 'reason', 'headers': 'headers', 'body': 'body', 'body_bin': 'body-bin'}
    read_only_vars = {}
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from raw API data (read-only attributes allowed)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys the schema does not know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Instantiate from user-supplied data (read-only attributes rejected)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
# Bug fix: the bare ``.slow`` / ``.skipif`` lines were mangled
# ``@pytest.mark`` decorators (a SyntaxError as written); restored.
@pytest.mark.slow
@pytest.mark.skipif(not GPU_TESTS_ENABLED, reason='requires GPU')
def test_generate_sample(dolly_generator):
    """Sampling with a fixed torch seed must reproduce the expected outputs."""
    prompts = ['What is spaCy?', 'What is spaCy?']
    torch.manual_seed(0)
    assert (dolly_generator(prompts, config=SampleGeneratorConfig(top_k=10)) == ["SpaCy (short for spaCy Toolkit) is a Python library for Natural Language Processing (NLP) and machine translation based on the spaCy research project. It has been open-source since June 2023 and has been used in production for NLP tasks like POS tag classification (part of OpenStax' TACL), question answering (QACQUEL), and semantic parsing (CoreNLP) as of October 2023.\n\n", 'SpaCy is an open-source package for Python that automates part of the process of building language models (LMs), or parsers for plain text, using a technique called graph-based learning. It supports English, French, German, Dutch, Italian, Spanish, Portuguese, Polish, and Dutch, all of which are official language groups of Europe.\n\n'])
    torch.manual_seed(0)
    assert (dolly_generator(prompts, config=SampleGeneratorConfig(top_k=5, temperature=2)) == ["SpaCy (short for Spanish Language Model) is a natural language processor (NLP) for English based on the open source spaCy project. It supports sentence detection, part-of-speech tagging and coreference chains, among other things. spaCy was originally developed at the University of Cambridge's Machine Translation Lab. Since then, it has also developed a version optimized for Python that can be installed viapip.\n\n", "The spaCy library is an NLP library based on Stanford's PoSpell architecture, that is designed to make NLP easy. spaCy supports all the standard NLTK pipeline stages and is able to outperform the NLTK on many Named Entity Tagging tasks, as well many others, both at test-level and at the system-wide average. It has a similar architecture to NLTKs, but is designed from the ground up for the needs of a NLP research community rather than a production system: It has a smaller and less mature API, does away with its core tokenizer (which is notoriously hard to train), and is based on the Speller system from Stanford's CS-ADLDN program, which has been shown to significantly outperform the NLTK tokeniser in terms of both accuracy (93.7% on a test set of 20K tokens vs. NLTK's 76.3%) and efficiency.\n\n"])
class TGCustomYield:
    """Helpers to stream or download Telegram media through the bot's
    MTProto media sessions (pyrogram)."""

    def __init__(self):
        self.main_bot = MegaDLBot

    @staticmethod
    async def generate_file_properties(msg: Message):
        """Return a decoded FileId for the media in *msg*.

        Bug fix: missing ``@staticmethod`` -- the function takes no ``self``
        but is called via ``self.generate_file_properties(msg)``.
        """
        error_message = "This message doesn't contain any downloadable media"
        available_media = ('audio', 'document', 'photo', 'sticker', 'animation', 'video', 'voice', 'video_note')
        if isinstance(msg, Message):
            for kind in available_media:
                media = getattr(msg, kind, None)
                if media is not None:
                    break
            else:
                raise ValueError(error_message)
        else:
            # Callers may pass the media object (or a file_id string) directly.
            media = msg
        if isinstance(media, str):
            file_id_str = media
        else:
            file_id_str = media.file_id
        file_id_obj = FileId.decode(file_id_str)
        # Attach extra metadata the caller needs alongside the FileId.
        setattr(file_id_obj, 'file_size', getattr(media, 'file_size', 0))
        setattr(file_id_obj, 'mime_type', getattr(media, 'mime_type', ''))
        setattr(file_id_obj, 'file_name', getattr(media, 'file_name', ''))
        return file_id_obj

    async def generate_media_session(self, client: Client, msg: Message):
        """Get (or lazily create and cache) a media Session for the media's DC."""
        data = await self.generate_file_properties(msg)
        media_session = client.media_sessions.get(data.dc_id, None)
        if media_session is None:
            if data.dc_id != (await client.storage.dc_id()):
                # Foreign DC: create fresh auth and import our authorization,
                # retrying up to 3 times on AuthBytesInvalid.
                media_session = Session(client, data.dc_id, (await Auth(client, data.dc_id, (await client.storage.test_mode())).create()), (await client.storage.test_mode()), is_media=True)
                await media_session.start()
                for _ in range(3):
                    exported_auth = await client.send(raw.functions.auth.ExportAuthorization(dc_id=data.dc_id))
                    try:
                        await media_session.send(raw.functions.auth.ImportAuthorization(id=exported_auth.id, bytes=exported_auth.bytes))
                    except AuthBytesInvalid:
                        continue
                    else:
                        break
                else:
                    await media_session.stop()
                    raise AuthBytesInvalid
            else:
                # Same DC: reuse the client's existing auth key.
                media_session = Session(client, data.dc_id, (await client.storage.auth_key()), (await client.storage.test_mode()), is_media=True)
                await media_session.start()
            client.media_sessions[data.dc_id] = media_session
        return media_session

    @staticmethod
    async def get_location(file_id: FileId):
        """Build the raw input file location matching the file type.

        Bug fix: missing ``@staticmethod`` -- called via ``self.get_location(data)``.
        """
        file_type = file_id.file_type
        if file_type == FileType.CHAT_PHOTO:
            if file_id.chat_id > 0:
                peer = raw.types.InputPeerUser(user_id=file_id.chat_id, access_hash=file_id.chat_access_hash)
            elif file_id.chat_access_hash == 0:
                peer = raw.types.InputPeerChat(chat_id=(- file_id.chat_id))
            else:
                peer = raw.types.InputPeerChannel(channel_id=utils.get_channel_id(file_id.chat_id), access_hash=file_id.chat_access_hash)
            location = raw.types.InputPeerPhotoFileLocation(peer=peer, volume_id=file_id.volume_id, local_id=file_id.local_id, big=(file_id.thumbnail_source == ThumbnailSource.CHAT_PHOTO_BIG))
        elif file_type == FileType.PHOTO:
            location = raw.types.InputPhotoFileLocation(id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size)
        else:
            location = raw.types.InputDocumentFileLocation(id=file_id.media_id, access_hash=file_id.access_hash, file_reference=file_id.file_reference, thumb_size=file_id.thumbnail_size)
        return location

    async def yield_file(self, media_msg: Message, offset: int, first_part_cut: int, last_part_cut: int, part_count: int, chunk_size: int) -> Union[(str, None)]:
        """Yield the file in chunks, trimming the first/last parts for range requests.

        NOTE(review): for part_count > 1 the final chunk is yielded untrimmed
        (last_part_cut is only applied in the single-part case) -- confirm this
        matches the callers' range handling.
        """
        client = self.main_bot
        data = await self.generate_file_properties(media_msg)
        media_session = await self.generate_media_session(client, media_msg)
        current_part = 1
        location = await self.get_location(data)
        r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=chunk_size))
        if isinstance(r, raw.types.upload.File):
            while current_part <= part_count:
                chunk = r.bytes
                if not chunk:
                    break
                offset += chunk_size
                if part_count == 1:
                    yield chunk[first_part_cut:last_part_cut]
                    break
                if current_part == 1:
                    yield chunk[first_part_cut:]
                if 1 < current_part <= part_count:
                    yield chunk
                r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=chunk_size))
                current_part += 1

    async def download_as_bytesio(self, media_msg: Message):
        """Download the whole file in 1 MiB chunks; returns the list of chunks."""
        client = self.main_bot
        data = await self.generate_file_properties(media_msg)
        media_session = await self.generate_media_session(client, media_msg)
        location = await self.get_location(data)
        limit = 1024 * 1024
        offset = 0
        r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=limit))
        if isinstance(r, raw.types.upload.File):
            m_file = []
            while True:
                chunk = r.bytes
                if not chunk:
                    break
                m_file.append(chunk)
                offset += limit
                r = await media_session.send(raw.functions.upload.GetFile(location=location, offset=offset, limit=limit))
            return m_file
def CreateBmmSoftmaxBmmPermOperator(manifest, operation_kind=library.GemmKind.BatchGemmSoftmaxGemmPermute, xdl_op_type=gemm.XdlOpType.DeviceBatchedGemmSoftmaxGemmPermute_Xdl_CShuffle, causal_mask=None):
    """Register batched GEMM+Softmax+GEMM-permute (attention) operator instances.

    Builds one operator per hard-coded tile configuration below, appends each
    to *manifest*, and returns the list of created operations. *causal_mask*
    enables masked attention when its ``library.TensorOperationTag`` entry is
    the string 'True'.
    """
    # fp16 attention: A row-major, B column-major, output row-major.
    a_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.RowMajor)
    b_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.ColumnMajor)
    c_element_desc = library.TensorDesc(library.DataType.f16, library.LayoutType.RowMajor)
    element_op = library.TensorOperation.PassThrough
    # Hand-tuned tile shapes; kept in lockstep (by index) with the
    # block-transfer, b1/c-shuffle and specialization lists below.
    tile_descriptions = [gemm.AttnTileDesc(256, 256, 128, 32, 64, 32, 8, 8, 2, 32, 32, 2, 4, 2), gemm.AttnTileDesc(256, 256, 128, 32, 128, 32, 8, 8, 2, 32, 32, 2, 4, 4), gemm.AttnTileDesc(256, 128, 256, 32, 64, 32, 8, 8, 2, 32, 32, 1, 8, 2), gemm.AttnTileDesc(256, 128, 256, 32, 128, 32, 8, 8, 2, 32, 32, 1, 8, 4), gemm.AttnTileDesc(256, 128, 128, 64, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2), gemm.AttnTileDesc(256, 128, 128, 32, 64, 32, 8, 8, 2, 32, 32, 1, 4, 2), gemm.AttnTileDesc(256, 128, 128, 64, 128, 32, 8, 8, 2, 32, 32, 1, 4, 4), gemm.AttnTileDesc(256, 128, 128, 32, 128, 32, 8, 8, 2, 32, 32, 1, 4, 4), gemm.AttnTileDesc(256, 64, 256, 32, 128, 32, 8, 8, 2, 16, 16, 1, 16, 8), gemm.AttnTileDesc(256, 64, 256, 32, 64, 32, 8, 8, 2, 16, 16, 1, 16, 4), gemm.AttnTileDesc(256, 64, 256, 64, 128, 32, 8, 8, 2, 16, 16, 1, 16, 8), gemm.AttnTileDesc(256, 64, 256, 64, 64, 32, 8, 8, 2, 16, 16, 1, 16, 4), gemm.AttnTileDesc(256, 128, 128, 64, 128, 32, 8, 8, 2, 32, 32, 1, 4, 4), gemm.AttnTileDesc(256, 128, 64, 32, 128, 32, 8, 8, 2, 32, 32, 1, 2, 4), gemm.AttnTileDesc(256, 128, 128, 64, 128, 32, 8, 8, 2, 32, 32, 1, 4, 4), gemm.AttnTileDesc(256, 128, 64, 32, 128, 32, 8, 8, 2, 32, 32, 1, 2, 4)]
    block_descriptions = [gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 0), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 0), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 0), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1), gemm.BlockTransferDesc([8, 32, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 0), gemm.BlockTransferDesc([4, 64, 1], [1, 0, 2], [1, 0, 2], 2, 8, 8, 1)]
    causal_mask_flag = 0
    if (causal_mask is not None):
        # Flag is 1 only when the tag for the supplied operation is 'True'.
        causal_mask_flag = (1 if (library.TensorOperationTag[causal_mask] == 'True') else 0)
    (c_block_descriptions, b1_block_descriptions) = ([], [])
    # Derive per-tile B1 and C-shuffle transfer descriptors; the index sets
    # below pick the variants matching the tile shapes above.
    for i in range(len(tile_descriptions)):
        if (i in [0, 2, 4, 5, 9, 11]):
            block_transfer = [16, 16, 1]
        else:
            block_transfer = [8, 32, 1]
        b1_block_descriptions.append(gemm.BlockTransferDesc(block_transfer, [0, 2, 1], [0, 2, 1], 1, 4, 2, 0))
        if (i in [8, 10]):
            c_block_transfer = gemm.MaskedCBlockTransferDesc(1, 8, [1, 16, 1, 16], 8, causal_mask_flag)
        else:
            c_shuffle = (4 if (i in [9, 11]) else 2)
            c_block_transfer = gemm.MaskedCBlockTransferDesc(1, c_shuffle, [1, 32, 1, 8], 8, causal_mask_flag)
        c_block_descriptions.append(c_block_transfer)
    gemm_specialization = []
    # First 12 tiles need no padding; 12/13 pad M,N,O; the rest also pad K.
    for i in range(len(tile_descriptions)):
        if (i < 12):
            gemm_specialization.append(gemm.GemmSpecialization.GemmDefault)
        elif (i in [12, 13]):
            gemm_specialization.append(gemm.GemmSpecialization.MNOPadding)
        else:
            gemm_specialization.append(gemm.GemmSpecialization.MNKOPadding)
    operations = []
    # extra_kind carries the mask operation when masking is active.
    extra_op = (element_op if (causal_mask_flag == 0) else causal_mask)
    for (tile_desc, block_desc, b1_block_desc, c_block_desc, gemm_spec) in zip(tile_descriptions, block_descriptions, b1_block_descriptions, c_block_descriptions, gemm_specialization):
        new_operation = gemm.GemmOperation(operation_kind=operation_kind, extra_kind=extra_op, xdl_op_type=xdl_op_type, A=a_element_desc, B=b_element_desc, C=c_element_desc, a_elem_op=element_op, b_elem_op=element_op, epilogue_functor=element_op, gemm_specialization=gemm_spec, tile_desc=tile_desc, a_block_transfer=block_desc, b_block_transfer=block_desc, b1_block_transfer=b1_block_desc, c_block_transfer=c_block_desc)
        manifest.append(new_operation)
        operations.append(new_operation)
    return operations
class ErrorTestCase(unittest.TestCase):
    """Drawing an unknown geometry type must raise SvgisError, both through
    the draw module directly and through the SVGIS feature renderer."""

    # GeoJSON-like feature with a deliberately bogus geometry type.
    feature = {'geometry': {'type': 'Bizarro', 'coordinates': [[(1, 2), (3, 4)], [(7, 8), (9, 10)]]}, 'properties': {}}

    def setUp(self):
        # Silence svgis logging while the error paths are exercised.
        logging.getLogger('svgis').setLevel(logging.CRITICAL)

    def testDrawInvalidGeometry(self):
        bad_geometry = self.feature['geometry']
        with self.assertRaises(errors.SvgisError):
            draw.geometry(bad_geometry)

    def testSvgisDrawInvalidGeometry(self):
        rendered = SVGIS([]).feature(self.feature, [], [])
        assert rendered == u''
# NOTE(review): the line below appears to be the remnant of a stripped
# decorator (likely hypothesis's @given(private_key_bytes=private_key_st));
# as written it is not valid Python - restore the decorator when confirmed.
(private_key_bytes=private_key_st)
def test_public_key_decompression_is_equal(private_key_bytes, native_key_api, coincurve_key_api):
    """The native and coincurve backends must decompress the same compressed
    public key to equal PublicKey values."""
    public_key_template = coincurve_key_api.PrivateKey(private_key_bytes).public_key
    compressed_public_key = public_key_template.to_compressed_bytes()
    native_public_key = native_key_api.PublicKey.from_compressed_bytes(compressed_public_key)
    coincurve_public_key = coincurve_key_api.PublicKey.from_compressed_bytes(compressed_public_key)
    assert (native_public_key == coincurve_public_key)
class Matching(object):
    """Client-side wrapper for the server's matchmaking protocol messages."""

    def __init__(self, core: Core):
        self.core = core

    def start(self, modes: Sequence[str]) -> None:
        """Ask the server to begin matching for the given modes."""
        self.core.server.write(wire.msg.StartMatching(modes=list(modes)))

    def stop(self) -> None:
        """Cancel matching; an empty mode list tells the server to stop."""
        self.core.server.write(wire.msg.StartMatching(modes=[]))

    def query(self) -> None:
        """Request the current matching state from the server."""
        self.core.server.write(wire.msg.QueryMatching())
# NOTE(review): the line below is the tail of a stripped decorator (likely
# @pytest.mark.parametrize); as written it is not valid Python.
.parametrize('model', [BinarySensorInfo, ButtonInfo, CoverInfo, FanInfo, LightInfo, NumberInfo, SelectInfo, SensorInfo, SirenInfo, SwitchInfo, TextSensorInfo, CameraInfo, ClimateInfo, LockInfo, MediaPlayerInfo, AlarmControlPanelInfo, TextInfo])
def test_build_unique_id(model):
    """Every entity info model must yield a '<mac>-<type name>-<object_id>' id."""
    obj = model(object_id='id')
    assert (build_unique_id('mac', obj) == f'mac-{_TYPE_TO_NAME[type(obj)]}-id')
def install_npm_win(env_dir, src_dir, args):
    """Install the requested npm version into a Windows nodeenv environment.

    Downloads the npm release archive, replaces any previously installed npm
    under ``Scripts``, and (under Cygwin) mirrors the install into ``bin``.

    NOTE(review): two URL string literals below are garbled (apparently
    damaged during extraction) and are not valid Python as shown; the
    original download URLs must be restored before this function can run.
    """
    logger.info((' * Install npm.js (%s) ... ' % args.npm), extra=dict(continued=True))
    # Garbled literal: should build the npm archive URL from args.npm.
    npm_url = (' % args.npm)
    npm_contents = io.BytesIO(urlopen(npm_url).read())
    bin_path = join(env_dir, 'Scripts')
    node_modules_path = join(bin_path, 'node_modules', 'npm')
    # Remove any previous npm install so the new copy is clean.
    if os.path.exists(node_modules_path):
        shutil.rmtree(node_modules_path)
    if os.path.exists(join(bin_path, 'npm.cmd')):
        os.remove(join(bin_path, 'npm.cmd'))
    if os.path.exists(join(bin_path, 'npm-cli.js')):
        os.remove(join(bin_path, 'npm-cli.js'))
    with zipfile.ZipFile(npm_contents, 'r') as zipf:
        zipf.extractall(src_dir)
    # Archive unpacks to a 'cli-<version>' directory.
    npm_ver = ('cli-%s' % args.npm)
    shutil.copytree(join(src_dir, npm_ver), node_modules_path)
    shutil.copy(join(src_dir, npm_ver, 'bin', 'npm.cmd'), join(bin_path, 'npm.cmd'))
    shutil.copy(join(src_dir, npm_ver, 'bin', 'npm-cli.js'), join(bin_path, 'npm-cli.js'))
    if is_CYGWIN:
        # Cygwin environments expect the launcher under bin/ as well.
        shutil.copy(join(bin_path, 'npm-cli.js'), join(env_dir, 'bin', 'npm-cli.js'))
        shutil.copytree(join(bin_path, 'node_modules'), join(env_dir, 'bin', 'node_modules'))
        # Garbled literal: should be the npm GitHub base URL.
        npm_gh_url = '
        npm_bin_url = '{}/{}/bin/npm'.format(npm_gh_url, args.npm)
        writefile(join(env_dir, 'bin', 'npm'), urlopen(npm_bin_url).read())
class GenericDataloader():
    """Aligned source/target dataset with optional per-sample target language.

    Subclasses implement :meth:`preprocess_source` / :meth:`preprocess_target`
    to turn raw entries (e.g. file paths or text lines) into ready samples.
    """

    def __init__(self, source_list: List[str], target_list: Union[(List[str], List[None])], tgt_lang_list: Optional[List[str]]=None) -> None:
        self.source_list = source_list
        self.target_list = target_list
        self.tgt_lang_list = tgt_lang_list
        # Source and target must be aligned one-to-one.
        assert (len(self.source_list) == len(self.target_list))

    def __len__(self) -> int:
        return len(self.source_list)

    def get_source(self, index: int) -> Any:
        """Return the preprocessed source sample at *index*."""
        return self.preprocess_source(self.source_list[index])

    def get_target(self, index: int) -> Any:
        """Return the preprocessed target sample at *index*."""
        return self.preprocess_target(self.target_list[index])

    def get_tgt_lang(self, index: int) -> Optional[str]:
        """Return the target language for *index*, or None when unavailable."""
        if ((getattr(self, 'tgt_lang_list', None) is None) or (index >= len(self.tgt_lang_list))):
            return None
        return self.tgt_lang_list[index]

    def __getitem__(self, index: int) -> Dict[(str, Any)]:
        return {'source': self.get_source(index), 'target': self.get_target(index), 'tgt_lang': self.get_tgt_lang(index)}

    def preprocess_source(self, source: Any) -> Any:
        # Subclass responsibility.
        raise NotImplementedError

    def preprocess_target(self, target: Any) -> Any:
        # Subclass responsibility.
        raise NotImplementedError

    @classmethod
    def from_args(cls, args: Namespace):
        """Build a dataloader from parsed CLI args.

        Restored the missing @classmethod decorator: without it, calling
        ``SomeLoader.from_args(args)`` would bind *args* to ``cls``.
        """
        return cls(args.source, args.target)

    @staticmethod
    def add_args(parser: ArgumentParser):
        """Register dataloader CLI options (restored missing @staticmethod)."""
        parser.add_argument('--source', type=str, help='Source file.')
        parser.add_argument('--target', type=str, help='Target file.')
        parser.add_argument('--source-type', type=str, choices=SUPPORTED_SOURCE_MEDIUM, help='Source Data type to evaluate.')
        parser.add_argument('--target-type', type=str, choices=SUPPORTED_TARGET_MEDIUM, help='Data type to evaluate.')
        parser.add_argument('--source-segment-size', type=int, default=1, help='Source segment size, For text the unit is # token, for speech is ms')
        parser.add_argument('--tgt-lang', type=str, default=None, help='Target language')
def cupy_pytorch_allocator(size_in_bytes: int):
    """Serve a CuPy allocation out of PyTorch's memory pool.

    Allocates a float32 torch tensor large enough to cover *size_in_bytes*
    and hands its address to CuPy as unowned memory, so both libraries share
    one pool.
    """
    device = get_torch_default_device()
    # Allocate at least 1 KiB so tiny requests don't thrash the allocator.
    size_in_bytes = max(1024, size_in_bytes)
    # Round UP to a whole number of float32 elements. The original
    # `size_in_bytes // 4` under-allocated by up to 3 bytes whenever the
    # request was not a multiple of 4, while UnownedMemory below still
    # claimed the full size.
    num_floats = ((size_in_bytes + 3) // 4)
    torch_tensor = torch.zeros((num_floats,), requires_grad=False, device=device)
    address = torch_tensor.data_ptr()
    # Passing the tensor as the owner keeps it alive while CuPy uses the buffer.
    memory = cupy.cuda.memory.UnownedMemory(address, size_in_bytes, torch_tensor)
    return cupy.cuda.memory.MemoryPointer(memory, 0)
def run_server(debug_gc=False):
    """Run the firewalld D-Bus service inside a GLib main loop.

    Args:
        debug_gc: when True, switch the garbage collector into leak-debug
            mode and dump uncollectable objects every ``gc_timeout`` seconds.
    """
    service = None
    if debug_gc:
        from pprint import pformat
        import gc
        gc.enable()
        gc.set_debug(gc.DEBUG_LEAK)
        gc_timeout = 10

        def gc_collect():
            # Force a collection and dump whatever is left in gc.garbage.
            gc.collect()
            if (len(gc.garbage) > 0):
                print('\n\n')
                print(('GARBAGE OBJECTS (%d):\n' % len(gc.garbage)))
                for x in gc.garbage:
                    print(type(x), '\n ')
                    print(pformat(x))
                print('\n\n')
            # Return True so GLib keeps this timeout source alive. The
            # original returned None (source removed after the first call)
            # and also registered the callback a second time before the
            # main loop existed - both fixed here.
            return True
    try:
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        bus = dbus.SystemBus()
        name = dbus.service.BusName(config.dbus.DBUS_INTERFACE, bus=bus)
        service = FirewallD(name, config.dbus.DBUS_PATH)
        mainloop = GLib.MainLoop()
        if debug_gc:
            # Single periodic registration (see note in gc_collect above).
            GLib.timeout_add_seconds(gc_timeout, gc_collect)
        # Prefer the modern unix_signal_add; fall back for older GLib builds.
        if hasattr(GLib, 'unix_signal_add'):
            unix_signal_add = GLib.unix_signal_add
        else:
            unix_signal_add = GLib.unix_signal_add_full
        unix_signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, sighup, service)
        unix_signal_add(GLib.PRIORITY_HIGH, signal.SIGTERM, sigterm, mainloop)
        mainloop.run()
    except KeyboardInterrupt:
        log.debug1('Stopping..')
    except SystemExit:
        log.error('Raising SystemExit in run_server')
    except Exception as e:
        log.error('Exception %s: %s', e.__class__.__name__, str(e))
    if service:
        service.stop()
def test_ge_with_task():
    """Great Expectations validation gates the downstream row-count task.

    NOTE(review): the nested functions below look like they lost @task /
    @workflow decorators during extraction - confirm against the original.
    """
    task_object = GreatExpectationsTask(name='test6', datasource_name='data', inputs=kwtypes(dataset=str), expectation_suite_name='test.demo', data_connector_name='data_example_data_connector')
    def my_task(csv_file: str) -> int:
        # Row count of the CSV file located under ./data.
        df = pd.read_csv(os.path.join('data', csv_file))
        return df.shape[0]
    def valid_wf(dataset: str='yellow_tripdata_sample_2019-01.csv') -> int:
        task_object(dataset=dataset)
        return my_task(csv_file=dataset)
    def invalid_wf(dataset: str='yellow_tripdata_sample_2019-02.csv') -> int:
        task_object(dataset=dataset)
        return my_task(csv_file=dataset)
    valid_result = valid_wf()
    assert (valid_result == 10000)
    # The 2019-02 sample violates the expectation suite, so validation raises.
    with pytest.raises(ValidationError, match='.*passenger_count -> expect_column_min_to_be_between.*'):
        invalid_wf()
class SetActiveAttribute(Filter):
    """Filter that selects the "active" scalar/vector/tensor attribute of its
    input data.

    Wraps ``tvtk.AssignAttribute`` so downstream pipeline modules see the
    chosen point/cell data array as the active one, without copying data.
    """

    # State version used by the persistence machinery.
    __version__ = 0
    input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any'])
    output_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any'])
    # User-facing choices; the corresponding _*_list traits below hold the
    # available attribute names discovered from the input.
    point_scalars_name = DEnum(values_name='_point_scalars_list', desc='scalar point data attribute to use')
    point_vectors_name = DEnum(values_name='_point_vectors_list', desc='vectors point data attribute to use')
    point_tensors_name = DEnum(values_name='_point_tensors_list', desc='tensor point data attribute to use')
    cell_scalars_name = DEnum(values_name='_cell_scalars_list', desc='scalar cell data attribute to use')
    cell_vectors_name = DEnum(values_name='_cell_vectors_list', desc='vectors cell data attribute to use')
    cell_tensors_name = DEnum(values_name='_cell_tensors_list', desc='tensor cell data attribute to use')
    view = View(Group(Item(name='point_scalars_name'), Item(name='point_vectors_name'), Item(name='point_tensors_name'), Item(name='cell_scalars_name'), Item(name='cell_vectors_name'), Item(name='cell_tensors_name')))
    # Dynamic value lists backing the DEnum traits above.
    _point_scalars_list = List(Str)
    _point_vectors_list = List(Str)
    _point_tensors_list = List(Str)
    _cell_scalars_list = List(Str)
    _cell_vectors_list = List(Str)
    _cell_tensors_list = List(Str)
    # The wrapped VTK filter doing the actual attribute assignment.
    _assign_attribute = Instance(tvtk.AssignAttribute, args=(), allow_none=False)
    # True until the first _setup_data_traits pass has run.
    _first = Bool(True)

    def __get_pure_state__(self):
        """Return picklable state: drop transient traits, keep chosen names."""
        d = super(SetActiveAttribute, self).__get_pure_state__()
        for name in ('_assign_attribute', '_first'):
            d.pop(name, None)
        attr = {}
        for name in ('point_scalars', 'point_vectors', 'point_tensors', 'cell_scalars', 'cell_vectors', 'cell_tensors'):
            # Drop the derived lists/names; keep only the user's selection.
            d.pop((('_' + name) + '_list'), None)
            d.pop((('_' + name) + '_name'), None)
            x = (name + '_name')
            attr[x] = getattr(self, x)
        d.update(attr)
        return d

    def update_data(self):
        # Input data changed: just propagate the notification.
        self.data_changed = True

    def update_pipeline(self):
        """Reconnect the AssignAttribute filter when the pipeline changes."""
        if ((len(self.inputs) == 0) or (len(self.inputs[0].outputs) == 0)):
            return
        aa = self._assign_attribute
        self.configure_connection(aa, self.inputs[0])
        self._update()
        self._set_outputs([aa])

    def _update(self):
        """Refresh the attribute lists from the current input dataset."""
        if ((len(self.inputs) == 0) or (len(self.inputs[0].outputs) == 0)):
            return
        input = self.inputs[0].get_output_object()
        (pnt_attr, cell_attr) = get_all_attributes(self.inputs[0].get_output_dataset())
        self._setup_data_traits(cell_attr, 'cell')
        self._setup_data_traits(pnt_attr, 'point')
        if self._first:
            self._first = False

    def _setup_data_traits(self, attributes, d_type):
        """Populate the choice lists for *d_type* ('point' or 'cell') and
        apply the default active attribute for each of scalars/vectors/tensors."""
        attrs = ['scalars', 'vectors', 'tensors']
        aa = self._assign_attribute
        input = self.inputs[0].get_output_dataset()
        data = getattr(input, ('%s_data' % d_type))
        for attr in attrs:
            values = attributes[attr]
            # Empty entry lets the user deselect the attribute entirely.
            values.append('')
            setattr(self, ('_%s_%s_list' % (d_type, attr)), values)
            if (len(values) > 1):
                default = getattr(self, ('%s_%s_name' % (d_type, attr)))
                if (self._first and (len(default) == 0)):
                    # No prior selection: pick the first available attribute.
                    default = values[0]
                getattr(data, ('set_active_%s' % attr))(default)
                aa.assign(default, attr.upper(), (d_type.upper() + '_DATA'))
                aa.update()
                # Set the trait quietly to avoid re-triggering the handlers.
                kw = {('%s_%s_name' % (d_type, attr)): default, 'trait_change_notify': False}
                self.trait_set(**kw)

    def _set_data_name(self, data_type, attr_type, value):
        """Make *value* the active *data_type* ('scalars'/...) of the
        *attr_type* ('point'/'cell') data; empty value deactivates it."""
        if ((value is None) or (len(self.inputs) == 0)):
            return
        input = self.inputs[0].get_output_dataset()
        if (len(value) == 0):
            # Deselect: clear the active attribute and notify downstream.
            d = getattr(input, (attr_type + '_data'))
            method = getattr(d, ('set_active_%s' % data_type))
            method(None)
            self.data_changed = True
            return
        aa = self._assign_attribute
        data = None
        if (attr_type == 'point'):
            data = input.point_data
        elif (attr_type == 'cell'):
            data = input.cell_data
        method = getattr(data, ('set_active_%s' % data_type))
        method(value)
        aa.assign(value, data_type.upper(), (attr_type.upper() + '_DATA'))
        aa.update()
        self.data_changed = True

    # Trait change handlers: forward each *_name selection to _set_data_name.
    def _point_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'point', value)

    def _point_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'point', value)

    def _point_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'point', value)

    def _cell_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'cell', value)

    def _cell_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'cell', value)

    def _cell_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'cell', value)
class PythonShellTask(Task):
    """A Tasks-framework task whose central pane is an embedded Python shell."""

    id = 'pyface.tasks.contrib.python_shell'
    name = 'Python Shell'
    bindings = List(Dict)
    commands = List(Str)
    pane = Instance(PythonShellPane)
    menu_bar = SMenuBar(SMenu(TaskAction(name='Open...', method='open', accelerator='Ctrl+O'), id='File', name='&File'), SMenu(id='View', name='&View'))

    def create_central_pane(self):
        """Create the shell pane, remember it on self, and return it."""
        logger.debug('Creating Python shell pane in central pane')
        shell_pane = PythonShellPane(bindings=self.bindings, commands=self.commands)
        self.pane = shell_pane
        return shell_pane

    def open(self):
        """Prompt the user for a Python file and execute it in the shell."""
        logger.debug('PythonShellTask: opening file')
        chooser = FileDialog(parent=self.window.control, wildcard='*.py')
        if chooser.open() == OK:
            self._open_file(chooser.path)

    def _open_file(self, path):
        """Run the file at *path* inside the shell editor."""
        logger.debug(('PythonShellTask: executing file "%s"' % path))
        self.pane.editor.execute_file(path)
# NOTE(review): the two lines below look like remnants of stripped decorators
# (likely @using('pyscf') and @pytest.mark.parametrize); they are not valid
# Python as written - restore the decorators when confirmed.
('pyscf')
.parametrize('xyz_fn, coord_type, ref_rms', [('lib:hcn_bent.xyz', 'cart', 1.2e-06), ('lib:h2o2_rot2.xyz', 'redund', 0.000877)])
def test_numhess(xyz_fn, coord_type, ref_rms):
    """The numerical Hessian must agree with PySCF's Hessian within ref_rms."""
    geom = geom_loader(xyz_fn, coord_type=coord_type)
    calc = PySCF(basis='321g', pal=2, keep_chk=False)
    geom.set_calculator(calc)
    # Analytical (calculator-provided) vs. finite-difference Hessian.
    H = geom.hessian
    nH = numhess(geom)
    assert compare_hessians(H, nH, ref_rms)
class OptionSeriesXrangeSonificationDefaultinstrumentoptions(Options):
    """Generated Highcharts option wrapper for
    series.xrange.sonification.defaultInstrumentOptions.

    NOTE(review): each getter/setter pair below shares one name; the original
    source presumably used @property / @<name>.setter decorators that were
    stripped during extraction. As written, each second definition shadows
    the first.
    """

    def activeWhen(self) -> 'OptionSeriesXrangeSonificationDefaultinstrumentoptionsActivewhen':
        # Sub-option object controlling when the track is active.
        return self._config_sub_data('activeWhen', OptionSeriesXrangeSonificationDefaultinstrumentoptionsActivewhen)
    def instrument(self):
        # Default instrument is the piano.
        return self._config_get('piano')
    def instrument(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionSeriesXrangeSonificationDefaultinstrumentoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesXrangeSonificationDefaultinstrumentoptionsMapping)
    def midiName(self):
        return self._config_get(None)
    def midiName(self, text: str):
        self._config(text, js_type=False)
    def pointGrouping(self) -> 'OptionSeriesXrangeSonificationDefaultinstrumentoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesXrangeSonificationDefaultinstrumentoptionsPointgrouping)
    def roundToMusicalNotes(self):
        return self._config_get(True)
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    def showPlayMarker(self):
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        # Track type defaults to 'instrument'.
        return self._config_get('instrument')
    def type(self, text: str):
        self._config(text, js_type=False)
def read_configuration_from_pyproject_toml(ctx: click.Context, _param: click.Parameter, value: Path) -> (Path | None):
    """Click callback that seeds option defaults from [tool.deptry] in pyproject.toml.

    Missing file or missing section are both fine: the callback simply
    returns *value* unchanged; otherwise the section's entries are merged
    over any existing ``ctx.default_map``.
    """
    try:
        pyproject_data = load_pyproject_toml(value)
    except FileNotFoundError:
        logging.debug('No pyproject.toml file to read configuration from.')
        return value
    try:
        tool_config = pyproject_data['tool']['deptry']
    except KeyError:
        logging.debug('No configuration for deptry was found in pyproject.toml.')
        return value
    # Start from the existing defaults (if any) and overlay the TOML config.
    merged_defaults: dict[(str, Any)] = dict(ctx.default_map) if ctx.default_map else {}
    merged_defaults.update(tool_config)
    ctx.default_map = merged_defaults
    return value
def _initialize_model_cache(system_app: SystemApp):
    """Initialize the LLM response cache if enabled in configuration."""
    from dbgpt.storage.cache import initialize_cache
    if (not CFG.MODEL_CACHE_ENABLE):
        # Fixed log message grammar ("is not enable" -> "is not enabled").
        logger.info('Model cache is not enabled')
        return
    # Fall back to sane defaults when the corresponding settings are unset.
    storage_type = (CFG.MODEL_CACHE_STORAGE_TYPE or 'disk')
    max_memory_mb = (CFG.MODEL_CACHE_MAX_MEMORY_MB or 256)
    persist_dir = (CFG.MODEL_CACHE_STORAGE_DISK_DIR or MODEL_DISK_CACHE_DIR)
    initialize_cache(system_app, storage_type, max_memory_mb, persist_dir)
def user_record_to_row(user: UserRecord) -> Row:
    """Flatten a Firebase ``UserRecord`` into a row dict.

    Only attributes that are not None are copied. When provider data is
    present it is emitted as a list of dicts under ``provider_data``.
    """
    row = {'uid': user.uid}
    for attr in ('email', 'uid', 'display_name', 'phone_number', 'photo_url', 'disabled', 'email_verified'):
        attr_value = getattr(user, attr)
        if attr_value is not None:
            row[attr] = attr_value
    if user.provider_data:
        provider_rows = []
        for provider in user.provider_data:
            entry = {}
            for attr in ('display_name', 'provider_id', 'email', 'photo_url'):
                attr_value = getattr(provider, attr)
                if attr_value is not None:
                    entry[attr] = attr_value
            provider_rows.append(entry)
        row['provider_data'] = provider_rows
    return row
def setup_dev_environment(dags: List[DAG], host: Optional[str]='0.0.0.0', port: Optional[int]=5555, logging_level: Optional[str]=None, logger_filename: Optional[str]=None) -> None:
    """Run a local FastAPI server hosting the trigger nodes of the given DAGs.

    Intended for local development only; this call blocks inside
    ``uvicorn.run`` until the process is interrupted.
    """
    import uvicorn
    from fastapi import FastAPI
    from dbgpt.component import SystemApp
    from dbgpt.util.utils import setup_logging
    from .dag.base import DAGVar
    from .trigger.trigger_manager import DefaultTriggerManager

    log_file = logger_filename or 'dbgpt_awel_dev.log'
    setup_logging('dbgpt', logging_level=logging_level, logger_filename=log_file)
    web_app = FastAPI()
    system_app = SystemApp(web_app)
    DAGVar.set_current_system_app(system_app)
    trigger_manager = DefaultTriggerManager()
    system_app.register_instance(trigger_manager)
    # Register every trigger node of every DAG before starting the server.
    for dag in dags:
        for trigger_node in dag.trigger_nodes:
            trigger_manager.register_trigger(trigger_node)
    trigger_manager.after_register()
    uvicorn.run(web_app, host=host, port=port)
class Pluginstests(DatabaseTestCase):
    """Exercise the anitya plugin discovery helpers against the expected
    backend and ecosystem lists."""

    def test_load_all_plugins(self):
        # load_all_plugins returns backends and ecosystems keyed by kind.
        all_plugins = plugins.load_all_plugins(self.session)
        backend_plugins = all_plugins['backends']
        self.assertEqual(len(backend_plugins), len(EXPECTED_BACKENDS))
        backend_names = sorted((plugin.name for plugin in backend_plugins))
        self.assertEqual(sorted(backend_names), sorted(EXPECTED_BACKENDS))
        ecosystem_plugins = all_plugins['ecosystems']
        # Map each ecosystem to its default backend for comparison.
        ecosystems = dict(((plugin.name, plugin.default_backend) for plugin in ecosystem_plugins))
        self.assertEqual(ecosystems, EXPECTED_ECOSYSTEMS)

    def test_load_plugins(self):
        # Plain load_plugins returns only the backend plugins.
        backend_plugins = plugins.load_plugins(self.session)
        self.assertEqual(len(backend_plugins), len(EXPECTED_BACKENDS))
        backend_names = sorted((plugin.name for plugin in backend_plugins))
        self.assertEqual(sorted(backend_names), sorted(EXPECTED_BACKENDS))

    def test_plugins_get_plugin_names(self):
        plugin_names = plugins.get_plugin_names()
        self.assertEqual(len(plugin_names), len(EXPECTED_BACKENDS))
        self.assertEqual(sorted(plugin_names), sorted(EXPECTED_BACKENDS))

    def test_plugins_get_plugin(self):
        # get_plugin returns the backend class itself, not an instance.
        plugin = plugins.get_plugin('PyPI')
        self.assertEqual(str(plugin), "<class 'anitya.lib.backends.pypi.PypiBackend'>")
class OptionPlotoptionsBulletSonificationDefaultspeechoptionsPointgrouping(Options):
    """Generated Highcharts option wrapper for
    plotOptions.bullet.sonification.defaultSpeechOptions.pointGrouping.

    NOTE(review): each getter/setter pair below shares one name; the original
    source presumably used @property / @<name>.setter decorators that were
    stripped during extraction. As written, each second definition shadows
    the first.
    """

    def algorithm(self):
        # Default grouping algorithm.
        return self._config_get('last')
    def algorithm(self, text: str):
        self._config(text, js_type=False)
    def enabled(self):
        # Point grouping is enabled by default.
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def groupTimespan(self):
        # Default grouping window in milliseconds.
        return self._config_get(15)
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)
    def prop(self):
        # Point property that grouping operates on.
        return self._config_get('y')
    def prop(self, text: str):
        self._config(text, js_type=False)
def sort_configuration_file(config: PackageConfiguration) -> None:
    """Rewrite a package's configuration file with canonically ordered keys.

    Agent packages keep their component overrides as extra YAML documents
    after the main one; every other package type writes a single document.
    """
    assert (config.directory is not None)
    configuration_filepath = (config.directory / config.default_configuration_filename)
    if (config.package_type == PackageType.AGENT):
        json_data = config.ordered_json
        # Component overrides become trailing YAML documents.
        component_configurations = json_data.pop('component_configurations')
        # Use context managers so the handles are closed; the original
        # leaked the file objects returned by Path.open().
        with configuration_filepath.open('w', encoding='utf-8') as fp:
            yaml_dump_all(([json_data] + component_configurations), fp)
    else:
        with configuration_filepath.open('w', encoding='utf-8') as fp:
            yaml_dump(config.ordered_json, fp)
(max_runs=3)
def test_miner_start(w3_empty, wait_for_miner_start):
w3 = w3_empty
assert w3.eth.mining
assert w3.eth.hashrate
w3.geth.miner.stop()
with Timeout(60) as timeout:
while (w3.eth.mining or w3.eth.hashrate):
timeout.sleep(random.random())
assert (not w3.eth.mining)
assert (not w3.eth.hashrate)
w3.miner.start(1)
wait_for_miner_start(w3)
assert w3.eth.mining
assert w3.eth.hashrate |
class DataIteratorTest(absltest.TestCase):
    """Tests for ReplayBuffer.get_iterators batch shapes."""

    def test_transition_iterator(self):
        obs_size = 3
        act_size = 4
        # Capacity-1000 buffer; fill with 10 dummy transitions.
        replay = replay_lib.ReplayBuffer(int(1000.0), obs_shape=(obs_size,), action_shape=(act_size,))
        for _ in range(10):
            replay.add(np.zeros(obs_size), np.zeros(act_size), np.zeros(obs_size), 1.0, False)
        # 10% of the stored data is routed to the validation iterator.
        (train_iterator, val_iterator) = replay.get_iterators(batch_size=10, val_ratio=0.1)
        # Every batch from either iterator must keep the observation width.
        for batch in train_iterator:
            self.assertEqual(batch.obs.shape[(- 1)], obs_size)
        for batch in val_iterator:
            self.assertEqual(batch.obs.shape[(- 1)], obs_size)
class RectangularSelectionTestCase(EnableTestAssistant, unittest.TestCase):
    """Drag-select a rectangle over a scatter plot and check the mask."""

    def test_selection_mask(self):
        plot_data = ArrayPlotData()
        plot = Plot(plot_data)
        # Four points on the diagonal; only the inner two fall inside the box.
        arr = np.array([(- 2), (- 1), 1, 2])
        plot_data.set_data('x', arr)
        plot_data.set_data('y', arr)
        splot = plot.plot(('x', 'y'), type='scatter')[0]
        tool = RectangularSelection(component=splot, selection_datasource=splot.index, metadata_name='selections')
        splot.tools.append(tool)
        # Convert the data-space rectangle (-1.5,-1.5)..(1.5,1.5) to screen.
        cursor_start = splot.map_screen([(- 1.5), (- 1.5)])[0]
        cursor_stop = splot.map_screen([1.5, 1.5])[0]
        # Simulate press, drag, release.
        self.mouse_down(interactor=tool, x=cursor_start[0], y=cursor_start[1])
        self.mouse_move(interactor=tool, x=cursor_stop[0], y=cursor_stop[1])
        self.mouse_up(interactor=tool, x=cursor_stop[0], y=cursor_stop[1])
        expected_mask = [False, True, True, False]
        selection_mask = list(splot.index.metadata['selections'])
        self.assertEqual(expected_mask, selection_mask)
class OFProtocol(namedtuple('OFProtocol', ['version', 'classes', 'enums'])):
    """An OpenFlow protocol description: a version plus its classes and enums."""

    def __init__(self, version, classes, enums):
        # Validation only -- namedtuple fields are assigned in __new__.
        assert ((version is None) or isinstance(version, OFVersion))

    def class_by_name(self, name):
        """Return the class named *name*, or the find() miss value."""
        return find((lambda candidate: (candidate.name == name)), self.classes)

    def enum_by_name(self, name):
        """Return the enum named *name*, or the find() miss value."""
        return find((lambda candidate: (candidate.name == name)), self.enums)
class OptionPlotoptionsSunburstSonificationTracksMappingLowpass(Options):
    """Generated Highcharts wrapper for the lowpass filter mapping of a
    sunburst sonification track."""

    def frequency(self) -> 'OptionPlotoptionsSunburstSonificationTracksMappingLowpassFrequency':
        # Sub-option object controlling the filter cutoff frequency.
        return self._config_sub_data('frequency', OptionPlotoptionsSunburstSonificationTracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsSunburstSonificationTracksMappingLowpassResonance':
        # Sub-option object controlling the filter resonance.
        return self._config_sub_data('resonance', OptionPlotoptionsSunburstSonificationTracksMappingLowpassResonance)
def validate_minimal_contract_factory_data(contract_data: Dict[(str, str)]) -> None:
    """Raise InsufficientAssetsError unless *contract_data* contains both
    'abi' and 'deploymentBytecode'."""
    required_keys = ('abi', 'deploymentBytecode')
    if any((key not in contract_data) for key in required_keys):
        raise InsufficientAssetsError('Minimum required contract data to generate a deployable contract factory (abi & deploymentBytecode) not found.')
def read_union(decoder, writer_schema, named_schemas, reader_schema=None, options={}):
    """Read a value encoded as an Avro union.

    The writer's branch is chosen by the encoded index; when *reader_schema*
    is given, schema resolution must find a matching reader branch or
    SchemaResolutionError is raised. The ``return_*`` options control whether
    the value is wrapped in a ``(name, value)`` tuple.

    NOTE: the mutable default ``options={}`` is safe here because the dict
    is only read, never mutated.
    """
    index = decoder.read_index()
    idx_schema = writer_schema[index]
    if reader_schema:
        # Shared by both resolution-failure paths below.
        msg = f'schema mismatch: {writer_schema} not found in {reader_schema}'
        if (not isinstance(reader_schema, list)):
            # Reader expects a single type: the writer branch must match it.
            if match_types(idx_schema, reader_schema, named_schemas):
                result = read_data(decoder, idx_schema, named_schemas, reader_schema, options)
            else:
                raise SchemaResolutionError(msg)
        else:
            # Reader union: first matching branch wins; for-else raises when
            # no branch matches at all.
            for schema in reader_schema:
                if match_types(idx_schema, schema, named_schemas):
                    result = read_data(decoder, idx_schema, named_schemas, schema, options)
                    break
            else:
                raise SchemaResolutionError(msg)
    else:
        result = read_data(decoder, idx_schema, named_schemas, None, options)
    # Optional wrapping of the result with the branch's record/type name.
    return_record_name_override = options.get('return_record_name_override')
    return_record_name = options.get('return_record_name')
    return_named_type_override = options.get('return_named_type_override')
    return_named_type = options.get('return_named_type')
    if (return_named_type_override and is_single_name_union(writer_schema)):
        # Single-name unions stay unwrapped when the override is set.
        return result
    elif (return_named_type and (extract_record_type(idx_schema) in NAMED_TYPES)):
        return (idx_schema['name'], result)
    elif (return_named_type and (extract_record_type(idx_schema) not in AVRO_TYPES)):
        # Named reference: resolve it to recover the full name.
        return (named_schemas['writer'][idx_schema]['name'], result)
    elif (return_record_name_override and is_single_record_union(writer_schema)):
        return result
    elif (return_record_name and (extract_record_type(idx_schema) == 'record')):
        return (idx_schema['name'], result)
    elif (return_record_name and (extract_record_type(idx_schema) not in AVRO_TYPES)):
        return (named_schemas['writer'][idx_schema]['name'], result)
    else:
        return result
class OptionPlotoptionsFunnel3dLabelStyle(Options):
    """Generated Highcharts wrapper for plotOptions.funnel3d label style.

    NOTE(review): the getter/setter pairs below share names (decorators
    apparently stripped), and the fontSize setter's ``num: float`` annotation
    conflicts with the '0.8em' string default - presumably it should accept a
    CSS size string; confirm against the original source.
    """

    def fontSize(self):
        # Default label font size (CSS length).
        return self._config_get('0.8em')
    def fontSize(self, num: float):
        self._config(num, js_type=False)
    def fontWeight(self):
        # Labels are bold by default.
        return self._config_get('bold')
    def fontWeight(self, text: str):
        self._config(text, js_type=False)
def gaussian_highpass_kernel(fft_grid, wavelength):
    """Build a Gaussian high-pass filter over a 2-D FFT grid.

    The filter is ``1 - exp(-|k|^2 / (2 * k_c^2))`` with cutoff wavenumber
    ``k_c = 2*pi / wavelength``, evaluated on the grid's frequency
    coordinates (dims[0] is northing, dims[1] is easting).
    """
    northing_dim, easting_dim = fft_grid.dims[0], fft_grid.dims[1]
    # Convert frequencies (cycles per unit length) into angular wavenumbers.
    k_easting = 2 * np.pi * fft_grid.coords[easting_dim]
    k_northing = 2 * np.pi * fft_grid.coords[northing_dim]
    cutoff = (2 * np.pi) / wavelength
    k_squared = (k_easting ** 2) + (k_northing ** 2)
    return 1 - np.exp(-k_squared / (2 * (cutoff ** 2)))
def extractHarushyazinthengartenArtBlog(item):
    """Parse a release post from the Harushyazinthengarten art blog feed.

    Returns None for protected/preview posts or when neither a volume nor a
    chapter can be parsed, a release message for recognized tags, and False
    when nothing matches.
    """
    title = item['title']
    if title.startswith('Protected: '):
        return None
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol):
        return None
    if 'preview' in title.lower():
        return None
    # (tag to look for, series name, translation type)
    tagmap = [('a villain is a good match for a tyrant', 'a villain is a good match for a tyrant', 'translated'), ('the hero is standing in my way', 'the hero is standing in my way', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, series_name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def test_switch_test_no_default(task):
    """A 4-case switch with no default must be restructured into a chain of
    nested equality ConditionNodes when min_switch_case_number is raised to 4.

    NOTE(review): the ``scanf_call(var_1, , 2)`` call below is garbled
    (an argument was lost during extraction) and is not valid Python as
    written - restore the missing argument from the original source.
    """
    # Pointer and switch variables mirroring the decompiled C sample.
    var_1 = Variable('var_1', Pointer(Integer(32, True), 32), None, False, Variable('var_28', Pointer(Integer(32, True), 32), 1, False, None))
    switch_variable = Variable('var_0', Integer(32, True), None, True, Variable('var_10', Integer(32, True), 0, True, None))
    # Build the CFG: entry block, dispatch block, three case bodies, exit.
    task.graph.add_nodes_from((vertices := [BasicBlock(0, [Assignment(ListOperation([]), print_call('Enter week number(1-7): ', 1)), Assignment(var_1, UnaryOperation(OperationType.address, [switch_variable], Pointer(Integer(32, True), 32), None, False)), Assignment(ListOperation([]), scanf_call(var_1, , 2)), Branch(Condition(OperationType.greater_us, [switch_variable, Constant(7, Integer(32, True))], CustomType('bool', 1)))]), BasicBlock(2, [IndirectBranch(switch_variable)]), BasicBlock(4, [Assignment(ListOperation([]), print_call('Monday', 3))]), BasicBlock(5, [Assignment(ListOperation([]), print_call('Tuesday', 4))]), BasicBlock(6, [Assignment(ListOperation([]), print_call('Wednesday', 5))]), BasicBlock(11, [Return(ListOperation([Constant(0, Integer(32, True))]))])]))
    task.graph.add_edges_from([FalseCase(vertices[0], vertices[1]), TrueCase(vertices[0], vertices[5]), SwitchCase(vertices[1], vertices[5], [Constant(0, Integer(32, signed=True))]), SwitchCase(vertices[1], vertices[2], [Constant(1, Integer(32, signed=True))]), SwitchCase(vertices[1], vertices[3], [Constant(2, Integer(32, signed=True))]), SwitchCase(vertices[1], vertices[4], [Constant(3, Integer(32, signed=True))]), UnconditionalEdge(vertices[2], vertices[5]), UnconditionalEdge(vertices[3], vertices[5]), UnconditionalEdge(vertices[4], vertices[5])])
    # With the threshold at 4, this 4-case switch must NOT become a switch node.
    task.options.set('pattern-independent-restructuring.min_switch_case_number', 4)
    PatternIndependentRestructuring().run(task)
    # Expect: prologue code node, condition chain, epilogue (return) node.
    assert (isinstance((seq_node := task.syntax_tree.root), SeqNode) and (len(seq_node.children) == 3))
    assert (isinstance(seq_node.children[0], CodeNode) and (seq_node.children[0].instructions == vertices[0].instructions[:(- 1)]))
    assert isinstance((cond_node := seq_node.children[1]), ConditionNode)
    assert (isinstance(seq_node.children[2], CodeNode) and (seq_node.children[2].instructions == vertices[(- 1)].instructions))
    # Walk the nested condition chain: case 1, 2, 3 in order, then no default.
    current_condition_node: ConditionNode = cond_node
    for case_const in range(1, 4):
        assert isinstance(current_condition_node, ConditionNode)
        assert (task.syntax_tree.condition_map[current_condition_node.condition] == Condition(OperationType.equal, [switch_variable, Constant(case_const, Integer(32, signed=True))]))
        assert (isinstance((tb := current_condition_node.true_branch_child), CodeNode) and (tb.instructions == vertices[(case_const + 1)].instructions))
        current_condition_node = current_condition_node.false_branch_child
    assert (current_condition_node is None)
def change_sbref_palette(user_settings, temp_dir):
    """Create a mean-volume SBRef image for the fMRI run and set its display palette.

    The 4D functional series under MNINonLinear/Results is collapsed into its
    temporal mean with wb_command, written into *temp_dir*, and given a 'fidl'
    palette showing only the 5-99% positive range.

    Returns the path of the newly written SBRef NIfTI file.
    """
    sbref_path = os.path.join(temp_dir, f'{user_settings.fmri_name}_SBRef.nii.gz')
    series_path = os.path.join(
        user_settings.work_dir,
        user_settings.subject,
        'MNINonLinear',
        'Results',
        user_settings.fmri_name,
        f'{user_settings.fmri_name}.nii.gz',
    )
    # Collapse the 4D series into a single mean volume.
    run(['wb_command', '-volume-reduce', series_path, 'MEAN', sbref_path])
    # Hide negatives/zeros and auto-scale the positive 5-99% range with 'fidl'.
    palette_cmd = [
        'wb_command', '-volume-palette', sbref_path, 'MODE_AUTO_SCALE_PERCENTAGE',
        '-disp-neg', 'false', '-disp-zero', 'false',
        '-pos-percent', '5', '99', '-palette-name', 'fidl',
    ]
    run(palette_cmd)
    return sbref_path
class OptionSeriesVennData(Options):
    # Generated option wrapper for one data point of a Highcharts "venn" series.
    # Every scalar option is emitted as a getter (returns the configured value,
    # falling back to the default passed to _config_get) immediately followed by
    # a same-named setter that stores the value via _config; *_sub_data methods
    # return lazily-created nested option objects instead.
    # NOTE(review): in plain Python the second def of each name shadows the
    # first — the generator almost certainly emitted @property / @<name>.setter
    # decorators that were stripped from this copy. Confirm before editing.
    def accessibility(self) -> 'OptionSeriesVennDataAccessibility':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('accessibility', OptionSeriesVennDataAccessibility)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def colorIndex(self):
        return self._config_get(None)
    def colorIndex(self, num: float):
        self._config(num, js_type=False)
    def custom(self):
        return self._config_get(None)
    def custom(self, value: Any):
        self._config(value, js_type=False)
    def dataLabels(self) -> 'OptionSeriesVennDataDatalabels':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('dataLabels', OptionSeriesVennDataDatalabels)
    def description(self):
        return self._config_get(None)
    def description(self, text: str):
        self._config(text, js_type=False)
    def dragDrop(self) -> 'OptionSeriesVennDataDragdrop':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('dragDrop', OptionSeriesVennDataDragdrop)
    def drilldown(self):
        return self._config_get(None)
    def drilldown(self, text: str):
        self._config(text, js_type=False)
    def events(self) -> 'OptionSeriesVennDataEvents':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('events', OptionSeriesVennDataEvents)
    def id(self):
        return self._config_get(None)
    def id(self, text: str):
        self._config(text, js_type=False)
    def labelrank(self):
        return self._config_get(None)
    def labelrank(self, num: float):
        self._config(num, js_type=False)
    def name(self):
        return self._config_get(None)
    # NOTE(review): the setter for `name` is typed `num: float`, unlike the
    # usual `text: str` for name-like options — looks like a generator quirk;
    # verify against the upstream Highcharts API definition.
    def name(self, num: float):
        self._config(num, js_type=False)
    def selected(self):
        return self._config_get(False)
    def selected(self, flag: bool):
        self._config(flag, js_type=False)
    def sets(self):
        return self._config_get(None)
    def sets(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsTreemapSonificationContexttracksMappingRate(Options):
    # Generated option wrapper for the sonification "rate" mapping of treemap
    # context tracks. Each option name appears twice: first a getter (returns
    # the configured value or the default passed to _config_get), then a
    # setter that stores the value.
    # NOTE(review): the @property / @<name>.setter decorators appear to have
    # been stripped from this copy — as plain defs the setter shadows the
    # getter. Confirm against the generated original before editing.
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the bare names `_pkg_resources`, `_mypy` and `_numpy_typing`
# below look like residue of stripped decorators (most likely skip/requires
# markers gating these tests on pkg_resources, mypy and numpy.typing being
# available). Confirm against version control before relying on them.
_pkg_resources
_mypy
class TestAnnotations(TestCase, MypyAssertions):
    # Runs mypy over bundled example files and asserts each produces the
    # expected type errors (via the MypyAssertions mixin).
    def test_all(self, filename_suffix=''):
        # Check every example module (optionally filtered by suffix), each in
        # its own subtest so failures are reported per file.
        examples_dir = Path(pkg_resources.resource_filename('traits.stubs_tests', 'examples'))
        for file_path in examples_dir.glob('*{}.py'.format(filename_suffix)):
            with self.subTest(file_path=file_path):
                self.assertRaisesMypyError(file_path)
    _numpy_typing
    def test_numpy_examples(self):
        # Same check for the numpy-specific example modules.
        examples_dir = Path(pkg_resources.resource_filename('traits.stubs_tests', 'numpy_examples'))
        for file_path in examples_dir.glob('*.py'):
            with self.subTest(file_path=file_path):
                self.assertRaisesMypyError(file_path)
# NOTE(review): the bare string below looks like a stripped registry
# decorator (e.g. a backend registration for 'cuda.gather.gen_function').
('cuda.gather.gen_function')
def gen_function(func_attrs):
    """Render the CUDA source for a gather operator.

    Reads the data tensor, index tensor and output tensor from *func_attrs*,
    derives their CUDA element types, and renders the kernel plus dispatch
    source from the module templates.

    Raises TypeError when the input and output dtypes differ.
    """
    op_inputs = func_attrs['inputs']
    data_tensor = op_inputs[0]
    index_tensor = op_inputs[1]
    out_tensor = func_attrs['outputs'][0]
    rank = len(data_tensor._attrs['shape'])
    elem_type = cuda_common.dtype_to_cuda_type(data_tensor._attrs['dtype'])
    idx_type = cuda_common.dtype_to_cuda_type(index_tensor._attrs['dtype'])
    out_type = cuda_common.dtype_to_cuda_type(out_tensor._attrs['dtype'])
    if elem_type != out_type:
        raise TypeError('input type must equal to output type')
    exec_paths = EXEC_COND_TEMPLATE.render(
        indent='  ',
        rank=rank,
        elem_type=elem_type,
        elems_per_thread=2,
        threads_per_block=128,
    )
    kernel_src = KERNEL_SRC_TEMPLATE.render(index_type=idx_type, elem_type=elem_type)
    return SRC_TEMPLATE.render(
        kernel_src=kernel_src,
        func_name=func_attrs['name'],
        exec_paths=exec_paths,
    )
class OptionPlotoptionsVennSonificationContexttracksMappingLowpassResonance(Options):
    # Generated option wrapper for the sonification lowpass-filter "resonance"
    # mapping of venn context tracks. Each option name appears twice: first a
    # getter (returns the configured value or the default passed to
    # _config_get), then a setter that stores the value.
    # NOTE(review): the @property / @<name>.setter decorators appear to have
    # been stripped from this copy — as plain defs the setter shadows the
    # getter. Confirm against the generated original before editing.
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesPyramidDatalabels(Options):
    # Generated option wrapper for data labels of a Highcharts "pyramid"
    # series. Scalar options are emitted as a getter (returns the configured
    # value, falling back to the default passed to _config_get) immediately
    # followed by a same-named setter; *_sub_data methods return lazily-created
    # nested option objects.
    # NOTE(review): in plain Python the second def of each name shadows the
    # first — the generator almost certainly emitted @property /
    # @<name>.setter decorators that were stripped from this copy. Confirm
    # before editing.
    def align(self):
        return self._config_get('center')
    def align(self, text: str):
        self._config(text, js_type=False)
    def alignTo(self):
        return self._config_get(None)
    def alignTo(self, text: str):
        self._config(text, js_type=False)
    def allowOverlap(self):
        return self._config_get(False)
    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self) -> 'OptionSeriesPyramidDatalabelsAnimation':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('animation', OptionSeriesPyramidDatalabelsAnimation)
    def backgroundColor(self):
        return self._config_get(None)
    def backgroundColor(self, text: str):
        self._config(text, js_type=False)
    def borderColor(self):
        return self._config_get(None)
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(0)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(0)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def connectorColor(self):
        return self._config_get(None)
    def connectorColor(self, text: str):
        self._config(text, js_type=False)
    def connectorPadding(self):
        return self._config_get(5)
    def connectorPadding(self, num: float):
        self._config(num, js_type=False)
    def connectorShape(self):
        return self._config_get('crookedLine')
    def connectorShape(self, text: str):
        self._config(text, js_type=False)
    def connectorWidth(self):
        return self._config_get(1)
    def connectorWidth(self, num: float):
        self._config(num, js_type=False)
    def crookDistance(self):
        return self._config_get('undefined')
    def crookDistance(self, text: str):
        self._config(text, js_type=False)
    def crop(self):
        return self._config_get(True)
    def crop(self, flag: bool):
        self._config(flag, js_type=False)
    def defer(self):
        return self._config_get(True)
    def defer(self, flag: bool):
        self._config(flag, js_type=False)
    def distance(self):
        return self._config_get(30)
    def distance(self, num: float):
        self._config(num, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def filter(self) -> 'OptionSeriesPyramidDatalabelsFilter':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('filter', OptionSeriesPyramidDatalabelsFilter)
    def format(self):
        return self._config_get('undefined')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get('function () { return this.point.isNull ? void 0 : this.point.name; }')
    def formatter(self, text: str):
        # js_type=True — presumably emitted as raw JavaScript (a callback
        # function) rather than a quoted string; verify against Options._config.
        self._config(text, js_type=True)
    def inside(self):
        return self._config_get(None)
    def inside(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormat(self):
        return self._config_get(None)
    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormatter(self):
        return self._config_get(None)
    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)
    def overflow(self):
        return self._config_get('justify')
    def overflow(self, text: str):
        self._config(text, js_type=False)
    def padding(self):
        return self._config_get(5)
    def padding(self, num: float):
        self._config(num, js_type=False)
    def position(self):
        return self._config_get('center')
    def position(self, text: str):
        self._config(text, js_type=False)
    def rotation(self):
        return self._config_get(0)
    def rotation(self, num: float):
        self._config(num, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def shape(self):
        return self._config_get('square')
    def shape(self, text: str):
        self._config(text, js_type=False)
    def softConnector(self):
        return self._config_get(True)
    def softConnector(self, flag: bool):
        self._config(flag, js_type=False)
    def style(self):
        return self._config_get(None)
    def style(self, value: Any):
        self._config(value, js_type=False)
    def textPath(self) -> 'OptionSeriesPyramidDatalabelsTextpath':
        # Nested sub-configuration object (created on first access).
        return self._config_sub_data('textPath', OptionSeriesPyramidDatalabelsTextpath)
    def useHTML(self):
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
    def x(self):
        return self._config_get(0)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get(0)
    def y(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(6)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
class TestFrozenFieldHook(unittest.TestCase):
    """Checks the generic update hook keeps highest_pressure at the maximum seen."""

    def test_init_update_events_generic_hook(self) -> None:
        instance = DummyInstance('01', 'Tupper01', '//fbsource', '//fbsource:output', 25)
        # The constructor value seeds the high-water mark.
        self.assertEqual(instance.highest_pressure, 25)
        # Raising pressure moves the mark; lowering it must not.
        for new_pressure, expected_peak in ((70, 70), (50, 70)):
            instance.pressure = new_pressure
            self.assertEqual(instance.highest_pressure, expected_peak)
def send_email(user, email_content):
    """Send *email_content* as today's vocab email to the user via SES.

    Returns the raw SES send_email API response.
    """
    subject = ('Daily vocab word - ' + datetime.today().strftime('%b. %d, %Y'))
    destination = {'ToAddresses': [user.email_address]}
    message = {
        'Subject': {'Charset': 'UTF-8', 'Data': subject},
        'Body': {'Html': {'Charset': 'UTF-8', 'Data': email_content}},
    }
    # NOTE(review): the Source value looks truncated ('Haohaotiantian <>') —
    # confirm the sender address survived whatever sanitised this file.
    return ses_client.send_email(
        Source='Haohaotiantian <>',
        Destination=destination,
        Message=message,
    )
def test_points_in_polygon(reekset):
    """Eliminate points inside/outside the polygons and verify remaining counts."""
    points_orig, polygons = reekset
    points = points_orig.copy()
    assert points.nrow == 30
    # Eliminate points inside the polygons (version 2): 19 remain.
    points.operation_polygons(polygons, 0, opname='eli', version=2)
    assert points.nrow == 19
    # Then eliminate the outside points too (version 1): nothing remains.
    points.operation_polygons(polygons, 0, opname='eli', inside=False, version=1)
    assert points.nrow == 0
    # Fresh copy: eliminating only the outside points (version 2) leaves 11.
    points = points_orig.copy()
    points.operation_polygons(polygons, 0, opname='eli', inside=False, version=2)
    print(points.dataframe)
    assert points.nrow == 11
class WebhookBase():
    # Shared SQLAlchemy mixin for policy webhook models (pre-/post-execution).
    # NOTE(review): the bare `_attr` statements below look like stripped
    # decorators — almost certainly @declared_attr, which SQLAlchemy needs for
    # per-subclass table name and foreign-key columns. Confirm against
    # version control.
    _attr
    def __tablename__(cls) -> str:
        # Table name defaults to the lower-cased subclass name.
        return cls.__name__.lower()
    # Human-readable webhook name (unique across the table).
    name = Column(String, unique=True, nullable=False)
    # Stable lookup key (unique, indexed).
    key = Column(String, index=True, unique=True, nullable=False)
    _attr
    def policy_id(cls: 'WebhookBase') -> Column:
        # Foreign key to the owning Policy, declared per subclass.
        return Column(String, ForeignKey(Policy.id_field_path), nullable=False)
    _attr
    def connection_config_id(cls: 'WebhookBase') -> Column:
        # Foreign key to the ConnectionConfig used to reach the webhook.
        return Column(String, ForeignKey(ConnectionConfig.id_field_path), nullable=False)
    # Direction enum (WebhookDirection) for this webhook.
    direction = Column(EnumColumn(WebhookDirection), nullable=False)
    # Zero-based position within the policy's webhook sequence.
    order = Column(Integer, nullable=False)
    def reorder_related_webhooks(self, db: Session, new_index: int) -> None:
        """Move this webhook to *new_index* and renumber its siblings.

        Raises WebhookOrderException when new_index is outside the range of
        webhooks of this type defined on the policy. Commits the session.
        """
        cls = self.__class__
        # All webhooks of this concrete type on the same policy, in current
        # `order`; `cls.prefix` is expected to be defined on the subclass.
        webhooks = getattr(self.policy, f'{cls.prefix}_execution_webhooks').order_by(cls.order)
        if ((new_index > (webhooks.count() - 1)) or (new_index < 0)):
            raise WebhookOrderException(f'Cannot set order to {new_index}: there are only {webhooks.count()} {cls.__name__}(s) defined on this Policy.')
        webhook_order = [webhook.key for webhook in webhooks]
        # Pop our key from its current slot and re-insert it at the target.
        webhook_order.insert(new_index, webhook_order.pop(self.order))
        for webhook in webhooks:
            webhook.update(db=db, data={'order': webhook_order.index(webhook.key)})
        db.commit()
class ChooserButton():
    """Dress a plain Gtk.Button up as a drop-down chooser.

    The button gets (or re-uses) an inner label plus a down arrow; clicking it
    pops up an attached Gtk.Menu positioned directly under the button. The
    displayed text falls back to *default_label* when no value is set.
    """

    def __init__(self, button, default_label=''):
        self.button = button
        self.default_label = default_label
        self.label = None
        self._menu = None
        self._icon = None
        # Prefer an existing Gtk.Label inside the button (possibly wrapped in
        # a box); otherwise rebuild the button content as [label | arrow].
        children = self.button.get_children()
        if ((len(children) == 1) and isinstance(children[0], (Gtk.HBox, Gtk.Box))):
            children = children[0].get_children()
        for child in children:
            if isinstance(child, Gtk.Label):
                self.label = child
                break
        else:
            for child in list(button.get_children()):
                button.remove(child)
            hbox = Gtk.HBox()
            self.label = Gtk.Label()
            arrow = Gtk.Arrow(arrow_type=Gtk.ArrowType.DOWN, shadow_type=Gtk.ShadowType.IN)
            hbox.set_spacing(2)
            hbox.pack_start(self.label, True, True, 0)
            hbox.pack_end(arrow, False, False, 0)
            button.add(hbox)
        if (not self.label):
            raise ValueError(('%s is not a ChooserButton' % button.get_name()))
        self.connect('clicked', self._show_menu)
        self.reset()

    def set_sensitive(self, value):
        """Proxy Gtk.Widget.set_sensitive to the wrapped button."""
        self.button.set_sensitive(value)

    def get_sensitive(self):
        """Proxy Gtk.Widget.get_sensitive of the wrapped button."""
        return self.button.get_sensitive()

    def is_sensitive(self):
        """Proxy Gtk.Widget.is_sensitive of the wrapped button."""
        return self.button.is_sensitive()

    def connect(self, _type, *args):
        """Connect a signal handler on the wrapped button; returns handler id."""
        return self.button.connect(_type, *args)

    def disconnect(self, *args):
        """Disconnect a signal handler from the wrapped button."""
        self.button.disconnect(*args)

    def get_text(self):
        """Return the currently chosen text, or None after reset()."""
        return self.text

    def set_text(self, text):
        """Display *text* on the button; falsy text restores the default label."""
        if not text:
            # BUGFIX: previously this fell through after reset(), overwriting
            # the default label with '' and calling label.set_text(None) for
            # None input (a TypeError in PyGObject). Stop after resetting.
            self.reset()
            return
        self.text = text
        self.label.set_text(self.text)

    def set_stock_icon(self, name, size=Gtk.IconSize.MENU):
        """Show a stock icon before the label, creating the image on first use."""
        if (self._icon is None):
            self._icon = Gtk.Image()
            hbox = self.button.get_child()
            hbox.pack_start(self._icon, True, True, 0)
            hbox.reorder_child(self._icon, 0)
        self._icon.set_from_stock(name, size)

    def reset(self):
        """Clear the chosen value and show the default label."""
        self.text = None
        self.label.set_text(self.default_label)

    def set_menu(self, menu):
        """Attach *menu* as the drop-down; pass a falsy value to keep it unattached."""
        self._menu = menu
        if menu:
            menu.attach_to_widget(self.button, self._detach_menu)

    def get_menu(self):
        """Return the currently attached menu (or None)."""
        return self._menu

    # NOTE(review): GTK invokes the detach callback with (attach_widget, menu)
    # arguments; this zero-argument signature may need updating — confirm
    # against the PyGObject version in use.
    def _detach_menu(self):
        self._menu = None

    def _show_menu(self, *dummy):
        # 'clicked' handler: pop the menu up under the button (no-op when no
        # menu is attached).
        if (not self._menu):
            return
        self._menu.popup(None, None, self._menu_position_func, 0, 0, 0)

    def _menu_position_func(self, menu, dummy):
        """Compute a popup position aligned to the button's bottom-left corner.

        Widens the menu to at least the button's width, and flips it above the
        button when it would extend past the bottom of the screen.
        """
        allocation = self.button.get_allocation()
        req = menu.size_request()
        menu_width = req.width
        menu_height = req.height
        if (menu_width != allocation.width):
            # Let the menu renegotiate its natural size, then pin it to the
            # button width unless its content needs more room.
            menu.set_size_request((- 1), (- 1))
            req = menu.size_request()
            if (req.width > allocation.width):
                menu.set_size_request(req.width, req.height)
            else:
                menu.set_size_request(allocation.width, (- 1))
        (x, y) = self.button.get_parent_window().get_origin()[1:]
        x += allocation.x
        y += (allocation.y + allocation.height)
        root = self.button.get_root_window()
        (dummy, dummy, dummy, root_height) = root.get_geometry()
        if ((y + menu_height) > root_height):
            # Not enough room below: open the menu above the button instead.
            y -= (menu_height + allocation.height)
        return (x, y, True)
def extractLittlesleepysheepBlogspotCom(item):
    """Map a parsed feed item to a release message for known series.

    Returns None for preview posts or items without a chapter/volume,
    a release message when one of the known tags matches, and False
    when no tag matches.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    # Bail out when nothing parseable was found, or for preview posts.
    if (not chp and not vol) or 'preview' in title.lower():
        return None
    series_by_tag = (
        ("transmigrated to another world where only men exist~bl isn't allowed!~", "Transmigrated to Another World Where Only Men Exist ~BL isn't allowed!~", 'translated'),
        ('this cannon fodder is covered by me!', 'This Cannon Fodder is Covered by Me!', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    tags = item['tags']
    for tag, series_name, tl_type in series_by_tag:
        if tag in tags:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TestImportantGitExternalDataChecker(_TestWithInlineManifest):
    # Exercises the "important source" gating for git external-data checks:
    # each test feeds an inline flatpak manifest through self._test_update and
    # asserts whether the manifest is rewritten depending on which sources are
    # flagged is-important / is-main-source and which actually get an update.
    # The dummy checker unconditionally "updates" every git source to its
    # TAG/COMMIT constants, isolating the gating logic from real lookups.
    # NOTE(review): the YAML indentation inside the fixture strings (and the
    # stripped url:/commit: values) was mangled in this copy and has been
    # reconstructed conventionally — verify against version control.
    _DUMMY_CHECKER_CLS = GitUpdateEverythingChecker
    def setUp(self):
        init_logging()
    async def test_update_no_important_source_updated(self):
        # The only important source is already current -> no manifest rewrite.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = "\nid: importantsource.com.virustotal.Uploader\nmodules:\n  - name: extra-cmake-modules\n    sources:\n      - type: git\n        url: tag: 0.0.0.0\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^v(\\d[\\d.]+\\d)$\n  - name: vt-py\n    sources:\n      # Current is valid, no updates\n      - type: git\n        url: tag: 1.2.3.4\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^(0.5.4)$\n          sort-tags: false\n          is-important: true\n      # since this is marked as the only important source, \n      # but isn't getting updated, manifest should not be updated\n".lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^v(\d[\d.]+\d)$
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: 1.2.3.4
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # since this is marked as the only important source,
      # but isn't getting updated, manifest should not be updated
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=[], expected_data_count=2, new_release=False, require_important_update=True))
    async def test_update_one_important_source_updated(self):
        # The single important source gets an update -> whole manifest rewritten.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-important: true
      # since this is marked as the only important source,
      # and is actually getting updated, manifest should be updated
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
'''.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-important: true
      # since this is marked as the only important source,
      # and is actually getting updated, manifest should be updated
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=['Update extra-cmake-modules to 1.2.3.4'], expected_data_count=2, new_release=False, require_important_update=True))
    async def test_update_two_important_sources_first_updated(self):
        # Two important sources; the first gets an update -> manifest rewritten.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = '\nid: importantsource.com.virustotal.Uploader\nmodules:\n  - name: extra-cmake-modules\n    sources:\n      - type: git\n        url: tag: 0.0.0.0\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^(v5\\.90\\.0)$ # to ensure we only get this version\n          is-important: true\n      # since this is marked as a important source, \n      # and is getting updated, manifest should be updated\n  - name: vt-py\n    sources:\n      # Current is valid, no updates\n      - type: git\n        url: tag: 1.2.3.4\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^(0.5.4)$\n          sort-tags: false\n          is-important: true \n      # this is marked as important and not being updated, \n      # yet the manifest should still be updated since the previous source is important and getting an update.\n'.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-important: true
      # since this is marked as a important source,
      # and is getting updated, manifest should be updated
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # this is marked as important and not being updated,
      # yet the manifest should still be updated since the previous source is important and getting an update.
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=['Update extra-cmake-modules to 1.2.3.4', 'Update vt-py.git to 1.2.3.4'], expected_data_count=2, new_release=False, require_important_update=True))
    async def test_update_two_important_sources_second_updated(self):
        # Two important sources; the second (later) one gets the update.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # this is marked as important and not being updated,
      # yet the manifest should still be updated since the next source is important and getting an update.
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-important: true
      # since this is marked as a important source,
      # and is getting updated, manifest should be updated
'''.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # this is marked as important and not being updated,
      # yet the manifest should still be updated since the next source is important and getting an update.
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-important: true
      # since this is marked as a important source,
      # and is getting updated, manifest should be updated
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=['Update extra-cmake-modules to 1.2.3.4'], expected_data_count=2, new_release=True, require_important_update=True))
    async def test_update_no_main_source_updated(self):
        # is-main-source counts as important; it is current -> no rewrite.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = "\nid: importantsource.com.virustotal.Uploader\nmodules:\n  - name: extra-cmake-modules\n    sources:\n      - type: git\n        url: tag: 0.0.0.0\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^v(\\d[\\d.]+\\d)$\n  - name: vt-py\n    sources:\n      # Current is valid, no updates\n      - type: git\n        url: tag: 1.2.3.4\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^(0.5.4)$\n          sort-tags: false\n          is-main-source: true\n      # since this is marked as the only important/main source,\n      # but isn't getting updated, manifest should not be updated\n".lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^v(\d[\d.]+\d)$
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: 1.2.3.4
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-main-source: true
      # since this is marked as the only important/main source,
      # but isn't getting updated, manifest should not be updated
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=[], expected_data_count=2, new_release=False, require_important_update=True))
    async def test_update_one_main_source_updated(self):
        # The main source gets an update -> manifest rewritten, new release.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-main-source: true
      # since this is marked as the only main source,
      # and is actually getting updated, manifest should be updated
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
'''.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-main-source: true
      # since this is marked as the only main source,
      # and is actually getting updated, manifest should be updated
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=['Update extra-cmake-modules to 1.2.3.4'], expected_data_count=2, new_release=True, require_important_update=True))
    async def test_require_important_source_disabled_no_important_source_updated(self):
        # With require_important_update=False the gating is off: everything updates.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = '\nid: importantsource.com.virustotal.Uploader\nmodules:\n  - name: extra-cmake-modules\n    sources:\n      - type: git\n        url: tag: 0.0.0.0\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^v(\\d[\\d.]+\\d)$\n  - name: vt-py\n    sources:\n      # Current is valid, no updates\n      - type: git\n        url: tag: 0.0.0.0\n        commit: \n        x-checker-data:\n          type: git\n          tag-pattern: ^(0.5.4)$\n          sort-tags: false\n          is-important: true\n      # manifest should still be updated since the require_important source is disabled\n'.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^v(\d[\d.]+\d)$
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # manifest should still be updated since the require_important source is disabled
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=['Update extra-cmake-modules to 1.2.3.4', 'Update vt-py.git to 1.2.3.4'], expected_data_count=2, new_release=True, require_important_update=False))
    async def test_main_source_not_important(self):
        # is-important: false overrides is-main-source -> no rewrite at all.
        filename = 'importantsource.com.virustotal.Uploader.yml'
        contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # this is marked as important and not being updated
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-main-source: true
          is-important: false
      # since this is marked as the main source but is not important, it should not be updated
'''.lstrip()
        expected_new_contents = f'''
id: importantsource.com.virustotal.Uploader
modules:
  - name: vt-py
    sources:
      # Current is valid, no updates
      - type: git
        url:
        tag: {GitUpdateEverythingChecker.TAG}
        commit: {GitUpdateEverythingChecker.COMMIT}
        x-checker-data:
          type: git
          tag-pattern: ^(0.5.4)$
          sort-tags: false
          is-important: true
      # this is marked as important and not being updated
  - name: extra-cmake-modules
    sources:
      - type: git
        url:
        tag: 0.0.0.0
        commit:
        x-checker-data:
          type: git
          tag-pattern: ^(v5\.90\.0)$ # to ensure we only get this version
          is-main-source: true
          is-important: false
      # since this is marked as the main source but is not important, it should not be updated
'''.lstrip()
        (await self._test_update(filename=filename, contents=contents, expected_new_contents=expected_new_contents, expected_updates=[], expected_data_count=2, new_release=False, require_important_update=True))
def setup_to_fail():
    """Generator fixture: remove the base nftables filter chains, yield, restore them.

    Flushes and deletes the inet filter input/forward/output chains so the
    code under test runs without them, then recreates the chains (with the
    default filter hook at priority 0) after the test resumes the generator.
    """
    chains = ('input', 'forward', 'output')
    # A chain must be flushed before it can be deleted.
    for chain in chains:
        shellexec('nft flush chain inet filter ' + chain)
    for chain in chains:
        shellexec('nft delete chain inet filter ' + chain)
    print(shellexec('nft list ruleset'))
    yield None
    # Teardown: recreate the base chains.
    for chain in chains:
        shellexec('nft create chain inet filter %s { type filter hook %s priority 0 \\; }' % (chain, chain))
def get_installed_packages(project_path: Path) -> Tuple[(List, List)]:
    """Classify packages recorded in the project's package manifest.

    A package is *modified* when any of its source files is missing or has a
    changed md5; it is *deleted* (and uninstalled via remove_package) when all
    of its source files are gone. Everything else counts as installed.

    Returns a pair of sorted (name, version) lists: (installed, modified).
    """
    manifest = _load_packages_json(project_path)
    all_packages = manifest['packages']
    installed: Set = set(all_packages)
    modified: Set = set()
    deleted: Set = set(all_packages)
    for rel_path in list(manifest['sources']):
        source_info = manifest['sources'][rel_path]
        pkg_names = source_info['packages']
        source_file = project_path.joinpath(rel_path)
        if not source_file.exists():
            # Missing source: its packages are at least modified, maybe deleted.
            installed.difference_update(pkg_names)
            modified.update(pkg_names)
            continue
        # At least one source survives, so these packages are not deleted.
        deleted.difference_update(pkg_names)
        with source_file.open('rb') as fh:
            payload = fh.read()
        if hashlib.md5(payload).hexdigest() != source_info['md5']:
            modified.update(pkg_names)
    # Fully-deleted packages are not "modified"; modified ones are not "installed".
    modified.difference_update(deleted)
    installed.difference_update(modified)
    for pkg_name in deleted:
        remove_package(project_path, pkg_name, True)
    installed_report = [(name, all_packages[name]['version']) for name in sorted(installed)]
    modified_report = [(name, all_packages[name]['version']) for name in sorted(modified)]
    return (installed_report, modified_report)
def test_unslice_will_set_the_pixel_aspect_to_1(prepare_scene, create_pymel):
    """Slicing then un-slicing must reset pixelAspect to 1 and keep the resolution."""
    cam = prepare_scene
    pymel = create_pymel
    resolution = pymel.PyNode('defaultResolution')
    resolution.width.set(1920)
    resolution.height.set(1080)
    resolution.pixelAspect.set(2.5)
    slicer = RenderSlicer(cam)
    slicer.slice(10, 10)
    slicer.unslice()
    # Only the pixel aspect is restored; width/height stay as configured.
    assert resolution.pixelAspect.get() == 1
    assert resolution.width.get() == 1920
    assert resolution.height.get() == 1080
def extractVoidTranslations(item):
    """Map a parsed feed item to a release message for Void Translations series.

    Returns None for previews or items without a chapter/volume, a release
    message for 'Xian Ni' titles or known series tags, and False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if (not chp and not vol) or 'preview' in title.lower():
        return None
    # 'Xian Ni' chapters carry the chapter title after the number.
    xianni_match = re.search('^Xian Ni Chapter \\d+ ?[\\-]? ?(.*)$', title)
    if xianni_match:
        return buildReleaseMessageWithType(item, 'Xian Ni', vol, chp, postfix=xianni_match.group(1))
    series_by_tag = (
        ('Everlasting Immortal Firmament', 'Everlasting Immortal Firmament', 'translated'),
        ("Post-80s' Cultivation Journal", "Post-80s' Cultivation Journal", 'translated'),
        ('My Daoist Life', 'My Daoist Life', 'translated'),
        ('Reaching to the Sky', 'Reaching to the Sky', 'translated'),
    )
    for tag, series_name, tl_type in series_by_tag:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def plot_figure(data, x, y, title=None, xlabel=None, ylabel=None, legend=None,
                x_axis_type='linear', y_axis_type='linear', width=800, height=400,
                line_width=2,
                colors=('red', 'green', 'blue', 'orange', 'black', 'purple', 'brown'),
                tools='pan,box_zoom,wheel_zoom,box_select,hover,reset,save',
                append_figure=None):
    """Plot one or more y columns of ``data`` against ``x`` as Bokeh lines.

    Parameters
    ----------
    data : mapping of column name -> sequence (e.g. dict or DataFrame)
    x : name of the x column
    y : name, or list of names, of y columns to plot
    legend : per-line labels; defaults to the y column names
    colors : iterable of line colors, cycled per line (fix: the original
        used a mutable ``list`` default argument; a tuple is safe and
        callers may still pass their own list)
    append_figure : existing Bokeh figure to draw into; when None a new
        figure is created with the given title/axes/tools

    Returns the figure, with click-to-hide legend policy enabled.
    """
    if not isinstance(y, list):
        y = [y]
    xlabel = xlabel or x
    legend = legend or y
    # One legend label per plotted column.
    assert len(legend) == len(y)
    if append_figure is not None:
        f = append_figure
    else:
        f = figure(title=title, tools=tools, width=width, height=height,
                   x_axis_label=(xlabel or x), y_axis_label=(ylabel or ''),
                   x_axis_type=x_axis_type, y_axis_type=y_axis_type)
    colors = cycle(colors)
    for i, yi in enumerate(y):
        f.line(data[x], data[yi], line_width=line_width,
               line_color=next(colors), legend_label=legend[i])
    f.legend.click_policy = 'hide'
    return f
def find_first(node: LN, target: int, recursive: bool=False) -> Optional[LN]:
    """Return the first node of type ``target`` among ``node`` and its children.

    Non-recursive: only ``node`` itself and its direct children are examined,
    in FIFO order. With ``recursive=True`` the search descends depth-first,
    visiting each examined node's children before its pending siblings —
    identical visit order to the original implementation.

    Returns None when no node of the requested type is found.

    Fix: the original used ``list.pop(0)`` (O(n) per pop) and rebuilt the
    queue with ``child.children + queue`` (O(n) copy per step), which is
    accidentally quadratic; a deque makes both operations O(1)/O(k).
    """
    from collections import deque  # local import: file-level imports not visible here
    queue = deque([node])
    queue.extend(node.children)
    while queue:
        candidate = queue.popleft()
        if candidate.type == target:
            return candidate
        if recursive:
            # Depth-first: put this node's children at the front, in order.
            queue.extendleft(reversed(candidate.children))
    return None
def chord_topology(m, r=1):
    """Build a directed Chord-style overlay on 2^m nodes.

    Every node v gets a finger edge to (v + 2^u) mod 2^m for each u < m.
    When r > 2, additional successor edges v -> v+3 .. v+r (mod 2^m) are
    added. Raises TypeError for non-int arguments and ValueError for
    out-of-range m or r.
    """
    if not isinstance(m, int) or not isinstance(r, int):
        raise TypeError('m and r must be integers')
    if m < 2:
        raise ValueError('m must be an integer >= 2')
    if r < 1 or r > ((2 ** m) - 1):
        raise ValueError('r must be an integer and 1 <= r <= 2^m')
    ring_size = 2 ** m
    topo = DirectedTopology()
    # Finger table edges: v -> (v + 2^u) mod n for every u in [0, m).
    for node in range(ring_size):
        for exp in range(m):
            topo.add_edge(node, (node + (2 ** exp)) % ring_size)
    # Extra successor edges beyond the first two, only when r > 2.
    if r > 2:
        for node in range(ring_size):
            for succ in range(node + 3, node + r + 1):
                topo.add_edge(node, succ % ring_size)
    return topo
class Dataset(Base, FidesBase):
    """SQLAlchemy model for a Fides CTL dataset record (table ctl_datasets)."""
    __tablename__ = 'ctl_datasets'
    # Free-form JSON metadata attached to the dataset.
    meta = Column(JSON)
    # Fides data-category identifiers applied to the dataset.
    data_categories = Column(ARRAY(String))
    # Serialized collection definitions.
    collections = Column(JSON)
    # Fides-specific metadata blob.
    fides_meta = Column(JSON)
    # NOTE(review): takes `cls` — presumably decorated with @classmethod in
    # the original source (decorator appears stripped in this copy); confirm
    # before calling this as an instance method.
    def create_from_dataset_dict(cls, db: Session, dataset: dict) -> 'Dataset':
        """Validate ``dataset`` against the fideslang schema and persist it.

        Raises whatever validation error FideslangDataset raises on bad
        input; commits the new row and returns the refreshed ORM object.
        """
        validated_dataset: FideslangDataset = FideslangDataset(**dataset)
        ctl_dataset = cls(**validated_dataset.dict())
        db.add(ctl_dataset)
        db.commit()
        # Refresh to pick up DB-generated fields (e.g. the primary key).
        db.refresh(ctl_dataset)
        return ctl_dataset
class OptionPlotoptionsPolygonSonificationDefaultspeechoptions(Options):
    """Generated Highcharts option wrapper for
    plotOptions.polygon.sonification.defaultSpeechOptions.

    NOTE(review): each getter/setter pair below shares one name; in the
    generated original these are presumably @property / @<name>.setter
    pairs (decorators appear stripped in this copy) — confirm upstream.
    """
    def activeWhen(self) -> 'OptionPlotoptionsPolygonSonificationDefaultspeechoptionsActivewhen':
        """Sub-options controlling when the speech track is active."""
        return self._config_sub_data('activeWhen', OptionPlotoptionsPolygonSonificationDefaultspeechoptionsActivewhen)
    def language(self):
        """Speech language (default 'en-US')."""
        return self._config_get('en-US')
    def language(self, text: str):
        """Set the speech language."""
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionPlotoptionsPolygonSonificationDefaultspeechoptionsMapping':
        """Sub-options mapping point values to speech parameters."""
        return self._config_sub_data('mapping', OptionPlotoptionsPolygonSonificationDefaultspeechoptionsMapping)
    def pointGrouping(self) -> 'OptionPlotoptionsPolygonSonificationDefaultspeechoptionsPointgrouping':
        """Sub-options for grouping nearby points."""
        return self._config_sub_data('pointGrouping', OptionPlotoptionsPolygonSonificationDefaultspeechoptionsPointgrouping)
    def preferredVoice(self):
        """Preferred synthesizer voice (no default)."""
        return self._config_get(None)
    def preferredVoice(self, text: str):
        """Set the preferred synthesizer voice."""
        self._config(text, js_type=False)
    def showPlayMarker(self):
        """Whether the play marker is shown (default True)."""
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        """Enable or disable the play marker."""
        self._config(flag, js_type=False)
    def type(self):
        """Track type (default 'speech')."""
        return self._config_get('speech')
    def type(self, text: str):
        """Set the track type."""
        self._config(text, js_type=False)
class OptionLangAccessibilitySeries(Options):
    """Generated Highcharts option wrapper for lang.accessibility.series.

    NOTE(review): each getter/setter pair below shares one name; in the
    generated original these are presumably @property / @<name>.setter
    pairs (decorators appear stripped in this copy) — confirm upstream.
    """
    def description(self):
        """Series description template (default '{description}')."""
        return self._config_get('{description}')
    def description(self, text: str):
        """Set the series description template."""
        self._config(text, js_type=False)
    def nullPointValue(self):
        """Text announced for null points (default 'No value')."""
        return self._config_get('No value')
    def nullPointValue(self, text: str):
        """Set the null-point text."""
        self._config(text, js_type=False)
    def pointAnnotationsDescription(self):
        """Template for point annotations (Handlebars-style default)."""
        return self._config_get('{#each annotations}Annotation: {this}{/each}')
    def pointAnnotationsDescription(self, text: str):
        """Set the point annotations template."""
        self._config(text, js_type=False)
    def summary(self) -> 'OptionLangAccessibilitySeriesSummary':
        """Sub-options holding the series summary strings."""
        return self._config_sub_data('summary', OptionLangAccessibilitySeriesSummary)
    def xAxisDescription(self):
        """X-axis description template (default 'X axis, {name}')."""
        return self._config_get('X axis, {name}')
    def xAxisDescription(self, text: str):
        """Set the x-axis description template."""
        self._config(text, js_type=False)
    def yAxisDescription(self):
        """Y-axis description template (default 'Y axis, {name}')."""
        return self._config_get('Y axis, {name}')
    def yAxisDescription(self, text: str):
        """Set the y-axis description template."""
        self._config(text, js_type=False)
class ProcessNode(TestNode):
    """Test node whose spec is built by `make`."""
    # NOTE(review): `make` takes `cls` — presumably decorated (e.g.
    # @classmethod or a registration decorator) in the original source;
    # decorators appear stripped in this copy. Confirm before use.
    def make(cls, name: str, rate: float, process: Optional[int]=process.ENVIRONMENT, inputs: Optional[List[str]]=None, outputs: Optional[List[str]]=None, states: Optional[List[str]]=None, color: Optional[str]='white', test_arg: Optional[str]='test_argument'):
        """Build and return a node specification.

        The ``process`` default is read from the module-level ``process``
        object at definition time; inside the body the parameter shadows it.
        ``inputs``/``outputs``/``states`` fall back to ['in_1'] / ['out_1'] /
        ['state_1'] when not given as lists.
        """
        spec = cls.get_specification()
        spec.config.name = name
        spec.config.rate = rate
        spec.config.process = process
        spec.config.color = color
        spec.config.inputs = (inputs if isinstance(inputs, list) else ['in_1'])
        spec.config.outputs = (outputs if isinstance(outputs, list) else ['out_1'])
        spec.config.states = (states if states else ['state_1'])
        spec.config.test_arg = test_arg
        # Disable windowing on every declared input — in_2/in_3/tick are
        # configured even when not listed in `inputs`; presumably they are
        # declared on the spec itself (confirm against the node definition).
        spec.inputs.in_1.window = 0
        spec.inputs.in_2.window = 0
        spec.inputs.in_3.window = 0
        spec.inputs.tick.window = 0
        return spec
def login_user_exist(form, field):
    """Form validator: check the username exists and the password matches.

    Appends a message to ``field.errors`` and returns False on failure;
    returns True when the credentials check out.
    """
    user_query = FlicketUser.query.filter_by(username=form.username.data)
    if user_query.count() == 0:
        field.errors.append('Invalid username.')
        return False
    user = user_query.first()
    # bcrypt re-hashes the supplied password with the stored hash as salt;
    # a correct password reproduces the stored hash exactly.
    hashed = bcrypt.hashpw(form.password.data.encode('utf-8'), user.password)
    if hashed != user.password:
        field.errors.append('Invalid password.')
        return False
    return True
def create_logger():
    """Create the 'nodeenv' logger writing bare messages via a StreamHandler.

    Side effect: monkey-patches ``logging.StreamHandler.emit`` process-wide
    so that records flagged with a truthy ``continued`` attribute are
    written without a trailing newline (letting a later record finish the
    same output line).
    """
    logger = logging.getLogger('nodeenv')
    logger.setLevel(logging.INFO)

    def emit(self, record):
        message = self.format(record)
        # 'continued' records suppress the trailing newline.
        template = '%s' if getattr(record, 'continued', False) else '%s\n'
        self.stream.write(template % to_utf8(message))
        self.flush()

    # Global patch: affects every StreamHandler in the process.
    logging.StreamHandler.emit = emit

    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(fmt='%(message)s'))
    logger.addHandler(handler)
    return logger
class TestHtml5OnlySelectors(util.PluginTestCase):
    """Spellcheck tests for CSS selectors used as `ignores` filters that
    require the html5 parsing mode (:only-child, :only-of-type)."""
    def setup_fs(self):
        """Write the HTML fixture (stored as a .txt source) into the temp dir."""
        template = self.dedent('\n        <!DOCTYPE html>\n        <html>\n        <head>\n        <meta content="text/html; charset=UTF-8">\n        </head>\n        <body>\n        <div>\n        <p>aaaa</p>\n        </div>\n        <span>bbbb</span>\n        <span>cccc</span>\n        <p>dddd</p>\n        <span>eeee</span>\n        <span>ffff</span>\n        <div>\n        <p>gggg</p>\n        <p>hhhh</p>\n        </div>\n        </body>\n        </html>\n        ')
        self.mktemp('test.txt', template, 'utf-8')
    def test_css_only_child(self):
        """`p:only-child` should ignore only 'aaaa' (the sole <p> in its div)."""
        config = self.dedent("\n            matrix:\n            - name: html_css\n              sources:\n              - '{}/**/*.txt'\n              aspell:\n                lang: en\n                d: en_US\n              hunspell:\n                d: en_US\n              pipeline:\n              - pyspelling.filters.html:\n                  mode: html5\n                  ignores:\n                  - 'p:only-child'\n            ").format(self.tempdir)
        self.mktemp('.html5.yml', config, 'utf-8')
        self.assert_spellcheck('.html5.yml', ['bbbb', 'cccc', 'dddd', 'eeee', 'ffff', 'gggg', 'hhhh'])
    def test_css_only_type(self):
        """`p:only-of-type` additionally ignores 'dddd' (the only <p> among
        its siblings), so it is absent from the expected misspellings."""
        config = self.dedent("\n            matrix:\n            - name: html_css\n              sources:\n              - '{}/**/*.txt'\n              aspell:\n                lang: en\n                d: en_US\n              hunspell:\n                d: en_US\n              pipeline:\n              - pyspelling.filters.html:\n                  mode: html5\n                  ignores:\n                  - 'p:only-of-type'\n            ").format(self.tempdir)
        self.mktemp('.html5.yml', config, 'utf-8')
        self.assert_spellcheck('.html5.yml', ['bbbb', 'cccc', 'eeee', 'ffff', 'gggg', 'hhhh'])
def test_flyte_system_exception():
    """FlyteSystemException should carry its message, be produced by the
    coded-exception metaclass, expose the SYSTEM:Unknown error code, and
    be a FlyteException subclass."""
    try:
        raise system.FlyteSystemException('bad')
    except Exception as err:
        assert str(err) == 'bad'
        # The exception class itself is an instance of the coded metaclass.
        assert isinstance(type(err), base._FlyteCodedExceptionMetaclass)
        assert type(err).error_code == 'SYSTEM:Unknown'
        assert isinstance(err, base.FlyteException)
def test_ref_task_more():
    """Reference tasks must be mocked before the workflow can run."""
    # NOTE(review): decorators appear stripped in this copy — the bare
    # `_task(...)` call below was presumably `@_task(...)` on ref_t1, and
    # wf1 presumably carried a workflow decorator. Confirm against the
    # original source; as written, the call result is discarded and
    # ref_t1's body is just `...`.
    _task(project='flytesnacks', domain='development', name='recipes.aaa.simple.join_strings', version='553018f39e519bdb2597b652639c30ce16b99c79')
    def ref_t1(a: typing.List[str]) -> str:
        ...
    def wf1(in1: typing.List[str]) -> str:
        return ref_t1(a=in1)
    # Calling the un-mocked reference task must fail with a mock hint.
    with pytest.raises(Exception) as e:
        wf1(in1=['hello', 'world'])
    assert ('You must mock this out' in f'{e}')
    # Once mocked, the workflow returns the mock's value.
    with task_mock(ref_t1) as mock:
        mock.return_value = 'hello'
        assert (wf1(in1=['hello', 'world']) == 'hello')
class OptionSeriesColumnSonificationContexttracksMappingTime(Options):
    """Generated Highcharts option wrapper for
    series.column.sonification.contextTracks.mapping.time.

    NOTE(review): each getter/setter pair below shares one name; in the
    generated original these are presumably @property / @<name>.setter
    pairs (decorators appear stripped in this copy) — confirm upstream.
    """
    def mapFunction(self):
        """Mapping function for the time value (no default)."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        """Set the mapping function."""
        self._config(value, js_type=False)
    def mapTo(self):
        """Data property the time is mapped to (no default)."""
        return self._config_get(None)
    def mapTo(self, text: str):
        """Set the mapped data property."""
        self._config(text, js_type=False)
    def max(self):
        """Upper bound of the mapped range (no default)."""
        return self._config_get(None)
    def max(self, num: float):
        """Set the upper bound."""
        self._config(num, js_type=False)
    def min(self):
        """Lower bound of the mapped range (no default)."""
        return self._config_get(None)
    def min(self, num: float):
        """Set the lower bound."""
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping operates within (no default)."""
        return self._config_get(None)
    def within(self, value: Any):
        """Set the mapping range context."""
        self._config(value, js_type=False)
class OptionSeriesErrorbarSonificationTracksMappingTremoloSpeed(Options):
    """Generated Highcharts option wrapper for
    series.errorbar.sonification.tracks.mapping.tremolo.speed.

    NOTE(review): each getter/setter pair below shares one name; in the
    generated original these are presumably @property / @<name>.setter
    pairs (decorators appear stripped in this copy) — confirm upstream.
    """
    def mapFunction(self):
        """Mapping function for the tremolo speed (no default)."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        """Set the mapping function."""
        self._config(value, js_type=False)
    def mapTo(self):
        """Data property the speed is mapped to (no default)."""
        return self._config_get(None)
    def mapTo(self, text: str):
        """Set the mapped data property."""
        self._config(text, js_type=False)
    def max(self):
        """Upper bound of the mapped range (no default)."""
        return self._config_get(None)
    def max(self, num: float):
        """Set the upper bound."""
        self._config(num, js_type=False)
    def min(self):
        """Lower bound of the mapped range (no default)."""
        return self._config_get(None)
    def min(self, num: float):
        """Set the lower bound."""
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping operates within (no default)."""
        return self._config_get(None)
    def within(self, value: Any):
        """Set the mapping range context."""
        self._config(value, js_type=False)
class OptionSeriesPyramidSonificationTracksMappingTremolo(Options):
    """Generated Highcharts option wrapper for
    series.pyramid.sonification.tracks.mapping.tremolo."""
    def depth(self) -> 'OptionSeriesPyramidSonificationTracksMappingTremoloDepth':
        """Sub-options mapping the tremolo depth."""
        return self._config_sub_data('depth', OptionSeriesPyramidSonificationTracksMappingTremoloDepth)
    def speed(self) -> 'OptionSeriesPyramidSonificationTracksMappingTremoloSpeed':
        """Sub-options mapping the tremolo speed."""
        return self._config_sub_data('speed', OptionSeriesPyramidSonificationTracksMappingTremoloSpeed)
def test_get_state_serialization():
    """Round-trip a GET_STATE ContractApiMessage through an Envelope."""
    kwargs_arg = ContractApiMessage.Kwargs({'key_1': 1, 'key_2': 2})
    msg = ContractApiMessage(message_id=1, dialogue_reference=(str(0), ''), target=0, performative=ContractApiMessage.Performative.GET_STATE, ledger_id='some_ledger_id', contract_id='some_contract_id', contract_address='some_contract_address', callable='some_callable', kwargs=kwargs_arg)
    msg.to = 'receiver'
    original = Envelope(to=msg.to, sender='sender', message=msg)
    # Encode to bytes and decode back into a fresh envelope.
    decoded = Envelope.decode(original.encode())
    assert original.to == decoded.to
    assert original.sender == decoded.sender
    assert original.protocol_specification_id == decoded.protocol_specification_id
    # The decoded payload is still raw bytes, so it differs from the object.
    assert original.message != decoded.message
    # Deserialize the payload and restore the routing fields.
    recovered = ContractApiMessage.serializer.decode(decoded.message)
    recovered.to = decoded.to
    recovered.sender = decoded.sender
    assert msg == recovered
class EditorWidget(QtGui.QDockWidget):
    """Dock widget hosting a single editor inside the editor area.

    Keeps its title and tooltip in sync with the editor and swaps its
    title-bar widget depending on whether the dock is tabbed with others.
    """
    def __init__(self, editor, parent=None):
        super().__init__(parent)
        self.editor = editor
        self.editor.create(self)
        self.setAllowedAreas(QtCore.Qt.DockWidgetArea.LeftDockWidgetArea)
        self.setFeatures((QtGui.QDockWidget.DockWidgetFeature.DockWidgetClosable | QtGui.QDockWidget.DockWidgetFeature.DockWidgetMovable))
        self.setWidget(editor.control)
        self.update_title()
        # Reserve room for the dock handle on top of the content's own
        # minimum size so the contents are never squeezed by the handle.
        contents_minsize = editor.control.minimumSize()
        style = self.style()
        contents_minsize.setHeight((contents_minsize.height() + style.pixelMetric(style.PixelMetric.PM_DockWidgetHandleExtent)))
        self.setMinimumSize(contents_minsize)
        # Re-evaluate the title bar whenever the dock moves or is shown/hidden.
        self.dockLocationChanged.connect(self.update_title_bar)
        self.visibilityChanged.connect(self.update_title_bar)
    def _remove_event_listeners(self):
        """Disconnect the signal handlers wired up in __init__."""
        self.dockLocationChanged.disconnect(self.update_title_bar)
        self.visibilityChanged.disconnect(self.update_title_bar)
    def update_title(self):
        """Refresh the window title (and tab text, if a custom bar is set)."""
        title = self.editor.editor_area._get_label(self.editor)
        self.setWindowTitle(title)
        title_bar = self.titleBarWidget()
        if isinstance(title_bar, EditorTitleBarWidget):
            title_bar.setTabText(0, title)
    def update_tooltip(self):
        """Push the editor's tooltip onto the custom title bar, if present."""
        title_bar = self.titleBarWidget()
        if isinstance(title_bar, EditorTitleBarWidget):
            title_bar.setTabToolTip(0, self.editor.tooltip)
    def update_title_bar(self):
        """Show the custom title bar only when this dock is not tabbed."""
        if (self not in self.parent()._tear_widgets):
            tabbed = self.parent().tabifiedDockWidgets(self)
            self.set_title_bar((not tabbed))
    def set_title_bar(self, title_bar):
        """Install an EditorTitleBarWidget (truthy ``title_bar``) or a bare
        QWidget that suppresses the native title bar (falsy)."""
        current = self.titleBarWidget()
        editor_area = self.editor.editor_area
        # Only show the custom bar when tab bars are not hidden, or when
        # there is more than one editor to distinguish.
        if (title_bar and editor_area and ((not editor_area.hide_tab_bar) or (len(editor_area.editors) > 1))):
            if (not isinstance(current, EditorTitleBarWidget)):
                self.setTitleBarWidget(EditorTitleBarWidget(self))
        elif ((current is None) or isinstance(current, EditorTitleBarWidget)):
            self.setTitleBarWidget(QtGui.QWidget())
def test_env_variable_interpolation(config, yaml_config_file_3):
    """Values with env-var placeholders resolve after loading the YAML file."""
    config.from_yaml(yaml_config_file_3)
    resolved = {'section1': {'value1': 'test-value', 'value2': 'test-path/path'}}
    # Whole config, the section, and each leaf accessor agree.
    assert config() == resolved
    assert config.section1() == resolved['section1']
    assert config.section1.value1() == 'test-value'
    assert config.section1.value2() == 'test-path/path'
def get_tensor_accessor_alignments(func_attrs):
    """Compute max alignments for both input accessors and the epilogue.

    Returns (a_alignment, b_alignment, epilogue_alignment). The epilogue
    alignment falls back to 1 when the output's innermost original dim is
    not a static IntImm, since it is dynamic in that case.
    """
    in_accessors = func_attrs['input_accessors']
    a_alignment = tensor_accessor_codegen.find_max_alignment_for_accessor(in_accessors[0])
    b_alignment = tensor_accessor_codegen.find_max_alignment_for_accessor(in_accessors[1])
    out_accessor = func_attrs['output_accessors'][0]
    epilogue_alignment = tensor_accessor_codegen.find_max_alignment_for_accessor(out_accessor)
    last_dim = out_accessor.original_shapes[-1]
    # Dynamic innermost dim: no wider vectorized epilogue access is safe.
    if not isinstance(last_dim, IntImm):
        epilogue_alignment = 1
    return (a_alignment, b_alignment, epilogue_alignment)
# NOTE(review): this bare tuple is presumably the argument of a stripped
# @pytest.mark.parametrize('config_name,overrides,expected', [...]) decorator
# for the test below — confirm against the original source; as written it is
# a no-op expression statement.
('config_name,overrides,expected', [param('override_hydra2', [], DefaultsTreeNode(node=VirtualRoot(), children=[DefaultsTreeNode(node=ConfigDefault(path='hydra/config'), children=[GroupDefault(group='help', value='custom1'), GroupDefault(group='output', value='default'), ConfigDefault(path='_self_')]), ConfigDefault(path='override_hydra2')]), id='override_hydra2'), param('override_hydra2', ['hydra/help=custom2'], DefaultsTreeNode(node=VirtualRoot(), children=[DefaultsTreeNode(node=ConfigDefault(path='hydra/config'), children=[GroupDefault(group='help', value='custom2'), GroupDefault(group='output', value='default'), ConfigDefault(path='_self_')]), ConfigDefault(path='override_hydra2')]), id='override_hydra2+external'), param('override_hydra3', [], DefaultsTreeNode(node=VirtualRoot(), children=[DefaultsTreeNode(node=ConfigDefault(path='hydra/config'), children=[GroupDefault(group='help', value='custom1'), GroupDefault(group='output', value='disabled'), ConfigDefault(path='_self_')]), ConfigDefault(path='override_hydra3')]), id='override_hydra3+external'), param('override_hydra_wrong_order', [], raises(ConfigCompositionException, match=re.escape(dedent(" In override_hydra_wrong_order: Override 'hydra/help : custom1' is defined before 'group1: file1'"))), id='override_hydra_wrong_order')])
def test_hydra_overrides_from_primary_config(config_name: str, overrides: List[str], expected: DefaultsTreeNode) -> None:
    """Verify hydra-group overrides declared in a primary config (plus any
    command-line overrides) yield the expected defaults tree, including the
    error case where the override precedes the group it overrides."""
    _test_defaults_tree_impl(config_name=config_name, input_overrides=overrides, expected=expected, prepend_hydra=True)
class Job():
    """Filesystem-backed description of a single external job.

    Each job gets a deterministic id (derived from its binary/arguments and
    any parent job ids) and a working directory under GLOBALS['tasks_dir']
    containing status/time/cmd/stdout/stderr/pid marker files.
    """
    def __repr__(self):
        return ('Job (%s, %s)' % (self.jobname, self.jobid[:6]))
    def __init__(self, bin, args, jobname=None, parent_ids=None):
        # One-letter status (W/Q/R/L/D/E); None until first queried.
        self.status = None
        self.bin = bin
        self.args = args
        self.cores = 1
        self.exec_type = 'insitu'
        self.jobname = jobname
        # Strip base/tasks dir prefixes so job ids stay stable across
        # different working directories.
        clean = (lambda x: (basename(x) if ((GLOBALS['basedir'] in x) or (GLOBALS['tasks_dir'] in x)) else x))
        parsed_id_string = [('%s %s' % (clean(str(pair[0])), clean(str(pair[1])))) for pair in self.args.items()]
        # NOTE(review): `md5` is presumably a project helper returning a hex
        # string (raw hashlib objects are not sortable/joinable) — confirm.
        self.jobid = md5(','.join(sorted([md5(e) for e in parsed_id_string])))
        if parent_ids:
            # Fold parent job ids in so dependent jobs get distinct ids.
            self.jobid = md5(','.join(sorted((parent_ids + [self.jobid]))))
        if (not self.jobname):
            # Derive a safe name from the binary's basename.
            self.jobname = re.sub('[^0-9a-zA-Z]', '-', basename(self.bin))
        self.ifdone_cmd = ''
        self.iffail_cmd = ''
        self.set_jobdir(pjoin(GLOBALS['tasks_dir'], self.jobid))
        self.input_files = {}
        self.dependencies = set()
    def add_input_file(self, ifile, outpath=None):
        """Register an input file and (optionally) where it should be placed."""
        self.input_files[ifile] = outpath
    def set_jobdir(self, basepath):
        """Point the job at ``basepath`` and derive its marker-file paths."""
        self.jobdir = basepath
        self.status_file = os.path.join(self.jobdir, '__status__')
        self.time_file = os.path.join(self.jobdir, '__time__')
        self.cmd_file = os.path.join(self.jobdir, '__cmd__')
        self.stdout_file = os.path.join(self.jobdir, '__stdout__')
        self.stderr_file = os.path.join(self.jobdir, '__stderr__')
        self.pid_file = os.path.join(self.jobdir, '__pid__')
    def write_pid(self, host, pid):
        """Record the executing host and pid (tab-separated) in the pid file."""
        open(self.pid_file, 'w').write(('%s\t%s' % (host, pid)))
    def read_pid(self):
        """Return (host, pid) from the pid file, or ('', '') if unreadable."""
        try:
            (host, pid) = [_f.strip() for _f in open(self.pid_file, 'r').readline().split('\t')]
        except IOError:
            (host, pid) = ('', '')
        else:
            pid = int(pid)
        return (host, pid)
    def get_launch_cmd(self):
        """Build the shell command line from the binary and non-None args."""
        return ' '.join(([self.bin] + [('%s %s' % (k, v)) for (k, v) in self.args.items() if (v is not None)]))
    def dump_script(self):
        """Write the job's wrapper shell script (__cmd__) into the job dir.

        The script records R(unning) plus a start timestamp, runs the
        command, then records D(one)/E(rror) and triggers the matching
        ifdone/iffail hook, finally appending the end timestamp.
        """
        launch_cmd = self.get_launch_cmd()
        lines = ['#!/bin/sh', (" (echo R > %s && date +'%s' > %s) &&" % (self.status_file, TIME_FORMAT, self.time_file)), (' (cd %s && %s && (echo D > %s; %s) || (echo E > %s; %s));' % (self.jobdir, launch_cmd, self.status_file, self.ifdone_cmd, self.status_file, self.iffail_cmd)), (" date +'%s' >> %s; " % (TIME_FORMAT, self.time_file))]
        script = '\n'.join(lines)
        if (not os.path.exists(self.jobdir)):
            os.makedirs(self.jobdir)
        open(self.cmd_file, 'w').write(script)
    def get_status(self, sge_jobs=None):
        """Return the job's current status letter, reconciling the saved DB
        state, the on-disk status file and the scheduler's view."""
        # D(one) and E(rror) are terminal; only re-check otherwise.
        if (self.status not in set('DE')):
            jinfo = db.get_task_info(self.jobid)
            self.host = (jinfo.get('host', None) or '')
            self.pid = (jinfo.get('pid', None) or '')
            saved_status = jinfo.get('status', 'W')
            try:
                st = open(self.status_file).read(1)
            except IOError:
                # No status file yet: fall back to the DB's last known state.
                st = saved_status
            if (st in set('QRL')):
                # NOTE(review): startswith('') is always True, so this branch
                # always consults sge_jobs — looks like a host-prefix string
                # (e.g. an SGE marker) was lost here; confirm upstream.
                if self.host.startswith(''):
                    sge_st = sge_jobs.get(self.pid, {}).get('state', None)
                    log.debug('%s %s', self, sge_st)
                    if (not sge_st):
                        # Scheduler no longer knows the pid: mark as L(ost).
                        log.debug('%s %s %s', self, sge_st, self.pid)
                        st = 'L'
                    elif ('E' in sge_st):
                        pass
                elif ((self.host == HOSTNAME) and (not pid_up(self.pid))):
                    # Local job whose process has vanished: mark as L(ost).
                    st = 'L'
            elif (st == 'E'):
                log.error(('Job error reported: %s' % self))
            elif (st == ''):
                st = 'L'
            if ((st == 'E') and (self.status is None)):
                # First time we see a stale error from a previous run: retry.
                log.warning(':Retrying job marked as error from previous executions.:')
                self.status = 'W'
                try:
                    # NOTE(review): assigns write()'s return value (a length)
                    # to `st`; presumably intentional only as a reset side
                    # effect — confirm.
                    st = open(self.status_file, 'w').write('W')
                except IOError:
                    pass
            else:
                self.status = st
        return self.status
    def clean(self):
        """Delete the job directory and reset the status to W(aiting)."""
        if os.path.exists(self.jobdir):
            shutil.rmtree(self.jobdir)
        self.status = 'W'
class squeeze(_view):
    """View op that removes size-1 dimensions from a tensor.

    With ``dim`` set, only that dimension is considered (and removed only
    when it is a static IntImm(1)); with ``dim=None`` every static size-1
    dimension is dropped. The output tensor is a view of the input.
    """
    def __init__(self, dim: Optional[int]) -> None:
        super().__init__()
        self._attrs['op'] = 'squeeze'
        # Target dimension; None means squeeze all static size-1 dims.
        self._attrs['dim'] = dim
        self.shape_eval_template = SQUEEZE_FUNC_TEMPLATE
    def _infer_shapes(self, x: Tensor) -> "List[IntVar]":
        """Compute the output shape list, recording an output->input index
        map for surviving dynamic dims in ``self._attrs['out_dim_to_in']``."""
        dim = self._attrs['dim']
        x_shape = x._attrs['shape']
        if (dim is not None):
            # Normalize a negative index against the input rank.
            dim = wrap_dim(self._attrs['dim'], len(x_shape))
        new_shape = []
        out_dim_to_in = {}
        out_dim = 0
        for (input_idx, shape) in enumerate(x_shape):
            # Drop this dimension when it is a static 1 and either all dims
            # are being squeezed (dim is None) or it is the requested dim.
            if (((dim is None) or (input_idx == dim)) and (shape == IntImm(1))):
                continue
            if isinstance(shape, IntVar):
                # Remember where each surviving dynamic dim came from.
                out_dim_to_in[out_dim] = input_idx
            out_dim += 1
            new_shape.append(shape)
        self._attrs['out_dim_to_in'] = out_dim_to_in
        return new_shape
    def __call__(self, x: Tensor) -> Tensor:
        """Apply the op to ``x`` and return the squeezed view tensor."""
        self._attrs['inputs'] = [x]
        self._set_depth()
        output_shape = self._infer_shapes(x)
        output = Tensor(output_shape, src_ops={self}, is_view_of=x, dtype=x._attrs['dtype'])
        self._attrs['outputs'] = [output]
        return output
    def _get_op_attributes(self):
        """Attributes needed to reconstruct this op."""
        return {'dim': self._attrs['dim']}
    def gen_function(self) -> str:
        """Generate backend code via the registered target-specific codegen."""
        target = backend.target.Target.current()
        func_key = '{target}.{op}.gen_function'.format(target=target.name(), op=self._attrs['op'])
        func = registry.get(func_key)
        return func(self._attrs, self.shape_eval_template)
    def _args_for_pseudo_code(self):
        """Arguments shown when rendering the graph as pseudo-code."""
        return [f"dim={self._attrs['dim']}"]
class TriggerThread(threading.Thread):
    """Listen on one Redis channel and push each message as an LSL marker.

    Subscribing to 'OUTPUTLSL_UNBLOCK' gives stop() a way to wake the
    blocking pubsub.listen() call so the thread can terminate.
    """
    def __init__(self, redischannel, trigger):
        threading.Thread.__init__(self)
        self.redischannel = redischannel
        # Thread name doubles as the trigger/section name used for scaling.
        self.name = trigger
        self.running = True
    def stop(self):
        """Ask the run() loop to exit on the next received message."""
        self.running = False
    def run(self):
        # Shared module-level state: Redis handle, LSL outlet, config, etc.
        global r, lsl_format, patch, lock, monitor, outlet
        pubsub = patch.pubsub()
        # The unblock channel lets stop() take effect despite listen() blocking.
        pubsub.subscribe('OUTPUTLSL_UNBLOCK')
        pubsub.subscribe(self.redischannel)
        while self.running:
            for item in pubsub.listen():
                # NOTE(review): this also breaks on non-'message' items
                # (e.g. subscribe confirmations), restarting listen() via the
                # outer while — presumably intentional; confirm.
                if ((not self.running) or (not (item['type'] == 'message'))):
                    break
                if (item['channel'] == self.redischannel):
                    if (lsl_format == 'value'):
                        # Rescale the numeric payload with this trigger's
                        # configured scale/offset before sending.
                        val = float(item['data'])
                        scale = patch.getfloat('scale', self.name, default=127)
                        offset = patch.getfloat('offset', self.name, default=0)
                        val = EEGsynth.rescale(val, slope=scale, offset=offset)
                        marker = ('%g' % val)
                    else:
                        # Otherwise the marker is simply the trigger name.
                        marker = self.name
                    with lock:
                        monitor.debug(marker)
                        outlet.push_sample([marker])
class StepStateCriticComposer(BaseStateCriticComposer):
    """Composer instantiating one torch critic network per sub-step key."""
    def __init__(self, observation_spaces_dict: Dict[(Union[(str, int)], spaces.Dict)], agent_counts_dict: Dict[(StepKeyType, int)], networks: CollectionOfConfigType):
        """Build a critic nn.Module for each step key from its network config."""
        super().__init__(observation_spaces_dict, agent_counts_dict)
        networks = list_to_dict(networks)
        # One critic per configured key, fed that key's observation shapes.
        self._critics = {key: Factory(base_type=nn.Module).instantiate(networks[key], obs_shapes=self._obs_shapes[key]) for key in networks.keys()}
    # NOTE(review): this bare expression is presumably the remnant of a
    # stripped decorator (e.g. @override(BaseStateCriticComposer)) on
    # `critic` — confirm against the original source.
    (BaseStateCriticComposer)
    def critic(self) -> TorchStepStateCritic:
        """Return the composed step-state critic on CPU (presumably a
        @property in the original; decorator appears stripped)."""
        return TorchStepStateCritic(self._critics, obs_spaces_dict=self._observation_spaces_dict, device='cpu')
def main():
    """CLI dispatcher: look up ``argv[1]`` in ``all_cli_commands``, build an
    ArgumentParser from the command function's signature, and invoke it.

    Special parameter handling:
      * a parameter annotated ``DataManager`` is injected, not parsed;
      * a parameter named ``extra_args`` receives everything after ``--``;
      * ``list[str]`` parameters become repeatable ``--name`` options;
      * ``bool`` parameters become ``--name`` / ``--no_name`` flags;
      * parameters with defaults become options, the rest positionals.

    Fixes: uses ``sys.exit`` instead of the site-injected ``exit()`` builtin
    (not guaranteed under ``python -S``), and drops a placeholder-less
    f-string.
    """
    argv = sys.argv
    logging.basicConfig(format='[%(asctime)s][%(name)s][%(levelname)s] - %(message)s', handlers=[logging.StreamHandler()], level=logging.INFO)
    if (len(argv) < 2) or (argv[1] not in all_cli_commands):
        cmd_names = sorted(all_cli_commands.keys())
        print(f'Usage: {argv[0]} command [...args]')
        print('Commands:')
        print('\n'.join([('  ' + cmd) for cmd in cmd_names]))
        sys.exit(1)
    func = all_cli_commands[argv[1]]
    sig = signature(func)
    dm_param = None
    has_extra_args = False
    parser = ArgumentParser(argv[1])
    for param in sig.parameters.values():
        if param.annotation == DataManager:
            # Injected below; never exposed on the command line.
            dm_param = param.name
            continue
        if param.name == 'extra_args':
            has_extra_args = True
            continue
        if param.annotation == list[str]:
            # NOTE(review): argparse's append action mutates a list default
            # in place across parses — assumed harmless for one-shot CLI use.
            parser.add_argument(f'--{param.name}', type=str, default=param.default, action='append')
            continue
        if param.annotation == bool:
            assert param.default in {True, False}, f'bool param ({param.name}) must have default'
            if not param.default:
                parser.add_argument(f'--{param.name}', action='store_true')
            else:
                # Default-True flags are disabled via --no_<name>.
                parser.add_argument(f'--no_{param.name}', dest=param.name, action='store_false')
            continue
        if param.default != Parameter.empty:
            parser.add_argument(f'--{param.name}', type=param.annotation, default=param.default)
        else:
            parser.add_argument(f'{param.name}', type=param.annotation)
    argv = argv[2:]
    extra_args = []
    if has_extra_args and ('--' in argv):
        # Everything after the literal '--' is passed through untouched.
        split_idx = argv.index('--')
        extra_args = argv[(split_idx + 1):]
        argv = argv[:split_idx]
    args = vars(parser.parse_args(argv))
    if dm_param is not None:
        args[dm_param] = create_data_manager()
    if has_extra_args:
        args['extra_args'] = extra_args
    func(**args)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.