id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
18,347
def fork_waitfor_timed(tmp, pid, timeout): timer_expired = True poll_time = 2 time_passed = 0 while (time_passed < timeout): time.sleep(poll_time) (child_pid, status) = os.waitpid(pid, os.WNOHANG) if ((child_pid, status) == (0, 0)): time_passed = (time_passed + poll_time) else: timer_expired = False break if timer_expired: logging.info('Timer expired (%d sec.), nuking pid %d', timeout, pid) utils.nuke_pid(pid) (child_pid, status) = os.waitpid(pid, 0) raise error.TestError(('Test timeout expired, rc=%d' % status)) else: _check_for_subprocess_exception(tmp, pid) if status: raise error.TestError(('Test subprocess failed rc=%d' % status))
[ "def", "fork_waitfor_timed", "(", "tmp", ",", "pid", ",", "timeout", ")", ":", "timer_expired", "=", "True", "poll_time", "=", "2", "time_passed", "=", "0", "while", "(", "time_passed", "<", "timeout", ")", ":", "time", ".", "sleep", "(", "poll_time", ")...
waits for pid until it terminates or timeout expires .
train
false
18,348
def findLinksRel(link_attrs_list, target_rel): matchesTarget = (lambda attrs: linkHasRel(attrs, target_rel)) return filter(matchesTarget, link_attrs_list)
[ "def", "findLinksRel", "(", "link_attrs_list", ",", "target_rel", ")", ":", "matchesTarget", "=", "(", "lambda", "attrs", ":", "linkHasRel", "(", "attrs", ",", "target_rel", ")", ")", "return", "filter", "(", "matchesTarget", ",", "link_attrs_list", ")" ]
filter the list of link attributes on whether it has target_rel as a relationship .
train
true
18,350
def magic_memit(self, line=''): (opts, stmt) = self.parse_options(line, 'r:t', posix=False, strict=False) repeat = int(getattr(opts, 'r', 1)) if (repeat < 1): repeat = 1 timeout = int(getattr(opts, 't', 0)) if (timeout <= 0): timeout = None mem_usage = [] for _ in range(repeat): tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)), timeout=timeout) mem_usage.extend(tmp) if mem_usage: print ('maximum of %d: %f MB per loop' % (repeat, max(mem_usage))) else: print 'ERROR: could not read memory usage, try with a lower interval or more iterations'
[ "def", "magic_memit", "(", "self", ",", "line", "=", "''", ")", ":", "(", "opts", ",", "stmt", ")", "=", "self", ".", "parse_options", "(", "line", ",", "'r:t'", ",", "posix", "=", "False", ",", "strict", "=", "False", ")", "repeat", "=", "int", ...
measure memory usage of a python statement usage .
train
false
18,351
def _api_change_opts(name, output, kwargs): value = kwargs.get('value') value2 = kwargs.get('value2') if (value and value2 and value2.isdigit()): result = NzbQueue.do.change_opts(value, int(value2)) return report(output, keyword='status', data=bool((result > 0)))
[ "def", "_api_change_opts", "(", "name", ",", "output", ",", "kwargs", ")", ":", "value", "=", "kwargs", ".", "get", "(", "'value'", ")", "value2", "=", "kwargs", ".", "get", "(", "'value2'", ")", "if", "(", "value", "and", "value2", "and", "value2", ...
api: accepts output .
train
false
18,352
def sequences_to_file(results, outfile_name): f = open(outfile_name, 'w+') for (label, seq) in results: output_lines = [] output_lines.append(('>%s\n' % label)) output_lines.append(('%s\n' % seq)) f.writelines(output_lines) f.close()
[ "def", "sequences_to_file", "(", "results", ",", "outfile_name", ")", ":", "f", "=", "open", "(", "outfile_name", ",", "'w+'", ")", "for", "(", "label", ",", "seq", ")", "in", "results", ":", "output_lines", "=", "[", "]", "output_lines", ".", "append", ...
translate a generator of label .
train
false
18,354
def create_oebbook(log, path_or_stream, opts, reader=None, encoding='utf-8', populate=True, for_regex_wizard=False, specialize=None): from calibre.ebooks.oeb.base import OEBBook html_preprocessor = HTMLPreProcessor(log, opts, regex_wizard_callback=regex_wizard_callback) if (not encoding): encoding = None oeb = OEBBook(log, html_preprocessor, pretty_print=opts.pretty_print, input_encoding=encoding) if (not populate): return oeb if (specialize is not None): oeb = (specialize(oeb) or oeb) log('Parsing all content...') if (reader is None): from calibre.ebooks.oeb.reader import OEBReader reader = OEBReader reader()(oeb, path_or_stream) return oeb
[ "def", "create_oebbook", "(", "log", ",", "path_or_stream", ",", "opts", ",", "reader", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "populate", "=", "True", ",", "for_regex_wizard", "=", "False", ",", "specialize", "=", "None", ")", ":", "from", "...
create an oebbook .
train
false
18,356
def patched(f): def wrapped(*args, **kwargs): kwargs['return_response'] = False kwargs['prefetch'] = True config = kwargs.get('config', {}) config.update(safe_mode=True) kwargs['config'] = config return f(*args, **kwargs) return wrapped
[ "def", "patched", "(", "f", ")", ":", "def", "wrapped", "(", "*", "args", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'return_response'", "]", "=", "False", "kwargs", "[", "'prefetch'", "]", "=", "True", "config", "=", "kwargs", ".", "get", "(", ...
patches calls to run_on_app_servers on instance methods .
train
false
18,357
def format_quote(tweet): screen_name = tweet['user']['screen_name'] text = tweet['text'] tid = str(tweet['id']) if ('#owner' not in c['QUOTE_FORMAT']): printNicely(light_magenta('Quote should contains #owner')) return False if ('#comment' not in c['QUOTE_FORMAT']): printNicely(light_magenta('Quote format should have #comment')) return False formater = '' try: formater = c['QUOTE_FORMAT'] formater = formater.replace('#owner', screen_name) formater = formater.replace('#tweet', text) formater = formater.replace('#tid', tid) formater = emojize(formater) except: pass notice = formater.split() notice = lmap((lambda x: (light_green(x) if (x == '#comment') else x)), notice) notice = lmap((lambda x: (color_func(c['TWEET']['rt'])(x) if (x == 'RT') else x)), notice) notice = lmap((lambda x: (cycle_color(x) if (x[0] == '@') else x)), notice) notice = lmap((lambda x: (color_func(c['TWEET']['link'])(x) if (x[0:4] == 'http') else x)), notice) notice = lmap((lambda x: (color_func(c['TWEET']['hashtag'])(x) if x.startswith('#') else x)), notice) notice = ' '.join(notice) notice = ((light_magenta('Quoting: "') + notice) + light_magenta('"')) printNicely(notice) return formater
[ "def", "format_quote", "(", "tweet", ")", ":", "screen_name", "=", "tweet", "[", "'user'", "]", "[", "'screen_name'", "]", "text", "=", "tweet", "[", "'text'", "]", "tid", "=", "str", "(", "tweet", "[", "'id'", "]", ")", "if", "(", "'#owner'", "not",...
returns a formatted string of a quote .
train
false
18,358
def sort_ids(ids, mapping): def _lookup(id): try: return len(mapping[id]) except KeyError: return 0 deco = [(_lookup(id), id) for id in ids] deco.sort(reverse=True) return [id for (_, id) in deco]
[ "def", "sort_ids", "(", "ids", ",", "mapping", ")", ":", "def", "_lookup", "(", "id", ")", ":", "try", ":", "return", "len", "(", "mapping", "[", "id", "]", ")", "except", "KeyError", ":", "return", "0", "deco", "=", "[", "(", "_lookup", "(", "id...
sorts ids based on their cluster_size .
train
false
18,360
def test_resource_url(): data = jinja.render('test2.html') print data url = QUrl(data) assert url.isValid() assert (url.scheme() == 'file') path = url.path() if (os.name == 'nt'): path = path.lstrip('/') path = path.replace('/', os.sep) with open(path, 'r', encoding='utf-8') as f: assert (f.read().splitlines()[0] == 'Hello World!')
[ "def", "test_resource_url", "(", ")", ":", "data", "=", "jinja", ".", "render", "(", "'test2.html'", ")", "print", "data", "url", "=", "QUrl", "(", "data", ")", "assert", "url", ".", "isValid", "(", ")", "assert", "(", "url", ".", "scheme", "(", ")",...
test resource_url() which can be used from templates .
train
false
18,362
def directed_connected_components(digr): if (not digr.DIRECTED): raise Exception(('%s is not a directed graph' % digr)) finishing_times = DFS_loop(digr.get_transpose()) (nodes_explored, connected_components) = ([], []) for node in finishing_times[::(-1)]: component = [] outer_dfs(digr, node, nodes_explored, component) if component: nodes_explored += component connected_components.append(component) return connected_components
[ "def", "directed_connected_components", "(", "digr", ")", ":", "if", "(", "not", "digr", ".", "DIRECTED", ")", ":", "raise", "Exception", "(", "(", "'%s is not a directed graph'", "%", "digr", ")", ")", "finishing_times", "=", "DFS_loop", "(", "digr", ".", "...
returns a list of strongly connected components in a directed graph using kosarajus two pass algorithm .
train
false
18,363
def read_partition_file(filename): for (record_index, record) in enumerate(screed.open(filename)): (_, partition_id) = record.name.rsplit(' DCTB ', 1) (yield (record_index, record, int(partition_id)))
[ "def", "read_partition_file", "(", "filename", ")", ":", "for", "(", "record_index", ",", "record", ")", "in", "enumerate", "(", "screed", ".", "open", "(", "filename", ")", ")", ":", "(", "_", ",", "partition_id", ")", "=", "record", ".", "name", ".",...
utility function to get partitioned reads from file .
train
false
18,366
def setup_ecobee(hass, network, config): if (network.pin is not None): request_configuration(network, hass, config) return if ('ecobee' in _CONFIGURING): configurator = get_component('configurator') configurator.request_done(_CONFIGURING.pop('ecobee')) hold_temp = config[DOMAIN].get(CONF_HOLD_TEMP) discovery.load_platform(hass, 'climate', DOMAIN, {'hold_temp': hold_temp}, config) discovery.load_platform(hass, 'sensor', DOMAIN, {}, config) discovery.load_platform(hass, 'binary_sensor', DOMAIN, {}, config)
[ "def", "setup_ecobee", "(", "hass", ",", "network", ",", "config", ")", ":", "if", "(", "network", ".", "pin", "is", "not", "None", ")", ":", "request_configuration", "(", "network", ",", "hass", ",", "config", ")", "return", "if", "(", "'ecobee'", "in...
setup ecobee thermostat .
train
false
18,368
def set_default(ruby, runas=None): return _rvm(['alias', 'create', 'default', ruby], runas=runas)
[ "def", "set_default", "(", "ruby", ",", "runas", "=", "None", ")", ":", "return", "_rvm", "(", "[", "'alias'", ",", "'create'", ",", "'default'", ",", "ruby", "]", ",", "runas", "=", "runas", ")" ]
set / add a default value to defaults .
train
false
18,370
@LocalContext def run_shellcode(bytes, **kw): return ELF.from_bytes(bytes, **kw).process()
[ "@", "LocalContext", "def", "run_shellcode", "(", "bytes", ",", "**", "kw", ")", ":", "return", "ELF", ".", "from_bytes", "(", "bytes", ",", "**", "kw", ")", ".", "process", "(", ")" ]
given assembled machine code bytes .
train
false
18,371
@contextmanager def assertNoFDsLeaked(test_case): gc.collect() def process_fds(): path = FilePath('/dev/fd') if (not path.exists()): raise SkipTest('/dev/fd is not available.') return set([child.basename() for child in path.children()]) fds = process_fds() try: (yield) finally: test_case.assertEqual(process_fds(), fds)
[ "@", "contextmanager", "def", "assertNoFDsLeaked", "(", "test_case", ")", ":", "gc", ".", "collect", "(", ")", "def", "process_fds", "(", ")", ":", "path", "=", "FilePath", "(", "'/dev/fd'", ")", "if", "(", "not", "path", ".", "exists", "(", ")", ")", ...
context manager that asserts no file descriptors are leaked .
train
false
18,372
def decode_long(data): nbytes = len(data) if (nbytes == 0): return 0L ashex = _binascii.hexlify(data[::(-1)]) n = long(ashex, 16) if (data[(-1)] >= '\x80'): n -= (1L << (nbytes * 8)) return n
[ "def", "decode_long", "(", "data", ")", ":", "nbytes", "=", "len", "(", "data", ")", "if", "(", "nbytes", "==", "0", ")", ":", "return", "0", "L", "ashex", "=", "_binascii", ".", "hexlify", "(", "data", "[", ":", ":", "(", "-", "1", ")", "]", ...
decode a long from a twos complement little-endian binary string .
train
false
18,373
@mock_ec2 def test_modify_attribute_blockDeviceMapping(): conn = boto.ec2.connect_to_region(u'us-east-1') reservation = conn.run_instances(u'ami-1234abcd') instance = reservation.instances[0] with assert_raises(JSONResponseError) as ex: instance.modify_attribute(u'blockDeviceMapping', {u'/dev/sda1': True}, dry_run=True) ex.exception.reason.should.equal(u'DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal(u'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') instance.modify_attribute(u'blockDeviceMapping', {u'/dev/sda1': True}) instance = ec2_backends[conn.region.name].get_instance(instance.id) instance.block_device_mapping.should.have.key(u'/dev/sda1') instance.block_device_mapping[u'/dev/sda1'].delete_on_termination.should.be(True)
[ "@", "mock_ec2", "def", "test_modify_attribute_blockDeviceMapping", "(", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "u'us-east-1'", ")", "reservation", "=", "conn", ".", "run_instances", "(", "u'ami-1234abcd'", ")", "instance", "=", ...
reproduces the missing feature explained at [0] .
train
false
18,374
def _repack_pkgs(pkgs, normalize=True): if (normalize and ('pkg.normalize_name' in __salt__)): _normalize_name = __salt__['pkg.normalize_name'] else: _normalize_name = (lambda pkgname: pkgname) return dict([(_normalize_name(str(x)), (str(y) if (y is not None) else y)) for (x, y) in six.iteritems(salt.utils.repack_dictlist(pkgs))])
[ "def", "_repack_pkgs", "(", "pkgs", ",", "normalize", "=", "True", ")", ":", "if", "(", "normalize", "and", "(", "'pkg.normalize_name'", "in", "__salt__", ")", ")", ":", "_normalize_name", "=", "__salt__", "[", "'pkg.normalize_name'", "]", "else", ":", "_nor...
repack packages specified using "pkgs" argument to pkg states into a single dictionary .
train
true
18,375
def get_pkg_names(args): print('\n'.join((pkg_name for (pkg_name, pkg_tests) in get_affected_packages(args))))
[ "def", "get_pkg_names", "(", "args", ")", ":", "print", "(", "'\\n'", ".", "join", "(", "(", "pkg_name", "for", "(", "pkg_name", ",", "pkg_tests", ")", "in", "get_affected_packages", "(", "args", ")", ")", ")", ")" ]
print package names that would be affected .
train
false
18,376
@fixture def db(): global _db if (_db is None): _db = orm.new_session_factory('sqlite:///:memory:', echo=True)() user = orm.User(name=getuser()) user.servers.append(orm.Server()) hub = orm.Hub(server=orm.Server()) _db.add(user) _db.add(hub) _db.commit() return _db
[ "@", "fixture", "def", "db", "(", ")", ":", "global", "_db", "if", "(", "_db", "is", "None", ")", ":", "_db", "=", "orm", ".", "new_session_factory", "(", "'sqlite:///:memory:'", ",", "echo", "=", "True", ")", "(", ")", "user", "=", "orm", ".", "Us...
get a db session .
train
false
18,377
@register.function @jinja2.contextfunction def mkt_breadcrumbs(context, product=None, items=None, crumb_size=40, add_default=True, cls=None): if add_default: crumbs = [(reverse('home'), _('Home'))] else: crumbs = [] if product: if items: url_ = product.get_detail_url() else: url_ = None crumbs += [(None, _('Apps')), (url_, product.name)] if items: crumbs.extend(items) if (len(crumbs) == 1): crumbs = [] crumbs = [(u, truncate(label, crumb_size)) for (u, label) in crumbs] t = env.get_template('site/helpers/breadcrumbs.html').render({'breadcrumbs': crumbs, 'cls': cls}) return jinja2.Markup(t)
[ "@", "register", ".", "function", "@", "jinja2", ".", "contextfunction", "def", "mkt_breadcrumbs", "(", "context", ",", "product", "=", "None", ",", "items", "=", "None", ",", "crumb_size", "=", "40", ",", "add_default", "=", "True", ",", "cls", "=", "No...
wrapper function for breadcrumbs .
train
false
18,379
def _to_report(name): if (name in _REPORTS): return name raise UsageError('Unrecognized report {}. {}'.format(name, _to_report.coerceDoc))
[ "def", "_to_report", "(", "name", ")", ":", "if", "(", "name", "in", "_REPORTS", ")", ":", "return", "name", "raise", "UsageError", "(", "'Unrecognized report {}. {}'", ".", "format", "(", "name", ",", "_to_report", ".", "coerceDoc", ")", ")" ]
ensure that name is a valid report .
train
false
18,380
def model_from_yaml(yaml_string, custom_objects=None): import yaml from keras.utils.layer_utils import layer_from_config config = yaml.load(yaml_string) return layer_from_config(config, custom_objects=custom_objects)
[ "def", "model_from_yaml", "(", "yaml_string", ",", "custom_objects", "=", "None", ")", ":", "import", "yaml", "from", "keras", ".", "utils", ".", "layer_utils", "import", "layer_from_config", "config", "=", "yaml", ".", "load", "(", "yaml_string", ")", "return...
parses a yaml model configuration file and returns a model instance .
train
false
18,381
@login_required def project_version_delete_html(request, project_slug, version_slug): project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug) version = get_object_or_404(Version.objects.public(user=request.user, project=project, only_active=False), slug=version_slug) if (not version.active): version.built = False version.save() broadcast(type='app', task=tasks.clear_artifacts, args=[version.pk]) else: return HttpResponseBadRequest("Can't delete HTML for an active version.") return HttpResponseRedirect(reverse('project_version_list', kwargs={'project_slug': project_slug}))
[ "@", "login_required", "def", "project_version_delete_html", "(", "request", ",", "project_slug", ",", "version_slug", ")", ":", "project", "=", "get_object_or_404", "(", "Project", ".", "objects", ".", "for_admin_user", "(", "request", ".", "user", ")", ",", "s...
project version delete html this marks a version as not built .
train
false
18,382
def bump_cache_for_item(item): cache.bump_version(_get_namespace_for_item(item))
[ "def", "bump_cache_for_item", "(", "item", ")", ":", "cache", ".", "bump_version", "(", "_get_namespace_for_item", "(", "item", ")", ")" ]
bump cache for given item use this only for non product items .
train
false
18,383
def marketing_link_context_processor(request): marketing_urls = configuration_helpers.get_value('MKTG_URLS', settings.MKTG_URLS) return dict([(('MKTG_URL_' + k), marketing_link(k)) for k in (settings.MKTG_URL_LINK_MAP.viewkeys() | marketing_urls.viewkeys())])
[ "def", "marketing_link_context_processor", "(", "request", ")", ":", "marketing_urls", "=", "configuration_helpers", ".", "get_value", "(", "'MKTG_URLS'", ",", "settings", ".", "MKTG_URLS", ")", "return", "dict", "(", "[", "(", "(", "'MKTG_URL_'", "+", "k", ")",...
a django context processor to give templates access to marketing urls returns a dict whose keys are the marketing link names usable with the marketing_link method prefixed with mktg_url_ and whose values are the corresponding urls as computed by the marketing_link method .
train
false
18,384
def test_invalid_upgrade_strategy_causes_error(script): result = script.pip_install_local('--upgrade', '--upgrade-strategy=bazinga', 'simple', expect_error=True) assert result.returncode assert ('invalid choice' in result.stderr)
[ "def", "test_invalid_upgrade_strategy_causes_error", "(", "script", ")", ":", "result", "=", "script", ".", "pip_install_local", "(", "'--upgrade'", ",", "'--upgrade-strategy=bazinga'", ",", "'simple'", ",", "expect_error", "=", "True", ")", "assert", "result", ".", ...
it errors out when the upgrade-strategy is an invalid/unrecognised one .
train
false
18,385
def pivot_simple(index, columns, values): if ((len(index) != len(columns)) or (len(columns) != len(values))): raise AssertionError('Length of index, columns, and values must be the same') if (len(index) == 0): return DataFrame(index=[]) hindex = MultiIndex.from_arrays([index, columns]) series = Series(values.ravel(), index=hindex) series = series.sort_index(level=0) return series.unstack()
[ "def", "pivot_simple", "(", "index", ",", "columns", ",", "values", ")", ":", "if", "(", "(", "len", "(", "index", ")", "!=", "len", "(", "columns", ")", ")", "or", "(", "len", "(", "columns", ")", "!=", "len", "(", "values", ")", ")", ")", ":"...
produce pivot table based on 3 columns of this dataframe .
train
false
18,387
def blog_post_feed(request, format, **kwargs): try: return {u'rss': PostsRSS, u'atom': PostsAtom}[format](**kwargs)(request) except KeyError: raise Http404()
[ "def", "blog_post_feed", "(", "request", ",", "format", ",", "**", "kwargs", ")", ":", "try", ":", "return", "{", "u'rss'", ":", "PostsRSS", ",", "u'atom'", ":", "PostsAtom", "}", "[", "format", "]", "(", "**", "kwargs", ")", "(", "request", ")", "ex...
blog posts feeds - maps format to the correct feed view .
train
true
18,388
def getuser(): import os for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): user = os.environ.get(name) if user: return user import pwd return pwd.getpwuid(os.getuid())[0]
[ "def", "getuser", "(", ")", ":", "import", "os", "for", "name", "in", "(", "'LOGNAME'", ",", "'USER'", ",", "'LNAME'", ",", "'USERNAME'", ")", ":", "user", "=", "os", ".", "environ", ".", "get", "(", "name", ")", "if", "user", ":", "return", "user"...
get the username from the environment or password database .
train
false
18,389
def decorator_from_middleware_with_args(middleware_class): return make_middleware_decorator(middleware_class)
[ "def", "decorator_from_middleware_with_args", "(", "middleware_class", ")", ":", "return", "make_middleware_decorator", "(", "middleware_class", ")" ]
like decorator_from_middleware .
train
false
18,390
def _wrap_urlopen(url, timeout=None): try: raw = urlopen(url, timeout=timeout) except IOError as e: msg = (u'Error getting %s: %s' % (url, e)) log.error(msg) raise RequestException(msg) resp = requests.Response() resp.raw = raw orig_read = raw.read resp.raw.read = (lambda size, **kwargs: orig_read(size)) resp.status_code = (raw.code or 200) resp.headers = requests.structures.CaseInsensitiveDict(raw.headers) return resp
[ "def", "_wrap_urlopen", "(", "url", ",", "timeout", "=", "None", ")", ":", "try", ":", "raw", "=", "urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", "except", "IOError", "as", "e", ":", "msg", "=", "(", "u'Error getting %s: %s'", "%", "(", ...
handles alternate schemes using urllib .
train
false
18,391
def _convert_seconds(seconds): return u'{0:.0f}:{1:02.0f}'.format(*divmod(float(seconds), 60))
[ "def", "_convert_seconds", "(", "seconds", ")", ":", "return", "u'{0:.0f}:{1:02.0f}'", ".", "format", "(", "*", "divmod", "(", "float", "(", "seconds", ")", ",", "60", ")", ")" ]
convert seconds to minutes:seconds format .
train
false
18,392
def prefix_to_attr(attr_id): attr_by_prefix = {'fsmt-': 'MountTargetId', 'subnet-': 'SubnetId', 'eni-': 'NetworkInterfaceId', 'sg-': 'SecurityGroups'} prefix = first_or_default(filter((lambda pref: str(attr_id).startswith(pref)), attr_by_prefix.keys())) if prefix: return attr_by_prefix[prefix] return 'IpAddress'
[ "def", "prefix_to_attr", "(", "attr_id", ")", ":", "attr_by_prefix", "=", "{", "'fsmt-'", ":", "'MountTargetId'", ",", "'subnet-'", ":", "'SubnetId'", ",", "'eni-'", ":", "'NetworkInterfaceId'", ",", "'sg-'", ":", "'SecurityGroups'", "}", "prefix", "=", "first_o...
helper method to convert id prefix to mount target attribute .
train
false
18,393
def new_plot(parent=None, subplot_kw=None, **fig_kw): if (subplot_kw is None): subplot_kw = {} canvas = new_canvas(**fig_kw) canvas.setParent(parent) fig = canvas.figure ax = fig.add_subplot(1, 1, 1, **subplot_kw) return (fig, ax)
[ "def", "new_plot", "(", "parent", "=", "None", ",", "subplot_kw", "=", "None", ",", "**", "fig_kw", ")", ":", "if", "(", "subplot_kw", "is", "None", ")", ":", "subplot_kw", "=", "{", "}", "canvas", "=", "new_canvas", "(", "**", "fig_kw", ")", "canvas...
return new figure and axes .
train
false
18,394
def _set_model_dict(resource_type_name, properties_target, prefix, created_at, updated_at): model_dict = {'name': resource_type_name, 'properties_target': properties_target, 'prefix': prefix, 'created_at': created_at, 'updated_at': updated_at} return model_dict
[ "def", "_set_model_dict", "(", "resource_type_name", ",", "properties_target", ",", "prefix", ",", "created_at", ",", "updated_at", ")", ":", "model_dict", "=", "{", "'name'", ":", "resource_type_name", ",", "'properties_target'", ":", "properties_target", ",", "'pr...
return a model dict set with the passed in key values .
train
false
18,395
def onInit(isReload): DEBUG_MSG(('onInit::isReload:%s' % isReload))
[ "def", "onInit", "(", "isReload", ")", ":", "DEBUG_MSG", "(", "(", "'onInit::isReload:%s'", "%", "isReload", ")", ")" ]
kbengine method .
train
false
18,396
def _modify_eni_properties(eni_id, properties=None, vm_=None): if (not isinstance(properties, dict)): raise SaltCloudException('ENI properties must be a dictionary') params = {'Action': 'ModifyNetworkInterfaceAttribute', 'NetworkInterfaceId': eni_id} for (k, v) in six.iteritems(properties): params[k] = v retries = 5 while (retries > 0): retries = (retries - 1) result = aws.query(params, return_root=True, location=get_location(vm_), provider=get_provider(), opts=__opts__, sigver='4') if (isinstance(result, dict) and result.get('error')): time.sleep(1) continue return result raise SaltCloudException("Could not change interface <{0}> attributes <'{1}'> after 5 retries".format(eni_id, properties))
[ "def", "_modify_eni_properties", "(", "eni_id", ",", "properties", "=", "None", ",", "vm_", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "properties", ",", "dict", ")", ")", ":", "raise", "SaltCloudException", "(", "'ENI properties must be a dic...
change properties of the interface with id eni_id to the values in properties dict .
train
true
18,398
def _fallback_next(it, default=_unspecified_next_default): if (default is _unspecified_next_default): return it.next() else: try: return it.next() except StopIteration: return default
[ "def", "_fallback_next", "(", "it", ",", "default", "=", "_unspecified_next_default", ")", ":", "if", "(", "default", "is", "_unspecified_next_default", ")", ":", "return", "it", ".", "next", "(", ")", "else", ":", "try", ":", "return", "it", ".", "next", ...
retrieve the next item from the iterator by calling its next() method .
train
false
18,399
def add_all_srs(): q = Subreddit._query(sort=asc('_date')) for sr in fetch_things2(q): for q in all_queries(get_links, sr, ('hot', 'new'), ['all']): q.update() for q in all_queries(get_links, sr, time_filtered_sorts, db_times.keys()): q.update() get_spam_links(sr).update() get_spam_comments(sr).update() get_reported_links(sr).update() get_reported_comments(sr).update()
[ "def", "add_all_srs", "(", ")", ":", "q", "=", "Subreddit", ".", "_query", "(", "sort", "=", "asc", "(", "'_date'", ")", ")", "for", "sr", "in", "fetch_things2", "(", "q", ")", ":", "for", "q", "in", "all_queries", "(", "get_links", ",", "sr", ",",...
recalculates every listing query for every subreddit .
train
false
18,401
def strict_parse_args(parser, raw_args): args = parser.parse_args() unexpected_params = (set(raw_args) - {allowed_arg.name for allowed_arg in parser.args}) if unexpected_params: raise InputError('Unexpected query parameters {}'.format(unexpected_params)) return args
[ "def", "strict_parse_args", "(", "parser", ",", "raw_args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "unexpected_params", "=", "(", "set", "(", "raw_args", ")", "-", "{", "allowed_arg", ".", "name", "for", "allowed_arg", "in", "parser"...
wrapper around parser .
train
false
18,402
def getEpoch(): return str((int(time.time()) / const.EPOCH_GRANULARITY))
[ "def", "getEpoch", "(", ")", ":", "return", "str", "(", "(", "int", "(", "time", ".", "time", "(", ")", ")", "/", "const", ".", "EPOCH_GRANULARITY", ")", ")" ]
return the unix epoch divided by a constant as string .
train
false
18,404
def genFull(pset, min_, max_, type_=None): def condition(height, depth): 'Expression generation stops when the depth is equal to height.' return (depth == height) return generate(pset, min_, max_, condition, type_)
[ "def", "genFull", "(", "pset", ",", "min_", ",", "max_", ",", "type_", "=", "None", ")", ":", "def", "condition", "(", "height", ",", "depth", ")", ":", "return", "(", "depth", "==", "height", ")", "return", "generate", "(", "pset", ",", "min_", ",...
generate an expression where each leaf has a the same depth between *min* and *max* .
train
false
18,405
def beaker(registry, xml_parent, data): beaker = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.beakerbuilder.BeakerBuilder') jobSource = XML.SubElement(beaker, 'jobSource') if (('content' in data) and ('path' in data)): raise JenkinsJobsException("Use just one of 'content' or 'path'") elif ('content' in data): jobSourceClass = 'org.jenkinsci.plugins.beakerbuilder.StringJobSource' jobSource.set('class', jobSourceClass) XML.SubElement(jobSource, 'jobContent').text = data['content'] elif ('path' in data): jobSourceClass = 'org.jenkinsci.plugins.beakerbuilder.FileJobSource' jobSource.set('class', jobSourceClass) XML.SubElement(jobSource, 'jobPath').text = data['path'] else: raise JenkinsJobsException("Use one of 'content' or 'path'") XML.SubElement(beaker, 'downloadFiles').text = str(data.get('download-logs', False)).lower()
[ "def", "beaker", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "beaker", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'org.jenkinsci.plugins.beakerbuilder.BeakerBuilder'", ")", "jobSource", "=", "XML", ".", "SubElement", "(", "beaker", ",...
yaml: beaker execute a beaker build step .
train
false
18,406
def parse_gff_attributes(attr_str): attributes_list = attr_str.split(';') attributes = {} for name_value_pair in attributes_list: pair = name_value_pair.strip().split('=') if (len(pair) == 1): pair = name_value_pair.strip().split('"') if (len(pair) == 1): continue if (pair == ''): continue name = pair[0].strip() if (name == ''): continue value = pair[1].strip(' "') attributes[name] = value if (len(attributes) == 0): attributes['group'] = attr_str return attributes
[ "def", "parse_gff_attributes", "(", "attr_str", ")", ":", "attributes_list", "=", "attr_str", ".", "split", "(", "';'", ")", "attributes", "=", "{", "}", "for", "name_value_pair", "in", "attributes_list", ":", "pair", "=", "name_value_pair", ".", "strip", "(",...
parses a gff/gtf attribute string and returns a dictionary of name-value pairs .
train
false
18,409
def _find_vector_rotation(a, b): R = np.eye(3) v = np.cross(a, b) if np.allclose(v, 0.0): return R s = np.dot(v, v) c = np.dot(a, b) vx = _skew_symmetric_cross(v) R += (vx + ((np.dot(vx, vx) * (1 - c)) / s)) return R
[ "def", "_find_vector_rotation", "(", "a", ",", "b", ")", ":", "R", "=", "np", ".", "eye", "(", "3", ")", "v", "=", "np", ".", "cross", "(", "a", ",", "b", ")", "if", "np", ".", "allclose", "(", "v", ",", "0.0", ")", ":", "return", "R", "s",...
find the rotation matrix that maps unit vector a to b .
train
false
18,410
def ReadFemPreg(dct_file='2002FemPreg.dct', dat_file='2002FemPreg.dat.gz'): dct = thinkstats2.ReadStataDct(dct_file) df = dct.ReadFixedWidth(dat_file, compression='gzip') CleanFemPreg(df) return df
[ "def", "ReadFemPreg", "(", "dct_file", "=", "'2002FemPreg.dct'", ",", "dat_file", "=", "'2002FemPreg.dat.gz'", ")", ":", "dct", "=", "thinkstats2", ".", "ReadStataDct", "(", "dct_file", ")", "df", "=", "dct", ".", "ReadFixedWidth", "(", "dat_file", ",", "compr...
reads the nsfg pregnancy data .
train
false
18,412
def schema_serializer_class(serializer_class, **kwargs): def decorator(func): func.schema_serializer_class = serializer_class func.kwargs = kwargs return func return decorator
[ "def", "schema_serializer_class", "(", "serializer_class", ",", "**", "kwargs", ")", ":", "def", "decorator", "(", "func", ")", ":", "func", ".", "schema_serializer_class", "=", "serializer_class", "func", ".", "kwargs", "=", "kwargs", "return", "func", "return"...
a decorator to set a serializer class in detail or list method of viewsets making it possible to extract the right serializer to generate the proper documentation .
train
false
18,413
def seq3(seq, custom_map=None, undef_code='Xaa'): if (custom_map is None): custom_map = {'*': 'Ter'} threecode = dict((list(IUPACData.protein_letters_1to3_extended.items()) + list(custom_map.items()))) return ''.join((threecode.get(aa, undef_code) for aa in seq))
[ "def", "seq3", "(", "seq", ",", "custom_map", "=", "None", ",", "undef_code", "=", "'Xaa'", ")", ":", "if", "(", "custom_map", "is", "None", ")", ":", "custom_map", "=", "{", "'*'", ":", "'Ter'", "}", "threecode", "=", "dict", "(", "(", "list", "("...
turn a one letter code protein sequence into one with three letter codes .
train
false
18,414
def AppIdWithDefaultPartition(app_id, default_partition): if (not default_partition): return app_id if ('~' in app_id): return app_id return ((default_partition + '~') + app_id)
[ "def", "AppIdWithDefaultPartition", "(", "app_id", ",", "default_partition", ")", ":", "if", "(", "not", "default_partition", ")", ":", "return", "app_id", "if", "(", "'~'", "in", "app_id", ")", ":", "return", "app_id", "return", "(", "(", "default_partition",...
add a partition to an application id if necessary .
train
false
18,416
def GetValidHostsForCert(cert): if ('subjectAltName' in cert): return [x[1] for x in cert['subjectAltName'] if (x[0].lower() == 'dns')] else: return [x[0][1] for x in cert['subject'] if (x[0][0].lower() == 'commonname')]
[ "def", "GetValidHostsForCert", "(", "cert", ")", ":", "if", "(", "'subjectAltName'", "in", "cert", ")", ":", "return", "[", "x", "[", "1", "]", "for", "x", "in", "cert", "[", "'subjectAltName'", "]", "if", "(", "x", "[", "0", "]", ".", "lower", "("...
returns a list of valid host globs for an ssl certificate .
train
true
18,420
def colon_separated_user_group(arg): try: parts = arg.split(':', 1) if (len(parts) == 1): uid = name_to_uid(parts[0]) gid = (-1) else: uid = name_to_uid(parts[0]) gid = name_to_gid(parts[1]) return (uid, gid) except: raise ValueError(('Invalid user:group definition %s' % arg))
[ "def", "colon_separated_user_group", "(", "arg", ")", ":", "try", ":", "parts", "=", "arg", ".", "split", "(", "':'", ",", "1", ")", "if", "(", "len", "(", "parts", ")", "==", "1", ")", ":", "uid", "=", "name_to_uid", "(", "parts", "[", "0", "]",...
find a user id and group id from a string like user:group .
train
false
18,421
def _TestQueryFollowed(tester, user_cookie, request_dict): validator = tester.validator (user_id, device_id) = tester.GetIdsFromCookie(user_cookie) def _MakeViewpointDict(followed): 'Create a viewpoint dict from the followed object plus its\n referenced viewpoint object.\n ' viewpoint = validator.GetModelObject(Viewpoint, followed.viewpoint_id) follower = validator.GetModelObject(Follower, DBKey(followed.user_id, followed.viewpoint_id)) metadata_dict = viewpoint.MakeMetadataDict(follower) if (follower.CanViewContent() and ('cover_photo' in metadata_dict)): photo_dict = metadata_dict['cover_photo'] obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO) _AddPhotoUrls(obj_store, photo_dict) return metadata_dict actual_dict = tester.SendRequest('query_followed', user_cookie, request_dict) followed = validator.QueryModelObjects(Followed, user_id, limit=request_dict.get('limit', None), start_key=request_dict.get('start_key', None)) expected_dict = {'viewpoints': [_MakeViewpointDict(f) for f in followed]} if (len(followed) > 0): expected_dict['last_key'] = followed[(-1)].sort_key tester._CompareResponseDicts('query_followed', user_id, request_dict, expected_dict, actual_dict) return actual_dict
[ "def", "_TestQueryFollowed", "(", "tester", ",", "user_cookie", ",", "request_dict", ")", ":", "validator", "=", "tester", ".", "validator", "(", "user_id", ",", "device_id", ")", "=", "tester", ".", "GetIdsFromCookie", "(", "user_cookie", ")", "def", "_MakeVi...
called by the servicetester in order to test query_followed service api call .
train
false
18,422
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): line = init_prefix nesting_stack = ['>'] while True: match = Search('^(.*)([<>(),;\\[\\]])[^<>(),;\\[\\]]*$', line) if match: operator = match.group(2) line = match.group(1) if (nesting_stack[(-1)] == '>'): if (operator in ('>', ')', ']')): nesting_stack.append(operator) elif (operator == '<'): nesting_stack.pop() if (not nesting_stack): return True elif (operator == ','): return True else: return False elif (operator in ('>', ')', ']')): nesting_stack.append(operator) elif (operator in ('(', '[')): nesting_stack.pop() else: linenum -= 1 if (linenum < 0): break line = clean_lines.elided[linenum] return False
[ "def", "FindPreviousMatchingAngleBracket", "(", "clean_lines", ",", "linenum", ",", "init_prefix", ")", ":", "line", "=", "init_prefix", "nesting_stack", "=", "[", "'>'", "]", "while", "True", ":", "match", "=", "Search", "(", "'^(.*)([<>(),;\\\\[\\\\]])[^<>(),;\\\\...
find the corresponding < that started a template .
train
false
18,423
def whathdr(filename): f = open(filename, 'rb') h = f.read(512) for tf in tests: res = tf(h, f) if res: return res return None
[ "def", "whathdr", "(", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'rb'", ")", "h", "=", "f", ".", "read", "(", "512", ")", "for", "tf", "in", "tests", ":", "res", "=", "tf", "(", "h", ",", "f", ")", "if", "res", ":", "re...
recognize sound headers .
train
false
18,424
def send_password_changed_notice(user): if config_value('SEND_PASSWORD_CHANGE_EMAIL'): subject = config_value('EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE') send_mail(subject, user.email, 'change_notice', user=user)
[ "def", "send_password_changed_notice", "(", "user", ")", ":", "if", "config_value", "(", "'SEND_PASSWORD_CHANGE_EMAIL'", ")", ":", "subject", "=", "config_value", "(", "'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE'", ")", "send_mail", "(", "subject", ",", "user", ".", "email...
sends the password changed notice email for the specified user .
train
true
18,425
def checksum(*paths): ret = dict() if (not paths): raise CommandExecutionError('No package files has been specified.') for package_file in paths: ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and (not __salt__['cmd.retcode'](['rpm', '-K', '--quiet', package_file], ignore_retcode=True, output_loglevel='trace', python_shell=False))) return ret
[ "def", "checksum", "(", "*", "paths", ")", ":", "ret", "=", "dict", "(", ")", "if", "(", "not", "paths", ")", ":", "raise", "CommandExecutionError", "(", "'No package files has been specified.'", ")", "for", "package_file", "in", "paths", ":", "ret", "[", ...
calculate standard internet checksum over data starting at startth byte skip_word: if specified .
train
false
18,427
@endpoint(u'/interface-data/tag-browser') def tag_browser(ctx, rd): (db, library_id) = get_library_data(ctx, rd)[:2] etag = (u'%s||%s||%s' % (db.last_modified(), rd.username, library_id)) etag = hashlib.sha1(etag.encode(u'utf-8')).hexdigest() def generate(): (db, library_id) = get_library_data(ctx, rd)[:2] return json(ctx, rd, tag_browser, categories_as_json(ctx, rd, db)) return rd.etagged_dynamic_response(etag, generate)
[ "@", "endpoint", "(", "u'/interface-data/tag-browser'", ")", "def", "tag_browser", "(", "ctx", ",", "rd", ")", ":", "(", "db", ",", "library_id", ")", "=", "get_library_data", "(", "ctx", ",", "rd", ")", "[", ":", "2", "]", "etag", "=", "(", "u'%s||%s|...
get the tag browser serialized as json optional: ?library_id=<default library>&sort_tags_by=name&partition_method=first letter &collapse_at=25&dont_collapse=&hide_empty_categories= .
train
false
18,428
def wait_on_interfaces(interfaces, timeout=10): result = defaultdict(list) timeout = (time.time() + timeout) while ((len(result) < len(interfaces)) and (time.time() < timeout)): rin = [i for i in interfaces.values()] win = [i for i in interfaces.values() if i.unsent_requests] (rout, wout, xout) = select.select(rin, win, [], 1) for interface in wout: interface.send_requests() for interface in rout: responses = interface.get_responses() if responses: result[interface.server].extend(responses) return result
[ "def", "wait_on_interfaces", "(", "interfaces", ",", "timeout", "=", "10", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "timeout", "=", "(", "time", ".", "time", "(", ")", "+", "timeout", ")", "while", "(", "(", "len", "(", "result", ")"...
return a map of servers to a list of tuples .
train
false
18,429
def xml_findall(xpath): def xpath_findall(value): validate(ET.iselement, value) return value.findall(xpath) return transform(xpath_findall)
[ "def", "xml_findall", "(", "xpath", ")", ":", "def", "xpath_findall", "(", "value", ")", ":", "validate", "(", "ET", ".", "iselement", ",", "value", ")", "return", "value", ".", "findall", "(", "xpath", ")", "return", "transform", "(", "xpath_findall", "...
find a list of xml elements via xpath .
train
true
18,430
def _merge_into_reversed(*iterables): return sorted(itertools.chain(*iterables), reverse=True)
[ "def", "_merge_into_reversed", "(", "*", "iterables", ")", ":", "return", "sorted", "(", "itertools", ".", "chain", "(", "*", "iterables", ")", ",", "reverse", "=", "True", ")" ]
merge multiple sorted inputs into a single output in reverse order .
train
false
18,431
def check_user_tags(dt): try: frappe.db.sql((u'select `_user_tags` from `tab%s` limit 1' % dt)) except Exception as e: if (e.args[0] == 1054): DocTags(dt).setup()
[ "def", "check_user_tags", "(", "dt", ")", ":", "try", ":", "frappe", ".", "db", ".", "sql", "(", "(", "u'select `_user_tags` from `tab%s` limit 1'", "%", "dt", ")", ")", "except", "Exception", "as", "e", ":", "if", "(", "e", ".", "args", "[", "0", "]",...
if the user does not have a tags column .
train
false
18,433
def _run_server(local_addr, local_addr_family, local_socket_type, local_linger_args, remote_addr, remote_addr_family, remote_socket_type, queue): class _ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer, object, ): 'Threaded streaming server for forwarding' address_family = local_addr_family socket_type = local_socket_type allow_reuse_address = True def __init__(self): handler_class_factory = partial(_TCPHandler, local_linger_args=local_linger_args, remote_addr=remote_addr, remote_addr_family=remote_addr_family, remote_socket_type=remote_socket_type) super(_ThreadedTCPServer, self).__init__(local_addr, handler_class_factory, bind_and_activate=True) server = _ThreadedTCPServer() queue.put([server.socket.family, server.server_address]) queue.close() server.serve_forever()
[ "def", "_run_server", "(", "local_addr", ",", "local_addr_family", ",", "local_socket_type", ",", "local_linger_args", ",", "remote_addr", ",", "remote_addr_family", ",", "remote_socket_type", ",", "queue", ")", ":", "class", "_ThreadedTCPServer", "(", "SocketServer", ...
run the server; executed in the subprocess .
train
false
18,434
def text_to_segments(text): lines = text.replace(' ', ' \xa0').splitlines() if (not lines): return [] segments = [] for line in lines[:(-1)]: if line: segments.append(hangups.ChatMessageSegment(line)) segments.append(hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK)) if lines[(-1)]: segments.append(hangups.ChatMessageSegment(lines[(-1)])) return segments
[ "def", "text_to_segments", "(", "text", ")", ":", "lines", "=", "text", ".", "replace", "(", "' '", ",", "' \\xa0'", ")", ".", "splitlines", "(", ")", "if", "(", "not", "lines", ")", ":", "return", "[", "]", "segments", "=", "[", "]", "for", "line...
create list of message segments from text .
train
false
18,435
def averageOnTime(vectors, numSamples=None): if (vectors.ndim == 1): vectors.shape = ((-1), 1) numTimeSteps = len(vectors) numElements = len(vectors[0]) if (numSamples is None): numSamples = numElements countOn = range(numElements) else: countOn = numpy.random.randint(0, numElements, numSamples) sumOfLengths = 0.0 onTimeFreqCounts = None n = 0 for i in countOn: (onTime, segments, durations) = _listOfOnTimesInVec(vectors[:, i]) if (onTime != 0.0): sumOfLengths += onTime n += segments onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts) if (n > 0): return ((sumOfLengths / n), onTimeFreqCounts) else: return (0.0, onTimeFreqCounts)
[ "def", "averageOnTime", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "if", "(", "vectors", ".", "ndim", "==", "1", ")", ":", "vectors", ".", "shape", "=", "(", "(", "-", "1", ")", ",", "1", ")", "numTimeSteps", "=", "len", "(", "vect...
returns the average on-time .
train
true
18,436
def _mkanchors(ws, hs, x_ctr, y_ctr): ws = ws[:, np.newaxis] hs = hs[:, np.newaxis] anchors = np.hstack(((x_ctr - (0.5 * (ws - 1))), (y_ctr - (0.5 * (hs - 1))), (x_ctr + (0.5 * (ws - 1))), (y_ctr + (0.5 * (hs - 1))))) return anchors
[ "def", "_mkanchors", "(", "ws", ",", "hs", ",", "x_ctr", ",", "y_ctr", ")", ":", "ws", "=", "ws", "[", ":", ",", "np", ".", "newaxis", "]", "hs", "=", "hs", "[", ":", ",", "np", ".", "newaxis", "]", "anchors", "=", "np", ".", "hstack", "(", ...
given a vector of widths and heights around a center .
train
true
18,437
def _decode_string_escape_py2(str_): return str_.decode('string_escape')
[ "def", "_decode_string_escape_py2", "(", "str_", ")", ":", "return", "str_", ".", "decode", "(", "'string_escape'", ")" ]
python2 string escape do not use directly .
train
false
18,438
def setConnection(uri, tables, encoding='utf8', debug=False): kw = {} _uriLower = uri.lower() if _uriLower.startswith('mysql'): kw['use_unicode'] = 1 kw['charset'] = encoding kw['local_infile'] = 1 conn = connectionForURI(uri, **kw) conn.debug = debug for table in tables: table.setConnection(conn) table._cacheValue = False conn.paramstyle = conn.module.paramstyle return conn
[ "def", "setConnection", "(", "uri", ",", "tables", ",", "encoding", "=", "'utf8'", ",", "debug", "=", "False", ")", ":", "kw", "=", "{", "}", "_uriLower", "=", "uri", ".", "lower", "(", ")", "if", "_uriLower", ".", "startswith", "(", "'mysql'", ")", ...
set connection for every table .
train
false
18,440
def libvlc_audio_equalizer_set_amp_at_index(p_equalizer, f_amp, u_band): f = (_Cfunctions.get('libvlc_audio_equalizer_set_amp_at_index', None) or _Cfunction('libvlc_audio_equalizer_set_amp_at_index', ((1,), (1,), (1,)), None, ctypes.c_int, ctypes.c_void_p, ctypes.c_float, ctypes.c_uint)) return f(p_equalizer, f_amp, u_band)
[ "def", "libvlc_audio_equalizer_set_amp_at_index", "(", "p_equalizer", ",", "f_amp", ",", "u_band", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_audio_equalizer_set_amp_at_index'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_equalizer...
set a new amplification value for a particular equalizer frequency band .
train
true
18,441
def createCertRequest(pkey, digest='md5', **name): req = crypto.X509Req() subj = req.get_subject() for (key, value) in name.items(): setattr(subj, key, value) req.set_pubkey(pkey) req.sign(pkey, digest) return req
[ "def", "createCertRequest", "(", "pkey", ",", "digest", "=", "'md5'", ",", "**", "name", ")", ":", "req", "=", "crypto", ".", "X509Req", "(", ")", "subj", "=", "req", ".", "get_subject", "(", ")", "for", "(", "key", ",", "value", ")", "in", "name",...
create a certificate request .
train
true
18,443
@csrf_exempt def openid_login_complete(request, redirect_field_name=REDIRECT_FIELD_NAME, render_failure=None): render_failure = (render_failure or default_render_failure) openid_response = openid_views.parse_openid_response(request) if (not openid_response): return render_failure(request, 'This is an OpenID relying party endpoint.') if (openid_response.status == SUCCESS): external_id = openid_response.identity_url oid_backend = openid_auth.OpenIDBackend() details = oid_backend._extract_user_details(openid_response) log.debug('openid success, details=%s', details) url = getattr(settings, 'OPENID_SSO_SERVER_URL', None) external_domain = '{0}{1}'.format(OPENID_DOMAIN_PREFIX, url) fullname = ('%s %s' % (details.get('first_name', ''), details.get('last_name', ''))) return _external_login_or_signup(request, external_id, external_domain, details, details.get('email', ''), fullname, retfun=functools.partial(redirect, get_next_url_for_login_page(request))) return render_failure(request, 'Openid failure')
[ "@", "csrf_exempt", "def", "openid_login_complete", "(", "request", ",", "redirect_field_name", "=", "REDIRECT_FIELD_NAME", ",", "render_failure", "=", "None", ")", ":", "render_failure", "=", "(", "render_failure", "or", "default_render_failure", ")", "openid_response"...
complete the openid login process .
train
false
18,444
def from_key_val_list(value): if (value is None): return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value)
[ "def", "from_key_val_list", "(", "value", ")", ":", "if", "(", "value", "is", "None", ")", ":", "return", "None", "if", "isinstance", "(", "value", ",", "(", "str", ",", "bytes", ",", "bool", ",", "int", ")", ")", ":", "raise", "ValueError", "(", "...
take an object and test to see if it can be represented as a dictionary .
train
true
18,445
def floatformat(text, arg=(-1)): try: input_val = force_unicode(text) d = Decimal(input_val) except UnicodeEncodeError: return u'' except InvalidOperation: if (input_val in special_floats): return input_val try: d = Decimal(force_unicode(float(text))) except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError): return u'' try: p = int(arg) except ValueError: return input_val try: m = (int(d) - d) except (ValueError, OverflowError, InvalidOperation): return input_val if ((not m) and (p < 0)): return mark_safe(formats.number_format((u'%d' % int(d)), 0)) if (p == 0): exp = Decimal(1) else: exp = (Decimal('1.0') / (Decimal(10) ** abs(p))) try: return mark_safe(formats.number_format((u'%s' % str(d.quantize(exp, ROUND_HALF_UP))), abs(p))) except InvalidOperation: return input_val
[ "def", "floatformat", "(", "text", ",", "arg", "=", "(", "-", "1", ")", ")", ":", "try", ":", "input_val", "=", "force_unicode", "(", "text", ")", "d", "=", "Decimal", "(", "input_val", ")", "except", "UnicodeEncodeError", ":", "return", "u''", "except...
if called without an argument .
train
false
18,446
def parse_ntlm_chal(msg2, ack): global challenge_acks Signature = msg2[0:8] try: msg_type = struct.unpack('<I', msg2[8:12])[0] except Exception: return assert (msg_type == 2) ServerChallenge = msg2[24:32].encode('hex') if (len(challenge_acks) > 50): challenge_acks.popitem(last=False) challenge_acks[ack] = ServerChallenge
[ "def", "parse_ntlm_chal", "(", "msg2", ",", "ack", ")", ":", "global", "challenge_acks", "Signature", "=", "msg2", "[", "0", ":", "8", "]", "try", ":", "msg_type", "=", "struct", ".", "unpack", "(", "'<I'", ",", "msg2", "[", "8", ":", "12", "]", ")...
parse server challenge .
train
false
18,447
def default_category_for_module(module): if isinstance(module, str): module = __import__(module, fromlist=['']) name = module.__name__.rsplit('.', 1)[(-1)] qualified_name = module.__name__ return CategoryDescription(name=name, qualified_name=qualified_name)
[ "def", "default_category_for_module", "(", "module", ")", ":", "if", "isinstance", "(", "module", ",", "str", ")", ":", "module", "=", "__import__", "(", "module", ",", "fromlist", "=", "[", "''", "]", ")", "name", "=", "module", ".", "__name__", ".", ...
return a default constructed :class:categorydescription for a module .
train
false
18,448
def cross_domain(origin_check=is_trusted_origin, **options): def cross_domain_wrap(fn): cors_perms = {'origin_check': origin_check, 'allow_credentials': bool(options.get('allow_credentials'))} @wraps(fn) def cross_domain_handler(self, *args, **kwargs): if (request.params.get('hoist') == 'cookie'): if cors_perms['origin_check'](g.origin): name = request.environ['pylons.routes_dict']['action_name'] resp = fn(self, *args, **kwargs) c.cookies.add(('hoist_%s' % name), ''.join(tup(resp))) response.content_type = 'text/html' return '' else: abort(403) else: self.check_cors() return fn(self, *args, **kwargs) cross_domain_handler.cors_perms = cors_perms return cross_domain_handler return cross_domain_wrap
[ "def", "cross_domain", "(", "origin_check", "=", "is_trusted_origin", ",", "**", "options", ")", ":", "def", "cross_domain_wrap", "(", "fn", ")", ":", "cors_perms", "=", "{", "'origin_check'", ":", "origin_check", ",", "'allow_credentials'", ":", "bool", "(", ...
set up cross domain validation and hoisting for a request handler .
train
false
18,449
def IdToCounter(k): if (k > _MAX_SCATTERED_ID): return (0, SCATTERED) elif ((k > _MAX_SEQUENTIAL_ID) and (k <= _MAX_SCATTERED_ID)): return (long((ReverseBitsInt64(k) >> _SCATTER_SHIFT)), SCATTERED) elif (k > 0): return (long(k), SEQUENTIAL) else: raise datastore_errors.BadArgumentError(('invalid id (%d)' % k))
[ "def", "IdToCounter", "(", "k", ")", ":", "if", "(", "k", ">", "_MAX_SCATTERED_ID", ")", ":", "return", "(", "0", ",", "SCATTERED", ")", "elif", "(", "(", "k", ">", "_MAX_SEQUENTIAL_ID", ")", "and", "(", "k", "<=", "_MAX_SCATTERED_ID", ")", ")", ":",...
map id k to the counter value from which it was generated .
train
false
18,450
def get_ofp_module(ofp_version): return get_ofp_modules()[ofp_version]
[ "def", "get_ofp_module", "(", "ofp_version", ")", ":", "return", "get_ofp_modules", "(", ")", "[", "ofp_version", "]" ]
get modules pair for the constants and parser of of-wire of a given of version .
train
false
18,451
def pseudorandom(n, p, random_state=None): import numpy as np p = list(p) cp = np.cumsum(([0] + p)) assert np.allclose(1, cp[(-1)]) assert (len(p) < 256) if (not isinstance(random_state, np.random.RandomState)): random_state = np.random.RandomState(random_state) x = random_state.random_sample(n) out = np.empty(n, dtype='i1') for (i, (low, high)) in enumerate(zip(cp[:(-1)], cp[1:])): out[((x >= low) & (x < high))] = i return out
[ "def", "pseudorandom", "(", "n", ",", "p", ",", "random_state", "=", "None", ")", ":", "import", "numpy", "as", "np", "p", "=", "list", "(", "p", ")", "cp", "=", "np", ".", "cumsum", "(", "(", "[", "0", "]", "+", "p", ")", ")", "assert", "np"...
pseudorandom array of integer indexes .
train
false
18,452
def defer_succeed(result): d = defer.Deferred() reactor.callLater(0, d.callback, result) return d
[ "def", "defer_succeed", "(", "result", ")", ":", "d", "=", "defer", ".", "Deferred", "(", ")", "reactor", ".", "callLater", "(", "0", ",", "d", ".", "callback", ",", "result", ")", "return", "d" ]
same as twisted .
train
false
18,453
def pkcs_ilen(n): i = 0 while (n > 0): n >>= 8 i += 1 return i
[ "def", "pkcs_ilen", "(", "n", ")", ":", "i", "=", "0", "while", "(", "n", ">", "0", ")", ":", "n", ">>=", "8", "i", "+=", "1", "return", "i" ]
this is a log base 256 which determines the minimum octet string length for unequivocal representation of integer n by pkcs_i2osp .
train
false
18,454
def transferClosestPaths(oldOrderedLocation, remainingPaths, skein): while (len(remainingPaths) > 0): transferClosestPath(oldOrderedLocation, remainingPaths, skein)
[ "def", "transferClosestPaths", "(", "oldOrderedLocation", ",", "remainingPaths", ",", "skein", ")", ":", "while", "(", "len", "(", "remainingPaths", ")", ">", "0", ")", ":", "transferClosestPath", "(", "oldOrderedLocation", ",", "remainingPaths", ",", "skein", "...
transfer the closest remaining paths .
train
false
18,455
def test_transforms(): xfm = np.random.randn(4, 4).astype(np.float32) new_xfm = xfm.dot(rotate(180, (1, 0, 0)).dot(rotate((-90), (0, 1, 0)))) new_xfm = new_xfm.dot(rotate(90, (0, 0, 1)).dot(rotate(90, (0, 1, 0)))) new_xfm = new_xfm.dot(rotate(90, (1, 0, 0))) assert_allclose(xfm, new_xfm) new_xfm = translate((1, (-1), 1)).dot(translate(((-1), 1, (-1)))).dot(xfm) assert_allclose(xfm, new_xfm) new_xfm = scale((1, 2, 3)).dot(scale((1, (1.0 / 2.0), (1.0 / 3.0)))).dot(xfm) assert_allclose(xfm, new_xfm) xfm = ortho((-1), 1, (-1), 1, (-1), 1) assert_equal(xfm.shape, (4, 4)) xfm = frustum((-1), 1, (-1), 1, (-1), 1) assert_equal(xfm.shape, (4, 4)) xfm = perspective(1, 1, (-1), 1) assert_equal(xfm.shape, (4, 4))
[ "def", "test_transforms", "(", ")", ":", "xfm", "=", "np", ".", "random", ".", "randn", "(", "4", ",", "4", ")", ".", "astype", "(", "np", ".", "float32", ")", "new_xfm", "=", "xfm", ".", "dot", "(", "rotate", "(", "180", ",", "(", "1", ",", ...
test transformations .
train
false
18,457
def FormatForAX(text): return ExpandTabs(AddCR(text))
[ "def", "FormatForAX", "(", "text", ")", ":", "return", "ExpandTabs", "(", "AddCR", "(", "text", ")", ")" ]
format a string suitable for an ax host .
train
false
18,460
def interface_ip(iface): (iface_info, error) = _get_iface_info(iface) if (error is False): return iface_info.get(iface, {}).get('inet', {})[0].get('address', '') else: return error
[ "def", "interface_ip", "(", "iface", ")", ":", "(", "iface_info", ",", "error", ")", "=", "_get_iface_info", "(", "iface", ")", "if", "(", "error", "is", "False", ")", ":", "return", "iface_info", ".", "get", "(", "iface", ",", "{", "}", ")", ".", ...
return the inet address for a given interface .
train
true
18,461
def importTrialTypes(fileName, returnFieldNames=False): logging.warning('importTrialTypes is DEPRECATED (as of v1.70.00). Please use `importConditions` for identical functionality.') return importConditions(fileName, returnFieldNames)
[ "def", "importTrialTypes", "(", "fileName", ",", "returnFieldNames", "=", "False", ")", ":", "logging", ".", "warning", "(", "'importTrialTypes is DEPRECATED (as of v1.70.00). Please use `importConditions` for identical functionality.'", ")", "return", "importConditions", "(", ...
importtrialtypes is deprecated please use importconditions for identical functionality .
train
false
18,464
def AceIterator(handle): for ace_contig in Ace.parse(handle): consensus_seq_str = ace_contig.sequence if ('U' in consensus_seq_str): if ('T' in consensus_seq_str): alpha = generic_nucleotide else: alpha = generic_rna else: alpha = generic_dna if ('*' in consensus_seq_str): assert ('-' not in consensus_seq_str) consensus_seq = Seq(consensus_seq_str.replace('*', '-'), Gapped(alpha, gap_char='-')) else: consensus_seq = Seq(consensus_seq_str, alpha) seq_record = SeqRecord(consensus_seq, id=ace_contig.name, name=ace_contig.name) quals = [] i = 0 for base in consensus_seq: if (base == '-'): quals.append(0) else: quals.append(ace_contig.quality[i]) i += 1 assert (i == len(ace_contig.quality)) seq_record.letter_annotations['phred_quality'] = quals (yield seq_record)
[ "def", "AceIterator", "(", "handle", ")", ":", "for", "ace_contig", "in", "Ace", ".", "parse", "(", "handle", ")", ":", "consensus_seq_str", "=", "ace_contig", ".", "sequence", "if", "(", "'U'", "in", "consensus_seq_str", ")", ":", "if", "(", "'T'", "in"...
returns seqrecord objects from an ace file .
train
false
18,466
def getMinimumFromVec3List(vec3List): minimum = complex(999999999.0, 999999999.0) for point in vec3List: minimum = getMinimum(minimum, point.dropAxis(2)) return minimum
[ "def", "getMinimumFromVec3List", "(", "vec3List", ")", ":", "minimum", "=", "complex", "(", "999999999.0", ",", "999999999.0", ")", "for", "point", "in", "vec3List", ":", "minimum", "=", "getMinimum", "(", "minimum", ",", "point", ".", "dropAxis", "(", "2", ...
get a complex with each component the minimum of the respective components of a list of vector3s .
train
false
18,468
def get_cpu_info(): try: with open(CPU_INFO_PATH) as fp: content = fp.read() except IOError: return {} lines = content.split('\n') result = [] item = None lines_count = len(lines) for (index, line) in enumerate(lines): line = line.strip() if (not line): if (item and (index != lines_count)): result.append(item) continue split = line.split(':') if (len(split) != 2): continue name = split[0].replace(' DCTB ', '').strip().replace(' ', '_') value = split[1].replace(' DCTB ', '').strip() if (name == 'processor'): item = {} item[name] = value return result
[ "def", "get_cpu_info", "(", ")", ":", "try", ":", "with", "open", "(", "CPU_INFO_PATH", ")", "as", "fp", ":", "content", "=", "fp", ".", "read", "(", ")", "except", "IOError", ":", "return", "{", "}", "lines", "=", "content", ".", "split", "(", "'\...
retrieve cpu information .
train
false
18,469
def group_status(): return s3_rest_controller()
[ "def", "group_status", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
group statuses: restful crud controller .
train
false
18,470
def get_all_alarms(region=None, prefix=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) alarms = conn.describe_alarms() results = odict.OrderedDict() for alarm in alarms: alarm = _metric_alarm_to_dict(alarm) name = alarm['name'] if prefix: if name.startswith(prefix): continue name = (prefix + alarm['name']) del alarm['name'] alarm_sls = [] alarm_sls.append({'name': name}) alarm_sls.append({'attributes': alarm}) results[('manage alarm ' + name)] = {'boto_cloudwatch_alarm.present': alarm_sls} return _safe_dump(results)
[ "def", "get_all_alarms", "(", "region", "=", "None", ",", "prefix", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "ke...
get all alarm details .
train
true
18,471
@register.filter(name='widget_type') def widget_type(field): try: return field.field.widget.__class__.__name__.lower() except AttributeError: return None
[ "@", "register", ".", "filter", "(", "name", "=", "'widget_type'", ")", "def", "widget_type", "(", "field", ")", ":", "try", ":", "return", "field", ".", "field", ".", "widget", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "except", "Attr...
return the widget type .
train
false
18,473
def str_endswith(arr, pat, na=np.nan): f = (lambda x: x.endswith(pat)) return _na_map(f, arr, na, dtype=bool)
[ "def", "str_endswith", "(", "arr", ",", "pat", ",", "na", "=", "np", ".", "nan", ")", ":", "f", "=", "(", "lambda", "x", ":", "x", ".", "endswith", "(", "pat", ")", ")", "return", "_na_map", "(", "f", ",", "arr", ",", "na", ",", "dtype", "=",...
return boolean series indicating whether each string in the series/index ends with passed pattern .
train
true
18,475
def message_get(context, message_id): return IMPL.message_get(context, message_id)
[ "def", "message_get", "(", "context", ",", "message_id", ")", ":", "return", "IMPL", ".", "message_get", "(", "context", ",", "message_id", ")" ]
return a message with the specified id .
train
false
18,476
def single_source_shortest_path(G, source, cutoff=None): if (source not in G): raise nx.NodeNotFound('Source {} not in G'.format(source)) level = 0 nextlevel = {source: 1} paths = {source: [source]} if (cutoff == 0): return paths while nextlevel: thislevel = nextlevel nextlevel = {} for v in thislevel: for w in G[v]: if (w not in paths): paths[w] = (paths[v] + [w]) nextlevel[w] = 1 level = (level + 1) if ((cutoff is not None) and (cutoff <= level)): break return paths
[ "def", "single_source_shortest_path", "(", "G", ",", "source", ",", "cutoff", "=", "None", ")", ":", "if", "(", "source", "not", "in", "G", ")", ":", "raise", "nx", ".", "NodeNotFound", "(", "'Source {} not in G'", ".", "format", "(", "source", ")", ")",...
compute shortest path between source and all other nodes reachable from source .
train
false
18,478
def check_cdf_slope(cdf): norm_intensity = np.linspace(0, 1, len(cdf)) (slope, intercept) = np.polyfit(norm_intensity, cdf, 1) assert (0.9 < slope < 1.1)
[ "def", "check_cdf_slope", "(", "cdf", ")", ":", "norm_intensity", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "len", "(", "cdf", ")", ")", "(", "slope", ",", "intercept", ")", "=", "np", ".", "polyfit", "(", "norm_intensity", ",", "cdf", "...
slope of cdf which should equal 1 for an equalized histogram .
train
false
18,479
def _ExpandDirectories(filenames): expanded = set() for filename in filenames: if (not os.path.isdir(filename)): expanded.add(filename) continue for (root, _, files) in os.walk(filename): for loopfile in files: fullname = os.path.join(root, loopfile) if fullname.startswith(('.' + os.path.sep)): fullname = fullname[len(('.' + os.path.sep)):] expanded.add(fullname) filtered = [] for filename in expanded: if (os.path.splitext(filename)[1][1:] in GetAllExtensions()): filtered.append(filename) return filtered
[ "def", "_ExpandDirectories", "(", "filenames", ")", ":", "expanded", "=", "set", "(", ")", "for", "filename", "in", "filenames", ":", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "filename", ")", ")", ":", "expanded", ".", "add", "(", "fil...
searches a list of filenames and replaces directories in the list with all files descending from those directories .
train
true
18,480
def strip_whitespace(tokens): for (i, token) in enumerate(tokens): if (token.type != u'S'): break else: return [] tokens = tokens[i:] while (tokens and (tokens[(-1)].type == u'S')): tokens.pop() return tokens
[ "def", "strip_whitespace", "(", "tokens", ")", ":", "for", "(", "i", ",", "token", ")", "in", "enumerate", "(", "tokens", ")", ":", "if", "(", "token", ".", "type", "!=", "u'S'", ")", ":", "break", "else", ":", "return", "[", "]", "tokens", "=", ...
remove whitespace from a text or byte string .
train
false
18,481
def convert_missing_indexer(indexer): if isinstance(indexer, dict): indexer = indexer['key'] if isinstance(indexer, bool): raise KeyError('cannot use a single bool to index into setitem') return (indexer, True) return (indexer, False)
[ "def", "convert_missing_indexer", "(", "indexer", ")", ":", "if", "isinstance", "(", "indexer", ",", "dict", ")", ":", "indexer", "=", "indexer", "[", "'key'", "]", "if", "isinstance", "(", "indexer", ",", "bool", ")", ":", "raise", "KeyError", "(", "'ca...
reverse convert a missing indexer .
train
true