repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/sandbox.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/setuptools/sandbox.py#L70-L84
def run(self, func): """Run 'func' under os sandboxing""" try: self._copy(self) if _file: __builtin__.file = self._file __builtin__.open = self._open self._active = True return func() finally: self._active = False if _file: __builtin__.file = _file __builtin__.open = _open self._copy(_os)
[ "def", "run", "(", "self", ",", "func", ")", ":", "try", ":", "self", ".", "_copy", "(", "self", ")", "if", "_file", ":", "__builtin__", ".", "file", "=", "self", ".", "_file", "__builtin__", ".", "open", "=", "self", ".", "_open", "self", ".", "...
Run 'func' under os sandboxing
[ "Run", "func", "under", "os", "sandboxing" ]
python
test
dacut/python-aws-sig
awssig/sigv4.py
https://github.com/dacut/python-aws-sig/blob/7f6054dca4b32e67ca3d39db31c1b4be5efe54bd/awssig/sigv4.py#L338-L352
def expected_signature(self): """ The AWS SigV4 signature expected from the request. """ k_secret = b"AWS4" + self.key_mapping[self.access_key].encode("utf-8") k_date = hmac.new(k_secret, self.request_date.encode("utf-8"), sha256).digest() k_region = hmac.new(k_date, self.region.encode("utf-8"), sha256).digest() k_service = hmac.new(k_region, self.service.encode("utf-8"), sha256).digest() k_signing = hmac.new(k_service, _aws4_request_bytes, sha256).digest() return hmac.new(k_signing, self.string_to_sign.encode("utf-8"), sha256).hexdigest()
[ "def", "expected_signature", "(", "self", ")", ":", "k_secret", "=", "b\"AWS4\"", "+", "self", ".", "key_mapping", "[", "self", ".", "access_key", "]", ".", "encode", "(", "\"utf-8\"", ")", "k_date", "=", "hmac", ".", "new", "(", "k_secret", ",", "self",...
The AWS SigV4 signature expected from the request.
[ "The", "AWS", "SigV4", "signature", "expected", "from", "the", "request", "." ]
python
train
fastai/fastai
fastai/torch_core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L136-L139
def trainable_params(m:nn.Module)->ParamList: "Return list of trainable params in `m`." res = filter(lambda p: p.requires_grad, m.parameters()) return res
[ "def", "trainable_params", "(", "m", ":", "nn", ".", "Module", ")", "->", "ParamList", ":", "res", "=", "filter", "(", "lambda", "p", ":", "p", ".", "requires_grad", ",", "m", ".", "parameters", "(", ")", ")", "return", "res" ]
Return list of trainable params in `m`.
[ "Return", "list", "of", "trainable", "params", "in", "m", "." ]
python
train
openpermissions/perch
perch/model.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L321-L337
def _save(self): """ Save the resource It's better to use the create, updated & delete methods intsead of modifying an instance and calling save, because then we call the can_create, can_update & can_delete methods to check whether a user is permitted to make the changes. """ yield self.validate() db = self.db_client() saved = yield db.save_doc(self._resource) # Allow couch to create Document IDs if '_id' not in self._resource: self._resource['_id'] = saved['id']
[ "def", "_save", "(", "self", ")", ":", "yield", "self", ".", "validate", "(", ")", "db", "=", "self", ".", "db_client", "(", ")", "saved", "=", "yield", "db", ".", "save_doc", "(", "self", ".", "_resource", ")", "# Allow couch to create Document IDs", "i...
Save the resource It's better to use the create, updated & delete methods intsead of modifying an instance and calling save, because then we call the can_create, can_update & can_delete methods to check whether a user is permitted to make the changes.
[ "Save", "the", "resource" ]
python
train
google/grr
grr/server/grr_response_server/flows/general/filesystem.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/filesystem.py#L753-L812
def ConvertGlobIntoPathComponents(self, pattern): r"""Converts a glob pattern into a list of pathspec components. Wildcards are also converted to regular expressions. The pathspec components do not span directories, and are marked as a regex or a literal component. We also support recursion into directories using the ** notation. For example, /home/**2/foo.txt will find all files named foo.txt recursed 2 directories deep. If the directory depth is omitted, it defaults to 3. Example: /home/test/* -> ['home', 'test', '.*\\Z(?ms)'] Args: pattern: A glob expression with wildcards. Returns: A list of PathSpec instances for each component. Raises: ValueError: If the glob is invalid. """ components = [] for path_component in pattern.split("/"): # A ** in the path component means recurse into directories that match the # pattern. m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component) if m: path_component = path_component.replace(m.group(0), "*") component = rdf_paths.PathSpec( path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.RECURSIVE) # Allow the user to override the recursion depth. if m.group(1): component.recursion_depth = int(m.group(1)) elif self.GLOB_MAGIC_CHECK.search(path_component): component = rdf_paths.PathSpec( path=fnmatch.translate(path_component), pathtype=self.state.pathtype, path_options=rdf_paths.PathSpec.Options.REGEX) else: pathtype = self.state.pathtype # TODO(amoser): This is a backwards compatibility hack. Remove when # all clients reach 3.0.0.2. if (pathtype == rdf_paths.PathSpec.PathType.TSK and re.match("^.:$", path_component)): path_component = "%s\\" % path_component component = rdf_paths.PathSpec( path=path_component, pathtype=pathtype, path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE) components.append(component) return components
[ "def", "ConvertGlobIntoPathComponents", "(", "self", ",", "pattern", ")", ":", "components", "=", "[", "]", "for", "path_component", "in", "pattern", ".", "split", "(", "\"/\"", ")", ":", "# A ** in the path component means recurse into directories that match the", "# p...
r"""Converts a glob pattern into a list of pathspec components. Wildcards are also converted to regular expressions. The pathspec components do not span directories, and are marked as a regex or a literal component. We also support recursion into directories using the ** notation. For example, /home/**2/foo.txt will find all files named foo.txt recursed 2 directories deep. If the directory depth is omitted, it defaults to 3. Example: /home/test/* -> ['home', 'test', '.*\\Z(?ms)'] Args: pattern: A glob expression with wildcards. Returns: A list of PathSpec instances for each component. Raises: ValueError: If the glob is invalid.
[ "r", "Converts", "a", "glob", "pattern", "into", "a", "list", "of", "pathspec", "components", "." ]
python
train
enricobacis/limit
limit/limit.py
https://github.com/enricobacis/limit/blob/9bf4f7842990687f605413b7c606dbfd280749ef/limit/limit.py#L5-L31
def limit(limit, every=1): """This decorator factory creates a decorator that can be applied to functions in order to limit the rate the function can be invoked. The rate is `limit` over `every`, where limit is the number of invocation allowed every `every` seconds. limit(4, 60) creates a decorator that limit the function calls to 4 per minute. If not specified, every defaults to 1 second.""" def limitdecorator(fn): """This is the actual decorator that performs the rate-limiting.""" semaphore = _threading.Semaphore(limit) @_functools.wraps(fn) def wrapper(*args, **kwargs): semaphore.acquire() try: return fn(*args, **kwargs) finally: # ensure semaphore release timer = _threading.Timer(every, semaphore.release) timer.setDaemon(True) # allows the timer to be canceled on exit timer.start() return wrapper return limitdecorator
[ "def", "limit", "(", "limit", ",", "every", "=", "1", ")", ":", "def", "limitdecorator", "(", "fn", ")", ":", "\"\"\"This is the actual decorator that performs the rate-limiting.\"\"\"", "semaphore", "=", "_threading", ".", "Semaphore", "(", "limit", ")", "@", "_f...
This decorator factory creates a decorator that can be applied to functions in order to limit the rate the function can be invoked. The rate is `limit` over `every`, where limit is the number of invocation allowed every `every` seconds. limit(4, 60) creates a decorator that limit the function calls to 4 per minute. If not specified, every defaults to 1 second.
[ "This", "decorator", "factory", "creates", "a", "decorator", "that", "can", "be", "applied", "to", "functions", "in", "order", "to", "limit", "the", "rate", "the", "function", "can", "be", "invoked", ".", "The", "rate", "is", "limit", "over", "every", "whe...
python
train
mozilla-b2g/fxos-certsuite
mcts/securitysuite/ssl.py
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/securitysuite/ssl.py#L203-L265
def to_ints(version): """ Turn version string into a numeric representation for easy comparison. Undeclared point versions are assumed to be 0. :param version: a NSS version string :return: array of [major, minor, point, pointpoint, tag value] """ # Example strings: "NSS_3_7_9_RTM", "NSS_3_6_BRANCH_20021026", "NSS_3_6_BETA2", # "3.18 Basic ECC Beta", "3.16.5 Basic ECC" # normalize version strings norm_version = version.replace('NSS_', '').replace('.', '_').replace(' ', '_').upper().split('_') # Asserting minimumum length of 3 as in [major,minor,tag] assert len(norm_version) >= 3 # Asserting the first two fields are numeric major and minor assert norm_version[0].isdigit() and norm_version[1].isdigit() # Asserting last field is always a non-numeric tag or a date tag # CAVE: fails with obscure date dags like certdata.txt-NSS_3_4_20020403_2 assert not norm_version[-1].isdigit() or len(norm_version[-1]) > 2 # fill in missing point and pointpoint versions if not (norm_version[2].isdigit() and len(norm_version[2]) < 4): # <4 to distinguish from numeric date tags norm_version.insert(2, "0") if not (norm_version[3].isdigit() and len(norm_version[3]) < 4): norm_version.insert(3, "0") # Strictly ordering by RTM > RC > BETA > * # CAVE: Order rule may result in bogus sorting of obscure tags (WITH_CBKI*, TPATCH*, BRANCHPOINT*, ...) # Recent versions are tagged non-obscurely and consistently tag_value = 0 for v in norm_version[4:]: if v.startswith('BETA'): tag_value = 100 if len(v[4:]) == 1 or len(v[4:]) == 2: try: tag_value += int(v[4:]) except ValueError: pass for v in norm_version[4:]: if v.startswith('RC'): tag_value = 200 if len(v[3:]) == 1 or len(v[3:]) == 2: try: tag_value += int(v[3:]) except ValueError: pass for v in norm_version[4:]: if v == 'RTM': tag_value = 300 # Special case: "x.y.z Basic ECC" is counted as RTM # TODO: research the set of potential version string formats reported by libnss. 
if norm_version[-2] == 'BASIC' and norm_version[-1] == 'ECC' and norm_version[-3].isdigit(): tag_value = 300 major, minor, point, pointpoint = (int(x) for x in norm_version[:4]) return [major, minor, point, pointpoint, tag_value]
[ "def", "to_ints", "(", "version", ")", ":", "# Example strings: \"NSS_3_7_9_RTM\", \"NSS_3_6_BRANCH_20021026\", \"NSS_3_6_BETA2\",", "# \"3.18 Basic ECC Beta\", \"3.16.5 Basic ECC\"", "# normalize version strings", "norm_version", "=", "version", ".", "replace", "(", "...
Turn version string into a numeric representation for easy comparison. Undeclared point versions are assumed to be 0. :param version: a NSS version string :return: array of [major, minor, point, pointpoint, tag value]
[ "Turn", "version", "string", "into", "a", "numeric", "representation", "for", "easy", "comparison", ".", "Undeclared", "point", "versions", "are", "assumed", "to", "be", "0", ".", ":", "param", "version", ":", "a", "NSS", "version", "string", ":", "return", ...
python
train
clld/pycdstar
src/pycdstar/commands.py
https://github.com/clld/pycdstar/blob/1a225b472c4e6bf9b8078fa3198f939395c53d22/src/pycdstar/commands.py#L24-L34
def c_metadata(api, args, verbose=False): """ Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal. """ obj = api.get_object(args['<URL>'].split('/')[-1]) if not set_metadata(args['<JSON>'], obj): return json.dumps(obj.metadata.read(), indent=4)
[ "def", "c_metadata", "(", "api", ",", "args", ",", "verbose", "=", "False", ")", ":", "obj", "=", "api", ".", "get_object", "(", "args", "[", "'<URL>'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "if", "not", "set_metadata", "("...
Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal.
[ "Set", "or", "get", "metadata", "associated", "with", "an", "object", "::" ]
python
train
KimiNewt/pyshark
src/pyshark/packet/layer.py
https://github.com/KimiNewt/pyshark/blob/089ea6208c4321f03bc548f491e00a053285918f/src/pyshark/packet/layer.py#L53-L64
def get_field(self, name): """ Gets the XML field object of the given name. """ # Quicker in case the exact name was used. field = self._all_fields.get(name) if field is not None: return field for field_name, field in self._all_fields.items(): if self._sanitize_field_name(name) == self._sanitize_field_name(field_name): return field
[ "def", "get_field", "(", "self", ",", "name", ")", ":", "# Quicker in case the exact name was used.", "field", "=", "self", ".", "_all_fields", ".", "get", "(", "name", ")", "if", "field", "is", "not", "None", ":", "return", "field", "for", "field_name", ","...
Gets the XML field object of the given name.
[ "Gets", "the", "XML", "field", "object", "of", "the", "given", "name", "." ]
python
train
kyper-data/python-highcharts
highcharts/highmaps/highmaps.py
https://github.com/kyper-data/python-highcharts/blob/a4c488ae5c2e125616efad5a722f3dfd8a9bc450/highcharts/highmaps/highmaps.py#L226-L237
def add_drilldown_data_set(self, data, series_type, id, **kwargs): """set data for drilldown option in highmaps id must be input and corresponding to drilldown arguments in data series """ self.drilldown_data_set_count += 1 if self.drilldown_flag == False: self.drilldown_flag = True kwargs.update({'id':id}) series_data = Series(data, series_type=series_type, **kwargs) series_data.__options__().update(SeriesOptions(series_type=series_type, **kwargs).__options__()) self.drilldown_data_temp.append(series_data)
[ "def", "add_drilldown_data_set", "(", "self", ",", "data", ",", "series_type", ",", "id", ",", "*", "*", "kwargs", ")", ":", "self", ".", "drilldown_data_set_count", "+=", "1", "if", "self", ".", "drilldown_flag", "==", "False", ":", "self", ".", "drilldow...
set data for drilldown option in highmaps id must be input and corresponding to drilldown arguments in data series
[ "set", "data", "for", "drilldown", "option", "in", "highmaps", "id", "must", "be", "input", "and", "corresponding", "to", "drilldown", "arguments", "in", "data", "series" ]
python
train
mnooner256/pyqrcode
pyqrcode/builder.py
https://github.com/mnooner256/pyqrcode/blob/674a77b5eaf850d063f518bd90c243ee34ad6b5d/pyqrcode/builder.py#L519-L577
def add_detection_pattern(self, m): """This method add the detection patterns to the QR code. This lets the scanner orient the pattern. It is required for all QR codes. The detection pattern consists of three boxes located at the upper left, upper right, and lower left corners of the matrix. Also, two special lines called the timing pattern is also necessary. Finally, a single black pixel is added just above the lower left black box. """ #Draw outer black box for i in range(7): inv = -(i+1) for j in [0,6,-1,-7]: m[j][i] = 1 m[i][j] = 1 m[inv][j] = 1 m[j][inv] = 1 #Draw inner white box for i in range(1, 6): inv = -(i+1) for j in [1, 5, -2, -6]: m[j][i] = 0 m[i][j] = 0 m[inv][j] = 0 m[j][inv] = 0 #Draw inner black box for i in range(2, 5): for j in range(2, 5): inv = -(i+1) m[i][j] = 1 m[inv][j] = 1 m[j][inv] = 1 #Draw white border for i in range(8): inv = -(i+1) for j in [7, -8]: m[i][j] = 0 m[j][i] = 0 m[inv][j] = 0 m[j][inv] = 0 #To keep the code short, it draws an extra box #in the lower right corner, this removes it. for i in range(-8, 0): for j in range(-8, 0): m[i][j] = ' ' #Add the timing pattern bit = itertools.cycle([1,0]) for i in range(8, (len(m)-8)): b = next(bit) m[i][6] = b m[6][i] = b #Add the extra black pixel m[-8][8] = 1
[ "def", "add_detection_pattern", "(", "self", ",", "m", ")", ":", "#Draw outer black box", "for", "i", "in", "range", "(", "7", ")", ":", "inv", "=", "-", "(", "i", "+", "1", ")", "for", "j", "in", "[", "0", ",", "6", ",", "-", "1", ",", "-", ...
This method add the detection patterns to the QR code. This lets the scanner orient the pattern. It is required for all QR codes. The detection pattern consists of three boxes located at the upper left, upper right, and lower left corners of the matrix. Also, two special lines called the timing pattern is also necessary. Finally, a single black pixel is added just above the lower left black box.
[ "This", "method", "add", "the", "detection", "patterns", "to", "the", "QR", "code", ".", "This", "lets", "the", "scanner", "orient", "the", "pattern", ".", "It", "is", "required", "for", "all", "QR", "codes", ".", "The", "detection", "pattern", "consists",...
python
train
saltstack/salt
salt/modules/gpg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L360-L477
def create_key(key_type='RSA', key_length=1024, name_real='Autogenerated Key', name_comment='Generated by SaltStack', name_email=None, subkey_type=None, subkey_length=None, expire_date=None, use_passphrase=False, user=None, gnupghome=None): ''' Create a key in the GPG keychain .. note:: GPG key generation requires *a lot* of entropy and randomness. Difficult to do over a remote connection, consider having another process available which is generating randomness for the machine. Also especially difficult on virtual machines, consider the `rng-tools <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_ package. The create_key process takes awhile so increasing the timeout may be necessary, e.g. -t 15. key_type The type of the primary key to generate. It must be capable of signing. 'RSA' or 'DSA'. key_length The length of the primary key in bits. name_real The real name of the user identity which is represented by the key. name_comment A comment to attach to the user id. name_email An email address for the user. subkey_type The type of the secondary key to generate. subkey_length The length of the secondary key in bits. expire_date The expiration date for the primary and any secondary key. You can specify an ISO date, A number of days/weeks/months/years, an epoch value, or 0 for a non-expiring key. use_passphrase Whether to use a passphrase with the signing key. Passphrase is received from Pillar. user Which user's keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. gnupghome Specify the location where GPG keyring and related files are stored. CLI Example: .. 
code-block:: bash salt -t 15 '*' gpg.create_key ''' ret = { 'res': True, 'fingerprint': '', 'message': '' } create_params = {'key_type': key_type, 'key_length': key_length, 'name_real': name_real, 'name_comment': name_comment, } gpg = _create_gpg(user, gnupghome) if name_email: create_params['name_email'] = name_email if subkey_type: create_params['subkey_type'] = subkey_type if subkey_length: create_params['subkey_length'] = subkey_length if expire_date: create_params['expire_date'] = expire_date if use_passphrase: gpg_passphrase = __salt__['pillar.get']('gpg_passphrase') if not gpg_passphrase: ret['res'] = False ret['message'] = "gpg_passphrase not available in pillar." return ret else: create_params['passphrase'] = gpg_passphrase input_data = gpg.gen_key_input(**create_params) key = gpg.gen_key(input_data) if key.fingerprint: ret['fingerprint'] = key.fingerprint ret['message'] = 'GPG key pair successfully generated.' else: ret['res'] = False ret['message'] = 'Unable to generate GPG key pair.' return ret
[ "def", "create_key", "(", "key_type", "=", "'RSA'", ",", "key_length", "=", "1024", ",", "name_real", "=", "'Autogenerated Key'", ",", "name_comment", "=", "'Generated by SaltStack'", ",", "name_email", "=", "None", ",", "subkey_type", "=", "None", ",", "subkey_...
Create a key in the GPG keychain .. note:: GPG key generation requires *a lot* of entropy and randomness. Difficult to do over a remote connection, consider having another process available which is generating randomness for the machine. Also especially difficult on virtual machines, consider the `rng-tools <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_ package. The create_key process takes awhile so increasing the timeout may be necessary, e.g. -t 15. key_type The type of the primary key to generate. It must be capable of signing. 'RSA' or 'DSA'. key_length The length of the primary key in bits. name_real The real name of the user identity which is represented by the key. name_comment A comment to attach to the user id. name_email An email address for the user. subkey_type The type of the secondary key to generate. subkey_length The length of the secondary key in bits. expire_date The expiration date for the primary and any secondary key. You can specify an ISO date, A number of days/weeks/months/years, an epoch value, or 0 for a non-expiring key. use_passphrase Whether to use a passphrase with the signing key. Passphrase is received from Pillar. user Which user's keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. gnupghome Specify the location where GPG keyring and related files are stored. CLI Example: .. code-block:: bash salt -t 15 '*' gpg.create_key
[ "Create", "a", "key", "in", "the", "GPG", "keychain" ]
python
train
Robpol86/libnl
libnl/genl/ctrl.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/ctrl.py#L65-L74
def ctrl_request_update(_, nl_sock_h): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L37. Positional arguments: nl_sock_h -- nl_sock class instance. Returns: Integer, genl_send_simple() output. """ return int(genl_send_simple(nl_sock_h, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, CTRL_VERSION, NLM_F_DUMP))
[ "def", "ctrl_request_update", "(", "_", ",", "nl_sock_h", ")", ":", "return", "int", "(", "genl_send_simple", "(", "nl_sock_h", ",", "GENL_ID_CTRL", ",", "CTRL_CMD_GETFAMILY", ",", "CTRL_VERSION", ",", "NLM_F_DUMP", ")", ")" ]
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L37. Positional arguments: nl_sock_h -- nl_sock class instance. Returns: Integer, genl_send_simple() output.
[ "https", ":", "//", "github", ".", "com", "/", "thom311", "/", "libnl", "/", "blob", "/", "libnl3_2_25", "/", "lib", "/", "genl", "/", "ctrl", ".", "c#L37", "." ]
python
train
django-blog-zinnia/cmsplugin-zinnia
cmsplugin_zinnia/menu.py
https://github.com/django-blog-zinnia/cmsplugin-zinnia/blob/7613c0d9ae29affe9ab97527e4b6d5bef124afdc/cmsplugin_zinnia/menu.py#L118-L130
def get_nodes(self, request): """ Return menu's node for tags """ nodes = [] nodes.append(NavigationNode(_('Tags'), reverse('zinnia:tag_list'), 'tags')) for tag in tags_published(): nodes.append(NavigationNode(tag.name, reverse('zinnia:tag_detail', args=[tag.name]), tag.pk, 'tags')) return nodes
[ "def", "get_nodes", "(", "self", ",", "request", ")", ":", "nodes", "=", "[", "]", "nodes", ".", "append", "(", "NavigationNode", "(", "_", "(", "'Tags'", ")", ",", "reverse", "(", "'zinnia:tag_list'", ")", ",", "'tags'", ")", ")", "for", "tag", "in"...
Return menu's node for tags
[ "Return", "menu", "s", "node", "for", "tags" ]
python
train
xtuml/pyxtuml
xtuml/load.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/load.py#L539-L549
def p_statement(self, p): ''' statement : create_table_statement SEMICOLON | insert_into_statement SEMICOLON | create_rop_statement SEMICOLON | create_index_statement SEMICOLON ''' p[0] = p[1] p[0].offset = p.lexpos(1) p[0].lineno = p.lineno(1) p[0].filename = p.lexer.filename
[ "def", "p_statement", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "p", "[", "0", "]", ".", "offset", "=", "p", ".", "lexpos", "(", "1", ")", "p", "[", "0", "]", ".", "lineno", "=", "p", ".", "lineno", "...
statement : create_table_statement SEMICOLON | insert_into_statement SEMICOLON | create_rop_statement SEMICOLON | create_index_statement SEMICOLON
[ "statement", ":", "create_table_statement", "SEMICOLON", "|", "insert_into_statement", "SEMICOLON", "|", "create_rop_statement", "SEMICOLON", "|", "create_index_statement", "SEMICOLON" ]
python
test
pybel/pybel
src/pybel/parser/parse_control.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_control.py#L327-L341
def handle_unset_list(self, line: str, position: int, tokens: ParseResults) -> ParseResults: """Handle ``UNSET {A, B, ...}`` or raises an exception of any of them are not present. Consider that all unsets are in peril if just one of them is wrong! :raises: MissingAnnotationKeyWarning """ for key in tokens['values']: if key in {BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT}: self.evidence = None else: self.validate_unset_command(line, position, key) del self.annotations[key] return tokens
[ "def", "handle_unset_list", "(", "self", ",", "line", ":", "str", ",", "position", ":", "int", ",", "tokens", ":", "ParseResults", ")", "->", "ParseResults", ":", "for", "key", "in", "tokens", "[", "'values'", "]", ":", "if", "key", "in", "{", "BEL_KEY...
Handle ``UNSET {A, B, ...}`` or raises an exception of any of them are not present. Consider that all unsets are in peril if just one of them is wrong! :raises: MissingAnnotationKeyWarning
[ "Handle", "UNSET", "{", "A", "B", "...", "}", "or", "raises", "an", "exception", "of", "any", "of", "them", "are", "not", "present", "." ]
python
train
keon/algorithms
algorithms/maths/combination.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/maths/combination.py#L1-L6
def combination(n, r): """This function calculates nCr.""" if n == r or r == 0: return 1 else: return combination(n-1, r-1) + combination(n-1, r)
[ "def", "combination", "(", "n", ",", "r", ")", ":", "if", "n", "==", "r", "or", "r", "==", "0", ":", "return", "1", "else", ":", "return", "combination", "(", "n", "-", "1", ",", "r", "-", "1", ")", "+", "combination", "(", "n", "-", "1", "...
This function calculates nCr.
[ "This", "function", "calculates", "nCr", "." ]
python
train
tBuLi/symfit
symfit/core/fit.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L788-L803
def jacobian(self): """ :return: Jacobian filled with the symbolic expressions for all the partial derivatives. Partial derivatives are of the components of the function with respect to the Parameter's, not the independent Variable's. The return shape is a list over the models components, filled with tha symbolical jacobian for that component, as a list. """ jac = [] for var, expr in self.items(): jac_row = [] for param in self.params: partial_dv = D(var, param) jac_row.append(self.jacobian_model[partial_dv]) jac.append(jac_row) return jac
[ "def", "jacobian", "(", "self", ")", ":", "jac", "=", "[", "]", "for", "var", ",", "expr", "in", "self", ".", "items", "(", ")", ":", "jac_row", "=", "[", "]", "for", "param", "in", "self", ".", "params", ":", "partial_dv", "=", "D", "(", "var"...
:return: Jacobian filled with the symbolic expressions for all the partial derivatives. Partial derivatives are of the components of the function with respect to the Parameter's, not the independent Variable's. The return shape is a list over the models components, filled with tha symbolical jacobian for that component, as a list.
[ ":", "return", ":", "Jacobian", "filled", "with", "the", "symbolic", "expressions", "for", "all", "the", "partial", "derivatives", ".", "Partial", "derivatives", "are", "of", "the", "components", "of", "the", "function", "with", "respect", "to", "the", "Parame...
python
train
spacetelescope/drizzlepac
drizzlepac/catalogs.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/catalogs.py#L732-L749
def plotXYCatalog(self, **kwargs): """ Plots the source catalog positions using matplotlib's `pyplot.plot()` Plotting `kwargs` that can also be passed include any keywords understood by matplotlib's `pyplot.plot()` function such as:: vmin, vmax, cmap, marker """ try: from matplotlib import pyplot as pl except: pl = None if pl is not None: pl.clf() pl.plot(self.xypos[0],self.xypos[1],**kwargs)
[ "def", "plotXYCatalog", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "from", "matplotlib", "import", "pyplot", "as", "pl", "except", ":", "pl", "=", "None", "if", "pl", "is", "not", "None", ":", "pl", ".", "clf", "(", ")", "pl", "....
Plots the source catalog positions using matplotlib's `pyplot.plot()` Plotting `kwargs` that can also be passed include any keywords understood by matplotlib's `pyplot.plot()` function such as:: vmin, vmax, cmap, marker
[ "Plots", "the", "source", "catalog", "positions", "using", "matplotlib", "s", "pyplot", ".", "plot", "()" ]
python
train
ssato/python-anyconfig
src/anyconfig/backend/xml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L410-L445
def container_to_etree(obj, parent=None, to_str=None, **options): """ Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"} """ if to_str is None: to_str = _to_str_fn(**options) if not anyconfig.utils.is_dict_like(obj): if parent is not None and obj: parent.text = to_str(obj) # Parent is a leaf text node. return parent # All attributes and text should be set already. options = _complement_tag_options(options) (attrs, text, children) = operator.itemgetter(*_ATC)(options) for key, val in anyconfig.compat.iteritems(obj): if key == attrs: _elem_set_attrs(val, parent, to_str) elif key == text: parent.text = to_str(val) elif key == children: for celem in _elem_from_descendants(val, **options): parent.append(celem) else: parent = _get_or_update_parent(key, val, to_str, parent=parent, **options) return ET.ElementTree(parent)
[ "def", "container_to_etree", "(", "obj", ",", "parent", "=", "None", ",", "to_str", "=", "None", ",", "*", "*", "options", ")", ":", "if", "to_str", "is", "None", ":", "to_str", "=", "_to_str_fn", "(", "*", "*", "options", ")", "if", "not", "anyconfi...
Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"}
[ "Convert", "a", "dict", "-", "like", "object", "to", "XML", "ElementTree", "." ]
python
train
saltstack/salt
salt/master.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L1638-L1681
def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. :param dict load: The minion payload ''' loads = load.get('load') if not isinstance(loads, list): loads = [load] # support old syndics not aggregating returns for load in loads: # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): continue # if we have a load, save it if load.get('load'): fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Register the syndic syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id']) if not os.path.exists(syndic_cache_path): path_name = os.path.split(syndic_cache_path)[0] if not os.path.exists(path_name): os.makedirs(path_name) with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh: wfh.write('') # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key} ret.update(item) if 'master_id' in load: ret['master_id'] = load['master_id'] if 'fun' in load: ret['fun'] = load['fun'] if 'arg' in load: ret['fun_args'] = load['arg'] if 'out' in load: ret['out'] = load['out'] if 'sig' in load: ret['sig'] = load['sig'] self._return(ret)
[ "def", "_syndic_return", "(", "self", ",", "load", ")", ":", "loads", "=", "load", ".", "get", "(", "'load'", ")", "if", "not", "isinstance", "(", "loads", ",", "list", ")", ":", "loads", "=", "[", "load", "]", "# support old syndics not aggregating return...
Receive a syndic minion return and format it to look like returns from individual minions. :param dict load: The minion payload
[ "Receive", "a", "syndic", "minion", "return", "and", "format", "it", "to", "look", "like", "returns", "from", "individual", "minions", "." ]
python
train
staggerpkg/stagger
stagger/fileutil.py
https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L145-L153
def _copy_chunk(src, dst, length): "Copy length bytes from file src to file dst." BUFSIZE = 128 * 1024 while length > 0: l = min(BUFSIZE, length) buf = src.read(l) assert len(buf) == l dst.write(buf) length -= l
[ "def", "_copy_chunk", "(", "src", ",", "dst", ",", "length", ")", ":", "BUFSIZE", "=", "128", "*", "1024", "while", "length", ">", "0", ":", "l", "=", "min", "(", "BUFSIZE", ",", "length", ")", "buf", "=", "src", ".", "read", "(", "l", ")", "as...
Copy length bytes from file src to file dst.
[ "Copy", "length", "bytes", "from", "file", "src", "to", "file", "dst", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xcalendarwidget/xcalendarscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarscene.py#L538-L553
def setCurrentDate( self, date ): """ Sets the current date displayed by this calendar widget. :return <QDate> """ if ( date == self._currentDate or not date.isValid() ): return self._currentDate = date self.markForRebuild() parent = self.parent() if ( not parent.signalsBlocked() ): parent.currentDateChanged.emit(date) parent.titleChanged.emit(self.title())
[ "def", "setCurrentDate", "(", "self", ",", "date", ")", ":", "if", "(", "date", "==", "self", ".", "_currentDate", "or", "not", "date", ".", "isValid", "(", ")", ")", ":", "return", "self", ".", "_currentDate", "=", "date", "self", ".", "markForRebuild...
Sets the current date displayed by this calendar widget. :return <QDate>
[ "Sets", "the", "current", "date", "displayed", "by", "this", "calendar", "widget", ".", ":", "return", "<QDate", ">" ]
python
train
numenta/nupic
src/nupic/encoders/scalar.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/scalar.py#L691-L711
def closenessScores(self, expValues, actValues, fractional=True): """ See the function description in base.py """ expValue = expValues[0] actValue = actValues[0] if self.periodic: expValue = expValue % self.maxval actValue = actValue % self.maxval err = abs(expValue - actValue) if self.periodic: err = min(err, self.maxval - err) if fractional: pctErr = float(err) / (self.maxval - self.minval) pctErr = min(1.0, pctErr) closeness = 1.0 - pctErr else: closeness = err return numpy.array([closeness])
[ "def", "closenessScores", "(", "self", ",", "expValues", ",", "actValues", ",", "fractional", "=", "True", ")", ":", "expValue", "=", "expValues", "[", "0", "]", "actValue", "=", "actValues", "[", "0", "]", "if", "self", ".", "periodic", ":", "expValue",...
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
python
valid
cloudbase/python-hnvclient
hnv/client.py
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L597-L612
def process_raw_data(cls, raw_data): """Create a new model using raw API response.""" properties = raw_data["properties"] subnetworks = [] for raw_subnet in properties.get("subnets", []): raw_subnet["parentResourceID"] = raw_data["resourceId"] subnetworks.append(LogicalSubnetworks.from_raw_data(raw_subnet)) properties["subnets"] = subnetworks virtual_networks = [] for raw_network in properties.get("virtualNetworks", []): virtual_networks.append(Resource.from_raw_data(raw_network)) properties["virtualNetworks"] = virtual_networks return super(LogicalNetworks, cls).process_raw_data(raw_data)
[ "def", "process_raw_data", "(", "cls", ",", "raw_data", ")", ":", "properties", "=", "raw_data", "[", "\"properties\"", "]", "subnetworks", "=", "[", "]", "for", "raw_subnet", "in", "properties", ".", "get", "(", "\"subnets\"", ",", "[", "]", ")", ":", "...
Create a new model using raw API response.
[ "Create", "a", "new", "model", "using", "raw", "API", "response", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L537-L556
def hex(self): """Return a hexadecimal representation of a BigFloat.""" sign = '-' if self._sign() else '' e = self._exponent() if isinstance(e, six.string_types): return sign + e m = self._significand() _, digits, _ = _mpfr_get_str2( 16, 0, m, ROUND_TIES_TO_EVEN, ) # only print the number of digits that are actually necessary n = 1 + (self.precision - 1) // 4 assert all(c == '0' for c in digits[n:]) result = '%s0x0.%sp%+d' % (sign, digits[:n], e) return result
[ "def", "hex", "(", "self", ")", ":", "sign", "=", "'-'", "if", "self", ".", "_sign", "(", ")", "else", "''", "e", "=", "self", ".", "_exponent", "(", ")", "if", "isinstance", "(", "e", ",", "six", ".", "string_types", ")", ":", "return", "sign", ...
Return a hexadecimal representation of a BigFloat.
[ "Return", "a", "hexadecimal", "representation", "of", "a", "BigFloat", "." ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1885-L1906
def create_asset(self, project, atype=None, shot=None, asset=None): """Create and return a new asset :param project: the project for the asset :type project: :class:`jukeboxcore.djadapter.models.Project` :param atype: the assettype of the asset :type atype: :class:`jukeboxcore.djadapter.models.Atype` :param shot: the shot to add the asset to :type shot: :class:`jukeboxcore.djadapter.models.Shot` :param asset: the asset to add the new asset to :type asset: :class:`jukeboxcore.djadapter.models.Asset` :returns: The created asset or None :rtype: None | :class:`jukeboxcore.djadapter.models.Asset` :raises: None """ element = shot or asset dialog = AssetCreatorDialog(project=project, atype=atype, parent=self) dialog.exec_() asset = dialog.asset if not atype: element.assets.add(asset) return asset
[ "def", "create_asset", "(", "self", ",", "project", ",", "atype", "=", "None", ",", "shot", "=", "None", ",", "asset", "=", "None", ")", ":", "element", "=", "shot", "or", "asset", "dialog", "=", "AssetCreatorDialog", "(", "project", "=", "project", ",...
Create and return a new asset :param project: the project for the asset :type project: :class:`jukeboxcore.djadapter.models.Project` :param atype: the assettype of the asset :type atype: :class:`jukeboxcore.djadapter.models.Atype` :param shot: the shot to add the asset to :type shot: :class:`jukeboxcore.djadapter.models.Shot` :param asset: the asset to add the new asset to :type asset: :class:`jukeboxcore.djadapter.models.Asset` :returns: The created asset or None :rtype: None | :class:`jukeboxcore.djadapter.models.Asset` :raises: None
[ "Create", "and", "return", "a", "new", "asset" ]
python
train
yuma-m/pychord
pychord/analyzer.py
https://github.com/yuma-m/pychord/blob/4aa39189082daae76e36a2701890f91776d86b47/pychord/analyzer.py#L35-L54
def notes_to_positions(notes, root): """ Get notes positions. ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7] :param list[str] notes: list of notes :param str root: the root note :rtype: list[int] :return: list of note positions """ root_pos = note_to_val(root) current_pos = root_pos positions = [] for note in notes: note_pos = note_to_val(note) if note_pos < current_pos: note_pos += 12 * ((current_pos - note_pos) // 12 + 1) positions.append(note_pos - root_pos) current_pos = note_pos return positions
[ "def", "notes_to_positions", "(", "notes", ",", "root", ")", ":", "root_pos", "=", "note_to_val", "(", "root", ")", "current_pos", "=", "root_pos", "positions", "=", "[", "]", "for", "note", "in", "notes", ":", "note_pos", "=", "note_to_val", "(", "note", ...
Get notes positions. ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7] :param list[str] notes: list of notes :param str root: the root note :rtype: list[int] :return: list of note positions
[ "Get", "notes", "positions", "." ]
python
train
openstack/hacking
hacking/checks/python23.py
https://github.com/openstack/hacking/blob/10e58f907181cac91d3b2af422c2458b04a1ec79/hacking/checks/python23.py#L81-L102
def hacking_python3x_print_function(logical_line, noqa): r"""Check that all print occurrences look like print functions. Check that all occurrences of print look like functions, not print operator. As of Python 3.x, the print operator has been removed. Okay: print(msg) Okay: print (msg) Okay: print msg # noqa Okay: print() H233: print msg H233: print >>sys.stderr, "hello" H233: print msg, H233: print """ if noqa: return for match in RE_PRINT.finditer(logical_line): yield match.start(0), ( "H233: Python 3.x incompatible use of print operator")
[ "def", "hacking_python3x_print_function", "(", "logical_line", ",", "noqa", ")", ":", "if", "noqa", ":", "return", "for", "match", "in", "RE_PRINT", ".", "finditer", "(", "logical_line", ")", ":", "yield", "match", ".", "start", "(", "0", ")", ",", "(", ...
r"""Check that all print occurrences look like print functions. Check that all occurrences of print look like functions, not print operator. As of Python 3.x, the print operator has been removed. Okay: print(msg) Okay: print (msg) Okay: print msg # noqa Okay: print() H233: print msg H233: print >>sys.stderr, "hello" H233: print msg, H233: print
[ "r", "Check", "that", "all", "print", "occurrences", "look", "like", "print", "functions", "." ]
python
train
cjdrake/pyeda
pyeda/boolalg/expr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L834-L840
def simplify(self): """Return a simplified expression.""" node = self.node.simplify() if node is self.node: return self else: return _expr(node)
[ "def", "simplify", "(", "self", ")", ":", "node", "=", "self", ".", "node", ".", "simplify", "(", ")", "if", "node", "is", "self", ".", "node", ":", "return", "self", "else", ":", "return", "_expr", "(", "node", ")" ]
Return a simplified expression.
[ "Return", "a", "simplified", "expression", "." ]
python
train
annayqho/TheCannon
TheCannon/normalization.py
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/normalization.py#L223-L271
def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc, n_proc=1): """ Run fit_cont, dealing with spectrum in regions or chunks This is useful if a spectrum has gaps. Parameters ---------- fluxes: ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes """ nstars = fluxes.shape[0] npixels = fluxes.shape[1] cont = np.zeros(fluxes.shape) for chunk in ranges: start = chunk[0] stop = chunk[1] if ffunc=="chebyshev": output = _find_cont_fitfunc(fluxes[:,start:stop], ivars[:,start:stop], contmask[start:stop], deg=deg, ffunc="chebyshev", n_proc=n_proc) elif ffunc=="sinusoid": output = _find_cont_fitfunc(fluxes[:,start:stop], ivars[:,start:stop], contmask[start:stop], deg=deg, ffunc="sinusoid", n_proc=n_proc) cont[:, start:stop] = output return cont
[ "def", "_find_cont_fitfunc_regions", "(", "fluxes", ",", "ivars", ",", "contmask", ",", "deg", ",", "ranges", ",", "ffunc", ",", "n_proc", "=", "1", ")", ":", "nstars", "=", "fluxes", ".", "shape", "[", "0", "]", "npixels", "=", "fluxes", ".", "shape",...
Run fit_cont, dealing with spectrum in regions or chunks This is useful if a spectrum has gaps. Parameters ---------- fluxes: ndarray of shape (nstars, npixels) training set or test set pixel intensities ivars: numpy ndarray of shape (nstars, npixels) inverse variances, parallel to fluxes contmask: numpy ndarray of length (npixels) boolean pixel mask, True indicates that pixel is continuum deg: int degree of fitting function ffunc: str type of fitting function, chebyshev or sinusoid Returns ------- cont: numpy ndarray of shape (nstars, npixels) the continuum, parallel to fluxes
[ "Run", "fit_cont", "dealing", "with", "spectrum", "in", "regions", "or", "chunks" ]
python
train
SheffieldML/GPy
GPy/inference/latent_function_inference/expectation_propagation.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/inference/latent_function_inference/expectation_propagation.py#L404-L424
def to_dict(self): """ Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object """ input_dict = super(EP, self)._save_to_input_dict() input_dict["class"] = "GPy.inference.latent_function_inference.expectation_propagation.EP" if self.ga_approx_old is not None: input_dict["ga_approx_old"] = self.ga_approx_old.to_dict() if self._ep_approximation is not None: input_dict["_ep_approximation"] = {} input_dict["_ep_approximation"]["post_params"] = self._ep_approximation[0].to_dict() input_dict["_ep_approximation"]["ga_approx"] = self._ep_approximation[1].to_dict() input_dict["_ep_approximation"]["cav_params"] = self._ep_approximation[2].to_dict() input_dict["_ep_approximation"]["log_Z_tilde"] = self._ep_approximation[3].tolist() return input_dict
[ "def", "to_dict", "(", "self", ")", ":", "input_dict", "=", "super", "(", "EP", ",", "self", ")", ".", "_save_to_input_dict", "(", ")", "input_dict", "[", "\"class\"", "]", "=", "\"GPy.inference.latent_function_inference.expectation_propagation.EP\"", "if", "self", ...
Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object
[ "Convert", "the", "object", "into", "a", "json", "serializable", "dictionary", "." ]
python
train
mickybart/python-atlasbroker
atlasbroker/config.py
https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/config.py#L145-L182
def generate_binding_credentials(self, binding): """Generate binding credentials This function will permit to define the configuration to connect to the instance. Those credentials will be stored on a secret and exposed to a a Pod. We should at least returns the 'username' and 'password'. Args: binding (AtlasServiceBinding.Binding): A binding Returns: dict: All credentials and secrets. Raises: ErrClusterConfig: Connection string to the cluster is not available. """ uri = self.clusters.get(binding.instance.get_cluster(), None) if not uri: raise ErrClusterConfig(binding.instance.get_cluster()) # partial credentials creds = {"username" : self.generate_binding_username(binding), "password" : pwgen(32, symbols=False), "database" : binding.instance.get_dbname()} # uri uri = uri % ( creds["username"], creds["password"], creds["database"]) creds["uri"] = uri # return creds return creds
[ "def", "generate_binding_credentials", "(", "self", ",", "binding", ")", ":", "uri", "=", "self", ".", "clusters", ".", "get", "(", "binding", ".", "instance", ".", "get_cluster", "(", ")", ",", "None", ")", "if", "not", "uri", ":", "raise", "ErrClusterC...
Generate binding credentials This function will permit to define the configuration to connect to the instance. Those credentials will be stored on a secret and exposed to a a Pod. We should at least returns the 'username' and 'password'. Args: binding (AtlasServiceBinding.Binding): A binding Returns: dict: All credentials and secrets. Raises: ErrClusterConfig: Connection string to the cluster is not available.
[ "Generate", "binding", "credentials", "This", "function", "will", "permit", "to", "define", "the", "configuration", "to", "connect", "to", "the", "instance", ".", "Those", "credentials", "will", "be", "stored", "on", "a", "secret", "and", "exposed", "to", "a",...
python
train
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2015_04_05/_http/batchclient.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L163-L175
def validate_request_row_key(self, request): ''' Validates that all requests have the different RowKey and adds RowKey to existing RowKey list. request: the request to insert, update or delete entity ''' if self.batch_row_keys: if self.get_request_row_key(request) in self.batch_row_keys: raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH) else: self.batch_row_keys.append(self.get_request_row_key(request))
[ "def", "validate_request_row_key", "(", "self", ",", "request", ")", ":", "if", "self", ".", "batch_row_keys", ":", "if", "self", ".", "get_request_row_key", "(", "request", ")", "in", "self", ".", "batch_row_keys", ":", "raise", "AzureBatchValidationError", "("...
Validates that all requests have the different RowKey and adds RowKey to existing RowKey list. request: the request to insert, update or delete entity
[ "Validates", "that", "all", "requests", "have", "the", "different", "RowKey", "and", "adds", "RowKey", "to", "existing", "RowKey", "list", "." ]
python
train
lorien/grab
grab/base.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/base.py#L297-L320
def clone(self, **kwargs): """ Create clone of Grab instance. Cloned instance will have the same state: cookies, referrer, response document data :param **kwargs: overrides settings of cloned grab instance """ grab = Grab(transport=self.transport_param) grab.config = self.dump_config() grab.doc = self.doc.copy() #grab.doc.grab = weakref.proxy(grab) for key in self.clonable_attributes: setattr(grab, key, getattr(self, key)) grab.cookies = deepcopy(self.cookies) if kwargs: grab.setup(**kwargs) return grab
[ "def", "clone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "grab", "=", "Grab", "(", "transport", "=", "self", ".", "transport_param", ")", "grab", ".", "config", "=", "self", ".", "dump_config", "(", ")", "grab", ".", "doc", "=", "self", ".",...
Create clone of Grab instance. Cloned instance will have the same state: cookies, referrer, response document data :param **kwargs: overrides settings of cloned grab instance
[ "Create", "clone", "of", "Grab", "instance", "." ]
python
train
zhmcclient/python-zhmcclient
zhmcclient/_manager.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_manager.py#L427-L455
def _matches_filters(self, obj, filter_args): """ Return a boolean indicating whether a resource object matches a set of filter arguments. This is used for client-side filtering. Depending on the properties specified in the filter arguments, this method retrieves the resource properties from the HMC. Parameters: obj (BaseResource): Resource object. filter_args (dict): Filter arguments. For details, see :ref:`Filtering`. `None` causes the resource to always match. Returns: bool: Boolean indicating whether the resource object matches the filter arguments. """ if filter_args is not None: for prop_name in filter_args: prop_match = filter_args[prop_name] if not self._matches_prop(obj, prop_name, prop_match): return False return True
[ "def", "_matches_filters", "(", "self", ",", "obj", ",", "filter_args", ")", ":", "if", "filter_args", "is", "not", "None", ":", "for", "prop_name", "in", "filter_args", ":", "prop_match", "=", "filter_args", "[", "prop_name", "]", "if", "not", "self", "."...
Return a boolean indicating whether a resource object matches a set of filter arguments. This is used for client-side filtering. Depending on the properties specified in the filter arguments, this method retrieves the resource properties from the HMC. Parameters: obj (BaseResource): Resource object. filter_args (dict): Filter arguments. For details, see :ref:`Filtering`. `None` causes the resource to always match. Returns: bool: Boolean indicating whether the resource object matches the filter arguments.
[ "Return", "a", "boolean", "indicating", "whether", "a", "resource", "object", "matches", "a", "set", "of", "filter", "arguments", ".", "This", "is", "used", "for", "client", "-", "side", "filtering", "." ]
python
train
sergiocorreia/panflute
panflute/tools.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L319-L337
def run_pandoc(text='', args=None): """ Low level function that calls Pandoc with (optionally) some input text and/or arguments """ if args is None: args = [] pandoc_path = which('pandoc') if pandoc_path is None or not os.path.exists(pandoc_path): raise OSError("Path to pandoc executable does not exists") proc = Popen([pandoc_path] + args, stdin=PIPE, stdout=PIPE, stderr=PIPE) out, err = proc.communicate(input=text.encode('utf-8')) exitcode = proc.returncode if exitcode != 0: raise IOError(err) return out.decode('utf-8')
[ "def", "run_pandoc", "(", "text", "=", "''", ",", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "]", "pandoc_path", "=", "which", "(", "'pandoc'", ")", "if", "pandoc_path", "is", "None", "or", "not", "os", ".", ...
Low level function that calls Pandoc with (optionally) some input text and/or arguments
[ "Low", "level", "function", "that", "calls", "Pandoc", "with", "(", "optionally", ")", "some", "input", "text", "and", "/", "or", "arguments" ]
python
train
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L3407-L3597
def read_gds(self, infile, units='skip', rename={}, layers={}, datatypes={}, texttypes={}): """ Read a GDSII file into this library. Parameters ---------- infile : file or string GDSII stream file (or path) to be imported. It must be opened for reading in binary format. units : {'convert', 'import', 'skip'} Controls how to scale and use the units in the imported file. ``'convert'``: the imported geometry is scaled to this library units. ``'import'``: the unit and precision in this library are replaced by those from the imported file. ``'skip'``: the imported geometry is not scaled and units are not replaced; the geometry is imported in the *user units* of the file. rename : dictionary Dictionary used to rename the imported cells. Keys and values must be strings. layers : dictionary Dictionary used to convert the layers in the imported cells. Keys and values must be integers. datatypes : dictionary Dictionary used to convert the datatypes in the imported cells. Keys and values must be integers. texttypes : dictionary Dictionary used to convert the text types in the imported cells. Keys and values must be integers. Returns ------- out : ``GdsLibrary`` This object. Notes ----- Not all features from the GDSII specification are currently supported. A warning will be produced if any unsuported features are found in the imported file. 
""" self._references = [] if isinstance(infile, basestring): infile = open(infile, 'rb') close = True else: close = False emitted_warnings = [] record = self._read_record(infile) kwargs = {} create_element = None factor = 1 cell = None while record is not None: # LAYER if record[0] == 0x0d: kwargs['layer'] = layers.get(record[1][0], record[1][0]) # DATATYPE elif record[0] == 0x0e: kwargs['datatype'] = datatypes.get(record[1][0], record[1][0]) # TEXTTYPE elif record[0] == 0x16: kwargs['texttype'] = texttypes.get(record[1][0], record[1][0]) # XY elif record[0] == 0x10: kwargs['xy'] = factor * record[1] # WIDTH elif record[0] == 0x0f: kwargs['width'] = factor * abs(record[1][0]) if record[1][0] < 0 and record[0] not in emitted_warnings: warnings.warn( "[GDSPY] Paths with absolute width value are not " "supported. Scaling these paths will also scale " "their width.", stacklevel=2) emitted_warnings.append(record[0]) # ENDEL elif record[0] == 0x11: if create_element is not None: cell.add(create_element(**kwargs)) create_element = None kwargs = {} # BOUNDARY elif record[0] == 0x08: create_element = self._create_polygon # PATH elif record[0] == 0x09: create_element = self._create_path # TEXT elif record[0] == 0x0c: create_element = self._create_label # SNAME elif record[0] == 0x12: kwargs['ref_cell'] = rename.get(record[1], record[1]) # COLROW elif record[0] == 0x13: kwargs['columns'] = record[1][0] kwargs['rows'] = record[1][1] # STRANS elif record[0] == 0x1a: kwargs['x_reflection'] = ((int(record[1][0]) & 0x8000) > 0) if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings: warnings.warn( "[GDSPY] Absolute magnification or rotation of " "references is not supported. 
Transformations will " "be interpreted as relative.", stacklevel=2) emitted_warnings.append(record[0]) # MAG elif record[0] == 0x1b: kwargs['magnification'] = record[1][0] # ANGLE elif record[0] == 0x1c: kwargs['rotation'] = record[1][0] # SREF elif record[0] == 0x0a: create_element = self._create_reference # AREF elif record[0] == 0x0b: create_element = self._create_array # STRNAME elif record[0] == 0x06: name = rename.get(record[1], record[1]) cell = Cell(name, exclude_from_current=True) self.cell_dict[name] = cell # STRING elif record[0] == 0x19: kwargs['text'] = record[1] # ENDSTR elif record[0] == 0x07: cell = None # UNITS elif record[0] == 0x03: if units == 'skip': factor = record[1][0] elif units == 'import': self.unit = record[1][1] / record[1][0] self.precision = record[1][1] factor = record[1][0] elif units == 'convert': factor = record[1][1] / self.unit else: raise ValueError("[GDSPY] units must be one of 'convert', " "'import' or 'skip'.") # LIBNAME elif record[0] == 0x02: self.name = record[1] # PRESENTATION elif record[0] == 0x17: kwargs['anchor'] = GdsLibrary._import_anchors[int(record[1][0]) & 0x000f] # PATHTYPE elif record[0] == 0x21: if record[1][0] > 2: if 0x21 not in emitted_warnings: warnings.warn( "[GDSPY] Path ends with custom size are not " "supported.", RuntimeWarning, stacklevel=2) emitted_warnings.append(0x21) else: kwargs['ends'] = record[1][0] # ENDLIB elif record[0] == 0x04: for ref in self._references: if ref.ref_cell in self.cell_dict: ref.ref_cell = self.cell_dict[ref.ref_cell] elif ref.ref_cell in current_library.cell_dict: ref.ref_cell = current_library.cell_dict[ref.ref_cell] # Not supported elif (record[0] not in emitted_warnings and record[0] not in GdsLibrary._unused_records): warnings.warn( "[GDSPY] Record type {0} ({1:02X}) is not " "supported.".format(GdsLibrary._record_name[record[0]], record[0]), RuntimeWarning, stacklevel=2) emitted_warnings.append(record[0]) record = self._read_record(infile) if close: infile.close() 
return self
[ "def", "read_gds", "(", "self", ",", "infile", ",", "units", "=", "'skip'", ",", "rename", "=", "{", "}", ",", "layers", "=", "{", "}", ",", "datatypes", "=", "{", "}", ",", "texttypes", "=", "{", "}", ")", ":", "self", ".", "_references", "=", ...
Read a GDSII file into this library. Parameters ---------- infile : file or string GDSII stream file (or path) to be imported. It must be opened for reading in binary format. units : {'convert', 'import', 'skip'} Controls how to scale and use the units in the imported file. ``'convert'``: the imported geometry is scaled to this library units. ``'import'``: the unit and precision in this library are replaced by those from the imported file. ``'skip'``: the imported geometry is not scaled and units are not replaced; the geometry is imported in the *user units* of the file. rename : dictionary Dictionary used to rename the imported cells. Keys and values must be strings. layers : dictionary Dictionary used to convert the layers in the imported cells. Keys and values must be integers. datatypes : dictionary Dictionary used to convert the datatypes in the imported cells. Keys and values must be integers. texttypes : dictionary Dictionary used to convert the text types in the imported cells. Keys and values must be integers. Returns ------- out : ``GdsLibrary`` This object. Notes ----- Not all features from the GDSII specification are currently supported. A warning will be produced if any unsuported features are found in the imported file.
[ "Read", "a", "GDSII", "file", "into", "this", "library", "." ]
python
train
CalebBell/fluids
fluids/optional/pychebfun.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/optional/pychebfun.py#L61-L71
def cheb_range_simplifier(low, high, text=False): ''' >>> low, high = 0.0023046250851646434, 4.7088985707840125 >>> cheb_range_simplifier(low, high, text=True) 'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)' ''' constant = 0.5*(-low-high) factor = 2.0/(high-low) if text: return 'chebval(%.20g*(x + %.20g), coeffs)' %(factor, constant) return constant, factor
[ "def", "cheb_range_simplifier", "(", "low", ",", "high", ",", "text", "=", "False", ")", ":", "constant", "=", "0.5", "*", "(", "-", "low", "-", "high", ")", "factor", "=", "2.0", "/", "(", "high", "-", "low", ")", "if", "text", ":", "return", "'...
>>> low, high = 0.0023046250851646434, 4.7088985707840125 >>> cheb_range_simplifier(low, high, text=True) 'chebval(0.42493574399544564724*(x + -2.3556015979345885647), coeffs)'
[ ">>>", "low", "high", "=", "0", ".", "0023046250851646434", "4", ".", "7088985707840125", ">>>", "cheb_range_simplifier", "(", "low", "high", "text", "=", "True", ")", "chebval", "(", "0", ".", "42493574399544564724", "*", "(", "x", "+", "-", "2", ".", "...
python
train
cortical-io/retina-sdk.py
retinasdk/full_client.py
https://github.com/cortical-io/retina-sdk.py/blob/474c13ad399fe1e974d2650335537608f4456b07/retinasdk/full_client.py#L285-L298
def getImage(self, body, imageScalar=2, plotShape="circle", imageEncoding="base64/png", sparsity=1.0):
    """Render an expression as an image via the image API endpoint.

    Args:
        body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
        imageScalar, int: The scale of the image (optional)
        plotShape, str: The image shape (optional)
        imageEncoding, str: The encoding of the returned image (optional)
        sparsity, float: Sparsify the resulting expression to this percentage (optional)
    Returns:
        str with the raw byte data of the image
    Raises:
        CorticalioException: if the request was not successful
    """
    # Delegate to the images API, binding this client's retina name.
    image_api = self._image
    return image_api.getImageForExpression(
        self._retina, body, imageScalar, plotShape, imageEncoding, sparsity)
[ "def", "getImage", "(", "self", ",", "body", ",", "imageScalar", "=", "2", ",", "plotShape", "=", "\"circle\"", ",", "imageEncoding", "=", "\"base64/png\"", ",", "sparsity", "=", "1.0", ")", ":", "return", "self", ".", "_image", ".", "getImageForExpression",...
Get images for expressions Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) imageScalar, int: The scale of the image (optional) plotShape, str: The image shape (optional) imageEncoding, str: The encoding of the returned image (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: str with the raw byte data of the image Raises: CorticalioException: if the request was not successful
[ "Get", "images", "for", "expressions", "Args", ":", "body", "ExpressionOperation", ":", "The", "JSON", "encoded", "expression", "to", "be", "evaluated", "(", "required", ")", "imageScalar", "int", ":", "The", "scale", "of", "the", "image", "(", "optional", "...
python
train
rosshamish/catanlog
catanlog.py
https://github.com/rosshamish/catanlog/blob/6f204920d9b67fd53fc6ff6a1c7b6a756b009bf0/catanlog.py#L329-L336
def _log_board_terrain(self, terrain): """ Tiles are logged counterclockwise beginning from the top-left. See module hexgrid (https://github.com/rosshamish/hexgrid) for the tile layout. :param terrain: list of catan.board.Terrain objects """ self._logln('terrain: {0}'.format(' '.join(t.value for t in terrain)))
[ "def", "_log_board_terrain", "(", "self", ",", "terrain", ")", ":", "self", ".", "_logln", "(", "'terrain: {0}'", ".", "format", "(", "' '", ".", "join", "(", "t", ".", "value", "for", "t", "in", "terrain", ")", ")", ")" ]
Tiles are logged counterclockwise beginning from the top-left. See module hexgrid (https://github.com/rosshamish/hexgrid) for the tile layout. :param terrain: list of catan.board.Terrain objects
[ "Tiles", "are", "logged", "counterclockwise", "beginning", "from", "the", "top", "-", "left", ".", "See", "module", "hexgrid", "(", "https", ":", "//", "github", ".", "com", "/", "rosshamish", "/", "hexgrid", ")", "for", "the", "tile", "layout", "." ]
python
train
HumanBrainProject/hbp-service-client
hbp_service_client/storage_service/client.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/client.py#L50-L88
def list(self, path):
    '''List the entities found directly under the given path.

    Args:
        path (str): The path of the entity to be listed. Must start with a '/'.

    Returns:
        The list of entity names directly under the given path; folder
        names carry a leading '/', file names do not:
            u'/12345/folder_1'

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    self.__validate_storage_path(path)

    entity = self.api_client.get_entity_by_query(path=path)
    if entity['entity_type'] not in self.__BROWSABLE_TYPES:
        # Bug fix: the two adjacent literals previously concatenated to
        # "cannot belisted" (missing space between 'be' and 'listed').
        raise StorageArgumentException('The entity type "{0}" cannot be '
                                       'listed'.format(entity['entity_type']))
    entity_uuid = entity['uuid']

    file_names = []
    # Page through the folder contents until the server reports no next page.
    more_pages = True
    page_number = 1
    while more_pages:
        response = self.api_client.list_folder_content(
            entity_uuid, page=page_number, ordering='name')
        more_pages = response['next'] is not None
        page_number += 1
        for child in response['results']:
            # Folders are distinguished from files by a leading '/'.
            pattern = '/{name}' if child['entity_type'] == 'folder' else '{name}'
            file_names.append(pattern.format(name=child['name']))

    return file_names
[ "def", "list", "(", "self", ",", "path", ")", ":", "self", ".", "__validate_storage_path", "(", "path", ")", "entity", "=", "self", ".", "api_client", ".", "get_entity_by_query", "(", "path", "=", "path", ")", "if", "entity", "[", "'entity_type'", "]", "...
List the entities found directly under the given path. Args: path (str): The path of the entity to be listed. Must start with a '/'. Returns: The list of entity names directly under the given path: u'/12345/folder_1' Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
[ "List", "the", "entities", "found", "directly", "under", "the", "given", "path", "." ]
python
test
ARMmbed/icetea
icetea_lib/ResourceProvider/ResourceConfig.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/ResourceProvider/ResourceConfig.py#L274-L281
def _resolve_process_count(self): """ Calculate amount of process resources. :return: Nothing, adds results to self._process_count """ length = len([d for d in self._dut_requirements if d.get("type") == "process"]) self._process_count = length
[ "def", "_resolve_process_count", "(", "self", ")", ":", "length", "=", "len", "(", "[", "d", "for", "d", "in", "self", ".", "_dut_requirements", "if", "d", ".", "get", "(", "\"type\"", ")", "==", "\"process\"", "]", ")", "self", ".", "_process_count", ...
Calculate amount of process resources. :return: Nothing, adds results to self._process_count
[ "Calculate", "amount", "of", "process", "resources", "." ]
python
train
jacquerie/flask-shell-ptpython
flask_shell_ptpython.py
https://github.com/jacquerie/flask-shell-ptpython/blob/515a502c81eb58474dcbdad7137e4c82a6167670/flask_shell_ptpython.py#L14-L37
def shell_command():
    """Runs an interactive Python shell in the context of a given
    Flask application.  The application will populate the default
    namespace of this shell according to its configuration.

    This is useful for executing small snippets of management code
    without having to manually configure the application.
    """
    # Imported lazily so the module can be loaded without Flask/ptpython.
    from flask.globals import _app_ctx_stack
    from ptpython.repl import embed

    flask_app = _app_ctx_stack.top.app
    namespace = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'r') as fobj:
            source = fobj.read()
        eval(compile(source, startup, 'exec'), namespace)

    namespace.update(flask_app.make_shell_context())
    embed(globals=namespace)
[ "def", "shell_command", "(", ")", ":", "from", "flask", ".", "globals", "import", "_app_ctx_stack", "from", "ptpython", ".", "repl", "import", "embed", "app", "=", "_app_ctx_stack", ".", "top", ".", "app", "ctx", "=", "{", "}", "# Support the regular Python in...
Runs an interactive Python shell in the context of a given Flask application. The application will populate the default namespace of this shell according to its configuration. This is useful for executing small snippets of management code without having to manually configure the application.
[ "Runs", "an", "interactive", "Python", "shell", "in", "the", "context", "of", "a", "given", "Flask", "application", ".", "The", "application", "will", "populate", "the", "default", "namespace", "of", "this", "shell", "according", "to", "its", "configuration", ...
python
train
juju/charm-helpers
charmhelpers/contrib/hardening/audits/apache.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/audits/apache.py#L86-L95
def _disable_module(module):
    """Disables the specified module in Apache via ``a2dismod``.

    Failures are logged (not raised) so that an attempt can be made to
    disable every module in a batch even if one of them fails.

    :param module: name of the Apache module to disable.
    """
    try:
        # Bug fix: check_call never populates CalledProcessError.output,
        # so the error log below always said "Output is: None".  Use
        # check_output with stderr merged so the output is captured.
        subprocess.check_output(['a2dismod', module],
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Note: catch error here to allow the attempt of disabling
        # multiple modules in one go rather than failing after the
        # first module fails.
        log('Error occurred disabling module %s. '
            'Output is: %s' % (module, e.output), level=ERROR)
[ "def", "_disable_module", "(", "module", ")", ":", "try", ":", "subprocess", ".", "check_call", "(", "[", "'a2dismod'", ",", "module", "]", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "# Note: catch error here to allow the attempt of disa...
Disables the specified module in Apache.
[ "Disables", "the", "specified", "module", "in", "Apache", "." ]
python
train
briandilley/ebs-deploy
ebs_deploy/commands/update_environments_command.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/commands/update_environments_command.py#L4-L9
def add_arguments(parser):
    """ Args for the init command """
    # One or more environment names may be supplied.
    parser.add_argument(
        '-e', '--environment',
        required=False, nargs='+',
        help='Environment name')
    # Boolean flag: do not block until deletion completes.
    parser.add_argument(
        '-w', '--dont-wait',
        action='store_true',
        help='Skip waiting for the app to be deleted')
[ "def", "add_arguments", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-e'", ",", "'--environment'", ",", "help", "=", "'Environment name'", ",", "required", "=", "False", ",", "nargs", "=", "'+'", ")", "parser", ".", "add_argument", "(", "...
Args for the init command
[ "Args", "for", "the", "init", "command" ]
python
valid
StanfordVL/robosuite
robosuite/controllers/sawyer_ik_controller.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/controllers/sawyer_ik_controller.py#L240-L278
def joint_positions_for_eef_command(self, dpos, rotation):
    """
    This function runs inverse kinematics to back out target joint positions
    from the provided end effector command.

    Same arguments as @get_control.

    Returns:
        A list of size @num_joints corresponding to the target joint angles.
    """
    # Accumulate the commanded delta into the persistent target position,
    # scaled by the user's sensitivity setting.
    self.ik_robot_target_pos += dpos * self.user_sensitivity

    # this rotation accounts for rotating the end effector by 90 degrees
    # from its rest configuration. The corresponding line in most demo
    # scripts is:
    # `env.set_robot_joint_positions([0, -1.18, 0.00, 2.18, 0.00, 0.57, 1.5708])`
    rotation = rotation.dot(
        T.rotation_matrix(angle=-np.pi / 2, direction=[0., 0., 1.], point=None)[
            :3, :3
        ]
    )
    self.ik_robot_target_orn = T.mat2quat(rotation)

    # convert from target pose in base frame to target pose in bullet world frame
    world_targets = self.bullet_base_pose_to_world_pose(
        (self.ik_robot_target_pos, self.ik_robot_target_orn)
    )

    # Null-space rest posture used to bias the IK solution.
    rest_poses = [0, -1.18, 0.00, 2.18, 0.00, 0.57, 3.3161]

    # Repeatedly solve IK and sync the simulated robot so the solution
    # converges; presumably 100 iterations is an empirical choice — TODO confirm.
    for bullet_i in range(100):
        arm_joint_pos = self.inverse_kinematics(
            world_targets[0], world_targets[1], rest_poses=rest_poses
        )
        self.sync_ik_robot(arm_joint_pos, sync_last=True)

    return arm_joint_pos
[ "def", "joint_positions_for_eef_command", "(", "self", ",", "dpos", ",", "rotation", ")", ":", "self", ".", "ik_robot_target_pos", "+=", "dpos", "*", "self", ".", "user_sensitivity", "# this rotation accounts for rotating the end effector by 90 degrees", "# from its rest conf...
This function runs inverse kinematics to back out target joint positions from the provided end effector command. Same arguments as @get_control. Returns: A list of size @num_joints corresponding to the target joint angles.
[ "This", "function", "runs", "inverse", "kinematics", "to", "back", "out", "target", "joint", "positions", "from", "the", "provided", "end", "effector", "command", "." ]
python
train
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_processor.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_processor.py#L223-L225
def science_object_create(self, pid, path, format_id=None):
    """Create a new Science Object on a Member Node.

    The operation is queued for later execution rather than performed
    immediately.
    """
    self._queue_science_object_create(pid, path, format_id)
[ "def", "science_object_create", "(", "self", ",", "pid", ",", "path", ",", "format_id", "=", "None", ")", ":", "self", ".", "_queue_science_object_create", "(", "pid", ",", "path", ",", "format_id", ")" ]
Create a new Science Object on a Member Node.
[ "Create", "a", "new", "Science", "Object", "on", "a", "Member", "Node", "." ]
python
train
cloudnativelabs/kube-shell
kubeshell/parser.py
https://github.com/cloudnativelabs/kube-shell/blob/adc801d165e87fe62f82b074ec49996954c3fbe8/kubeshell/parser.py#L92-L128
def treewalk(self, root, parsed, unparsed):
    """ Recursively walks the syntax tree at root and returns
    the items parsed, unparsed and possible suggestions.

    Tokens are consumed from the *end* of ``unparsed`` (via ``pop()``);
    ``parsed`` accumulates matched tokens, and ``suggestions`` maps
    candidate child node names to their help text when no child matched.
    """
    suggestions = dict()
    if not unparsed:
        # Nothing left to consume: return what we have so far.
        logger.debug("no tokens left unparsed. returning %s, %s", parsed, suggestions)
        return parsed, unparsed, suggestions

    token = unparsed.pop().strip()
    logger.debug("begin parsing at %s w/ tokens: %s", root.node, unparsed)
    if root.node == token:
        logger.debug("root node: %s matches next token:%s", root.node, token)
        parsed.append(token)
        if self.peekForOption(unparsed):  # check for localFlags and globalFlags
            logger.debug("option(s) upcoming %s", unparsed)
            # evalOptions consumes a copy of the remaining tokens and
            # returns the options it recognized plus the leftovers.
            parsed_opts, unparsed, suggestions = self.evalOptions(root, list(), unparsed[:])
            if parsed_opts:
                logger.debug("parsed option(s): %s", parsed_opts)
                parsed.extend(parsed_opts)
        if unparsed and not self.peekForOption(unparsed):  # unparsed bits without options
            logger.debug("begin subtree %s parsing", root.node)
            # Try each child subtree in turn; the first one that parses
            # further tokens wins.
            for child in root.children:
                parsed_subtree, unparsed, suggestions = self.treewalk(child, list(), unparsed[:])
                if parsed_subtree:
                    # subtree returned further parsed tokens
                    parsed.extend(parsed_subtree)
                    logger.debug("subtree at: %s has matches. %s, %s", child.node, parsed, unparsed)
                    break
            else:
                # no matches found in command tree
                # return children of root as suggestions
                logger.debug("no matches in subtree: %s. returning children as suggestions", root.node)
                for child in root.children:
                    suggestions[child.node] = child.help
    else:
        # Token does not match this node: push it back for the caller.
        logger.debug("no token or option match")
        unparsed.append(token)
    return parsed, unparsed, suggestions
[ "def", "treewalk", "(", "self", ",", "root", ",", "parsed", ",", "unparsed", ")", ":", "suggestions", "=", "dict", "(", ")", "if", "not", "unparsed", ":", "logger", ".", "debug", "(", "\"no tokens left unparsed. returning %s, %s\"", ",", "parsed", ",", "sugg...
Recursively walks the syntax tree at root and returns the items parsed, unparsed and possible suggestions
[ "Recursively", "walks", "the", "syntax", "tree", "at", "root", "and", "returns", "the", "items", "parsed", "unparsed", "and", "possible", "suggestions" ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L3432-L3453
def noise_get_turbulence(
    n: tcod.noise.Noise,
    f: Sequence[float],
    oc: float,
    typ: int = NOISE_DEFAULT,
) -> float:
    """Return the turbulence noise sampled from the ``f`` coordinate.

    Args:
        n (Noise): A Noise instance.
        f (Sequence[float]): The point to sample the noise from.
        typ (int): The noise algorithm to use.
        octaves (float): The level of detail.  Should be more than 1.

    Returns:
        float: The sampled noise value.
    """
    # Marshal the coordinate into a C float[4] buffer for the FFI call.
    coords = ffi.new("float[4]", f)
    sample = lib.TCOD_noise_get_turbulence_ex(n.noise_c, coords, oc, typ)
    return float(sample)
[ "def", "noise_get_turbulence", "(", "n", ":", "tcod", ".", "noise", ".", "Noise", ",", "f", ":", "Sequence", "[", "float", "]", ",", "oc", ":", "float", ",", "typ", ":", "int", "=", "NOISE_DEFAULT", ",", ")", "->", "float", ":", "return", "float", ...
Return the turbulence noise sampled from the ``f`` coordinate. Args: n (Noise): A Noise instance. f (Sequence[float]): The point to sample the noise from. typ (int): The noise algorithm to use. octaves (float): The level of level. Should be more than 1. Returns: float: The sampled noise value.
[ "Return", "the", "turbulence", "noise", "sampled", "from", "the", "f", "coordinate", "." ]
python
train
diffeo/rejester
rejester/run.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L240-L244
def task_master(self):
    """A `TaskMaster` object for manipulating work.

    Built lazily from ``self.config`` on first access and cached.
    """
    cached = self._task_master
    if cached is None:
        cached = build_task_master(self.config)
        self._task_master = cached
    return cached
[ "def", "task_master", "(", "self", ")", ":", "if", "self", ".", "_task_master", "is", "None", ":", "self", ".", "_task_master", "=", "build_task_master", "(", "self", ".", "config", ")", "return", "self", ".", "_task_master" ]
A `TaskMaster` object for manipulating work
[ "A", "TaskMaster", "object", "for", "manipulating", "work" ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/corpora.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/corpora.py#L79-L83
def document_ids(self):
    """returns a list of document IDs used in the PCC

    IDs are extracted from the tokenization file basenames and returned
    sorted.  NOTE(review): this reads the module-level ``pcc`` object
    rather than ``self`` — confirm that is intentional.
    """
    basenames = (os.path.basename(fname) for fname in pcc.tokenization)
    doc_ids = (PCC_DOCID_RE.match(name).groups()[0] for name in basenames)
    return sorted(doc_ids)
[ "def", "document_ids", "(", "self", ")", ":", "matches", "=", "[", "PCC_DOCID_RE", ".", "match", "(", "os", ".", "path", ".", "basename", "(", "fname", ")", ")", "for", "fname", "in", "pcc", ".", "tokenization", "]", "return", "sorted", "(", "match", ...
returns a list of document IDs used in the PCC
[ "returns", "a", "list", "of", "document", "IDs", "used", "in", "the", "PCC" ]
python
train
Josef-Friedrich/phrydy
phrydy/utils.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/utils.py#L23-L39
def as_string(value):
    """Convert a value to a Unicode object for matching with a query.
    None becomes the empty string. Bytestrings are silently decoded.
    """
    # Python 2 additionally has the `buffer` builtin alongside memoryview.
    if six.PY2:
        buffer_types = buffer, memoryview  # noqa: F821
    else:
        buffer_types = memoryview

    if value is None:
        return u''
    if isinstance(value, buffer_types):
        # Materialize the buffer before decoding; undecodable bytes dropped.
        return bytes(value).decode('utf8', 'ignore')
    if isinstance(value, bytes):
        return value.decode('utf8', 'ignore')
    return six.text_type(value)
[ "def", "as_string", "(", "value", ")", ":", "if", "six", ".", "PY2", ":", "buffer_types", "=", "buffer", ",", "memoryview", "# noqa: F821", "else", ":", "buffer_types", "=", "memoryview", "if", "value", "is", "None", ":", "return", "u''", "elif", "isinstan...
Convert a value to a Unicode object for matching with a query. None becomes the empty string. Bytestrings are silently decoded.
[ "Convert", "a", "value", "to", "a", "Unicode", "object", "for", "matching", "with", "a", "query", ".", "None", "becomes", "the", "empty", "string", ".", "Bytestrings", "are", "silently", "decoded", "." ]
python
train
dougalsutherland/skl-groups
skl_groups/divergences/knn.py
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L1044-L1222
def _parse_specs(specs, Ks):
    '''
    Set up the different functions we need to call.

    Returns:
        - a dict mapping base estimator functions to _FuncInfo objects.
          If the function needs_alpha, then the alphas attribute is an array
          of alpha values and pos is a corresponding array of indices.
          Otherwise, alphas is None and pos is a list containing a single index.
          Indices are >= 0 if they correspond to something in a spec,
          and negative if they're just used for a meta estimator but not
          directly requested.
        - an OrderedDict mapping functions to _MetaFuncInfo objects.
          alphas and pos are like for _FuncInfo; deps is a list of indices
          which should be passed to the estimator. Note that these might be
          other meta functions; this list is guaranteed to be in an order
          such that all dependencies are resolved before calling that function.
          If no such order is possible, raise ValueError.
        - the number of meta-only results

    NOTE(review): the ``Ks`` parameter appears unused in this body — confirm.
    '''
    funcs = {}
    metas = {}
    meta_deps = defaultdict(set)

    def add_func(func, alpha=None, pos=None):
        # Register `func` (and, recursively, anything a meta-function
        # requires) under the appropriate table, recording which output
        # position (if any) it was directly requested at.
        needs_alpha = getattr(func, 'needs_alpha', False)
        is_meta = hasattr(func, 'needs_results')

        d = metas if is_meta else funcs
        if func not in d:
            if needs_alpha:
                args = {'alphas': [alpha], 'pos': [pos]}
            else:
                args = {'alphas': None, 'pos': [pos]}

            if not is_meta:
                d[func] = _FuncInfo(**args)
            else:
                d[func] = _MetaFuncInfo(deps=[], **args)
                for req in func.needs_results:
                    # A requirement's alpha may be a transform of ours.
                    if callable(req.alpha):
                        req_alpha = req.alpha(alpha)
                    else:
                        req_alpha = req.alpha
                    add_func(req.func, alpha=req_alpha)
                    meta_deps[func].add(req.func)
                    meta_deps[req.func]  # make sure required func is in there
        else:
            # already have an entry for the func
            # need to give it this pos, if it's not None
            # and also make sure that the alpha is present
            info = d[func]
            if not needs_alpha:
                if pos is not None:
                    if info.pos != [None]:
                        msg = "{} passed more than once"
                        raise ValueError(msg.format(func_name))
                    info.pos[0] = pos
            else:  # needs alpha
                try:
                    idx = info.alphas.index(alpha)
                except ValueError:
                    # this is a new alpha value we haven't seen yet
                    info.alphas.append(alpha)
                    info.pos.append(pos)
                    if is_meta:
                        for req in func.needs_results:
                            if callable(req.alpha):
                                req_alpha = req.alpha(alpha)
                            else:
                                req_alpha = req.alpha
                            add_func(req.func, alpha=req_alpha)
                else:
                    # repeated alpha value
                    if pos is not None:
                        if info.pos[idx] is not None:
                            msg = "{} with alpha {} passed more than once"
                            raise ValueError(msg.format(func_name, alpha))
                        info.pos[idx] = pos

    # add functions for each spec
    for i, spec in enumerate(specs):
        func_name, alpha = (spec.split(':', 1) + [None])[:2]
        if alpha is not None:
            alpha = float(alpha)

        try:
            func = func_mapping[func_name]
        except KeyError:
            msg = "'{}' is not a known function type"
            raise ValueError(msg.format(func_name))

        needs_alpha = getattr(func, 'needs_alpha', False)
        if needs_alpha and alpha is None:
            msg = "{} needs alpha but not passed in spec '{}'"
            raise ValueError(msg.format(func_name, spec))
        elif not needs_alpha and alpha is not None:
            msg = "{} doesn't need alpha but is passed in spec '{}'"
            raise ValueError(msg.format(func_name, spec))

        add_func(func, alpha, i)

    # number things that are dependencies only
    meta_counter = itertools.count(-1, step=-1)
    for info in itertools.chain(itervalues(funcs), itervalues(metas)):
        for i, pos in enumerate(info.pos):
            if pos is None:
                info.pos[i] = next(meta_counter)

    # fill in the dependencies for metas
    for func, info in iteritems(metas):
        deps = info.deps
        assert deps == []

        for req in func.needs_results:
            f = req.func
            req_info = (metas if hasattr(f, 'needs_results') else funcs)[f]
            if req.alpha is not None:
                if callable(req.alpha):
                    req_alpha = req.alpha(info.alphas)
                else:
                    req_alpha = req.alpha

                find_alpha = np.vectorize(req_info.alphas.index, otypes=[int])
                pos = np.asarray(req_info.pos)[find_alpha(req_alpha)]
                if np.isscalar(pos):
                    deps.append(pos[()])
                else:
                    deps.extend(pos)
            else:
                pos, = req_info.pos
                deps.append(pos)

    # topological sort of metas
    meta_order = topological_sort(meta_deps)
    metas_ordered = OrderedDict(
        (f, metas[f]) for f in meta_order if hasattr(f, 'needs_results'))

    return funcs, metas_ordered, -next(meta_counter) - 1
[ "def", "_parse_specs", "(", "specs", ",", "Ks", ")", ":", "funcs", "=", "{", "}", "metas", "=", "{", "}", "meta_deps", "=", "defaultdict", "(", "set", ")", "def", "add_func", "(", "func", ",", "alpha", "=", "None", ",", "pos", "=", "None", ")", "...
Set up the different functions we need to call. Returns: - a dict mapping base estimator functions to _FuncInfo objects. If the function needs_alpha, then the alphas attribute is an array of alpha values and pos is a corresponding array of indices. Otherwise, alphas is None and pos is a list containing a single index. Indices are >= 0 if they correspond to something in a spec, and negative if they're just used for a meta estimator but not directly requested. - an OrderedDict mapping functions to _MetaFuncInfo objects. alphas and pos are like for _FuncInfo; deps is a list of indices which should be passed to the estimator. Note that these might be other meta functions; this list is guaranteed to be in an order such that all dependencies are resolved before calling that function. If no such order is possible, raise ValueError. - the number of meta-only results # TODO: update doctests for _parse_specs >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])}, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 4) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: 
_FuncInfo(alphas=None, pos=[4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3)
[ "Set", "up", "the", "different", "functions", "we", "need", "to", "call", "." ]
python
valid
Nic30/hwt
hwt/simulator/hdlSimulator.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/simulator/hdlSimulator.py#L327-L344
def _conflictResolveStrategy(self, newValue: set)\
        -> Tuple[Callable[[Value], bool], bool]:
    """
    This functions resolves write conflicts for signal

    :param actionSet: set of actions made by process
    :return: an (updater, isEvDependent) pair; a 3-tuple input selects
        the array-item updater, a 2-tuple the simple-signal updater.
    """
    invalidate = False
    if len(newValue) == 3:
        # update for item in array
        val, indexes, isEvDependent = newValue
        return (mkArrayUpdater(val, indexes, invalidate), isEvDependent)
    # update for simple signal
    val, isEvDependent = newValue
    return (mkUpdater(val, invalidate), isEvDependent)
[ "def", "_conflictResolveStrategy", "(", "self", ",", "newValue", ":", "set", ")", "->", "Tuple", "[", "Callable", "[", "[", "Value", "]", ",", "bool", "]", ",", "bool", "]", ":", "invalidate", "=", "False", "resLen", "=", "len", "(", "newValue", ")", ...
This functions resolves write conflicts for signal :param actionSet: set of actions made by process
[ "This", "functions", "resolves", "write", "conflicts", "for", "signal" ]
python
test
wummel/linkchecker
linkcheck/checker/httpurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/httpurl.py#L80-L103
def content_allows_robots (self):
    """
    Return False if the content of this URL forbids robots to
    search for recursive links.
    """
    # Only HTML content can carry a robots meta tag.
    if not self.is_html():
        return True
    # construct parser object
    handler = linkparse.MetaRobotsFinder()
    parser = htmlsax.parser(handler)
    handler.parser = parser
    if self.charset:
        parser.encoding = self.charset
    # parse; the finder raises StopParse once it has seen enough
    try:
        parser.feed(self.get_content())
        parser.flush()
    except linkparse.StopParse as msg:
        log.debug(LOG_CHECK, "Stopped parsing: %s", msg)
        pass
    # break cyclic dependencies
    handler.parser = None
    parser.handler = None
    return handler.follow
[ "def", "content_allows_robots", "(", "self", ")", ":", "if", "not", "self", ".", "is_html", "(", ")", ":", "return", "True", "# construct parser object", "handler", "=", "linkparse", ".", "MetaRobotsFinder", "(", ")", "parser", "=", "htmlsax", ".", "parser", ...
Return False if the content of this URL forbids robots to search for recursive links.
[ "Return", "False", "if", "the", "content", "of", "this", "URL", "forbids", "robots", "to", "search", "for", "recursive", "links", "." ]
python
train
ray-project/ray
examples/rl_pong/driver.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/rl_pong/driver.py#L35-L47
def preprocess(img):
    """Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.

    Note: the crop/downsample below produces views into ``img``, so the
    in-place background erasure also mutates the caller's array.
    """
    # Crop the image to the playing field.
    img = img[35:195]
    # Downsample by factor of 2 and keep only the first color channel.
    img = img[::2, ::2, 0]
    # Erase background (background type 1).
    img[img == 144] = 0
    # Erase background (background type 2).
    img[img == 109] = 0
    # Set everything else (paddles, ball) to 1.
    img[img != 0] = 1
    # Bug fix: np.float was removed in NumPy 1.24; np.float64 is the
    # dtype it aliased, so behavior is unchanged on older NumPy too.
    return img.astype(np.float64).ravel()
[ "def", "preprocess", "(", "img", ")", ":", "# Crop the image.", "img", "=", "img", "[", "35", ":", "195", "]", "# Downsample by factor of 2.", "img", "=", "img", "[", ":", ":", "2", ",", ":", ":", "2", ",", "0", "]", "# Erase background (background type 1)...
Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.
[ "Preprocess", "210x160x3", "uint8", "frame", "into", "6400", "(", "80x80", ")", "1D", "float", "vector", "." ]
python
train
consbio/gis-metadata-parser
gis_metadata/utils.py
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L262-L275
def has_property(elem_to_parse, xpath):
    """
    Parse xpath for any attribute reference "path/@attr" and check for
    root and presence of attribute.

    :return: True if xpath is present in the element along with any
        attribute referenced, otherwise False
    """
    xroot, attr = get_xpath_tuple(xpath)

    if not (xroot or attr):
        return False
    if attr:
        # An attribute was referenced: it must actually be present.
        return bool(get_elements_attributes(elem_to_parse, xroot, attr))
    # No attribute reference: the element just needs text content.
    return bool(get_elements_text(elem_to_parse, xroot))
[ "def", "has_property", "(", "elem_to_parse", ",", "xpath", ")", ":", "xroot", ",", "attr", "=", "get_xpath_tuple", "(", "xpath", ")", "if", "not", "xroot", "and", "not", "attr", ":", "return", "False", "elif", "not", "attr", ":", "return", "bool", "(", ...
Parse xpath for any attribute reference "path/@attr" and check for root and presence of attribute. :return: True if xpath is present in the element along with any attribute referenced, otherwise False
[ "Parse", "xpath", "for", "any", "attribute", "reference", "path", "/" ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/model_selection/learning_curve.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/model_selection/learning_curve.py#L191-L237
def fit(self, X, y=None): """ Fits the learning curve with the wrapped model to the specified data. Draws training and test score curves and saves the scores to the estimator. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- self : instance Returns the instance of the learning curve visualizer for use in pipelines and other sequential transformers. """ # arguments to pass to sk_learning_curve sklc_kwargs = { key: self.get_params()[key] for key in ( 'groups', 'train_sizes', 'cv', 'scoring', 'exploit_incremental_learning', 'n_jobs', 'pre_dispatch', 'shuffle', 'random_state', ) } # compute the learning curve and store the scores on the estimator curve = sk_learning_curve(self.estimator, X, y, **sklc_kwargs) self.train_sizes_, self.train_scores_, self.test_scores_ = curve # compute the mean and standard deviation of the training data self.train_scores_mean_ = np.mean(self.train_scores_, axis=1) self.train_scores_std_ = np.std(self.train_scores_, axis=1) # compute the mean and standard deviation of the test data self.test_scores_mean_ = np.mean(self.test_scores_, axis=1) self.test_scores_std_ = np.std(self.test_scores_, axis=1) # draw the curves on the current axes self.draw() return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "# arguments to pass to sk_learning_curve", "sklc_kwargs", "=", "{", "key", ":", "self", ".", "get_params", "(", ")", "[", "key", "]", "for", "key", "in", "(", "'groups'", ",", "'tra...
Fits the learning curve with the wrapped model to the specified data. Draws training and test score curves and saves the scores to the estimator. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- self : instance Returns the instance of the learning curve visualizer for use in pipelines and other sequential transformers.
[ "Fits", "the", "learning", "curve", "with", "the", "wrapped", "model", "to", "the", "specified", "data", ".", "Draws", "training", "and", "test", "score", "curves", "and", "saves", "the", "scores", "to", "the", "estimator", "." ]
python
train
sebdah/dynamic-dynamodb
dynamic_dynamodb/statistics/table.py
https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/statistics/table.py#L248-L288
def get_throttled_by_provisioned_write_event_percent( table_name, lookback_window_start=15, lookback_period=5): """ Returns the number of throttled write events during a given time frame :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by provisioning """ try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_write_events = float(metrics[0]['Sum']) / float( lookback_seconds) else: throttled_write_events = 0 try: table_write_units = dynamodb.get_provisioned_table_write_units( table_name) throttled_by_provisioned_write_percent = ( float(throttled_write_events) / float(table_write_units) * 100) except JSONResponseError: raise logger.info('{0} - Throttled write percent by provision: {1:.2f}%'.format( table_name, throttled_by_provisioned_write_percent)) return throttled_by_provisioned_write_percent
[ "def", "get_throttled_by_provisioned_write_event_percent", "(", "table_name", ",", "lookback_window_start", "=", "15", ",", "lookback_period", "=", "5", ")", ":", "try", ":", "metrics", "=", "__get_aws_metric", "(", "table_name", ",", "lookback_window_start", ",", "lo...
Returns the number of throttled write events during a given time frame :type table_name: str :param table_name: Name of the DynamoDB table :type lookback_window_start: int :param lookback_window_start: Relative start time for the CloudWatch metric :type lookback_period: int :param lookback_period: Number of minutes to look at :returns: float -- Percent of throttled write events by provisioning
[ "Returns", "the", "number", "of", "throttled", "write", "events", "during", "a", "given", "time", "frame" ]
python
train
3DLIRIOUS/MeshLabXML
meshlabxml/remesh.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/remesh.py#L489-L524
def voronoi(script, hole_num=50, target_layer=None, sample_layer=None, thickness=0.5, backward=True): """ Turn a model into a surface with Voronoi style holes in it References: http://meshlabstuff.blogspot.com/2009/03/creating-voronoi-sphere.html http://meshlabstuff.blogspot.com/2009/04/creating-voronoi-sphere-2.html Requires FilterScript object Args: script: the FilterScript object to write the filter to. Does not work with a script filename. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA """ if target_layer is None: target_layer = script.current_layer() if sample_layer is None: # Current layer is currently not changed after poisson_disk is run sampling.poisson_disk(script, sample_num=hole_num) sample_layer = script.last_layer() vert_color.voronoi(script, target_layer=target_layer, source_layer=sample_layer, backward=backward) select.vert_quality(script, min_quality=0.0, max_quality=thickness) if backward: select.invert(script) delete.selected(script) smooth.laplacian(script, iterations=3) return None
[ "def", "voronoi", "(", "script", ",", "hole_num", "=", "50", ",", "target_layer", "=", "None", ",", "sample_layer", "=", "None", ",", "thickness", "=", "0.5", ",", "backward", "=", "True", ")", ":", "if", "target_layer", "is", "None", ":", "target_layer"...
Turn a model into a surface with Voronoi style holes in it References: http://meshlabstuff.blogspot.com/2009/03/creating-voronoi-sphere.html http://meshlabstuff.blogspot.com/2009/04/creating-voronoi-sphere-2.html Requires FilterScript object Args: script: the FilterScript object to write the filter to. Does not work with a script filename. Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
[ "Turn", "a", "model", "into", "a", "surface", "with", "Voronoi", "style", "holes", "in", "it" ]
python
test
sentinel-hub/sentinelhub-py
sentinelhub/ogc.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/ogc.py#L560-L568
def get_dates(self): """ Returns a list of acquisition times from tile info data :return: List of acquisition times in the order returned by WFS service. :rtype: list(datetime.datetime) """ return [datetime.datetime.strptime('{}T{}'.format(tile_info['properties']['date'], tile_info['properties']['time'].split('.')[0]), '%Y-%m-%dT%H:%M:%S') for tile_info in self]
[ "def", "get_dates", "(", "self", ")", ":", "return", "[", "datetime", ".", "datetime", ".", "strptime", "(", "'{}T{}'", ".", "format", "(", "tile_info", "[", "'properties'", "]", "[", "'date'", "]", ",", "tile_info", "[", "'properties'", "]", "[", "'time...
Returns a list of acquisition times from tile info data :return: List of acquisition times in the order returned by WFS service. :rtype: list(datetime.datetime)
[ "Returns", "a", "list", "of", "acquisition", "times", "from", "tile", "info", "data" ]
python
train
letmaik/lensfunpy
lensfunpy/util.py
https://github.com/letmaik/lensfunpy/blob/e8800496874b1b1360cb9c245e2f137febbd41d7/lensfunpy/util.py#L45-L61
def remap(im, coords): """ Remap an RGB image using the given target coordinate array. If available, OpenCV is used (faster), otherwise SciPy. :type im: ndarray of shape (h,w,3) :param im: RGB image to be remapped :type coords: ndarray of shape (h,w,2) :param coords: target coordinates in x,y order for each pixel :return: remapped RGB image :rtype: ndarray of shape (h,w,3) """ if cv2: return remapOpenCv(im, coords) else: return remapScipy(im, coords)
[ "def", "remap", "(", "im", ",", "coords", ")", ":", "if", "cv2", ":", "return", "remapOpenCv", "(", "im", ",", "coords", ")", "else", ":", "return", "remapScipy", "(", "im", ",", "coords", ")" ]
Remap an RGB image using the given target coordinate array. If available, OpenCV is used (faster), otherwise SciPy. :type im: ndarray of shape (h,w,3) :param im: RGB image to be remapped :type coords: ndarray of shape (h,w,2) :param coords: target coordinates in x,y order for each pixel :return: remapped RGB image :rtype: ndarray of shape (h,w,3)
[ "Remap", "an", "RGB", "image", "using", "the", "given", "target", "coordinate", "array", ".", "If", "available", "OpenCV", "is", "used", "(", "faster", ")", "otherwise", "SciPy", ".", ":", "type", "im", ":", "ndarray", "of", "shape", "(", "h", "w", "3"...
python
train
lemieuxl/pyGenClean
pyGenClean/DupSamples/duplicated_samples.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSamples/duplicated_samples.py#L749-L920
def computeStatistics(tped, tfam, samples, oldSamples, prefix): """Computes the completion and concordance of each samples. :param tped: the ``tped`` containing duplicated samples. :param tfam: the ``tfam`` containing duplicated samples. :param samples: the updated position of the samples in the tped containing only duplicated samples. :param oldSamples: the original duplicated sample positions. :param prefix: the prefix of all the files. :type tped: :py:class:`numpy.array` :type tfam: :py:class:`numpy.array` :type samples: dict :type oldSamples: dict :type prefix: str :returns: a tuple containing the completion (:py:class:`numpy.array`) as first element, and the concordance (:py:class:`dict`) as last element. Reads the ``tped`` file and compute the completion for each duplicated samples and the pairwise concordance between duplicated samples. .. note:: The completion and concordance computation excludes a markers if it's on chromosome 24 and if the sample is a female. .. note:: A missing genotype is encoded by ``0``. .. note:: No percentage is computed here, only the numbers. Percentages are computing in other functions: :py:func:`printStatistics`, for completion, and :py:func:`printConcordance`, for concordance. **Completion** Computes the completion of none zero values (where all genotypes of at least one duplicated sample are no call [*i.e.* ``0``]). The completion of sample :math:`i` (*i.e.* :math:`Comp_i`) is the number of genotypes that have a call divided by the total number of genotypes (the set :math:`G_i`): .. math:: Comp_i = \\frac{||g \\in G_i\\textrm{ where }g \\neq 0||}{||G_i||} .. note:: We consider a genotype as being missing if the sample is a male and if a marker on chromosome 23 or 24 is heterozygous. **Concordance** Computes the pairwise concordance between duplicated samples. For each marker, if both genotypes are not missing, we add one to the total number of compared markers. 
If both genotypes are the same, we add one to the number of concordant calls. We write the observed genotype difference in the file ``prefix.diff``. The concordance between sample :math:`i` and :math:`j` (*i.e.* :math:`Concordance_{i,j}`) is the number of genotypes that are equal divided by the total number of genotypes (excluding the no calls): .. math:: Concordance_{i,j} = \\frac{ ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0|| }{ ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0|| } """ # The diff file containing the genotype differences between a pair of # duplicated samples diffOutput = None try: diffOutput = open(prefix + ".diff", "w") except IOError: msg = "%(prefix)s.diff: can't write file" % locals() raise ProgramError(msg) print >>diffOutput, "\t".join(["name", "famID", "indID", "dupIndex_1", "dupIndex_2", "genotype_1", "genotype_2"]) # The completion data type completion = np.array([[0 for i in xrange(len(tped[0][4:]))], [0 for i in xrange(len(tped[0][4:]))]]) # The concordance data type concordance = {} for sampleID in samples.keys(): nbDup = len(samples[sampleID]) concordance[sampleID] = [ np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)), np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)), ] ##################################################################### # Add options for concordance only for hetero SNPs... 
(all samples) # ##################################################################### for row in tped: genotype = row[4:] chromosome = row[0] snpName = row[1] for key, indexes in samples.iteritems(): for i, index in enumerate(indexes): sex = tfam[oldSamples[key][i]][4] genotype1 = set(genotype[index].split(" ")) # Updating the completion if not ((chromosome == "24") and (sex == "2")): if (sex == "1") and (chromosome in ["23", "24"]): if ("0" not in genotype1) and (len(genotype1) != 2): completion[0][index] += 1 elif "0" not in genotype1: completion[0][index] += 1 completion[1][index] += 1 # Updating the concordance for j in xrange(i + 1, len(indexes)): genotype2 = set(genotype[indexes[j]].split(" ")) # This is for man on chr 23 and 24, if heterozygous!!! # # if (sex == "1") and (chromosome in ["23", "24"]): # if ("0" not in genotype1) and ("0" not in genotype2): # if (len(genotype1) != 2) and (len(genotype2) != 2): # concordance[key][1][i,j] += 1 # concordance[key][1][j,i] += 1 # if genotype1 == genotype2: # concordance[key][0][i,j] += 1 # concordance[key][0][j,i] += 1 # else: # # Print to diff file # toPrint = [snpName, key[0], key[1], # str(index+1), # str(indexes[j]+1)] # toPrint.append(" ".join(genotype1)) # if len(genotype1) == 1: # toPrint[-1] += " %s" % toPrint[-1] # toPrint.append(" ".join(genotype2)) # if len(genotype2) == 1: # toPrint[-1] += " %s" % toPrint[-1] # print >>diffOutput, "\t".join(toPrint) # # elif ("0" not in genotype1) and ("0" not in genotype2): if ("0" not in genotype1) and ("0" not in genotype2): # Both have calls concordance[key][1][i, j] += 1 concordance[key][1][j, i] += 1 if genotype1 == genotype2: concordance[key][0][i, j] += 1 concordance[key][0][j, i] += 1 else: # Print to diff file toPrint = [snpName, key[0], key[1], str(index+1), str(indexes[j]+1)] toPrint.append(" ".join(genotype1)) if len(genotype1) == 1: toPrint[-1] += " %s" % toPrint[-1] toPrint.append(" ".join(genotype2)) if len(genotype2) == 1: toPrint[-1] += " %s" % 
toPrint[-1] print >>diffOutput, "\t".join(toPrint) diffOutput.close() for key in concordance.iterkeys(): for i in range(len(concordance[key][0])): concordance[key][0][i, i] = 1 concordance[key][1][i, i] = 1 return completion, concordance
[ "def", "computeStatistics", "(", "tped", ",", "tfam", ",", "samples", ",", "oldSamples", ",", "prefix", ")", ":", "# The diff file containing the genotype differences between a pair of", "# duplicated samples", "diffOutput", "=", "None", "try", ":", "diffOutput", "=", "...
Computes the completion and concordance of each samples. :param tped: the ``tped`` containing duplicated samples. :param tfam: the ``tfam`` containing duplicated samples. :param samples: the updated position of the samples in the tped containing only duplicated samples. :param oldSamples: the original duplicated sample positions. :param prefix: the prefix of all the files. :type tped: :py:class:`numpy.array` :type tfam: :py:class:`numpy.array` :type samples: dict :type oldSamples: dict :type prefix: str :returns: a tuple containing the completion (:py:class:`numpy.array`) as first element, and the concordance (:py:class:`dict`) as last element. Reads the ``tped`` file and compute the completion for each duplicated samples and the pairwise concordance between duplicated samples. .. note:: The completion and concordance computation excludes a markers if it's on chromosome 24 and if the sample is a female. .. note:: A missing genotype is encoded by ``0``. .. note:: No percentage is computed here, only the numbers. Percentages are computing in other functions: :py:func:`printStatistics`, for completion, and :py:func:`printConcordance`, for concordance. **Completion** Computes the completion of none zero values (where all genotypes of at least one duplicated sample are no call [*i.e.* ``0``]). The completion of sample :math:`i` (*i.e.* :math:`Comp_i`) is the number of genotypes that have a call divided by the total number of genotypes (the set :math:`G_i`): .. math:: Comp_i = \\frac{||g \\in G_i\\textrm{ where }g \\neq 0||}{||G_i||} .. note:: We consider a genotype as being missing if the sample is a male and if a marker on chromosome 23 or 24 is heterozygous. **Concordance** Computes the pairwise concordance between duplicated samples. For each marker, if both genotypes are not missing, we add one to the total number of compared markers. If both genotypes are the same, we add one to the number of concordant calls. 
We write the observed genotype difference in the file ``prefix.diff``. The concordance between sample :math:`i` and :math:`j` (*i.e.* :math:`Concordance_{i,j}`) is the number of genotypes that are equal divided by the total number of genotypes (excluding the no calls): .. math:: Concordance_{i,j} = \\frac{ ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0|| }{ ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0|| }
[ "Computes", "the", "completion", "and", "concordance", "of", "each", "samples", "." ]
python
train
AustralianSynchrotron/lightflow
lightflow/workflows.py
https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/workflows.py#L52-L87
def stop_workflow(config, *, names=None): """ Stop one or more workflows. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. names (list): List of workflow names, workflow ids or workflow job ids for the workflows that should be stopped. If all workflows should be stopped, set it to None. Returns: tuple: A tuple of the workflow jobs that were successfully stopped and the ones that could not be stopped. """ jobs = list_jobs(config, filter_by_type=JobType.Workflow) if names is not None: filtered_jobs = [] for job in jobs: if (job.id in names) or (job.name in names) or (job.workflow_id in names): filtered_jobs.append(job) else: filtered_jobs = jobs success = [] failed = [] for job in filtered_jobs: client = Client(SignalConnection(**config.signal, auto_connect=True), request_key=job.workflow_id) if client.send(Request(action='stop_workflow')).success: success.append(job) else: failed.append(job) return success, failed
[ "def", "stop_workflow", "(", "config", ",", "*", ",", "names", "=", "None", ")", ":", "jobs", "=", "list_jobs", "(", "config", ",", "filter_by_type", "=", "JobType", ".", "Workflow", ")", "if", "names", "is", "not", "None", ":", "filtered_jobs", "=", "...
Stop one or more workflows. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. names (list): List of workflow names, workflow ids or workflow job ids for the workflows that should be stopped. If all workflows should be stopped, set it to None. Returns: tuple: A tuple of the workflow jobs that were successfully stopped and the ones that could not be stopped.
[ "Stop", "one", "or", "more", "workflows", "." ]
python
train
pythongssapi/python-gssapi
gssapi/_utils.py
https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L143-L177
def check_last_err(func, self, *args, **kwargs): """Check and raise deferred errors before running the function This method checks :python:`_last_err` before running the wrapped function. If present and not None, the exception will be raised with its original traceback. """ if self._last_err is not None: try: if six.PY2: six.reraise(type(self._last_err), self._last_err, self._last_tb) else: # NB(directxman12): not using six.reraise in Python 3 leads # to cleaner tracebacks, and raise x is valid # syntax in Python 3 (unlike raise x, y, z) raise self._last_err finally: if six.PY2: del self._last_tb # in case of cycles, break glass self._last_err = None else: return func(self, *args, **kwargs) @deco.decorator def check_last_err(func, self, *args, **kwargs): if self._last_err is not None: try: raise self._last_err finally: self._last_err = None else: return func(self, *args, **kwargs)
[ "def", "check_last_err", "(", "func", ",", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_last_err", "is", "not", "None", ":", "try", ":", "if", "six", ".", "PY2", ":", "six", ".", "reraise", "(", "type", "(", ...
Check and raise deferred errors before running the function This method checks :python:`_last_err` before running the wrapped function. If present and not None, the exception will be raised with its original traceback.
[ "Check", "and", "raise", "deferred", "errors", "before", "running", "the", "function" ]
python
train
saltstack/salt
salt/utils/vmware.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1296-L1323
def create_dvportgroup(dvs_ref, spec): ''' Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec) ''' dvs_name = get_managed_object_name(dvs_ref) log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name) log.trace('spec = %s', spec) try: task = dvs_ref.CreateDVPortgroup_Task(spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) wait_for_task(task, dvs_name, six.text_type(task.__class__))
[ "def", "create_dvportgroup", "(", "dvs_ref", ",", "spec", ")", ":", "dvs_name", "=", "get_managed_object_name", "(", "dvs_ref", ")", "log", ".", "trace", "(", "'Adding portgroup %s to dvs \\'%s\\''", ",", "spec", ".", "name", ",", "dvs_name", ")", "log", ".", ...
Creates a distributed virtual portgroup on a distributed virtual switch (dvs) dvs_ref The dvs reference spec Portgroup spec (vim.DVPortgroupConfigSpec)
[ "Creates", "a", "distributed", "virtual", "portgroup", "on", "a", "distributed", "virtual", "switch", "(", "dvs", ")" ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2271-L2289
def get_account_details(self): """ Returns a dictionary containing information about the account. """ headers = self._manager.get_account_headers() acct_prefix = "x-account-" meta_prefix = ACCOUNT_META_PREFIX.lower() ret = {} for hkey, hval in list(headers.items()): lowkey = hkey.lower() if lowkey.startswith(acct_prefix): if not lowkey.startswith(meta_prefix): cleaned = hkey.replace(acct_prefix, "").replace("-", "_") try: # Most values are ints ret[cleaned] = int(hval) except ValueError: ret[cleaned] = hval return ret
[ "def", "get_account_details", "(", "self", ")", ":", "headers", "=", "self", ".", "_manager", ".", "get_account_headers", "(", ")", "acct_prefix", "=", "\"x-account-\"", "meta_prefix", "=", "ACCOUNT_META_PREFIX", ".", "lower", "(", ")", "ret", "=", "{", "}", ...
Returns a dictionary containing information about the account.
[ "Returns", "a", "dictionary", "containing", "information", "about", "the", "account", "." ]
python
train
jameshilliard/hlk-sw16
hlk_sw16/protocol.py
https://github.com/jameshilliard/hlk-sw16/blob/4f0c5a7b76b42167f4dc9d2aa6312c7518a8cd56/hlk_sw16/protocol.py#L249-L257
async def turn_on(self, switch=None): """Turn on relay.""" if switch is not None: switch = codecs.decode(switch.rjust(2, '0'), 'hex') packet = self.protocol.format_packet(b"\x10" + switch + b"\x01") else: packet = self.protocol.format_packet(b"\x0a") states = await self._send(packet) return states
[ "async", "def", "turn_on", "(", "self", ",", "switch", "=", "None", ")", ":", "if", "switch", "is", "not", "None", ":", "switch", "=", "codecs", ".", "decode", "(", "switch", ".", "rjust", "(", "2", ",", "'0'", ")", ",", "'hex'", ")", "packet", "...
Turn on relay.
[ "Turn", "on", "relay", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L349-L357
def get_out_subnet_id(cls, tenant_id): """Retrieve the subnet ID of OUT network. """ if 'out' not in cls.ip_db_obj: LOG.error("Fabric not prepared for tenant %s", tenant_id) return None db_obj = cls.ip_db_obj.get('out') out_subnet_dict = cls.get_out_ip_addr(tenant_id) sub = db_obj.get_subnet(out_subnet_dict.get('subnet')) return sub.subnet_id
[ "def", "get_out_subnet_id", "(", "cls", ",", "tenant_id", ")", ":", "if", "'out'", "not", "in", "cls", ".", "ip_db_obj", ":", "LOG", ".", "error", "(", "\"Fabric not prepared for tenant %s\"", ",", "tenant_id", ")", "return", "None", "db_obj", "=", "cls", "....
Retrieve the subnet ID of OUT network.
[ "Retrieve", "the", "subnet", "ID", "of", "OUT", "network", "." ]
python
train
NeuroML/pyNeuroML
pyneuroml/plot/PlotSpikes.py
https://github.com/NeuroML/pyNeuroML/blob/aeba2e3040b360bb26556f643cccbfb3dac3b8fb/pyneuroml/plot/PlotSpikes.py#L56-L105
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A script for plotting files containing spike time data") parser.add_argument('spiketimeFiles', type=str, metavar='<spiketime file>', help='List of text file containing spike times', nargs='+') parser.add_argument('-format', type=str, metavar='<format>', default=DEFAULTS['format'], help='How the spiketimes are represented on each line of file:\n'+\ 'id_t: id of cell, space(s)/tab(s), time of spike (default);\n'+\ 't_id: time of spike, space(s)/tab(s), id of cell;\n'+\ 'sonata: SONATA format HDF5 file containing spike times') parser.add_argument('-rates', action='store_true', default=DEFAULTS['rates'], help='Show a plot of rates') parser.add_argument('-showPlotsAlready', action='store_true', default=DEFAULTS['show_plots_already'], help='Show plots once generated') parser.add_argument('-saveSpikePlotTo', type=str, metavar='<spiketime plot filename>', default=DEFAULTS['save_spike_plot_to'], help='Name of file in which to save spiketime plot') parser.add_argument('-rateWindow', type=int, metavar='<rate window>', default=DEFAULTS['rate_window'], help='Window for rate calculation in ms') parser.add_argument('-rateBins', type=int, metavar='<rate bins>', default=DEFAULTS['rate_bins'], help='Number of bins for rate histogram') return parser.parse_args()
[ "def", "process_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"A script for plotting files containing spike time data\"", ")", "parser", ".", "add_argument", "(", "'spiketimeFiles'", ",", "type", "=", "str", ",", ...
Parse command-line arguments.
[ "Parse", "command", "-", "line", "arguments", "." ]
python
train
mlperf/training
reinforcement/tensorflow/minigo/gtp.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/gtp.py#L44-L70
def make_gtp_instance(load_file, cgos_mode=False, kgs_mode=False, minigui_mode=False): """Takes a path to model files and set up a GTP engine instance.""" n = DualNetwork(load_file) if cgos_mode: player = CGOSPlayer(network=n, seconds_per_move=5, timed_match=True, two_player_mode=True) else: player = MCTSPlayer(network=n, two_player_mode=True) name = "Minigo-" + os.path.basename(load_file) version = "0.2" engine = gtp_engine.Engine() engine.add_cmd_handler( gtp_engine.EngineCmdHandler(engine, name, version)) if kgs_mode: engine.add_cmd_handler(KgsCmdHandler(player)) engine.add_cmd_handler(RegressionsCmdHandler(player)) engine.add_cmd_handler(GoGuiCmdHandler(player)) if minigui_mode: engine.add_cmd_handler(MiniguiBasicCmdHandler(player, courtesy_pass=kgs_mode)) else: engine.add_cmd_handler(BasicCmdHandler(player, courtesy_pass=kgs_mode)) return engine
[ "def", "make_gtp_instance", "(", "load_file", ",", "cgos_mode", "=", "False", ",", "kgs_mode", "=", "False", ",", "minigui_mode", "=", "False", ")", ":", "n", "=", "DualNetwork", "(", "load_file", ")", "if", "cgos_mode", ":", "player", "=", "CGOSPlayer", "...
Takes a path to model files and set up a GTP engine instance.
[ "Takes", "a", "path", "to", "model", "files", "and", "set", "up", "a", "GTP", "engine", "instance", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/bgp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/bgp.py#L101-L152
def as4_capability(self, **kwargs): """Set Spanning Tree state. Args: enabled (bool): Is AS4 Capability enabled? (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ValueError: if `enabled` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=True) ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=False) """ enabled = kwargs.pop('enabled', True) callback = kwargs.pop('callback', self._callback) if not isinstance(enabled, bool): raise ValueError('%s must be `True` or `False`.' % repr(enabled)) as4_capability_args = dict(vrf_name=kwargs.pop('vrf', 'default'), rbridge_id=kwargs.pop('rbridge_id', '1')) as4_capability = getattr(self._rbridge, 'rbridge_id_router_router_bgp_router_bgp' '_attributes_capability_as4_enable') config = as4_capability(**as4_capability_args) if not enabled: capability = config.find('.//*capability') capability.set('operation', 'delete') # shutdown = capability.find('.//*as4-enable') # shutdown.set('operation', 'delete') return callback(config)
[ "def", "as4_capability", "(", "self", ",", "*", "*", "kwargs", ")", ":", "enabled", "=", "kwargs", ".", "pop", "(", "'enabled'", ",", "True", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "if", "no...
Set Spanning Tree state. Args: enabled (bool): Is AS4 Capability enabled? (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: ValueError: if `enabled` are invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.local_asn(local_as='65535', ... rbridge_id='225') ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=True) ... output = dev.bgp.as4_capability( ... rbridge_id='225', enabled=False)
[ "Set", "Spanning", "Tree", "state", "." ]
python
train
neherlab/treetime
treetime/treetime.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treetime.py#L361-L382
def plot_root_to_tip(self, add_internal=False, label=True, ax=None): """ Plot root-to-tip regression Parameters ---------- add_internal : bool If true, plot inte`rnal node positions label : bool If true, label the plots ax : matplotlib axes If not None, use the provided matplotlib axes to plot the results """ Treg = self.setup_TreeRegression() if self.clock_model and 'cov' in self.clock_model: cf = self.clock_model['valid_confidence'] else: cf = False Treg.clock_plot(ax=ax, add_internal=add_internal, confidence=cf, n_sigma=2, regression=self.clock_model)
[ "def", "plot_root_to_tip", "(", "self", ",", "add_internal", "=", "False", ",", "label", "=", "True", ",", "ax", "=", "None", ")", ":", "Treg", "=", "self", ".", "setup_TreeRegression", "(", ")", "if", "self", ".", "clock_model", "and", "'cov'", "in", ...
Plot root-to-tip regression Parameters ---------- add_internal : bool If true, plot inte`rnal node positions label : bool If true, label the plots ax : matplotlib axes If not None, use the provided matplotlib axes to plot the results
[ "Plot", "root", "-", "to", "-", "tip", "regression" ]
python
test
PyCQA/pylint-django
pylint_django/augmentations/__init__.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L759-L851
def apply_augmentations(linter): """Apply augmentation and suppression rules.""" augment_visit(linter, TypeChecker.visit_attribute, foreign_key_sets) augment_visit(linter, TypeChecker.visit_attribute, foreign_key_ids) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_field_display_method) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_style_attribute) suppress_message(linter, NameChecker.visit_assignname, 'invalid-name', is_urls_module_valid_constant) # supress errors when accessing magical class attributes suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manager_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_admin_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_field_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_charfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_datefield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_decimalfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_filefield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_imagefield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_ipfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_slugfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_foreignkeyfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_manytomanyfield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_onetoonefield_attribute) suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_form_attribute) for parents, attrs in VIEW_ATTRS: suppress_message(linter, 
TypeChecker.visit_attribute, 'no-member', generic_is_view_attribute(parents, attrs)) # formviews have too many ancestors, there's nothing the user of the library can do about that suppress_message(linter, MisdesignChecker.visit_classdef, 'too-many-ancestors', is_class('django.views.generic.edit.FormView')) # model forms have no __init__ method anywhere in their bases suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_class('django.forms.models.ModelForm')) # Meta suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_meta_subclass) pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_meta_subclass) suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_meta_subclass) suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_meta_subclass) suppress_message(linter, ClassChecker.visit_attribute, 'protected-access', allow_meta_protected_access) # Media suppress_message(linter, NameChecker.visit_assignname, 'C0103', is_model_media_valid_attributes) suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_media_subclass) pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_media_subclass) suppress_message(linter, ClassChecker.visit_classdef, 'no-init', is_model_media_subclass) suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_media_subclass) # Admin # Too many public methods (40+/20) # TODO: Count public methods of django.contrib.admin.options.ModelAdmin and increase # MisdesignChecker.config.max_public_methods to this value to count only user' methods. 
# nb_public_methods = 0 # for method in node.methods(): # if not method.name.startswith('_'): # nb_public_methods += 1 suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_admin_subclass) # Tests suppress_message(linter, MisdesignChecker.leave_classdef, 'R0904', is_model_test_case_subclass) # View # Method could be a function (get, post) suppress_message(linter, ClassChecker.leave_functiondef, 'no-self-use', is_model_view_subclass_method_shouldnt_be_function) # Unused argument 'request' (get, post) suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_model_view_subclass_unused_argument) suppress_message(linter, VariablesChecker.leave_functiondef, 'unused-argument', is_argument_named_request) # django-mptt suppress_message(linter, DocStringChecker.visit_classdef, 'missing-docstring', is_model_mpttmeta_subclass) pylint_newstyle_classdef_compat(linter, 'old-style-class', is_model_mpttmeta_subclass) suppress_message(linter, ClassChecker.visit_classdef, 'W0232', is_model_mpttmeta_subclass) suppress_message(linter, MisdesignChecker.leave_classdef, 'too-few-public-methods', is_model_mpttmeta_subclass) # factory_boy's DjangoModelFactory suppress_message(linter, TypeChecker.visit_attribute, 'no-member', is_model_factory) suppress_message(linter, ClassChecker.visit_functiondef, 'no-self-argument', is_factory_post_generation_method) # ForeignKey and OneToOneField # Must update this in a thread safe way to support the parallel option on pylint (-j) current_leave_module = VariablesChecker.leave_module if current_leave_module.__name__ == 'leave_module': # current_leave_module is not wrapped # Two threads may hit the next assignment concurrently, but the result is the same VariablesChecker.leave_module = wrap(current_leave_module, ignore_import_warnings_for_related_fields) # VariablesChecker.leave_module is now wrapped # else VariablesChecker.leave_module is already wrapped # wsgi.py suppress_message(linter, 
NameChecker.visit_assignname, 'invalid-name', is_wsgi_application)
[ "def", "apply_augmentations", "(", "linter", ")", ":", "augment_visit", "(", "linter", ",", "TypeChecker", ".", "visit_attribute", ",", "foreign_key_sets", ")", "augment_visit", "(", "linter", ",", "TypeChecker", ".", "visit_attribute", ",", "foreign_key_ids", ")", ...
Apply augmentation and suppression rules.
[ "Apply", "augmentation", "and", "suppression", "rules", "." ]
python
train
Contraz/demosys-py
demosys/loaders/scene/gltf.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/loaders/scene/gltf.py#L135-L164
def load_glb(self): """Loads a binary gltf file""" with open(self.path, 'rb') as fd: # Check header magic = fd.read(4) if magic != GLTF_MAGIC_HEADER: raise ValueError("{} has incorrect header {} != {}".format(self.path, magic, GLTF_MAGIC_HEADER)) version = struct.unpack('<I', fd.read(4))[0] if version != 2: raise ValueError("{} has unsupported version {}".format(self.path, version)) # Total file size including headers _ = struct.unpack('<I', fd.read(4))[0] # noqa # Chunk 0 - json chunk_0_length = struct.unpack('<I', fd.read(4))[0] chunk_0_type = fd.read(4) if chunk_0_type != b'JSON': raise ValueError("Expected JSON chunk, not {} in file {}".format(chunk_0_type, self.path)) json_meta = fd.read(chunk_0_length).decode() # chunk 1 - binary buffer chunk_1_length = struct.unpack('<I', fd.read(4))[0] chunk_1_type = fd.read(4) if chunk_1_type != b'BIN\x00': raise ValueError("Expected BIN chunk, not {} in file {}".format(chunk_1_type, self.path)) self.meta = GLTFMeta(self.path, json.loads(json_meta), binary_buffer=fd.read(chunk_1_length))
[ "def", "load_glb", "(", "self", ")", ":", "with", "open", "(", "self", ".", "path", ",", "'rb'", ")", "as", "fd", ":", "# Check header", "magic", "=", "fd", ".", "read", "(", "4", ")", "if", "magic", "!=", "GLTF_MAGIC_HEADER", ":", "raise", "ValueErr...
Loads a binary gltf file
[ "Loads", "a", "binary", "gltf", "file" ]
python
valid
janpipek/physt
physt/binnings.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L688-L701
def integer_binning(data=None, **kwargs) -> StaticBinning: """Construct fixed-width binning schema with bins centered around integers. Parameters ---------- range: Optional[Tuple[int]] min (included) and max integer (excluded) bin bin_width: Optional[int] group "bin_width" integers into one bin (not recommended) """ if "range" in kwargs: kwargs["range"] = tuple(r - 0.5 for r in kwargs["range"]) return fixed_width_binning(data=data, bin_width=kwargs.pop("bin_width", 1), align=True, bin_shift=0.5, **kwargs)
[ "def", "integer_binning", "(", "data", "=", "None", ",", "*", "*", "kwargs", ")", "->", "StaticBinning", ":", "if", "\"range\"", "in", "kwargs", ":", "kwargs", "[", "\"range\"", "]", "=", "tuple", "(", "r", "-", "0.5", "for", "r", "in", "kwargs", "["...
Construct fixed-width binning schema with bins centered around integers. Parameters ---------- range: Optional[Tuple[int]] min (included) and max integer (excluded) bin bin_width: Optional[int] group "bin_width" integers into one bin (not recommended)
[ "Construct", "fixed", "-", "width", "binning", "schema", "with", "bins", "centered", "around", "integers", "." ]
python
train
sirfoga/pyhal
hal/ml/predict.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/ml/predict.py#L21-L34
def train(self, x_data, y_data): """Trains model on inputs :param x_data: x matrix :param y_data: y array """ x_train, _, y_train, _ = train_test_split( x_data, y_data, test_size=0.67, random_state=None ) # cross-split self.model.fit(x_train, y_train)
[ "def", "train", "(", "self", ",", "x_data", ",", "y_data", ")", ":", "x_train", ",", "_", ",", "y_train", ",", "_", "=", "train_test_split", "(", "x_data", ",", "y_data", ",", "test_size", "=", "0.67", ",", "random_state", "=", "None", ")", "# cross-sp...
Trains model on inputs :param x_data: x matrix :param y_data: y array
[ "Trains", "model", "on", "inputs" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py#L939-L953
def is_alive(self): """Is the kernel process still running?""" if self.has_kernel: if self.kernel.poll() is None: return True else: return False elif self._hb_channel is not None: # We didn't start the kernel with this KernelManager so we # use the heartbeat. return self._hb_channel.is_beating() else: # no heartbeat and not local, we can't tell if it's running, # so naively return True return True
[ "def", "is_alive", "(", "self", ")", ":", "if", "self", ".", "has_kernel", ":", "if", "self", ".", "kernel", ".", "poll", "(", ")", "is", "None", ":", "return", "True", "else", ":", "return", "False", "elif", "self", ".", "_hb_channel", "is", "not", ...
Is the kernel process still running?
[ "Is", "the", "kernel", "process", "still", "running?" ]
python
test
ONSdigital/sdc-rabbit
sdc/rabbit/consumers.py
https://github.com/ONSdigital/sdc-rabbit/blob/985adfdb09cf1b263a1f311438baeb42cbcb503a/sdc/rabbit/consumers.py#L186-L197
def setup_exchange(self, exchange_name): """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare """ logger.info('Declaring exchange', name=exchange_name) self._channel.exchange_declare(self.on_exchange_declareok, exchange_name, self._exchange_type)
[ "def", "setup_exchange", "(", "self", ",", "exchange_name", ")", ":", "logger", ".", "info", "(", "'Declaring exchange'", ",", "name", "=", "exchange_name", ")", "self", ".", "_channel", ".", "exchange_declare", "(", "self", ".", "on_exchange_declareok", ",", ...
Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare
[ "Setup", "the", "exchange", "on", "RabbitMQ", "by", "invoking", "the", "Exchange", ".", "Declare", "RPC", "command", ".", "When", "it", "is", "complete", "the", "on_exchange_declareok", "method", "will", "be", "invoked", "by", "pika", "." ]
python
train
videntity/django-djmongo
djmongo/console/utils.py
https://github.com/videntity/django-djmongo/blob/7534e0981a2bc12634cf3f1ed03353623dc57565/djmongo/console/utils.py#L151-L166
def mongodb_drop_collection(database_name, collection_name): """Drop Collection""" try: mongodb_client_url = getattr(settings, 'MONGODB_CLIENT', 'mongodb://localhost:27017/') mc = MongoClient(mongodb_client_url,document_class=OrderedDict) dbs = mc[database_name] dbs.drop_collection(collection_name) # print "success" return "" except: # error connecting to mongodb # print str(sys.exc_info()) return str(sys.exc_info())
[ "def", "mongodb_drop_collection", "(", "database_name", ",", "collection_name", ")", ":", "try", ":", "mongodb_client_url", "=", "getattr", "(", "settings", ",", "'MONGODB_CLIENT'", ",", "'mongodb://localhost:27017/'", ")", "mc", "=", "MongoClient", "(", "mongodb_clie...
Drop Collection
[ "Drop", "Collection" ]
python
train
NetEaseGame/ATX
atx/adbkit/device.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/adbkit/device.py#L47-L56
def run_cmd(self, *args, **kwargs): """ Unix style output, already replace \r\n to \n Args: - timeout (float): timeout for a command exec """ timeout = kwargs.pop('timeout', None) p = self.raw_cmd(*args, **kwargs) return p.communicate(timeout=timeout)[0].decode('utf-8').replace('\r\n', '\n')
[ "def", "run_cmd", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "None", ")", "p", "=", "self", ".", "raw_cmd", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ret...
Unix style output, already replace \r\n to \n Args: - timeout (float): timeout for a command exec
[ "Unix", "style", "output", "already", "replace", "\\", "r", "\\", "n", "to", "\\", "n" ]
python
train
ForensicArtifacts/artifacts
utils/dependencies.py
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/utils/dependencies.py#L168-L253
def _CheckPythonModuleVersion( self, module_name, module_object, version_property, minimum_version, maximum_version): """Checks the version of a Python module. Args: module_object (module): Python module. module_name (str): name of the Python module. version_property (str): version attribute or function. minimum_version (str): minimum version. maximum_version (str): maximum version. Returns: tuple: consists: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message. """ module_version = None if not version_property.endswith('()'): module_version = getattr(module_object, version_property, None) else: version_method = getattr( module_object, version_property[:-2], None) if version_method: module_version = version_method() if not module_version: status_message = ( 'unable to determine version information for: {0:s}').format( module_name) return False, status_message # Make sure the module version is a string. module_version = '{0!s}'.format(module_version) # Split the version string and convert every digit into an integer. # A string compare of both version strings will yield an incorrect result. # Strip any semantic suffixes such as a1, b1, pre, post, rc, dev. 
module_version = self._VERSION_NUMBERS_REGEX.findall(module_version)[0] if module_version[-1] == '.': module_version = module_version[:-1] try: module_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(module_version))) except ValueError: status_message = 'unable to parse module version: {0:s} {1:s}'.format( module_name, module_version) return False, status_message if minimum_version: try: minimum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(minimum_version))) except ValueError: status_message = 'unable to parse minimum version: {0:s} {1:s}'.format( module_name, minimum_version) return False, status_message if module_version_map < minimum_version_map: status_message = ( '{0:s} version: {1!s} is too old, {2!s} or later required').format( module_name, module_version, minimum_version) return False, status_message if maximum_version: try: maximum_version_map = list( map(int, self._VERSION_SPLIT_REGEX.split(maximum_version))) except ValueError: status_message = 'unable to parse maximum version: {0:s} {1:s}'.format( module_name, maximum_version) return False, status_message if module_version_map > maximum_version_map: status_message = ( '{0:s} version: {1!s} is too recent, {2!s} or earlier ' 'required').format(module_name, module_version, maximum_version) return False, status_message status_message = '{0:s} version: {1!s}'.format(module_name, module_version) return True, status_message
[ "def", "_CheckPythonModuleVersion", "(", "self", ",", "module_name", ",", "module_object", ",", "version_property", ",", "minimum_version", ",", "maximum_version", ")", ":", "module_version", "=", "None", "if", "not", "version_property", ".", "endswith", "(", "'()'"...
Checks the version of a Python module. Args: module_object (module): Python module. module_name (str): name of the Python module. version_property (str): version attribute or function. minimum_version (str): minimum version. maximum_version (str): maximum version. Returns: tuple: consists: bool: True if the Python module is available and conforms to the minimum required version, False otherwise. str: status message.
[ "Checks", "the", "version", "of", "a", "Python", "module", "." ]
python
train
dmbee/seglearn
seglearn/pipe.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L327-L342
def predict_log_proba(self, X): """ Apply transforms, and predict_log_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt, _, _ = self._transform(X) return self._final_estimator.predict_log_proba(Xt)
[ "def", "predict_log_proba", "(", "self", ",", "X", ")", ":", "Xt", ",", "_", ",", "_", "=", "self", ".", "_transform", "(", "X", ")", "return", "self", ".", "_final_estimator", ".", "predict_log_proba", "(", "Xt", ")" ]
Apply transforms, and predict_log_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes]
[ "Apply", "transforms", "and", "predict_log_proba", "of", "the", "final", "estimator" ]
python
train
azavea/python-omgeo
omgeo/postprocessors.py
https://github.com/azavea/python-omgeo/blob/40f4e006f087dbc795a5d954ffa2c0eab433f8c9/omgeo/postprocessors.py#L37-L46
def process(self, candidates): """ :arg list candidates: list of Candidate instances """ for c in candidates[:]: if c.locator not in self.good_locators: # TODO: search string, i.e. find "EU_Street_Name" in "EU_Street_Name.GBR_StreetName" candidates.remove(c) return candidates
[ "def", "process", "(", "self", ",", "candidates", ")", ":", "for", "c", "in", "candidates", "[", ":", "]", ":", "if", "c", ".", "locator", "not", "in", "self", ".", "good_locators", ":", "# TODO: search string, i.e. find \"EU_Street_Name\" in \"EU_Street_Name.GBR_...
:arg list candidates: list of Candidate instances
[ ":", "arg", "list", "candidates", ":", "list", "of", "Candidate", "instances" ]
python
train
ereOn/azmq
azmq/metadata.py
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/metadata.py#L10-L29
def metadata_to_buffers(metadata): """ Transform a dict of metadata into a sequence of buffers. :param metadata: The metadata, as a dict. :returns: A list of buffers. """ results = [] for key, value in metadata.items(): assert len(key) < 256 assert len(value) < 2 ** 32 results.extend([ struct.pack('!B', len(key)), key, struct.pack('!I', len(value)), value, ]) return results
[ "def", "metadata_to_buffers", "(", "metadata", ")", ":", "results", "=", "[", "]", "for", "key", ",", "value", "in", "metadata", ".", "items", "(", ")", ":", "assert", "len", "(", "key", ")", "<", "256", "assert", "len", "(", "value", ")", "<", "2"...
Transform a dict of metadata into a sequence of buffers. :param metadata: The metadata, as a dict. :returns: A list of buffers.
[ "Transform", "a", "dict", "of", "metadata", "into", "a", "sequence", "of", "buffers", "." ]
python
train
trevisanj/f311
f311/hapi.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/hapi.py#L9561-L9604
def partitionSum(M,I,T,step=None): """ INPUT PARAMETERS: M: HITRAN molecule number (required) I: HITRAN isotopologue number (required) T: temperature conditions (required) step: step to calculate temperatures (optional) OUTPUT PARAMETERS: TT: list of temperatures (present only if T is a list) PartSum: partition sums calculated on a list of temperatures --- DESCRIPTION: Calculate range of partition sums at different temperatures. This function uses a python implementation of TIPS-2011 code: Reference: A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman. Total internal partition sums to support planetary remote sensing. Icarus, Volume 215, Issue 1, September 2011, Pages 391–400 http://dx.doi.org/10.1016/j.icarus.2011.06.004 Output depends on a structure of input parameter T so that: 1) If T is a scalar/list and step IS NOT provided, then calculate partition sums over each value of T. 2) If T is a list and step parameter IS provided, then calculate partition sums between T[0] and T[1] with a given step. --- EXAMPLE OF USAGE: PartSum = partitionSum(1,1,[296,1000]) TT,PartSum = partitionSum(1,1,[296,1000],step=0.1) --- """ # partitionSum if not step: if type(T) not in set([list,tuple]): return BD_TIPS_2011_PYTHON(M,I,T)[1] else: return [BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in T] else: #n = (T[1]-T[0])/step #TT = linspace(T[0],T[1],n) TT = arange(T[0],T[1],step) return TT,array([BD_TIPS_2011_PYTHON(M,I,temp)[1] for temp in TT])
[ "def", "partitionSum", "(", "M", ",", "I", ",", "T", ",", "step", "=", "None", ")", ":", "# partitionSum", "if", "not", "step", ":", "if", "type", "(", "T", ")", "not", "in", "set", "(", "[", "list", ",", "tuple", "]", ")", ":", "return", "BD_T...
INPUT PARAMETERS: M: HITRAN molecule number (required) I: HITRAN isotopologue number (required) T: temperature conditions (required) step: step to calculate temperatures (optional) OUTPUT PARAMETERS: TT: list of temperatures (present only if T is a list) PartSum: partition sums calculated on a list of temperatures --- DESCRIPTION: Calculate range of partition sums at different temperatures. This function uses a python implementation of TIPS-2011 code: Reference: A. L. Laraia, R. R. Gamache, J. Lamouroux, I. E. Gordon, L. S. Rothman. Total internal partition sums to support planetary remote sensing. Icarus, Volume 215, Issue 1, September 2011, Pages 391–400 http://dx.doi.org/10.1016/j.icarus.2011.06.004 Output depends on a structure of input parameter T so that: 1) If T is a scalar/list and step IS NOT provided, then calculate partition sums over each value of T. 2) If T is a list and step parameter IS provided, then calculate partition sums between T[0] and T[1] with a given step. --- EXAMPLE OF USAGE: PartSum = partitionSum(1,1,[296,1000]) TT,PartSum = partitionSum(1,1,[296,1000],step=0.1) ---
[ "INPUT", "PARAMETERS", ":", "M", ":", "HITRAN", "molecule", "number", "(", "required", ")", "I", ":", "HITRAN", "isotopologue", "number", "(", "required", ")", "T", ":", "temperature", "conditions", "(", "required", ")", "step", ":", "step", "to", "calcula...
python
train
aio-libs/aioodbc
aioodbc/cursor.py
https://github.com/aio-libs/aioodbc/blob/01245560828d4adce0d7d16930fa566102322a0a/aioodbc/cursor.py#L200-L210
def tables(self, **kw): """Creates a result set of tables in the database that match the given criteria. :param table: the table tname :param catalog: the catalog name :param schema: the schmea name :param tableType: one of TABLE, VIEW, SYSTEM TABLE ... """ fut = self._run_operation(self._impl.tables, **kw) return fut
[ "def", "tables", "(", "self", ",", "*", "*", "kw", ")", ":", "fut", "=", "self", ".", "_run_operation", "(", "self", ".", "_impl", ".", "tables", ",", "*", "*", "kw", ")", "return", "fut" ]
Creates a result set of tables in the database that match the given criteria. :param table: the table tname :param catalog: the catalog name :param schema: the schmea name :param tableType: one of TABLE, VIEW, SYSTEM TABLE ...
[ "Creates", "a", "result", "set", "of", "tables", "in", "the", "database", "that", "match", "the", "given", "criteria", "." ]
python
train
qiniu/python-sdk
qiniu/services/storage/bucket.py
https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/services/storage/bucket.py#L283-L303
def delete_after_days(self, bucket, key, days): """更新文件生命周期 Returns: 一个dict变量,返回结果类似: [ { "code": <HttpCode int>, "data": <Data> }, { "code": <HttpCode int> }, { "code": <HttpCode int> }, { "code": <HttpCode int> }, { "code": <HttpCode int>, "data": { "error": "<ErrorMessage string>" } }, ... ] 一个ResponseInfo对象 Args: bucket: 目标资源空间 key: 目标资源文件名 days: 指定天数 """ resource = entry(bucket, key) return self.__rs_do('deleteAfterDays', resource, days)
[ "def", "delete_after_days", "(", "self", ",", "bucket", ",", "key", ",", "days", ")", ":", "resource", "=", "entry", "(", "bucket", ",", "key", ")", "return", "self", ".", "__rs_do", "(", "'deleteAfterDays'", ",", "resource", ",", "days", ")" ]
更新文件生命周期 Returns: 一个dict变量,返回结果类似: [ { "code": <HttpCode int>, "data": <Data> }, { "code": <HttpCode int> }, { "code": <HttpCode int> }, { "code": <HttpCode int> }, { "code": <HttpCode int>, "data": { "error": "<ErrorMessage string>" } }, ... ] 一个ResponseInfo对象 Args: bucket: 目标资源空间 key: 目标资源文件名 days: 指定天数
[ "更新文件生命周期" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L28-L42
def hide_routemap_holder_route_map_action_rm(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') action_rm = ET.SubElement(route_map, "action-rm") action_rm.text = kwargs.pop('action_rm') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_action_rm", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-routemap-holder\"", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
gem/oq-engine
openquake/commonlib/logictree.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/logictree.py#L409-L427
def _apply_uncertainty_to_geometry(self, source, value): """ Modify ``source`` geometry with the uncertainty value ``value`` """ if self.uncertainty_type == 'simpleFaultDipRelative': source.modify('adjust_dip', dict(increment=value)) elif self.uncertainty_type == 'simpleFaultDipAbsolute': source.modify('set_dip', dict(dip=value)) elif self.uncertainty_type == 'simpleFaultGeometryAbsolute': trace, usd, lsd, dip, spacing = value source.modify( 'set_geometry', dict(fault_trace=trace, upper_seismogenic_depth=usd, lower_seismogenic_depth=lsd, dip=dip, spacing=spacing)) elif self.uncertainty_type == 'complexFaultGeometryAbsolute': edges, spacing = value source.modify('set_geometry', dict(edges=edges, spacing=spacing)) elif self.uncertainty_type == 'characteristicFaultGeometryAbsolute': source.modify('set_geometry', dict(surface=value))
[ "def", "_apply_uncertainty_to_geometry", "(", "self", ",", "source", ",", "value", ")", ":", "if", "self", ".", "uncertainty_type", "==", "'simpleFaultDipRelative'", ":", "source", ".", "modify", "(", "'adjust_dip'", ",", "dict", "(", "increment", "=", "value", ...
Modify ``source`` geometry with the uncertainty value ``value``
[ "Modify", "source", "geometry", "with", "the", "uncertainty", "value", "value" ]
python
train
cournape/audiolab
audiolab/pysndfile/matapi.py
https://github.com/cournape/audiolab/blob/e4918832c1e52b56428c5f3535ddeb9d9daff9ac/audiolab/pysndfile/matapi.py#L87-L142
def _reader_factory(name, filetype, descr): """Factory for reader functions ala matlab.""" def basic_reader(filename, last = None, first = 0): """Common "template" to all read functions.""" hdl = Sndfile(filename, 'r') try: if not hdl.format.file_format == filetype: raise ValueError, "%s is not a %s file (is %s)" \ % (filename, filetype, hdl.format.file_format) fs = hdl.samplerate enc = hdl.encoding # Set the pointer to start position nf = hdl.seek(first, 1) if not nf == first: raise IOError("Error while seeking at starting position") if last is None: nframes = hdl.nframes - first data = hdl.read_frames(nframes) else: data = hdl.read_frames(last) finally: hdl.close() return data, fs, enc doc = \ """Simple reader for %(format)s audio files. Parameters ---------- filename : str Name of the file to read last : int Last frame to read. If None, this is equal to the number of frames in the file. first : int First frame to read. If 0, means starting from the beginning of the file. Returns ------- data : array the read data (one column per channel) fs : int the sampling rate enc : str the encoding of the file, such as 'pcm16', 'float32', etc... Notes ----- For total control over options, such as output's dtype, etc..., you should use Sndfile class instances instead""" % {'format': str(descr)} basic_reader.__doc__ = doc basic_reader.__name__ = name return basic_reader
[ "def", "_reader_factory", "(", "name", ",", "filetype", ",", "descr", ")", ":", "def", "basic_reader", "(", "filename", ",", "last", "=", "None", ",", "first", "=", "0", ")", ":", "\"\"\"Common \"template\" to all read functions.\"\"\"", "hdl", "=", "Sndfile", ...
Factory for reader functions ala matlab.
[ "Factory", "for", "reader", "functions", "ala", "matlab", "." ]
python
train
apache/airflow
airflow/_vendor/nvd3/NVD3Chart.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L419-L448
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False): """Create X-axis""" axis = {} if custom_format and format: axis['tickFormat'] = format elif format: if format == 'AM_PM': axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }" else: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" # date format : see https://github.com/mbostock/d3/wiki/Time-Formatting if date: self.dateformat = format axis['tickFormat'] = ("function(d) { return d3.time.format('%s')" "(new Date(parseInt(d))) }\n" "" % self.dateformat) # flag is the x Axis is a date if name[0] == 'x': self.x_axis_date = True # Add new axis to list of axis self.axislist[name] = axis # Create x2Axis if focus_enable if name == "xAxis" and self.focus_enable: self.axislist['x2Axis'] = axis
[ "def", "create_x_axis", "(", "self", ",", "name", ",", "label", "=", "None", ",", "format", "=", "None", ",", "date", "=", "False", ",", "custom_format", "=", "False", ")", ":", "axis", "=", "{", "}", "if", "custom_format", "and", "format", ":", "axi...
Create X-axis
[ "Create", "X", "-", "axis" ]
python
test
dmwm/DBS
Client/src/python/dbs/apis/dbsClient.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L465-L482
def listFileParentsByLumi(self, **kwargs): """ API to list file parents using lumi section info. :param block_name: name of block that has files who's parents needs to be found (Required) :type block_name: str :param logical_file_name: if not all the file parentages under the block needed, this lfn list gives the files that needs to find its parents(optional). :type logical_file_name: list of string :returns: List of dictionaries containing following keys [cid,pid] :rtype: list of dicts """ validParameters = ['block_name', 'logical_file_name'] requiredParameters = {'forced': ['block_name']} checkInputParameter(method="listFileParentsByLumi", parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) return self.__callServer("fileparentsbylumi", data=kwargs, callmethod='POST')
[ "def", "listFileParentsByLumi", "(", "self", ",", "*", "*", "kwargs", ")", ":", "validParameters", "=", "[", "'block_name'", ",", "'logical_file_name'", "]", "requiredParameters", "=", "{", "'forced'", ":", "[", "'block_name'", "]", "}", "checkInputParameter", "...
API to list file parents using lumi section info. :param block_name: name of block that has files who's parents needs to be found (Required) :type block_name: str :param logical_file_name: if not all the file parentages under the block needed, this lfn list gives the files that needs to find its parents(optional). :type logical_file_name: list of string :returns: List of dictionaries containing following keys [cid,pid] :rtype: list of dicts
[ "API", "to", "list", "file", "parents", "using", "lumi", "section", "info", "." ]
python
train
LCAV/pylocus
pylocus/algorithms.py
https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/algorithms.py#L199-L206
def reconstruct_sdp(edm, all_points, W=None, print_out=False, lamda=1000, **kwargs): """ Reconstruct point set using semi-definite rank relaxation. """ from .edm_completion import semidefinite_relaxation edm_complete = semidefinite_relaxation( edm, lamda=lamda, W=W, print_out=print_out, **kwargs) Xhat = reconstruct_mds(edm_complete, all_points, method='geometric') return Xhat, edm_complete
[ "def", "reconstruct_sdp", "(", "edm", ",", "all_points", ",", "W", "=", "None", ",", "print_out", "=", "False", ",", "lamda", "=", "1000", ",", "*", "*", "kwargs", ")", ":", "from", ".", "edm_completion", "import", "semidefinite_relaxation", "edm_complete", ...
Reconstruct point set using semi-definite rank relaxation.
[ "Reconstruct", "point", "set", "using", "semi", "-", "definite", "rank", "relaxation", "." ]
python
train
dwavesystems/dimod
dimod/higherorder/polynomial.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/polynomial.py#L283-L341
def normalize(self, bias_range=1, poly_range=None, ignored_terms=None): """Normalizes the biases of the binary polynomial such that they fall in the provided range(s). If `poly_range` is provided, then `bias_range` will be treated as the range for the linear biases and `poly_range` will be used for the range of the other biases. Args: bias_range (number/pair): Value/range by which to normalize the all the biases, or if `poly_range` is provided, just the linear biases. poly_range (number/pair, optional): Value/range by which to normalize the higher order biases. ignored_terms (iterable, optional): Biases associated with these terms are not scaled. """ def parse_range(r): if isinstance(r, Number): return -abs(r), abs(r) return r if ignored_terms is None: ignored_terms = set() else: ignored_terms = {asfrozenset(term) for term in ignored_terms} if poly_range is None: linear_range, poly_range = bias_range, bias_range else: linear_range = bias_range lin_range, poly_range = map(parse_range, (linear_range, poly_range)) # determine the current ranges for linear, higherorder lmin = lmax = 0 pmin = pmax = 0 for term, bias in self.items(): if term in ignored_terms: # we don't use the ignored terms to calculate the scaling continue if len(term) == 1: lmin = min(bias, lmin) lmax = max(bias, lmax) elif len(term) > 1: pmin = min(bias, pmin) pmax = max(bias, pmax) inv_scalar = max(lmin / lin_range[0], lmax / lin_range[1], pmin / poly_range[0], pmax / poly_range[1]) if inv_scalar != 0: self.scale(1 / inv_scalar, ignored_terms=ignored_terms)
[ "def", "normalize", "(", "self", ",", "bias_range", "=", "1", ",", "poly_range", "=", "None", ",", "ignored_terms", "=", "None", ")", ":", "def", "parse_range", "(", "r", ")", ":", "if", "isinstance", "(", "r", ",", "Number", ")", ":", "return", "-",...
Normalizes the biases of the binary polynomial such that they fall in the provided range(s). If `poly_range` is provided, then `bias_range` will be treated as the range for the linear biases and `poly_range` will be used for the range of the other biases. Args: bias_range (number/pair): Value/range by which to normalize the all the biases, or if `poly_range` is provided, just the linear biases. poly_range (number/pair, optional): Value/range by which to normalize the higher order biases. ignored_terms (iterable, optional): Biases associated with these terms are not scaled.
[ "Normalizes", "the", "biases", "of", "the", "binary", "polynomial", "such", "that", "they", "fall", "in", "the", "provided", "range", "(", "s", ")", "." ]
python
train
ipython/ipynb
ipynb/fs/finder.py
https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/fs/finder.py#L24-L37
def _get_paths(self, fullname): """ Generate ordered list of paths we should look for fullname module in """ real_path = os.path.join(*fullname[len(self.package_prefix):].split('.')) for base_path in sys.path: if base_path == '': # Empty string means process's cwd base_path = os.getcwd() path = os.path.join(base_path, real_path) yield path + '.ipynb' yield path + '.py' yield os.path.join(path, '__init__.ipynb') yield os.path.join(path, '__init__.py')
[ "def", "_get_paths", "(", "self", ",", "fullname", ")", ":", "real_path", "=", "os", ".", "path", ".", "join", "(", "*", "fullname", "[", "len", "(", "self", ".", "package_prefix", ")", ":", "]", ".", "split", "(", "'.'", ")", ")", "for", "base_pat...
Generate ordered list of paths we should look for fullname module in
[ "Generate", "ordered", "list", "of", "paths", "we", "should", "look", "for", "fullname", "module", "in" ]
python
train
MacHu-GWU/pathlib_mate-project
pathlib_mate/mate_tool_box.py
https://github.com/MacHu-GWU/pathlib_mate-project/blob/f9fb99dd7cc9ea05d1bec8b9ce8f659e8d97b0f1/pathlib_mate/mate_tool_box.py#L291-L310
def autopep8(self, **kwargs): # pragma: no cover """ Auto convert your python code in a directory to pep8 styled code. :param kwargs: arguments for ``autopep8.fix_code`` method. **中文文档** 将目录下的所有Python文件用pep8风格格式化。增加其可读性和规范性。 """ self.assert_is_dir_and_exists() for p in self.select_by_ext(".py"): with open(p.abspath, "rb") as f: code = f.read().decode("utf-8") formatted_code = autopep8.fix_code(code, **kwargs) with open(p.abspath, "wb") as f: f.write(formatted_code.encode("utf-8"))
[ "def", "autopep8", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "self", ".", "assert_is_dir_and_exists", "(", ")", "for", "p", "in", "self", ".", "select_by_ext", "(", "\".py\"", ")", ":", "with", "open", "(", "p", ".", "abspath"...
Auto convert your python code in a directory to pep8 styled code. :param kwargs: arguments for ``autopep8.fix_code`` method. **中文文档** 将目录下的所有Python文件用pep8风格格式化。增加其可读性和规范性。
[ "Auto", "convert", "your", "python", "code", "in", "a", "directory", "to", "pep8", "styled", "code", "." ]
python
valid
krukas/Trionyx
trionyx/trionyx/views/core.py
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L442-L455
def handle_request(self, request, app, model, pk): """Render and return tab""" ModelClass = self.get_model_class() object = ModelClass.objects.get(id=pk) tab_code = request.GET.get('tab') model_alias = request.GET.get('model_alias') model_alias = model_alias if model_alias else '{}.{}'.format(app, model) # TODO permission check item = tabs.get_tab(model_alias, object, tab_code) return item.get_layout(object).render(request)
[ "def", "handle_request", "(", "self", ",", "request", ",", "app", ",", "model", ",", "pk", ")", ":", "ModelClass", "=", "self", ".", "get_model_class", "(", ")", "object", "=", "ModelClass", ".", "objects", ".", "get", "(", "id", "=", "pk", ")", "tab...
Render and return tab
[ "Render", "and", "return", "tab" ]
python
train
coinbase/coinbase-python
coinbase/wallet/model.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/model.py#L279-L281
def commit_withdrawal(self, withdrawal_id, **params): """https://developers.coinbase.com/api/v2#commit-a-withdrawal""" return self.api_client.commit_withdrawal(self.id, withdrawal_id, **params)
[ "def", "commit_withdrawal", "(", "self", ",", "withdrawal_id", ",", "*", "*", "params", ")", ":", "return", "self", ".", "api_client", ".", "commit_withdrawal", "(", "self", ".", "id", ",", "withdrawal_id", ",", "*", "*", "params", ")" ]
https://developers.coinbase.com/api/v2#commit-a-withdrawal
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#commit", "-", "a", "-", "withdrawal" ]
python
train