repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
SBRG/ssbio
ssbio/protein/sequence/properties/scratch.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L106-L109
def sspro8_results(self): """Parse the SSpro8 output file and return a dict of secondary structure compositions. """ return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(self.out_sspro8)
[ "def", "sspro8_results", "(", "self", ")", ":", "return", "ssbio", ".", "protein", ".", "sequence", ".", "utils", ".", "fasta", ".", "load_fasta_file_as_dict_of_seqs", "(", "self", ".", "out_sspro8", ")" ]
Parse the SSpro8 output file and return a dict of secondary structure compositions.
[ "Parse", "the", "SSpro8", "output", "file", "and", "return", "a", "dict", "of", "secondary", "structure", "compositions", "." ]
python
train
postlund/pyatv
scripts/autogen_protobuf_extensions.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/scripts/autogen_protobuf_extensions.py#L54-L83
def extract_message_info(): """Get information about all messages of interest.""" base_path = BASE_PACKAGE.replace('.', '/') filename = os.path.join(base_path, 'ProtocolMessage.proto') with open(filename, 'r') as file: types_found = False for line in file: stripped = line.lstrip().rstrip() # Look for the Type enum if stripped == 'enum Type {': types_found = True continue elif types_found and stripped == '}': break elif not types_found: continue constant = stripped.split(' ')[0] title = constant.title().replace( '_', '').replace('Hid', 'HID') # Hack... accessor = title[0].lower() + title[1:] if not os.path.exists(os.path.join(base_path, title + '.proto')): continue yield MessageInfo( title + '_pb2', title, accessor, constant)
[ "def", "extract_message_info", "(", ")", ":", "base_path", "=", "BASE_PACKAGE", ".", "replace", "(", "'.'", ",", "'/'", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'ProtocolMessage.proto'", ")", "with", "open", "(", "filen...
Get information about all messages of interest.
[ "Get", "information", "about", "all", "messages", "of", "interest", "." ]
python
train
RLBot/RLBot
src/main/python/rlbot/parsing/custom_config.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/parsing/custom_config.py#L167-L181
def set_value(self, option, value, index=None): """ Sets the value on the given option. :param option: The name of the option as it appears in the config file :param value: The value that is being applied. If this section is indexed then the value must be a list (to be applied directly) or you must supply the index parameter, which will cause the value to be inserted into an existing list. :param index: If the attribute is indexed, we will use this index to insert the value you have supplied. :return: an instance of itself so that you can chain setting values together. """ if self.is_indexed and index is None and not isinstance(value, list): raise TypeError("Value should be a list when not giving an index in an indexed header") self.values[option].set_value(value=value, index=index) return self
[ "def", "set_value", "(", "self", ",", "option", ",", "value", ",", "index", "=", "None", ")", ":", "if", "self", ".", "is_indexed", "and", "index", "is", "None", "and", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "TypeError", "...
Sets the value on the given option. :param option: The name of the option as it appears in the config file :param value: The value that is being applied. If this section is indexed then the value must be a list (to be applied directly) or you must supply the index parameter, which will cause the value to be inserted into an existing list. :param index: If the attribute is indexed, we will use this index to insert the value you have supplied. :return: an instance of itself so that you can chain setting values together.
[ "Sets", "the", "value", "on", "the", "given", "option", ".", ":", "param", "option", ":", "The", "name", "of", "the", "option", "as", "it", "appears", "in", "the", "config", "file", ":", "param", "value", ":", "The", "value", "that", "is", "being", "...
python
train
remix/partridge
partridge/gtfs.py
https://github.com/remix/partridge/blob/0ba80fa30035e5e09fd8d7a7bdf1f28b93d53d03/partridge/gtfs.py#L154-L164
def _convert_types(self, filename: str, df: pd.DataFrame) -> None: """ Apply type conversions """ if df.empty: return converters = self._config.nodes.get(filename, {}).get("converters", {}) for col, converter in converters.items(): if col in df.columns: df[col] = converter(df[col])
[ "def", "_convert_types", "(", "self", ",", "filename", ":", "str", ",", "df", ":", "pd", ".", "DataFrame", ")", "->", "None", ":", "if", "df", ".", "empty", ":", "return", "converters", "=", "self", ".", "_config", ".", "nodes", ".", "get", "(", "f...
Apply type conversions
[ "Apply", "type", "conversions" ]
python
train
legoktm/fab
phabricator/__init__.py
https://github.com/legoktm/fab/blob/29a8aba9671ae661864cbdb24e2ac9b842f41633/phabricator/__init__.py#L78-L99
def request(self, method, params=None): """ Make a request to a method in the phabricator API :param method: Name of the API method to call :type method: basestring :param params: Optional dict of params to pass :type params: dict """ if params is None: params = {} if not self.phab_session: self.connect() url = '%s/api/%s' % (self.host, method) params['__conduit__'] = self.phab_session req = self.req_session.post(url, data={ 'params': json.dumps(params), 'output': 'json', }) return json.loads( req.content.decode(), object_pairs_hook=collections.OrderedDict )['result']
[ "def", "request", "(", "self", ",", "method", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "not", "self", ".", "phab_session", ":", "self", ".", "connect", "(", ")", "url", "=", "'%s/api/%s...
Make a request to a method in the phabricator API :param method: Name of the API method to call :type method: basestring :param params: Optional dict of params to pass :type params: dict
[ "Make", "a", "request", "to", "a", "method", "in", "the", "phabricator", "API", ":", "param", "method", ":", "Name", "of", "the", "API", "method", "to", "call", ":", "type", "method", ":", "basestring", ":", "param", "params", ":", "Optional", "dict", ...
python
train
pypa/pipenv
pipenv/project.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/project.py#L898-L906
def get_package_name_in_pipfile(self, package_name, dev=False): """Get the equivalent package name in pipfile""" key = "dev-packages" if dev else "packages" section = self.parsed_pipfile.get(key, {}) package_name = pep423_name(package_name) for name in section.keys(): if pep423_name(name) == package_name: return name return None
[ "def", "get_package_name_in_pipfile", "(", "self", ",", "package_name", ",", "dev", "=", "False", ")", ":", "key", "=", "\"dev-packages\"", "if", "dev", "else", "\"packages\"", "section", "=", "self", ".", "parsed_pipfile", ".", "get", "(", "key", ",", "{", ...
Get the equivalent package name in pipfile
[ "Get", "the", "equivalent", "package", "name", "in", "pipfile" ]
python
train
cloudtools/troposphere
troposphere/template_generator.py
https://github.com/cloudtools/troposphere/blob/f7ea5591a7c287a843adc9c184d2f56064cfc632/troposphere/template_generator.py#L200-L299
def _create_instance(self, cls, args, ref=None): """ Returns an instance of `cls` with `args` passed as arguments. Recursively inspects `args` to create nested objects and functions as necessary. `cls` will only be considered only if it's an object we track (i.e.: troposphere objects). If `cls` has a `props` attribute, nested properties will be instanciated as troposphere Property objects as necessary. If `cls` is a list and contains a single troposphere type, the returned value will be a list of instances of that type. """ if isinstance(cls, Sequence): if len(cls) == 1: # a list of 1 type means we must provide a list of such objects if (isinstance(args, basestring) or not isinstance(args, Sequence)): args = [args] return [self._create_instance(cls[0], v) for v in args] if isinstance(cls, Sequence)\ or cls not in self.inspect_members.union(self._custom_members): # this object doesn't map to any known object. could be a string # or int, or a Ref... or a list of types such as # [basestring, FindInMap, Ref] or maybe a # validator such as `integer` or `port_range` return self._convert_definition(args) elif issubclass(cls, AWSHelperFn): # special handling for functions, we want to handle it before # entering the other conditions. try: if issubclass(cls, Tags): arg_dict = {} for d in args: arg_dict[d['Key']] = d['Value'] return cls(arg_dict) if (isinstance(args, Sequence) and not isinstance(args, basestring)): return cls(*self._convert_definition(args)) if issubclass(cls, autoscaling.Metadata): return self._generate_autoscaling_metadata(cls, args) if issubclass(cls, Export): return cls(args['Name']) args = self._convert_definition(args) if isinstance(args, Ref) and issubclass(cls, Ref): # watch out for double-refs... # this can happen if an object's .props has 'Ref' # as the expected type (which is wrong and should be # changed to basestring!) 
return args return cls(args) except TypeError as ex: if '__init__() takes exactly' not in ex.message: raise # special AWSHelperFn typically take lowercased parameters, # but templates use uppercase. for this reason we cannot # map to most of them, so we fallback with a generic one. # this might not work for all types if they do extra # processing in their init routine return GenericHelperFn(args) elif isinstance(args, Mapping): # we try to build as many troposphere objects as we can by # inspecting its type validation metadata kwargs = {} kwargs.update(args) for prop_name in getattr(cls, 'props', []): if prop_name not in kwargs: continue # the user did not specify this value; skip it expected_type = cls.props[prop_name][0] if (isinstance(expected_type, Sequence) or expected_type in self.inspect_members): kwargs[prop_name] = self._create_instance( expected_type, kwargs[prop_name], prop_name) else: kwargs[prop_name] = self._convert_definition( kwargs[prop_name], prop_name) args = self._convert_definition(kwargs) if isinstance(args, Ref): # use the returned ref instead of creating a new object return args if isinstance(args, AWSHelperFn): return self._convert_definition(kwargs) assert isinstance(args, Mapping) return cls(title=ref, **args) return cls(self._convert_definition(args))
[ "def", "_create_instance", "(", "self", ",", "cls", ",", "args", ",", "ref", "=", "None", ")", ":", "if", "isinstance", "(", "cls", ",", "Sequence", ")", ":", "if", "len", "(", "cls", ")", "==", "1", ":", "# a list of 1 type means we must provide a list of...
Returns an instance of `cls` with `args` passed as arguments. Recursively inspects `args` to create nested objects and functions as necessary. `cls` will only be considered only if it's an object we track (i.e.: troposphere objects). If `cls` has a `props` attribute, nested properties will be instanciated as troposphere Property objects as necessary. If `cls` is a list and contains a single troposphere type, the returned value will be a list of instances of that type.
[ "Returns", "an", "instance", "of", "cls", "with", "args", "passed", "as", "arguments", "." ]
python
train
PmagPy/PmagPy
programs/aniso_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/aniso_magic.py#L16-L98
def main(): """ NAME aniso_magic.py DESCRIPTION plots anisotropy data with either bootstrap or hext ellipses SYNTAX aniso_magic.py [-h] [command line options] OPTIONS -h plots help message and quits -usr USER: set the user name -f AFILE, specify specimens.txt formatted file for input -fsa SAMPFILE, specify samples.txt file (required to plot by site) -fsi SITEFILE, specify site file (required to include location information) -x Hext [1963] and bootstrap -B DON'T do bootstrap, do Hext -par Tauxe [1998] parametric bootstrap -v plot bootstrap eigenvectors instead of ellipses -sit plot by site instead of entire file -crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected) -P don't make any plots - just fill in the specimens, samples, sites tables -sav don't make the tables - just save all the plots -fmt [svg, jpg, eps] format for output images, png default -gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan) -d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC -n N; specifies the number of bootstraps - default is 1000 DEFAULTS AFILE: specimens.txt plot bootstrap ellipses of Constable & Tauxe [1987] NOTES minor axis: circles major axis: triangles principal axis: squares directions are plotted on the lower hemisphere for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black """ args = sys.argv if "-h" in args: print(main.__doc__) sys.exit() verbose = pmagplotlib.verbose dir_path = pmag.get_named_arg("-WD", ".") input_dir_path = pmag.get_named_arg("-ID", "") num_bootstraps = pmag.get_named_arg("-n", 1000) ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0) ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0) ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0) iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1) isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0) iboot, vec = 1, 0 infile = pmag.get_named_arg('-f', 'specimens.txt') samp_file = pmag.get_named_arg('-fsa', 
'samples.txt') site_file = pmag.get_named_arg('-fsi', 'sites.txt') #outfile = pmag.get_named_arg("-F", "rmag_results.txt") fmt = pmag.get_named_arg("-fmt", "png") crd = pmag.get_named_arg("-crd", "s") comp, Dir, PDir = 0, [], [] user = pmag.get_named_arg("-usr", "") if '-B' in args: iboot, ihext = 0, 1 plots, verbose = 0, True if '-sav' in args: plots = 1 verbose = 0 if '-gtc' in args: ind = args.index('-gtc') d, i = float(args[ind+1]), float(args[ind+2]) PDir.append(d) PDir.append(i) if '-d' in args: comp = 1 ind = args.index('-d') vec = int(args[ind+1])-1 Dir = [float(args[ind+2]), float(args[ind+3])] ipmag.aniso_magic(infile=infile, samp_file=samp_file, site_file=site_file, ipar=ipar, ihext=ihext, ivec=ivec, iplot=iplot, isite=isite, iboot=iboot, vec=vec, Dir=Dir, PDir=PDir, comp=comp, user=user, fmt=fmt, crd=crd, verbose=verbose, plots=plots, num_bootstraps=num_bootstraps, dir_path=dir_path, input_dir_path=input_dir_path)
[ "def", "main", "(", ")", ":", "args", "=", "sys", ".", "argv", "if", "\"-h\"", "in", "args", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "verbose", "=", "pmagplotlib", ".", "verbose", "dir_path", "=", "pmag", ".", ...
NAME aniso_magic.py DESCRIPTION plots anisotropy data with either bootstrap or hext ellipses SYNTAX aniso_magic.py [-h] [command line options] OPTIONS -h plots help message and quits -usr USER: set the user name -f AFILE, specify specimens.txt formatted file for input -fsa SAMPFILE, specify samples.txt file (required to plot by site) -fsi SITEFILE, specify site file (required to include location information) -x Hext [1963] and bootstrap -B DON'T do bootstrap, do Hext -par Tauxe [1998] parametric bootstrap -v plot bootstrap eigenvectors instead of ellipses -sit plot by site instead of entire file -crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected) -P don't make any plots - just fill in the specimens, samples, sites tables -sav don't make the tables - just save all the plots -fmt [svg, jpg, eps] format for output images, png default -gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan) -d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC -n N; specifies the number of bootstraps - default is 1000 DEFAULTS AFILE: specimens.txt plot bootstrap ellipses of Constable & Tauxe [1987] NOTES minor axis: circles major axis: triangles principal axis: squares directions are plotted on the lower hemisphere for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
[ "NAME", "aniso_magic", ".", "py" ]
python
train
ambv/retype
retype.py
https://github.com/ambv/retype/blob/03137abd4d9c9845f3cced1006190b5cca64d879/retype.py#L205-L213
def reapply_all(ast_node, lib2to3_node): """Reapplies the typed_ast node into the lib2to3 tree. Also does post-processing. This is done in reverse order to enable placing TypeVars and aliases that depend on one another. """ late_processing = reapply(ast_node, lib2to3_node) for lazy_func in reversed(late_processing): lazy_func()
[ "def", "reapply_all", "(", "ast_node", ",", "lib2to3_node", ")", ":", "late_processing", "=", "reapply", "(", "ast_node", ",", "lib2to3_node", ")", "for", "lazy_func", "in", "reversed", "(", "late_processing", ")", ":", "lazy_func", "(", ")" ]
Reapplies the typed_ast node into the lib2to3 tree. Also does post-processing. This is done in reverse order to enable placing TypeVars and aliases that depend on one another.
[ "Reapplies", "the", "typed_ast", "node", "into", "the", "lib2to3", "tree", "." ]
python
valid
astropy/regions
regions/_utils/examples.py
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/_utils/examples.py#L223-L229
def _table_to_bintable(table): """Convert `~astropy.table.Table` to `astropy.io.fits.BinTable`.""" data = table.as_array() header = fits.Header() header.update(table.meta) name = table.meta.pop('name', None) return fits.BinTableHDU(data, header, name=name)
[ "def", "_table_to_bintable", "(", "table", ")", ":", "data", "=", "table", ".", "as_array", "(", ")", "header", "=", "fits", ".", "Header", "(", ")", "header", ".", "update", "(", "table", ".", "meta", ")", "name", "=", "table", ".", "meta", ".", "...
Convert `~astropy.table.Table` to `astropy.io.fits.BinTable`.
[ "Convert", "~astropy", ".", "table", ".", "Table", "to", "astropy", ".", "io", ".", "fits", ".", "BinTable", "." ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1787-L1804
def del_contact_downtime(self, downtime_id): """Delete a contact downtime Format of the line that triggers function call:: DEL_CONTACT_DOWNTIME;<downtime_id> :param downtime_id: downtime id to delete :type downtime_id: int :return: None """ for item in self.daemon.contacts: if downtime_id in item.downtimes: item.downtimes[downtime_id].cancel(self.daemon.contacts) break else: self.send_an_element(make_monitoring_log( 'warning', 'DEL_CONTACT_DOWNTIME: downtime id: %s does not exist ' 'and cannot be deleted.' % downtime_id))
[ "def", "del_contact_downtime", "(", "self", ",", "downtime_id", ")", ":", "for", "item", "in", "self", ".", "daemon", ".", "contacts", ":", "if", "downtime_id", "in", "item", ".", "downtimes", ":", "item", ".", "downtimes", "[", "downtime_id", "]", ".", ...
Delete a contact downtime Format of the line that triggers function call:: DEL_CONTACT_DOWNTIME;<downtime_id> :param downtime_id: downtime id to delete :type downtime_id: int :return: None
[ "Delete", "a", "contact", "downtime", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1592-L1614
def makeStatic(): """ Provide static access to underscore class """ p = lambda value: inspect.ismethod(value) or inspect.isfunction(value) for eachMethod in inspect.getmembers(underscore, predicate=p): m = eachMethod[0] if not hasattr(_, m): def caller(a): def execute(*args): if len(args) == 1: r = getattr(underscore(args[0]), a)() elif len(args) > 1: rargs = args[1:] r = getattr(underscore(args[0]), a)(*rargs) else: r = getattr(underscore([]), a)() return r return execute _.__setattr__(m, caller(m)) # put the class itself as a parameter so that we can use it on outside _.__setattr__("underscore", underscore) _.templateSettings = {}
[ "def", "makeStatic", "(", ")", ":", "p", "=", "lambda", "value", ":", "inspect", ".", "ismethod", "(", "value", ")", "or", "inspect", ".", "isfunction", "(", "value", ")", "for", "eachMethod", "in", "inspect", ".", "getmembers", "(", "underscore", ",", ...
Provide static access to underscore class
[ "Provide", "static", "access", "to", "underscore", "class" ]
python
train
saltstack/salt
salt/utils/rsax931.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/rsax931.py#L150-L165
def verify(self, signed): ''' Recover the message (digest) from the signature using the public key :param str signed: The signature created with the private key :rtype: str :return: The message (digest) recovered from the signature, or an empty string if the decryption failed ''' # Allocate a buffer large enough for the signature. Freed by ctypes. buf = create_string_buffer(libcrypto.RSA_size(self._rsa)) signed = salt.utils.stringutils.to_bytes(signed) size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING) if size < 0: raise ValueError('Unable to decrypt message') return buf[0:size]
[ "def", "verify", "(", "self", ",", "signed", ")", ":", "# Allocate a buffer large enough for the signature. Freed by ctypes.", "buf", "=", "create_string_buffer", "(", "libcrypto", ".", "RSA_size", "(", "self", ".", "_rsa", ")", ")", "signed", "=", "salt", ".", "u...
Recover the message (digest) from the signature using the public key :param str signed: The signature created with the private key :rtype: str :return: The message (digest) recovered from the signature, or an empty string if the decryption failed
[ "Recover", "the", "message", "(", "digest", ")", "from", "the", "signature", "using", "the", "public", "key" ]
python
train
google/jsonnet
case_studies/micro_fractal/tilegen/mandelbrot_service.py
https://github.com/google/jsonnet/blob/c323f5ce5b8aa663585d23dc0fb94d4b166c6f16/case_studies/micro_fractal/tilegen/mandelbrot_service.py#L66-L104
def handle_fractal(): """Get fractal coordinates from query string, call mandelbrot to generate image. Returns: The image, wrapped in an HTML response. """ if check_etag(): return flask.make_response(), 304 level = int(flask.request.args.get("l", "0")) x = float(int(flask.request.args.get("x", "0"))) y = float(int(flask.request.args.get("y", "0"))) if level < 0: level = 0 grid_size = math.pow(2, level) x0 = "%.30g" % ((x - 0) / grid_size) y0 = "%.30g" % ((y - 0) / grid_size) x1 = "%.30g" % ((x + 1) / grid_size) y1 = "%.30g" % ((y + 1) / grid_size) print "Tile: %s %s %s %s" % (x0, y0, x1, y1) width = str(CONF['width']) height = str(CONF['height']) iters = str(CONF['iters']) cmd = ['./mandelbrot', width, height, iters, x0, y0, x1, y1] image_data = subprocess.check_output(cmd) response = flask.make_response(image_data) response.headers["Content-Type"] = "image/png" response.headers["cache-control"] = "public, max-age=600" response.headers["ETag"] = ETAG return response
[ "def", "handle_fractal", "(", ")", ":", "if", "check_etag", "(", ")", ":", "return", "flask", ".", "make_response", "(", ")", ",", "304", "level", "=", "int", "(", "flask", ".", "request", ".", "args", ".", "get", "(", "\"l\"", ",", "\"0\"", ")", "...
Get fractal coordinates from query string, call mandelbrot to generate image. Returns: The image, wrapped in an HTML response.
[ "Get", "fractal", "coordinates", "from", "query", "string", "call", "mandelbrot", "to", "generate", "image", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L188-L223
def _readGroupSetsGenerator(self, request, numObjects, getByIndexMethod): """ Returns a generator over the results for the specified request, which is over a set of objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point. """ currentIndex = 0 if request.page_token: currentIndex, = paging._parsePageToken( request.page_token, 1) while currentIndex < numObjects: obj = getByIndexMethod(currentIndex) include = True rgsp = obj.toProtocolElement() if request.name and request.name != obj.getLocalId(): include = False if request.biosample_id and include: rgsp.ClearField("read_groups") for readGroup in obj.getReadGroups(): if request.biosample_id == readGroup.getBiosampleId(): rgsp.read_groups.extend( [readGroup.toProtocolElement()]) # If none of the biosamples match and the readgroupset # contains reagroups, don't include in the response if len(rgsp.read_groups) == 0 and \ len(obj.getReadGroups()) != 0: include = False currentIndex += 1 nextPageToken = None if currentIndex < numObjects: nextPageToken = str(currentIndex) if include: yield rgsp, nextPageToken
[ "def", "_readGroupSetsGenerator", "(", "self", ",", "request", ",", "numObjects", ",", "getByIndexMethod", ")", ":", "currentIndex", "=", "0", "if", "request", ".", "page_token", ":", "currentIndex", ",", "=", "paging", ".", "_parsePageToken", "(", "request", ...
Returns a generator over the results for the specified request, which is over a set of objects of the specified size. The objects are returned by call to the specified method, which must take a single integer as an argument. The returned generator yields a sequence of (object, nextPageToken) pairs, which allows this iteration to be picked up at any point.
[ "Returns", "a", "generator", "over", "the", "results", "for", "the", "specified", "request", "which", "is", "over", "a", "set", "of", "objects", "of", "the", "specified", "size", ".", "The", "objects", "are", "returned", "by", "call", "to", "the", "specifi...
python
train
angr/angr
angr/exploration_techniques/unique.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/exploration_techniques/unique.py#L78-L88
def similarity(state_a, state_b): """ The (L2) distance between the counts of the state addresses in the history of the path. :param state_a: The first state to compare :param state_b: The second state to compare """ count_a = Counter(state_a.history.bbl_addrs) count_b = Counter(state_b.history.bbl_addrs) normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2 for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5 return 1.0 / (1 + normal_distance)
[ "def", "similarity", "(", "state_a", ",", "state_b", ")", ":", "count_a", "=", "Counter", "(", "state_a", ".", "history", ".", "bbl_addrs", ")", "count_b", "=", "Counter", "(", "state_b", ".", "history", ".", "bbl_addrs", ")", "normal_distance", "=", "sum"...
The (L2) distance between the counts of the state addresses in the history of the path. :param state_a: The first state to compare :param state_b: The second state to compare
[ "The", "(", "L2", ")", "distance", "between", "the", "counts", "of", "the", "state", "addresses", "in", "the", "history", "of", "the", "path", ".", ":", "param", "state_a", ":", "The", "first", "state", "to", "compare", ":", "param", "state_b", ":", "T...
python
train
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/collectionseditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L1311-L1314
def get_len(self, key): """Return sequence length""" data = self.model.get_data() return len(data[key])
[ "def", "get_len", "(", "self", ",", "key", ")", ":", "data", "=", "self", ".", "model", ".", "get_data", "(", ")", "return", "len", "(", "data", "[", "key", "]", ")" ]
Return sequence length
[ "Return", "sequence", "length" ]
python
train
mitsei/dlkit
dlkit/services/relationship.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/relationship.py#L414-L422
def get_families(self): """Pass through to provider FamilyLookupSession.get_families""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_template catalogs = self._get_provider_session('family_lookup_session').get_families() cat_list = [] for cat in catalogs: cat_list.append(Family(self._provider_manager, cat, self._runtime, self._proxy)) return FamilyList(cat_list)
[ "def", "get_families", "(", "self", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinLookupSession.get_bins_template", "catalogs", "=", "self", ".", "_get_provider_session", "(", "'family_lookup_session'", ")", ".", "get_families", "(", ")", "cat_lis...
Pass through to provider FamilyLookupSession.get_families
[ "Pass", "through", "to", "provider", "FamilyLookupSession", ".", "get_families" ]
python
train
siku2/Loglette
loglette/parser/__init__.py
https://github.com/siku2/Loglette/blob/d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095/loglette/parser/__init__.py#L23-L41
def can_handle(self, text: str) -> bool: """Check whether this parser can parse the text""" try: changelogs = self.split_changelogs(text) if not changelogs: return False for changelog in changelogs: _header, _changes = self.split_changelog(changelog) if not any((_header, _changes)): return False header = self.parse_header(_header) changes = self.parse_changes(_changes) if not any((header, changes)): return False except Exception: return False else: return True
[ "def", "can_handle", "(", "self", ",", "text", ":", "str", ")", "->", "bool", ":", "try", ":", "changelogs", "=", "self", ".", "split_changelogs", "(", "text", ")", "if", "not", "changelogs", ":", "return", "False", "for", "changelog", "in", "changelogs"...
Check whether this parser can parse the text
[ "Check", "whether", "this", "parser", "can", "parse", "the", "text" ]
python
train
pyQode/pyqode.core
pyqode/core/panels/marker.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/marker.py#L128-L146
def add_marker(self, marker): """ Adds the marker to the panel. :param marker: Marker to add :type marker: pyqode.core.modes.Marker """ self._markers.append(marker) doc = self.editor.document() assert isinstance(doc, QtGui.QTextDocument) block = doc.findBlockByLineNumber(marker._position) marker.block = block d = TextDecoration(block) d.set_full_width() if self._background: d.set_background(QtGui.QBrush(self._background)) marker.decoration = d self.editor.decorations.append(d) self.repaint()
[ "def", "add_marker", "(", "self", ",", "marker", ")", ":", "self", ".", "_markers", ".", "append", "(", "marker", ")", "doc", "=", "self", ".", "editor", ".", "document", "(", ")", "assert", "isinstance", "(", "doc", ",", "QtGui", ".", "QTextDocument",...
Adds the marker to the panel. :param marker: Marker to add :type marker: pyqode.core.modes.Marker
[ "Adds", "the", "marker", "to", "the", "panel", "." ]
python
train
gunthercox/ChatterBot
chatterbot/parsing.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L540-L554
def date_from_quarter(base_date, ordinal, year):
    """Return the [start, end] datetimes of the *ordinal*-th quarter of *year*.

    Q1 is January-March, Q2 April-June, Q3 July-September and
    Q4 October-December; the end datetime falls on the last day of the
    quarter's final month.

    BUG FIX: the previous implementation used ``3 * (ordinal - 1)`` as the
    start *month*, which made quarters overlap (Q2 was March-June, Q3 was
    June-September) and needed a special case to repair Q1.  The start
    month is now ``3 * (ordinal - 1) + 1``.

    :param base_date: unused; kept for interface compatibility with callers
    :param ordinal: quarter number, 1-4 (out-of-range low values clamp to Q4,
        matching the old code's fallback behaviour for nonsense ordinals)
    :param year: calendar year
    :return: list ``[start_datetime, end_datetime]``
    """
    interval = 3
    month_start = interval * (ordinal - 1) + 1
    if month_start < 1:
        # Nonsense ordinal (<= 0): fall back to the last quarter, as before.
        month_start = 10
    month_end = month_start + interval - 1
    return [
        datetime(year, month_start, 1),
        datetime(year, month_end, calendar.monthrange(year, month_end)[1])
    ]
[ "def", "date_from_quarter", "(", "base_date", ",", "ordinal", ",", "year", ")", ":", "interval", "=", "3", "month_start", "=", "interval", "*", "(", "ordinal", "-", "1", ")", "if", "month_start", "<", "0", ":", "month_start", "=", "9", "month_end", "=", ...
Extract date from quarter of a year
[ "Extract", "date", "from", "quarter", "of", "a", "year" ]
python
train
PythonCharmers/python-future
src/future/backports/datetime.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/datetime.py#L1245-L1260
def replace(self, hour=None, minute=None, second=None, microsecond=None,
            tzinfo=True):
    """Return a new time, substituting any fields that were given.

    ``tzinfo=True`` (the default sentinel) means "keep the current
    tzinfo"; pass ``None`` explicitly to make the result naive.
    """
    hour = self.hour if hour is None else hour
    minute = self.minute if minute is None else minute
    second = self.second if second is None else second
    microsecond = self.microsecond if microsecond is None else microsecond
    if tzinfo is True:
        tzinfo = self.tzinfo
    _check_time_fields(hour, minute, second, microsecond)
    _check_tzinfo_arg(tzinfo)
    return time(hour, minute, second, microsecond, tzinfo)
[ "def", "replace", "(", "self", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "microsecond", "=", "None", ",", "tzinfo", "=", "True", ")", ":", "if", "hour", "is", "None", ":", "hour", "=", "self", ".", "ho...
Return a new time with new values for the specified fields.
[ "Return", "a", "new", "time", "with", "new", "values", "for", "the", "specified", "fields", "." ]
python
train
jaraco/jaraco.path
jaraco/path.py
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L131-L154
def recursive_glob(root, spec):
    """
    Like iglob, but recurse directories

    Note: only files inside subdirectories of ``root`` are matched, never
    files directly in ``root`` itself (the glob pattern is always anchored
    at a child directory of some walked path).

    >>> any('path.py' in result for result in recursive_glob('.', '*.py'))
    True

    >>> all(result.startswith('.') for result in recursive_glob('.', '*.py'))
    True

    >>> len(list(recursive_glob('.', '*.foo')))
    0
    """
    for dirpath, dirnames, _filenames in os.walk(root):
        for dirname in dirnames:
            for match in glob.iglob(os.path.join(dirpath, dirname, spec)):
                yield match
[ "def", "recursive_glob", "(", "root", ",", "spec", ")", ":", "specs", "=", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "dirname", ",", "spec", ")", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "ro...
Like iglob, but recurse directories >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0
[ "Like", "iglob", "but", "recurse", "directories", ">>>", "any", "(", "path", ".", "py", "in", "result", "for", "result", "in", "recursive_glob", "(", ".", "*", ".", "py", "))", "True", ">>>", "all", "(", "result", ".", "startswith", "(", ".", ")", "f...
python
valid
joke2k/faker
faker/providers/ssn/no_NO/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/ssn/no_NO/__init__.py#L9-L25
def checksum(digits, scale):
    """
    Calculate the "Module 11" checksum of a Norwegian personal identity code.

    Each digit is multiplied by the matching weight in *scale* (extra
    elements in the longer sequence are ignored, as with ``zip``), the
    products are summed, and the checksum is ``11 - (sum % 11)``, mapped
    to 0 when that expression yields 11.

    https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
    """
    weighted_sum = sum(d * w for d, w in zip(digits, scale))
    remainder = 11 - (weighted_sum % 11)
    return 0 if remainder == 11 else remainder
[ "def", "checksum", "(", "digits", ",", "scale", ")", ":", "chk_nbr", "=", "11", "-", "(", "sum", "(", "map", "(", "operator", ".", "mul", ",", "digits", ",", "scale", ")", ")", "%", "11", ")", "if", "chk_nbr", "==", "11", ":", "return", "0", "r...
Calculate checksum of Norwegian personal identity code. Checksum is calculated with "Module 11" method using a scale. The digits of the personal code are multiplied by the corresponding number in the scale and summed; if remainder of module 11 of the sum is less than 10, checksum is the remainder. If remainder is 0, the checksum is 0. https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
[ "Calculate", "checksum", "of", "Norwegian", "personal", "identity", "code", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xquerybuilderwidget/xqueryrule.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xquerybuilderwidget/xqueryrule.py#L46-L57
def defineOperator(self, operator, widget=-1):
    """
    Registers a new operator for this rule, mapping it to an editor
    widget.  Passing -1 for *widget* (the default) selects QLineEdit.

    :param      operator | <str>
                widget   | <subclass of QWidget> || None || -1
    """
    editor_cls = QLineEdit if widget == -1 else widget
    self._operators[nativestring(operator)] = editor_cls
[ "def", "defineOperator", "(", "self", ",", "operator", ",", "widget", "=", "-", "1", ")", ":", "if", "(", "widget", "==", "-", "1", ")", ":", "widget", "=", "QLineEdit", "self", ".", "_operators", "[", "nativestring", "(", "operator", ")", "]", "=", ...
Adds a new operator for this rule. If widget is supplied as -1, then \ a QLineEdit will be used by default. :param operator | <str> widget | <subclass of QWidget> || None || -1
[ "Adds", "a", "new", "operator", "for", "this", "rule", ".", "If", "widget", "is", "supplied", "as", "-", "1", "then", "\\", "a", "QLineEdit", "will", "be", "used", "by", "default", ".", ":", "param", "operator", "|", "<str", ">", "widget", "|", "<sub...
python
train
timothydmorton/isochrones
isochrones/starmodel_old.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L1095-L1140
def load_hdf(cls, filename, path='', name=None):
    """
    A class method to load a saved StarModel from an HDF5 file.

    File must have been created by a call to :func:`StarModel.save_hdf`.

    :param filename:
        H5 file to load.

    :param path: (optional)
        Path within HDF file.

    :param name: (optional)
        Name for the model; when None, the stored name is used
        (falling back to an empty string if none was stored).

    :return:
        :class:`StarModel` object.
    """
    store = pd.HDFStore(filename)
    try:
        # Samples live under "<path>/samples"; the model metadata is kept
        # on that node's storer attributes.
        samples = store['{}/samples'.format(path)]
        attrs = store.get_storer('{}/samples'.format(path)).attrs
    except:
        # Make sure the store is closed before propagating the error.
        store.close()
        raise
    properties = attrs.properties
    maxAV = attrs.maxAV
    max_distance = attrs.max_distance
    # NOTE(review): min_logg is read but never passed on below -- presumably
    # kept so a missing attribute fails early; confirm before removing.
    min_logg = attrs.min_logg
    ic_type = attrs.ic_type
    use_emcee = attrs.use_emcee
    basename = attrs._mnest_basename
    if name is None:
        try:
            name = attrs.name
        except:
            # Older files may lack a stored name.
            name = ''
    store.close()

    #ic = ic_type() don't need to initialize anymore

    mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance,
              use_emcee=use_emcee, name=name,
              **properties)
    # Attach the previously sampled chain and MultiNest basename directly.
    mod._samples = samples
    mod._mnest_basename = basename
    return mod
[ "def", "load_hdf", "(", "cls", ",", "filename", ",", "path", "=", "''", ",", "name", "=", "None", ")", ":", "store", "=", "pd", ".", "HDFStore", "(", "filename", ")", "try", ":", "samples", "=", "store", "[", "'{}/samples'", ".", "format", "(", "pa...
A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object.
[ "A", "class", "method", "to", "load", "a", "saved", "StarModel", "from", "an", "HDF5", "file", "." ]
python
train
fastai/fastai
fastai/vision/transform.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/transform.py#L150-L156
def _crop_default(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Crop `x` to `size` pixels, with `row_pct`/`col_pct` choosing the focal point."
    target_h, target_w = tis2hw(size)
    row_pct, col_pct = _minus_epsilon(row_pct, col_pct)
    top = int((x.size(1) - target_h + 1) * row_pct)
    left = int((x.size(2) - target_w + 1) * col_pct)
    return x[:, top:top + target_h, left:left + target_w].contiguous()
[ "def", "_crop_default", "(", "x", ",", "size", ",", "row_pct", ":", "uniform", "=", "0.5", ",", "col_pct", ":", "uniform", "=", "0.5", ")", ":", "rows", ",", "cols", "=", "tis2hw", "(", "size", ")", "row_pct", ",", "col_pct", "=", "_minus_epsilon", "...
Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop.
[ "Crop", "x", "to", "size", "pixels", ".", "row_pct", "col_pct", "select", "focal", "point", "of", "crop", "." ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L6441-L6446
def _FilterExcludedFiles(filenames):
    """Filters out files listed in the --exclude command line switch. File
    paths in the switch are evaluated relative to the current working
    directory
    """
    excluded = set(os.path.abspath(path) for path in _excludes)
    return [name for name in filenames
            if os.path.abspath(name) not in excluded]
[ "def", "_FilterExcludedFiles", "(", "filenames", ")", ":", "exclude_paths", "=", "[", "os", ".", "path", ".", "abspath", "(", "f", ")", "for", "f", "in", "_excludes", "]", "return", "[", "f", "for", "f", "in", "filenames", "if", "os", ".", "path", "....
Filters out files listed in the --exclude command line switch. File paths in the switch are evaluated relative to the current working directory
[ "Filters", "out", "files", "listed", "in", "the", "--", "exclude", "command", "line", "switch", ".", "File", "paths", "in", "the", "switch", "are", "evaluated", "relative", "to", "the", "current", "working", "directory" ]
python
valid
ralphje/imagemounter
imagemounter/volume.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume.py#L667-L734
def _load_fsstat_data(self, timeout=3):
    """Using :command:`fsstat`, adds some additional information of the volume to the Volume.

    Runs ``fsstat`` against the raw volume in a background thread and
    scrapes its textual report into ``self.info`` (keys ``statfstype``,
    ``lastmountpoint``, ``label`` and ``version``).  The worker is given
    *timeout* seconds; after that the subprocess is terminated so a
    hanging ``fsstat`` cannot stall the caller.

    :param timeout: seconds to wait for ``fsstat`` before killing it
    """
    def stats_thread():
        # Runs in a worker thread; the Popen handle is stored on the
        # function object so the outer scope can terminate it on timeout.
        try:
            cmd = ['fsstat', self.get_raw_path(), '-o', str(self.offset // self.disk.block_size)]
            # Setting the fstype explicitly makes fsstat much faster and more reliable
            # In some versions, the auto-detect yaffs2 check takes ages for large images
            fstype = {
                "ntfs": "ntfs",
                "fat": "fat",
                "ext": "ext",
                "iso": "iso9660",
                "hfs+": "hfs",
                "ufs": "ufs",
                "swap": "swap",
                "exfat": "exfat",
            }.get(self.fstype, None)
            if fstype:
                cmd.extend(["-f", fstype])
            logger.debug('$ {0}'.format(' '.join(cmd)))
            stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # Scrape the report line by line; iter(..., b'') stops at EOF.
            for line in iter(stats_thread.process.stdout.readline, b''):
                line = line.decode('utf-8')
                logger.debug('< {0}'.format(line))
                if line.startswith("File System Type:"):
                    self.info['statfstype'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
                    self.info['lastmountpoint'] = line[line.index(':') + 2:].strip().replace("//", "/")
                elif line.startswith("Volume Name:") and not self.info.get('label'):
                    self.info['label'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Version:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Source OS:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif 'CYLINDER GROUP INFORMATION' in line or 'BLOCK GROUP INFORMATION' in line:
                    # Everything of interest precedes the group information;
                    # stop fsstat early instead of reading a huge dump.
                    # noinspection PyBroadException
                    try:
                        stats_thread.process.terminate()
                        logger.debug("Terminated fsstat at cylinder/block group information.")
                    except Exception:
                        pass
                    break

            # Fold the last mount point and label into one friendly label.
            if self.info.get('lastmountpoint') and self.info.get('label'):
                self.info['label'] = "{0} ({1})".format(self.info['lastmountpoint'], self.info['label'])
            elif self.info.get('lastmountpoint') and not self.info.get('label'):
                self.info['label'] = self.info['lastmountpoint']
            elif not self.info.get('lastmountpoint') and \
                    self.info.get('label') and \
                    self.info['label'].startswith("/"):  # e.g. /boot1
                # A path-like label doubles as the last mount point; a trailing
                # "1" is stripped -- presumably a partition suffix (TODO confirm).
                if self.info['label'].endswith("1"):
                    self.info['lastmountpoint'] = self.info['label'][:-1]
                else:
                    self.info['lastmountpoint'] = self.info['label']
        except Exception:  # ignore any exceptions here.
            logger.exception("Error while obtaining stats.")

    thread = threading.Thread(target=stats_thread)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # Timed out: kill the subprocess so the thread's readline loop ends.
        # noinspection PyBroadException
        try:
            stats_thread.process.terminate()
        except Exception:
            pass
        thread.join()
        logger.debug("Killed fsstat after {0}s".format(timeout))
[ "def", "_load_fsstat_data", "(", "self", ",", "timeout", "=", "3", ")", ":", "def", "stats_thread", "(", ")", ":", "try", ":", "cmd", "=", "[", "'fsstat'", ",", "self", ".", "get_raw_path", "(", ")", ",", "'-o'", ",", "str", "(", "self", ".", "offs...
Using :command:`fsstat`, adds some additional information of the volume to the Volume.
[ "Using", ":", "command", ":", "fsstat", "adds", "some", "additional", "information", "of", "the", "volume", "to", "the", "Volume", "." ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L4639-L4678
def _from_dict(cls, _dict):
    """Initialize a DialogRuntimeResponseGeneric object from a json dictionary."""
    args = {}
    # 'response_type' is the only required property.
    if 'response_type' not in _dict:
        raise ValueError(
            'Required property \'response_type\' not present in DialogRuntimeResponseGeneric JSON'
        )
    args['response_type'] = _dict.get('response_type')
    # Optional scalar properties are copied through verbatim when present.
    for key in ('text', 'time', 'typing', 'source', 'title', 'description',
                'preference', 'message_to_human_agent', 'topic',
                'dialog_node'):
        if key in _dict:
            args[key] = _dict.get(key)
    # List-valued properties are deserialized element by element.
    if 'options' in _dict:
        args['options'] = [
            DialogNodeOutputOptionsElement._from_dict(x)
            for x in (_dict.get('options'))
        ]
    if 'suggestions' in _dict:
        args['suggestions'] = [
            DialogSuggestion._from_dict(x)
            for x in (_dict.get('suggestions'))
        ]
    return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'response_type'", "in", "_dict", ":", "args", "[", "'response_type'", "]", "=", "_dict", ".", "get", "(", "'response_type'", ")", "else", ":", "raise", "ValueError", ...
Initialize a DialogRuntimeResponseGeneric object from a json dictionary.
[ "Initialize", "a", "DialogRuntimeResponseGeneric", "object", "from", "a", "json", "dictionary", "." ]
python
train
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L238-L248
def nprior(self):
    """number of prior information equations

    Refreshes ``control_data.nprior`` from the current prior-information
    table before returning it, so the count always reflects the table.

    Returns
    -------
    nprior : int
        the number of prior info equations
    """
    table_shape = self.prior_information.shape
    self.control_data.nprior = table_shape[0]
    return self.control_data.nprior
[ "def", "nprior", "(", "self", ")", ":", "self", ".", "control_data", ".", "nprior", "=", "self", ".", "prior_information", ".", "shape", "[", "0", "]", "return", "self", ".", "control_data", ".", "nprior" ]
number of prior information equations Returns ------- nprior : int the number of prior info equations
[ "number", "of", "prior", "information", "equations" ]
python
train
etcher-be/epab
epab/cmd/_freeze.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/cmd/_freeze.py#L120-L127
def freeze(ctx, version: str, clean: bool):
    """
    Freeze current package into a single file

    :param ctx: click context, used to invoke the resource compilation command
    :param version: version string forwarded to the freeze backend
    :param clean: when True, remove any existing spec file before freezing
    """
    if clean:
        _clean_spec()
    # Compile Qt resources first -- presumably so the frozen executable
    # bundles up-to-date resources (TODO confirm against _freeze).
    ctx.invoke(epab.cmd.compile_qt_resources)
    _freeze(version)
[ "def", "freeze", "(", "ctx", ",", "version", ":", "str", ",", "clean", ":", "bool", ")", ":", "if", "clean", ":", "_clean_spec", "(", ")", "ctx", ".", "invoke", "(", "epab", ".", "cmd", ".", "compile_qt_resources", ")", "_freeze", "(", "version", ")"...
Freeze current package into a single file
[ "Freeze", "current", "package", "into", "a", "single", "file" ]
python
train
gwastro/pycbc
pycbc/inference/sampler/base_mcmc.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base_mcmc.py#L385-L427
def set_p0(self, samples_file=None, prior=None): """Sets the initial position of the walkers. Parameters ---------- samples_file : InferenceFile, optional If provided, use the last iteration in the given file for the starting positions. prior : JointDistribution, optional Use the given prior to set the initial positions rather than ``model``'s prior. Returns ------- p0 : dict A dictionary maping sampling params to the starting positions. """ # if samples are given then use those as initial positions if samples_file is not None: with self.io(samples_file, 'r') as fp: samples = fp.read_samples(self.variable_params, iteration=-1, flatten=False) # remove the (length 1) niterations dimension samples = samples[..., 0] # make sure we have the same shape assert samples.shape == self.base_shape, ( "samples in file {} have shape {}, but I have shape {}". format(samples_file, samples.shape, self.base_shape)) # transform to sampling parameter space if self.model.sampling_transforms is not None: samples = self.model.sampling_transforms.apply(samples) # draw random samples if samples are not provided else: nsamples = numpy.prod(self.base_shape) samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape( self.base_shape) # store as ND array with shape [base_shape] x nparams ndim = len(self.variable_params) p0 = numpy.ones(list(self.base_shape)+[ndim]) for i, param in enumerate(self.sampling_params): p0[..., i] = samples[param] self._p0 = p0 return self.p0
[ "def", "set_p0", "(", "self", ",", "samples_file", "=", "None", ",", "prior", "=", "None", ")", ":", "# if samples are given then use those as initial positions", "if", "samples_file", "is", "not", "None", ":", "with", "self", ".", "io", "(", "samples_file", ","...
Sets the initial position of the walkers. Parameters ---------- samples_file : InferenceFile, optional If provided, use the last iteration in the given file for the starting positions. prior : JointDistribution, optional Use the given prior to set the initial positions rather than ``model``'s prior. Returns ------- p0 : dict A dictionary maping sampling params to the starting positions.
[ "Sets", "the", "initial", "position", "of", "the", "walkers", "." ]
python
train
rsenk330/pylibsass
pylibsass/sass.py
https://github.com/rsenk330/pylibsass/blob/f029490db8e4c2178c9564efeeace95bbf8cceff/pylibsass/sass.py#L50-L75
def _load(self): """Loads the libsass library if it isn't already loaded.""" if self.clib is None: root_path = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__ )), '..')) path1 = os.path.join(root_path, 'sass.so') path2 = os.path.join(root_path, '..', 'sass.so') if os.path.exists(path1): self.clib = cdll.LoadLibrary(path1) elif os.path.exists(path1): self.clib = cdll.LoadLibrary(path2) else: raise Exception("Could not load library") self.clib.sass_new_context.restype = POINTER(SassContext) self.clib.sass_new_file_context.restype = POINTER(SassFileContext) self.clib.sass_new_folder_context.restype = POINTER(SassFolderContext) self.clib.sass_compile.restype = c_int self.clib.sass_compile.argtypes = [POINTER(SassContext)] self.clib.sass_compile_file.restype = c_int self.clib.sass_compile_file.argtypes = [POINTER(SassFileContext)] self.clib.sass_compile_folder.restype = c_int self.clib.sass_compile_folder.argtypes = [POINTER(SassFolderContext)]
[ "def", "_load", "(", "self", ")", ":", "if", "self", ".", "clib", "is", "None", ":", "root_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".",...
Loads the libsass library if it isn't already loaded.
[ "Loads", "the", "libsass", "library", "if", "it", "isn", "t", "already", "loaded", "." ]
python
train
OLC-Bioinformatics/ConFindr
confindr_src/confindr.py
https://github.com/OLC-Bioinformatics/ConFindr/blob/4c292617c3f270ebd5ff138cbc5a107f6d01200d/confindr_src/confindr.py#L147-L159
def extract_rmlst_genes(pair, database, forward_out, reverse_out, threads=12, logfile=None):
    """
    Given a pair of reads and an rMLST database, will extract reads that contain sequence from the database.
    :param pair: List containing path to forward reads at index 0 and path to reverse reads at index 1.
    :param database: Path to rMLST database, in FASTA format.
    :param forward_out: Path for baited forward reads.
    :param reverse_out: Path for baited reverse reads.
    :param threads: Number of threads for bbduk to use.
    :param logfile: Optional logfile to record bbduk's output and command line.
    """
    forward_in, reverse_in = pair[0], pair[1]
    out, err, cmd = bbtools.bbduk_bait(database,
                                       forward_in,
                                       forward_out,
                                       reverse_in=reverse_in,
                                       reverse_out=reverse_out,
                                       threads=str(threads),
                                       returncmd=True)
    if logfile:
        write_to_logfile(logfile, out, err, cmd)
[ "def", "extract_rmlst_genes", "(", "pair", ",", "database", ",", "forward_out", ",", "reverse_out", ",", "threads", "=", "12", ",", "logfile", "=", "None", ")", ":", "out", ",", "err", ",", "cmd", "=", "bbtools", ".", "bbduk_bait", "(", "database", ",", ...
Given a pair of reads and an rMLST database, will extract reads that contain sequence from the database. :param pair: List containing path to forward reads at index 0 and path to reverse reads at index 1. :param database: Path to rMLST database, in FASTA format. :param forward_out: :param reverse_out: :param threads:
[ "Given", "a", "pair", "of", "reads", "and", "an", "rMLST", "database", "will", "extract", "reads", "that", "contain", "sequence", "from", "the", "database", ".", ":", "param", "pair", ":", "List", "containing", "path", "to", "forward", "reads", "at", "inde...
python
train
skyfielders/python-skyfield
skyfield/iokit.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/iokit.py#L338-L369
def parse_deltat_preds(fileobj):
    """Parse the United States Naval Observatory ``deltat.preds`` file.

    The old format supplies a floating point year, the value of Delta T,
    and one or two other fields::

        2015.75      67.97               0.210         0.02

    The new format adds a modified Julian day as the first field:

        58484.000  2019.00   69.34      -0.152       0.117

    This function returns a 2xN array of raw Julian dates and matching
    Delta T values.
    """
    lines = iter(fileobj)
    first_line = next(lines)
    if first_line.startswith(b'YEAR'):
        # Format in use until 2019 February: year and Delta T in the
        # first two columns, preceded by one blank separator line.
        next(lines)
        wanted_columns = [0, 1]
    else:
        # Format in use since 2019 February: an MJD column comes first.
        wanted_columns = [1, 2]
    year_float, delta_t = np.loadtxt(lines, usecols=wanted_columns).T

    year = year_float.astype(int)
    month = 1 + (year_float * 12.0).astype(int) % 12
    expiration_date = date(year[0] + 2, month[0], 1)
    data = np.array((julian_date(year, month, 1), delta_t))
    return expiration_date, data
[ "def", "parse_deltat_preds", "(", "fileobj", ")", ":", "lines", "=", "iter", "(", "fileobj", ")", "header", "=", "next", "(", "lines", ")", "if", "header", ".", "startswith", "(", "b'YEAR'", ")", ":", "# Format in use until 2019 February", "next", "(", "line...
Parse the United States Naval Observatory ``deltat.preds`` file. The old format supplies a floating point year, the value of Delta T, and one or two other fields:: 2015.75 67.97 0.210 0.02 The new format adds a modified Julian day as the first field: 58484.000 2019.00 69.34 -0.152 0.117 This function returns a 2xN array of raw Julian dates and matching Delta T values.
[ "Parse", "the", "United", "States", "Naval", "Observatory", "deltat", ".", "preds", "file", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/network/ip.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/network/ip.py#L227-L233
def resolve_network_cidr(ip_address):
    '''
    Resolves the full address cidr of an ip_address based on
    configured network interfaces
    '''
    mask = get_netmask_for_address(ip_address)
    network = netaddr.IPNetwork("%s/%s" % (ip_address, mask))
    return str(network.cidr)
[ "def", "resolve_network_cidr", "(", "ip_address", ")", ":", "netmask", "=", "get_netmask_for_address", "(", "ip_address", ")", "return", "str", "(", "netaddr", ".", "IPNetwork", "(", "\"%s/%s\"", "%", "(", "ip_address", ",", "netmask", ")", ")", ".", "cidr", ...
Resolves the full address cidr of an ip_address based on configured network interfaces
[ "Resolves", "the", "full", "address", "cidr", "of", "an", "ip_address", "based", "on", "configured", "network", "interfaces" ]
python
train
pypa/pipenv
pipenv/vendor/requirementslib/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/utils.py#L409-L458
def get_path(root, path, default=_UNSET):
    """Retrieve a value from a nested object via a tuple representing the
    lookup path.

    >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
    >>> get_path(root, ('a', 'b', 'c', 2, 0))
    3

    The path format is intentionally consistent with that of
    :func:`remap`.

    One of get_path's chief aims is improved error messaging. EAFP is
    great, but the error messages are not.

    For instance, ``root['a']['b']['c'][2][1]`` gives back
    ``IndexError: list index out of range``

    What went out of range where? get_path currently raises
    ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
    1), got error: IndexError('list index out of range',)``, a
    subclass of IndexError and KeyError.

    You can also pass a default that covers the entire operation,
    should the lookup fail at any level.

    Args:
        root: The target nesting of dictionaries, lists, or other
            objects supporting ``__getitem__``.
        path (tuple): A list of strings and integers to be successively
            looked up within *root*.  A dotted string is split on "."
            into such a tuple.
        default: The value to be returned should any
            ``PathAccessError`` exceptions be raised.
    """
    if isinstance(path, six.string_types):
        # Convenience: allow "a.b.c" as shorthand for ('a', 'b', 'c').
        path = path.split(".")
    cur = root
    try:
        for seg in path:
            try:
                cur = cur[seg]
            except (KeyError, IndexError) as exc:
                # Wrap with the segment and full path for a useful message.
                raise PathAccessError(exc, seg, path)
            except TypeError as exc:
                # either string index in a list, or a parent that
                # doesn't support indexing
                try:
                    # Retry with the segment coerced to int (e.g. "2" -> 2).
                    seg = int(seg)
                    cur = cur[seg]
                except (ValueError, KeyError, IndexError, TypeError):
                    if not getattr(cur, "__iter__", None):
                        # Replace the original TypeError with a clearer one.
                        exc = TypeError("%r object is not indexable"
                                        % type(cur).__name__)
                    raise PathAccessError(exc, seg, path)
    except PathAccessError:
        if default is _UNSET:
            raise
        # A default was supplied: it covers failure at any depth.
        return default
    return cur
[ "def", "get_path", "(", "root", ",", "path", ",", "default", "=", "_UNSET", ")", ":", "if", "isinstance", "(", "path", ",", "six", ".", "string_types", ")", ":", "path", "=", "path", ".", "split", "(", "\".\"", ")", "cur", "=", "root", "try", ":", ...
Retrieve a value from a nested object via a tuple representing the lookup path. >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}} >>> get_path(root, ('a', 'b', 'c', 2, 0)) 3 The path format is intentionally consistent with that of :func:`remap`. One of get_path's chief aims is improved error messaging. EAFP is great, but the error messages are not. For instance, ``root['a']['b']['c'][2][1]`` gives back ``IndexError: list index out of range`` What went out of range where? get_path currently raises ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2, 1), got error: IndexError('list index out of range',)``, a subclass of IndexError and KeyError. You can also pass a default that covers the entire operation, should the lookup fail at any level. Args: root: The target nesting of dictionaries, lists, or other objects supporting ``__getitem__``. path (tuple): A list of strings and integers to be successively looked up within *root*. default: The value to be returned should any ``PathAccessError`` exceptions be raised.
[ "Retrieve", "a", "value", "from", "a", "nested", "object", "via", "a", "tuple", "representing", "the", "lookup", "path", ".", ">>>", "root", "=", "{", "a", ":", "{", "b", ":", "{", "c", ":", "[[", "1", "]", "[", "2", "]", "[", "3", "]]", "}}}",...
python
train
bukun/TorCMS
torcms/script/autocrud/fetch_html_dic.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/autocrud/fetch_html_dic.py#L89-L132
def gen_array_crud():
    '''
    Return the switch and kind dictionaries built from the XLSX workbook.

    If the value of a column cell in a row is `1`, that column is added
    to the row's switch array (see ``__get_switch_arr``).

    Returns a ``(switch_dics, kind_dics)`` tuple, or ``False`` when no
    workbook is loaded.
    '''
    if WORK_BOOK:
        pass
    else:
        return False
    papa_id = 0
    switch_dics = {}
    kind_dics = {}
    for work_sheet in WORK_BOOK:
        # Cell A1 holds the category signature for the whole sheet.
        kind_sig = str(work_sheet['A1'].value).strip()
        # the number of the categories in a website won't greater than 1000.
        for row_num in range(3, 1000):
            # Parent category, column A
            a_cell_value = work_sheet['A{0}'.format(row_num)].value
            # Sub category, column B
            b_cell_val = work_sheet['B{0}'.format(row_num)].value
            # Stop at the first row where both columns are empty.
            if a_cell_value or b_cell_val:
                pass
            else:
                break
            if a_cell_value and a_cell_value != '':
                # Parent id: cell value without its leading marker character.
                papa_id = a_cell_value.strip()[1:]
                u_dic = __get_switch_arr(work_sheet, row_num)
                # Parent entries are keyed with a "00" suffix.
                switch_dics['dic_{0}00'.format(papa_id)] = u_dic
                kind_dics['kind_{0}00'.format(papa_id)] = kind_sig
            if b_cell_val and b_cell_val != '':
                sun_id = b_cell_val.strip()[1:]
                if len(sun_id) == 4:
                    # A 4-character id is already fully qualified.
                    app_uid = sun_id
                else:
                    # Otherwise prefix it with the current parent id.
                    app_uid = '{0}{1}'.format(papa_id, sun_id)
                u_dic = __get_switch_arr(work_sheet, row_num)
                switch_dics['dic_{0}'.format(app_uid)] = u_dic
                kind_dics['kind_{0}'.format(app_uid)] = kind_sig
    return (switch_dics, kind_dics)
[ "def", "gen_array_crud", "(", ")", ":", "if", "WORK_BOOK", ":", "pass", "else", ":", "return", "False", "papa_id", "=", "0", "switch_dics", "=", "{", "}", "kind_dics", "=", "{", "}", "for", "work_sheet", "in", "WORK_BOOK", ":", "kind_sig", "=", "str", ...
Return the dictionay of the switcher form XLXS file. if valud of the column of the row is `1`, it will be added to the array.
[ "Return", "the", "dictionay", "of", "the", "switcher", "form", "XLXS", "file", ".", "if", "valud", "of", "the", "column", "of", "the", "row", "is", "1", "it", "will", "be", "added", "to", "the", "array", "." ]
python
train
JoelBender/bacpypes
py25/bacpypes/netservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/netservice.py#L190-L196
def process_npdu(self, npdu):
    """Encode NPDUs from the service access point and send them downstream."""
    if _debug:
        NetworkAdapter._debug("process_npdu %r (net=%r)", npdu, self.adapterNet)
    outgoing = PDU(user_data=npdu.pduUserData)
    npdu.encode(outgoing)
    self.request(outgoing)
[ "def", "process_npdu", "(", "self", ",", "npdu", ")", ":", "if", "_debug", ":", "NetworkAdapter", ".", "_debug", "(", "\"process_npdu %r (net=%r)\"", ",", "npdu", ",", "self", ".", "adapterNet", ")", "pdu", "=", "PDU", "(", "user_data", "=", "npdu", ".", ...
Encode NPDUs from the service access point and send them downstream.
[ "Encode", "NPDUs", "from", "the", "service", "access", "point", "and", "send", "them", "downstream", "." ]
python
train
saltstack/salt
salt/utils/openstack/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L991-L1005
def server_list_min(self):
    '''
    List minimal information about servers
    '''
    servers = {}
    listing = self.compute_conn.servers.list(detailed=False)
    for item in listing:
        try:
            servers[item.name] = {
                'id': item.id,
                'state': 'Running'
            }
        except TypeError:
            pass
    return servers
[ "def", "server_list_min", "(", "self", ")", ":", "nt_ks", "=", "self", ".", "compute_conn", "ret", "=", "{", "}", "for", "item", "in", "nt_ks", ".", "servers", ".", "list", "(", "detailed", "=", "False", ")", ":", "try", ":", "ret", "[", "item", "....
List minimal information about servers
[ "List", "minimal", "information", "about", "servers" ]
python
train
amperser/proselint
proselint/command_line.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/command_line.py#L106-L150
def proselint(paths=None, version=None, clean=None, debug=None, output_json=None, time=None, demo=None, compact=None): """A CLI for proselint, a linter for prose.""" if time: click.echo(timing_test()) return # In debug or clean mode, delete cache & *.pyc files before running. if debug or clean: clear_cache() # Use the demo file by default. if demo: paths = [demo_file] # Expand the list of directories and files. filepaths = extract_files(list(paths)) # Lint the files num_errors = 0 # Use stdin if no paths were specified if len(paths) == 0: filepaths.append('-') for fp in filepaths: try: if fp == '-': fp = '<stdin>' f = sys.stdin else: f = click.open_file( fp, 'r', encoding="utf-8", errors="replace") errors = lint(f, debug=debug) num_errors += len(errors) print_errors(fp, errors, output_json, compact=compact) except Exception: traceback.print_exc() # Return an exit code close_cache_shelves() if num_errors > 0: sys.exit(1) else: sys.exit(0)
[ "def", "proselint", "(", "paths", "=", "None", ",", "version", "=", "None", ",", "clean", "=", "None", ",", "debug", "=", "None", ",", "output_json", "=", "None", ",", "time", "=", "None", ",", "demo", "=", "None", ",", "compact", "=", "None", ")",...
A CLI for proselint, a linter for prose.
[ "A", "CLI", "for", "proselint", "a", "linter", "for", "prose", "." ]
python
train
awslabs/aws-sam-cli
samcli/local/lambda_service/lambda_error_responses.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambda_service/lambda_error_responses.py#L158-L179
def generic_path_not_found(*args): """ Creates a Lambda Service Generic PathNotFound Response Parameters ---------- args list List of arguments Flask passes to the method Returns ------- Flask.Response A response object representing the GenericPathNotFound Error """ exception_tuple = LambdaErrorResponses.PathNotFoundException return BaseLocalService.service_response( LambdaErrorResponses._construct_error_response_body( LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException"), LambdaErrorResponses._construct_headers(exception_tuple[0]), exception_tuple[1] )
[ "def", "generic_path_not_found", "(", "*", "args", ")", ":", "exception_tuple", "=", "LambdaErrorResponses", ".", "PathNotFoundException", "return", "BaseLocalService", ".", "service_response", "(", "LambdaErrorResponses", ".", "_construct_error_response_body", "(", "Lambda...
Creates a Lambda Service Generic PathNotFound Response Parameters ---------- args list List of arguments Flask passes to the method Returns ------- Flask.Response A response object representing the GenericPathNotFound Error
[ "Creates", "a", "Lambda", "Service", "Generic", "PathNotFound", "Response" ]
python
train
jorgenschaefer/elpy
elpy/server.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L74-L79
def rpc_get_oneline_docstring(self, filename, source, offset): """Get a oneline docstring for the symbol at the offset. """ return self._call_backend("rpc_get_oneline_docstring", None, filename, get_source(source), offset)
[ "def", "rpc_get_oneline_docstring", "(", "self", ",", "filename", ",", "source", ",", "offset", ")", ":", "return", "self", ".", "_call_backend", "(", "\"rpc_get_oneline_docstring\"", ",", "None", ",", "filename", ",", "get_source", "(", "source", ")", ",", "o...
Get a oneline docstring for the symbol at the offset.
[ "Get", "a", "oneline", "docstring", "for", "the", "symbol", "at", "the", "offset", "." ]
python
train
rueckstiess/mtools
mtools/util/logevent.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L626-L685
def _extract_counters(self): """Extract counters like nscanned and nreturned from the logevent.""" # extract counters (if present) counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields', 'planSummary', 'writeConflicts', 'keyUpdates'] # TODO: refactor mtools to use current counter names throughout # Transitionary hack: mapping of current names into prior equivalents counter_equiv = { 'docsExamined': 'nscannedObjects', 'keysExamined': 'nscanned', 'nDeleted': 'ndeleted', 'nInserted': 'ninserted', 'nMatched': 'nreturned', 'nModified': 'nupdated' } counters.extend(counter_equiv.keys()) split_tokens = self.split_tokens # trigger operation evaluation to get access to offset if self.operation: for t, token in enumerate(split_tokens[self.datetime_nextpos + 2:]): for counter in counters: if token.startswith('%s:' % counter): try: # Remap counter to standard name, if applicable counter = counter_equiv.get(counter, counter) vars(self)['_' + counter] = int((token.split(':') [-1]).replace(',', '')) except ValueError: # see if this is a pre-2.5.2 numYields with space # in between (e.g. "numYields: 2") # https://jira.mongodb.org/browse/SERVER-10101 if (counter == 'numYields' and token.startswith('numYields')): try: self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', '')) except ValueError: pass if (counter == 'planSummary' and token.startswith('planSummary')): try: self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2] if self._planSummary: if split_tokens[t + 1 + self.datetime_nextpos + 3] != '{': self._actualPlanSummary = self._planSummary else: self._actualPlanSummary = '%s %s' % ( self._planSummary, self._find_pattern('planSummary: %s' % self._planSummary, actual=True) ) except ValueError: pass # token not parsable, skip break
[ "def", "_extract_counters", "(", "self", ")", ":", "# extract counters (if present)", "counters", "=", "[", "'nscanned'", ",", "'nscannedObjects'", ",", "'ntoreturn'", ",", "'nreturned'", ",", "'ninserted'", ",", "'nupdated'", ",", "'ndeleted'", ",", "'r'", ",", "...
Extract counters like nscanned and nreturned from the logevent.
[ "Extract", "counters", "like", "nscanned", "and", "nreturned", "from", "the", "logevent", "." ]
python
train
tBaxter/tango-contact-manager
build/lib/contact_manager/models.py
https://github.com/tBaxter/tango-contact-manager/blob/7bd5be326a8db8f438cdefff0fbd14849d0474a5/build/lib/contact_manager/models.py#L239-L244
def save(self, *args, **kwargs): """ Create formatted version of body text. """ self.body_formatted = sanetize_text(self.body) super(Contact, self).save()
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "body_formatted", "=", "sanetize_text", "(", "self", ".", "body", ")", "super", "(", "Contact", ",", "self", ")", ".", "save", "(", ")" ]
Create formatted version of body text.
[ "Create", "formatted", "version", "of", "body", "text", "." ]
python
train
aiogram/aiogram
aiogram/bot/bot.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1916-L1948
async def answer_shipping_query(self, shipping_query_id: base.String, ok: base.Boolean, shipping_options: typing.Union[typing.List[types.ShippingOption], None] = None, error_message: typing.Union[base.String, None] = None) -> base.Boolean: """ If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Source: https://core.telegram.org/bots/api#answershippingquery :param shipping_query_id: Unique identifier for the query to be answered :type shipping_query_id: :obj:`base.String` :param ok: Specify True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible) :type ok: :obj:`base.Boolean` :param shipping_options: Required if ok is True. A JSON-serialized array of available shipping options :type shipping_options: :obj:`typing.Union[typing.List[types.ShippingOption], None]` :param error_message: Required if ok is False Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable'). Telegram will display this message to the user. :type error_message: :obj:`typing.Union[base.String, None]` :return: On success, True is returned :rtype: :obj:`base.Boolean` """ if shipping_options: shipping_options = prepare_arg([shipping_option.to_python() if hasattr(shipping_option, 'to_python') else shipping_option for shipping_option in shipping_options]) payload = generate_payload(**locals()) result = await self.request(api.Methods.ANSWER_SHIPPING_QUERY, payload) return result
[ "async", "def", "answer_shipping_query", "(", "self", ",", "shipping_query_id", ":", "base", ".", "String", ",", "ok", ":", "base", ".", "Boolean", ",", "shipping_options", ":", "typing", ".", "Union", "[", "typing", ".", "List", "[", "types", ".", "Shippi...
If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Source: https://core.telegram.org/bots/api#answershippingquery :param shipping_query_id: Unique identifier for the query to be answered :type shipping_query_id: :obj:`base.String` :param ok: Specify True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible) :type ok: :obj:`base.Boolean` :param shipping_options: Required if ok is True. A JSON-serialized array of available shipping options :type shipping_options: :obj:`typing.Union[typing.List[types.ShippingOption], None]` :param error_message: Required if ok is False Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable'). Telegram will display this message to the user. :type error_message: :obj:`typing.Union[base.String, None]` :return: On success, True is returned :rtype: :obj:`base.Boolean`
[ "If", "you", "sent", "an", "invoice", "requesting", "a", "shipping", "address", "and", "the", "parameter", "is_flexible", "was", "specified", "the", "Bot", "API", "will", "send", "an", "Update", "with", "a", "shipping_query", "field", "to", "the", "bot", "."...
python
train
ChargePoint/pydnp3
examples/master_cmd.py
https://github.com/ChargePoint/pydnp3/blob/5bcd8240d1fc0aa1579e71f2efcab63b4c61c547/examples/master_cmd.py#L68-L71
def do_chan_log_normal(self, line): """Set the channel log level to NORMAL. Command syntax is: chan_log_normal""" self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.NORMAL))
[ "def", "do_chan_log_normal", "(", "self", ",", "line", ")", ":", "self", ".", "application", ".", "channel", ".", "SetLogFilters", "(", "openpal", ".", "LogFilters", "(", "opendnp3", ".", "levels", ".", "NORMAL", ")", ")", "print", "(", "'Channel log filteri...
Set the channel log level to NORMAL. Command syntax is: chan_log_normal
[ "Set", "the", "channel", "log", "level", "to", "NORMAL", ".", "Command", "syntax", "is", ":", "chan_log_normal" ]
python
valid
3ll3d00d/vibe
backend/src/analyser/resources/measurement.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/resources/measurement.py#L95-L102
def _getAbsoluteTime(self, start, delay): """ Adds the delay in seconds to the start time. :param start: :param delay: :return: a datetimem for the specified point in time. """ return start + datetime.timedelta(days=0, seconds=delay)
[ "def", "_getAbsoluteTime", "(", "self", ",", "start", ",", "delay", ")", ":", "return", "start", "+", "datetime", ".", "timedelta", "(", "days", "=", "0", ",", "seconds", "=", "delay", ")" ]
Adds the delay in seconds to the start time. :param start: :param delay: :return: a datetimem for the specified point in time.
[ "Adds", "the", "delay", "in", "seconds", "to", "the", "start", "time", ".", ":", "param", "start", ":", ":", "param", "delay", ":", ":", "return", ":", "a", "datetimem", "for", "the", "specified", "point", "in", "time", "." ]
python
train
angr/angr
angr/state_plugins/inspect.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/inspect.py#L186-L201
def fire(self, state): """ Trigger the breakpoint. :param state: The state. """ if self.action is None or self.action == BP_IPDB: import ipdb; ipdb.set_trace() #pylint:disable=F0401 elif self.action == BP_IPYTHON: import IPython shell = IPython.terminal.embed.InteractiveShellEmbed() shell.mainloop(display_banner="This is an ipython shell for you to happily debug your state!\n" + \ "The state can be accessed through the variable 'state'. You can\n" +\ "make modifications, then exit this shell to resume your analysis.") else: self.action(state)
[ "def", "fire", "(", "self", ",", "state", ")", ":", "if", "self", ".", "action", "is", "None", "or", "self", ".", "action", "==", "BP_IPDB", ":", "import", "ipdb", "ipdb", ".", "set_trace", "(", ")", "#pylint:disable=F0401", "elif", "self", ".", "actio...
Trigger the breakpoint. :param state: The state.
[ "Trigger", "the", "breakpoint", "." ]
python
train
bwohlberg/sporco
sporco/admm/ccmodmd.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmodmd.py#L524-L529
def obfn_g1(self, Y1): r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective function. """ return np.linalg.norm((self.Pcn(Y1) - Y1))
[ "def", "obfn_g1", "(", "self", ",", "Y1", ")", ":", "return", "np", ".", "linalg", ".", "norm", "(", "(", "self", ".", "Pcn", "(", "Y1", ")", "-", "Y1", ")", ")" ]
r"""Compute :math:`g_1(\mathbf{y_1})` component of ADMM objective function.
[ "r", "Compute", ":", "math", ":", "g_1", "(", "\\", "mathbf", "{", "y_1", "}", ")", "component", "of", "ADMM", "objective", "function", "." ]
python
train
bwohlberg/sporco
sporco/dictlrn/prlcnscdl.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L727-L739
def ccmodmd_xstep(k): """Do the X step of the ccmod stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables. """ YU0 = mp_D_Y0 - mp_D_U0[k] YU1 = mp_D_Y1[k] + mp_S[k] - mp_D_U1[k] b = sl.rfftn(YU0, None, mp_cri.axisN) + \ np.conj(mp_Zf[k]) * sl.rfftn(YU1, None, mp_cri.axisN) Xf = sl.solvedbi_sm(mp_Zf[k], 1.0, b, axis=mp_cri.axisM) mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN) mp_DX[k] = sl.irfftn(sl.inner(Xf, mp_Zf[k]), mp_cri.Nv, mp_cri.axisN)
[ "def", "ccmodmd_xstep", "(", "k", ")", ":", "YU0", "=", "mp_D_Y0", "-", "mp_D_U0", "[", "k", "]", "YU1", "=", "mp_D_Y1", "[", "k", "]", "+", "mp_S", "[", "k", "]", "-", "mp_D_U1", "[", "k", "]", "b", "=", "sl", ".", "rfftn", "(", "YU0", ",", ...
Do the X step of the ccmod stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables.
[ "Do", "the", "X", "step", "of", "the", "ccmod", "stage", ".", "The", "only", "parameter", "is", "the", "slice", "index", "k", "and", "there", "are", "no", "return", "values", ";", "all", "inputs", "and", "outputs", "are", "from", "and", "to", "global",...
python
train
gboeing/osmnx
osmnx/utils.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/utils.py#L311-L357
def get_largest_component(G, strongly=False): """ Return a subgraph of the largest weakly or strongly connected component from a directed graph. Parameters ---------- G : networkx multidigraph strongly : bool if True, return the largest strongly instead of weakly connected component Returns ------- G : networkx multidigraph the largest connected component subgraph from the original graph """ start_time = time.time() original_len = len(list(G.nodes())) if strongly: # if the graph is not connected retain only the largest strongly connected component if not nx.is_strongly_connected(G): # get all the strongly connected components in graph then identify the largest sccs = nx.strongly_connected_components(G) largest_scc = max(sccs, key=len) G = induce_subgraph(G, largest_scc) msg = ('Graph was not connected, retained only the largest strongly ' 'connected component ({:,} of {:,} total nodes) in {:.2f} seconds') log(msg.format(len(list(G.nodes())), original_len, time.time()-start_time)) else: # if the graph is not connected retain only the largest weakly connected component if not nx.is_weakly_connected(G): # get all the weakly connected components in graph then identify the largest wccs = nx.weakly_connected_components(G) largest_wcc = max(wccs, key=len) G = induce_subgraph(G, largest_wcc) msg = ('Graph was not connected, retained only the largest weakly ' 'connected component ({:,} of {:,} total nodes) in {:.2f} seconds') log(msg.format(len(list(G.nodes())), original_len, time.time()-start_time)) return G
[ "def", "get_largest_component", "(", "G", ",", "strongly", "=", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "original_len", "=", "len", "(", "list", "(", "G", ".", "nodes", "(", ")", ")", ")", "if", "strongly", ":", "# if the...
Return a subgraph of the largest weakly or strongly connected component from a directed graph. Parameters ---------- G : networkx multidigraph strongly : bool if True, return the largest strongly instead of weakly connected component Returns ------- G : networkx multidigraph the largest connected component subgraph from the original graph
[ "Return", "a", "subgraph", "of", "the", "largest", "weakly", "or", "strongly", "connected", "component", "from", "a", "directed", "graph", "." ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/tensor_dataset.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/tensor_dataset.py#L421-L425
def tensor_index(self, datapoint_index): """ Returns the index of the tensor containing the referenced datapoint. """ if datapoint_index >= self._num_datapoints: raise ValueError('Datapoint index %d is greater than the number of datapoints (%d)' %(datapoint_index, self._num_datapoints)) return self._index_to_file_num[datapoint_index]
[ "def", "tensor_index", "(", "self", ",", "datapoint_index", ")", ":", "if", "datapoint_index", ">=", "self", ".", "_num_datapoints", ":", "raise", "ValueError", "(", "'Datapoint index %d is greater than the number of datapoints (%d)'", "%", "(", "datapoint_index", ",", ...
Returns the index of the tensor containing the referenced datapoint.
[ "Returns", "the", "index", "of", "the", "tensor", "containing", "the", "referenced", "datapoint", "." ]
python
train
cohorte/cohorte-herald
python/snippets/herald_irc/client.py
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/client.py#L147-L156
def on_welcome(self, connection, event): """ Server welcome: we're connected """ # Start the pool self.__pool.start() logging.info("! Connected to server '%s': %s", event.source, event.arguments[0]) connection.join("#cohorte")
[ "def", "on_welcome", "(", "self", ",", "connection", ",", "event", ")", ":", "# Start the pool", "self", ".", "__pool", ".", "start", "(", ")", "logging", ".", "info", "(", "\"! Connected to server '%s': %s\"", ",", "event", ".", "source", ",", "event", ".",...
Server welcome: we're connected
[ "Server", "welcome", ":", "we", "re", "connected" ]
python
train
b3j0f/utils
b3j0f/utils/property.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/property.py#L255-L275
def get_first_property(elt, key, default=None, ctx=None): """Get first property related to one input key. :param elt: first property elt. Not None methods. :param str key: property key to get. :param default: default value to return if key does not exist in elt. properties :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class. """ result = default properties = _get_properties(elt, keys=(key,), ctx=ctx, first=True) # set value if key exists in properties if key in properties: result = properties[key] return result
[ "def", "get_first_property", "(", "elt", ",", "key", ",", "default", "=", "None", ",", "ctx", "=", "None", ")", ":", "result", "=", "default", "properties", "=", "_get_properties", "(", "elt", ",", "keys", "=", "(", "key", ",", ")", ",", "ctx", "=", ...
Get first property related to one input key. :param elt: first property elt. Not None methods. :param str key: property key to get. :param default: default value to return if key does not exist in elt. properties :param ctx: elt ctx from where get properties. Equals elt if None. It allows to get function properties related to a class or instance if related function is defined in base class.
[ "Get", "first", "property", "related", "to", "one", "input", "key", "." ]
python
train
Neurita/boyle
boyle/utils/rcfile.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/rcfile.py#L245-L270
def get_rcfile_section(app_name, section_name): """ Return the dictionary containing the rcfile section configuration variables. Parameters ---------- section_name: str Name of the section in the rcfiles. app_name: str Name of the application to look for its rcfiles. Returns ------- settings: dict Dict with variable values """ try: settings = rcfile(app_name, section_name) except IOError: raise except: raise KeyError('Error looking for section {} in {} ' ' rcfiles.'.format(section_name, app_name)) else: return settings
[ "def", "get_rcfile_section", "(", "app_name", ",", "section_name", ")", ":", "try", ":", "settings", "=", "rcfile", "(", "app_name", ",", "section_name", ")", "except", "IOError", ":", "raise", "except", ":", "raise", "KeyError", "(", "'Error looking for section...
Return the dictionary containing the rcfile section configuration variables. Parameters ---------- section_name: str Name of the section in the rcfiles. app_name: str Name of the application to look for its rcfiles. Returns ------- settings: dict Dict with variable values
[ "Return", "the", "dictionary", "containing", "the", "rcfile", "section", "configuration", "variables", "." ]
python
valid
sjwood/pydvdid
pydvdid/functions.py
https://github.com/sjwood/pydvdid/blob/03914fb7e24283c445e5af724f9d919b23caaf95/pydvdid/functions.py#L105-L112
def _convert_timedelta_to_seconds(timedelta): """Returns the total seconds calculated from the supplied timedelta. (Function provided to enable running on Python 2.6 which lacks timedelta.total_seconds()). """ days_in_seconds = timedelta.days * 24 * 3600 return int((timedelta.microseconds + (timedelta.seconds + days_in_seconds) * 10 ** 6) / 10 ** 6)
[ "def", "_convert_timedelta_to_seconds", "(", "timedelta", ")", ":", "days_in_seconds", "=", "timedelta", ".", "days", "*", "24", "*", "3600", "return", "int", "(", "(", "timedelta", ".", "microseconds", "+", "(", "timedelta", ".", "seconds", "+", "days_in_seco...
Returns the total seconds calculated from the supplied timedelta. (Function provided to enable running on Python 2.6 which lacks timedelta.total_seconds()).
[ "Returns", "the", "total", "seconds", "calculated", "from", "the", "supplied", "timedelta", "." ]
python
train
haizi-zh/scrapy-qiniu
scrapy_qiniu/impl.py
https://github.com/haizi-zh/scrapy-qiniu/blob/9a3dddacd2e665cb3c86308772040946c3b82415/scrapy_qiniu/impl.py#L146-L155
def get_media_requests(self, item, info): """ 根据item中的信息, 构造出需要下载的静态资源的Request对象 :param item: :param info: :return: """ key_generator = item.get(self.QINIU_KEY_GENERATOR_FIELD) return [Request(x, meta={'qiniu_key_generator': key_generator}) for x in item.get(self.FILES_URLS_FIELD, [])]
[ "def", "get_media_requests", "(", "self", ",", "item", ",", "info", ")", ":", "key_generator", "=", "item", ".", "get", "(", "self", ".", "QINIU_KEY_GENERATOR_FIELD", ")", "return", "[", "Request", "(", "x", ",", "meta", "=", "{", "'qiniu_key_generator'", ...
根据item中的信息, 构造出需要下载的静态资源的Request对象 :param item: :param info: :return:
[ "根据item中的信息", "构造出需要下载的静态资源的Request对象" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/orm_inspect.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L291-L382
def rewrite_relationships(oldobj: object, newobj: object, objmap: Dict[object, object], debug: bool = False, skip_table_names: List[str] = None) -> None: """ A utility function only. Used in copying objects between SQLAlchemy sessions. Both ``oldobj`` and ``newobj`` are SQLAlchemy instances. The instance ``newobj`` is already a copy of ``oldobj`` but we wish to rewrite its relationships, according to the map ``objmap``, which maps old to new objects. For example: - Suppose a source session has a Customer record and a Sale record containing ``sale.customer_id``, a foreign key to Customer. - We may have corresponding Python SQLAlchemy ORM objects ``customer_1_src`` and ``sale_1_src``. - We copy them into a destination database, where their Python ORM objects are ``customer_1_dest`` and ``sale_1_dest``. - In the process we set up an object map looking like: .. code-block:: none Old session New session ------------------------------- customer_1_src customer_1_dest sale_1_src sale_1_dest - Now, we wish to make ``sale_1_dest`` have a relationship to ``customer_1_dest``, in the same way that ``sale_1_src`` has a relationship to ``customer_1_src``. This function will modify ``sale_1_dest`` accordingly, given this object map. It will observe that ``sale_1_src`` (here ``oldobj``) has a relationship to ``customer_1_src``; it will note that ``objmap`` maps ``customer_1_src`` to ``customer_1_dest``; it will create the relationship from ``sale_1_dest`` (here ``newobj``) to ``customer_1_dest``. Args: oldobj: SQLAlchemy ORM object to read from newobj: SQLAlchemy ORM object to write to objmap: dictionary mapping "source" objects to their corresponding "destination" object. 
debug: be verbose skip_table_names: if a related table's name is in this (optional) list, that relationship is skipped """ skip_table_names = skip_table_names or [] # type: List[str] insp = inspect(oldobj) # type: InstanceState # insp.mapper.relationships is of type # sqlalchemy.utils._collections.ImmutableProperties, which is basically # a sort of AttrDict. for attrname_rel in insp.mapper.relationships.items(): # type: Tuple[str, RelationshipProperty] # noqa attrname = attrname_rel[0] rel_prop = attrname_rel[1] if rel_prop.viewonly: if debug: log.debug("Skipping viewonly relationship") continue # don't attempt to write viewonly relationships # noqa related_class = rel_prop.mapper.class_ related_table_name = related_class.__tablename__ # type: str if related_table_name in skip_table_names: if debug: log.debug("Skipping relationship for related table {!r}", related_table_name) continue # The relationship is an abstract object (so getting the # relationship from the old object and from the new, with e.g. # newrel = newinsp.mapper.relationships[oldrel.key], # yield the same object. All we need from it is the key name. # rel_key = rel.key # type: str # ... but also available from the mapper as attrname, above related_old = getattr(oldobj, attrname) if rel_prop.uselist: related_new = [objmap[r] for r in related_old] elif related_old is not None: related_new = objmap[related_old] else: related_new = None if debug: log.debug("rewrite_relationships: relationship {} -> {}", attrname, related_new) setattr(newobj, attrname, related_new)
[ "def", "rewrite_relationships", "(", "oldobj", ":", "object", ",", "newobj", ":", "object", ",", "objmap", ":", "Dict", "[", "object", ",", "object", "]", ",", "debug", ":", "bool", "=", "False", ",", "skip_table_names", ":", "List", "[", "str", "]", "...
A utility function only. Used in copying objects between SQLAlchemy sessions. Both ``oldobj`` and ``newobj`` are SQLAlchemy instances. The instance ``newobj`` is already a copy of ``oldobj`` but we wish to rewrite its relationships, according to the map ``objmap``, which maps old to new objects. For example: - Suppose a source session has a Customer record and a Sale record containing ``sale.customer_id``, a foreign key to Customer. - We may have corresponding Python SQLAlchemy ORM objects ``customer_1_src`` and ``sale_1_src``. - We copy them into a destination database, where their Python ORM objects are ``customer_1_dest`` and ``sale_1_dest``. - In the process we set up an object map looking like: .. code-block:: none Old session New session ------------------------------- customer_1_src customer_1_dest sale_1_src sale_1_dest - Now, we wish to make ``sale_1_dest`` have a relationship to ``customer_1_dest``, in the same way that ``sale_1_src`` has a relationship to ``customer_1_src``. This function will modify ``sale_1_dest`` accordingly, given this object map. It will observe that ``sale_1_src`` (here ``oldobj``) has a relationship to ``customer_1_src``; it will note that ``objmap`` maps ``customer_1_src`` to ``customer_1_dest``; it will create the relationship from ``sale_1_dest`` (here ``newobj``) to ``customer_1_dest``. Args: oldobj: SQLAlchemy ORM object to read from newobj: SQLAlchemy ORM object to write to objmap: dictionary mapping "source" objects to their corresponding "destination" object. debug: be verbose skip_table_names: if a related table's name is in this (optional) list, that relationship is skipped
[ "A", "utility", "function", "only", ".", "Used", "in", "copying", "objects", "between", "SQLAlchemy", "sessions", "." ]
python
train
ionelmc/python-redis-lock
src/redis_lock/__init__.py
https://github.com/ionelmc/python-redis-lock/blob/5481cd88b64d86d318e389c79b0575a73464b1f5/src/redis_lock/__init__.py#L129-L140
def _eval_script(redis, script_id, *keys, **kwargs): """Tries to call ``EVALSHA`` with the `hash` and then, if it fails, calls regular ``EVAL`` with the `script`. """ args = kwargs.pop('args', ()) if kwargs: raise TypeError("Unexpected keyword arguments %s" % kwargs.keys()) try: return redis.evalsha(SCRIPTS[script_id], len(keys), *keys + args) except NoScriptError: logger.info("%s not cached.", SCRIPTS[script_id + 2]) return redis.eval(SCRIPTS[script_id + 1], len(keys), *keys + args)
[ "def", "_eval_script", "(", "redis", ",", "script_id", ",", "*", "keys", ",", "*", "*", "kwargs", ")", ":", "args", "=", "kwargs", ".", "pop", "(", "'args'", ",", "(", ")", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"Unexpected keyword argu...
Tries to call ``EVALSHA`` with the `hash` and then, if it fails, calls regular ``EVAL`` with the `script`.
[ "Tries", "to", "call", "EVALSHA", "with", "the", "hash", "and", "then", "if", "it", "fails", "calls", "regular", "EVAL", "with", "the", "script", "." ]
python
train
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L228-L276
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00", interp_type="spline"): """ Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available. """ if interp_type == "spline": out_data = self.interpolate_grid(in_lon, in_lat) else: out_data = self.max_neighbor(in_lon, in_lat) if not os.access(out_path + self.variable, os.R_OK): try: os.mkdir(out_path + self.variable) except OSError: print(out_path + self.variable + " already created") out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable, self.start_date.strftime("%Y%m%d-%H:%M"), self.end_date.strftime("%Y%m%d-%H:%M")) out_obj = Dataset(out_file, "w") out_obj.createDimension("time", out_data.shape[0]) out_obj.createDimension("y", out_data.shape[1]) out_obj.createDimension("x", out_data.shape[2]) data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True, fill_value=-9999.0, least_significant_digit=3) data_var[:] = out_data data_var.long_name = self.variable data_var.coordinates = "latitude longitude" if "MESH" in self.variable or "QPE" in self.variable: data_var.units = "mm" elif "Reflectivity" in self.variable: data_var.units = "dBZ" elif "Rotation" in self.variable: data_var.units = "s-1" else: data_var.units = "" out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True) out_lon[:] = in_lon out_lon.units = "degrees_east" out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True) out_lat[:] = in_lat out_lat.units = "degrees_north" dates = out_obj.createVariable("time", "i8", ("time",), zlib=True) dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64) dates.long_name = "Valid date" dates.units = date_unit out_obj.Conventions="CF-1.6" out_obj.close() return
[ "def", "interpolate_to_netcdf", "(", "self", ",", "in_lon", ",", "in_lat", ",", "out_path", ",", "date_unit", "=", "\"seconds since 1970-01-01T00:00\"", ",", "interp_type", "=", "\"spline\"", ")", ":", "if", "interp_type", "==", "\"spline\"", ":", "out_data", "=",...
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available.
[ "Calls", "the", "interpolation", "function", "and", "then", "saves", "the", "MRMS", "data", "to", "a", "netCDF", "file", ".", "It", "will", "also", "create", "separate", "directories", "for", "each", "variable", "if", "they", "are", "not", "already", "availa...
python
train
boundlessgeo/gsconfig
src/geoserver/catalog.py
https://github.com/boundlessgeo/gsconfig/blob/532f561f32b91ea8debea0573c503dd20988bf40/src/geoserver/catalog.py#L187-L192
def get_short_version(self): '''obtain the shory geoserver version ''' gs_version = self.get_version() match = re.compile(r'[^\d.]+') return match.sub('', gs_version).strip('.')
[ "def", "get_short_version", "(", "self", ")", ":", "gs_version", "=", "self", ".", "get_version", "(", ")", "match", "=", "re", ".", "compile", "(", "r'[^\\d.]+'", ")", "return", "match", ".", "sub", "(", "''", ",", "gs_version", ")", ".", "strip", "("...
obtain the shory geoserver version
[ "obtain", "the", "shory", "geoserver", "version" ]
python
valid
gtaylor/python-colormath
colormath/chromatic_adaptation.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/chromatic_adaptation.py#L101-L119
def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'): """ Convenience function to apply an adaptation directly to a Color object. """ xyz_x = color.xyz_x xyz_y = color.xyz_y xyz_z = color.xyz_z orig_illum = color.illuminant targ_illum = targ_illum.lower() observer = color.observer adaptation = adaptation.lower() # Return individual X, Y, and Z coordinates. color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation( xyz_x, xyz_y, xyz_z, orig_illum, targ_illum, observer=observer, adaptation=adaptation) color.set_illuminant(targ_illum) return color
[ "def", "apply_chromatic_adaptation_on_color", "(", "color", ",", "targ_illum", ",", "adaptation", "=", "'bradford'", ")", ":", "xyz_x", "=", "color", ".", "xyz_x", "xyz_y", "=", "color", ".", "xyz_y", "xyz_z", "=", "color", ".", "xyz_z", "orig_illum", "=", "...
Convenience function to apply an adaptation directly to a Color object.
[ "Convenience", "function", "to", "apply", "an", "adaptation", "directly", "to", "a", "Color", "object", "." ]
python
train
hydraplatform/hydra-base
hydra_base/lib/groups.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/groups.py#L43-L54
def add_resourcegroup(group, network_id,**kwargs): """ Add a new group to a network. """ group_i = ResourceGroup() group_i.name = group.name group_i.description = group.description group_i.status = group.status group_i.network_id = network_id db.DBSession.add(group_i) db.DBSession.flush() return group_i
[ "def", "add_resourcegroup", "(", "group", ",", "network_id", ",", "*", "*", "kwargs", ")", ":", "group_i", "=", "ResourceGroup", "(", ")", "group_i", ".", "name", "=", "group", ".", "name", "group_i", ".", "description", "=", "group", ".", "description", ...
Add a new group to a network.
[ "Add", "a", "new", "group", "to", "a", "network", "." ]
python
train
CityOfZion/neo-python
neo/Core/Blockchain.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/Blockchain.py#L99-L128
def GenesisBlock() -> Block: """ Create the GenesisBlock. Returns: BLock: """ prev_hash = UInt256(data=bytearray(32)) timestamp = int(datetime(2016, 7, 15, 15, 8, 21, tzinfo=pytz.utc).timestamp()) index = 0 consensus_data = 2083236893 # Pay tribute To Bitcoin next_consensus = Blockchain.GetConsensusAddress(Blockchain.StandbyValidators()) script = Witness(bytearray(0), bytearray(PUSHT)) mt = MinerTransaction() mt.Nonce = 2083236893 output = TransactionOutput( Blockchain.SystemShare().Hash, Blockchain.SystemShare().Amount, Crypto.ToScriptHash(Contract.CreateMultiSigRedeemScript(int(len(Blockchain.StandbyValidators()) / 2) + 1, Blockchain.StandbyValidators())) ) it = IssueTransaction([], [output], [], [script]) return Block(prev_hash, timestamp, index, consensus_data, next_consensus, script, [mt, Blockchain.SystemShare(), Blockchain.SystemCoin(), it], True)
[ "def", "GenesisBlock", "(", ")", "->", "Block", ":", "prev_hash", "=", "UInt256", "(", "data", "=", "bytearray", "(", "32", ")", ")", "timestamp", "=", "int", "(", "datetime", "(", "2016", ",", "7", ",", "15", ",", "15", ",", "8", ",", "21", ",",...
Create the GenesisBlock. Returns: BLock:
[ "Create", "the", "GenesisBlock", "." ]
python
train
wummel/linkchecker
linkcheck/fileutil.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/fileutil.py#L211-L214
def is_writable_by_others(filename): """Check if file or directory is world writable.""" mode = os.stat(filename)[stat.ST_MODE] return mode & stat.S_IWOTH
[ "def", "is_writable_by_others", "(", "filename", ")", ":", "mode", "=", "os", ".", "stat", "(", "filename", ")", "[", "stat", ".", "ST_MODE", "]", "return", "mode", "&", "stat", ".", "S_IWOTH" ]
Check if file or directory is world writable.
[ "Check", "if", "file", "or", "directory", "is", "world", "writable", "." ]
python
train
python/core-workflow
cherry_picker/cherry_picker/cherry_picker.py
https://github.com/python/core-workflow/blob/b93c76195f6db382cfcefee334380fb4c68d4e21/cherry_picker/cherry_picker/cherry_picker.py#L258-L278
def amend_commit_message(self, cherry_pick_branch): """ prefix the commit message with (X.Y) """ commit_prefix = "" if self.prefix_commit: commit_prefix = f"[{get_base_branch(cherry_pick_branch)}] " updated_commit_message = f"""{commit_prefix}{self.get_commit_message(self.commit_sha1)} (cherry picked from commit {self.commit_sha1}) Co-authored-by: {get_author_info_from_short_sha(self.commit_sha1)}""" if self.dry_run: click.echo(f" dry-run: git commit --amend -m '{updated_commit_message}'") else: cmd = ["git", "commit", "--amend", "-m", updated_commit_message] try: subprocess.check_output(cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as cpe: click.echo("Failed to amend the commit message \u2639") click.echo(cpe.output) return updated_commit_message
[ "def", "amend_commit_message", "(", "self", ",", "cherry_pick_branch", ")", ":", "commit_prefix", "=", "\"\"", "if", "self", ".", "prefix_commit", ":", "commit_prefix", "=", "f\"[{get_base_branch(cherry_pick_branch)}] \"", "updated_commit_message", "=", "f\"\"\"{commit_pref...
prefix the commit message with (X.Y)
[ "prefix", "the", "commit", "message", "with", "(", "X", ".", "Y", ")" ]
python
train
mrallen1/pygett
pygett/request.py
https://github.com/mrallen1/pygett/blob/1e21f8674a3634a901af054226670174b5ce2d87/pygett/request.py#L45-L58
def get(self, endpoint, *args, **kwargs): """ **get** Make a GET call to a remote endpoint Input: * An endpoint relative to the ``base_url`` Output: * A :py:mod:`pygett.request.GettResponse` object """ endpoint = self.base_url + endpoint return self._make_request(endpoint, type='GET')
[ "def", "get", "(", "self", ",", "endpoint", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "endpoint", "=", "self", ".", "base_url", "+", "endpoint", "return", "self", ".", "_make_request", "(", "endpoint", ",", "type", "=", "'GET'", ")" ]
**get** Make a GET call to a remote endpoint Input: * An endpoint relative to the ``base_url`` Output: * A :py:mod:`pygett.request.GettResponse` object
[ "**", "get", "**" ]
python
train
asweigart/pysimplevalidate
src/pysimplevalidate/__init__.py
https://github.com/asweigart/pysimplevalidate/blob/3ca27228abb7355d14bbf8abc225c63366379e44/src/pysimplevalidate/__init__.py#L611-L633
def _validateParamsFor__validateToDateTimeFormat(formats, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises PySimpleValidateException if the arguments are invalid. This is called by the validateTime() function to check its arguments. This code was refactored out to a separate function so that the PyInputPlus module (or other modules) could check their parameters' arguments for inputTime(). """ _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) if formats is None: raise PySimpleValidateException('formats parameter must be specified') if isinstance(formats, str): raise PySimpleValidateException('formats argument must be a non-str sequence of strftime format strings') try: len(formats) except: raise PySimpleValidateException('formats argument must be a non-str sequence of strftime format strings') for format in formats: try: time.strftime(format) # This will raise an exception if the format is invalid. except: raise PySimpleValidateException('formats argument contains invalid strftime format strings')
[ "def", "_validateParamsFor__validateToDateTimeFormat", "(", "formats", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "excMsg", "=", "None", ")", ":", "_validateGenericParamet...
Raises PySimpleValidateException if the arguments are invalid. This is called by the validateTime() function to check its arguments. This code was refactored out to a separate function so that the PyInputPlus module (or other modules) could check their parameters' arguments for inputTime().
[ "Raises", "PySimpleValidateException", "if", "the", "arguments", "are", "invalid", ".", "This", "is", "called", "by", "the", "validateTime", "()", "function", "to", "check", "its", "arguments", ".", "This", "code", "was", "refactored", "out", "to", "a", "separ...
python
train
dmlc/gluon-nlp
src/gluonnlp/model/train/embedding.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/train/embedding.py#L120-L131
def hybrid_forward(self, F, words, weight): """Compute embedding of words in batch. Parameters ---------- words : mx.nd.NDArray Array of token indices. """ #pylint: disable=arguments-differ embeddings = F.sparse.dot(words, weight) return embeddings
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "words", ",", "weight", ")", ":", "#pylint: disable=arguments-differ", "embeddings", "=", "F", ".", "sparse", ".", "dot", "(", "words", ",", "weight", ")", "return", "embeddings" ]
Compute embedding of words in batch. Parameters ---------- words : mx.nd.NDArray Array of token indices.
[ "Compute", "embedding", "of", "words", "in", "batch", "." ]
python
train
sashahart/vex
vex/config.py
https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/config.py#L61-L76
def read(self, path, environ): """Read data from file into this vexrc instance. """ try: inp = open(path, 'rb') except FileNotFoundError as error: if error.errno != 2: raise return None parsing = parse_vexrc(inp, environ) for heading, key, value in parsing: heading = self.default_heading if heading is None else heading if heading not in self.headings: self.headings[heading] = OrderedDict() self.headings[heading][key] = value parsing.close()
[ "def", "read", "(", "self", ",", "path", ",", "environ", ")", ":", "try", ":", "inp", "=", "open", "(", "path", ",", "'rb'", ")", "except", "FileNotFoundError", "as", "error", ":", "if", "error", ".", "errno", "!=", "2", ":", "raise", "return", "No...
Read data from file into this vexrc instance.
[ "Read", "data", "from", "file", "into", "this", "vexrc", "instance", "." ]
python
train
NLeSC/scriptcwl
scriptcwl/workflow.py
https://github.com/NLeSC/scriptcwl/blob/33bb847a875379da3a5702c7a98dfa585306b960/scriptcwl/workflow.py#L342-L370
def _get_step(self, name, make_copy=True): """Return step from steps library. Optionally, the step returned is a deep copy from the step in the steps library, so additional information (e.g., about whether the step was scattered) can be stored in the copy. Args: name (str): name of the step in the steps library. make_copy (bool): whether a deep copy of the step should be returned or not (default: True). Returns: Step from steps library. Raises: ValueError: The requested step cannot be found in the steps library. """ self._closed() s = self.steps_library.get_step(name) if s is None: msg = '"{}" not found in steps library. Please check your ' \ 'spelling or load additional steps' raise ValueError(msg.format(name)) if make_copy: s = copy.deepcopy(s) return s
[ "def", "_get_step", "(", "self", ",", "name", ",", "make_copy", "=", "True", ")", ":", "self", ".", "_closed", "(", ")", "s", "=", "self", ".", "steps_library", ".", "get_step", "(", "name", ")", "if", "s", "is", "None", ":", "msg", "=", "'\"{}\" n...
Return step from steps library. Optionally, the step returned is a deep copy from the step in the steps library, so additional information (e.g., about whether the step was scattered) can be stored in the copy. Args: name (str): name of the step in the steps library. make_copy (bool): whether a deep copy of the step should be returned or not (default: True). Returns: Step from steps library. Raises: ValueError: The requested step cannot be found in the steps library.
[ "Return", "step", "from", "steps", "library", "." ]
python
train
DocNow/twarc
twarc/client.py
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/client.py#L753-L774
def validate_keys(self): """ Validate the keys provided are authentic credentials. """ url = 'https://api.twitter.com/1.1/account/verify_credentials.json' keys_present = self.consumer_key and self.consumer_secret and \ self.access_token and self.access_token_secret if keys_present: try: # Need to explicitly reconnect to confirm the current creds # are used in the session object. self.connect() self.get(url) except requests.HTTPError as e: if e.response.status_code == 401: raise RuntimeError('Invalid credentials provided.') else: raise e else: raise RuntimeError('Incomplete credentials provided.')
[ "def", "validate_keys", "(", "self", ")", ":", "url", "=", "'https://api.twitter.com/1.1/account/verify_credentials.json'", "keys_present", "=", "self", ".", "consumer_key", "and", "self", ".", "consumer_secret", "and", "self", ".", "access_token", "and", "self", ".",...
Validate the keys provided are authentic credentials.
[ "Validate", "the", "keys", "provided", "are", "authentic", "credentials", "." ]
python
train
dlecocq/nsq-py
nsq/connection.py
https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/connection.py#L330-L371
def _read(self, limit=1000): '''Return all the responses read''' # It's important to know that it may return no responses or multiple # responses. It depends on how the buffering works out. First, read from # the socket for sock in self.socket(): if sock is None: # Race condition. Connection has been closed. return [] try: packet = sock.recv(4096) except socket.timeout: # If the socket times out, return nothing return [] except socket.error as exc: # Catch (errno, message)-type socket.errors if exc.args[0] in self.WOULD_BLOCK_ERRS: return [] else: raise # Append our newly-read data to our buffer self._buffer += packet responses = [] total = 0 buf = self._buffer remaining = len(buf) while limit and (remaining >= 4): size = struct.unpack('>l', buf[total:(total + 4)])[0] # Now check to see if there's enough left in the buffer to read # the message. if (remaining - 4) >= size: responses.append(Response.from_raw( self, buf[(total + 4):(total + size + 4)])) total += (size + 4) remaining -= (size + 4) limit -= 1 else: break self._buffer = self._buffer[total:] return responses
[ "def", "_read", "(", "self", ",", "limit", "=", "1000", ")", ":", "# It's important to know that it may return no responses or multiple", "# responses. It depends on how the buffering works out. First, read from", "# the socket", "for", "sock", "in", "self", ".", "socket", "(",...
Return all the responses read
[ "Return", "all", "the", "responses", "read" ]
python
train
litl/rauth
rauth/session.py
https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/session.py#L470-L515
def sign(url, app_id, app_secret, hash_meth='sha1', **params): ''' A signature method which generates the necessary Ofly parameters. :param app_id: The oFlyAppId, i.e. "application ID". :type app_id: str :param app_secret: The oFlyAppSecret, i.e. "shared secret". :type app_secret: str :param hash_meth: The hash method to use for signing, defaults to "sha1". :type hash_meth: str :param \*\*params: Additional parameters. :type \*\*\params: dict ''' hash_meth_str = hash_meth if hash_meth == 'sha1': hash_meth = sha1 elif hash_meth == 'md5': hash_meth = md5 else: raise TypeError('hash_meth must be one of "sha1", "md5"') now = datetime.utcnow() milliseconds = now.microsecond // 1000 time_format = '%Y-%m-%dT%H:%M:%S.{0}Z'.format(milliseconds) ofly_params = {'oflyAppId': app_id, 'oflyHashMeth': hash_meth_str.upper(), 'oflyTimestamp': now.strftime(time_format)} url_path = urlsplit(url).path signature_base_string = app_secret + url_path + '?' if len(params): signature_base_string += get_sorted_params(params) + '&' signature_base_string += get_sorted_params(ofly_params) if not isinstance(signature_base_string, bytes): signature_base_string = signature_base_string.encode('utf-8') ofly_params['oflyApiSig'] = \ hash_meth(signature_base_string).hexdigest() all_params = dict(tuple(ofly_params.items()) + tuple(params.items())) return get_sorted_params(all_params)
[ "def", "sign", "(", "url", ",", "app_id", ",", "app_secret", ",", "hash_meth", "=", "'sha1'", ",", "*", "*", "params", ")", ":", "hash_meth_str", "=", "hash_meth", "if", "hash_meth", "==", "'sha1'", ":", "hash_meth", "=", "sha1", "elif", "hash_meth", "==...
A signature method which generates the necessary Ofly parameters. :param app_id: The oFlyAppId, i.e. "application ID". :type app_id: str :param app_secret: The oFlyAppSecret, i.e. "shared secret". :type app_secret: str :param hash_meth: The hash method to use for signing, defaults to "sha1". :type hash_meth: str :param \*\*params: Additional parameters. :type \*\*\params: dict
[ "A", "signature", "method", "which", "generates", "the", "necessary", "Ofly", "parameters", "." ]
python
train
camptocamp/Studio
studio/lib/buildjs/jsmin.py
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/buildjs/jsmin.py#L107-L130
def _next(self): """get the next character, excluding comments. peek() is used to see if a '/' is followed by a '/' or '*'. """ c = self._get() if c == '/': p = self._peek() if p == '/': c = self._get() while c > '\n': c = self._get() return c if p == '*': c = self._get() while 1: c = self._get() if c == '*': if self._peek() == '/': self._get() return ' ' if c == '\000': raise UnterminatedComment() return c
[ "def", "_next", "(", "self", ")", ":", "c", "=", "self", ".", "_get", "(", ")", "if", "c", "==", "'/'", ":", "p", "=", "self", ".", "_peek", "(", ")", "if", "p", "==", "'/'", ":", "c", "=", "self", ".", "_get", "(", ")", "while", "c", ">"...
get the next character, excluding comments. peek() is used to see if a '/' is followed by a '/' or '*'.
[ "get", "the", "next", "character", "excluding", "comments", ".", "peek", "()", "is", "used", "to", "see", "if", "a", "/", "is", "followed", "by", "a", "/", "or", "*", "." ]
python
train
PythonCharmers/python-future
src/libfuturize/fixer_util.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/libfuturize/fixer_util.py#L75-L94
def indentation(node): """ Returns the indentation for this node Iff a node is in a suite, then it has indentation. """ while node.parent is not None and node.parent.type != syms.suite: node = node.parent if node.parent is None: return u"" # The first three children of a suite are NEWLINE, INDENT, (some other node) # INDENT.value contains the indentation for this suite # anything after (some other node) has the indentation as its prefix. if node.type == token.INDENT: return node.value elif node.prev_sibling is not None and node.prev_sibling.type == token.INDENT: return node.prev_sibling.value elif node.prev_sibling is None: return u"" else: return node.prefix
[ "def", "indentation", "(", "node", ")", ":", "while", "node", ".", "parent", "is", "not", "None", "and", "node", ".", "parent", ".", "type", "!=", "syms", ".", "suite", ":", "node", "=", "node", ".", "parent", "if", "node", ".", "parent", "is", "No...
Returns the indentation for this node Iff a node is in a suite, then it has indentation.
[ "Returns", "the", "indentation", "for", "this", "node", "Iff", "a", "node", "is", "in", "a", "suite", "then", "it", "has", "indentation", "." ]
python
train
Skype4Py/Skype4Py
Skype4Py/chat.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/chat.py#L119-L130
def SetPassword(self, Password, Hint=''): """Sets the chat password. :Parameters: Password : unicode Password Hint : unicode Password hint """ if ' ' in Password: raise ValueError('Password mut be one word') self._Alter('SETPASSWORD', '%s %s' % (tounicode(Password), tounicode(Hint)))
[ "def", "SetPassword", "(", "self", ",", "Password", ",", "Hint", "=", "''", ")", ":", "if", "' '", "in", "Password", ":", "raise", "ValueError", "(", "'Password mut be one word'", ")", "self", ".", "_Alter", "(", "'SETPASSWORD'", ",", "'%s %s'", "%", "(", ...
Sets the chat password. :Parameters: Password : unicode Password Hint : unicode Password hint
[ "Sets", "the", "chat", "password", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/rs3/rs3tree.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/rs3/rs3tree.py#L111-L134
def dt(self, start_node=None): """main method to create an RSTTree from the output of get_rs3_data(). TODO: add proper documentation """ if start_node is None: return self.root2tree(start_node=start_node) elem_id = start_node if elem_id not in self.elem_dict: return [] elem = self.elem_dict[elem_id] elem_type = elem['element_type'] assert elem_type in ('segment', 'group') if elem_type == 'segment': return self.segment2tree( elem_id, elem, elem_type, start_node=start_node) else: return self.group2tree( elem_id, elem, elem_type, start_node=start_node)
[ "def", "dt", "(", "self", ",", "start_node", "=", "None", ")", ":", "if", "start_node", "is", "None", ":", "return", "self", ".", "root2tree", "(", "start_node", "=", "start_node", ")", "elem_id", "=", "start_node", "if", "elem_id", "not", "in", "self", ...
main method to create an RSTTree from the output of get_rs3_data(). TODO: add proper documentation
[ "main", "method", "to", "create", "an", "RSTTree", "from", "the", "output", "of", "get_rs3_data", "()", "." ]
python
train
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L373-L379
def get_event_teams(self, id, **data): """ GET /events/:id/teams/ Returns a list of :format:`teams` for the event. """ return self.get("/events/{0}/teams/".format(id), data=data)
[ "def", "get_event_teams", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "get", "(", "\"/events/{0}/teams/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
GET /events/:id/teams/ Returns a list of :format:`teams` for the event.
[ "GET", "/", "events", "/", ":", "id", "/", "teams", "/", "Returns", "a", "list", "of", ":", "format", ":", "teams", "for", "the", "event", "." ]
python
train
huge-success/sanic
sanic/blueprints.py
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/blueprints.py#L166-L207
def route( self, uri, methods=frozenset({"GET"}), host=None, strict_slashes=None, stream=False, version=None, name=None, ): """Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute` """ if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): route = FutureRoute( handler, uri, methods, host, strict_slashes, stream, version, name, ) self.routes.append(route) return handler return decorator
[ "def", "route", "(", "self", ",", "uri", ",", "methods", "=", "frozenset", "(", "{", "\"GET\"", "}", ")", ",", "host", "=", "None", ",", "strict_slashes", "=", "None", ",", "stream", "=", "False", ",", "version", "=", "None", ",", "name", "=", "Non...
Create a blueprint route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: IP Address of FQDN for the sanic server to use. :param strict_slashes: Enforce the API urls are requested with a training */* :param stream: If the route should provide a streaming support :param version: Blueprint Version :param name: Unique name to identify the Route :return a decorated method that when invoked will return an object of type :class:`FutureRoute`
[ "Create", "a", "blueprint", "route", "from", "a", "decorated", "function", "." ]
python
train
icometrix/dicom2nifti
dicom2nifti/convert_ge.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_ge.py#L91-L136
def _4d_to_nifti(grouped_dicoms, output_file): """ This function will convert ge 4d series to a nifti """ # Create mosaic block logger.info('Creating data block') full_block = _get_full_block(grouped_dicoms) logger.info('Creating affine') # Create the nifti header info affine, slice_increment = common.create_affine(grouped_dicoms[0]) logger.info('Creating nifti') # Convert to nifti nii_image = nibabel.Nifti1Image(full_block, affine) common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime)) logger.info('Saving nifti to disk %s' % output_file) # Save to disk if output_file is not None: nii_image.to_filename(output_file) if _is_diffusion_imaging(grouped_dicoms): bval_file = None bvec_file = None # Create the bval en bevec files if output_file is not None: base_path = os.path.dirname(output_file) base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0] logger.info('Creating bval en bvec files') bval_file = '%s/%s.bval' % (base_path, base_name) bvec_file = '%s/%s.bvec' % (base_path, base_name) bval, bvec = _create_bvals_bvecs(grouped_dicoms, bval_file, bvec_file) return {'NII_FILE': output_file, 'BVAL_FILE': bval_file, 'BVEC_FILE': bvec_file, 'NII': nii_image, 'BVAL': bval, 'BVEC': bvec, 'MAX_SLICE_INCREMENT': slice_increment } return {'NII_FILE': output_file, 'NII': nii_image}
[ "def", "_4d_to_nifti", "(", "grouped_dicoms", ",", "output_file", ")", ":", "# Create mosaic block", "logger", ".", "info", "(", "'Creating data block'", ")", "full_block", "=", "_get_full_block", "(", "grouped_dicoms", ")", "logger", ".", "info", "(", "'Creating af...
This function will convert ge 4d series to a nifti
[ "This", "function", "will", "convert", "ge", "4d", "series", "to", "a", "nifti" ]
python
train
juju/python-libjuju
juju/client/_client2.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client2.py#L5174-L5187
async def Save(self, metadata): ''' metadata : typing.Sequence[~CloudImageMetadataList] Returns -> typing.Sequence[~ErrorResult] ''' # map input types to rpc msg _params = dict() msg = dict(type='ImageMetadata', request='Save', version=2, params=_params) _params['metadata'] = metadata reply = await self.rpc(msg) return reply
[ "async", "def", "Save", "(", "self", ",", "metadata", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'ImageMetadata'", ",", "request", "=", "'Save'", ",", "version", "=", "2", ",", "para...
metadata : typing.Sequence[~CloudImageMetadataList] Returns -> typing.Sequence[~ErrorResult]
[ "metadata", ":", "typing", ".", "Sequence", "[", "~CloudImageMetadataList", "]", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~ErrorResult", "]" ]
python
train
awslabs/sockeye
sockeye/utils.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L608-L701
def acquire_gpus(requested_device_ids: List[int], lock_dir: str = "/tmp", retry_wait_min: int = 10, retry_wait_rand: int = 60, num_gpus_available: Optional[int] = None): """ Acquire a number of GPUs in a transactional way. This method should be used inside a `with` statement. Will try to acquire all the requested number of GPUs. If currently not enough GPUs are available all locks will be released and we wait until we retry. Will retry until enough GPUs become available. :param requested_device_ids: The requested device ids, each number is either negative indicating the number of GPUs that will be allocated, or positive indicating we want to acquire a specific device id. :param lock_dir: The directory for storing the lock file. :param retry_wait_min: The minimum number of seconds to wait between retries. :param retry_wait_rand: Randomly add between 0 and `retry_wait_rand` seconds to the wait time. :param num_gpus_available: The number of GPUs available, if None we will call get_num_gpus(). :return: yields a list of GPU ids. """ if num_gpus_available is None: num_gpus_available = get_num_gpus() if num_gpus_available == 0: raise RuntimeError("Can not acquire GPU, as no GPUs were found on this machine.") if not os.path.exists(lock_dir): raise IOError("Lock directory %s does not exist." % lock_dir) if not os.access(lock_dir, os.W_OK): raise IOError("Lock directory %s is not writeable." % lock_dir) # split the device ids into the specific ids requested and count up the number of arbitrary ids we want # e.g. device_ids = [-3, 2, 5, 7, -5] means we want to acquire device 2, 5 and 7 plus 8 other devices. specific_device_ids = set() # type: Set[int] num_arbitrary_device_ids = 0 for device_id in requested_device_ids: if device_id < 0: num_gpus = -device_id num_arbitrary_device_ids += num_gpus else: if device_id in specific_device_ids: raise ValueError("Requested GPU %d twice." 
% device_id) specific_device_ids.add(device_id) # make sure we have enough GPUs available num_gpus_requested = len(specific_device_ids) + num_arbitrary_device_ids if num_gpus_requested > num_gpus_available: raise ValueError("Requested %d GPUs, but only %d are available." % (num_gpus_requested, num_gpus_available)) logger.info("Attempting to acquire %d GPUs of %d GPUs. The requested devices are: %s", num_gpus_requested, num_gpus_available, str(requested_device_ids)) # note: it's important to first allocate the specific device ids and then the others to not deadlock ourselves. # for specific device ids we just have the device id itself as a candidate candidates_to_request = [[device_id] for device_id in specific_device_ids] # for the arbitrary device ids we take all remaining device ids as a list of candidates remaining_device_ids = [device_id for device_id in range(num_gpus_available) if device_id not in specific_device_ids] candidates_to_request += [remaining_device_ids for _ in range(num_arbitrary_device_ids)] while True: with ExitStack() as exit_stack: any_failed = False acquired_gpus = [] # type: List[int] with GpuFileLock(candidates=["master_lock"], lock_dir=lock_dir) as master_lock: # type: str # Only one process, determined by the master lock, can try acquiring gpu locks at a time. # This will make sure that we use consecutive device ids whenever possible. if master_lock is not None: for candidates in candidates_to_request: gpu_id = exit_stack.enter_context(GpuFileLock(candidates=candidates, lock_dir=lock_dir)) if gpu_id is not None: acquired_gpus.append(cast(int, gpu_id)) else: if len(candidates) == 1: logger.info("Could not acquire GPU %d. 
It's currently locked.", candidates[0]) any_failed = True break if master_lock is not None and not any_failed: try: yield acquired_gpus except: # pylint: disable=try-except-raise raise return # randomize so that multiple processes starting at the same time don't retry at a similar point in time if retry_wait_rand > 0: retry_wait_actual = retry_wait_min + random.randint(0, retry_wait_rand) else: retry_wait_actual = retry_wait_min if master_lock is None: logger.info("Another process is acquiring GPUs at the moment will try again in %ss." % retry_wait_actual) else: logger.info("Not enough GPUs available will try again in %ss." % retry_wait_actual) time.sleep(retry_wait_actual)
[ "def", "acquire_gpus", "(", "requested_device_ids", ":", "List", "[", "int", "]", ",", "lock_dir", ":", "str", "=", "\"/tmp\"", ",", "retry_wait_min", ":", "int", "=", "10", ",", "retry_wait_rand", ":", "int", "=", "60", ",", "num_gpus_available", ":", "Op...
Acquire a number of GPUs in a transactional way. This method should be used inside a `with` statement. Will try to acquire all the requested number of GPUs. If currently not enough GPUs are available all locks will be released and we wait until we retry. Will retry until enough GPUs become available. :param requested_device_ids: The requested device ids, each number is either negative indicating the number of GPUs that will be allocated, or positive indicating we want to acquire a specific device id. :param lock_dir: The directory for storing the lock file. :param retry_wait_min: The minimum number of seconds to wait between retries. :param retry_wait_rand: Randomly add between 0 and `retry_wait_rand` seconds to the wait time. :param num_gpus_available: The number of GPUs available, if None we will call get_num_gpus(). :return: yields a list of GPU ids.
[ "Acquire", "a", "number", "of", "GPUs", "in", "a", "transactional", "way", ".", "This", "method", "should", "be", "used", "inside", "a", "with", "statement", ".", "Will", "try", "to", "acquire", "all", "the", "requested", "number", "of", "GPUs", ".", "If...
python
train
IdentityPython/pyop
src/pyop/provider.py
https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/provider.py#L202-L216
def _create_subject_identifier(self, user_id, client_id, redirect_uri): # type (str, str, str) -> str """ Creates a subject identifier for the specified client and user see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology"> "OpenID Connect Core 1.0", Section 1.2</a>. :param user_id: local user identifier :param client_id: which client to generate a subject identifier for :param redirect_uri: the clients' redirect_uri :return: a subject identifier for the user intended for client who made the authentication request """ supported_subject_types = self.configuration_information['subject_types_supported'][0] subject_type = self.clients[client_id].get('subject_type', supported_subject_types) sector_identifier = urlparse(redirect_uri).netloc return self.authz_state.get_subject_identifier(subject_type, user_id, sector_identifier)
[ "def", "_create_subject_identifier", "(", "self", ",", "user_id", ",", "client_id", ",", "redirect_uri", ")", ":", "# type (str, str, str) -> str", "supported_subject_types", "=", "self", ".", "configuration_information", "[", "'subject_types_supported'", "]", "[", "0", ...
Creates a subject identifier for the specified client and user see <a href="http://openid.net/specs/openid-connect-core-1_0.html#Terminology"> "OpenID Connect Core 1.0", Section 1.2</a>. :param user_id: local user identifier :param client_id: which client to generate a subject identifier for :param redirect_uri: the clients' redirect_uri :return: a subject identifier for the user intended for client who made the authentication request
[ "Creates", "a", "subject", "identifier", "for", "the", "specified", "client", "and", "user", "see", "<a", "href", "=", "http", ":", "//", "openid", ".", "net", "/", "specs", "/", "openid", "-", "connect", "-", "core", "-", "1_0", ".", "html#Terminology",...
python
train
PiotrDabkowski/Js2Py
js2py/base.py
https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/base.py#L1014-L1025
def PyJsStrictEq(a, b): '''a===b''' tx, ty = Type(a), Type(b) if tx != ty: return false if tx == 'Undefined' or tx == 'Null': return true if a.is_primitive(): #string bool and number case return Js(a.value == b.value) if a.Class == b.Class == 'PyObjectWrapper': return Js(a.obj == b.obj) return Js(a is b)
[ "def", "PyJsStrictEq", "(", "a", ",", "b", ")", ":", "tx", ",", "ty", "=", "Type", "(", "a", ")", ",", "Type", "(", "b", ")", "if", "tx", "!=", "ty", ":", "return", "false", "if", "tx", "==", "'Undefined'", "or", "tx", "==", "'Null'", ":", "r...
a===b
[ "a", "===", "b" ]
python
valid
globality-corp/microcosm-flask
microcosm_flask/paging.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/paging.py#L198-L211
def parse_result(cls, result): """ Parse a simple items result. May either be two item tuple containing items and a context dictionary (see: relation convention) or a list of items. """ if isinstance(result, tuple) == 2: items, context = result else: context = {} items = result return items, context
[ "def", "parse_result", "(", "cls", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "tuple", ")", "==", "2", ":", "items", ",", "context", "=", "result", "else", ":", "context", "=", "{", "}", "items", "=", "result", "return", "items"...
Parse a simple items result. May either be two item tuple containing items and a context dictionary (see: relation convention) or a list of items.
[ "Parse", "a", "simple", "items", "result", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidgetitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidgetitem.py#L503-L508
def sync(self, recursive=False): """ Syncs the information from this item to the tree and view. """ self.syncTree(recursive=recursive) self.syncView(recursive=recursive)
[ "def", "sync", "(", "self", ",", "recursive", "=", "False", ")", ":", "self", ".", "syncTree", "(", "recursive", "=", "recursive", ")", "self", ".", "syncView", "(", "recursive", "=", "recursive", ")" ]
Syncs the information from this item to the tree and view.
[ "Syncs", "the", "information", "from", "this", "item", "to", "the", "tree", "and", "view", "." ]
python
train
Genida/dependenpy
src/dependenpy/node.py
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/node.py#L251-L260
def print_treemap(self, format=None, output=sys.stdout, **kwargs): """ Print the matrix for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write. """ treemap = self.as_treemap() treemap.print(format=format, output=output, **kwargs)
[ "def", "print_treemap", "(", "self", ",", "format", "=", "None", ",", "output", "=", "sys", ".", "stdout", ",", "*", "*", "kwargs", ")", ":", "treemap", "=", "self", ".", "as_treemap", "(", ")", "treemap", ".", "print", "(", "format", "=", "format", ...
Print the matrix for self's nodes. Args: format (str): output format (csv, json or text). output (file): file descriptor on which to write.
[ "Print", "the", "matrix", "for", "self", "s", "nodes", "." ]
python
train
ethereum/lahja
lahja/endpoint.py
https://github.com/ethereum/lahja/blob/e3993c5892232887a11800ed3e66332febcee96b/lahja/endpoint.py#L348-L357
def stop(self) -> None: """ Stop the :class:`~lahja.endpoint.Endpoint` from receiving further events. """ if not self._running: return self._running = False self._receiving_queue.put_nowait((TRANSPARENT_EVENT, None)) self._internal_queue.put_nowait((TRANSPARENT_EVENT, None))
[ "def", "stop", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "_running", ":", "return", "self", ".", "_running", "=", "False", "self", ".", "_receiving_queue", ".", "put_nowait", "(", "(", "TRANSPARENT_EVENT", ",", "None", ")", ")", "s...
Stop the :class:`~lahja.endpoint.Endpoint` from receiving further events.
[ "Stop", "the", ":", "class", ":", "~lahja", ".", "endpoint", ".", "Endpoint", "from", "receiving", "further", "events", "." ]
python
train
codelv/enaml-native
src/enamlnative/android/android_grid_layout.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_grid_layout.py#L48-L53
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = GridLayout(self.get_context(), None, d.style)
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "self", ".", "widget", "=", "GridLayout", "(", "self", ".", "get_context", "(", ")", ",", "None", ",", "d", ".", "style", ")" ]
Create the underlying widget.
[ "Create", "the", "underlying", "widget", "." ]
python
train
kmpm/nodemcu-uploader
nodemcu_uploader/main.py
https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L20-L45
def destination_from_source(sources, use_glob=True): """ Split each of the sources in the array on ':' First part will be source, second will be destination. Modifies the the original array to contain only sources and returns an array of destinations. """ destinations = [] newsources = [] for i in range(0, len(sources)): srcdst = sources[i].split(':') if len(srcdst) == 2: destinations.append(srcdst[1]) newsources.append(srcdst[0]) #proper list assignment else: if use_glob: listing = glob.glob(srcdst[0]) for filename in listing: newsources.append(filename) #always use forward slash at destination destinations.append(filename.replace('\\', '/')) else: newsources.append(srcdst[0]) destinations.append(srcdst[0]) return [newsources, destinations]
[ "def", "destination_from_source", "(", "sources", ",", "use_glob", "=", "True", ")", ":", "destinations", "=", "[", "]", "newsources", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "sources", ")", ")", ":", "srcdst", "=", "sour...
Split each of the sources in the array on ':' First part will be source, second will be destination. Modifies the the original array to contain only sources and returns an array of destinations.
[ "Split", "each", "of", "the", "sources", "in", "the", "array", "on", ":", "First", "part", "will", "be", "source", "second", "will", "be", "destination", ".", "Modifies", "the", "the", "original", "array", "to", "contain", "only", "sources", "and", "return...
python
valid
nwilming/ocupy
ocupy/datamat.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L560-L584
def VectorFactory(fields, parameters, categories = None): ''' Creates a datamat from a dictionary that contains lists/arrays as values. Input: fields: Dictionary The values will be used as fields of the datamat and the keys as field names. parameters: Dictionary A dictionary whose values are added as parameters. Keys are used for parameter names. ''' fm = Datamat(categories = categories) fm._fields = list(fields.keys()) for (field, value) in list(fields.items()): try: fm.__dict__[field] = np.asarray(value) except ValueError: fm.__dict__[field] = np.asarray(value, dtype=np.object) fm._parameters = parameters for (field, value) in list(parameters.items()): fm.__dict__[field] = value fm._num_fix = len(fm.__dict__[list(fields.keys())[0]]) return fm
[ "def", "VectorFactory", "(", "fields", ",", "parameters", ",", "categories", "=", "None", ")", ":", "fm", "=", "Datamat", "(", "categories", "=", "categories", ")", "fm", ".", "_fields", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "for", ...
Creates a datamat from a dictionary that contains lists/arrays as values. Input: fields: Dictionary The values will be used as fields of the datamat and the keys as field names. parameters: Dictionary A dictionary whose values are added as parameters. Keys are used for parameter names.
[ "Creates", "a", "datamat", "from", "a", "dictionary", "that", "contains", "lists", "/", "arrays", "as", "values", "." ]
python
train
Loudr/pale
pale/adapters/flask.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/adapters/flask.py#L29-L58
def bind_blueprint(pale_api_module, flask_blueprint): """Binds an implemented pale API module to a Flask Blueprint.""" if not isinstance(flask_blueprint, Blueprint): raise TypeError(("pale.flask_adapter.bind_blueprint expected the " "passed in flask_blueprint to be an instance of " "Blueprint, but it was an instance of %s instead.") % (type(flask_blueprint),)) if not pale.is_pale_module(pale_api_module): raise TypeError(("pale.flask_adapter.bind_blueprint expected the " "passed in pale_api_module to be a module, and to " "have a _module_type defined to equal " "pale.ImplementationModule, but it was an instance of " "%s instead.") % (type(pale_api_module),)) endpoints = pale.extract_endpoints(pale_api_module) for endpoint in endpoints: endpoint._set_response_class(RESPONSE_CLASS) method = [endpoint._http_method] name = endpoint._route_name handler = endpoint._execute flask_blueprint.add_url_rule( endpoint._uri, name, view_func=ContextualizedHandler(handler), methods=method)
[ "def", "bind_blueprint", "(", "pale_api_module", ",", "flask_blueprint", ")", ":", "if", "not", "isinstance", "(", "flask_blueprint", ",", "Blueprint", ")", ":", "raise", "TypeError", "(", "(", "\"pale.flask_adapter.bind_blueprint expected the \"", "\"passed in flask_blue...
Binds an implemented pale API module to a Flask Blueprint.
[ "Binds", "an", "implemented", "pale", "API", "module", "to", "a", "Flask", "Blueprint", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/iterfile.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/iterfile.py#L35-L42
def _shape(self): """ Returns the shape of the data array associated with this file.""" hdu = self.open() _shape = hdu.shape if not self.inmemory: self.close() del hdu return _shape
[ "def", "_shape", "(", "self", ")", ":", "hdu", "=", "self", ".", "open", "(", ")", "_shape", "=", "hdu", ".", "shape", "if", "not", "self", ".", "inmemory", ":", "self", ".", "close", "(", ")", "del", "hdu", "return", "_shape" ]
Returns the shape of the data array associated with this file.
[ "Returns", "the", "shape", "of", "the", "data", "array", "associated", "with", "this", "file", "." ]
python
train
LuqueDaniel/pybooru
pybooru/api_danbooru.py
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L393-L402
def comment_vote(self, comment_id, score): """Lets you vote for a comment (Requires login). Parameters: comment_id (int): score (str): Can be: up, down. """ params = {'score': score} return self._get('comments/{0}/votes.json'.format(comment_id), params, method='POST', auth=True)
[ "def", "comment_vote", "(", "self", ",", "comment_id", ",", "score", ")", ":", "params", "=", "{", "'score'", ":", "score", "}", "return", "self", ".", "_get", "(", "'comments/{0}/votes.json'", ".", "format", "(", "comment_id", ")", ",", "params", ",", "...
Lets you vote for a comment (Requires login). Parameters: comment_id (int): score (str): Can be: up, down.
[ "Lets", "you", "vote", "for", "a", "comment", "(", "Requires", "login", ")", "." ]
python
train
phodge/homely
homely/_utils.py
https://github.com/phodge/homely/blob/98ddcf3e4f29b0749645817b4866baaea8376085/homely/_utils.py#L615-L648
def getstatus(): """Get the status of the previous 'homely update', or any 'homely update' that may be running in another process. """ if exists(RUNFILE): mtime = os.stat(RUNFILE).st_mtime with open(SECTIONFILE) as f: section = f.read().strip() # what section? return UpdateStatus.RUNNING, mtime, section if exists(PAUSEFILE): return UpdateStatus.PAUSED, None, None mtime = None if exists(TIMEFILE): mtime = os.stat(TIMEFILE).st_mtime if exists(FAILFILE): if not mtime: mtime = os.stat(FAILFILE).st_mtime # TODO: return a different error code when the error was inability to # contact one or more remote servers with open(FAILFILE) as f: content = f.read().strip() if content == UpdateStatus.NOCONN: return UpdateStatus.NOCONN, mtime, None elif content == UpdateStatus.DIRTY: return UpdateStatus.DIRTY, mtime, None return UpdateStatus.FAILED, mtime, None if mtime is None: return UpdateStatus.NEVER, None, None return UpdateStatus.OK, mtime, None
[ "def", "getstatus", "(", ")", ":", "if", "exists", "(", "RUNFILE", ")", ":", "mtime", "=", "os", ".", "stat", "(", "RUNFILE", ")", ".", "st_mtime", "with", "open", "(", "SECTIONFILE", ")", "as", "f", ":", "section", "=", "f", ".", "read", "(", ")...
Get the status of the previous 'homely update', or any 'homely update' that may be running in another process.
[ "Get", "the", "status", "of", "the", "previous", "homely", "update", "or", "any", "homely", "update", "that", "may", "be", "running", "in", "another", "process", "." ]
python
train
Yubico/python-pyhsm
pyhsm/util.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/util.py#L75-L86
def input_validate_nonce(nonce, name='nonce', pad = False): """ Input validation for nonces. """ if type(nonce) is not str: raise pyhsm.exception.YHSM_WrongInputType( \ name, str, type(nonce)) if len(nonce) > pyhsm.defines.YSM_AEAD_NONCE_SIZE: raise pyhsm.exception.YHSM_InputTooLong( name, pyhsm.defines.YSM_AEAD_NONCE_SIZE, len(nonce)) if pad: return nonce.ljust(pyhsm.defines.YSM_AEAD_NONCE_SIZE, chr(0x0)) else: return nonce
[ "def", "input_validate_nonce", "(", "nonce", ",", "name", "=", "'nonce'", ",", "pad", "=", "False", ")", ":", "if", "type", "(", "nonce", ")", "is", "not", "str", ":", "raise", "pyhsm", ".", "exception", ".", "YHSM_WrongInputType", "(", "name", ",", "s...
Input validation for nonces.
[ "Input", "validation", "for", "nonces", "." ]
python
train
DarkEnergySurvey/ugali
ugali/scratch/position_angle.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/position_angle.py#L63-L76
def plot_skyreg(header, data, **kwargs): """ Plot sky region defined by header and data header : FITS header data : Data array """ kwargs.setdefault('cmap','binary') fig = plt.figure() ax = pywcsgrid2.subplot(111, header=header) ax.set_ticklabel_type("dms") im = ax.imshow(data, origin="center", **kwargs) ax.grid() ax.add_compass(loc=1,coord='fk5') ax.add_compass(loc=4,coord='gal') return ax, im
[ "def", "plot_skyreg", "(", "header", ",", "data", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'cmap'", ",", "'binary'", ")", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "pywcsgrid2", ".", "subplot", "(", "111", ...
Plot sky region defined by header and data header : FITS header data : Data array
[ "Plot", "sky", "region", "defined", "by", "header", "and", "data", "header", ":", "FITS", "header", "data", ":", "Data", "array" ]
python
train