repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
sassoftware/saspy
saspy/sasproccommons.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasproccommons.py#L206-L304
def _makeProcCallMacro(self, objtype: str, objname: str, data: ['SASdata', str] = None, args: dict = None) -> str:
    """
    This method generates the SAS code from the python objects and included data and arguments.
    The list of args in this method is largely alphabetical but there are exceptions in order
    to satisfy the order needs of the statements for the procedure. as an example...
    http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_glm_syntax.htm#statug.glm.glmpostable

    :param objtype: str -- proc name
    :param objname: str -- 3 digit code for proc
    :param data: sas dataset object
    :param args: dict -- proc arguments
    :return: str -- the SAS code needed to execute on the server

    NOTE(review): ``args`` is mutated in place -- the ``pop()`` calls below
    remove 'procopts', 'out' and 'output' from the caller's dict.
    """
    plot = ''
    outmeth = ''
    procopts = args.pop('procopts', '')
    # Set ODS graphic generation to True by default
    ODSGraphics = args.get('ODSGraphics', True)

    # The different SAS products vary slightly in plotting and out methods.
    # this block sets the options correctly for plotting and output statements
    if self.sasproduct.lower() == 'stat' and not ('ODSGraphics' in args.keys() or ODSGraphics == False):
        plot = 'plot=all'
    if self.sasproduct.lower() == 'qc':
        pass
    if self.sasproduct.lower() == 'ets' and not ('ODSGraphics' in args.keys() or ODSGraphics == False):
        outmeth = 'out'
        plot = 'plot=all'
    if self.sasproduct.lower() == 'em':
        pass
    if self.sasproduct.lower() == 'vddml':
        outmeth = 'out'
    if self.sasproduct.lower() == 'util':
        outmeth = 'out'
    # PROC UNIVARIATE uses the bare 'plot' option and no out method,
    # regardless of which product family it came from.
    if objtype.lower() == 'univariate' and not ('ODSGraphics' in args.keys() or ODSGraphics == False):
        plot = 'plot'
        outmeth = ''

    # Output dataset may be given under either the 'out' or 'output' key.
    outds = args.pop('out', None)
    if outds == None:
        outds = args.pop('output', None)
    outcodegen = Codegen.new('out', outds)
    outcodegen.outmeth = outmeth
    outcodegen.objname = objname
    outstr = outcodegen.codestmt

    self.logger.debug("product caller: " + self.sasproduct.lower())
    debug_code = ''
    code = "%macro proccall(d);\n"

    # resolve issues with the Proc options, out= and plots=
    # The procopts statement should be in every procedure as a way to pass
    # arbitrary options to the procedures
    if 'outmeth' in args:
        outmeth = args['outmeth']
    if 'plot' in args:
        plot = args['plot']
    # Emit the PROC statement; the out method clause is only included when
    # both an out method and an output dataset were resolved above.
    if len(outmeth) and not outds == None:
        #outstr = outds.libref + '.' + outds.table
        code += "proc %s data=%s.%s%s %s %s=%s %s ;\n" % (
            objtype, data.libref, data.table, data._dsopts(), plot, outmeth, outstr, procopts)
    else:
        code += "proc %s data=%s.%s%s %s %s ;\n" % (
            objtype, data.libref, data.table, data._dsopts(), plot, procopts)
    # Re-insert the output dataset so the statement loop below generates it.
    if outds is not None:
        args['output'] = outds
    self.logger.debug("args value: " + str(args))
    self.logger.debug("args type: " + str(type(args)))

    # this list is largely alphabetical but there are exceptions in order to
    # satisfy the order needs of the statements for the procedures
    # as an example...
    # http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_glm_syntax.htm#statug.glm.glmpostable
    uoargs = {}
    orderedargs = {}
    keyorder = ['by', 'input', 'target', 'cls', 'model', 'output']
    # Split args into those with a required position (keyorder) and the rest.
    for k, v in args.items():
        if k in keyorder:
            orderedargs[k] = v
        else:
            uoargs[k] = v
    # Sort the positional statements by their index in keyorder, then append
    # the remaining statements after them in their original order.
    orderedargs = OrderedDict(sorted(orderedargs.items(), key=lambda i: keyorder.index(i[0])))
    for k, v in uoargs.items():
        orderedargs[k] = v
        orderedargs.move_to_end(k)

    # Generate one SAS statement per argument via Codegen.
    for key, value in orderedargs.items():
        gen = Codegen.new(key, value)
        gen.objtype = objtype
        gen.data = data
        gen.outmeth = outmeth
        gen.objname = objname
        code += gen.codestmt
        if gen.debug is not None:
            debug_code += gen.debug

    code += "run; quit; %mend;\n"
    code += "%%mangobj(%s,%s,%s);" % (objname, objtype, data.table)

    # Logger level 10 == logging.DEBUG: echo the generated code for debugging.
    if self.logger.level == 10:
        print("Proc code submission:\n " + str(code))
        print("\n\n\n" + debug_code)
    return code
[ "def", "_makeProcCallMacro", "(", "self", ",", "objtype", ":", "str", ",", "objname", ":", "str", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "args", ":", "dict", "=", "None", ")", "->", "str", ":", "plot", "=", "''", "...
This method generates the SAS code from the python objects and included data and arguments. The list of args in this method is largely alphabetical but there are exceptions in order to satisfy the order needs of the statements for the procedure. as an example... http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_glm_syntax.htm#statug.glm.glmpostable :param objtype: str -- proc name :param objname: str -- 3 digit code for proc :param data: sas dataset object :param args: dict -- proc arguments :return: str -- the SAS code needed to execute on the server
[ "This", "method", "generates", "the", "SAS", "code", "from", "the", "python", "objects", "and", "included", "data", "and", "arguments", ".", "The", "list", "of", "args", "in", "this", "method", "is", "largely", "alphabetical", "but", "there", "are", "excepti...
python
train
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1123-L1134
def endline_handle(self, original, loc, tokens):
    """Add line number information to end of line.

    :param original: the full source string being parsed
    :param loc: parse location used to derive the starting line number
    :param tokens: a single-element token list whose item holds the line text
    :return: the token text with a line-number wrapper appended to each line
    """
    internal_assert(len(tokens) == 1, "invalid endline tokens", tokens)
    lines = tokens[0].splitlines(True)
    if self.minify:
        # BUGFIX: the original `lines = lines[0]` bound a plain string here,
        # so the loop below iterated over its characters and wrapped a line
        # number around every single character. Slicing keeps the
        # list-of-lines shape while still dropping everything after line one.
        lines = lines[:1]
    out = []
    ln = lineno(loc, original)
    for endline in lines:
        out.append(self.wrap_line_number(self.adjust(ln)) + endline)
        ln += 1
    return "".join(out)
[ "def", "endline_handle", "(", "self", ",", "original", ",", "loc", ",", "tokens", ")", ":", "internal_assert", "(", "len", "(", "tokens", ")", "==", "1", ",", "\"invalid endline tokens\"", ",", "tokens", ")", "lines", "=", "tokens", "[", "0", "]", ".", ...
Add line number information to end of line.
[ "Add", "line", "number", "information", "to", "end", "of", "line", "." ]
python
train
odlgroup/odl
odl/contrib/tomo/elekta.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L23-L114
def elekta_icon_geometry(sad=780.0, sdd=1000.0,
                         piercing_point=(390.0, 0.0),
                         angles=None, num_angles=None,
                         detector_shape=(780, 720)):
    """Tomographic geometry of the Elekta Icon CBCT system.

    See the [whitepaper]_ for specific descriptions of each parameter.

    All measurements are given in millimeters unless otherwise stated.

    Parameters
    ----------
    sad : float, optional
        Source to Axis distance.
    sdd : float, optional
        Source to Detector distance.
    piercing_point : sequence of float, optional
        Position in the detector (in pixel coordinates) that a beam from
        the source, passing through the axis of rotation perpendicularly,
        hits.
    angles : array-like, optional
        List of angles given in radians that the projection images were
        taken at. Exclusive with num_angles.
        Default: np.linspace(1.2, 5.0, 332)
    num_angles : int, optional
        Number of angles. Exclusive with angles.
        Default: 332
    detector_shape : sequence of int, optional
        Shape of the detector (in pixels). Useful if a sub-sampled system
        should be studied.

    Returns
    -------
    elekta_icon_geometry : `ConeFlatGeometry`

    Examples
    --------
    Create default geometry:

    >>> from odl.contrib import tomo
    >>> geometry = tomo.elekta_icon_geometry()

    Use a smaller detector (improves efficiency):

    >>> small_geometry = tomo.elekta_icon_geometry(detector_shape=[100, 100])

    See Also
    --------
    elekta_icon_space : Default reconstruction space for the Elekta Icon CBCT.
    elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT.

    References
    ----------
    .. [whitepaper] *Design and performance characteristics of a Cone Beam
       CT system for Leksell Gamma Knife Icon*
    """
    # Validate scalar geometry parameters.
    sad = float(sad)
    assert sad > 0
    sdd = float(sdd)
    assert sdd > sad
    piercing_point = np.array(piercing_point, dtype=float)
    assert piercing_point.shape == (2,)

    # Resolve the angle partition from the mutually exclusive inputs.
    if angles is not None and num_angles is not None:
        raise ValueError('cannot provide both `angles` and `num_angles`')
    if angles is not None:
        angles = odl.nonuniform_partition(angles)
        assert angles.ndim == 1
    else:
        n_angles = 332 if num_angles is None else num_angles
        angles = odl.uniform_partition(1.2, 5.0, n_angles)

    detector_shape = np.array(detector_shape, dtype=int)

    # Constant system parameters (mm).
    pixel_size = 0.368
    det_extent_mm = np.array([287.04, 264.96])

    # The piercing point fixes where the detector origin sits.
    piercing_point_mm = pixel_size * piercing_point
    det_min_pt = -piercing_point_mm
    det_max_pt = det_min_pt + det_extent_mm
    detector_partition = odl.uniform_partition(min_pt=det_min_pt,
                                               max_pt=det_max_pt,
                                               shape=detector_shape)

    # Assemble and return the cone-beam geometry.
    return odl.tomo.ConeFlatGeometry(
        angles, detector_partition, src_radius=sad, det_radius=sdd - sad)
[ "def", "elekta_icon_geometry", "(", "sad", "=", "780.0", ",", "sdd", "=", "1000.0", ",", "piercing_point", "=", "(", "390.0", ",", "0.0", ")", ",", "angles", "=", "None", ",", "num_angles", "=", "None", ",", "detector_shape", "=", "(", "780", ",", "720...
Tomographic geometry of the Elekta Icon CBCT system. See the [whitepaper]_ for specific descriptions of each parameter. All measurements are given in millimeters unless otherwise stated. Parameters ---------- sad : float, optional Source to Axis distance. sdd : float, optional Source to Detector distance. piercing_point : sequence of float, optional Position in the detector (in pixel coordinates) that a beam from the source, passing through the axis of rotation perpendicularly, hits. angles : array-like, optional List of angles given in radians that the projection images were taken at. Exclusive with num_angles. Default: np.linspace(1.2, 5.0, 332) num_angles : int, optional Number of angles. Exclusive with angles. Default: 332 detector_shape : sequence of int, optional Shape of the detector (in pixels). Useful if a sub-sampled system should be studied. Returns ------- elekta_icon_geometry : `ConeFlatGeometry` Examples -------- Create default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() Use a smaller detector (improves efficiency): >>> small_geometry = tomo.elekta_icon_geometry(detector_shape=[100, 100]) See Also -------- elekta_icon_space : Default reconstruction space for the Elekta Icon CBCT. elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT. References ---------- .. [whitepaper] *Design and performance characteristics of a Cone Beam CT system for Leksell Gamma Knife Icon*
[ "Tomographic", "geometry", "of", "the", "Elekta", "Icon", "CBCT", "system", "." ]
python
train
cltk/cltk
cltk/stem/french/stem.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/stem/french/stem.py#L53-L74
def matchremove_verb_endings(word):
    """Remove the longest matching verb ending from ``word``.

    The endings list is ordered by length (longest first), then
    alphabetically, so the first match is the longest possible suffix.
    A word that consists entirely of an ending is returned unchanged
    rather than being reduced to the empty string.

    :param word: word to stem
    :return: the word with its verb ending removed, or unchanged
    """
    # verb endings sorted by charlen then alph (duplicates in the original
    # list are harmless: the loop breaks on the first hit)
    verb_endings = ['issiiens', 'isseient', 'issiiez', 'issons', 'issent',
                    'issant', 'isseie', 'isseit', 'issons', 'isseiz',
                    'assent', 'issons', 'isseiz', 'issent', 'iiens',
                    'eient', 'issez', 'oient', 'istes', 'ïstes', 'istes',
                    'astes', 'erent', 'istes', 'irent', 'ustes', 'urent',
                    'âmes', 'âtes', 'èrent', 'asses', 'isses', 'issez',
                    'ssons', 'sseiz', 'ssent', 'erent', 'eies', 'iiez',
                    'oies', 'iens', 'ions', 'oint', 'eret', 'imes', 'rent',
                    'ümes', 'ütes', 'ïmes', 'imes', 'asse', 'isse', 'usse',
                    'ames', 'imes', 'umes', 'asse', 'isse', 'sses', 'ssez',
                    'ons', 'ent', 'ant', 'eie', 'eit', 'int', 'ist', 'eiz',
                    'oie', 'oit', 'iez', 'ois', 'oit', 'iez', 'res', 'ert',
                    'ast', 'ist', 'sse', 'mes', 'er', 'es', 'et', 'ez',
                    'is', 're', 'oi', 'ïs', 'üs', 'ai', 'as', 'at', 'is',
                    'it', 'ui', 'us', 'ut', 'st', 's', 't', 'e', 'é', 'z',
                    'u', 'a', 'i']
    for ending in verb_endings:
        if word == ending:
            # Whole word is an ending: keep it intact.
            # (The original's `word = word` here was a no-op.)
            break
        if word.endswith(ending):
            # Plain suffix slicing replaces the original per-iteration
            # `re.sub` -- the endings contain no regex metacharacters,
            # so behavior is identical and no pattern is compiled.
            word = word[:-len(ending)]
            break
    return word
[ "def", "matchremove_verb_endings", "(", "word", ")", ":", "\"\"\"verb endings sorted by charlen then alph\"\"\"", "verb_endings", "=", "[", "'issiiens'", ",", "'isseient'", ",", "'issiiez'", ",", "'issons'", ",", "'issent'", ",", "'issant'", ",", "'isseie'", ",", "'is...
Remove the verb endings
[ "Remove", "the", "verb", "endings" ]
python
train
whtsky/parguments
parguments/cli.py
https://github.com/whtsky/parguments/blob/96aa23af411a67c2f70d856e81fa186bb187daab/parguments/cli.py#L78-L97
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
    """
    Grabs user input from command line and converts to boolean value.

    Re-prompts in a loop until the (lowercased) answer matches one of the
    yes or no choices.

    :param name: prompt text
    :param default: default value if no input provided.
    :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
    :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
    """
    yes_choices = yes_choices or ('y', 'yes', '1', 'on', 'true', 't')
    no_choices = no_choices or ('n', 'no', '0', 'off', 'false', 'f')

    # Shown as the suggested answer. A conditional expression replaces the
    # original `default and yes_choices[0] or no_choices[0]`, which would
    # silently fall through to the no-choice if yes_choices[0] were falsy
    # (e.g. an empty string). Hoisted out of the loop: it never changes.
    default_input = yes_choices[0] if default else no_choices[0]

    while True:
        rv = prompt(name + '?', default_input)
        if rv.lower() in yes_choices:
            return True
        if rv.lower() in no_choices:
            return False
[ "def", "prompt_bool", "(", "name", ",", "default", "=", "False", ",", "yes_choices", "=", "None", ",", "no_choices", "=", "None", ")", ":", "yes_choices", "=", "yes_choices", "or", "(", "'y'", ",", "'yes'", ",", "'1'", ",", "'on'", ",", "'true'", ",", ...
Grabs user input from command line and converts to boolean value. :param name: prompt text :param default: default value if no input provided. :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't' :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
[ "Grabs", "user", "input", "from", "command", "line", "and", "converts", "to", "boolean", "value", "." ]
python
train
sdispater/poetry
poetry/mixology/partial_solution.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/mixology/partial_solution.py#L171-L202
def satisfier(self, term):  # type: (Term) -> Assignment
    """
    Returns the first Assignment in this solution such that the sublist of
    assignments up to and including that entry collectively satisfies term.

    Raises RuntimeError if the whole assignment list never satisfies term
    (which indicates an internal solver bug).
    """
    # Running intersection of all assignments seen so far for this package.
    assigned_term = None  # type: Term

    for assignment in self._assignments:
        # Only assignments about the same package can contribute.
        if assignment.dependency.name != term.dependency.name:
            continue

        # NOTE(review): given the name check above, this condition looks
        # unreachable (the names are equal here, so the second clause is
        # always False) -- possibly a transcription of a different source
        # comparison; confirm against upstream before relying on it.
        if (
            not assignment.dependency.is_root
            and not assignment.dependency.name == term.dependency.name
        ):
            if not assignment.is_positive():
                continue

            assert not term.is_positive()

            return assignment

        if assigned_term is None:
            assigned_term = assignment
        else:
            assigned_term = assigned_term.intersect(assignment)

        # As soon as we have enough assignments to satisfy term, return them.
        if assigned_term.satisfies(term):
            return assignment

    raise RuntimeError("[BUG] {} is not satisfied.".format(term))
[ "def", "satisfier", "(", "self", ",", "term", ")", ":", "# type: (Term) -> Assignment", "assigned_term", "=", "None", "# type: Term", "for", "assignment", "in", "self", ".", "_assignments", ":", "if", "assignment", ".", "dependency", ".", "name", "!=", "term", ...
Returns the first Assignment in this solution such that the sublist of assignments up to and including that entry collectively satisfies term.
[ "Returns", "the", "first", "Assignment", "in", "this", "solution", "such", "that", "the", "sublist", "of", "assignments", "up", "to", "and", "including", "that", "entry", "collectively", "satisfies", "term", "." ]
python
train
Parsl/parsl
parsl/app/bash.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/app/bash.py#L12-L111
def remote_side_bash_executor(func, *args, **kwargs):
    """Execute the bash app type function and return the command line string.

    This string is reformatted with the *args, and **kwargs from call time.

    Recognized keyword arguments (all optional):
      - stdout / stderr: filename, or (filename, mode) tuple, to redirect the
        subprocess streams to.
      - walltime: timeout in seconds passed to ``proc.wait``.
      - outputs: list of expected output files (str or objects exposing
        ``.filepath``); raises MissingOutputs if any are absent afterwards.

    Returns the subprocess return code (0 on success); raises a
    parsl.app.errors exception on formatting, timeout or execution failure.
    """
    # Imports are local because this function is shipped to and executed on
    # the remote worker, where module-level state is not available.
    import os
    import time
    import subprocess
    import logging
    import parsl.app.errors as pe
    logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)

    # start_t = time.time()

    func_name = func.__name__

    partial_cmdline = None

    # Try to run the func to compose the commandline
    try:
        # Execute the func to get the commandline
        partial_cmdline = func(*args, **kwargs)
        # Reformat the commandline with current args and kwargs
        executable = partial_cmdline.format(*args, **kwargs)

    except AttributeError as e:
        # An AttributeError before the format() call means func returned None
        # (None has no .format); afterwards it is a genuine formatting error.
        if partial_cmdline is not None:
            raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
        else:
            raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned none - with this exception: {}".format(func_name, e), None)

    except IndexError as e:
        raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
    except Exception as e:
        logging.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
        raise e

    logging.debug("Executable: %s", executable)

    # Updating stdout, stderr if values passed at call time.

    def open_std_fd(fdname):
        # Open the redirect target named by kwargs[fdname], or None.
        # fdname is 'stdout' or 'stderr'
        stdfspec = kwargs.get(fdname)  # spec is str name or tuple (name, mode)
        if stdfspec is None:
            return None
        elif isinstance(stdfspec, str):
            fname = stdfspec
            mode = 'a+'
        elif isinstance(stdfspec, tuple):
            if len(stdfspec) != 2:
                raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
            fname, mode = stdfspec
        else:
            raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
        try:
            fd = open(fname, mode)
        except Exception as e:
            raise pe.BadStdStreamFile(fname, e)
        return fd

    std_out = open_std_fd('stdout')
    std_err = open_std_fd('stderr')
    timeout = kwargs.get('walltime')

    returncode = None
    try:
        # shell=True is intentional here: the app command line is a shell
        # string built by the user's bash app function.
        proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')
        proc.wait(timeout=timeout)
        returncode = proc.returncode

    except subprocess.TimeoutExpired:
        # print("Timeout")
        raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))

    except Exception as e:
        # NOTE(review): if Popen itself raised, `proc` is unbound here and
        # this raises NameError instead of AppException -- confirm upstream.
        # print("Caught exception: ", e)
        raise pe.AppException("[{}] App caught exception: {}".format(func_name, proc.returncode), e)

    if returncode != 0:
        raise pe.AppFailure("[{}] App failed with exit code: {}".format(func_name, proc.returncode), proc.returncode)

    # TODO : Add support for globs here

    # Verify that every declared output file now exists on disk.
    missing = []
    for outputfile in kwargs.get('outputs', []):
        fpath = outputfile
        if type(outputfile) != str:
            fpath = outputfile.filepath

        if not os.path.exists(fpath):
            missing.extend([outputfile])

    if missing:
        raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)

    # exec_duration = time.time() - start_t
    return returncode
[ "def", "remote_side_bash_executor", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "os", "import", "time", "import", "subprocess", "import", "logging", "import", "parsl", ".", "app", ".", "errors", "as", "pe", "logging", ".", ...
Execute the bash app type function and return the command line string. This string is reformatted with the *args, and **kwargs from call time.
[ "Execute", "the", "bash", "app", "type", "function", "and", "return", "the", "command", "line", "string", "." ]
python
valid
juju/python-libjuju
juju/client/_client3.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client3.py#L1673-L1688
async def UpdateCredentialsCheckModels(self, credentials, force):
    '''
    credentials : typing.Sequence[~TaggedCredential]
    force : bool
    Returns -> typing.Sequence[~UpdateCredentialResult]
    '''
    # Assemble the RPC request for the Cloud facade, version 3.
    _params = {
        'credentials': credentials,
        'force': force,
    }
    msg = {
        'type': 'Cloud',
        'request': 'UpdateCredentialsCheckModels',
        'version': 3,
        'params': _params,
    }
    reply = await self.rpc(msg)
    return reply
[ "async", "def", "UpdateCredentialsCheckModels", "(", "self", ",", "credentials", ",", "force", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Cloud'", ",", "request", "=", "'UpdateCredentialsCh...
credentials : typing.Sequence[~TaggedCredential] force : bool Returns -> typing.Sequence[~UpdateCredentialResult]
[ "credentials", ":", "typing", ".", "Sequence", "[", "~TaggedCredential", "]", "force", ":", "bool", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~UpdateCredentialResult", "]" ]
python
train
ojarva/python-sshpubkeys
sshpubkeys/keys.py
https://github.com/ojarva/python-sshpubkeys/blob/86dc1ab27ce82dcc091ce127416cc3ee219e9bec/sshpubkeys/keys.py#L251-L295
def parse_options(self, options):
    """Parses ssh options string.

    Returns a dict mapping option name -> list of values (True stands in
    for a value-less flag). Raises InvalidOptionNameError,
    MissingMandatoryOptionValueError, UnknownOptionNameError (strict mode)
    or InvalidOptionsError on malformed input.
    """
    quote_open = False
    parsed_options = {}

    def parse_add_single_option(opt):
        """Parses and validates a single option, and adds it to parsed_options field."""
        if "=" in opt:
            opt_name, opt_value = opt.split("=", 1)
            # Strip surrounding (and embedded) double quotes from the value.
            opt_value = opt_value.replace('"', '')
        else:
            # Flag option without a value.
            opt_name = opt
            opt_value = True
        if " " in opt_name or not self.OPTION_NAME_RE.match(opt_name):
            raise InvalidOptionNameError("%s is not valid option name." % opt_name)
        if self.strict_mode:
            # for/else: the else branch fires only if no spec entry matched.
            for valid_opt_name, value_required in self.OPTIONS_SPEC:
                if opt_name.lower() == valid_opt_name:
                    if value_required and opt_value is True:
                        raise MissingMandatoryOptionValueError("%s is missing mandatory value." % opt_name)
                    break
            else:
                raise UnknownOptionNameError("%s is unrecognized option name." % opt_name)
        # Options may repeat; collect all values per name.
        if opt_name not in parsed_options:
            parsed_options[opt_name] = []
        parsed_options[opt_name].append(opt_value)

    start_of_current_opt = 0
    i = 1  # Need to be set for empty options strings
    for i, character in enumerate(options):
        if character == '"':  # only double quotes are allowed, no need to care about single quotes
            quote_open = not quote_open
        if quote_open:
            # Commas inside quoted values do not split options.
            continue
        if character == ",":
            opt = options[start_of_current_opt:i]
            parse_add_single_option(opt)
            start_of_current_opt = i + 1
    # Data begins after the first space
    # NOTE(review): this off-by-one-looking guard skips the trailing segment
    # only when it would be a single character wide -- presumably to avoid
    # parsing the separator before the key data; confirm against callers.
    if start_of_current_opt + 1 != i:
        opt = options[start_of_current_opt:]
        parse_add_single_option(opt)
    if quote_open:
        raise InvalidOptionsError("Unbalanced quotes.")
    return parsed_options
[ "def", "parse_options", "(", "self", ",", "options", ")", ":", "quote_open", "=", "False", "parsed_options", "=", "{", "}", "def", "parse_add_single_option", "(", "opt", ")", ":", "\"\"\"Parses and validates a single option, and adds it to parsed_options field.\"\"\"", "i...
Parses ssh options string.
[ "Parses", "ssh", "options", "string", "." ]
python
test
gem/oq-engine
openquake/risklib/scientific.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L1184-L1196
def broadcast(func, composite_array, *args):
    """
    Broadcast an array function over a composite array.

    ``func`` is applied independently to each named field of
    ``composite_array``; the results are reassembled into a new composite
    array with the same field names (dtypes and shape taken from the
    per-field results).

    :param func: callable taking (field_array, *args) and returning an array
    :param composite_array: numpy structured array with named fields
    :param args: extra positional arguments forwarded to ``func``
    :returns: structured array of the per-field results
    """
    dic = {}
    dtypes = []
    shape = None
    for name in composite_array.dtype.names:
        dic[name] = func(composite_array[name], *args)
        dtypes.append((name, dic[name].dtype))
        # Track the result shape explicitly instead of relying on the loop
        # variable `name` leaking past the loop (all fields share one shape).
        shape = dic[name].shape
    res = numpy.zeros(shape, numpy.dtype(dtypes))
    for name in dic:
        res[name] = dic[name]
    return res
[ "def", "broadcast", "(", "func", ",", "composite_array", ",", "*", "args", ")", ":", "dic", "=", "{", "}", "dtypes", "=", "[", "]", "for", "name", "in", "composite_array", ".", "dtype", ".", "names", ":", "dic", "[", "name", "]", "=", "func", "(", ...
Broadcast an array function over a composite array
[ "Broadcast", "an", "array", "function", "over", "a", "composite", "array" ]
python
train
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L813-L874
def barcode(self, data, format, characters='off', height=48, width='small',
            parentheses='on', ratio='3:1', equalize='off',
            rss_symbol='rss14std', horiz_char_rss=2):
    '''Print a standard barcode in the specified format

    Args:
        data: the barcode data
        format: the barcode type you want. Choose between code39, itf,
            ean8/upca, upce, codabar, code128, gs1-128, rss
        characters: Whether you want characters below the bar code.
            'off' or 'on'
        height: Height, in dots.
        width: width of barcode. Choose 'xsmall' 'small' 'medium' 'large'
        parentheses: Parentheses deletion on or off. 'on' or 'off'.
            Only matters with GS1-128
        ratio: ratio between thick and thin bars. Choose '3:1', '2.5:1',
            and '2:1'
        equalize: equalize bar lengths, choose 'off' or 'on'
        rss_symbol: rss symbols model, choose from 'rss14std', 'rss14trun',
            'rss14stacked', 'rss14stackedomni', 'rsslimited',
            'rssexpandedstd', 'rssexpandedstacked'
        horiz_char_rss: for rss expanded stacked, specify the number of
            horizontal characters, must be an even number b/w 2 and 20.

    Raises:
        RuntimeError: if any of the choice parameters is not recognized.
    '''
    barcodes = {'code39': '0',
                'itf': '1',
                'ean8/upca': '5',
                'upce': '6',
                'codabar': '9',
                'code128': 'a',
                'gs1-128': 'b',
                'rss': 'c'}
    widths = {'xsmall': '0',
              'small': '1',
              'medium': '2',
              'large': '3'}
    ratios = {'3:1': '0',
              '2.5:1': '1',
              '2:1': '2'}
    rss_symbols = {'rss14std': '0',
                   'rss14trun': '1',
                   'rss14stacked': '2',
                   'rss14stackedomni': '3',
                   'rsslimited': '4',
                   'rssexpandedstd': '5',
                   'rssexpandedstacked': '6'}
    character_choices = {'off': '0', 'on': '1'}
    parentheses_choices = {'off': '1', 'on': '0'}
    equalize_choices = {'off': '0', 'on': '1'}
    sendstr = ''
    # Height is encoded as two bytes, low byte (n1) first.
    # BUGFIX: the original used `height/256`, which yields a float on
    # Python 3 and makes chr() raise TypeError; divmod keeps both integral
    # and is equivalent on Python 2.
    n2, n1 = divmod(height, 256)
    if (format in barcodes and width in widths and ratio in ratios
            and characters in character_choices and rss_symbol in rss_symbols):
        sendstr += (chr(27) + 'i' + 't' + barcodes[format] + 's' + 'p' + 'r'
                    + character_choices[characters] + 'u' + 'x' + 'y' + 'h'
                    + chr(n1) + chr(n2) + 'w' + widths[width] + 'e'
                    + parentheses_choices[parentheses] + 'o'
                    + rss_symbols[rss_symbol] + 'c' + chr(horiz_char_rss)
                    + 'z' + ratios[ratio] + 'f' + equalize_choices[equalize]
                    + 'b' + data + chr(92))
        # code128 / gs1-128 data is terminated by a double backslash.
        if format in ['code128', 'gs1-128']:
            sendstr += chr(92) + chr(92)
        self.send(sendstr)
    else:
        raise RuntimeError('Invalid parameters')
[ "def", "barcode", "(", "self", ",", "data", ",", "format", ",", "characters", "=", "'off'", ",", "height", "=", "48", ",", "width", "=", "'small'", ",", "parentheses", "=", "'on'", ",", "ratio", "=", "'3:1'", ",", "equalize", "=", "'off'", ",", "rss_...
Print a standard barcode in the specified format Args: data: the barcode data format: the barcode type you want. Choose between code39, itf, ean8/upca, upce, codabar, code128, gs1-128, rss characters: Whether you want characters below the bar code. 'off' or 'on' height: Height, in dots. width: width of barcode. Choose 'xsmall' 'small' 'medium' 'large' parentheses: Parentheses deletion on or off. 'on' or 'off' Only matters with GS1-128 ratio: ratio between thick and thin bars. Choose '3:1', '2.5:1', and '2:1' equalize: equalize bar lengths, choose 'off' or 'on' rss_symbol: rss symbols model, choose from 'rss14std', 'rss14trun', 'rss14stacked', 'rss14stackedomni', 'rsslimited', 'rssexpandedstd', 'rssexpandedstacked' horiz_char_rss: for rss expanded stacked, specify the number of horizontal characters, must be an even number b/w 2 and 20.
[ "Print", "a", "standard", "barcode", "in", "the", "specified", "format", "Args", ":", "data", ":", "the", "barcode", "data", "format", ":", "the", "barcode", "type", "you", "want", ".", "Choose", "between", "code39", "itf", "ean8", "/", "upca", "upce", "...
python
train
google/grr
grr/core/grr_response_core/lib/rdfvalues/structs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L820-L839
def Validate(self, value, **_): """Check that value is a valid enum.""" # None is a valid value - it means the field is not set. if value is None: return # If the value is a string we need to try to convert it to an integer. checked_value = value if isinstance(value, string_types): # NOTE: that when initializing from string, enum values are # case-insensitive. checked_value = self.enum.get(value.upper()) if checked_value is None and value.isdigit(): checked_value = int(value) if checked_value is None: raise type_info.TypeValueError( "Value %s is not a valid enum value for field %s" % (value, self.name)) return EnumNamedValue(checked_value, name=self.reverse_enum.get(value))
[ "def", "Validate", "(", "self", ",", "value", ",", "*", "*", "_", ")", ":", "# None is a valid value - it means the field is not set.", "if", "value", "is", "None", ":", "return", "# If the value is a string we need to try to convert it to an integer.", "checked_value", "="...
Check that value is a valid enum.
[ "Check", "that", "value", "is", "a", "valid", "enum", "." ]
python
train
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L522-L534
def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data
[ "def", "_form_loader", "(", "self", ",", "_", ")", ":", "data", "=", "{", "}", "for", "key", "in", "self", ".", "request", ".", "arguments", ":", "val", "=", "self", ".", "get_arguments", "(", "key", ")", "if", "len", "(", "val", ")", "==", "1", ...
function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are
[ "function", "to", "get", "the", "data", "from", "the", "urlencoded", "forms", "ignore", "the", "data", "passed", "in", "and", "just", "get", "the", "args", "from", "wherever", "they", "are" ]
python
train
anrosent/LT-code
lt/encode/__init__.py
https://github.com/anrosent/LT-code/blob/e13a4c927effc90f9d41ab3884f9fcbd95b9450d/lt/encode/__init__.py#L17-L43
def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA):
    """Generates an infinite sequence of blocks to transmit to the receiver.

    :param f: readable file-like object with the payload to encode
    :param blocksize: size in bytes of each source block
    :param seed: PRNG seed; generated randomly when not supplied
    :param c: robust soliton distribution parameter
    :param delta: robust soliton distribution parameter
    :yields: packed bytes -- (filesize, blocksize, blockseed) header followed
             by the XORed block payload, in network byte order
    """
    # Generate seed if not provided.
    # BUGFIX: the original `1 << 31 - 1` parses as `1 << 30` because `-`
    # binds tighter than `<<`; the intended upper bound is 2**31 - 1.
    if seed is None:
        seed = randint(0, (1 << 31) - 1)

    # get file blocks
    filesize, blocks = _split_file(f, blocksize)

    # init stream vars
    K = len(blocks)
    prng = sampler.PRNG(params=(K, delta, c))
    prng.set_seed(seed)

    # block generation loop
    while True:
        blockseed, d, ix_samples = prng.get_src_blocks()
        # XOR the sampled source blocks together.
        block_data = 0
        for ix in ix_samples:
            block_data ^= blocks[ix]

        # Generate blocks of XORed data in network byte order
        block = (filesize, blocksize, blockseed,
                 int.to_bytes(block_data, blocksize, sys.byteorder))
        yield pack('!III%ss' % blocksize, *block)
[ "def", "encoder", "(", "f", ",", "blocksize", ",", "seed", "=", "None", ",", "c", "=", "sampler", ".", "DEFAULT_C", ",", "delta", "=", "sampler", ".", "DEFAULT_DELTA", ")", ":", "# Generate seed if not provided", "if", "seed", "is", "None", ":", "seed", ...
Generates an infinite sequence of blocks to transmit to the receiver
[ "Generates", "an", "infinite", "sequence", "of", "blocks", "to", "transmit", "to", "the", "receiver" ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3966-L3978
def stop_accepting_passive_svc_checks(self):
    """Disable passive service check submission (globally)
    Format of the line that triggers function call::

    STOP_ACCEPTING_PASSIVE_SVC_CHECKS

    :return: None
    """
    conf = self.my_conf
    # Guard clause: nothing to do when already disabled.
    if not conf.accept_passive_service_checks:
        return
    # Record the modified attribute, flip the flag, and propagate.
    conf.modified_attributes |= DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
    conf.accept_passive_service_checks = False
    conf.explode_global_conf()
    self.daemon.update_program_status()
[ "def", "stop_accepting_passive_svc_checks", "(", "self", ")", ":", "if", "self", ".", "my_conf", ".", "accept_passive_service_checks", ":", "self", ".", "my_conf", ".", "modified_attributes", "|=", "DICT_MODATTR", "[", "\"MODATTR_PASSIVE_CHECKS_ENABLED\"", "]", ".", "...
Disable passive service check submission (globally) Format of the line that triggers function call:: STOP_ACCEPTING_PASSIVE_SVC_CHECKS :return: None
[ "Disable", "passive", "service", "check", "submission", "(", "globally", ")", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
nugget/python-insteonplm
insteonplm/messages/x10received.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/x10received.py#L56-L65
def command_msg(housecode, command): """Create an X10 message to send the house code and a command code.""" house_byte = 0 if isinstance(housecode, str): house_byte = insteonplm.utils.housecode_to_byte(housecode) << 4 elif isinstance(housecode, int) and housecode < 16: house_byte = housecode << 4 else: house_byte = housecode return X10Received(house_byte + command, 0x80)
[ "def", "command_msg", "(", "housecode", ",", "command", ")", ":", "house_byte", "=", "0", "if", "isinstance", "(", "housecode", ",", "str", ")", ":", "house_byte", "=", "insteonplm", ".", "utils", ".", "housecode_to_byte", "(", "housecode", ")", "<<", "4",...
Create an X10 message to send the house code and a command code.
[ "Create", "an", "X10", "message", "to", "send", "the", "house", "code", "and", "a", "command", "code", "." ]
python
train
DasIch/argvard
argvard/__init__.py
https://github.com/DasIch/argvard/blob/2603e323a995e0915ce41fcf49e2a82519556195/argvard/__init__.py#L151-L179
def main(self, signature=''): """ A decorator that is used to register the main function with the given `signature`:: @app.main() def main(context): # do something pass The main function is called, after any options and if no command has been called. """ signature = Signature.from_string(signature, option=False) def decorator(function): if self.main_func is not None: raise RuntimeError('main is already defined') try: function = annotations()(function) except RuntimeError: pass self.main_func = function self.main_signature = signature if function.__doc__: self.description = textwrap.dedent(function.__doc__).strip() return function return decorator
[ "def", "main", "(", "self", ",", "signature", "=", "''", ")", ":", "signature", "=", "Signature", ".", "from_string", "(", "signature", ",", "option", "=", "False", ")", "def", "decorator", "(", "function", ")", ":", "if", "self", ".", "main_func", "is...
A decorator that is used to register the main function with the given `signature`:: @app.main() def main(context): # do something pass The main function is called, after any options and if no command has been called.
[ "A", "decorator", "that", "is", "used", "to", "register", "the", "main", "function", "with", "the", "given", "signature", "::" ]
python
train
scott-griffiths/bitstring
bitstring.py
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L2411-L2447
def find(self, bs, start=None, end=None, bytealigned=None): """Find first occurrence of substring bs. Returns a single item tuple with the bit position if found, or an empty tuple if not found. The bit position (pos property) will also be set to the start of the substring if it is found. bs -- The bitstring to find. start -- The bit position to start the search. Defaults to 0. end -- The bit position one past the last bit to search. Defaults to self.len. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty, if start < 0, if end > self.len or if end < start. >>> BitArray('0xc3e').find('0b1111') (6,) """ bs = Bits(bs) if not bs.len: raise ValueError("Cannot find an empty bitstring.") start, end = self._validate_slice(start, end) if bytealigned is None: bytealigned = globals()['bytealigned'] if bytealigned and not bs.len % 8 and not self._datastore.offset: p = self._findbytes(bs.bytes, start, end, bytealigned) else: p = self._findregex(re.compile(bs._getbin()), start, end, bytealigned) # If called from a class that has a pos, set it try: self._pos = p[0] except (AttributeError, IndexError): pass return p
[ "def", "find", "(", "self", ",", "bs", ",", "start", "=", "None", ",", "end", "=", "None", ",", "bytealigned", "=", "None", ")", ":", "bs", "=", "Bits", "(", "bs", ")", "if", "not", "bs", ".", "len", ":", "raise", "ValueError", "(", "\"Cannot fin...
Find first occurrence of substring bs. Returns a single item tuple with the bit position if found, or an empty tuple if not found. The bit position (pos property) will also be set to the start of the substring if it is found. bs -- The bitstring to find. start -- The bit position to start the search. Defaults to 0. end -- The bit position one past the last bit to search. Defaults to self.len. bytealigned -- If True the bitstring will only be found on byte boundaries. Raises ValueError if bs is empty, if start < 0, if end > self.len or if end < start. >>> BitArray('0xc3e').find('0b1111') (6,)
[ "Find", "first", "occurrence", "of", "substring", "bs", "." ]
python
train
craffel/mir_eval
mir_eval/chord.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L594-L615
def rotate_bitmaps_to_roots(bitmaps, roots): """Circularly shift a relative bitmaps to asbolute pitch classes. See :func:`rotate_bitmap_to_root` for more information. Parameters ---------- bitmap : np.ndarray, shape=(N, 12) Bitmap of active notes, relative to the given root. root : np.ndarray, shape=(N,) Absolute pitch class number. Returns ------- bitmap : np.ndarray, shape=(N, 12) Absolute bitmaps of active pitch classes. """ abs_bitmaps = [] for bitmap, chord_root in zip(bitmaps, roots): abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root)) return np.asarray(abs_bitmaps)
[ "def", "rotate_bitmaps_to_roots", "(", "bitmaps", ",", "roots", ")", ":", "abs_bitmaps", "=", "[", "]", "for", "bitmap", ",", "chord_root", "in", "zip", "(", "bitmaps", ",", "roots", ")", ":", "abs_bitmaps", ".", "append", "(", "rotate_bitmap_to_root", "(", ...
Circularly shift a relative bitmaps to asbolute pitch classes. See :func:`rotate_bitmap_to_root` for more information. Parameters ---------- bitmap : np.ndarray, shape=(N, 12) Bitmap of active notes, relative to the given root. root : np.ndarray, shape=(N,) Absolute pitch class number. Returns ------- bitmap : np.ndarray, shape=(N, 12) Absolute bitmaps of active pitch classes.
[ "Circularly", "shift", "a", "relative", "bitmaps", "to", "asbolute", "pitch", "classes", "." ]
python
train
saltstack/salt
salt/modules/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L539-L545
def _api_key_patch_add(conn, apiKey, pvlist): ''' the add patch operation for a list of (path, value) tuples on an ApiKey resource list path ''' response = conn.update_api_key(apiKey=apiKey, patchOperations=_api_key_patchops('add', pvlist)) return response
[ "def", "_api_key_patch_add", "(", "conn", ",", "apiKey", ",", "pvlist", ")", ":", "response", "=", "conn", ".", "update_api_key", "(", "apiKey", "=", "apiKey", ",", "patchOperations", "=", "_api_key_patchops", "(", "'add'", ",", "pvlist", ")", ")", "return",...
the add patch operation for a list of (path, value) tuples on an ApiKey resource list path
[ "the", "add", "patch", "operation", "for", "a", "list", "of", "(", "path", "value", ")", "tuples", "on", "an", "ApiKey", "resource", "list", "path" ]
python
train
lmjohns3/theanets
theanets/graph.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/graph.py#L700-L710
def updates(self, **kwargs): '''Return expressions to run as updates during network training. Returns ------- updates : list of (parameter, expression) pairs A list of named parameter update expressions for this network. ''' regs = regularizers.from_kwargs(self, **kwargs) _, updates = self.build_graph(regs) return updates
[ "def", "updates", "(", "self", ",", "*", "*", "kwargs", ")", ":", "regs", "=", "regularizers", ".", "from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", "_", ",", "updates", "=", "self", ".", "build_graph", "(", "regs", ")", "return", "updates" ...
Return expressions to run as updates during network training. Returns ------- updates : list of (parameter, expression) pairs A list of named parameter update expressions for this network.
[ "Return", "expressions", "to", "run", "as", "updates", "during", "network", "training", "." ]
python
test
lago-project/lago
lago/templates.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/templates.py#L494-L503
def get_hash(self): """ Returns the associated hash for this template version Returns: str: Hash for this version """ if self._hash is None: self._hash = self._source.get_hash(self._handle).strip() return self._hash
[ "def", "get_hash", "(", "self", ")", ":", "if", "self", ".", "_hash", "is", "None", ":", "self", ".", "_hash", "=", "self", ".", "_source", ".", "get_hash", "(", "self", ".", "_handle", ")", ".", "strip", "(", ")", "return", "self", ".", "_hash" ]
Returns the associated hash for this template version Returns: str: Hash for this version
[ "Returns", "the", "associated", "hash", "for", "this", "template", "version" ]
python
train
ianmiell/shutit
shutit_patterns/shutitfile.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_patterns/shutitfile.py#L856-L869
def scan_text(text): """Scan text, and replace items that match shutit's pattern format, ie: {{ shutit.THING }} """ while True: match = re.match("(.*){{ shutit.(.*) }}(.*)$", text) if match: before = match.group(1) name = match.group(2) after = match.group(3) text = before + """''' + shutit.cfg[self.module_id][\"""" + name + """\"] + '''""" + after else: break return text
[ "def", "scan_text", "(", "text", ")", ":", "while", "True", ":", "match", "=", "re", ".", "match", "(", "\"(.*){{ shutit.(.*) }}(.*)$\"", ",", "text", ")", "if", "match", ":", "before", "=", "match", ".", "group", "(", "1", ")", "name", "=", "match", ...
Scan text, and replace items that match shutit's pattern format, ie: {{ shutit.THING }}
[ "Scan", "text", "and", "replace", "items", "that", "match", "shutit", "s", "pattern", "format", "ie", ":", "{{", "shutit", ".", "THING", "}}" ]
python
train
wummel/linkchecker
linkcheck/checker/urlbase.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/urlbase.py#L633-L642
def read_content(self): """Return data for this URL. Can be overridden in subclasses.""" buf = StringIO() data = self.read_content_chunk() while data: if buf.tell() + len(data) > self.aggregate.config["maxfilesizedownload"]: raise LinkCheckerError(_("File size too large")) buf.write(data) data = self.read_content_chunk() return buf.getvalue()
[ "def", "read_content", "(", "self", ")", ":", "buf", "=", "StringIO", "(", ")", "data", "=", "self", ".", "read_content_chunk", "(", ")", "while", "data", ":", "if", "buf", ".", "tell", "(", ")", "+", "len", "(", "data", ")", ">", "self", ".", "a...
Return data for this URL. Can be overridden in subclasses.
[ "Return", "data", "for", "this", "URL", ".", "Can", "be", "overridden", "in", "subclasses", "." ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/autopep8.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/autopep8.py#L3757-L3796
def main(argv=None, apply_config=True): """Command-line entry.""" if argv is None: argv = sys.argv try: # Exit on broken pipe. signal.signal(signal.SIGPIPE, signal.SIG_DFL) except AttributeError: # pragma: no cover # SIGPIPE is not available on Windows. pass try: args = parse_args(argv[1:], apply_config=apply_config) if args.list_fixes: for code, description in sorted(supported_fixes()): print('{code} - {description}'.format( code=code, description=description)) return 0 if args.files == ['-']: assert not args.in_place encoding = sys.stdin.encoding or get_encoding() # LineEndingWrapper is unnecessary here due to the symmetry between # standard in and standard out. wrap_output(sys.stdout, encoding=encoding).write( fix_code(sys.stdin.read(), args, encoding=encoding)) else: if args.in_place or args.diff: args.files = list(set(args.files)) else: assert len(args.files) == 1 assert not args.recursive fix_multiple_files(args.files, args, sys.stdout) except KeyboardInterrupt: return 1
[ "def", "main", "(", "argv", "=", "None", ",", "apply_config", "=", "True", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "try", ":", "# Exit on broken pipe.", "signal", ".", "signal", "(", "signal", ".", "SIGPIPE", ",", ...
Command-line entry.
[ "Command", "-", "line", "entry", "." ]
python
train
cs50/lib50
lib50/_api.py
https://github.com/cs50/lib50/blob/941767f6c0a3b81af0cdea48c25c8d5a761086eb/lib50/_api.py#L58-L97
def local(tool, slug, config_loader, offline=False): """ Create/update local copy of github.com/org/repo/branch. Returns path to local copy """ # Parse slug slug = Slug(slug, offline=offline) local_path = Path(LOCAL_PATH).expanduser() / slug.org / slug.repo git = Git(f"-C {shlex.quote(str(local_path))}") if not local_path.exists(): _run(Git()(f"init {shlex.quote(str(local_path))}")) _run(git(f"remote add origin https://github.com/{slug.org}/{slug.repo}")) if not offline: # Get latest version of checks _run(git(f"fetch origin {slug.branch}")) # Ensure that local copy of the repo is identical to remote copy _run(git(f"checkout -f -B {slug.branch} origin/{slug.branch}")) _run(git(f"reset --hard HEAD")) problem_path = (local_path / slug.problem).absolute() if not problem_path.exists(): raise InvalidSlugError(_("{} does not exist at {}/{}").format(slug.problem, slug.org, slug.repo)) # Get config try: with open(problem_path / ".cs50.yaml") as f: try: config = config_loader.load(f.read()) except InvalidConfigError: raise InvalidSlugError( _("Invalid slug for {}. Did you mean something else?").format(tool)) except FileNotFoundError: raise InvalidSlugError(_("Invalid slug. Did you mean something else?")) return problem_path
[ "def", "local", "(", "tool", ",", "slug", ",", "config_loader", ",", "offline", "=", "False", ")", ":", "# Parse slug", "slug", "=", "Slug", "(", "slug", ",", "offline", "=", "offline", ")", "local_path", "=", "Path", "(", "LOCAL_PATH", ")", ".", "expa...
Create/update local copy of github.com/org/repo/branch. Returns path to local copy
[ "Create", "/", "update", "local", "copy", "of", "github", ".", "com", "/", "org", "/", "repo", "/", "branch", ".", "Returns", "path", "to", "local", "copy" ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L1609-L1631
def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. """ return (self in IPv6Network('::/8') or self in IPv6Network('100::/8') or self in IPv6Network('200::/7') or self in IPv6Network('400::/6') or self in IPv6Network('800::/5') or self in IPv6Network('1000::/4') or self in IPv6Network('4000::/3') or self in IPv6Network('6000::/3') or self in IPv6Network('8000::/3') or self in IPv6Network('A000::/3') or self in IPv6Network('C000::/3') or self in IPv6Network('E000::/4') or self in IPv6Network('F000::/5') or self in IPv6Network('F800::/6') or self in IPv6Network('FE00::/9'))
[ "def", "is_reserved", "(", "self", ")", ":", "return", "(", "self", "in", "IPv6Network", "(", "'::/8'", ")", "or", "self", "in", "IPv6Network", "(", "'100::/8'", ")", "or", "self", "in", "IPv6Network", "(", "'200::/7'", ")", "or", "self", "in", "IPv6Netw...
Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges.
[ "Test", "if", "the", "address", "is", "otherwise", "IETF", "reserved", "." ]
python
train
terrycain/aioboto3
aioboto3/s3/cse.py
https://github.com/terrycain/aioboto3/blob/0fd192175461f7bb192f3ed9a872591caf8474ac/aioboto3/s3/cse.py#L178-L194
async def get_decryption_aes_key(self, key: bytes, material_description: Dict[str, Any]) -> bytes: """ Get decryption key for a given S3 object :param key: Base64 decoded version of x-amz-key :param material_description: JSON decoded x-amz-matdesc :return: Raw AES key bytes """ # So it seems when java just calls Cipher.getInstance('AES') it'll default to AES/ECB/PKCS5Padding aesecb = self._cipher.decryptor() padded_result = await self._loop.run_in_executor(None, lambda: (aesecb.update(key) + aesecb.finalize())) unpadder = PKCS7(AES.block_size).unpadder() result = await self._loop.run_in_executor(None, lambda: (unpadder.update(padded_result) + unpadder.finalize())) return result
[ "async", "def", "get_decryption_aes_key", "(", "self", ",", "key", ":", "bytes", ",", "material_description", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "bytes", ":", "# So it seems when java just calls Cipher.getInstance('AES') it'll default to AES/ECB/PKCS5Pad...
Get decryption key for a given S3 object :param key: Base64 decoded version of x-amz-key :param material_description: JSON decoded x-amz-matdesc :return: Raw AES key bytes
[ "Get", "decryption", "key", "for", "a", "given", "S3", "object" ]
python
train
welbornprod/colr
colr/colr.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr.py#L1065-L1108
def _iter_text_wave( self, text, numbers, step=1, fore=None, back=None, style=None, rgb_mode=False): """ Yield colorized characters from `text`, using a wave of `numbers`. Arguments: text : String to be colorized. numbers : A list/tuple of numbers (256 colors). step : Number of characters to colorize per color. fore : Fore color to use (name or number). (Back will be gradient) back : Background color to use (name or number). (Fore will be gradient) style : Style name to use. rgb_mode : Use number for rgb value. This should never be used when the numbers are rgb values themselves. """ if fore and back: raise ValueError('Both fore and back colors cannot be specified.') pos = 0 end = len(text) numbergen = self._iter_wave(numbers) def make_color(n): try: r, g, b = n except TypeError: if rgb_mode: return n, n, n return n return r, g, b for value in numbergen: lastchar = pos + step yield self.color( text[pos:lastchar], fore=make_color(value) if fore is None else fore, back=make_color(value) if fore is not None else back, style=style ) if lastchar >= end: numbergen.send(True) pos = lastchar
[ "def", "_iter_text_wave", "(", "self", ",", "text", ",", "numbers", ",", "step", "=", "1", ",", "fore", "=", "None", ",", "back", "=", "None", ",", "style", "=", "None", ",", "rgb_mode", "=", "False", ")", ":", "if", "fore", "and", "back", ":", "...
Yield colorized characters from `text`, using a wave of `numbers`. Arguments: text : String to be colorized. numbers : A list/tuple of numbers (256 colors). step : Number of characters to colorize per color. fore : Fore color to use (name or number). (Back will be gradient) back : Background color to use (name or number). (Fore will be gradient) style : Style name to use. rgb_mode : Use number for rgb value. This should never be used when the numbers are rgb values themselves.
[ "Yield", "colorized", "characters", "from", "text", "using", "a", "wave", "of", "numbers", ".", "Arguments", ":", "text", ":", "String", "to", "be", "colorized", ".", "numbers", ":", "A", "list", "/", "tuple", "of", "numbers", "(", "256", "colors", ")", ...
python
train
BerkeleyAutomation/perception
perception/realsense_sensor.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/realsense_sensor.py#L179-L189
def stop(self): """Stop the sensor. """ # check that everything is running if not self._running: logging.warning('Realsense not running. Aborting stop.') return False self._pipe.stop() self._running = False return True
[ "def", "stop", "(", "self", ")", ":", "# check that everything is running", "if", "not", "self", ".", "_running", ":", "logging", ".", "warning", "(", "'Realsense not running. Aborting stop.'", ")", "return", "False", "self", ".", "_pipe", ".", "stop", "(", ")",...
Stop the sensor.
[ "Stop", "the", "sensor", "." ]
python
train
seequent/properties
properties/basic.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L34-L53
def accept_kwargs(func): """Wrap a function that may not accept kwargs so they are accepted The output function will always have call signature of :code:`func(val, **kwargs)`, whereas the original function may have call signatures of :code:`func(val)` or :code:`func(val, **kwargs)`. In the case of the former, rather than erroring, kwargs are just ignored. This method is called on serializer/deserializer function; these functions always receive kwargs from serialize, but by using this, the original functions may simply take a single value. """ def wrapped(val, **kwargs): """Perform a function on a value, ignoring kwargs if necessary""" try: return func(val, **kwargs) except TypeError: return func(val) return wrapped
[ "def", "accept_kwargs", "(", "func", ")", ":", "def", "wrapped", "(", "val", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Perform a function on a value, ignoring kwargs if necessary\"\"\"", "try", ":", "return", "func", "(", "val", ",", "*", "*", "kwargs", ")", "...
Wrap a function that may not accept kwargs so they are accepted The output function will always have call signature of :code:`func(val, **kwargs)`, whereas the original function may have call signatures of :code:`func(val)` or :code:`func(val, **kwargs)`. In the case of the former, rather than erroring, kwargs are just ignored. This method is called on serializer/deserializer function; these functions always receive kwargs from serialize, but by using this, the original functions may simply take a single value.
[ "Wrap", "a", "function", "that", "may", "not", "accept", "kwargs", "so", "they", "are", "accepted" ]
python
train
twisted/txaws
txaws/server/call.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/call.py#L40-L52
def parse(self, schema, strict=True): """Update C{args} and C{rest}, parsing the raw request arguments. @param schema: The L{Schema} the parameters must be extracted with. @param strict: If C{True} an error is raised if parameters not included in the schema are found, otherwise the extra parameters will be saved in the C{rest} attribute. """ self.args, self.rest = schema.extract(self._raw_params) if strict and self.rest: raise APIError(400, "UnknownParameter", "The parameter %s is not " "recognized" % self.rest.keys()[0])
[ "def", "parse", "(", "self", ",", "schema", ",", "strict", "=", "True", ")", ":", "self", ".", "args", ",", "self", ".", "rest", "=", "schema", ".", "extract", "(", "self", ".", "_raw_params", ")", "if", "strict", "and", "self", ".", "rest", ":", ...
Update C{args} and C{rest}, parsing the raw request arguments. @param schema: The L{Schema} the parameters must be extracted with. @param strict: If C{True} an error is raised if parameters not included in the schema are found, otherwise the extra parameters will be saved in the C{rest} attribute.
[ "Update", "C", "{", "args", "}", "and", "C", "{", "rest", "}", "parsing", "the", "raw", "request", "arguments", "." ]
python
train
Esri/ArcREST
src/arcresthelper/portalautomation.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/portalautomation.py#L301-L440
def createGroups(self, configFiles, dateTimeFormat=None): """Parses a JSON configuration file to create groups. Args: configFiles (list): A list of JSON files on disk containing configuration data for creating groups. dateTimeFormat (str): A valid date formatting directive, as understood by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e., ``'%Y-%m-%d %H:%M'``. """ groupInfo = None groupFile = None iconPath = None startTime = None thumbnail = None result = None config = None sciptPath = None orgTools = None if dateTimeFormat is None: dateTimeFormat = '%Y-%m-%d %H:%M' scriptStartTime = datetime.datetime.now() try: print ("********************Create Groups********************") print ("Script started at %s" % scriptStartTime.strftime(dateTimeFormat)) if self.securityhandler is None or \ self.securityhandler.valid == False: print ("Login required") else: orgTools = orgtools.orgtools(securityinfo=self) if orgTools is None: print ("Error creating orgtools") else: for configFile in configFiles: config = common.init_config_json(config_file=configFile) if config is not None: startTime = datetime.datetime.now() print ("Processing config %s, starting at: %s" % (configFile,startTime.strftime(dateTimeFormat))) groupInfo = config['Groups'] groupFile = groupInfo['GroupInfo'] iconPath = groupInfo['IconPath'] if os.path.isfile(groupFile): with open(groupFile, 'rb') as csvfile: for row in csv.DictReader(csvfile,dialect='excel'): if os.path.isfile(os.path.join(iconPath,row['thumbnail'])): thumbnail = os.path.join(iconPath,row['thumbnail']) if not os.path.isabs(thumbnail): sciptPath = os.getcwd() thumbnail = os.path.join(sciptPath,thumbnail) result = orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \ isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly'],thumbnail=thumbnail) else: result = 
orgTools.createGroup(title=row['title'],description=row['description'],tags=row['tags'],snippet=row['snippet'],phone=row['phone'],access=row['access'],sortField=row['sortField'],sortOrder=row['sortOrder'], \ isViewOnly=row['isViewOnly'],isInvitationOnly=row['isInvitationOnly']) if result is None: pass else: print ("Group created: " + result.title) print ("Config %s completed, time to complete: %s" % (configFile, str(datetime.datetime.now() - startTime))) else: print ("Config %s not found" % configFile) except(TypeError,ValueError,AttributeError) as e: print (e) except (common.ArcRestHelperError) as e: print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) except Exception as e: if (reportToolsInstalled): if isinstance(e,(ReportTools.ReportToolsError,DataPrep.DataPrepError)): print ("error in function: %s" % e[0]['function']) print ("error on line: %s" % e[0]['line']) print ("error in file name: %s" % e[0]['filename']) print ("with error message: %s" % e[0]['synerror']) if 'arcpyError' in e[0]: print ("with arcpy message: %s" % e[0]['arcpyError']) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % filename) print ("with error message: %s" % synerror) else: line, filename, synerror = trace() print ("error on line: %s" % line) print ("error in file name: %s" % filename) print ("with error message: %s" % synerror) finally: print ("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime)) print ("###############Create Groups Completed#################") print ("") #if orgTools is not None: #orgTools.dispose() groupInfo = None groupFile = None iconPath = None startTime = None thumbnail = None result = None config = None sciptPath = None orgTools = None del 
groupInfo del groupFile del iconPath del startTime del thumbnail del result del config del sciptPath del orgTools gc.collect()
[ "def", "createGroups", "(", "self", ",", "configFiles", ",", "dateTimeFormat", "=", "None", ")", ":", "groupInfo", "=", "None", "groupFile", "=", "None", "iconPath", "=", "None", "startTime", "=", "None", "thumbnail", "=", "None", "result", "=", "None", "c...
Parses a JSON configuration file to create groups. Args: configFiles (list): A list of JSON files on disk containing configuration data for creating groups. dateTimeFormat (str): A valid date formatting directive, as understood by :py:meth:`datetime.datetime.strftime`. Defaults to ``None``, i.e., ``'%Y-%m-%d %H:%M'``.
[ "Parses", "a", "JSON", "configuration", "file", "to", "create", "groups", "." ]
python
train
LogicalDash/LiSE
allegedb/allegedb/query.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/query.py#L282-L293
def graph_val_dump(self): """Yield the entire contents of the graph_val table.""" self._flush_graph_val() for (graph, key, branch, turn, tick, value) in self.sql('graph_val_dump'): yield ( self.unpack(graph), self.unpack(key), branch, turn, tick, self.unpack(value) )
[ "def", "graph_val_dump", "(", "self", ")", ":", "self", ".", "_flush_graph_val", "(", ")", "for", "(", "graph", ",", "key", ",", "branch", ",", "turn", ",", "tick", ",", "value", ")", "in", "self", ".", "sql", "(", "'graph_val_dump'", ")", ":", "yiel...
Yield the entire contents of the graph_val table.
[ "Yield", "the", "entire", "contents", "of", "the", "graph_val", "table", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L2669-L2850
def load(args): ''' %prog load gff_file fasta_file [--options] Parses the selected features out of GFF, with subfeatures concatenated. For example, to get the CDS sequences, do this: $ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS To get 500bp upstream of a genes Transcription Start Site (TSS), do this: $ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500 Switch TSS with TrSS for Translation Start Site. ''' from datetime import datetime as dt from jcvi.formats.fasta import Seq, SeqRecord # can request output fasta sequence id to be picked from following attributes valid_id_attributes = ["ID", "Name", "Parent", "Alias", "Target"] p = OptionParser(load.__doc__) p.add_option("--parents", dest="parents", default="mRNA", help="list of features to extract, use comma to separate (e.g." + \ "'gene,mRNA') [default: %default]") p.add_option("--children", dest="children", default="CDS", help="list of features to extract, use comma to separate (e.g." + \ "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") p.add_option("--feature", dest="feature", help="feature type to extract. e.g. 
`--feature=CDS` or " + \ "`--feature=upstream:TSS:500` [default: %default]") p.add_option("--id_attribute", choices=valid_id_attributes, help="The attribute field to extract and use as FASTA sequence ID " + \ "[default: %default]") p.add_option("--desc_attribute", default="Note", help="The attribute field to extract and use as FASTA sequence " + \ "description [default: %default]") p.add_option("--full_header", default=None, choices=["default", "tair"], help="Specify if full FASTA header (with seqid, coordinates and datestamp)" + \ " should be generated [default: %default]") g1 = OptionGroup(p, "Optional parameters (if generating full header)") g1.add_option("--sep", dest="sep", default=" ", \ help="Specify separator used to delimiter header elements [default: \"%default\"]") g1.add_option("--datestamp", dest="datestamp", \ help="Specify a datestamp in the format YYYYMMDD or automatically pick `today`" + \ " [default: %default]") g1.add_option("--conf_class", dest="conf_class", default=False, action="store_true", help="Specify if `conf_class` attribute should be parsed and placed in the header" + \ " [default: %default]") p.add_option_group(g1) p.set_outfile() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) gff_file, fasta_file = args if opts.feature: opts.feature, opts.parent, opts.children, upstream_site, upstream_len, \ flag, error_msg = parse_feature_param(opts.feature) if flag: sys.exit(error_msg) parents = set(opts.parents.split(',')) children_list = set(opts.children.split(',')) """ In a situation where we want to extract sequence for only the top-level parent feature, specify feature type of parent == child """ skipChildren = True if len(parents.symmetric_difference(children_list)) == 0 \ else False id_attr = opts.id_attribute desc_attr = opts.desc_attribute sep = opts.sep import gffutils g = make_index(gff_file) f = Fasta(fasta_file, index=False) seqlen = {} for seqid, size in f.itersizes(): seqlen[seqid] = size fw = 
must_open(opts.outfile, "w") for feat in get_parents(gff_file, parents): desc = "" if desc_attr: fparent = feat.attributes['Parent'][0] \ if 'Parent' in feat.attributes else None if fparent: try: g_fparent = g[fparent] except gffutils.exceptions.FeatureNotFoundError: logging.error("{} not found in index .. skipped".format(fparent)) continue if desc_attr in g_fparent.attributes: desc = ",".join(g_fparent.attributes[desc_attr]) elif desc_attr in feat.attributes: desc = ",".join(feat.attributes[desc_attr]) if opts.full_header: desc_parts = [] desc_parts.append(desc) if opts.conf_class and 'conf_class' in feat.attributes: desc_parts.append(feat.attributes['conf_class'][0]) if opts.full_header == "tair": orient = "REVERSE" if feat.strand == "-" else "FORWARD" feat_coords = "{0}:{1}-{2} {3} LENGTH=[LEN]".format(feat.seqid, \ feat.start, feat.end, orient) else: (s, e) = (feat.start, feat.end) if (feat.strand == "+") \ else (feat.end, feat.start) feat_coords = "{0}:{1}-{2}".format(feat.seqid, s, e) desc_parts.append(feat_coords) datestamp = opts.datestamp if opts.datestamp else \ "{0}{1}{2}".format(dt.now().year, dt.now().month, dt.now().day) desc_parts.append(datestamp) desc = sep.join(str(x) for x in desc_parts) desc = "".join(str(x) for x in (sep, desc)).strip() if opts.feature == "upstream": upstream_start, upstream_stop = get_upstream_coords(upstream_site, upstream_len, \ seqlen[feat.seqid], feat, children_list, g) if not upstream_start or not upstream_stop: continue feat_seq = f.sequence(dict(chr=feat.seqid, start=upstream_start, stop=upstream_stop, strand=feat.strand)) (s, e) = (upstream_start, upstream_stop) \ if feat.strand == "+" else \ (upstream_stop, upstream_start) upstream_seq_loc = str(feat.seqid) + ":" + str(s) + "-" + str(e) desc = sep.join(str(x) for x in (desc, upstream_seq_loc, \ "FLANKLEN=" + str(upstream_len))) else: children = [] if not skipChildren: for c in g.children(feat.id, 1): if c.featuretype not in children_list: continue child = 
f.sequence(dict(chr=c.chrom, start=c.start, stop=c.stop, strand=c.strand)) children.append((child, c)) if not children: print("[warning] %s has no children with type %s" \ % (feat.id, ','.join(children_list)), file=sys.stderr) continue else: child = f.sequence(dict(chr=feat.seqid, start=feat.start, stop=feat.end, strand=feat.strand)) children.append((child, feat)) # sort children in incremental position children.sort(key=lambda x: x[1].start) # reverse children if negative strand if feat.strand == '-': children.reverse() feat_seq = ''.join(x[0] for x in children) desc = desc.replace("\"", "") id = ",".join(feat.attributes[id_attr]) if id_attr \ and feat.attributes[id_attr] else \ feat.id if opts.full_header == "tair": desc = desc.replace("[LEN]", str(len(feat_seq))) rec = SeqRecord(Seq(feat_seq), id=id, description=desc) SeqIO.write([rec], fw, "fasta") fw.flush()
[ "def", "load", "(", "args", ")", ":", "from", "datetime", "import", "datetime", "as", "dt", "from", "jcvi", ".", "formats", ".", "fasta", "import", "Seq", ",", "SeqRecord", "# can request output fasta sequence id to be picked from following attributes", "valid_id_attrib...
%prog load gff_file fasta_file [--options] Parses the selected features out of GFF, with subfeatures concatenated. For example, to get the CDS sequences, do this: $ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS To get 500bp upstream of a genes Transcription Start Site (TSS), do this: $ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500 Switch TSS with TrSS for Translation Start Site.
[ "%prog", "load", "gff_file", "fasta_file", "[", "--", "options", "]" ]
python
train
hugapi/hug
hug/middleware.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/middleware.py#L99-L101
def process_request(self, request, response): """Logs the basic endpoint requested""" self.logger.info('Requested: {0} {1} {2}'.format(request.method, request.relative_uri, request.content_type))
[ "def", "process_request", "(", "self", ",", "request", ",", "response", ")", ":", "self", ".", "logger", ".", "info", "(", "'Requested: {0} {1} {2}'", ".", "format", "(", "request", ".", "method", ",", "request", ".", "relative_uri", ",", "request", ".", "...
Logs the basic endpoint requested
[ "Logs", "the", "basic", "endpoint", "requested" ]
python
train
ToucanToco/toucan-data-sdk
toucan_data_sdk/utils/postprocess/filter_by_date.py
https://github.com/ToucanToco/toucan-data-sdk/blob/c3ca874e1b64f4bdcc2edda750a72d45d1561d8a/toucan_data_sdk/utils/postprocess/filter_by_date.py#L95-L112
def add_years(dateobj, nb_years): """return `dateobj` + `nb_years` If landing date doesn't exist (e.g. february, 30th), return the last day of the landing month. >>> add_years(date(2018, 1, 1), 1) datetime.date(2019, 1, 1) >>> add_years(date(2018, 1, 1), -1) datetime.date(2017, 1, 1) >>> add_years(date(2020, 2, 29), 1) datetime.date(2021, 2, 28) >>> add_years(date(2020, 2, 29), -1) datetime.date(2019, 2, 28) """ year = dateobj.year + nb_years lastday = monthrange(year, dateobj.month)[1] return dateobj.replace(year=year, day=min(lastday, dateobj.day))
[ "def", "add_years", "(", "dateobj", ",", "nb_years", ")", ":", "year", "=", "dateobj", ".", "year", "+", "nb_years", "lastday", "=", "monthrange", "(", "year", ",", "dateobj", ".", "month", ")", "[", "1", "]", "return", "dateobj", ".", "replace", "(", ...
return `dateobj` + `nb_years` If landing date doesn't exist (e.g. february, 30th), return the last day of the landing month. >>> add_years(date(2018, 1, 1), 1) datetime.date(2019, 1, 1) >>> add_years(date(2018, 1, 1), -1) datetime.date(2017, 1, 1) >>> add_years(date(2020, 2, 29), 1) datetime.date(2021, 2, 28) >>> add_years(date(2020, 2, 29), -1) datetime.date(2019, 2, 28)
[ "return", "dateobj", "+", "nb_years" ]
python
test
saltstack/salt
salt/cloud/clouds/saltify.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/saltify.py#L203-L210
def show_instance(name, call=None): ''' List the a single node, return dict of grains. ''' local = salt.client.LocalClient() ret = local.cmd(name, 'grains.items') ret.update(_build_required_items(ret)) return ret
[ "def", "show_instance", "(", "name", ",", "call", "=", "None", ")", ":", "local", "=", "salt", ".", "client", ".", "LocalClient", "(", ")", "ret", "=", "local", ".", "cmd", "(", "name", ",", "'grains.items'", ")", "ret", ".", "update", "(", "_build_r...
List the a single node, return dict of grains.
[ "List", "the", "a", "single", "node", "return", "dict", "of", "grains", "." ]
python
train
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/tex/lsstdoc.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/tex/lsstdoc.py#L282-L315
def format_title(self, format='html5', deparagraph=True, mathjax=False, smart=True, extra_args=None): """Get the document title in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `str` Converted content or `None` if the title is not available in the document. """ if self.title is None: return None output_text = convert_lsstdoc_tex( self.title, format, deparagraph=deparagraph, mathjax=mathjax, smart=smart, extra_args=extra_args) return output_text
[ "def", "format_title", "(", "self", ",", "format", "=", "'html5'", ",", "deparagraph", "=", "True", ",", "mathjax", "=", "False", ",", "smart", "=", "True", ",", "extra_args", "=", "None", ")", ":", "if", "self", ".", "title", "is", "None", ":", "ret...
Get the document title in the specified markup format. Parameters ---------- format : `str`, optional Output format (such as ``'html5'`` or ``'plain'``). deparagraph : `bool`, optional Remove the paragraph tags from single paragraph content. mathjax : `bool`, optional Allow pandoc to use MathJax math markup. smart : `True`, optional Allow pandoc to create "smart" unicode punctuation. extra_args : `list`, optional Additional command line flags to pass to Pandoc. See `lsstprojectmeta.pandoc.convert.convert_text`. Returns ------- output_text : `str` Converted content or `None` if the title is not available in the document.
[ "Get", "the", "document", "title", "in", "the", "specified", "markup", "format", "." ]
python
valid
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L1539-L1568
def protect(self, passphrase, enc_alg, hash_alg): """ Add a passphrase to a private key. If the key is already passphrase protected, it should be unlocked before a new passphrase can be specified. Has no effect on public keys. :param passphrase: A passphrase to protect the key with :type passphrase: ``str``, ``unicode`` :param enc_alg: Symmetric encryption algorithm to use to protect the key :type enc_alg: :py:obj:`~constants.SymmetricKeyAlgorithm` :param hash_alg: Hash algorithm to use in the String-to-Key specifier :type hash_alg: :py:obj:`~constants.HashAlgorithm` """ ##TODO: specify strong defaults for enc_alg and hash_alg if self.is_public: # we can't protect public keys because only private key material is ever protected warnings.warn("Public keys cannot be passphrase-protected", stacklevel=2) return if self.is_protected and not self.is_unlocked: # we can't protect a key that is already protected unless it is unlocked first warnings.warn("This key is already protected with a passphrase - " "please unlock it before attempting to specify a new passphrase", stacklevel=2) return for sk in itertools.chain([self], self.subkeys.values()): sk._key.protect(passphrase, enc_alg, hash_alg) del passphrase
[ "def", "protect", "(", "self", ",", "passphrase", ",", "enc_alg", ",", "hash_alg", ")", ":", "##TODO: specify strong defaults for enc_alg and hash_alg", "if", "self", ".", "is_public", ":", "# we can't protect public keys because only private key material is ever protected", "w...
Add a passphrase to a private key. If the key is already passphrase protected, it should be unlocked before a new passphrase can be specified. Has no effect on public keys. :param passphrase: A passphrase to protect the key with :type passphrase: ``str``, ``unicode`` :param enc_alg: Symmetric encryption algorithm to use to protect the key :type enc_alg: :py:obj:`~constants.SymmetricKeyAlgorithm` :param hash_alg: Hash algorithm to use in the String-to-Key specifier :type hash_alg: :py:obj:`~constants.HashAlgorithm`
[ "Add", "a", "passphrase", "to", "a", "private", "key", ".", "If", "the", "key", "is", "already", "passphrase", "protected", "it", "should", "be", "unlocked", "before", "a", "new", "passphrase", "can", "be", "specified", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyQuantBase.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyQuantBase.py#L138-L143
def cci(self, n, array=False): """CCI指标""" result = talib.CCI(self.high, self.low, self.close, n) if array: return result return result[-1]
[ "def", "cci", "(", "self", ",", "n", ",", "array", "=", "False", ")", ":", "result", "=", "talib", ".", "CCI", "(", "self", ".", "high", ",", "self", ".", "low", ",", "self", ".", "close", ",", "n", ")", "if", "array", ":", "return", "result", ...
CCI指标
[ "CCI指标" ]
python
train
CityOfZion/neo-python
neo/Wallets/Wallet.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/Wallet.py#L1184-L1215
def Sign(self, context): """ Sign the verifiable items ( Transaction, Block, etc ) in the context with the Keypairs in this wallet. Args: context (ContractParameterContext): the context to sign. Returns: bool: if signing is successful for all contracts in this wallet. """ success = False for hash in context.ScriptHashes: contract = self.GetContract(hash) if contract is None: logger.info( f"Cannot find key belonging to script_hash {hash}. Make sure the source address you're trying to sign the transaction for is imported in the wallet.") continue key = self.GetKeyByScriptHash(hash) if key is None: continue signature = Helper.Sign(context.Verifiable, key) res = context.AddSignature(contract, key.PublicKey, signature) success |= res return success
[ "def", "Sign", "(", "self", ",", "context", ")", ":", "success", "=", "False", "for", "hash", "in", "context", ".", "ScriptHashes", ":", "contract", "=", "self", ".", "GetContract", "(", "hash", ")", "if", "contract", "is", "None", ":", "logger", ".", ...
Sign the verifiable items ( Transaction, Block, etc ) in the context with the Keypairs in this wallet. Args: context (ContractParameterContext): the context to sign. Returns: bool: if signing is successful for all contracts in this wallet.
[ "Sign", "the", "verifiable", "items", "(", "Transaction", "Block", "etc", ")", "in", "the", "context", "with", "the", "Keypairs", "in", "this", "wallet", "." ]
python
train
MaT1g3R/option
option/option_.py
https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/option_.py#L240-L258
def map(self, callback: Callable[[T], U]) -> 'Option[U]': """ Applies the ``callback`` with the contained value as its argument or returns :py:data:`NONE`. Args: callback: The callback to apply to the contained value. Returns: The ``callback`` result wrapped in an :class:`Option` if the contained value is ``Some``, otherwise :py:data:`NONE` Examples: >>> Some(10).map(lambda x: x * x) Some(100) >>> NONE.map(lambda x: x * x) NONE """ return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
[ "def", "map", "(", "self", ",", "callback", ":", "Callable", "[", "[", "T", "]", ",", "U", "]", ")", "->", "'Option[U]'", ":", "return", "self", ".", "_type", ".", "Some", "(", "callback", "(", "self", ".", "_val", ")", ")", "if", "self", ".", ...
Applies the ``callback`` with the contained value as its argument or returns :py:data:`NONE`. Args: callback: The callback to apply to the contained value. Returns: The ``callback`` result wrapped in an :class:`Option` if the contained value is ``Some``, otherwise :py:data:`NONE` Examples: >>> Some(10).map(lambda x: x * x) Some(100) >>> NONE.map(lambda x: x * x) NONE
[ "Applies", "the", "callback", "with", "the", "contained", "value", "as", "its", "argument", "or", "returns", ":", "py", ":", "data", ":", "NONE", "." ]
python
train
cuihantao/andes
andes/variables/dae.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/dae.py#L624-L644
def set_jac(self, m, val, row, col): """ Set the values at (row, col) to val in Jacobian m :param m: Jacobian name :param val: values to set :param row: row indices :param col: col indices :return: None """ assert m in ('Fx', 'Fy', 'Gx', 'Gy', 'Fx0', 'Fy0', 'Gx0', 'Gy0'), \ 'Wrong Jacobian matrix name <{0}>'.format(m) if isinstance(val, (int, float)) and isinstance(row, (np.ndarray, matrix, list)): val = val * ones(len(row), 1) self._set[m]['I'] = matrix([self._set[m]['I'], matrix(row)]) self._set[m]['J'] = matrix([self._set[m]['J'], matrix(col)]) self._set[m]['V'] = matrix([self._set[m]['V'], matrix(val)])
[ "def", "set_jac", "(", "self", ",", "m", ",", "val", ",", "row", ",", "col", ")", ":", "assert", "m", "in", "(", "'Fx'", ",", "'Fy'", ",", "'Gx'", ",", "'Gy'", ",", "'Fx0'", ",", "'Fy0'", ",", "'Gx0'", ",", "'Gy0'", ")", ",", "'Wrong Jacobian mat...
Set the values at (row, col) to val in Jacobian m :param m: Jacobian name :param val: values to set :param row: row indices :param col: col indices :return: None
[ "Set", "the", "values", "at", "(", "row", "col", ")", "to", "val", "in", "Jacobian", "m" ]
python
train
bukun/TorCMS
torcms/core/privilege.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/privilege.py#L23-L54
def auth_view(method): ''' role for view. ''' def wrapper(self, *args, **kwargs): ''' wrapper. ''' if ROLE_CFG['view'] == '': return method(self, *args, **kwargs) elif self.current_user: if is_prived(self.userinfo.role, ROLE_CFG['view']): return method(self, *args, **kwargs) else: kwd = { 'info': 'No role', } self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo) else: kwd = { 'info': 'No role', } self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo) return wrapper
[ "def", "auth_view", "(", "method", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "'''\n wrapper.\n '''", "if", "ROLE_CFG", "[", "'view'", "]", "==", "''", ":", "return", "method", "(", "self",...
role for view.
[ "role", "for", "view", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/provenance/system.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/provenance/system.py#L101-L119
def _torque_queue_nodes(queue): """Retrieve the nodes available for a queue. Parses out nodes from `acl_hosts` in qstat -Qf and extracts the initial names of nodes used in pbsnodes. """ qstat_out = subprocess.check_output(["qstat", "-Qf", queue]).decode() hosts = [] in_hosts = False for line in qstat_out.split("\n"): if line.strip().startswith("acl_hosts = "): hosts.extend(line.replace("acl_hosts = ", "").strip().split(",")) in_hosts = True elif in_hosts: if line.find(" = ") > 0: break else: hosts.extend(line.strip().split(",")) return tuple([h.split(".")[0].strip() for h in hosts if h.strip()])
[ "def", "_torque_queue_nodes", "(", "queue", ")", ":", "qstat_out", "=", "subprocess", ".", "check_output", "(", "[", "\"qstat\"", ",", "\"-Qf\"", ",", "queue", "]", ")", ".", "decode", "(", ")", "hosts", "=", "[", "]", "in_hosts", "=", "False", "for", ...
Retrieve the nodes available for a queue. Parses out nodes from `acl_hosts` in qstat -Qf and extracts the initial names of nodes used in pbsnodes.
[ "Retrieve", "the", "nodes", "available", "for", "a", "queue", "." ]
python
train
gofed/gofedlib
gofedlib/snapshot/capturer.py
https://github.com/gofed/gofedlib/blob/0674c248fe3d8706f98f912996b65af469f96b10/gofedlib/snapshot/capturer.py#L25-L51
def capture(self, commit = ""): """Capture the current state of a project based on its provider Commit is relevant only for upstream providers. If empty, the latest commit from provider repository is taken. It is ignored for distribution providers. :param provider: project provider, e.g. upstream repository, distribution builder :type provider: json/dict :param commit: project's original commit :type commit: string """ self._validateProvider(self._provider) # get client for repository # TODO(jchaloup): read config file to switch between local and remove clients # TODO(jchaloup): remote client can cover gofed infratructure or any remove source for repository info client = RepositoryClientBuilder().buildWithRemoteClient(self._provider) if self._provider["provider"] == "github": self._signature = ProjectGithubRepositoryCapturer(self._provider, client).capture(commit).signature() elif self._provider["provider"] == "bitbucket": self._signature = ProjectBitbucketRepositoryCapturer(self._provider, client).capture(commit).signature() else: raise KeyError("Provider '%s' not recognized" % self._provider["provider"]) return self
[ "def", "capture", "(", "self", ",", "commit", "=", "\"\"", ")", ":", "self", ".", "_validateProvider", "(", "self", ".", "_provider", ")", "# get client for repository", "# TODO(jchaloup): read config file to switch between local and remove clients", "# TODO(jchaloup): remote...
Capture the current state of a project based on its provider Commit is relevant only for upstream providers. If empty, the latest commit from provider repository is taken. It is ignored for distribution providers. :param provider: project provider, e.g. upstream repository, distribution builder :type provider: json/dict :param commit: project's original commit :type commit: string
[ "Capture", "the", "current", "state", "of", "a", "project", "based", "on", "its", "provider" ]
python
train
FNNDSC/pfurl
pfurl/pfurl.py
https://github.com/FNNDSC/pfurl/blob/572f634ab582b7b7b7a3fbfd5bf12aadc1ba7958/pfurl/pfurl.py#L266-L330
def man(self, **kwargs): """ Print some man for each understood command """ str_man = 'commands' str_amount = 'full' for k, v in kwargs.items(): if k == 'on': str_man = v if k == 'amount': str_amount = v if str_man == 'commands': str_commands = """ This script/module provides CURL-based GET/PUT/POST communication over http to a remote REST-like service: """ + Colors.GREEN + """ ./pfurl.py [--auth <username:passwd>] [--verb <GET/POST>] \\ --http <IP>[:<port>]</some/path/> """ + Colors.WHITE + """ Where --auth is an optional authorization to pass to the REST API, --verb denotes the REST verb to use and --http specifies the REST URL. Additionally, a 'message' described in JSON syntax can be pushed to the remote service, in the following syntax: """ + Colors.GREEN + """ pfurl [--auth <username:passwd>] [--verb <GET/POST>] \\ --http <IP>[:<port>]</some/path/> \\ [--msg <JSON-formatted-string>] """ + Colors.WHITE + """ In the case of the 'pman' system this --msg flag has very specific contextual syntax, for example: """ + Colors.GREEN + """ pfurl --verb POST --http %s:%s/api/v1/cmd/ --msg \\ '{ "action": "run", "meta": { "cmd": "cal 7 1970", "auid": "rudolphpienaar", "jid": "<jid>-1", "threaded": true } }' """ % (self.str_ip, self.str_port) + Colors.CYAN + """ The following specific action directives are directly handled by script: """ + "\n" + \ self.man_pushPath( description = "short") + "\n" + \ self.man_pullPath( description = "short") + "\n" + \ Colors.YELLOW + \ """ To get detailed help on any of the above commands, type """ + Colors.LIGHT_CYAN + \ """ ./pfurl.py --man <command> """ return str_commands if str_man == 'pushPath': return self.man_pushPath( description = str_amount) if str_man == 'pullPath': return self.man_pullPath( description = str_amount)
[ "def", "man", "(", "self", ",", "*", "*", "kwargs", ")", ":", "str_man", "=", "'commands'", "str_amount", "=", "'full'", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "==", "'on'", ":", "str_man", "=", "v", "if", ...
Print some man for each understood command
[ "Print", "some", "man", "for", "each", "understood", "command" ]
python
train
sunlightlabs/django-mediasync
mediasync/templatetags/media.py
https://github.com/sunlightlabs/django-mediasync/blob/aa8ce4cfff757bbdb488463c64c0863cca6a1932/mediasync/templatetags/media.py#L154-L178
def css_tag(parser, token): """ Renders a tag to include the stylesheet. It takes an optional second parameter for the media attribute; the default media is "screen, projector". Usage:: {% css "<somefile>.css" ["<projection type(s)>"] %} Examples:: {% css "myfile.css" %} {% css "myfile.css" "screen, projection"%} """ path = get_path_from_tokens(token) tokens = token.split_contents() if len(tokens) > 2: # Get the media types from the tag call provided by the user. media_type = tokens[2][1:-1] else: # Default values. media_type = "screen, projection" return CssTagNode(path, media_type=media_type)
[ "def", "css_tag", "(", "parser", ",", "token", ")", ":", "path", "=", "get_path_from_tokens", "(", "token", ")", "tokens", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "tokens", ")", ">", "2", ":", "# Get the media types from the tag call...
Renders a tag to include the stylesheet. It takes an optional second parameter for the media attribute; the default media is "screen, projector". Usage:: {% css "<somefile>.css" ["<projection type(s)>"] %} Examples:: {% css "myfile.css" %} {% css "myfile.css" "screen, projection"%}
[ "Renders", "a", "tag", "to", "include", "the", "stylesheet", ".", "It", "takes", "an", "optional", "second", "parameter", "for", "the", "media", "attribute", ";", "the", "default", "media", "is", "screen", "projector", ".", "Usage", "::" ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/multi_map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/multi_map.py#L19-L53
def add_entry_listener(self, include_value=False, key=None, added_func=None, removed_func=None, clear_all_func=None): """ Adds an entry listener for this multimap. The listener will be notified for all multimap add/remove/clear-all events. :param include_value: (bool), whether received event should include the value or not (optional). :param key: (object), key for filtering the events (optional). :param added_func: Function to be called when an entry is added to map (optional). :param removed_func: Function to be called when an entry is removed_func from map (optional). :param clear_all_func: Function to be called when entries are cleared from map (optional). :return: (str), a registration id which is used as a key to remove the listener. """ if key: key_data = self._to_data(key) request = multi_map_add_entry_listener_to_key_codec.encode_request(name=self.name, key=key_data, include_value=include_value, local_only=False) else: request = multi_map_add_entry_listener_codec.encode_request(name=self.name, include_value=include_value, local_only=False) def handle_event_entry(**_kwargs): event = EntryEvent(self._to_object, **_kwargs) if event.event_type == EntryEventType.added and added_func: added_func(event) elif event.event_type == EntryEventType.removed and removed_func: removed_func(event) elif event.event_type == EntryEventType.clear_all and clear_all_func: clear_all_func(event) return self._start_listening(request, lambda m: multi_map_add_entry_listener_codec.handle(m, handle_event_entry), lambda r: multi_map_add_entry_listener_codec.decode_response(r)[ 'response'])
[ "def", "add_entry_listener", "(", "self", ",", "include_value", "=", "False", ",", "key", "=", "None", ",", "added_func", "=", "None", ",", "removed_func", "=", "None", ",", "clear_all_func", "=", "None", ")", ":", "if", "key", ":", "key_data", "=", "sel...
Adds an entry listener for this multimap. The listener will be notified for all multimap add/remove/clear-all events. :param include_value: (bool), whether received event should include the value or not (optional). :param key: (object), key for filtering the events (optional). :param added_func: Function to be called when an entry is added to map (optional). :param removed_func: Function to be called when an entry is removed_func from map (optional). :param clear_all_func: Function to be called when entries are cleared from map (optional). :return: (str), a registration id which is used as a key to remove the listener.
[ "Adds", "an", "entry", "listener", "for", "this", "multimap", ".", "The", "listener", "will", "be", "notified", "for", "all", "multimap", "add", "/", "remove", "/", "clear", "-", "all", "events", "." ]
python
train
amperser/proselint
proselint/checks/dates_times/dates.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/dates_times/dates.py#L21-L29
def check_decade_apostrophes_short(text): """Check the text for dates of the form X0's.""" err = "dates_times.dates" msg = u"Apostrophes aren't needed for decades." regex = "\d0\'s" return existence_check( text, [regex], err, msg, excluded_topics=["50 Cent"])
[ "def", "check_decade_apostrophes_short", "(", "text", ")", ":", "err", "=", "\"dates_times.dates\"", "msg", "=", "u\"Apostrophes aren't needed for decades.\"", "regex", "=", "\"\\d0\\'s\"", "return", "existence_check", "(", "text", ",", "[", "regex", "]", ",", "err", ...
Check the text for dates of the form X0's.
[ "Check", "the", "text", "for", "dates", "of", "the", "form", "X0", "s", "." ]
python
train
timofurrer/colorful
colorful/colors.py
https://github.com/timofurrer/colorful/blob/919fa6da17865cc5e01e6b16119193a97d180dc9/colorful/colors.py#L18-L30
def parse_colors(path): """Parse the given color files. Supported are: * .txt for X11 colors * .json for colornames """ if path.endswith(".txt"): return parse_rgb_txt_file(path) elif path.endswith(".json"): return parse_json_color_file(path) raise TypeError("colorful only supports .txt and .json files for colors")
[ "def", "parse_colors", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "\".txt\"", ")", ":", "return", "parse_rgb_txt_file", "(", "path", ")", "elif", "path", ".", "endswith", "(", "\".json\"", ")", ":", "return", "parse_json_color_file", "(", "...
Parse the given color files. Supported are: * .txt for X11 colors * .json for colornames
[ "Parse", "the", "given", "color", "files", "." ]
python
valid
rvswift/EB
EB/builder/utilities/classification.py
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/classification.py#L383-L412
def make_score_structure(molecules, ensemble): """ puts data in the score_structure format for subsequent processing :param molecules: list [mol_object_1, mol_object_2, .... ] mol_objects are instances of common_tools.molecules :return score_structure: list [(id, best_score, best_query, status, net decoy count, net active count), ..., ] """ # sort molecules by their ensemble score #sort_order = get_sort_order(molecules) sort_order = 'asc' sorted_molecules = screener.screener(molecules, ensemble, sort_order) # initiate variables score_structure = [] net_active_count = 0 net_decoy_count = 0 for mol in sorted_molecules: # determine net active count & net decoy count status = mol.GetProp('status') if status == '1': net_active_count += 1 elif status == '0': net_decoy_count += 1 else: continue score_structure.append((mol.GetProp('id'), mol.GetProp('best_score'), mol.GetProp('best_query'), status, net_decoy_count, net_active_count)) return score_structure
[ "def", "make_score_structure", "(", "molecules", ",", "ensemble", ")", ":", "# sort molecules by their ensemble score", "#sort_order = get_sort_order(molecules)", "sort_order", "=", "'asc'", "sorted_molecules", "=", "screener", ".", "screener", "(", "molecules", ",", "ensem...
puts data in the score_structure format for subsequent processing :param molecules: list [mol_object_1, mol_object_2, .... ] mol_objects are instances of common_tools.molecules :return score_structure: list [(id, best_score, best_query, status, net decoy count, net active count), ..., ]
[ "puts", "data", "in", "the", "score_structure", "format", "for", "subsequent", "processing", ":", "param", "molecules", ":", "list", "[", "mol_object_1", "mol_object_2", "....", "]", "mol_objects", "are", "instances", "of", "common_tools", ".", "molecules", ":", ...
python
train
open-mmlab/mmcv
mmcv/video/optflow.py
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/optflow.py#L90-L115
def quantize_flow(flow, max_val=0.02, norm=True): """Quantize flow to [0, 255]. After this step, the size of flow will be much smaller, and can be dumped as jpeg images. Args: flow (ndarray): (h, w, 2) array of optical flow. max_val (float): Maximum value of flow, values beyond [-max_val, max_val] will be truncated. norm (bool): Whether to divide flow values by image width/height. Returns: tuple[ndarray]: Quantized dx and dy. """ h, w, _ = flow.shape dx = flow[..., 0] dy = flow[..., 1] if norm: dx = dx / w # avoid inplace operations dy = dy / h # use 255 levels instead of 256 to make sure 0 is 0 after dequantization. flow_comps = [ quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy] ] return tuple(flow_comps)
[ "def", "quantize_flow", "(", "flow", ",", "max_val", "=", "0.02", ",", "norm", "=", "True", ")", ":", "h", ",", "w", ",", "_", "=", "flow", ".", "shape", "dx", "=", "flow", "[", "...", ",", "0", "]", "dy", "=", "flow", "[", "...", ",", "1", ...
Quantize flow to [0, 255]. After this step, the size of flow will be much smaller, and can be dumped as jpeg images. Args: flow (ndarray): (h, w, 2) array of optical flow. max_val (float): Maximum value of flow, values beyond [-max_val, max_val] will be truncated. norm (bool): Whether to divide flow values by image width/height. Returns: tuple[ndarray]: Quantized dx and dy.
[ "Quantize", "flow", "to", "[", "0", "255", "]", "." ]
python
test
watson-developer-cloud/python-sdk
ibm_watson/language_translator_v3.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/language_translator_v3.py#L626-L633
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'language') and self.language is not None: _dict['language'] = self.language if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'language'", ")", "and", "self", ".", "language", "is", "not", "None", ":", "_dict", "[", "'language'", "]", "=", "self", ".", "language", "if", "...
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
ansible/tower-cli
tower_cli/cli/base.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/cli/base.py#L60-L69
def format_commands(self, ctx, formatter): """Extra format methods for multi methods that adds all the commands after the options. """ self.format_command_subsection( ctx, formatter, self.list_misc_commands(), 'Commands' ) self.format_command_subsection( ctx, formatter, self.list_resource_commands(), 'Resources' )
[ "def", "format_commands", "(", "self", ",", "ctx", ",", "formatter", ")", ":", "self", ".", "format_command_subsection", "(", "ctx", ",", "formatter", ",", "self", ".", "list_misc_commands", "(", ")", ",", "'Commands'", ")", "self", ".", "format_command_subsec...
Extra format methods for multi methods that adds all the commands after the options.
[ "Extra", "format", "methods", "for", "multi", "methods", "that", "adds", "all", "the", "commands", "after", "the", "options", "." ]
python
valid
balloob/pychromecast
pychromecast/controllers/media.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/media.py#L459-L471
def _process_media_status(self, data): """ Processes a STATUS message. """ self.status.update(data) self.logger.debug("Media:Received status %s", data) # Update session active threading event if self.status.media_session_id is None: self.session_active_event.clear() else: self.session_active_event.set() self._fire_status_changed()
[ "def", "_process_media_status", "(", "self", ",", "data", ")", ":", "self", ".", "status", ".", "update", "(", "data", ")", "self", ".", "logger", ".", "debug", "(", "\"Media:Received status %s\"", ",", "data", ")", "# Update session active threading event", "if...
Processes a STATUS message.
[ "Processes", "a", "STATUS", "message", "." ]
python
train
AnalogJ/lexicon
lexicon/providers/hetzner.py
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/hetzner.py#L677-L696
def _get_zone(self, domain, domain_id): """ Pulls the zone for the current domain from authenticated Hetzner account and returns it as an zone object. """ api = self.api[self.account] for request in api['zone']['GET']: url = (request.copy()).get('url', '/').replace('<id>', domain_id) params = request.get('params', {}).copy() for param in params: params[param] = params[param].replace('<id>', domain_id) response = self._get(url, query_params=params) dom = Provider._filter_dom(response.text, api['filter']) zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}] zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8') hidden = Provider._extract_hidden_data(dom) zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False), 'hidden': hidden} LOGGER.info('Hetzner => Get zone for domain %s', domain) return zone
[ "def", "_get_zone", "(", "self", ",", "domain", ",", "domain_id", ")", ":", "api", "=", "self", ".", "api", "[", "self", ".", "account", "]", "for", "request", "in", "api", "[", "'zone'", "]", "[", "'GET'", "]", ":", "url", "=", "(", "request", "...
Pulls the zone for the current domain from authenticated Hetzner account and returns it as an zone object.
[ "Pulls", "the", "zone", "for", "the", "current", "domain", "from", "authenticated", "Hetzner", "account", "and", "returns", "it", "as", "an", "zone", "object", "." ]
python
train
tensorlayer/tensorlayer
tensorlayer/rein.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/rein.py#L65-L98
def cross_entropy_reward_loss(logits, actions, rewards, name=None): """Calculate the loss for Policy Gradient Network. Parameters ---------- logits : tensor The network outputs without softmax. This function implements softmax inside. actions : tensor or placeholder The agent actions. rewards : tensor or placeholder The rewards. Returns -------- Tensor The TensorFlow loss function. Examples ---------- >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D]) >>> network = InputLayer(states_batch_pl, name='input') >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1') >>> network = DenseLayer(network, n_units=3, name='out') >>> probs = network.outputs >>> sampling_prob = tf.nn.softmax(probs) >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None]) >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None]) >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl) >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss) """ cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name) return tf.reduce_sum(tf.multiply(cross_entropy, rewards))
[ "def", "cross_entropy_reward_loss", "(", "logits", ",", "actions", ",", "rewards", ",", "name", "=", "None", ")", ":", "cross_entropy", "=", "tf", ".", "nn", ".", "sparse_softmax_cross_entropy_with_logits", "(", "labels", "=", "actions", ",", "logits", "=", "l...
Calculate the loss for Policy Gradient Network. Parameters ---------- logits : tensor The network outputs without softmax. This function implements softmax inside. actions : tensor or placeholder The agent actions. rewards : tensor or placeholder The rewards. Returns -------- Tensor The TensorFlow loss function. Examples ---------- >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D]) >>> network = InputLayer(states_batch_pl, name='input') >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1') >>> network = DenseLayer(network, n_units=3, name='out') >>> probs = network.outputs >>> sampling_prob = tf.nn.softmax(probs) >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None]) >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None]) >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl) >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
[ "Calculate", "the", "loss", "for", "Policy", "Gradient", "Network", "." ]
python
valid
lpantano/seqcluster
seqcluster/install.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/install.py#L115-L141
def _install_data(data_dir, path_flavor, args): """Upgrade required genome data files in place. """ try: from bcbio import install as bcb except: raise ImportError("It needs bcbio to do the quick installation.") bio_data = op.join(path_flavor, "../biodata.yaml") s = {"flavor": path_flavor, # "target": "[brew, conda]", "vm_provider": "novm", "hostname": "localhost", "fabricrc_overrides": {"edition": "minimal", "use_sudo": "false", "keep_isolated": "true", "conda_cmd": bcb._get_conda_bin(), "distribution": "__auto__", "dist_name": "__auto__"}} s["actions"] = ["setup_biodata"] s["fabricrc_overrides"]["data_files"] = data_dir s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy") cbl = bcb.get_cloudbiolinux(bcb.REMOTES) s["genomes"] = _get_biodata(bio_data, args) sys.path.insert(0, cbl["dir"]) cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"]) cbl_deploy.deploy(s)
[ "def", "_install_data", "(", "data_dir", ",", "path_flavor", ",", "args", ")", ":", "try", ":", "from", "bcbio", "import", "install", "as", "bcb", "except", ":", "raise", "ImportError", "(", "\"It needs bcbio to do the quick installation.\"", ")", "bio_data", "=",...
Upgrade required genome data files in place.
[ "Upgrade", "required", "genome", "data", "files", "in", "place", "." ]
python
train
IRC-SPHERE/HyperStream
hyperstream/tool/base_tool.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/base_tool.py#L98-L128
def parameters(self): """ Get the tool parameters :return: The tool parameters along with additional information (whether they are functions or sets) """ parameters = [] for k, v in self.__dict__.items(): if k.startswith("_"): continue is_function = False is_set = False if callable(v): value = pickle.dumps(func_dump(v)) is_function = True elif isinstance(v, set): value = list(v) is_set = True else: value = v parameters.append(dict( key=k, value=value, is_function=is_function, is_set=is_set )) return parameters
[ "def", "parameters", "(", "self", ")", ":", "parameters", "=", "[", "]", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "is_function", "=", "False"...
Get the tool parameters :return: The tool parameters along with additional information (whether they are functions or sets)
[ "Get", "the", "tool", "parameters" ]
python
train
IBMStreams/pypi.streamsx
streamsx/topology/context.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/context.py#L409-L414
def _get_java_env(self): "Pass the VCAP through the environment to the java submission" env = super(_StreamingAnalyticsSubmitter, self)._get_java_env() vcap = streamsx.rest._get_vcap_services(self._vcap_services) env['VCAP_SERVICES'] = json.dumps(vcap) return env
[ "def", "_get_java_env", "(", "self", ")", ":", "env", "=", "super", "(", "_StreamingAnalyticsSubmitter", ",", "self", ")", ".", "_get_java_env", "(", ")", "vcap", "=", "streamsx", ".", "rest", ".", "_get_vcap_services", "(", "self", ".", "_vcap_services", ")...
Pass the VCAP through the environment to the java submission
[ "Pass", "the", "VCAP", "through", "the", "environment", "to", "the", "java", "submission" ]
python
train
ontio/ontology-python-sdk
ontology/io/binary_writer.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L370-L382
def write_hashes(self, arr): """ Write an array of hashes to the stream. Args: arr (list): a list of 32 byte hashes. """ length = len(arr) self.write_var_int(length) for item in arr: ba = bytearray(binascii.unhexlify(item)) ba.reverse() self.write_bytes(ba)
[ "def", "write_hashes", "(", "self", ",", "arr", ")", ":", "length", "=", "len", "(", "arr", ")", "self", ".", "write_var_int", "(", "length", ")", "for", "item", "in", "arr", ":", "ba", "=", "bytearray", "(", "binascii", ".", "unhexlify", "(", "item"...
Write an array of hashes to the stream. Args: arr (list): a list of 32 byte hashes.
[ "Write", "an", "array", "of", "hashes", "to", "the", "stream", "." ]
python
train
ARMmbed/icetea
icetea_lib/CliResponse.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/CliResponse.py#L90-L110
def verify_trace(self, expected_traces, break_in_fail=True): """ Verifies that expectedResponse is found in self.traces :param expected_traces: response or responses to look for. Must be list or str. :param break_in_fail: If set to True, re-raises exceptions caught or if message was not found :return: True or False :raises: LookupError if message was not found and breakInFail was True. Other Exceptions might also be raised through searcher.verify_message. """ ok = True try: ok = verify_message(self.traces, expected_traces) except (TypeError, LookupError) as inst: ok = False if break_in_fail: raise inst if ok is False and break_in_fail: raise LookupError("Unexpected message found") return ok
[ "def", "verify_trace", "(", "self", ",", "expected_traces", ",", "break_in_fail", "=", "True", ")", ":", "ok", "=", "True", "try", ":", "ok", "=", "verify_message", "(", "self", ".", "traces", ",", "expected_traces", ")", "except", "(", "TypeError", ",", ...
Verifies that expectedResponse is found in self.traces :param expected_traces: response or responses to look for. Must be list or str. :param break_in_fail: If set to True, re-raises exceptions caught or if message was not found :return: True or False :raises: LookupError if message was not found and breakInFail was True. Other Exceptions might also be raised through searcher.verify_message.
[ "Verifies", "that", "expectedResponse", "is", "found", "in", "self", ".", "traces" ]
python
train
iotile/coretools
iotile_ext_cloud/iotile/cloud/apps/ota_updater.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotile_ext_cloud/iotile/cloud/apps/ota_updater.py#L27-L36
def _download_ota_script(script_url): """Download the script from the cloud service and store to temporary file location""" try: blob = requests.get(script_url, stream=True) return blob.content except Exception as e: iprint("Failed to download OTA script") iprint(e) return False
[ "def", "_download_ota_script", "(", "script_url", ")", ":", "try", ":", "blob", "=", "requests", ".", "get", "(", "script_url", ",", "stream", "=", "True", ")", "return", "blob", ".", "content", "except", "Exception", "as", "e", ":", "iprint", "(", "\"Fa...
Download the script from the cloud service and store to temporary file location
[ "Download", "the", "script", "from", "the", "cloud", "service", "and", "store", "to", "temporary", "file", "location" ]
python
train
openmicroanalysis/pyxray
pyxray/composition.py
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L92-L120
def convert_formula_to_atomic_fractions(formula): """ Converts a chemical formula to an atomic fraction :class:`dict`. Args: formula (str): chemical formula, like Al2O3. No wildcard are accepted. """ mole_fractions = {} total_mole_fraction = 0.0 for match in CHEMICAL_FORMULA_PATTERN.finditer(formula): symbol, mole_fraction = match.groups() z = pyxray.element_atomic_number(symbol.strip()) if mole_fraction == '': mole_fraction = 1.0 mole_fraction = float(mole_fraction) mole_fraction = float(mole_fraction) mole_fractions[z] = mole_fraction total_mole_fraction += mole_fraction # Calculate atomic fractions atomic_fractions = {} for z, mole_fraction in mole_fractions.items(): atomic_fractions[z] = mole_fraction / total_mole_fraction return atomic_fractions
[ "def", "convert_formula_to_atomic_fractions", "(", "formula", ")", ":", "mole_fractions", "=", "{", "}", "total_mole_fraction", "=", "0.0", "for", "match", "in", "CHEMICAL_FORMULA_PATTERN", ".", "finditer", "(", "formula", ")", ":", "symbol", ",", "mole_fraction", ...
Converts a chemical formula to an atomic fraction :class:`dict`. Args: formula (str): chemical formula, like Al2O3. No wildcard are accepted.
[ "Converts", "a", "chemical", "formula", "to", "an", "atomic", "fraction", ":", "class", ":", "dict", "." ]
python
train
mwickert/scikit-dsp-comm
sk_dsp_comm/multirate_helper.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/multirate_helper.py#L120-L126
def dn(self,x,M_change = 12): """ Downsample and filter the signal """ y = signal.lfilter(self.b,[1],x) y = ssd.downsample(y,M_change) return y
[ "def", "dn", "(", "self", ",", "x", ",", "M_change", "=", "12", ")", ":", "y", "=", "signal", ".", "lfilter", "(", "self", ".", "b", ",", "[", "1", "]", ",", "x", ")", "y", "=", "ssd", ".", "downsample", "(", "y", ",", "M_change", ")", "ret...
Downsample and filter the signal
[ "Downsample", "and", "filter", "the", "signal" ]
python
valid
ronhanson/python-tbx
tbx/text.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L48-L57
def slugify(text, delim='-'): """Generates an slightly worse ASCII-only slug.""" punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+') result = [] for word in punctuation_re.split(text.lower()): word = normalize_text(word) if word: result.append(word) return delim.join(result)
[ "def", "slugify", "(", "text", ",", "delim", "=", "'-'", ")", ":", "punctuation_re", "=", "re", ".", "compile", "(", "r'[\\t !\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},.:]+'", ")", "result", "=", "[", "]", "for", "word", "in", "punctuation_re", ".", "split", "(", ...
Generates an slightly worse ASCII-only slug.
[ "Generates", "an", "slightly", "worse", "ASCII", "-", "only", "slug", "." ]
python
train
gwastro/pycbc
pycbc/frame/frame.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/frame/frame.py#L566-L581
def advance(self, blocksize): """Add blocksize seconds more to the buffer, push blocksize seconds from the beginning. Parameters ---------- blocksize: int The number of seconds to attempt to read from the channel """ ts = self._read_frame(blocksize) self.raw_buffer.roll(-len(ts)) self.raw_buffer[-len(ts):] = ts[:] self.read_pos += blocksize self.raw_buffer.start_time += blocksize return ts
[ "def", "advance", "(", "self", ",", "blocksize", ")", ":", "ts", "=", "self", ".", "_read_frame", "(", "blocksize", ")", "self", ".", "raw_buffer", ".", "roll", "(", "-", "len", "(", "ts", ")", ")", "self", ".", "raw_buffer", "[", "-", "len", "(", ...
Add blocksize seconds more to the buffer, push blocksize seconds from the beginning. Parameters ---------- blocksize: int The number of seconds to attempt to read from the channel
[ "Add", "blocksize", "seconds", "more", "to", "the", "buffer", "push", "blocksize", "seconds", "from", "the", "beginning", "." ]
python
train
iwanbk/nyamuk
nyamuk/mqtt_pkt.py
https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/mqtt_pkt.py#L218-L236
def read_string(self): """Read string.""" rc, length = self.read_uint16() if rc != NC.ERR_SUCCESS: return rc, None if self.pos + length > self.remaining_length: return NC.ERR_PROTOCOL, None ba = bytearray(length) if ba is None: return NC.ERR_NO_MEM, None for x in xrange(0, length): ba[x] = self.payload[self.pos] self.pos += 1 return NC.ERR_SUCCESS, ba
[ "def", "read_string", "(", "self", ")", ":", "rc", ",", "length", "=", "self", ".", "read_uint16", "(", ")", "if", "rc", "!=", "NC", ".", "ERR_SUCCESS", ":", "return", "rc", ",", "None", "if", "self", ".", "pos", "+", "length", ">", "self", ".", ...
Read string.
[ "Read", "string", "." ]
python
train
rraadd88/rohan
rohan/dandage/io_strs.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_strs.py#L120-L127
def make_pathable_string(s,replacewith='_'): """ Removes symbols from a string to be compatible with directory structure. :param s: string """ import re return re.sub(r'[^\w+/.]',replacewith, s.lower())
[ "def", "make_pathable_string", "(", "s", ",", "replacewith", "=", "'_'", ")", ":", "import", "re", "return", "re", ".", "sub", "(", "r'[^\\w+/.]'", ",", "replacewith", ",", "s", ".", "lower", "(", ")", ")" ]
Removes symbols from a string to be compatible with directory structure. :param s: string
[ "Removes", "symbols", "from", "a", "string", "to", "be", "compatible", "with", "directory", "structure", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/RelatedSamples/merge_related_samples.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/RelatedSamples/merge_related_samples.py#L175-L193
def checkArgs(args): """Checks the arguments and options. :param args: a an object containing the options of the program. :type args: argparse.Namespace :returns: ``True`` if everything was OK. If there is a problem with an option, an exception is raised using the :py:class:`ProgramError` class, a message is printed to the :class:`sys.stderr` and the program exists with code 1. """ if not os.path.isfile(args.ibs_related): msg = "{}: no such file".format(args.ibs_related) raise ProgramError(msg) return True
[ "def", "checkArgs", "(", "args", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "args", ".", "ibs_related", ")", ":", "msg", "=", "\"{}: no such file\"", ".", "format", "(", "args", ".", "ibs_related", ")", "raise", "ProgramError", "(", ...
Checks the arguments and options. :param args: a an object containing the options of the program. :type args: argparse.Namespace :returns: ``True`` if everything was OK. If there is a problem with an option, an exception is raised using the :py:class:`ProgramError` class, a message is printed to the :class:`sys.stderr` and the program exists with code 1.
[ "Checks", "the", "arguments", "and", "options", "." ]
python
train
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L271-L356
def do_POST(self): """ Based on the original, available at https://github.com/python/cpython/blob/2.7/Lib/SimpleXMLRPCServer.py Only difference is that it denies requests bigger than a certain size. Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's _dispatch method for handling. """ # Check that the path is legal if not self.is_rpc_path_valid(): self.report_404() return # reject gzip, so size-caps will work encoding = self.headers.get("content-encoding", "identity").lower() if encoding != 'identity': log.error("Reject request with encoding '{}'".format(encoding)) self.send_response(501, "encoding %r not supported" % encoding) return try: size_remaining = int(self.headers["content-length"]) if size_remaining > self.MAX_REQUEST_SIZE: if os.environ.get("BLOCKSTACK_DEBUG") == "1": log.error("Request is too big!") self.send_response(400) self.send_header('Content-length', '0') self.end_headers() return if os.environ.get("BLOCKSTACK_DEBUG") == "1": log.debug("Message is small enough to parse ({} bytes)".format(size_remaining)) # Get arguments by reading body of request. # never read more than our max size L = [] while size_remaining: chunk_size = min(size_remaining, self.MAX_REQUEST_SIZE) chunk = self.rfile.read(chunk_size) if not chunk: break L.append(chunk) size_remaining -= len(L[-1]) data = ''.join(L) data = self.decode_request_content(data) if data is None: return #response has been sent # In previous versions of SimpleXMLRPCServer, _dispatch # could be overridden in this class, instead of in # SimpleXMLRPCDispatcher. To maintain backwards compatibility, # check to see if a subclass implements _dispatch and dispatch # using that method if present. 
response = self.server._marshaled_dispatch( data, getattr(self, '_dispatch', None), self.path ) except Exception, e: # This should only happen if the module is buggy # internal error, report as HTTP server error self.send_response(500) self.send_header("Content-length", "0") self.end_headers() else: # got a valid XML RPC response self.send_response(200) self.send_header("Content-type", "text/xml") if self.encode_threshold is not None: if len(response) > self.encode_threshold: q = self.accept_encodings().get("gzip", 0) if q: try: response = xmlrpclib.gzip_encode(response) self.send_header("Content-Encoding", "gzip") except NotImplementedError: pass self.send_header("Content-length", str(len(response))) self.end_headers() self.wfile.write(response)
[ "def", "do_POST", "(", "self", ")", ":", "# Check that the path is legal", "if", "not", "self", ".", "is_rpc_path_valid", "(", ")", ":", "self", ".", "report_404", "(", ")", "return", "# reject gzip, so size-caps will work", "encoding", "=", "self", ".", "headers"...
Based on the original, available at https://github.com/python/cpython/blob/2.7/Lib/SimpleXMLRPCServer.py Only difference is that it denies requests bigger than a certain size. Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's _dispatch method for handling.
[ "Based", "on", "the", "original", "available", "at", "https", ":", "//", "github", ".", "com", "/", "python", "/", "cpython", "/", "blob", "/", "2", ".", "7", "/", "Lib", "/", "SimpleXMLRPCServer", ".", "py" ]
python
train
MacHu-GWU/angora-project
angora/gadget/backup.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/gadget/backup.py#L75-L117
def run_backup(filename, root_dir, ignore=[], ignore_ext=[], ignore_pattern=[]): """The backup utility method. :param root_dir: the directory you want to backup :param ignore: file or directory defined in this list will be ignored. :param ignore_ext: file with extensions defined in this list will be ignored. :param ignore_pattern: any file or directory that contains this pattern will be ignored. """ tab = " " # Step 1, calculate files to backup print("Perform backup '%s'..." % root_dir) print(tab + "1. Calculate files...") total_size_in_bytes = 0 init_mode = WinFile.init_mode WinFile.use_regular_init() fc = FileCollection.from_path_except( root_dir, ignore, ignore_ext, ignore_pattern) WinFile.set_initialize_mode(complexity=init_mode) for winfile in fc.iterfiles(): total_size_in_bytes += winfile.size_on_disk # Step 2, write files to zip archive print(tab * 2 + "Done, got %s files, total size is %s." % ( len(fc), string_SizeInBytes(total_size_in_bytes))) print(tab + "2. Backup files...") filename = "%s %s.zip" % ( filename, datetime.now().strftime("%Y-%m-%d %Hh-%Mm-%Ss")) print(tab * 2 + "Write to '%s'..." % filename) current_dir = os.getcwd() with ZipFile(filename, "w") as f: os.chdir(root_dir) for winfile in fc.iterfiles(): relpath = os.path.relpath(winfile.abspath, root_dir) f.write(relpath) os.chdir(current_dir) print(tab + "Complete!")
[ "def", "run_backup", "(", "filename", ",", "root_dir", ",", "ignore", "=", "[", "]", ",", "ignore_ext", "=", "[", "]", ",", "ignore_pattern", "=", "[", "]", ")", ":", "tab", "=", "\" \"", "# Step 1, calculate files to backup", "print", "(", "\"Perform backu...
The backup utility method. :param root_dir: the directory you want to backup :param ignore: file or directory defined in this list will be ignored. :param ignore_ext: file with extensions defined in this list will be ignored. :param ignore_pattern: any file or directory that contains this pattern will be ignored.
[ "The", "backup", "utility", "method", "." ]
python
train
gmr/rejected
rejected/process.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/process.py#L368-L433
def on_processed(self, message, result, start_time): """Invoked after a message is processed by the consumer and implements the logic for how to deal with a message based upon the result. :param rejected.data.Message message: The message that was processed :param int result: The result of the processing of the message :param float start_time: When the message was received """ duration = max(start_time, time.time()) - start_time self.counters[self.TIME_SPENT] += duration self.measurement.add_duration(self.TIME_SPENT, duration) if result == data.MESSAGE_DROP: LOGGER.debug('Rejecting message due to drop return from consumer') self.reject(message, False) self.counters[self.DROPPED] += 1 elif result == data.MESSAGE_EXCEPTION: LOGGER.debug('Rejecting message due to MessageException') self.reject(message, False) self.counters[self.MESSAGE_EXCEPTION] += 1 elif result == data.PROCESSING_EXCEPTION: LOGGER.debug('Rejecting message due to ProcessingException') self.reject(message, False) self.counters[self.PROCESSING_EXCEPTION] += 1 elif result == data.CONFIGURATION_EXCEPTION: LOGGER.debug('Rejecting message due to ConfigurationException ' 'and shutting down') self.reject(message, False) self.counters[self.CONFIGURATION_EXCEPTION] += 1 self.stop_consumer() self.shutdown_connections() elif result == data.CONSUMER_EXCEPTION: LOGGER.debug('Re-queueing message due to ConsumerException') self.reject(message, True) self.on_processing_error() self.counters[self.CONSUMER_EXCEPTION] += 1 elif result == data.RABBITMQ_EXCEPTION: LOGGER.debug('Processing interrupted due to RabbitMQException') self.on_processing_error() self.counters[self.RABBITMQ_EXCEPTION] += 1 elif result == data.UNHANDLED_EXCEPTION: LOGGER.debug('Re-queueing message due to UnhandledException') self.reject(message, True) self.on_processing_error() self.counters[self.UNHANDLED_EXCEPTION] += 1 elif result == data.MESSAGE_REQUEUE: LOGGER.debug('Re-queueing message due Consumer request') self.reject(message, True) 
self.counters[self.REQUEUED] += 1 elif result == data.MESSAGE_ACK and not self.no_ack: self.ack_message(message) self.counters[self.PROCESSED] += 1 self.measurement.set_tag(self.PROCESSED, True) self.maybe_submit_measurement() self.reset_state()
[ "def", "on_processed", "(", "self", ",", "message", ",", "result", ",", "start_time", ")", ":", "duration", "=", "max", "(", "start_time", ",", "time", ".", "time", "(", ")", ")", "-", "start_time", "self", ".", "counters", "[", "self", ".", "TIME_SPEN...
Invoked after a message is processed by the consumer and implements the logic for how to deal with a message based upon the result. :param rejected.data.Message message: The message that was processed :param int result: The result of the processing of the message :param float start_time: When the message was received
[ "Invoked", "after", "a", "message", "is", "processed", "by", "the", "consumer", "and", "implements", "the", "logic", "for", "how", "to", "deal", "with", "a", "message", "based", "upon", "the", "result", "." ]
python
train
laplacesdemon/django-youtube
django_youtube/views.py
https://github.com/laplacesdemon/django-youtube/blob/8051ef372473eccb053f773c68e2e5e1b2cfb538/django_youtube/views.py#L51-L90
def video(request, video_id): """ Displays a video in an embed player """ # Check video availability # Available states are: processing api = Api() api.authenticate() availability = api.check_upload_status(video_id) if availability is not True: # Video is not available video = Video.objects.filter(video_id=video_id).get() state = availability["upload_state"] # Add additional states here. I'm not sure what states are available if state == "failed" or state == "rejected": return render_to_response( "django_youtube/video_failed.html", {"video": video, "video_id": video_id, "message": _("Invalid video."), "availability": availability}, context_instance=RequestContext(request) ) else: return render_to_response( "django_youtube/video_unavailable.html", {"video": video, "video_id": video_id, "message": _("This video is currently being processed"), "availability": availability}, context_instance=RequestContext(request) ) video_params = _video_params(request, video_id) return render_to_response( "django_youtube/video.html", video_params, context_instance=RequestContext(request) )
[ "def", "video", "(", "request", ",", "video_id", ")", ":", "# Check video availability", "# Available states are: processing", "api", "=", "Api", "(", ")", "api", ".", "authenticate", "(", ")", "availability", "=", "api", ".", "check_upload_status", "(", "video_id...
Displays a video in an embed player
[ "Displays", "a", "video", "in", "an", "embed", "player" ]
python
test
jhermann/rudiments
src/rudiments/security.py
https://github.com/jhermann/rudiments/blob/028ec7237946115c7b18e50557cbc5f6b824653e/src/rudiments/security.py#L56-L60
def auth_pair(self, force_console=False): """Return username/password tuple, possibly prompting the user for them.""" if not self.auth_valid(): self._get_auth(force_console) return (self.user, self.password)
[ "def", "auth_pair", "(", "self", ",", "force_console", "=", "False", ")", ":", "if", "not", "self", ".", "auth_valid", "(", ")", ":", "self", ".", "_get_auth", "(", "force_console", ")", "return", "(", "self", ".", "user", ",", "self", ".", "password",...
Return username/password tuple, possibly prompting the user for them.
[ "Return", "username", "/", "password", "tuple", "possibly", "prompting", "the", "user", "for", "them", "." ]
python
train
hayalasalah/adhan.py
adhan/calculations.py
https://github.com/hayalasalah/adhan.py/blob/a7c080ba48f70be9801f048451d2c91a7d579602/adhan/calculations.py#L170-L195
def compute_time_at_sun_angle(day, latitude, angle): """Compute the floating point time difference between mid-day and an angle. All the prayers are defined as certain angles from mid-day (Zuhr). This formula is taken from praytimes.org/calculation :param day: The day to which to compute for :param longitude: Longitude of the place of interest :angle: The angle at which to compute the time :returns: The floating point time delta between Zuhr and the angle, the sign of the result corresponds to the sign of the angle """ positive_angle_rad = radians(abs(angle)) angle_sign = abs(angle)/angle latitude_rad = radians(latitude) declination = radians(sun_declination(day)) numerator = -sin(positive_angle_rad) - sin(latitude_rad) * sin(declination) denominator = cos(latitude_rad) * cos(declination) time_diff = degrees(acos(numerator/denominator)) / 15 return time_diff * angle_sign
[ "def", "compute_time_at_sun_angle", "(", "day", ",", "latitude", ",", "angle", ")", ":", "positive_angle_rad", "=", "radians", "(", "abs", "(", "angle", ")", ")", "angle_sign", "=", "abs", "(", "angle", ")", "/", "angle", "latitude_rad", "=", "radians", "(...
Compute the floating point time difference between mid-day and an angle. All the prayers are defined as certain angles from mid-day (Zuhr). This formula is taken from praytimes.org/calculation :param day: The day to which to compute for :param longitude: Longitude of the place of interest :angle: The angle at which to compute the time :returns: The floating point time delta between Zuhr and the angle, the sign of the result corresponds to the sign of the angle
[ "Compute", "the", "floating", "point", "time", "difference", "between", "mid", "-", "day", "and", "an", "angle", "." ]
python
train
bwohlberg/sporco
sporco/admm/bpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/bpdn.py#L1092-L1098
def setdict(self, D): """Set dictionary array.""" self.D = np.asarray(D, dtype=self.dtype) # Factorise dictionary for efficient solves self.lu, self.piv = sl.cho_factor(self.D, 1.0) self.lu = np.asarray(self.lu, dtype=self.dtype)
[ "def", "setdict", "(", "self", ",", "D", ")", ":", "self", ".", "D", "=", "np", ".", "asarray", "(", "D", ",", "dtype", "=", "self", ".", "dtype", ")", "# Factorise dictionary for efficient solves", "self", ".", "lu", ",", "self", ".", "piv", "=", "s...
Set dictionary array.
[ "Set", "dictionary", "array", "." ]
python
train
DataBiosphere/toil
src/toil/provisioners/aws/awsProvisioner.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/aws/awsProvisioner.py#L394-L406
def _waitForIP(cls, instance): """ Wait until the instances has a public IP address assigned to it. :type instance: boto.ec2.instance.Instance """ logger.debug('Waiting for ip...') while True: time.sleep(a_short_time) instance.update() if instance.ip_address or instance.public_dns_name or instance.private_ip_address: logger.debug('...got ip') break
[ "def", "_waitForIP", "(", "cls", ",", "instance", ")", ":", "logger", ".", "debug", "(", "'Waiting for ip...'", ")", "while", "True", ":", "time", ".", "sleep", "(", "a_short_time", ")", "instance", ".", "update", "(", ")", "if", "instance", ".", "ip_add...
Wait until the instances has a public IP address assigned to it. :type instance: boto.ec2.instance.Instance
[ "Wait", "until", "the", "instances", "has", "a", "public", "IP", "address", "assigned", "to", "it", "." ]
python
train
tensorflow/cleverhans
scripts/plot_success_fail_curve.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/plot_success_fail_curve.py#L25-L38
def main(argv=None): """Takes the path to a directory with reports and renders success fail plots.""" report_paths = argv[1:] fail_names = FLAGS.fail_names.split(',') for report_path in report_paths: plot_report_from_path(report_path, label=report_path, fail_names=fail_names) pyplot.legend() pyplot.xlim(-.01, 1.) pyplot.ylim(0., 1.) pyplot.show()
[ "def", "main", "(", "argv", "=", "None", ")", ":", "report_paths", "=", "argv", "[", "1", ":", "]", "fail_names", "=", "FLAGS", ".", "fail_names", ".", "split", "(", "','", ")", "for", "report_path", "in", "report_paths", ":", "plot_report_from_path", "(...
Takes the path to a directory with reports and renders success fail plots.
[ "Takes", "the", "path", "to", "a", "directory", "with", "reports", "and", "renders", "success", "fail", "plots", "." ]
python
train
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L859-L880
def list_extmods(): ''' .. versionadded:: 2017.7.0 List Salt modules which have been synced externally CLI Examples: .. code-block:: bash salt '*' saltutil.list_extmods ''' ret = {} ext_dir = os.path.join(__opts__['cachedir'], 'extmods') mod_types = os.listdir(ext_dir) for mod_type in mod_types: ret[mod_type] = set() for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)): for fh_ in files: ret[mod_type].add(fh_.split('.')[0]) ret[mod_type] = list(ret[mod_type]) return ret
[ "def", "list_extmods", "(", ")", ":", "ret", "=", "{", "}", "ext_dir", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'extmods'", ")", "mod_types", "=", "os", ".", "listdir", "(", "ext_dir", ")", "for", "mod_type"...
.. versionadded:: 2017.7.0 List Salt modules which have been synced externally CLI Examples: .. code-block:: bash salt '*' saltutil.list_extmods
[ "..", "versionadded", "::", "2017", ".", "7", ".", "0" ]
python
train
PaulHancock/Aegean
AegeanTools/fits_image.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L235-L254
def set_pixels(self, pixels): """ Set the image data. Will not work if the new image has a different shape than the current image. Parameters ---------- pixels : numpy.ndarray New image data Returns ------- None """ if not (pixels.shape == self._pixels.shape): raise AssertionError("Shape mismatch between pixels supplied {0} and existing image pixels {1}".format(pixels.shape,self._pixels.shape)) self._pixels = pixels # reset this so that it is calculated next time the function is called self._rms = None return
[ "def", "set_pixels", "(", "self", ",", "pixels", ")", ":", "if", "not", "(", "pixels", ".", "shape", "==", "self", ".", "_pixels", ".", "shape", ")", ":", "raise", "AssertionError", "(", "\"Shape mismatch between pixels supplied {0} and existing image pixels {1}\"",...
Set the image data. Will not work if the new image has a different shape than the current image. Parameters ---------- pixels : numpy.ndarray New image data Returns ------- None
[ "Set", "the", "image", "data", ".", "Will", "not", "work", "if", "the", "new", "image", "has", "a", "different", "shape", "than", "the", "current", "image", "." ]
python
train
pricingassistant/mrq
mrq/job.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L585-L602
def trace_memory_clean_caches(self): """ Avoid polluting results with some builtin python caches """ urllib.parse.clear_cache() re.purge() linecache.clearcache() copyreg.clear_extension_cache() if hasattr(fnmatch, "purge"): fnmatch.purge() # pylint: disable=no-member elif hasattr(fnmatch, "_purge"): fnmatch._purge() # pylint: disable=no-member if hasattr(encodings, "_cache") and len(encodings._cache) > 0: encodings._cache = {} for handler in context.log.handlers: handler.flush()
[ "def", "trace_memory_clean_caches", "(", "self", ")", ":", "urllib", ".", "parse", ".", "clear_cache", "(", ")", "re", ".", "purge", "(", ")", "linecache", ".", "clearcache", "(", ")", "copyreg", ".", "clear_extension_cache", "(", ")", "if", "hasattr", "("...
Avoid polluting results with some builtin python caches
[ "Avoid", "polluting", "results", "with", "some", "builtin", "python", "caches" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/main_window.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/main_window.py#L408-L432
def focus_notebook_page_of_controller(self, controller): """Puts the focus on the given child controller The method implements focus request of the notebooks in left side-bar of the main window. Thereby it is the master-function of focus pattern of the notebooks in left side-bar. Actual pattern is: * Execution-History is put to focus any time requested (request occur at the moment when the state-machine is started and stopped. * Modification-History one time focused while and one time after execution if requested. :param controller The controller which request to be focused. """ # TODO think about to may substitute Controller- by View-objects it is may the better design if controller not in self.get_child_controllers(): return # logger.info("focus controller {0}".format(controller)) if not self.modification_history_was_focused and isinstance(controller, ModificationHistoryTreeController) and \ self.view is not None: self.view.bring_tab_to_the_top('history') self.modification_history_was_focused = True if self.view is not None and isinstance(controller, ExecutionHistoryTreeController): self.view.bring_tab_to_the_top('execution_history') self.modification_history_was_focused = False
[ "def", "focus_notebook_page_of_controller", "(", "self", ",", "controller", ")", ":", "# TODO think about to may substitute Controller- by View-objects it is may the better design", "if", "controller", "not", "in", "self", ".", "get_child_controllers", "(", ")", ":", "return", ...
Puts the focus on the given child controller The method implements focus request of the notebooks in left side-bar of the main window. Thereby it is the master-function of focus pattern of the notebooks in left side-bar. Actual pattern is: * Execution-History is put to focus any time requested (request occur at the moment when the state-machine is started and stopped. * Modification-History one time focused while and one time after execution if requested. :param controller The controller which request to be focused.
[ "Puts", "the", "focus", "on", "the", "given", "child", "controller" ]
python
train
apache/airflow
scripts/perf/scheduler_ops_metrics.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L65-L101
def print_stats(self): """ Print operational metrics for the scheduler test. """ session = settings.Session() TI = TaskInstance tis = ( session .query(TI) .filter(TI.dag_id.in_(DAG_IDS)) .all() ) successful_tis = [x for x in tis if x.state == State.SUCCESS] ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date, (ti.queued_dttm - self.start_date).total_seconds(), (ti.start_date - self.start_date).total_seconds(), (ti.end_date - self.start_date).total_seconds(), ti.duration) for ti in successful_tis] ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id', 'execution_date', 'queue_delay', 'start_delay', 'land_time', 'duration']) print('Performance Results') print('###################') for dag_id in DAG_IDS: print('DAG {}'.format(dag_id)) print(ti_perf_df[ti_perf_df['dag_id'] == dag_id]) print('###################') if len(tis) > len(successful_tis): print("WARNING!! The following task instances haven't completed") print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state) for ti in filter(lambda x: x.state != State.SUCCESS, tis)], columns=['dag_id', 'task_id', 'execution_date', 'state'])) session.commit()
[ "def", "print_stats", "(", "self", ")", ":", "session", "=", "settings", ".", "Session", "(", ")", "TI", "=", "TaskInstance", "tis", "=", "(", "session", ".", "query", "(", "TI", ")", ".", "filter", "(", "TI", ".", "dag_id", ".", "in_", "(", "DAG_I...
Print operational metrics for the scheduler test.
[ "Print", "operational", "metrics", "for", "the", "scheduler", "test", "." ]
python
test
qba73/circleclient
circleclient/circleclient.py
https://github.com/qba73/circleclient/blob/8bf5b093e416c899cc39e43a770c17a5466487b0/circleclient/circleclient.py#L46-L55
def client_post(self, url, **kwargs): """Send POST request with given url and keyword args.""" response = requests.post(self.make_url(url), data=json.dumps(kwargs), headers=self.headers) if not response.ok: raise Exception( '{status}: {reason}.\nCircleCI Status NOT OK'.format( status=response.status_code, reason=response.reason)) return response.json()
[ "def", "client_post", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "make_url", "(", "url", ")", ",", "data", "=", "json", ".", "dumps", "(", "kwargs", ")", ",", "headers", ...
Send POST request with given url and keyword args.
[ "Send", "POST", "request", "with", "given", "url", "and", "keyword", "args", "." ]
python
train
romanz/trezor-agent
libagent/ssh/__init__.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/__init__.py#L206-L211
def public_keys(self): """Return a list of SSH public keys (in textual format).""" if not self.public_keys_cache: conn = self.conn_factory() self.public_keys_cache = conn.export_public_keys(self.identities) return self.public_keys_cache
[ "def", "public_keys", "(", "self", ")", ":", "if", "not", "self", ".", "public_keys_cache", ":", "conn", "=", "self", ".", "conn_factory", "(", ")", "self", ".", "public_keys_cache", "=", "conn", ".", "export_public_keys", "(", "self", ".", "identities", "...
Return a list of SSH public keys (in textual format).
[ "Return", "a", "list", "of", "SSH", "public", "keys", "(", "in", "textual", "format", ")", "." ]
python
train
globocom/globomap-loader-api-client
globomap_loader_api_client/update.py
https://github.com/globocom/globomap-loader-api-client/blob/b12347ca77d245de1abd604d1b694162156570e6/globomap_loader_api_client/update.py#L40-L54
def get(self, key): """Return the status from a job. :param key: id of job :type document: dict or list :return: message with location of job :rtype: dict :raises Unauthorized: if API returns status 401 :raises Forbidden: if API returns status 403 :raises NotFound: if API returns status 404 :raises ApiError: if API returns other status """ uri = 'updates/job/{}'.format(key) return self.make_request(method='GET', uri=uri)
[ "def", "get", "(", "self", ",", "key", ")", ":", "uri", "=", "'updates/job/{}'", ".", "format", "(", "key", ")", "return", "self", ".", "make_request", "(", "method", "=", "'GET'", ",", "uri", "=", "uri", ")" ]
Return the status from a job. :param key: id of job :type document: dict or list :return: message with location of job :rtype: dict :raises Unauthorized: if API returns status 401 :raises Forbidden: if API returns status 403 :raises NotFound: if API returns status 404 :raises ApiError: if API returns other status
[ "Return", "the", "status", "from", "a", "job", "." ]
python
train
caffeinehit/django-oauth2-provider
provider/oauth2/models.py
https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/oauth2/models.py#L149-L166
def get_expire_delta(self, reference=None): """ Return the number of seconds until this token expires. """ if reference is None: reference = now() expiration = self.expires if timezone: if timezone.is_aware(reference) and timezone.is_naive(expiration): # MySQL doesn't support timezone for datetime fields # so we assume that the date was stored in the UTC timezone expiration = timezone.make_aware(expiration, timezone.utc) elif timezone.is_naive(reference) and timezone.is_aware(expiration): reference = timezone.make_aware(reference, timezone.utc) timedelta = expiration - reference return timedelta.days*86400 + timedelta.seconds
[ "def", "get_expire_delta", "(", "self", ",", "reference", "=", "None", ")", ":", "if", "reference", "is", "None", ":", "reference", "=", "now", "(", ")", "expiration", "=", "self", ".", "expires", "if", "timezone", ":", "if", "timezone", ".", "is_aware",...
Return the number of seconds until this token expires.
[ "Return", "the", "number", "of", "seconds", "until", "this", "token", "expires", "." ]
python
train
blockstack/blockstack-core
blockstack/lib/subdomains.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L224-L232
def serialize_to_txt(self): """ Serialize this subdomain record to a TXT record. The trailing newline will be omitted """ txtrec = { 'name': self.fqn if self.independent else self.subdomain, 'txt': self.pack_subdomain()[1:] } return blockstack_zones.record_processors.process_txt([txtrec], '{txt}').strip()
[ "def", "serialize_to_txt", "(", "self", ")", ":", "txtrec", "=", "{", "'name'", ":", "self", ".", "fqn", "if", "self", ".", "independent", "else", "self", ".", "subdomain", ",", "'txt'", ":", "self", ".", "pack_subdomain", "(", ")", "[", "1", ":", "]...
Serialize this subdomain record to a TXT record. The trailing newline will be omitted
[ "Serialize", "this", "subdomain", "record", "to", "a", "TXT", "record", ".", "The", "trailing", "newline", "will", "be", "omitted" ]
python
train
Cog-Creators/Red-Lavalink
lavalink/node.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/node.py#L335-L368
def get_node(guild_id: int, ignore_ready_status: bool = False) -> Node: """ Gets a node based on a guild ID, useful for noding separation. If the guild ID does not already have a node association, the least used node is returned. Skips over nodes that are not yet ready. Parameters ---------- guild_id : int ignore_ready_status : bool Returns ------- Node """ guild_count = 1e10 least_used = None for node in _nodes: guild_ids = node.player_manager.guild_ids if ignore_ready_status is False and not node.ready.is_set(): continue elif len(guild_ids) < guild_count: guild_count = len(guild_ids) least_used = node if guild_id in guild_ids: return node if least_used is None: raise IndexError("No nodes found.") return least_used
[ "def", "get_node", "(", "guild_id", ":", "int", ",", "ignore_ready_status", ":", "bool", "=", "False", ")", "->", "Node", ":", "guild_count", "=", "1e10", "least_used", "=", "None", "for", "node", "in", "_nodes", ":", "guild_ids", "=", "node", ".", "play...
Gets a node based on a guild ID, useful for noding separation. If the guild ID does not already have a node association, the least used node is returned. Skips over nodes that are not yet ready. Parameters ---------- guild_id : int ignore_ready_status : bool Returns ------- Node
[ "Gets", "a", "node", "based", "on", "a", "guild", "ID", "useful", "for", "noding", "separation", ".", "If", "the", "guild", "ID", "does", "not", "already", "have", "a", "node", "association", "the", "least", "used", "node", "is", "returned", ".", "Skips"...
python
train
llazzaro/analyzerstrategies
analyzerstrategies/sma_portfolio_strategy.py
https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L131-L150
def __placeSellShortOrder(self, tick): ''' place short sell order''' share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close)) sellShortOrder=Order(accountId=self.__strategy.accountId, action=Action.SELL_SHORT, is_market=True, security=self.__security, share=share) if self.__strategy.placeOrder(sellShortOrder): self.__buyOrder=sellShortOrder # place stop order stopOrder=Order(accountId=self.__strategy.accountId, action=Action.BUY_TO_COVER, is_stop=True, security=self.__security, price=tick.close * 1.05, share=0 - share) self.__placeStopOrder(stopOrder)
[ "def", "__placeSellShortOrder", "(", "self", ",", "tick", ")", ":", "share", "=", "math", ".", "floor", "(", "self", ".", "__strategy", ".", "getAccountCopy", "(", ")", ".", "getCash", "(", ")", "/", "float", "(", "tick", ".", "close", ")", ")", "sel...
place short sell order
[ "place", "short", "sell", "order" ]
python
train
dsoprea/NsqSpinner
nsq/connection.py
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L78-L84
def flush(self): """Return all buffered data, and clear the stack.""" (slice_, self.__buffer) = (self.__buffer, '') self.__size = 0 return slice_
[ "def", "flush", "(", "self", ")", ":", "(", "slice_", ",", "self", ".", "__buffer", ")", "=", "(", "self", ".", "__buffer", ",", "''", ")", "self", ".", "__size", "=", "0", "return", "slice_" ]
Return all buffered data, and clear the stack.
[ "Return", "all", "buffered", "data", "and", "clear", "the", "stack", "." ]
python
train
soravux/scoop
scoop/backports/runpy.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/backports/runpy.py#L165-L181
def run_module(mod_name, init_globals=None, run_name=None, alter_sys=False): """Execute a module's code without importing it Returns the resulting top level namespace dictionary """ mod_name, loader, code, fname = _get_module_details(mod_name) if run_name is None: run_name = mod_name pkg_name = mod_name.rpartition('.')[0] if alter_sys: return _run_module_code(code, init_globals, run_name, fname, loader, pkg_name) else: # Leave the sys module alone return _run_code(code, {}, init_globals, run_name, fname, loader, pkg_name)
[ "def", "run_module", "(", "mod_name", ",", "init_globals", "=", "None", ",", "run_name", "=", "None", ",", "alter_sys", "=", "False", ")", ":", "mod_name", ",", "loader", ",", "code", ",", "fname", "=", "_get_module_details", "(", "mod_name", ")", "if", ...
Execute a module's code without importing it Returns the resulting top level namespace dictionary
[ "Execute", "a", "module", "s", "code", "without", "importing", "it" ]
python
train
cirruscluster/cirruscluster
cirruscluster/cluster/mapr.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/cluster/mapr.py#L1033-L1043
def __CleanUpRootPartition(self): """ Mapr signals an alarm in the web ui when there are files in a nodes /opt/cores dir. This is anoying because we can't see if there is a real problem with the node because of this pointless error message. This removes those files so the alarm goes away. """ instances = self.__GetAllInstances() clean_cores_cmd = """sudo sh -c 'rm -rf /opt/mapr/cache/tmp/*'""" self.__RunCommandOnInstances(clean_cores_cmd, instances) return
[ "def", "__CleanUpRootPartition", "(", "self", ")", ":", "instances", "=", "self", ".", "__GetAllInstances", "(", ")", "clean_cores_cmd", "=", "\"\"\"sudo sh -c 'rm -rf /opt/mapr/cache/tmp/*'\"\"\"", "self", ".", "__RunCommandOnInstances", "(", "clean_cores_cmd", ",", "ins...
Mapr signals an alarm in the web ui when there are files in a nodes /opt/cores dir. This is anoying because we can't see if there is a real problem with the node because of this pointless error message. This removes those files so the alarm goes away.
[ "Mapr", "signals", "an", "alarm", "in", "the", "web", "ui", "when", "there", "are", "files", "in", "a", "nodes", "/", "opt", "/", "cores", "dir", ".", "This", "is", "anoying", "because", "we", "can", "t", "see", "if", "there", "is", "a", "real", "p...
python
train
pytroll/satpy
satpy/readers/fci_l1c_fdhsi.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/fci_l1c_fdhsi.py#L223-L234
def _vis_calibrate(self, data, key): """VIS channel calibration.""" # radiance to reflectance taken as in mipp/xrit/MSG.py # again FCI User Guide is not clear on how to do this sirr = self.nc[ '/data/{}/measured/channel_effective_solar_irradiance' .format(key.name)][...] # reflectance = radiance / sirr * 100 data.data[:] /= sirr data.data[:] *= 100
[ "def", "_vis_calibrate", "(", "self", ",", "data", ",", "key", ")", ":", "# radiance to reflectance taken as in mipp/xrit/MSG.py", "# again FCI User Guide is not clear on how to do this", "sirr", "=", "self", ".", "nc", "[", "'/data/{}/measured/channel_effective_solar_irradiance'...
VIS channel calibration.
[ "VIS", "channel", "calibration", "." ]
python
train
Gorialis/jishaku
jishaku/repl/compilation.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/repl/compilation.py#L49-L106
def wrap_code(code: str, args: str = '') -> ast.Module: """ Compiles Python code into an async function or generator, and automatically adds return if the function body is a single evaluation. Also adds inline import expression support. """ if sys.version_info >= (3, 7): user_code = import_expression.parse(code, mode='exec') injected = '' else: injected = code mod = import_expression.parse(CORO_CODE.format(args, textwrap.indent(injected, ' ' * 8)), mode='exec') definition = mod.body[-1] # async def ...: assert isinstance(definition, ast.AsyncFunctionDef) try_block = definition.body[-1] # try: assert isinstance(try_block, ast.Try) if sys.version_info >= (3, 7): try_block.body.extend(user_code.body) else: ast.increment_lineno(mod, -16) # bring line numbers back in sync with repl ast.fix_missing_locations(mod) is_asyncgen = any(isinstance(node, ast.Yield) for node in ast.walk(try_block)) last_expr = try_block.body[-1] # if the last part isn't an expression, ignore it if not isinstance(last_expr, ast.Expr): return mod # if the last expression is not a yield if not isinstance(last_expr.value, ast.Yield): # copy the expression into a return/yield if is_asyncgen: # copy the value of the expression into a yield yield_stmt = ast.Yield(last_expr.value) ast.copy_location(yield_stmt, last_expr) # place the yield into its own expression yield_expr = ast.Expr(yield_stmt) ast.copy_location(yield_expr, last_expr) # place the yield where the original expression was try_block.body[-1] = yield_expr else: # copy the expression into a return return_stmt = ast.Return(last_expr.value) ast.copy_location(return_stmt, last_expr) # place the return where the original expression was try_block.body[-1] = return_stmt return mod
[ "def", "wrap_code", "(", "code", ":", "str", ",", "args", ":", "str", "=", "''", ")", "->", "ast", ".", "Module", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "7", ")", ":", "user_code", "=", "import_expression", ".", "parse", "(", ...
Compiles Python code into an async function or generator, and automatically adds return if the function body is a single evaluation. Also adds inline import expression support.
[ "Compiles", "Python", "code", "into", "an", "async", "function", "or", "generator", "and", "automatically", "adds", "return", "if", "the", "function", "body", "is", "a", "single", "evaluation", ".", "Also", "adds", "inline", "import", "expression", "support", ...
python
train
Richienb/quilt
src/quilt_lang/__init__.py
https://github.com/Richienb/quilt/blob/4a659cac66f5286ad046d54a12fd850be5606643/src/quilt_lang/__init__.py#L584-L603
def reversetext(contenttoreverse, reconvert=True): """ Reverse any content :type contenttoreverse: string :param contenttoreverse: The content to be reversed :type reeval: boolean :param reeval: Wether or not to reconvert the object back into it's initial state. Default is "True". """ # If reconvert is specified if reconvert is True: # Return the evalated form return eval( str(type(contenttoreverse)).split("'")[1] + "('" + str(contenttoreverse)[::-1] + "')") # Return the raw version return contenttoreverse[::-1]
[ "def", "reversetext", "(", "contenttoreverse", ",", "reconvert", "=", "True", ")", ":", "# If reconvert is specified", "if", "reconvert", "is", "True", ":", "# Return the evalated form", "return", "eval", "(", "str", "(", "type", "(", "contenttoreverse", ")", ")",...
Reverse any content :type contenttoreverse: string :param contenttoreverse: The content to be reversed :type reeval: boolean :param reeval: Wether or not to reconvert the object back into it's initial state. Default is "True".
[ "Reverse", "any", "content" ]
python
train
datascopeanalytics/traces
traces/timeseries.py
https://github.com/datascopeanalytics/traces/blob/420611151a05fea88a07bc5200fefffdc37cc95b/traces/timeseries.py#L244-L250
def exists(self): """returns False when the timeseries has a None value, True otherwise""" result = TimeSeries(default=False if self.default is None else True) for t, v in self: result[t] = False if v is None else True return result
[ "def", "exists", "(", "self", ")", ":", "result", "=", "TimeSeries", "(", "default", "=", "False", "if", "self", ".", "default", "is", "None", "else", "True", ")", "for", "t", ",", "v", "in", "self", ":", "result", "[", "t", "]", "=", "False", "i...
returns False when the timeseries has a None value, True otherwise
[ "returns", "False", "when", "the", "timeseries", "has", "a", "None", "value", "True", "otherwise" ]
python
train