Dataset schema (column name: type, with observed value sizes):

repo — string, 7 to 54 characters
path — string, 4 to 192 characters
url — string, 87 to 284 characters
code — string, 78 to 104k characters
code_tokens — list
docstring — string, 1 to 46.9k characters
docstring_tokens — list
language — categorical, 1 distinct value
partition — categorical, 3 distinct values
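This layout matches the CodeSearchNet-style code/docstring corpora. As a sketch only — assuming the Hugging Face `datasets` package is installed and that this dump corresponds to the corpus published on the Hub as `code_search_net` (an assumption; column names there may differ from the dump below) — the records could be loaded programmatically:

from datasets import load_dataset

# "code_search_net" and the "python" config are assumptions about the
# dump's origin; inspect column_names rather than hard-coding keys.
ds = load_dataset("code_search_net", "python", split="train")
print(ds.column_names)
print(ds[0])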
okpy/ok-client
client/utils/network.py
https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/network.py#L14-L24
def check_ssl():
    """Attempts to import SSL or raises an exception."""
    try:
        import ssl
    except:
        log.warning('Error importing SSL module', stack_info=True)
        print(SSL_ERROR_MESSAGE)
        sys.exit(1)
    else:
        log.info('SSL module is available')
        return ssl
[ "def", "check_ssl", "(", ")", ":", "try", ":", "import", "ssl", "except", ":", "log", ".", "warning", "(", "'Error importing SSL module'", ",", "stack_info", "=", "True", ")", "print", "(", "SSL_ERROR_MESSAGE", ")", "sys", ".", "exit", "(", "1", ")", "el...
Attempts to import SSL or raises an exception.
[ "Attempts", "to", "import", "SSL", "or", "raises", "an", "exception", "." ]
python
train
pyqt/python-qt5
PyQt5/uic/__init__.py
https://github.com/pyqt/python-qt5/blob/c9ed180c56f6fd3521ffe5fb70904bc5d3f50e5f/PyQt5/uic/__init__.py#L206-L226
def loadUi(uifile, baseinstance=None, package='', resource_suffix='_rc'):
    """loadUi(uifile, baseinstance=None, package='') -> widget

    Load a Qt Designer .ui file and return an instance of the user interface.

    uifile is a file name or file-like object containing the .ui file.
    baseinstance is an optional instance of the Qt base class. If specified
    then the user interface is created in it. Otherwise a new instance of
    the base class is automatically created.
    package is the optional package which is used as the base for any
    relative imports of custom widgets.
    resource_suffix is the suffix appended to the basename of any resource
    file specified in the .ui file to create the name of the Python module
    generated from the resource file by pyrcc4. The default is '_rc', i.e.
    if the .ui file specified a resource file called foo.qrc then the
    corresponding Python module is foo_rc.
    """
    from .Loader.loader import DynamicUILoader
    return DynamicUILoader(package).loadUi(uifile, baseinstance, resource_suffix)
[ "def", "loadUi", "(", "uifile", ",", "baseinstance", "=", "None", ",", "package", "=", "''", ",", "resource_suffix", "=", "'_rc'", ")", ":", "from", ".", "Loader", ".", "loader", "import", "DynamicUILoader", "return", "DynamicUILoader", "(", "package", ")", ...
loadUi(uifile, baseinstance=None, package='') -> widget Load a Qt Designer .ui file and return an instance of the user interface. uifile is a file name or file-like object containing the .ui file. baseinstance is an optional instance of the Qt base class. If specified then the user interface is created in it. Otherwise a new instance of the base class is automatically created. package is the optional package which is used as the base for any relative imports of custom widgets. resource_suffix is the suffix appended to the basename of any resource file specified in the .ui file to create the name of the Python module generated from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui file specified a resource file called foo.qrc then the corresponding Python module is foo_rc.
[ "loadUi", "(", "uifile", "baseinstance", "=", "None", "package", "=", ")", "-", ">", "widget" ]
python
train
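This loader backs the public PyQt5.uic.loadUi entry point; a minimal usage sketch, assuming PyQt5 is installed and a "dialog.ui" file designed with Qt Designer exists:

import sys
from PyQt5 import QtWidgets, uic

app = QtWidgets.QApplication(sys.argv)
widget = uic.loadUi("dialog.ui")  # new instance of the form's base class
widget.show()
sys.exit(app.exec_())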
dj-stripe/dj-stripe
djstripe/models/core.py
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/core.py#L816-L852
def has_active_subscription(self, plan=None):
    """
    Checks to see if this customer has an active subscription to the given plan.

    :param plan: The plan for which to check for an active subscription.
        If plan is None and there exists only one active subscription,
        this method will check if that subscription is valid.
        Calling this method with no plan and multiple valid subscriptions
        for this customer will throw an exception.
    :type plan: Plan or string (plan ID)

    :returns: True if there exists an active subscription, False otherwise.
    :throws: TypeError if ``plan`` is None and more than one active subscription
        exists for this customer.
    """
    if plan is None:
        valid_subscriptions = self._get_valid_subscriptions()

        if len(valid_subscriptions) == 0:
            return False
        elif len(valid_subscriptions) == 1:
            return True
        else:
            raise TypeError(
                "plan cannot be None if more than one valid subscription "
                "exists for this customer."
            )
    else:
        # Convert Plan to id
        if isinstance(plan, StripeModel):
            plan = plan.id

        return any(
            [
                subscription.is_valid()
                for subscription in self.subscriptions.filter(plan__id=plan)
            ]
        )
[ "def", "has_active_subscription", "(", "self", ",", "plan", "=", "None", ")", ":", "if", "plan", "is", "None", ":", "valid_subscriptions", "=", "self", ".", "_get_valid_subscriptions", "(", ")", "if", "len", "(", "valid_subscriptions", ")", "==", "0", ":", ...
Checks to see if this customer has an active subscription to the given plan. :param plan: The plan for which to check for an active subscription. If plan is None and there exists only one active subscription, this method will check if that subscription is valid. Calling this method with no plan and multiple valid subscriptions for this customer will throw an exception. :type plan: Plan or string (plan ID) :returns: True if there exists an active subscription, False otherwise. :throws: TypeError if ``plan`` is None and more than one active subscription exists for this customer.
[ "Checks", "to", "see", "if", "this", "customer", "has", "an", "active", "subscription", "to", "the", "given", "plan", "." ]
python
train
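A call site for has_active_subscription might look like the sketch below; the customer object, the plan ID, and both helper functions are hypothetical, illustrating only the documented behavior, including the TypeError raised for plan=None with multiple valid subscriptions:

# `customer` is assumed to be a dj-stripe Customer; "plan_basic" and the
# helpers are hypothetical names used only for illustration.
if customer.has_active_subscription(plan="plan_basic"):
    grant_access()        # hypothetical
else:
    show_billing_page()   # hypothetical

try:
    customer.has_active_subscription()  # plan=None
except TypeError:
    # more than one valid subscription exists for this customer
    pass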
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L268-L273
def evaluate_tour_P(self, tour):
    """
    Use Cythonized version to evaluate the score of a current tour,
    with better precision on the distance of the contigs.
    """
    from .chic import score_evaluate_P
    return score_evaluate_P(tour, self.active_sizes, self.P)
[ "def", "evaluate_tour_P", "(", "self", ",", "tour", ")", ":", "from", ".", "chic", "import", "score_evaluate_P", "return", "score_evaluate_P", "(", "tour", ",", "self", ".", "active_sizes", ",", "self", ".", "P", ")" ]
Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs.
[ "Use", "Cythonized", "version", "to", "evaluate", "the", "score", "of", "a", "current", "tour", "with", "better", "precision", "on", "the", "distance", "of", "the", "contigs", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/learn/get_realtime_data.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/get_realtime_data.py#L112-L122
def _example_get_trade_days(quote_ctx):
    """
    Get the list of trading days and print it.
    """
    ret_status, ret_data = quote_ctx.get_trading_days("US", None, None)
    if ret_status != ft.RET_OK:
        print(ret_data)
        exit()
    print("TRADING DAYS")
    for x in ret_data:
        print(x)
[ "def", "_example_get_trade_days", "(", "quote_ctx", ")", ":", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "get_trading_days", "(", "\"US\"", ",", "None", ",", "None", ")", "if", "ret_status", "!=", "ft", ".", "RET_OK", ":", "print", "(", "ret_data"...
Get the list of trading days and print it.
[ "获取交易日列表,输出", "交易日列表" ]
python
train
projectatomic/atomic-reactor
atomic_reactor/plugins/exit_koji_promote.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/exit_koji_promote.py#L642-L662
def upload_file(self, session, output, serverdir):
    """
    Upload a file to koji

    :return: str, pathname on server
    """
    name = output.metadata['filename']
    self.log.debug("uploading %r to %r as %r",
                   output.file.name, serverdir, name)

    kwargs = {}
    if self.blocksize is not None:
        kwargs['blocksize'] = self.blocksize
        self.log.debug("using blocksize %d", self.blocksize)

    upload_logger = KojiUploadLogger(self.log)
    session.uploadWrapper(output.file.name, serverdir, name=name,
                          callback=upload_logger.callback, **kwargs)
    path = os.path.join(serverdir, name)
    self.log.debug("uploaded %r", path)
    return path
[ "def", "upload_file", "(", "self", ",", "session", ",", "output", ",", "serverdir", ")", ":", "name", "=", "output", ".", "metadata", "[", "'filename'", "]", "self", ".", "log", ".", "debug", "(", "\"uploading %r to %r as %r\"", ",", "output", ".", "file",...
Upload a file to koji :return: str, pathname on server
[ "Upload", "a", "file", "to", "koji" ]
python
train
awslabs/serverless-application-model
samtranslator/intrinsics/actions.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/actions.py#L520-L555
def resolve_parameter_refs(self, input_dict, parameters):
    """
    Recursively resolves "Fn::FindInMap" references that are present in the
    mappings and returns the value. If it is not in mappings, this method
    simply returns the input unchanged.

    :param input_dict: Dictionary representing the FindInMap function. Must
        contain only one key and it should be "Fn::FindInMap".
    :param parameters: Dictionary of mappings from the SAM template
    """
    if not self.can_handle(input_dict):
        return input_dict

    value = input_dict[self.intrinsic_name]

    # FindInMap expects an array with 3 values
    if not isinstance(value, list) or len(value) != 3:
        raise InvalidDocumentException(
            [InvalidTemplateException(
                'Invalid FindInMap value {}. FindInMap expects an array '
                'with 3 values.'.format(value))])

    map_name = self.resolve_parameter_refs(value[0], parameters)
    top_level_key = self.resolve_parameter_refs(value[1], parameters)
    second_level_key = self.resolve_parameter_refs(value[2], parameters)

    if not isinstance(map_name, string_types) or \
            not isinstance(top_level_key, string_types) or \
            not isinstance(second_level_key, string_types):
        return input_dict

    if map_name not in parameters or \
            top_level_key not in parameters[map_name] or \
            second_level_key not in parameters[map_name][top_level_key]:
        return input_dict

    return parameters[map_name][top_level_key][second_level_key]
[ "def", "resolve_parameter_refs", "(", "self", ",", "input_dict", ",", "parameters", ")", ":", "if", "not", "self", ".", "can_handle", "(", "input_dict", ")", ":", "return", "input_dict", "value", "=", "input_dict", "[", "self", ".", "intrinsic_name", "]", "#...
Recursively resolves "Fn::FindInMap" references that are present in the mappings and returns the value. If it is not in mappings, this method simply returns the input unchanged. :param input_dict: Dictionary representing the FindInMap function. Must contain only one key and it should be "Fn::FindInMap". :param parameters: Dictionary of mappings from the SAM template
[ "Recursively", "resolves", "Fn", "::", "FindInMap", "references", "that", "are", "present", "in", "the", "mappings", "and", "returns", "the", "value", ".", "If", "it", "is", "not", "in", "mappings", "this", "method", "simply", "returns", "the", "input", "unc...
python
train
IEMLdev/ieml
ieml/grammar/parser/parser.py
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L73-L82
def parse(self, s):
    """Parses the input string, and returns a reference to the created AST's root"""
    with self.lock:
        try:
            return self.parser.parse(s, lexer=self.lexer)
        except InvalidIEMLObjectArgument as e:
            raise CannotParse(s, str(e))
        except CannotParse as e:
            e.s = s
            raise e
[ "def", "parse", "(", "self", ",", "s", ")", ":", "with", "self", ".", "lock", ":", "try", ":", "return", "self", ".", "parser", ".", "parse", "(", "s", ",", "lexer", "=", "self", ".", "lexer", ")", "except", "InvalidIEMLObjectArgument", "as", "e", ...
Parses the input string, and returns a reference to the created AST's root
[ "Parses", "the", "input", "string", "and", "returns", "a", "reference", "to", "the", "created", "AST", "s", "root" ]
python
test
HewlettPackard/python-hpOneView
hpOneView/oneview_client.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/oneview_client.py#L483-L492
def id_pools_vwwn_ranges(self):
    """
    Gets the IdPoolsRanges API Client for VWWN Ranges.

    Returns:
        IdPoolsRanges:
    """
    if not self.__id_pools_vwwn_ranges:
        self.__id_pools_vwwn_ranges = IdPoolsRanges('vwwn', self.__connection)
    return self.__id_pools_vwwn_ranges
[ "def", "id_pools_vwwn_ranges", "(", "self", ")", ":", "if", "not", "self", ".", "__id_pools_vwwn_ranges", ":", "self", ".", "__id_pools_vwwn_ranges", "=", "IdPoolsRanges", "(", "'vwwn'", ",", "self", ".", "__connection", ")", "return", "self", ".", "__id_pools_v...
Gets the IdPoolsRanges API Client for VWWN Ranges. Returns: IdPoolsRanges:
[ "Gets", "the", "IdPoolsRanges", "API", "Client", "for", "VWWN", "Ranges", "." ]
python
train
serge-sans-paille/pythran
pythran/tables.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/tables.py#L4553-L4583
def save_arguments(module_name, elements):
    """ Recursively save arguments name and default value. """
    for elem, signature in elements.items():
        if isinstance(signature, dict):  # Submodule case
            save_arguments(module_name + (elem,), signature)
        else:
            # use introspection to get the Python obj
            try:
                themodule = __import__(".".join(module_name))
                obj = getattr(themodule, elem)
                spec = getfullargspec(obj)
                if signature.args.args:
                    logger.warn(
                        "Overriding pythran description with argspec "
                        "information for: {}".format(
                            ".".join(module_name + (elem,))))
                args = [ast.Name(arg, ast.Param(), None)
                        for arg in spec.args]
                defaults = list(spec.defaults or [])
                if sys.version_info.major == 3:
                    args += [ast.Name(arg, ast.Param(), None)
                             for arg in spec.kwonlyargs]
                    defaults += [spec.kwonlydefaults[kw]
                                 for kw in spec.kwonlyargs]
                # Avoid use of comprehension to fill as many args/defaults
                # as possible
                signature.args.args = args[:-len(defaults)]
                signature.args.defaults = []
                for arg, value in zip(args[-len(defaults):], defaults):
                    signature.args.defaults.append(to_ast(value))
                    signature.args.args.append(arg)
            except (AttributeError, ImportError, TypeError, ToNotEval):
                pass
[ "def", "save_arguments", "(", "module_name", ",", "elements", ")", ":", "for", "elem", ",", "signature", "in", "elements", ".", "items", "(", ")", ":", "if", "isinstance", "(", "signature", ",", "dict", ")", ":", "# Submodule case", "save_arguments", "(", ...
Recursively save arguments name and default value.
[ "Recursively", "save", "arguments", "name", "and", "default", "value", "." ]
python
train
trailofbits/manticore
manticore/platforms/evm.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/evm.py#L1275-L1278
def BYTE(self, offset, value):
    """Retrieve single byte from word"""
    offset = Operators.ITEBV(256, offset < 32, (31 - offset) * 8, 256)
    return Operators.ZEXTEND(Operators.EXTRACT(value, offset, 8), 256)
[ "def", "BYTE", "(", "self", ",", "offset", ",", "value", ")", ":", "offset", "=", "Operators", ".", "ITEBV", "(", "256", ",", "offset", "<", "32", ",", "(", "31", "-", "offset", ")", "*", "8", ",", "256", ")", "return", "Operators", ".", "ZEXTEND...
Retrieve single byte from word
[ "Retrieve", "single", "byte", "from", "word" ]
python
valid
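The method above builds a symbolic expression with Manticore's Operators, but the EVM BYTE semantics it encodes (byte 0 is the most significant byte of the 256-bit word; offsets of 32 or more read as zero) can be checked on concrete integers. A plain-Python model, not Manticore's code:

# Concrete-integer sketch of EVM BYTE semantics.
def evm_byte(offset: int, value: int) -> int:
    if offset >= 32:                      # out-of-range offsets read as zero
        return 0
    return (value >> ((31 - offset) * 8)) & 0xFF

assert evm_byte(31, 0x1122) == 0x22       # byte 31 is the least significant
assert evm_byte(0, 0x11 << 248) == 0x11   # byte 0 is the most significant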
marcomusy/vtkplotter
vtkplotter/vtkio.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L281-L289
def loadRectilinearGrid(filename):  # not tested
    """Load a ``vtkRectilinearGrid`` object from file
    and return a ``Actor(vtkActor)`` object."""
    reader = vtk.vtkRectilinearGridReader()
    reader.SetFileName(filename)
    reader.Update()
    gf = vtk.vtkRectilinearGridGeometryFilter()
    gf.SetInputConnection(reader.GetOutputPort())
    gf.Update()
    return Actor(gf.GetOutput())
[ "def", "loadRectilinearGrid", "(", "filename", ")", ":", "# not tested", "reader", "=", "vtk", ".", "vtkRectilinearGridReader", "(", ")", "reader", ".", "SetFileName", "(", "filename", ")", "reader", ".", "Update", "(", ")", "gf", "=", "vtk", ".", "vtkRectil...
Load a ``vtkRectilinearGrid`` object from file and return a ``Actor(vtkActor)`` object.
[ "Load", "a", "vtkRectilinearGrid", "object", "from", "file", "and", "return", "a", "Actor", "(", "vtkActor", ")", "object", "." ]
python
train
shi-cong/PYSTUDY
PYSTUDY/timelib.py
https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/timelib.py#L7-L17
def func_runtime(callback):
    """
    Measure a function's execution time.
    :param callback: the function to time
    :return: the elapsed time (printed)
    """
    now = time.time()
    print('%s - start: %f' % (callback, now))
    callback()
    now = time.time() - now
    print('%s - spent: %f Second' % (callback, now))
[ "def", "func_runtime", "(", "callback", ")", ":", "now", "=", "time", ".", "time", "(", ")", "print", "(", "'%s - start: %f'", "%", "(", "callback", ",", "now", ")", ")", "callback", "(", ")", "now", "=", "time", ".", "time", "(", ")", "-", "now", ...
Measure a function's execution time. :param callback: the function to time :return: the elapsed time
[ "测试函数执行的时间", ":", "param", "callback", ":", "函数", ":", "return", ":", "时间" ]
python
train
cwacek/python-jsonschema-objects
python_jsonschema_objects/classbuilder.py
https://github.com/cwacek/python-jsonschema-objects/blob/54c82bfaec9c099c472663742abfc7de373a5e49/python_jsonschema_objects/classbuilder.py#L47-L67
def as_dict(self):
    """ Return a dictionary containing the current values
    of the object.

    Returns:
        (dict): The object represented as a dictionary
    """
    out = {}
    for prop in self:
        propval = getattr(self, prop)

        if hasattr(propval, 'for_json'):
            out[prop] = propval.for_json()
        elif isinstance(propval, list):
            out[prop] = [getattr(x, 'for_json', lambda: x)() for x in propval]
        elif isinstance(propval, (ProtocolBase, LiteralValue)):
            out[prop] = propval.as_dict()
        elif propval is not None:
            out[prop] = propval

    return out
[ "def", "as_dict", "(", "self", ")", ":", "out", "=", "{", "}", "for", "prop", "in", "self", ":", "propval", "=", "getattr", "(", "self", ",", "prop", ")", "if", "hasattr", "(", "propval", ",", "'for_json'", ")", ":", "out", "[", "prop", "]", "=",...
Return a dictionary containing the current values of the object. Returns: (dict): The object represented as a dictionary
[ "Return", "a", "dictionary", "containing", "the", "current", "values", "of", "the", "object", "." ]
python
train
vatlab/SoS
src/sos/actions.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L923-L991
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
    '''Download files from specified URL, which should be space, tab or
    newline separated URLs. The files will be downloaded to specified
    destination. If `filename.md5` files are downloaded, they are used to
    validate downloaded `filename`. Unless otherwise specified, compressed
    files are decompressed. If `max_jobs` is given, a maximum of `max_jobs`
    concurrent download jobs will be used for each domain. This restriction
    applies to domain names and will be applied to multiple download
    instances.
    '''
    if env.config['run_mode'] == 'dryrun':
        print(f'HINT: download\n{URLs}\n')
        return None
    if isinstance(URLs, str):
        urls = [x.strip() for x in URLs.split() if x.strip()]
    else:
        urls = list(URLs)
    if not urls:
        env.logger.debug(f'No download URL specified: {URLs}')
        return
    #
    if dest_file is not None and len(urls) != 1:
        raise RuntimeError(
            'Only one URL is allowed if a destination file is specified.')
    #
    if dest_file is None:
        filenames = []
        for idx, url in enumerate(urls):
            token = urllib.parse.urlparse(url)
            # if no scheme or netloc, the URL is not acceptable
            if not all([
                    getattr(token, qualifying_attr)
                    for qualifying_attr in ('scheme', 'netloc')
            ]):
                raise ValueError(f'Invalid URL {url}')
            filename = os.path.split(token.path)[-1]
            if not filename:
                raise ValueError(f'Cannot determine destination file for {url}')
            filenames.append(os.path.join(dest_dir, filename))
    else:
        token = urllib.parse.urlparse(urls[0])
        if not all([
                getattr(token, qualifying_attr)
                for qualifying_attr in ('scheme', 'netloc')
        ]):
            raise ValueError(f'Invalid URL {urls[0]}')
        filenames = [dest_file]
    #
    succ = [(False, None) for x in urls]
    with ProcessPoolExecutor(max_workers=max_jobs) as executor:
        for idx, (url, filename) in enumerate(zip(urls, filenames)):
            # if there is a lot, start download
            succ[idx] = executor.submit(downloadURL, url, filename,
                                        decompress, idx)
        succ = [x.result() for x in succ]

    # for su, url in zip(succ, urls):
    #    if not su:
    #        env.logger.warning('Failed to download {}'.format(url))
    failed = [y for x, y in zip(succ, urls) if not x]
    if failed:
        if len(urls) == 1:
            raise RuntimeError(f'Failed to download {urls[0]}')
        else:
            raise RuntimeError(
                f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})'
            )
    return 0
[ "def", "download", "(", "URLs", ",", "dest_dir", "=", "'.'", ",", "dest_file", "=", "None", ",", "decompress", "=", "False", ",", "max_jobs", "=", "5", ")", ":", "if", "env", ".", "config", "[", "'run_mode'", "]", "==", "'dryrun'", ":", "print", "(",...
Download files from specified URL, which should be space, tab or newline separated URLs. The files will be downloaded to specified destination. If `filename.md5` files are downloaded, they are used to validate downloaded `filename`. Unless otherwise specified, compressed files are decompressed. If `max_jobs` is given, a maximum of `max_jobs` concurrent download jobs will be used for each domain. This restriction applies to domain names and will be applied to multiple download instances.
[ "Download", "files", "from", "specified", "URL", "which", "should", "be", "space", "tab", "or", "newline", "separated", "URLs", ".", "The", "files", "will", "be", "downloaded", "to", "specified", "destination", ".", "If", "filename", ".", "md5", "files", "ar...
python
train
romanorac/discomll
discomll/regression/locally_weighted_linear_regression.py
https://github.com/romanorac/discomll/blob/a4703daffb2ba3c9f614bc3dbe45ae55884aea00/discomll/regression/locally_weighted_linear_regression.py#L83-L139
def fit_predict(training_data, fitting_data, tau=1, samples_per_job=0,
                save_results=True, show=False):
    """
    training_data - training samples
    fitting_data - dataset to be fitted to training data.
    tau - controls how quickly the weight of a training sample falls off with
        distance of its x(i) from the query point x.
    samples_per_job - define a number of samples that will be processed in
        single mapreduce job. If 0, algorithm will calculate number of
        samples per job.
    """
    from disco.worker.pipeline.worker import Worker, Stage
    from disco.core import Job, result_iterator
    from disco.core import Disco

    try:
        tau = float(tau)
        if tau <= 0:
            raise Exception("Parameter tau should be > 0.")
    except ValueError:
        raise Exception("Parameter tau should be numerical.")

    if fitting_data.params["id_index"] == -1:
        raise Exception("Predict data should have id_index set.")

    job = Job(worker=Worker(save_results=save_results))
    job.pipeline = [
        ("split",
         Stage("map", input_chain=fitting_data.params["input_chain"],
               init=simple_init, process=map_predict))]

    job.params = fitting_data.params
    job.run(name="lwlr_read_data", input=fitting_data.params["data_tag"])

    samples = {}
    results = []
    tau = float(2 * tau ** 2)  # calculate tau once
    counter = 0

    for test_id, x in result_iterator(job.wait(show=show)):
        if samples_per_job == 0:
            # calculate number of samples per job
            if len(x) <= 100:  # if there are fewer than 100 attributes
                samples_per_job = 100  # 100 samples is max per one job
            else:
                # there are more than 100 attributes
                samples_per_job = len(x) * -25 / 900. + 53  # linear function

        samples[test_id] = x
        if counter == samples_per_job:
            results.append(_fit_predict(training_data, samples, tau,
                                        save_results, show))
            counter = 0
            samples = {}
        counter += 1

    if len(samples) > 0:
        # if there are some samples left in the dictionary
        results.append(_fit_predict(training_data, samples, tau,
                                    save_results, show))

    # merge results of every iteration into a single tag
    ddfs = Disco().ddfs
    ddfs.tag(job.name, [[list(ddfs.blobs(tag))[0][0]] for tag in results])

    return ["tag://" + job.name]
[ "def", "fit_predict", "(", "training_data", ",", "fitting_data", ",", "tau", "=", "1", ",", "samples_per_job", "=", "0", ",", "save_results", "=", "True", ",", "show", "=", "False", ")", ":", "from", "disco", ".", "worker", ".", "pipeline", ".", "worker"...
training_data - training samples fitting_data - dataset to be fitted to training data. tau - controls how quickly the weight of a training sample falls off with distance of its x(i) from the query point x. samples_per_job - define a number of samples that will be processed in single mapreduce job. If 0, algorithm will calculate number of samples per job.
[ "training_data", "-", "training", "samples", "fitting_data", "-", "dataset", "to", "be", "fitted", "to", "training", "data", ".", "tau", "-", "controls", "how", "quickly", "the", "weight", "of", "a", "training", "sample", "falls", "off", "with", "distance", ...
python
train
marshallward/f90nml
f90nml/namelist.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/namelist.py#L391-L406
def write(self, nml_path, force=False, sort=False):
    """Write Namelist to a Fortran 90 namelist file.

    >>> nml = f90nml.read('input.nml')
    >>> nml.write('out.nml')
    """
    nml_is_file = hasattr(nml_path, 'read')
    if not force and not nml_is_file and os.path.isfile(nml_path):
        raise IOError('File {0} already exists.'.format(nml_path))

    nml_file = nml_path if nml_is_file else open(nml_path, 'w')
    try:
        self._writestream(nml_file, sort)
    finally:
        if not nml_is_file:
            nml_file.close()
[ "def", "write", "(", "self", ",", "nml_path", ",", "force", "=", "False", ",", "sort", "=", "False", ")", ":", "nml_is_file", "=", "hasattr", "(", "nml_path", ",", "'read'", ")", "if", "not", "force", "and", "not", "nml_is_file", "and", "os", ".", "p...
Write Namelist to a Fortran 90 namelist file. >>> nml = f90nml.read('input.nml') >>> nml.write('out.nml')
[ "Write", "Namelist", "to", "a", "Fortran", "90", "namelist", "file", "." ]
python
train
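The doctest in the docstring expands into a round trip like the following sketch, assuming f90nml is installed and an input.nml exists on disk; the force flag and the file-object branch come straight from the signature above:

import io
import f90nml

nml = f90nml.read('input.nml')
nml.write('out.nml')              # raises IOError if out.nml already exists
nml.write('out.nml', force=True)  # overwrite instead

# A file-like object also works, since write() checks for a 'read' attribute.
buf = io.StringIO()
nml.write(buf)
print(buf.getvalue())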
tensorflow/tensorboard
tensorboard/plugins/beholder/beholder.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/beholder/beholder.py#L85-L91
def _write_summary(self, session, frame):
    '''Writes the frame to disk as a tensor summary.'''
    summary = session.run(self.summary_op, feed_dict={
        self.frame_placeholder: frame
    })
    path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
    write_file(summary, path)
[ "def", "_write_summary", "(", "self", ",", "session", ",", "frame", ")", ":", "summary", "=", "session", ".", "run", "(", "self", ".", "summary_op", ",", "feed_dict", "=", "{", "self", ".", "frame_placeholder", ":", "frame", "}", ")", "path", "=", "'{}...
Writes the frame to disk as a tensor summary.
[ "Writes", "the", "frame", "to", "disk", "as", "a", "tensor", "summary", "." ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/parser.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/parser.py#L310-L317
def warn_on_shadowed_var(self) -> bool:
    """If True, warn when a def'ed Var name is shadowed in an inner scope.
    Implied by warn_on_shadowed_name. The value of warn_on_shadowed_name
    supersedes the value of this flag."""
    return self.warn_on_shadowed_name or self._opts.entry(
        WARN_ON_SHADOWED_VAR, False
    )
[ "def", "warn_on_shadowed_var", "(", "self", ")", "->", "bool", ":", "return", "self", ".", "warn_on_shadowed_name", "or", "self", ".", "_opts", ".", "entry", "(", "WARN_ON_SHADOWED_VAR", ",", "False", ")" ]
If True, warn when a def'ed Var name is shadowed in an inner scope. Implied by warn_on_shadowed_name. The value of warn_on_shadowed_name supersedes the value of this flag.
[ "If", "True", "warn", "when", "a", "def", "ed", "Var", "name", "is", "shadowed", "in", "an", "inner", "scope", "." ]
python
test
mbj4668/pyang
pyang/statements.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3192-L3204
def get_primitive_type(stmt):
    """Recurses through the typedefs and returns the most primitive YANG
    type defined.
    """
    type_obj = stmt.search_one('type')
    type_name = getattr(type_obj, 'arg', None)
    typedef_obj = getattr(type_obj, 'i_typedef', None)
    if typedef_obj:
        type_name = get_primitive_type(typedef_obj)
    elif type_obj and not check_primitive_type(type_obj):
        raise Exception('%s is not a primitive! Incomplete parse tree?'
                        % type_name)
    return type_name
[ "def", "get_primitive_type", "(", "stmt", ")", ":", "type_obj", "=", "stmt", ".", "search_one", "(", "'type'", ")", "type_name", "=", "getattr", "(", "type_obj", ",", "'arg'", ",", "None", ")", "typedef_obj", "=", "getattr", "(", "type_obj", ",", "'i_typed...
Recurses through the typedefs and returns the most primitive YANG type defined.
[ "Recurses", "through", "the", "typedefs", "and", "returns", "the", "most", "primitive", "YANG", "type", "defined", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xloaderitem.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xloaderitem.py#L55-L71
def autoload(self, state=True):
    """
    Begins the process for autoloading this item when it becomes
    visible within the tree.

    :param state | <bool>
    """
    if state and not self._timer:
        self._timer = QtCore.QTimer()
        self._timer.setInterval(500)
        self._timer.timeout.connect(self.testAutoload)

    if state and self._timer and not self._timer.isActive():
        self._timer.start()
    elif not state and self._timer and self._timer.isActive():
        self._timer.stop()
        del self._timer
        self._timer = None
[ "def", "autoload", "(", "self", ",", "state", "=", "True", ")", ":", "if", "state", "and", "not", "self", ".", "_timer", ":", "self", ".", "_timer", "=", "QtCore", ".", "QTimer", "(", ")", "self", ".", "_timer", ".", "setInterval", "(", "500", ")",...
Begins the process for autoloading this item when it becomes visible within the tree. :param state | <bool>
[ "Begins", "the", "process", "for", "autoloading", "this", "item", "when", "it", "becomes", "visible", "within", "the", "tree", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
minio/minio-py
minio/api.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L1014-L1028
def remove_object(self, bucket_name, object_name):
    """
    Remove an object from the bucket.

    :param bucket_name: Bucket of object to remove
    :param object_name: Name of object to remove
    :return: None
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)

    # No reason to store successful response, for errors
    # relevant exceptions are thrown.
    self._url_open('DELETE', bucket_name=bucket_name,
                   object_name=object_name)
[ "def", "remove_object", "(", "self", ",", "bucket_name", ",", "object_name", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "is_non_empty_string", "(", "object_name", ")", "# No reason to store successful response, for errors", "# relevant exceptions are thrown.", ...
Remove an object from the bucket. :param bucket_name: Bucket of object to remove :param object_name: Name of object to remove :return: None
[ "Remove", "an", "object", "from", "the", "bucket", "." ]
python
train
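On the caller's side the method fits into the minio-py client roughly as below; the endpoint, credentials, and bucket/object names are placeholders:

from minio import Minio

# Placeholder credentials; a sketch, assuming the minio package is installed.
client = Minio('play.min.io',
               access_key='YOUR-ACCESS-KEY',
               secret_key='YOUR-SECRET-KEY',
               secure=True)
client.remove_object('my-bucket', 'my-object')  # raises on error, returns None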
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2482-L2492
def _if_not_closed(f):
    """Run the method iff. the memory view hasn't been closed and the parent
    object has not been freed."""
    @add_signature_to_docstring(f)
    @functools.wraps(f)
    def f_(self, *args, **kwargs):
        if self.closed or self._parent._freed:
            raise OSError
        return f(self, *args, **kwargs)
    return f_
[ "def", "_if_not_closed", "(", "f", ")", ":", "@", "add_signature_to_docstring", "(", "f", ")", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "closed"...
Run the method iff. the memory view hasn't been closed and the parent object has not been freed.
[ "Run", "the", "method", "iff", ".", "the", "memory", "view", "hasn", "t", "been", "closed", "and", "the", "parent", "object", "has", "not", "been", "freed", "." ]
python
train
deathbeds/importnb
src/importnb/loader.py
https://github.com/deathbeds/importnb/blob/ec870d1f8ab99fd5b363267f89787a3e442a779f/src/importnb/loader.py#L213-L233
def load(cls, filename, dir=None, main=False, **kwargs):
    """Import a notebook as a module from a filename.

    dir: The directory to load the file from.
    main: Load the module in the __main__ context.

    > assert Notebook.load('loader.ipynb')
    """
    name = main and "__main__" or Path(filename).stem
    loader = cls(name, str(filename), **kwargs)
    module = module_from_spec(FileModuleSpec(name, loader, origin=loader.path))
    cwd = str(Path(loader.path).parent)
    try:
        with ExitStack() as stack:
            sys.path.append(cwd)
            loader.name != "__main__" and stack.enter_context(
                _installed_safely(module))
            loader.exec_module(module)
    finally:
        sys.path.pop()
    return module
[ "def", "load", "(", "cls", ",", "filename", ",", "dir", "=", "None", ",", "main", "=", "False", ",", "*", "*", "kwargs", ")", ":", "name", "=", "main", "and", "\"__main__\"", "or", "Path", "(", "filename", ")", ".", "stem", "loader", "=", "cls", ...
Import a notebook as a module from a filename. dir: The directory to load the file from. main: Load the module in the __main__ context. > assert Notebook.load('loader.ipynb')
[ "Import", "a", "notebook", "as", "a", "module", "from", "a", "filename", ".", "dir", ":", "The", "directory", "to", "load", "the", "file", "from", ".", "main", ":", "Load", "the", "module", "in", "the", "__main__", "context", ".", ">", "assert", "Noteb...
python
train
agile-geoscience/welly
welly/curve.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/curve.py#L159-L169
def describe(self):
    """
    Return basic statistics about the curve.
    """
    stats = {}
    stats['samples'] = self.shape[0]
    stats['nulls'] = self[np.isnan(self)].shape[0]
    stats['mean'] = float(np.nanmean(self.real))
    stats['min'] = float(np.nanmin(self.real))
    stats['max'] = float(np.nanmax(self.real))
    return stats
[ "def", "describe", "(", "self", ")", ":", "stats", "=", "{", "}", "stats", "[", "'samples'", "]", "=", "self", ".", "shape", "[", "0", "]", "stats", "[", "'nulls'", "]", "=", "self", "[", "np", ".", "isnan", "(", "self", ")", "]", ".", "shape",...
Return basic statistics about the curve.
[ "Return", "basic", "statistics", "about", "the", "curve", "." ]
python
train
tjcsl/ion
intranet/utils/urls.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/utils/urls.py#L6-L28
def add_get_parameters(url, parameters, percent_encode=True):
    """Utility function to add GET parameters to an existing URL.

    Args:
        parameters
            A dictionary of the parameters that should be added.
        percent_encode
            Whether the query parameters should be percent encoded.

    Returns:
        The updated URL.

    """
    url_parts = list(parse.urlparse(url))
    query = dict(parse.parse_qs(url_parts[4]))
    query.update(parameters)
    if percent_encode:
        url_parts[4] = parse.urlencode(query)
    else:
        url_parts[4] = "&".join(
            [key + "=" + value for key, value in query.items()])
    return parse.urlunparse(url_parts)
[ "def", "add_get_parameters", "(", "url", ",", "parameters", ",", "percent_encode", "=", "True", ")", ":", "url_parts", "=", "list", "(", "parse", ".", "urlparse", "(", "url", ")", ")", "query", "=", "dict", "(", "parse", ".", "parse_qs", "(", "url_parts"...
Utility function to add GET parameters to an existing URL. Args: parameters A dictionary of the parameters that should be added. percent_encode Whether the query parameters should be percent encoded. Returns: The updated URL.
[ "Utility", "function", "to", "add", "GET", "parameters", "to", "an", "existing", "URL", "." ]
python
train
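For a URL without a pre-existing query string the effect is simple, as in this hypothetical call; note that parse_qs returns list values, so parameters already present in the URL pass through urlencode in their list form:

# Hypothetical call, assuming the function above is importable.
url = add_get_parameters("https://example.com/events", {"show": "all"})
# url == "https://example.com/events?show=all"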
rameshg87/pyremotevbox
pyremotevbox/ZSI/TC.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TC.py#L1540-L1570
def parse(self, elt, ps, **kw):
    '''attempt to parse sequentially.  No way to know ahead of time
    what this instance represents.  Must be simple type so it can
    not have attributes nor children, so this isn't too bad.
    '''
    self.setMemberTypeCodes()
    (nsuri, typeName) = self.checkname(elt, ps)

    #if (nsuri,typeName) not in self.memberTypes:
    #    raise EvaluateException(
    #        'Union Type mismatch got (%s,%s) not in %s' % \
    #        (nsuri, typeName, self.memberTypes), ps.Backtrace(elt))

    for indx in range(len(self.memberTypeCodes)):
        typecode = self.memberTypeCodes[indx]
        try:
            pyobj = typecode.parse(elt, ps)
        except ParseException, ex:
            continue
        except Exception, ex:
            continue

        if indx > 0:
            self.memberTypeCodes.remove(typecode)
            self.memberTypeCodes.insert(0, typecode)
        break
    else:
        raise

    return pyobj
[ "def", "parse", "(", "self", ",", "elt", ",", "ps", ",", "*", "*", "kw", ")", ":", "self", ".", "setMemberTypeCodes", "(", ")", "(", "nsuri", ",", "typeName", ")", "=", "self", ".", "checkname", "(", "elt", ",", "ps", ")", "#if (nsuri,typeName) not i...
attempt to parse sequentially. No way to know ahead of time what this instance represents. Must be simple type so it can not have attributes nor children, so this isn't too bad.
[ "attempt", "to", "parse", "sequentially", ".", "No", "way", "to", "know", "ahead", "of", "time", "what", "this", "instance", "represents", ".", "Must", "be", "simple", "type", "so", "it", "can", "not", "have", "attributes", "nor", "children", "so", "this",...
python
train
saltstack/salt
salt/modules/portage_config.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/portage_config.py#L294-L323
def _merge_flags(new_flags, old_flags=None, conf='any'):
    '''
    Merges multiple lists of flags removing duplicates and resolving
    conflicts, giving priority to the last lists.
    '''
    if not old_flags:
        old_flags = []
    args = [old_flags, new_flags]
    if conf == 'accept_keywords':
        tmp = new_flags + \
            [i for i in old_flags if _check_accept_keywords(new_flags, i)]
    else:
        tmp = portage.flatten(args)
    flags = {}
    for flag in tmp:
        if flag[0] == '-':
            flags[flag[1:]] = False
        else:
            flags[flag] = True
    tmp = []
    for key, val in six.iteritems(flags):
        if val:
            tmp.append(key)
        else:
            tmp.append('-' + key)
    # Next sort is just aesthetic, can be commented for a small performance
    # boost
    tmp.sort(key=lambda x: x.lstrip('-'))
    return tmp
[ "def", "_merge_flags", "(", "new_flags", ",", "old_flags", "=", "None", ",", "conf", "=", "'any'", ")", ":", "if", "not", "old_flags", ":", "old_flags", "=", "[", "]", "args", "=", "[", "old_flags", ",", "new_flags", "]", "if", "conf", "==", "'accept_k...
Merges multiple lists of flags removing duplicates and resolving conflicts, giving priority to the last lists.
[ "Merges", "multiple", "lists", "of", "flags", "removing", "duplicates", "and", "resolving", "conflicts", "giving", "priority", "to", "lasts", "lists", "." ]
python
train
python-openxml/python-docx
docx/opc/package.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/package.py#L149-L155
def relate_to(self, part, reltype):
    """
    Return rId key of relationship to *part*, from the existing relationship
    if there is one, otherwise a newly created one.
    """
    rel = self.rels.get_or_add(reltype, part)
    return rel.rId
[ "def", "relate_to", "(", "self", ",", "part", ",", "reltype", ")", ":", "rel", "=", "self", ".", "rels", ".", "get_or_add", "(", "reltype", ",", "part", ")", "return", "rel", ".", "rId" ]
Return rId key of relationship to *part*, from the existing relationship if there is one, otherwise a newly created one.
[ "Return", "rId", "key", "of", "relationship", "to", "*", "part", "*", "from", "the", "existing", "relationship", "if", "there", "is", "one", "otherwise", "a", "newly", "created", "one", "." ]
python
train
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L3834-L3840
def close(self):
    """
    Finalize the GDSII stream library.
    """
    self._outfile.write(struct.pack('>2h', 4, 0x0400))
    if self._close:
        self._outfile.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "_outfile", ".", "write", "(", "struct", ".", "pack", "(", "'>2h'", ",", "4", ",", "0x0400", ")", ")", "if", "self", ".", "_close", ":", "self", ".", "_outfile", ".", "close", "(", ")" ]
Finalize the GDSII stream library.
[ "Finalize", "the", "GDSII", "stream", "library", "." ]
python
train
edx/XBlock
xblock/mixins.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/mixins.py#L513-L522
def _set_field_if_present(cls, block, name, value, attrs):
    """Sets the field block.name, if the block has such a field."""
    if name in block.fields:
        value = (block.fields[name]).from_string(value)
        if "none" in attrs and attrs["none"] == "true":
            setattr(block, name, None)
        else:
            setattr(block, name, value)
    else:
        logging.warning("XBlock %s does not contain field %s",
                        type(block), name)
[ "def", "_set_field_if_present", "(", "cls", ",", "block", ",", "name", ",", "value", ",", "attrs", ")", ":", "if", "name", "in", "block", ".", "fields", ":", "value", "=", "(", "block", ".", "fields", "[", "name", "]", ")", ".", "from_string", "(", ...
Sets the field block.name, if the block has such a field.
[ "Sets", "the", "field", "block", ".", "name", "if", "block", "have", "such", "a", "field", "." ]
python
train
Erotemic/utool
utool/util_dev.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L196-L248
def input_timeout(msg='Waiting for input...', timeout=30):
    """
    FIXME: Function does not work quite right yet.

    Args:
        msg (str):
        timeout (int):

    Returns:
        ?: ans

    References:
        http://stackoverflow.com/questions/1335507/keyboard-input-with-timeout-in-python
        http://home.wlu.edu/~levys/software/kbhit.py
        http://stackoverflow.com/questions/3471461/raw-input-and-timeout/3911560#3911560

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_dev import *  # NOQA
        >>> msg = 'Waiting for input...'
        >>> timeout = 30
        >>> ans = input_timeout(msg, timeout)
        >>> print(ans)
    """
    import sys
    import select
    import time
    ans = None
    print('You have %d seconds to answer!' % timeout)
    print(msg)
    if sys.platform.startswith('win32'):
        import msvcrt
        start_time = time.time()
        instr = ''
        while True:
            if msvcrt.kbhit():
                chr_ = msvcrt.getche()
                if ord(chr_) == 13:  # enter_key
                    # Accept input
                    ans = instr
                    break
                elif ord(chr_) >= 32:  # space_char
                    # Append to input
                    instr += chr_
            elapsed = time.time() - start_time
            if elapsed > timeout:
                ans = None
                print('')  # needed to move to next line
    else:
        rlist, o, e = select.select([sys.stdin], [], [], timeout)
        if rlist:
            ans = sys.stdin.readline().strip()
    return ans
[ "def", "input_timeout", "(", "msg", "=", "'Waiting for input...'", ",", "timeout", "=", "30", ")", ":", "import", "sys", "import", "select", "import", "time", "ans", "=", "None", "print", "(", "'You have %d seconds to answer!'", "%", "timeout", ")", "print", "...
FIXME: Function does not work quite right yet. Args: msg (str): timeout (int): Returns: ?: ans References: http://stackoverflow.com/questions/1335507/keyboard-input-with-timeout-in-python http://home.wlu.edu/~levys/software/kbhit.py http://stackoverflow.com/questions/3471461/raw-input-and-timeout/3911560#3911560 Example: >>> # DISABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> msg = 'Waiting for input...' >>> timeout = 30 >>> ans = input_timeout(msg, timeout) >>> print(ans)
[ "FIXME", ":", "Function", "does", "not", "work", "quite", "right", "yet", "." ]
python
train
tcalmant/ipopo
pelix/shell/remote.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/remote.py#L565-L587
def _run_interpreter(variables, banner):
    """
    Runs a Python interpreter console and blocks until the user exits it.

    :param variables: Interpreters variables (locals)
    :param banner: Start-up banners
    """
    # Script-only imports
    import code
    try:
        import readline
        import rlcompleter

        readline.set_completer(rlcompleter.Completer(variables).complete)
        readline.parse_and_bind("tab: complete")
    except ImportError:
        # readline is not available: ignore
        pass

    # Start the console
    shell = code.InteractiveConsole(variables)
    shell.interact(banner)
[ "def", "_run_interpreter", "(", "variables", ",", "banner", ")", ":", "# Script-only imports", "import", "code", "try", ":", "import", "readline", "import", "rlcompleter", "readline", ".", "set_completer", "(", "rlcompleter", ".", "Completer", "(", "variables", ")...
Runs a Python interpreter console and blocks until the user exits it. :param variables: Interpreters variables (locals) :param banner: Start-up banners
[ "Runs", "a", "Python", "interpreter", "console", "and", "blocks", "until", "the", "user", "exits", "it", "." ]
python
train
choldgraf/download
download/download.py
https://github.com/choldgraf/download/blob/26007bb87751ee35791e30e4dfc54dd088bf15e6/download/download.py#L116-L134
def _convert_url_to_downloadable(url):
    """Convert a url to the proper style depending on its website."""

    if 'drive.google.com' in url:
        # For future support of google drive
        file_id = url.split('d/')[1].split('/')[0]
        base_url = 'https://drive.google.com/uc?export=download&id='
        out = '{}{}'.format(base_url, file_id)
    elif 'dropbox.com' in url:
        if url.endswith('.png'):
            out = url + '?dl=1'
        else:
            out = url.replace('dl=0', 'dl=1')
    elif 'github.com' in url:
        out = url.replace('github.com', 'raw.githubusercontent.com')
        out = out.replace('blob/', '')
    else:
        out = url
    return out
[ "def", "_convert_url_to_downloadable", "(", "url", ")", ":", "if", "'drive.google.com'", "in", "url", ":", "# For future support of google drive", "file_id", "=", "url", ".", "split", "(", "'d/'", ")", "[", "1", "]", ".", "split", "(", "'/'", ")", "[", "0", ...
Convert a url to the proper style depending on its website.
[ "Convert", "a", "url", "to", "the", "proper", "style", "depending", "on", "its", "website", "." ]
python
train
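Because the GitHub and Dropbox branches are pure string rewrites, their effect can be verified directly; a sketch with illustrative URLs, assuming _convert_url_to_downloadable is in scope:

assert _convert_url_to_downloadable(
    "https://github.com/choldgraf/download/blob/master/README.md"
) == "https://raw.githubusercontent.com/choldgraf/download/master/README.md"

assert _convert_url_to_downloadable(
    "https://www.dropbox.com/s/abc123/data.csv?dl=0"
) == "https://www.dropbox.com/s/abc123/data.csv?dl=1"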
uw-it-aca/uw-restclients
restclients/dao_implementation/mock.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/dao_implementation/mock.py#L307-L321
def read_resp_data(service_name, implementation_name, url, response):
    """
    Read the (DELETE, PATCH, POST, PUT) response body and header if they exist.
    """
    RR = _mockdata_path_root(service_name, implementation_name)
    for resource_dir in app_resource_dirs:
        path = os.path.join(resource_dir['path'],
                            service_name,
                            implementation_name)
        found_header = _read_resp_header(path, url, response)
        found_body = _read_resp_body(path, url, response)
        if found_body or found_header:
            return response

    response.status = 404
    return response
[ "def", "read_resp_data", "(", "service_name", ",", "implementation_name", ",", "url", ",", "response", ")", ":", "RR", "=", "_mockdata_path_root", "(", "service_name", ",", "implementation_name", ")", "for", "resource_dir", "in", "app_resource_dirs", ":", "path", ...
Read the (DELETE, PATCH, POST, PUT) response body and header if they exist.
[ "Read", "the", "(", "DELETE", "PATCH", "POST", "PUT", ")", "response", "body", "and", "header", "if", "exist", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L622-L652
def get_asset_contents_by_genus_type(self, asset_content_genus_type):
    """Gets an ``AssetContentList`` corresponding to the given asset content
    genus ``Type`` which does not include asset contents of types derived
    from the specified ``Type``.

    In plenary mode, the returned list contains all known asset contents or
    an error results. Otherwise, the returned list may contain only those
    asset contents that are accessible through this session.

    :param asset_content_genus_type: an asset content genus type
    :type asset_content_genus_type: ``osid.type.Type``
    :return: the returned ``AssetContent list``
    :rtype: ``osid.repository.AssetContentList``
    :raise: ``NullArgument`` -- ``asset_content_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*

    """
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    results = collection.find(
        dict({'assetContents.genusTypeId': {'$in': [str(asset_content_genus_type)]}},
             **self._view_filter()))

    # if a match is not found, NotFound exception will be thrown by find_one, so
    # the below should always work
    asset_content_maps = [ac for asset in results
                          for ac in asset['assetContents']
                          if ac['genusTypeId'] == str(asset_content_genus_type)]

    return objects.AssetContentList(asset_content_maps,
                                    runtime=self._runtime,
                                    proxy=self._proxy)
[ "def", "get_asset_contents_by_genus_type", "(", "self", ",", "asset_content_genus_type", ")", ":", "collection", "=", "JSONClientValidated", "(", "'repository'", ",", "collection", "=", "'Asset'", ",", "runtime", "=", "self", ".", "_runtime", ")", "results", "=", ...
Gets an ``AssetContentList`` corresponding to the given asset content genus ``Type`` which does not include asset contents of types derived from the specified ``Type``. In plenary mode, the returned list contains all known asset contents or an error results. Otherwise, the returned list may contain only those asset contents that are accessible through this session. :param asset_content_genus_type: an asset content genus type :type asset_content_genus_type: ``osid.type.Type`` :return: the returned ``AssetContent list`` :rtype: ``osid.repository.AssetContentList`` :raise: ``NullArgument`` -- ``asset_content_genus_type`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "an", "AssetContentList", "corresponding", "to", "the", "given", "asset", "content", "genus", "Type", "which", "does", "not", "include", "asset", "contents", "of", "types", "derived", "from", "the", "specified", "Type", "." ]
python
train
peterldowns/djoauth2
example/djoauth2example/api/views.py
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/example/djoauth2example/api/views.py#L12-L27
def user_info(access_token, request):
    """ Return basic information about a user.

    Limited to OAuth clients that have received authorization to the
    'user_info' scope.
    """
    user = access_token.user
    data = {
        'username': user.username,
        'first_name': user.first_name,
        'last_name': user.last_name,
        'email': user.email}

    return HttpResponse(content=json.dumps(data),
                        content_type='application/json',
                        status=200)
[ "def", "user_info", "(", "access_token", ",", "request", ")", ":", "user", "=", "access_token", ".", "user", "data", "=", "{", "'username'", ":", "user", ".", "username", ",", "'first_name'", ":", "user", ".", "first_name", ",", "'last_name'", ":", "user",...
Return basic information about a user. Limited to OAuth clients that have received authorization to the 'user_info' scope.
[ "Return", "basic", "information", "about", "a", "user", "." ]
python
train
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1538-L1546
def __set_formulas(self, formulas):
    """
    Sets formulas in this cell range from an iterable.

    Any cell values can be set using this method. Actual formulas
    must start with an equal sign.
    """
    array = tuple((self._clean_formula(v),) for v in formulas)
    self._get_target().setFormulaArray(array)
[ "def", "__set_formulas", "(", "self", ",", "formulas", ")", ":", "array", "=", "tuple", "(", "(", "self", ".", "_clean_formula", "(", "v", ")", ",", ")", "for", "v", "in", "formulas", ")", "self", ".", "_get_target", "(", ")", ".", "setFormulaArray", ...
Sets formulas in this cell range from an iterable. Any cell values can be set using this method. Actual formulas must start with an equal sign.
[ "Sets", "formulas", "in", "this", "cell", "range", "from", "an", "iterable", "." ]
python
train
PonteIneptique/flask-github-proxy
flask_github_proxy/__init__.py
https://github.com/PonteIneptique/flask-github-proxy/blob/f0a60639342f7c0834360dc12a099bfc3a06d939/flask_github_proxy/__init__.py#L113-L141
def request(self, method, url, **kwargs):
    """ Unified method to make request to the Github API

    :param method: HTTP Method to use
    :param url: URL to reach
    :param kwargs: dictionary of arguments (params for URL parameters,
        data for post/put data)
    :return: Response
    """
    if "data" in kwargs:
        kwargs["data"] = json.dumps(kwargs["data"])
    kwargs["headers"] = {
        'Content-Type': 'application/json',
        'Authorization': 'token %s' % self.__token__,
    }
    req = make_request(
        method,
        url,
        **kwargs
    )
    self.logger.debug(
        "Request::{}::{}".format(method, url),
        extra={
            "request": kwargs,
            "response": {"headers": req.headers,
                         "code": req.status_code,
                         "data": req.content}
        }
    )
    return req
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "\"data\"", "in", "kwargs", ":", "kwargs", "[", "\"data\"", "]", "=", "json", ".", "dumps", "(", "kwargs", "[", "\"data\"", "]", ")", "kwargs", "[", ...
Unified method to make request to the Github API :param method: HTTP Method to use :param url: URL to reach :param kwargs: dictionary of arguments (params for URL parameters, data for post/put data) :return: Response
[ "Unified", "method", "to", "make", "request", "to", "the", "Github", "API" ]
python
train
jsommers/switchyard
switchyard/lib/socket/socketemu.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/socket/socketemu.py#L431-L439
def send(self, data, flags=0):
    '''
    Send data on the socket. A call to connect() must have been
    previously made for this call to succeed. Flags is currently ignored.
    '''
    if self._remote_addr == (None, None):
        raise sockerr("ENOTCONN: socket not connected")
    return self._send(data, self._flowaddr())
[ "def", "send", "(", "self", ",", "data", ",", "flags", "=", "0", ")", ":", "if", "self", ".", "_remote_addr", "==", "(", "None", ",", "None", ")", ":", "raise", "sockerr", "(", "\"ENOTCONN: socket not connected\"", ")", "return", "self", ".", "_send", ...
Send data on the socket. A call to connect() must have been previously made for this call to succeed. Flags is currently ignored.
[ "Send", "data", "on", "the", "socket", ".", "A", "call", "to", "connect", "()", "must", "have", "been", "previously", "made", "for", "this", "call", "to", "succeed", ".", "Flags", "is", "currently", "ignored", "." ]
python
train
nanoporetech/ont_fast5_api
ont_fast5_api/helpers.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/helpers.py#L26-L41
def compare_hdf_files(file1, file2):
    """ Compare two hdf files.

    :param file1: First file to compare.
    :param file2: Second file to compare.
    :returns: True if they are the same.
    """
    data1 = FileToDict()
    data2 = FileToDict()
    scanner1 = data1.scan
    scanner2 = data2.scan
    with h5py.File(file1, 'r') as fh1:
        fh1.visititems(scanner1)
    with h5py.File(file2, 'r') as fh2:
        fh2.visititems(scanner2)
    return data1.contents == data2.contents
[ "def", "compare_hdf_files", "(", "file1", ",", "file2", ")", ":", "data1", "=", "FileToDict", "(", ")", "data2", "=", "FileToDict", "(", ")", "scanner1", "=", "data1", ".", "scan", "scanner2", "=", "data2", ".", "scan", "with", "h5py", ".", "File", "("...
Compare two hdf files. :param file1: First file to compare. :param file2: Second file to compare. :returns True if they are the same.
[ "Compare", "two", "hdf", "files", ".", ":", "param", "file1", ":", "First", "file", "to", "compare", ".", ":", "param", "file2", ":", "Second", "file", "to", "compare", "." ]
python
train
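A minimal call-site sketch with hypothetical file names; the import path follows the module path in the record above, and the function walks both HDF5 trees with visititems before comparing the resulting dicts:

from ont_fast5_api.helpers import compare_hdf_files

# 'read_a.fast5' and 'read_b.fast5' are hypothetical HDF5 files.
if compare_hdf_files('read_a.fast5', 'read_b.fast5'):
    print('files contain identical data')
else:
    print('files differ')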
dereneaton/ipyrad
ipyrad/plotting/coverageplots.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/plotting/coverageplots.py#L17-L86
def depthplot(data, samples=None, dims=(None, None), canvas=(None, None),
              xmax=50, log=False, outprefix=None, use_maxdepth=False):
    """ plots histogram of coverages across clusters"""

    ## select samples to be plotted, requires depths info
    if not samples:
        samples = data.samples.keys()
        samples.sort()
    subsamples = OrderedDict([(i, data.samples[i]) for i in samples])

    ## get canvas dimensions based on n-samples
    if any(dims):
        ## user-supplied dimensions (...)
        print("userdims")
    else:
        if len(subsamples) <= 4:
            ## set dimension to N samples
            dims = (1, len(subsamples))
        else:
            dims = (len(subsamples) / 4, 4)

    ## create canvas
    if any(canvas):
        print("usercanvas")
        canvas = toyplot.Canvas(width=canvas[0], height=canvas[1])
    else:
        canvas = toyplot.Canvas(width=200 * dims[1], height=150 * dims[0])

    ## get all of the data arrays
    for panel, sample in enumerate(subsamples):
        ## statistical called bins
        statdat = subsamples[sample].depths
        statdat = statdat[statdat >= data.paramsdict["mindepth_statistical"]]
        if use_maxdepth:
            statdat = statdat[statdat < data.paramsdict["maxdepth"]]
        sdat = np.histogram(statdat, range(50))

        ## majrule called bins
        statdat = subsamples[sample].depths
        statdat = statdat[statdat < data.paramsdict["mindepth_statistical"]]
        statdat = statdat[statdat >= data.paramsdict["mindepth_majrule"]]
        if use_maxdepth:
            statdat = statdat[statdat < data.paramsdict["maxdepth"]]
        mdat = np.histogram(statdat, range(50))

        ## excluded bins
        tots = data.samples[sample].depths
        tots = tots[tots < data.paramsdict["mindepth_majrule"]]
        if use_maxdepth:
            tots = tots[tots < data.paramsdict["maxdepth"]]
        edat = np.histogram(tots, range(50))

        ## fill in each panel of canvas with a sample
        axes = canvas.cartesian(grid=(dims[0], dims[1], panel), gutter=25)
        axes.x.domain.xmax = xmax
        axes.label.text = sample
        if log:
            axes.y.scale = "log"

        # heights = np.column_stack((sdat,mdat,edat))
        axes.bars(sdat)
        axes.bars(edat)
        axes.bars(mdat)

    ## return objects to be saved...
    if outprefix:
        toyplot.html.render(canvas, fobj=outprefix + ".html")
        toyplot.svg.render(canvas, fobj=outprefix + ".svg")
[ "def", "depthplot", "(", "data", ",", "samples", "=", "None", ",", "dims", "=", "(", "None", ",", "None", ")", ",", "canvas", "=", "(", "None", ",", "None", ")", ",", "xmax", "=", "50", ",", "log", "=", "False", ",", "outprefix", "=", "None", "...
plots histogram of coverages across clusters
[ "plots", "histogram", "of", "coverages", "across", "clusters" ]
python
valid
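A hedged call sketch for depthplot; `data` stands for an ipyrad Assembly with completed depth statistics (loaded elsewhere), and the sample names and output prefix are illustrative.

from ipyrad.plotting.coverageplots import depthplot

# one histogram panel per sample; writes depths.html and depths.svg
depthplot(data, samples=["1A_0", "1B_0"], xmax=40, log=True, outprefix="depths")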
aetros/aetros-cli
aetros/backend.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/backend.py#L65-L101
def Popen(*args, **kwargs):
    """
    Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
    Parses stdout as well for stdout API calls.

    Use read_line argument to read stdout of command's stdout line by line.
    Use returned process stdin to communicate with the command.

    :return: subprocess.Popen
    """

    read_line = None
    if 'read_line' in kwargs:
        read_line = kwargs['read_line']
        del kwargs['read_line']

    p = subprocess.Popen(*args, **kwargs)
    wait_stdout = None
    wait_stderr = None

    if p.stdout:
        wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)
    if p.stderr:
        wait_stderr = sys.stderr.attach(p.stderr)

    original_wait = p.wait

    def wait(*wargs, **wkwargs):
        # propagate the exit code, which the original wrapper silently dropped
        exit_code = original_wait(*wargs, **wkwargs)

        if wait_stdout:
            wait_stdout()
        if wait_stderr:
            wait_stderr()

        return exit_code

    p.wait = wait

    return p
[ "def", "Popen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "read_line", "=", "None", "if", "'read_line'", "in", "kwargs", ":", "read_line", "=", "kwargs", "[", "'read_line'", "]", "del", "kwargs", "[", "'read_line'", "]", "p", "=", "subproces...
Executes a command using subprocess.Popen and redirects output to AETROS and stdout. Parses stdout as well for stdout API calls. Use read_line argument to read stdout of command's stdout line by line. Use returned process stdin to communicate with the command. :return: subprocess.Popen
[ "Executes", "a", "command", "using", "subprocess", ".", "Popen", "and", "redirects", "output", "to", "AETROS", "and", "stdout", ".", "Parses", "stdout", "as", "well", "for", "stdout", "API", "calls", "." ]
python
train
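A usage sketch for the wrapper above, assuming aetros is installed and its logging backend is active; the command and callback are illustrative.

import subprocess
from aetros.backend import Popen

def on_line(line):
    print("child wrote:", line)   # invoked for each line of child stdout

p = Popen(["echo", "hello"], stdout=subprocess.PIPE, read_line=on_line)
exit_code = p.wait()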
linkhub-sdk/popbill.py
popbill/htTaxinvoiceService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/htTaxinvoiceService.py#L100-L137
def search(self, CorpNum, JobID, Type, TaxType, PurposeType, TaxRegIDType, TaxRegIDYN, TaxRegID, Page, PerPage,
           Order, UserID=None):
    """ Search collected results
        args
            CorpNum : Popbill member's business registration number
            JobID : job ID
            Type : document type array; N = standard e-tax invoice, M = amended e-tax invoice
            TaxType : tax type array; T = taxable, N = tax-exempt, Z = zero-rated
            PurposeType : receipt/invoice purpose; R = receipt, C = invoice, N = none
            TaxRegIDType : owner type of the sub-workplace registration number; S = supplier, B = buyer, T = trustee
            TaxRegIDYN : whether a sub-workplace registration number exists; blank = query all, 0 = none, 1 = present
            TaxRegID : sub-workplace registration numbers, comma(",")-separated, e.g. '0001,0002'
            Page : page number
            PerPage : items per page, up to 1000
            Order : sort direction; D = descending, A = ascending
            UserID : Popbill member ID
        return
            collected result information
        raise
            PopbillException
    """

    if JobID is None or len(JobID) != 18:
        raise PopbillException(-99999999, "The job ID (JobID) is not valid.")

    uri = '/HomeTax/Taxinvoice/' + JobID
    uri += '?Type=' + ','.join(Type)
    uri += '&TaxType=' + ','.join(TaxType)
    uri += '&PurposeType=' + ','.join(PurposeType)
    uri += '&TaxRegIDType=' + TaxRegIDType
    uri += '&TaxRegID=' + TaxRegID
    uri += '&Page=' + str(Page)
    uri += '&PerPage=' + str(PerPage)
    uri += '&Order=' + Order

    if TaxRegIDYN != '':
        uri += '&TaxRegIDYN=' + TaxRegIDYN

    return self._httpget(uri, CorpNum, UserID)
[ "def", "search", "(", "self", ",", "CorpNum", ",", "JobID", ",", "Type", ",", "TaxType", ",", "PurposeType", ",", "TaxRegIDType", ",", "TaxRegIDYN", ",", "TaxRegID", ",", "Page", ",", "PerPage", ",", "Order", ",", "UserID", "=", "None", ")", ":", "if",...
Search collected results
args
    CorpNum : Popbill member's business registration number
    JobID : job ID
    Type : document type array; N = standard e-tax invoice, M = amended e-tax invoice
    TaxType : tax type array; T = taxable, N = tax-exempt, Z = zero-rated
    PurposeType : receipt/invoice purpose; R = receipt, C = invoice, N = none
    TaxRegIDType : owner type of the sub-workplace registration number; S = supplier, B = buyer, T = trustee
    TaxRegIDYN : whether a sub-workplace registration number exists; blank = query all, 0 = none, 1 = present
    TaxRegID : sub-workplace registration numbers, comma(",")-separated, e.g. '0001,0002'
    Page : page number
    PerPage : items per page, up to 1000
    Order : sort direction; D = descending, A = ascending
    UserID : Popbill member ID
return
    collected result information
raise
    PopbillException
[ "수집", "결과", "조회", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "JobID", ":", "작업아이디", "Type", ":", "문서형태", "배열", "N", "-", "일반전자세금계산서", "M", "-", "수정전자세금계산서", "TaxType", ":", "과세형태", "배열", "T", "-", "과세", "N", "-", "면세", "Z", "-", "영세", "PurposeType", ...
python
train
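A hedged call sketch for search; `svc` stands for an already-authenticated HTTaxinvoiceService instance (LinkID/secret-key setup omitted), and every argument value is illustrative.

result = svc.search(
    CorpNum="1234567890",
    JobID="018010114700000001",          # must be exactly 18 characters
    Type=["N", "M"], TaxType=["T", "N", "Z"], PurposeType=["R", "C", "N"],
    TaxRegIDType="S", TaxRegIDYN="", TaxRegID="",
    Page=1, PerPage=100, Order="D")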
OpenKMIP/PyKMIP
kmip/pie/objects.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/pie/objects.py#L424-L493
def key_wrapping_data(self): """ Retrieve all of the relevant key wrapping data fields and return them as a dictionary. """ key_wrapping_data = {} encryption_key_info = { 'unique_identifier': self._kdw_eki_unique_identifier, 'cryptographic_parameters': { 'block_cipher_mode': self._kdw_eki_cp_block_cipher_mode, 'padding_method': self._kdw_eki_cp_padding_method, 'hashing_algorithm': self._kdw_eki_cp_hashing_algorithm, 'key_role_type': self._kdw_eki_cp_key_role_type, 'digital_signature_algorithm': self._kdw_eki_cp_digital_signature_algorithm, 'cryptographic_algorithm': self._kdw_eki_cp_cryptographic_algorithm, 'random_iv': self._kdw_eki_cp_random_iv, 'iv_length': self._kdw_eki_cp_iv_length, 'tag_length': self._kdw_eki_cp_tag_length, 'fixed_field_length': self._kdw_eki_cp_fixed_field_length, 'invocation_field_length': self._kdw_eki_cp_invocation_field_length, 'counter_length': self._kdw_eki_cp_counter_length, 'initial_counter_value': self._kdw_eki_cp_initial_counter_value } } if not any(encryption_key_info['cryptographic_parameters'].values()): encryption_key_info['cryptographic_parameters'] = {} if not any(encryption_key_info.values()): encryption_key_info = {} mac_sign_key_info = { 'unique_identifier': self._kdw_mski_unique_identifier, 'cryptographic_parameters': { 'block_cipher_mode': self._kdw_mski_cp_block_cipher_mode, 'padding_method': self._kdw_mski_cp_padding_method, 'hashing_algorithm': self._kdw_mski_cp_hashing_algorithm, 'key_role_type': self._kdw_mski_cp_key_role_type, 'digital_signature_algorithm': self._kdw_mski_cp_digital_signature_algorithm, 'cryptographic_algorithm': self._kdw_mski_cp_cryptographic_algorithm, 'random_iv': self._kdw_mski_cp_random_iv, 'iv_length': self._kdw_mski_cp_iv_length, 'tag_length': self._kdw_mski_cp_tag_length, 'fixed_field_length': self._kdw_mski_cp_fixed_field_length, 'invocation_field_length': self._kdw_mski_cp_invocation_field_length, 'counter_length': self._kdw_mski_cp_counter_length, 'initial_counter_value': self._kdw_mski_cp_initial_counter_value } } if not any(mac_sign_key_info['cryptographic_parameters'].values()): mac_sign_key_info['cryptographic_parameters'] = {} if not any(mac_sign_key_info.values()): mac_sign_key_info = {} key_wrapping_data['wrapping_method'] = self._kdw_wrapping_method key_wrapping_data['encryption_key_information'] = encryption_key_info key_wrapping_data['mac_signature_key_information'] = mac_sign_key_info key_wrapping_data['mac_signature'] = self._kdw_mac_signature key_wrapping_data['iv_counter_nonce'] = self._kdw_iv_counter_nonce key_wrapping_data['encoding_option'] = self._kdw_encoding_option if not any(key_wrapping_data.values()): key_wrapping_data = {} return key_wrapping_data
[ "def", "key_wrapping_data", "(", "self", ")", ":", "key_wrapping_data", "=", "{", "}", "encryption_key_info", "=", "{", "'unique_identifier'", ":", "self", ".", "_kdw_eki_unique_identifier", ",", "'cryptographic_parameters'", ":", "{", "'block_cipher_mode'", ":", "sel...
Retrieve all of the relevant key wrapping data fields and return them as a dictionary.
[ "Retrieve", "all", "of", "the", "relevant", "key", "wrapping", "data", "fields", "and", "return", "them", "as", "a", "dictionary", "." ]
python
test
taxjar/taxjar-python
taxjar/client.py
https://github.com/taxjar/taxjar-python/blob/be9b30d7dc968d24e066c7c133849fee180f8d95/taxjar/client.py#L121-L124
def validate_address(self, address_deets): """Validates a customer address and returns back a collection of address matches.""" request = self._post('addresses/validate', address_deets) return self.responder(request)
[ "def", "validate_address", "(", "self", ",", "address_deets", ")", ":", "request", "=", "self", ".", "_post", "(", "'addresses/validate'", ",", "address_deets", ")", "return", "self", ".", "responder", "(", "request", ")" ]
Validates a customer address and returns back a collection of address matches.
[ "Validates", "a", "customer", "address", "and", "returns", "back", "a", "collection", "of", "address", "matches", "." ]
python
train
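A short usage sketch following the taxjar-python client pattern; the API key and address values are placeholders.

import taxjar

client = taxjar.Client(api_key="YOUR_API_KEY")
matches = client.validate_address({
    "country": "US", "state": "AZ", "zip": "85297",
    "city": "Gilbert", "street": "3301 South Greenfield Rd",
})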
StackStorm/pybind
pybind/slxos/v17s_1_02/isis_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/__init__.py#L221-L244
def _set_ipv4_routes(self, v, load=False): """ Setter method for ipv4_routes, mapped from YANG variable /isis_state/ipv4_routes (container) If this variable is read-only (config: false) in the source YANG file, then _set_ipv4_routes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv4_routes() directly. YANG Description: ISIS IPv4 Route Table """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ipv4_routes.ipv4_routes, is_container='container', presence=False, yang_name="ipv4-routes", rest_name="ipv4-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-ipv4-route-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ipv4_routes must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ipv4_routes.ipv4_routes, is_container='container', presence=False, yang_name="ipv4-routes", rest_name="ipv4-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-ipv4-route-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__ipv4_routes = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ipv4_routes", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "ba...
Setter method for ipv4_routes, mapped from YANG variable /isis_state/ipv4_routes (container) If this variable is read-only (config: false) in the source YANG file, then _set_ipv4_routes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv4_routes() directly. YANG Description: ISIS IPv4 Route Table
[ "Setter", "method", "for", "ipv4_routes", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "ipv4_routes", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YAN...
python
train
bachiraoun/pyrep
OldRepository.py
https://github.com/bachiraoun/pyrep/blob/0449bf2fad3e3e8dda855d4686a8869efeefd433/OldRepository.py#L999-L1060
def create_package(self, path=None, name=None, mode=None):
    """
    Create a tar file package of all the repository files and directories.
    Only files and directories that are stored in the repository info
    are stored in the package tar file.

    **N.B. On some systems packaging requires root permissions.**

    :Parameters:
        #. path (None, string): The real absolute path where to create the package.
           If None, it will be created in the same directory as the repository.
           If '.' or an empty string is passed, the current working directory will be used.
        #. name (None, string): The name to give to the package file.
           If None, the package directory name will be used with the appropriate extension added.
        #. mode (None, string): The writing mode of the tarfile.
           If None, automatically the best compression mode will be chosen.
           Available modes are ('w', 'w:', 'w:gz', 'w:bz2')
    """
    # check mode
    assert mode in (None, 'w', 'w:', 'w:gz', 'w:bz2'), 'unknown archive mode %s'%str(mode)
    if mode is None:
        mode = 'w:bz2'  # best available compression, per the docstring
    # get root
    if path is None:
        root = os.path.split(self.__path)[0]
    elif path.strip() in ('','.'):
        root = os.getcwd()
    else:
        root = os.path.realpath( os.path.expanduser(path) )
    assert os.path.isdir(root), 'absolute path %s is not a valid directory'%path
    # get name
    if name is None:
        ext = mode.split(":")
        if len(ext) == 2:
            if len(ext[1]):
                ext = "."+ext[1]
            else:
                ext = '.tar'
        else:
            ext = '.tar'
        name = os.path.split(self.__path)[1]+ext
    # save repository
    self.save()
    # create tar file
    tarfilePath = os.path.join(root, name)
    try:
        tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)
    except Exception as e:
        raise Exception("Unable to create package (%s)"%e)
    # walk directory and create empty directories
    for directory in sorted(list(self.walk_directories_relative_path())):
        t = tarfile.TarInfo( directory )
        t.type = tarfile.DIRTYPE
        tarHandler.addfile(t)
    # walk files and add to tar
    for file in self.walk_files_relative_path():
        tarHandler.add(os.path.join(self.__path,file), arcname=file)
    # save repository .pyrepinfo
    tarHandler.add(os.path.join(self.__path,".pyrepinfo"), arcname=".pyrepinfo")
    # close tar file
    tarHandler.close()
[ "def", "create_package", "(", "self", ",", "path", "=", "None", ",", "name", "=", "None", ",", "mode", "=", "None", ")", ":", "# check mode", "assert", "mode", "in", "(", "None", ",", "'w'", ",", "'w:'", ",", "'w:gz'", ",", "'w:bz2'", ")", ",", "'u...
Create a tar file package of all the repository files and directories. Only files and directories that are stored in the repository info are stored in the package tar file.

**N.B. On some systems packaging requires root permissions.**

:Parameters:
    #. path (None, string): The real absolute path where to create the package.
       If None, it will be created in the same directory as the repository.
       If '.' or an empty string is passed, the current working directory will be used.
    #. name (None, string): The name to give to the package file.
       If None, the package directory name will be used with the appropriate extension added.
    #. mode (None, string): The writing mode of the tarfile.
       If None, automatically the best compression mode will be chosen.
       Available modes are ('w', 'w:', 'w:gz', 'w:bz2')
[ "Create", "a", "tar", "file", "package", "of", "all", "the", "repository", "files", "and", "directories", ".", "Only", "files", "and", "directories", "that", "are", "stored", "in", "the", "repository", "info", "are", "stored", "in", "the", "package", "tar", ...
python
valid
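A usage sketch for create_package, where `rep` is assumed to be an existing repository object from this module and the file name is illustrative.

# package the repository as a bz2-compressed tar in the current directory
rep.create_package(path=".", name="myrepo.tar.bz2")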
saltstack/salt
salt/states/highstate_doc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/highstate_doc.py#L12-L45
def note(name, source=None, contents=None, **kwargs):
    '''
    Add content to a document generated using `highstate_doc.render`.

    This state does not perform any tasks on the host. It is only used in
    highstate_doc lowstate processors to include extra documents.

    .. code-block:: yaml

        {{sls}} example note:
            highstate_doc.note:
                - name: example note
                - require_in:
                    - pkg: somepackage
                - contents: |
                    example `highstate_doc.note`
                    ------------------
                    This state does not do anything to the system! It is only used by a `processor`.
                    You can use `requisites` and `order` to move your docs around the rendered file.
                    .. this message appears above the `pkg: somepackage` state.
                - source: salt://{{tpldir}}/also_include_a_file.md

        {{sls}} extra help:
            highstate_doc.note:
                - name: example
                - order: 0
                - source: salt://{{tpldir}}/HELP.md
    '''
    comment = ''
    if source:
        comment += 'include file: {0}\n'.format(source)
    if contents and len(contents) < 200:
        comment += contents
    return {'name': name, 'result': True, 'comment': comment, 'changes': {}}
[ "def", "note", "(", "name", ",", "source", "=", "None", ",", "contents", "=", "None", ",", "*", "*", "kwargs", ")", ":", "comment", "=", "''", "if", "source", ":", "comment", "+=", "'include file: {0}\\n'", ".", "format", "(", "source", ")", "if", "c...
Add content to a document generated using `highstate_doc.render`.

This state does not perform any tasks on the host. It is only used in
highstate_doc lowstate processors to include extra documents.

.. code-block:: yaml

    {{sls}} example note:
        highstate_doc.note:
            - name: example note
            - require_in:
                - pkg: somepackage
            - contents: |
                example `highstate_doc.note`
                ------------------
                This state does not do anything to the system! It is only used by a `processor`.
                You can use `requisites` and `order` to move your docs around the rendered file.
                .. this message appears above the `pkg: somepackage` state.
            - source: salt://{{tpldir}}/also_include_a_file.md

    {{sls}} extra help:
        highstate_doc.note:
            - name: example
            - order: 0
            - source: salt://{{tpldir}}/HELP.md
[ "Add", "content", "to", "a", "document", "generated", "using", "highstate_doc", ".", "render", "." ]
python
train
jeroyang/cateye
cateye/cateye.py
https://github.com/jeroyang/cateye/blob/8f181d6428d113d2928e3eb31703705ce0779eae/cateye/cateye.py#L278-L308
def search(index, query, snippet_folder=SNIPPET_FOLDER, term_freq=term_freq): """ The highest level of search function """ fallback_log = [] code_list = [] tokens = tokenize(query) tokens, abbr_log = abbr_expand(tokens) tokens, correct_log = correct(tokens, term_freq) tokens = lemmatize(tokens) tokens = filterout(tokens) while len(tokens) > 0: # Fallback mechanism code_list = fetch(index, tokens) if len(code_list) > 0: break tokens.sort(key=lambda tk:len(index.get(tk, []))) remove = tokens.pop() fallback_log.append(remove) snippets = get_snippets(code_list, snippet_folder) hints, hint_scores = get_hints(code_list, current_tokens=tokens) response = list(zip(code_list, snippets)) response.sort(key=result_sort_key, reverse=True) # Count search_frequency if len(response) <= MAX_RESULT: # the respone can be shown in one page search_freq.update(code_list) with open(SEARCH_FREQ_JSON, 'w') as f: json.dump(search_freq, f, indent=2) return response, tokens, hints, hint_scores, \ abbr_log, correct_log, fallback_log
[ "def", "search", "(", "index", ",", "query", ",", "snippet_folder", "=", "SNIPPET_FOLDER", ",", "term_freq", "=", "term_freq", ")", ":", "fallback_log", "=", "[", "]", "code_list", "=", "[", "]", "tokens", "=", "tokenize", "(", "query", ")", "tokens", ",...
The top-level search function.
[ "The", "highest", "level", "of", "search", "function" ]
python
train
project-ncl/pnc-cli
pnc_cli/swagger_client/models/build_set_status_changed_event.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/models/build_set_status_changed_event.py#L250-L264
def old_status(self, old_status): """ Sets the old_status of this BuildSetStatusChangedEvent. :param old_status: The old_status of this BuildSetStatusChangedEvent. :type: str """ allowed_values = ["NEW", "DONE", "REJECTED"] if old_status not in allowed_values: raise ValueError( "Invalid value for `old_status` ({0}), must be one of {1}" .format(old_status, allowed_values) ) self._old_status = old_status
[ "def", "old_status", "(", "self", ",", "old_status", ")", ":", "allowed_values", "=", "[", "\"NEW\"", ",", "\"DONE\"", ",", "\"REJECTED\"", "]", "if", "old_status", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `old_status` (...
Sets the old_status of this BuildSetStatusChangedEvent. :param old_status: The old_status of this BuildSetStatusChangedEvent. :type: str
[ "Sets", "the", "old_status", "of", "this", "BuildSetStatusChangedEvent", "." ]
python
train
pandas-dev/pandas
pandas/io/stata.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L367-L461
def _datetime_to_stata_elapsed_vec(dates, fmt): """ Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty """ index = dates.index NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 US_PER_DAY = NS_PER_DAY / 1000 def parse_dates_safe(dates, delta=False, year=False, days=False): d = {} if is_datetime64_dtype(dates.values): if delta: delta = dates - stata_epoch d['delta'] = delta.values.astype( np.int64) // 1000 # microseconds if days or year: dates = DatetimeIndex(dates) d['year'], d['month'] = dates.year, dates.month if days: days = (dates.astype(np.int64) - to_datetime(d['year'], format='%Y').astype(np.int64)) d['days'] = days // NS_PER_DAY elif infer_dtype(dates, skipna=False) == 'datetime': if delta: delta = dates.values - stata_epoch f = lambda x: \ US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds v = np.vectorize(f) d['delta'] = v(delta) if year: year_month = dates.apply(lambda x: 100 * x.year + x.month) d['year'] = year_month.values // 100 d['month'] = (year_month.values - d['year'] * 100) if days: f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days v = np.vectorize(f) d['days'] = v(dates) else: raise ValueError('Columns containing dates must contain either ' 'datetime64, datetime.datetime or null values.') return DataFrame(d, index=index) bad_loc = isna(dates) index = dates.index if bad_loc.any(): dates = Series(dates) if is_datetime64_dtype(dates): dates[bad_loc] = to_datetime(stata_epoch) else: dates[bad_loc] = stata_epoch if fmt in ["%tc", "tc"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: warnings.warn("Stata Internal Format tC not supported.") conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta // US_PER_DAY elif fmt in ["%tw", "tw"]: d = parse_dates_safe(dates, year=True, days=True) conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7) elif fmt in ["%tm", "tm"]: d = parse_dates_safe(dates, year=True) conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1) elif fmt in ["%tq", "tq"]: d = parse_dates_safe(dates, year=True) conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) conv_dates = (2 * (d.year - stata_epoch.year) + (d.month > 6).astype(np.int)) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year else: raise ValueError( "Format {fmt} is not a known Stata date format".format(fmt=fmt)) conv_dates = Series(conv_dates, dtype=np.float64) missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0] conv_dates[bad_loc] = missing_value return Series(conv_dates, index=index)
[ "def", "_datetime_to_stata_elapsed_vec", "(", "dates", ",", "fmt", ")", ":", "index", "=", "dates", ".", "index", "NS_PER_DAY", "=", "24", "*", "3600", "*", "1000", "*", "1000", "*", "1000", "US_PER_DAY", "=", "NS_PER_DAY", "/", "1000", "def", "parse_dates...
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime

Parameters
----------
dates : Series
    Series or array containing datetime.datetime or datetime64[ns] to
    convert to the Stata Internal Format given by fmt
fmt : str
    The format to convert to. Can be tc, td, tw, tm, tq, th, or ty.
[ "Convert", "from", "datetime", "to", "SIF", ".", "http", ":", "//", "www", ".", "stata", ".", "com", "/", "help", ".", "cgi?datetime" ]
python
train
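A worked instance of the "%tm" branch above (stata_epoch is 1960-01-01, so monthly SIF counts months since 1960m1):

# February 1965 -> 12 * (1965 - 1960) + 2 - 1 = 61
year, month = 1965, 2
tm = 12 * (year - 1960) + month - 1
assert tm == 61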
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L847-L880
def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts)
[ "def", "gen_modules", "(", "self", ",", "initial_load", "=", "False", ")", ":", "self", ".", "opts", "[", "'pillar'", "]", "=", "salt", ".", "pillar", ".", "get_pillar", "(", "self", ".", "opts", ",", "self", ".", "opts", "[", "'grains'", "]", ",", ...
Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules
[ "Tell", "the", "minion", "to", "reload", "the", "execution", "modules" ]
python
train
crs4/pydoop
pydoop/hadut.py
https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hadut.py#L512-L520
def set_exe(self, pipes_code): """ Dump launcher code to the distributed file system. """ if not self.output: raise RuntimeError("no output directory, can't create launcher") parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/"))) self.exe = hdfs.path.join(parent, utils.make_random_str()) hdfs.dump(pipes_code, self.exe)
[ "def", "set_exe", "(", "self", ",", "pipes_code", ")", ":", "if", "not", "self", ".", "output", ":", "raise", "RuntimeError", "(", "\"no output directory, can't create launcher\"", ")", "parent", "=", "hdfs", ".", "path", ".", "dirname", "(", "hdfs", ".", "p...
Dump launcher code to the distributed file system.
[ "Dump", "launcher", "code", "to", "the", "distributed", "file", "system", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L1355-L1395
def search(self, pattern, minAddr = None, maxAddr = None): """ Search for the given pattern within the process memory. @type pattern: str, compat.unicode or L{Pattern} @param pattern: Pattern to search for. It may be a byte string, a Unicode string, or an instance of L{Pattern}. The following L{Pattern} subclasses are provided by WinAppDbg: - L{BytePattern} - L{TextPattern} - L{RegExpPattern} - L{HexPattern} You can also write your own subclass of L{Pattern} for customized searches. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory. """ if isinstance(pattern, str): return self.search_bytes(pattern, minAddr, maxAddr) if isinstance(pattern, compat.unicode): return self.search_bytes(pattern.encode("utf-16le"), minAddr, maxAddr) if isinstance(pattern, Pattern): return Search.search_process(self, pattern, minAddr, maxAddr) raise TypeError("Unknown pattern type: %r" % type(pattern))
[ "def", "search", "(", "self", ",", "pattern", ",", "minAddr", "=", "None", ",", "maxAddr", "=", "None", ")", ":", "if", "isinstance", "(", "pattern", ",", "str", ")", ":", "return", "self", ".", "search_bytes", "(", "pattern", ",", "minAddr", ",", "m...
Search for the given pattern within the process memory. @type pattern: str, compat.unicode or L{Pattern} @param pattern: Pattern to search for. It may be a byte string, a Unicode string, or an instance of L{Pattern}. The following L{Pattern} subclasses are provided by WinAppDbg: - L{BytePattern} - L{TextPattern} - L{RegExpPattern} - L{HexPattern} You can also write your own subclass of L{Pattern} for customized searches. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory.
[ "Search", "for", "the", "given", "pattern", "within", "the", "process", "memory", "." ]
python
train
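A hedged sketch of consuming the iterator documented above; `process` stands for a winappdbg Process obtained elsewhere, so this is illustrative rather than standalone.

for address, size, data in process.search("MZ"):
    # each hit: where it was found, the match length, and the matching bytes
    print(hex(address), size)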
mabuchilab/QNET
src/qnet/algebra/core/abstract_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/abstract_algebra.py#L262-L295
def show_rules(cls, *names, attr=None): """Print algebraic rules used by :class:`create` Print a summary of the algebraic rules with the given names, or all rules if not names a given. Args: names (str): Names of rules to show attr (None or str): Name of the class attribute from which to get the rules. Cf. :meth:`add_rule`. Raises: AttributeError: If invalid `attr` """ from qnet.printing import srepr try: if attr is None: attr = cls._rules_attr() rules = getattr(cls, attr) except TypeError: rules = {} for (name, rule) in rules.items(): if len(names) > 0 and name not in names: continue pat, repl = rule print(name) print(" PATTERN:") print(textwrap.indent( textwrap.dedent(srepr(pat, indented=True)), prefix=" "*8)) print(" REPLACEMENT:") print(textwrap.indent( textwrap.dedent(inspect.getsource(repl).rstrip()), prefix=" "*8))
[ "def", "show_rules", "(", "cls", ",", "*", "names", ",", "attr", "=", "None", ")", ":", "from", "qnet", ".", "printing", "import", "srepr", "try", ":", "if", "attr", "is", "None", ":", "attr", "=", "cls", ".", "_rules_attr", "(", ")", "rules", "=",...
Print algebraic rules used by :class:`create`

Print a summary of the algebraic rules with the given names, or all
rules if no names are given.

Args:
    names (str): Names of rules to show
    attr (None or str): Name of the class attribute from which to get
        the rules. Cf. :meth:`add_rule`.

Raises:
    AttributeError: If invalid `attr`
[ "Print", "algebraic", "rules", "used", "by", ":", "class", ":", "create" ]
python
train
ARMmbed/icetea
icetea_lib/tools/file/SessionFiles.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/file/SessionFiles.py#L218-L228
def _ends_with(self, string_to_edit, end):  # pylint: disable=no-self-use
    """
    Check if the string ends with the characters in end; if not, append end to it.

    :param string_to_edit: string to check and edit.
    :param end: str
    :return: string_to_edit or string_to_edit + end
    """
    if not string_to_edit.endswith(end):
        return string_to_edit + end
    return string_to_edit
[ "def", "_ends_with", "(", "self", ",", "string_to_edit", ",", "end", ")", ":", "# pylint: disable=no-self-use", "if", "not", "string_to_edit", ".", "endswith", "(", "end", ")", ":", "return", "string_to_edit", "+", "end", "return", "string_to_edit" ]
Check if the string ends with the characters in end; if not, append end to the string.

:param string_to_edit: string to check and edit.
:param end: str
:return: string_to_edit or string_to_edit + end
[ "Check", "if", "string", "ends", "with", "characters", "in", "end", "if", "not", "merge", "end", "to", "string", "." ]
python
train
Microsoft/nni
examples/trials/kaggle-tgs-salt/lovasz_losses.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/kaggle-tgs-salt/lovasz_losses.py#L134-L146
def flatten_binary_scores(scores, labels, ignore=None): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = (labels != ignore) vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels
[ "def", "flatten_binary_scores", "(", "scores", ",", "labels", ",", "ignore", "=", "None", ")", ":", "scores", "=", "scores", ".", "view", "(", "-", "1", ")", "labels", "=", "labels", ".", "view", "(", "-", "1", ")", "if", "ignore", "is", "None", ":...
Flattens predictions in the batch (binary case) Remove labels equal to 'ignore'
[ "Flattens", "predictions", "in", "the", "batch", "(", "binary", "case", ")", "Remove", "labels", "equal", "to", "ignore" ]
python
train
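A small check of the masking behaviour, with flatten_binary_scores as defined above and PyTorch installed:

import torch

scores = torch.tensor([[0.2, 0.8], [0.5, 0.1]])
labels = torch.tensor([[1, 255], [0, 1]])
vscores, vlabels = flatten_binary_scores(scores, labels, ignore=255)
# the position labelled 255 is dropped; both outputs are flat 1-D tensors
assert vscores.numel() == 3 and vlabels.tolist() == [1, 0, 1]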
refenv/cijoe
modules/cij/fio.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/fio.py#L113-L118
def start(self): """Run FIO job in thread""" self.__thread = Threads(target=self.run, args=(True, True, False)) self.__thread.setDaemon(True) self.__thread.start()
[ "def", "start", "(", "self", ")", ":", "self", ".", "__thread", "=", "Threads", "(", "target", "=", "self", ".", "run", ",", "args", "=", "(", "True", ",", "True", ",", "False", ")", ")", "self", ".", "__thread", ".", "setDaemon", "(", "True", ")...
Run FIO job in thread
[ "Run", "FIO", "job", "in", "thread" ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L715-L721
def noam_norm(x, epsilon=1.0, name=None): """One version of layer normalization.""" with tf.name_scope(name, default_name="noam_norm", values=[x]): shape = x.get_shape() ndims = len(shape) return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt( to_float(shape[-1])))
[ "def", "noam_norm", "(", "x", ",", "epsilon", "=", "1.0", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "default_name", "=", "\"noam_norm\"", ",", "values", "=", "[", "x", "]", ")", ":", "shape", "=", "x", ...
One version of layer normalization.
[ "One", "version", "of", "layer", "normalization", "." ]
python
train
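A rough NumPy rendering of what noam_norm computes, for intuition only; it mirrors tf.nn.l2_normalize's max(sum(x**2), epsilon) clamp along the last axis and is not the library code.

import numpy as np

def noam_norm_np(x, epsilon=1.0):
    # l2-normalize the last axis, then rescale by sqrt(depth)
    sq = np.maximum(np.sum(np.square(x), axis=-1, keepdims=True), epsilon)
    return x / np.sqrt(sq) * np.sqrt(x.shape[-1])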
praekeltfoundation/molo
molo/core/api/endpoints.py
https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/api/endpoints.py#L48-L74
def get_queryset(self): ''' This is overwritten in order to not exclude drafts and pages submitted for moderation ''' request = self.request # Allow pages to be filtered to a specific type if 'type' not in request.GET: model = Page else: model_name = request.GET['type'] try: model = resolve_model_string(model_name) except LookupError: raise BadRequestError("type doesn't exist") if not issubclass(model, Page): raise BadRequestError("type doesn't exist") # This is the overwritten line queryset = model.objects.public() # exclude .live() # Filter by site queryset = queryset.descendant_of( request.site.root_page, inclusive=True) return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "request", "=", "self", ".", "request", "# Allow pages to be filtered to a specific type", "if", "'type'", "not", "in", "request", ".", "GET", ":", "model", "=", "Page", "else", ":", "model_name", "=", "request", "...
This is overridden so as not to exclude drafts and pages submitted for moderation.
[ "This", "is", "overwritten", "in", "order", "to", "not", "exclude", "drafts", "and", "pages", "submitted", "for", "moderation" ]
python
train
pyoceans/python-ctd
ctd/read.py
https://github.com/pyoceans/python-ctd/blob/fa9a9d02da3dfed6d1d60db0e52bbab52adfe666/ctd/read.py#L335-L392
def from_cnv(fname): """ DataFrame constructor to open Seabird CTD CNV-ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2')) >>> downcast, upcast = cast.split() >>> ax = downcast['t090C'].plot_cast() """ f = _read_file(fname) metadata = _parse_seabird(f.readlines(), ftype="cnv") f.seek(0) df = pd.read_fwf( f, header=None, index_col=None, names=metadata["names"], skiprows=metadata["skiprows"], delim_whitespace=True, widths=[11] * len(metadata["names"]), ) f.close() key_set = False prkeys = ["prDM", "prdM", "pr"] for prkey in prkeys: try: df.set_index(prkey, drop=True, inplace=True) key_set = True except KeyError: continue if not key_set: raise KeyError( f"Could not find pressure field (supported names are {prkeys})." ) df.index.name = "Pressure [dbar]" name = _basename(fname)[1] dtypes = {"bpos": int, "pumps": bool, "flag": bool} for column in df.columns: if column in dtypes: df[column] = df[column].astype(dtypes[column]) else: try: df[column] = df[column].astype(float) except ValueError: warnings.warn("Could not convert %s to float." % column) metadata["name"] = str(name) setattr(df, "_metadata", metadata) return df
[ "def", "from_cnv", "(", "fname", ")", ":", "f", "=", "_read_file", "(", "fname", ")", "metadata", "=", "_parse_seabird", "(", "f", ".", "readlines", "(", ")", ",", "ftype", "=", "\"cnv\"", ")", "f", ".", "seek", "(", "0", ")", "df", "=", "pd", "....
DataFrame constructor to open Seabird CTD CNV-ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2')) >>> downcast, upcast = cast.split() >>> ax = downcast['t090C'].plot_cast()
[ "DataFrame", "constructor", "to", "open", "Seabird", "CTD", "CNV", "-", "ASCII", "format", "." ]
python
train
ska-sa/spead2
spead2/__init__.py
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L262-L282
def dynamic_shape(self, max_elements): """Determine the dynamic shape, given incoming data that is big enough to hold `max_elements` elements. """ known = 1 unknown_pos = -1 for i, x in enumerate(self.shape): if x is not None: known *= x else: assert unknown_pos == -1, 'Shape has multiple unknown dimensions' unknown_pos = i if unknown_pos == -1: return self.shape else: shape = list(self.shape) if known == 0: shape[unknown_pos] = 0 else: shape[unknown_pos] = max_elements // known return shape
[ "def", "dynamic_shape", "(", "self", ",", "max_elements", ")", ":", "known", "=", "1", "unknown_pos", "=", "-", "1", "for", "i", ",", "x", "in", "enumerate", "(", "self", ".", "shape", ")", ":", "if", "x", "is", "not", "None", ":", "known", "*=", ...
Determine the dynamic shape, given incoming data that is big enough to hold `max_elements` elements.
[ "Determine", "the", "dynamic", "shape", "given", "incoming", "data", "that", "is", "big", "enough", "to", "hold", "max_elements", "elements", "." ]
python
train
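A worked example of the fallback arithmetic above: for a descriptor shape of (None, 4) the known product is 4, so room for ten elements resolves the unknown axis to 10 // 4 = 2.

# shape (None, 4) with max_elements=10 -> [10 // 4, 4] == [2, 4]
known, max_elements = 4, 10
assert [max_elements // known, 4] == [2, 4]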
SheffieldML/GPy
GPy/likelihoods/binomial.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/binomial.py#L78-L102
def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
    """
    Gradient of the log pdf at y, given inverse link of f w.r.t inverse link of f.

    .. math::
        \\frac{d\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\frac{y_{i}}{\\lambda(f_{i})} - \\frac{(N-y_{i})}{(1-\\lambda(f_{i}))}

    :param inv_link_f: latent variables inverse link of f.
    :type inv_link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata must contain 'trials'
    :returns: gradient of log likelihood evaluated at points inverse link of f.
    :rtype: Nx1 array
    """
    N = Y_metadata['trials']
    np.testing.assert_array_equal(N.shape, y.shape)

    Ny = N-y
    t1 = np.zeros(y.shape)
    t2 = np.zeros(y.shape)

    t1[y>0] = y[y>0]/inv_link_f[y>0]
    t2[Ny>0] = (Ny[Ny>0])/(1.-inv_link_f[Ny>0])
    return t1 - t2
[ "def", "dlogpdf_dlink", "(", "self", ",", "inv_link_f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "N", "=", "Y_metadata", "[", "'trials'", "]", "np", ".", "testing", ".", "assert_array_equal", "(", "N", ".", "shape", ",", "y", ".", "shape", ...
Gradient of the log pdf at y, given inverse link of f w.r.t inverse link of f.

.. math::
    \\frac{d\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\frac{y_{i}}{\\lambda(f_{i})} - \\frac{(N-y_{i})}{(1-\\lambda(f_{i}))}

:param inv_link_f: latent variables inverse link of f.
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata must contain 'trials'
:returns: gradient of log likelihood evaluated at points inverse link of f.
:rtype: Nx1 array
[ "Gradient", "of", "the", "pdf", "at", "y", "given", "inverse", "link", "of", "f", "w", ".", "r", ".", "t", "inverse", "link", "of", "f", "." ]
python
train
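For reference, the gradient in the docstring follows directly from the binomial log-likelihood, dropping terms constant in lambda:

    ln p(y | lambda) = y ln(lambda) + (N - y) ln(1 - lambda) + const
    d ln p / d lambda = y / lambda - (N - y) / (1 - lambda)

which is exactly what t1 - t2 computes, with the masking guarding against 0/0 when y or N - y is zero.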
ThreatResponse/margaritashotgun
margaritashotgun/repository.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/repository.py#L145-L165
def fetch(self, kernel_version, manifest_type): """ Search repository for kernel module matching kernel_version :type kernel_version: str :param kernel_version: kernel version to search repository on :type manifest_type: str :param manifest_type: kernel module manifest to search on """ metadata = self.get_metadata() logger.debug("parsed metadata: {0}".format(metadata)) manifest = self.get_manifest(metadata['manifests'][manifest_type]) try: module = manifest[kernel_version] logger.debug("found module {0}".format(module)) except KeyError: raise KernelModuleNotFoundError(kernel_version, self.url) path = self.fetch_module(module) return path
[ "def", "fetch", "(", "self", ",", "kernel_version", ",", "manifest_type", ")", ":", "metadata", "=", "self", ".", "get_metadata", "(", ")", "logger", ".", "debug", "(", "\"parsed metadata: {0}\"", ".", "format", "(", "metadata", ")", ")", "manifest", "=", ...
Search repository for kernel module matching kernel_version :type kernel_version: str :param kernel_version: kernel version to search repository on :type manifest_type: str :param manifest_type: kernel module manifest to search on
[ "Search", "repository", "for", "kernel", "module", "matching", "kernel_version" ]
python
train
tariqdaouda/pyGeno
pyGeno/Exon.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/Exon.py#L138-L143
def pluck(self):
    """Returns a plucked object. Plucks the exon off the tree and sets self.transcript to str(self.transcript). This effectively disconnects the object and makes it much lighter, in case you'd like to pickle it"""
    e = copy.copy(self)
    e.transcript = str(self.transcript)
    return e
[ "def", "pluck", "(", "self", ")", ":", "e", "=", "copy", ".", "copy", "(", "self", ")", "e", ".", "transcript", "=", "str", "(", "self", ".", "transcript", ")", "return", "e" ]
Returns a plucked object. Plucks the exon off the tree and sets self.transcript to str(self.transcript). This effectively disconnects the object and makes it much lighter, in case you'd like to pickle it.
[ "Returns", "a", "plucked", "object", ".", "Plucks", "the", "exon", "off", "the", "tree", "set", "the", "value", "of", "self", ".", "transcript", "into", "str", "(", "self", ".", "transcript", ")", ".", "This", "effectively", "disconnects", "the", "object",...
python
train
EmbodiedCognition/pagoda
pagoda/skeleton.py
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/skeleton.py#L319-L338
def enable_motors(self, max_force): '''Enable the joint motors in this skeleton. This method sets the maximum force that can be applied by each joint to attain the desired target velocities. It also enables torque feedback for all joint motors. Parameters ---------- max_force : float The maximum force that each joint is allowed to apply to attain its target velocity. ''' for joint in self.joints: amotor = getattr(joint, 'amotor', joint) amotor.max_forces = max_force if max_force > 0: amotor.enable_feedback() else: amotor.disable_feedback()
[ "def", "enable_motors", "(", "self", ",", "max_force", ")", ":", "for", "joint", "in", "self", ".", "joints", ":", "amotor", "=", "getattr", "(", "joint", ",", "'amotor'", ",", "joint", ")", "amotor", ".", "max_forces", "=", "max_force", "if", "max_force...
Enable the joint motors in this skeleton. This method sets the maximum force that can be applied by each joint to attain the desired target velocities. It also enables torque feedback for all joint motors. Parameters ---------- max_force : float The maximum force that each joint is allowed to apply to attain its target velocity.
[ "Enable", "the", "joint", "motors", "in", "this", "skeleton", "." ]
python
valid
saltstack/salt
salt/states/boto_vpc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_vpc.py#L1458-L1534
def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None, region=None, key=None, keyid=None, profile=None): ''' Accept a VPC pending requested peering connection between two VPCs. name Name of this state conn_id The connection ID to accept. Exclusive with conn_name. String type. conn_name The name of the VPC peering connection to accept. Exclusive with conn_id. String type. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.accept_vpc_peering_connection: - conn_name: salt_peering_connection # usage with vpc peering connection id and region boto_vpc.accept_vpc_peering_connection: - conn_id: pbx-1873d472 - region: us-west-2 ''' log.debug('Called state to accept VPC peering connection') pending = __salt__['boto_vpc.is_peering_connection_pending']( conn_id=conn_id, conn_name=conn_name, region=region, key=key, keyid=keyid, profile=profile) ret = { 'name': name, 'result': True, 'changes': {}, 'comment': 'Boto VPC peering state' } if not pending: ret['result'] = True ret['changes'].update({'old': 'No pending VPC peering connection found. Nothing to be done.'}) return ret if __opts__['test']: ret['changes'].update({'old': 'Pending VPC peering connection found and can be accepted'}) return ret fun = 'boto_vpc.accept_vpc_peering_connection' log.debug('Calling `%s()` to accept this VPC peering connection', fun) result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region, key=key, keyid=keyid, profile=profile) if 'error' in result: ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error']) ret['result'] = False return ret ret['changes'].update({'old': '', 'new': result['msg']}) return ret
[ "def", "accept_vpc_peering_connection", "(", "name", "=", "None", ",", "conn_id", "=", "None", ",", "conn_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "log", ...
Accept a VPC pending requested peering connection between two VPCs. name Name of this state conn_id The connection ID to accept. Exclusive with conn_name. String type. conn_name The name of the VPC peering connection to accept. Exclusive with conn_id. String type. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.accept_vpc_peering_connection: - conn_name: salt_peering_connection # usage with vpc peering connection id and region boto_vpc.accept_vpc_peering_connection: - conn_id: pbx-1873d472 - region: us-west-2
[ "Accept", "a", "VPC", "pending", "requested", "peering", "connection", "between", "two", "VPCs", "." ]
python
train
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L8301-L8306
def cornersphere(self, x): """Sphere (squared norm) test objective function constraint to the corner""" nconstr = len(x) - 0 if any(x[:nconstr] < 1): return np.NaN return sum(x**2) - nconstr
[ "def", "cornersphere", "(", "self", ",", "x", ")", ":", "nconstr", "=", "len", "(", "x", ")", "-", "0", "if", "any", "(", "x", "[", ":", "nconstr", "]", "<", "1", ")", ":", "return", "np", ".", "NaN", "return", "sum", "(", "x", "**", "2", "...
Sphere (squared norm) test objective function constrained to the corner
[ "Sphere", "(", "squared", "norm", ")", "test", "objective", "function", "constraint", "to", "the", "corner" ]
python
train
markovmodel/msmtools
msmtools/analysis/dense/pcca.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/pcca.py#L322-L457
def pcca(P, m):
    """
    PCCA+ spectral clustering method with optimized memberships [1]_

    Clusters the first m eigenvectors of a transition matrix in order to cluster the states.
    This function does not assume that the transition matrix is fully connected. Disconnected sets
    will automatically define the first metastable states, with perfect membership assignments.

    Parameters
    ----------
    P : ndarray (n,n)
        Transition matrix.

    m : int
        Number of clusters to group to.

    Returns
    -------
    chi : ndarray (n x m)
        A matrix containing the probability or membership of each state to be assigned to each cluster.
        The rows sum to 1.

    References
    ----------
    [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
        application to Markov state models and data classification.
        Adv Data Anal Classif 7, 147-179 (2013).
    [2] F. Noe, multiset PCCA and HMMs, in preparation.

    """
    # imports
    from msmtools.estimation import connected_sets
    from msmtools.analysis import eigenvalues, is_transition_matrix, hitting_probability

    # validate input
    n = np.shape(P)[0]
    if (m > n):
        raise ValueError("Number of metastable states m = " + str(m) +
                         " exceeds number of states of transition matrix n = " + str(n))
    if not is_transition_matrix(P):
        raise ValueError("Input matrix is not a transition matrix.")

    # prepare output
    chi = np.zeros((n, m))

    # test connectivity
    components = connected_sets(P)
    # print "all labels ",labels
    n_components = len(components)  # (n_components, labels) = connected_components(P, connection='strong')
    # print 'n_components'

    # store components as closed (with positive equilibrium distribution)
    # or as transition states (with vanishing equilibrium distribution)
    closed_components = []
    transition_states = []
    for i in range(n_components):
        component = components[i]  # np.argwhere(labels==i).flatten()
        rest = list(set(range(n)) - set(component))
        # is component closed?
        if (np.sum(P[component, :][:, rest]) == 0):
            closed_components.append(component)
        else:
            transition_states.append(component)
    n_closed_components = len(closed_components)
    closed_states = np.concatenate(closed_components)
    if len(transition_states) == 0:
        transition_states = np.array([], dtype=int)
    else:
        transition_states = np.concatenate(transition_states)

    # check if we have enough clusters to support the disconnected sets
    if (m < len(closed_components)):
        raise ValueError("Number of metastable states m = " + str(m) + " is too small. Transition matrix has " +
                         str(len(closed_components)) + " disconnected components")

    # We collect eigenvalues in order to decide which
    closed_components_Psub = []
    closed_components_ev = []
    closed_components_enum = []
    for i in range(n_closed_components):
        component = closed_components[i]
        # print "component ",i," ",component
        # compute eigenvalues in submatrix
        Psub = P[component, :][:, component]
        closed_components_Psub.append(Psub)
        closed_components_ev.append(eigenvalues(Psub))
        closed_components_enum.append(i * np.ones((component.size), dtype=int))

    # flatten
    closed_components_ev_flat = np.hstack(closed_components_ev)
    closed_components_enum_flat = np.hstack(closed_components_enum)
    # which components should be clustered?
    component_indexes = closed_components_enum_flat[np.argsort(closed_components_ev_flat)][0:m]
    # cluster each component
    ipcca = 0
    for i in range(n_closed_components):
        component = closed_components[i]
        # how many PCCA states in this component?
        m_by_component = np.shape(np.argwhere(component_indexes == i))[0]

        # if 1, then the result is trivial
        if (m_by_component == 1):
            chi[component, ipcca] = 1.0
            ipcca += 1
        elif (m_by_component > 1):
            #print "submatrix: ",closed_components_Psub[i]
            chi[component, ipcca:ipcca + m_by_component] = _pcca_connected(closed_components_Psub[i], m_by_component)
            ipcca += m_by_component
        else:
            raise RuntimeError("Component " + str(i) + " spuriously has " + str(m_by_component) + " pcca sets")

    # finally assign all transition states
    # print "chi\n", chi
    # print "transition states: ",transition_states
    # print "closed states: ", closed_states
    if (transition_states.size > 0):
        # make all closed states absorbing, so we can see which closed state we hit first
        Pabs = P.copy()
        Pabs[closed_states, :] = 0.0
        Pabs[closed_states, closed_states] = 1.0
        for i in range(closed_states.size):
            # hitting probability to each closed state
            h = hitting_probability(Pabs, closed_states[i])
            for j in range(transition_states.size):
                # transition states belong to closed states with the hitting probability, and inherit their chi
                chi[transition_states[j]] += h[transition_states[j]] * chi[closed_states[i]]

    # check if we have m metastable sets. If less than m, we must raise
    nmeta = np.count_nonzero(chi.sum(axis=0))
    assert m <= nmeta, str(m) + " metastable states requested, but transition matrix only has " + str(nmeta) \
                       + ". Consider using a prior or request less metastable states. "
    # print "chi\n", chi
    return chi
[ "def", "pcca", "(", "P", ",", "m", ")", ":", "# imports", "from", "msmtools", ".", "estimation", "import", "connected_sets", "from", "msmtools", ".", "analysis", "import", "eigenvalues", ",", "is_transition_matrix", ",", "hitting_probability", "# validate input", ...
PCCA+ spectral clustering method with optimized memberships [1]_

Clusters the first m eigenvectors of a transition matrix in order to cluster the states.
This function does not assume that the transition matrix is fully connected. Disconnected sets
will automatically define the first metastable states, with perfect membership assignments.

Parameters
----------
P : ndarray (n,n)
    Transition matrix.

m : int
    Number of clusters to group to.

Returns
-------
chi : ndarray (n x m)
    A matrix containing the probability or membership of each state to be assigned to each cluster.
    The rows sum to 1.

References
----------
[1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
    application to Markov state models and data classification.
    Adv Data Anal Classif 7, 147-179 (2013).
[2] F. Noe, multiset PCCA and HMMs, in preparation.
[ "PCCA", "+", "spectral", "clustering", "method", "with", "optimized", "memberships", "[", "1", "]", "_" ]
python
train
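A hedged usage sketch for pcca on a small three-state chain; it assumes msmtools is installed, and the transition matrix is illustrative.

import numpy as np
from msmtools.analysis.dense.pcca import pcca

P = np.array([[0.90, 0.10, 0.00],
              [0.10, 0.80, 0.10],
              [0.00, 0.10, 0.90]])
chi = pcca(P, 2)    # (3 x 2) membership matrix; each row sums to 1
print(chi)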
census-instrumentation/opencensus-python
contrib/opencensus-ext-pymongo/opencensus/ext/pymongo/trace.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-pymongo/opencensus/ext/pymongo/trace.py#L30-L33
def trace_integration(tracer=None): """Integrate with pymongo to trace it using event listener.""" log.info('Integrated module: {}'.format(MODULE_NAME)) monitoring.register(MongoCommandListener(tracer=tracer))
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "'Integrated module: {}'", ".", "format", "(", "MODULE_NAME", ")", ")", "monitoring", ".", "register", "(", "MongoCommandListener", "(", "tracer", "=", "tracer", ")", ...
Integrate with pymongo to trace it using event listener.
[ "Integrate", "with", "pymongo", "to", "trace", "it", "using", "event", "listener", "." ]
python
train
alex-kostirin/pyatomac
atomac/AXClasses.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/AXClasses.py#L761-L765
def _generateFind(self, **kwargs): """Generator which yields matches on AXChildren.""" for needle in self._generateChildren(): if needle._match(**kwargs): yield needle
[ "def", "_generateFind", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "needle", "in", "self", ".", "_generateChildren", "(", ")", ":", "if", "needle", ".", "_match", "(", "*", "*", "kwargs", ")", ":", "yield", "needle" ]
Generator which yields matches on AXChildren.
[ "Generator", "which", "yields", "matches", "on", "AXChildren", "." ]
python
valid
TurboGears/gearbox
gearbox/commands/setup_app.py
https://github.com/TurboGears/gearbox/blob/df496ab28050ce6a4cc4c502488f5c5812f2baff/gearbox/commands/setup_app.py#L110-L118
def _import_module(self, s): """ Import a module. """ mod = __import__(s) parts = s.split('.') for part in parts[1:]: mod = getattr(mod, part) return mod
[ "def", "_import_module", "(", "self", ",", "s", ")", ":", "mod", "=", "__import__", "(", "s", ")", "parts", "=", "s", ".", "split", "(", "'.'", ")", "for", "part", "in", "parts", "[", "1", ":", "]", ":", "mod", "=", "getattr", "(", "mod", ",", ...
Import a module.
[ "Import", "a", "module", "." ]
python
train
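The `_import_module` helper above is the classic dotted-path import idiom; a standalone sketch using only the stdlib:

def import_module(s):
    # __import__('os.path') returns the top-level package ('os'),
    # so walk the remaining attribute chain to reach the leaf module.
    mod = __import__(s)
    for part in s.split('.')[1:]:
        mod = getattr(mod, part)
    return mod

os_path = import_module('os.path')
assert os_path is __import__('os').path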
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/rbac_authorization_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/rbac_authorization_v1_api.py#L2215-L2243
def list_role_for_all_namespaces(self, **kwargs): # noqa: E501 """list_role_for_all_namespaces # noqa: E501 list or watch objects of kind Role # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_role_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1RoleList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_role_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 else: (data) = self.list_role_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "list_role_for_all_namespaces", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_role_for_...
list_role_for_all_namespaces # noqa: E501 list or watch objects of kind Role # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_role_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1RoleList If the method is called asynchronously, returns the request thread.
[ "list_role_for_all_namespaces", "#", "noqa", ":", "E501" ]
python
train
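A hedged end-to-end sketch for the `list_role_for_all_namespaces` record above; a reachable cluster and a valid ~/.kube/config are assumed.

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()          # reads the local kubeconfig
    rbac = client.RbacAuthorizationV1Api()
    roles = await rbac.list_role_for_all_namespaces(limit=50)
    for role in roles.items:                 # V1RoleList holds V1Role items
        print(role.metadata.namespace, role.metadata.name)

asyncio.run(main())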
numenta/nupic
src/nupic/swarming/hypersearch/particle.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/particle.py#L332-L366
def copyVarStatesFrom(self, particleState, varNames): """Copy specific variables from particleState into this particle. Parameters: -------------------------------------------------------------- particleState: dict produced by a particle's getState() method varNames: which variables to copy """ # Set this to false if you don't want the variable to move anymore # after we set the state allowedToMove = True for varName in particleState['varStates']: if varName in varNames: # If this particle doesn't include this field, don't copy it if varName not in self.permuteVars: continue # Set the best position to the copied position state = copy.deepcopy(particleState['varStates'][varName]) state['_position'] = state['position'] state['bestPosition'] = state['position'] if not allowedToMove: state['velocity'] = 0 # Set the state now self.permuteVars[varName].setState(state) if allowedToMove: # Let the particle move in both directions from the best position # it found previously and set it's initial velocity to a known # fraction of the total distance. self.permuteVars[varName].resetVelocity(self._rng)
[ "def", "copyVarStatesFrom", "(", "self", ",", "particleState", ",", "varNames", ")", ":", "# Set this to false if you don't want the variable to move anymore", "# after we set the state", "allowedToMove", "=", "True", "for", "varName", "in", "particleState", "[", "'varStates...
Copy specific variables from particleState into this particle. Parameters: -------------------------------------------------------------- particleState: dict produced by a particle's getState() method varNames: which variables to copy
[ "Copy", "specific", "variables", "from", "particleState", "into", "this", "particle", "." ]
python
valid
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/collections/collection.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/collections/collection.py#L197-L209
def draw(self, mode=None): """ Draw collection """ if self._need_update: self._update() program = self._programs[0] mode = mode or self._mode if self._indices_list is not None: program.draw(mode, self._indices_buffer) else: program.draw(mode)
[ "def", "draw", "(", "self", ",", "mode", "=", "None", ")", ":", "if", "self", ".", "_need_update", ":", "self", ".", "_update", "(", ")", "program", "=", "self", ".", "_programs", "[", "0", "]", "mode", "=", "mode", "or", "self", ".", "_mode", "i...
Draw collection
[ "Draw", "collection" ]
python
train
yyuu/botornado
boto/s3/key.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/key.py#L1143-L1169
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10): """ Get a torrent file (see to get_file) :type fp: file :param fp: The file pointer of where to put the torrent :type headers: dict :param headers: Headers to be passed :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. """ return self.get_file(fp, headers, cb, num_cb, torrent=True)
[ "def", "get_torrent_file", "(", "self", ",", "fp", ",", "headers", "=", "None", ",", "cb", "=", "None", ",", "num_cb", "=", "10", ")", ":", "return", "self", ".", "get_file", "(", "fp", ",", "headers", ",", "cb", ",", "num_cb", ",", "torrent", "=",...
Get a torrent file (see get_file) :type fp: file :param fp: The file pointer of where to put the torrent :type headers: dict :param headers: Headers to be passed :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the object to be transmitted. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer.
[ "Get", "a", "torrent", "file", "(", "see", "get_file", ")" ]
python
train
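Hedged boto2-style sketch for `get_torrent_file` above; bucket and key names are placeholders and credentials are taken from the environment.

import boto

conn = boto.connect_s3()
key = conn.get_bucket('my-bucket').get_key('big-file.bin')
with open('big-file.torrent', 'wb') as fp:
    key.get_torrent_file(fp)   # delegates to get_file(..., torrent=True)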
SystemRDL/systemrdl-compiler
systemrdl/core/properties.py
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/core/properties.py#L1198-L1208
def get_default(self, node): """ Unless specified otherwise, intr fields are implicitly stickybit """ if node.inst.properties.get("intr", False): # Interrupt is set! # Default is implicitly stickybit, unless the mutually-exclusive # sticky property was set instead return not node.inst.properties.get("sticky", False) else: return False
[ "def", "get_default", "(", "self", ",", "node", ")", ":", "if", "node", ".", "inst", ".", "properties", ".", "get", "(", "\"intr\"", ",", "False", ")", ":", "# Interrupt is set!", "# Default is implicitly stickybit, unless the mutually-exclusive", "# sticky property w...
Unless specified otherwise, intr fields are implicitly stickybit
[ "Unless", "specified", "otherwise", "intr", "fields", "are", "implicitly", "stickybit" ]
python
train
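The rule in `get_default` above is small enough to restate standalone; this sketch only assumes `inst.properties` behaves like the dict the method reads.

def stickybit_default(properties):
    # intr fields default to stickybit unless the mutually-exclusive
    # sticky property was set instead.
    if properties.get("intr", False):
        return not properties.get("sticky", False)
    return False

assert stickybit_default({"intr": True}) is True
assert stickybit_default({"intr": True, "sticky": True}) is False
assert stickybit_default({}) is False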
DerMitch/fritzbox-smarthome
fritzhome/fritz.py
https://github.com/DerMitch/fritzbox-smarthome/blob/84cbd7c1b33e6256add041b0395ff5fccc01f103/fritzhome/fritz.py#L83-L87
def calculate_response(self, challenge, password): """Calculate response for the challenge-response authentication""" to_hash = (challenge + "-" + password).encode("UTF-16LE") hashed = hashlib.md5(to_hash).hexdigest() return "{0}-{1}".format(challenge, hashed)
[ "def", "calculate_response", "(", "self", ",", "challenge", ",", "password", ")", ":", "to_hash", "=", "(", "challenge", "+", "\"-\"", "+", "password", ")", ".", "encode", "(", "\"UTF-16LE\"", ")", "hashed", "=", "hashlib", ".", "md5", "(", "to_hash", ")...
Calculate response for the challenge-response authentication
[ "Calculate", "response", "for", "the", "challenge", "-", "response", "authentication" ]
python
train
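The challenge-response scheme above is self-contained; a runnable sketch with a made-up challenge (a real one comes from the box's login endpoint).

import hashlib

def calculate_response(challenge, password):
    to_hash = (challenge + "-" + password).encode("UTF-16LE")
    return "{0}-{1}".format(challenge, hashlib.md5(to_hash).hexdigest())

print(calculate_response("1234567z", "secret"))  # '<challenge>-<md5 hexdigest>'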
devricks/soft_drf
soft_drf/api/routers/__init__.py
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/routers/__init__.py#L36-L78
def register_nested( self, parent_prefix, prefix, viewset, base_name=None, parent_lookup_name=None, depth_level=1 ): """ Register a nested viewset wihtout worrying of instantiate a nested router for registry. """ kwargs = { 'trailing_slash': bool(self.trailing_slash) } if parent_lookup_name is not None: kwargs.update(lookup=parent_lookup_name) # Section for the depth of the route and add more routes if depth_level > 1: routers = filter( lambda r: (r._depth_level == (depth_level - 1)) and r._nested_prefix == parent_prefix, self._nested_object_registry ) try: parent_router = next(routers) except StopIteration: raise RuntimeError('parent registered resource not found') else: parent_router = self nested_router = NestedSimpleRouter( parent_router, parent_prefix, **kwargs ) nested_router._nested_prefix = prefix nested_router._depth_level = depth_level nested_router.register(prefix, viewset, base_name) self._nested_object_registry.append(nested_router)
[ "def", "register_nested", "(", "self", ",", "parent_prefix", ",", "prefix", ",", "viewset", ",", "base_name", "=", "None", ",", "parent_lookup_name", "=", "None", ",", "depth_level", "=", "1", ")", ":", "kwargs", "=", "{", "'trailing_slash'", ":", "bool", ...
Register a nested viewset without worrying about instantiating a nested router for the registry.
[ "Register", "a", "nested", "viewset", "without", "worrying", "about", "instantiating", "a", "nested", "router", "for", "the", "registry", "." ]
python
train
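A hedged call-pattern sketch for `register_nested` above. The router class and the viewsets are placeholders, not soft_drf API; only the argument names follow the signature shown.

# Hypothetical: Router stands in for whatever class mixes in register_nested;
# AuthorViewSet/BookViewSet/ChapterViewSet are placeholder viewsets.
router = Router()
router.register('authors', AuthorViewSet, base_name='authors')
router.register_nested(
    'authors', 'books', BookViewSet,
    base_name='books', parent_lookup_name='author',
)
# A deeper level hangs off the previous one via depth_level:
router.register_nested(
    'books', 'chapters', ChapterViewSet,
    base_name='chapters', parent_lookup_name='book', depth_level=2,
)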
gem/oq-engine
openquake/commonlib/readinput.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/readinput.py#L1113-L1142
def get_pmap_from_nrml(oqparam, fname): """ :param oqparam: an :class:`openquake.commonlib.oqvalidation.OqParam` instance :param fname: an XML file containing hazard curves :returns: site mesh, curve array """ hcurves_by_imt = {} oqparam.hazard_imtls = imtls = {} for hcurves in nrml.read(fname): imt = hcurves['IMT'] oqparam.investigation_time = hcurves['investigationTime'] if imt == 'SA': imt += '(%s)' % hcurves['saPeriod'] imtls[imt] = ~hcurves.IMLs data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:]) hcurves_by_imt[imt] = numpy.array([d[1] for d in data]) lons, lats = [], [] for xy, poes in data: lons.append(xy[0]) lats.append(xy[1]) mesh = geo.Mesh(numpy.array(lons), numpy.array(lats)) num_levels = sum(len(v) for v in imtls.values()) array = numpy.zeros((len(mesh), num_levels)) imtls = DictArray(imtls) for imt_ in hcurves_by_imt: array[:, imtls(imt_)] = hcurves_by_imt[imt_] return mesh, ProbabilityMap.from_array(array, range(len(mesh)))
[ "def", "get_pmap_from_nrml", "(", "oqparam", ",", "fname", ")", ":", "hcurves_by_imt", "=", "{", "}", "oqparam", ".", "hazard_imtls", "=", "imtls", "=", "{", "}", "for", "hcurves", "in", "nrml", ".", "read", "(", "fname", ")", ":", "imt", "=", "hcurves...
:param oqparam: an :class:`openquake.commonlib.oqvalidation.OqParam` instance :param fname: an XML file containing hazard curves :returns: site mesh, curve array
[ ":", "param", "oqparam", ":", "an", ":", "class", ":", "openquake", ".", "commonlib", ".", "oqvalidation", ".", "OqParam", "instance", ":", "param", "fname", ":", "an", "XML", "file", "containing", "hazard", "curves", ":", "returns", ":", "site", "mesh", ...
python
train
craffel/mir_eval
mir_eval/separation.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L679-L722
def _project(reference_sources, estimated_source, flen): """Least-squares projection of estimated source on the subspace spanned by delayed versions of reference sources, with delays between 0 and flen-1 """ nsrc = reference_sources.shape[0] nsampl = reference_sources.shape[1] # computing coefficients of least squares problem via FFT ## # zero padding and FFT of input data reference_sources = np.hstack((reference_sources, np.zeros((nsrc, flen - 1)))) estimated_source = np.hstack((estimated_source, np.zeros(flen - 1))) n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.))) sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1) sef = scipy.fftpack.fft(estimated_source, n=n_fft) # inner products between delayed versions of reference_sources G = np.zeros((nsrc * flen, nsrc * flen)) for i in range(nsrc): for j in range(nsrc): ssf = sf[i] * np.conj(sf[j]) ssf = np.real(scipy.fftpack.ifft(ssf)) ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])), r=ssf[:flen]) G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T # inner products between estimated_source and delayed versions of # reference_sources D = np.zeros(nsrc * flen) for i in range(nsrc): ssef = sf[i] * np.conj(sef) ssef = np.real(scipy.fftpack.ifft(ssef)) D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1])) # Computing projection # Distortion filters try: C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F') except np.linalg.linalg.LinAlgError: C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F') # Filtering sproj = np.zeros(nsampl + flen - 1) for i in range(nsrc): sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1] return sproj
[ "def", "_project", "(", "reference_sources", ",", "estimated_source", ",", "flen", ")", ":", "nsrc", "=", "reference_sources", ".", "shape", "[", "0", "]", "nsampl", "=", "reference_sources", ".", "shape", "[", "1", "]", "# computing coefficients of least squares ...
Least-squares projection of estimated source on the subspace spanned by delayed versions of reference sources, with delays between 0 and flen-1
[ "Least", "-", "squares", "projection", "of", "estimated", "source", "on", "the", "subspace", "spanned", "by", "delayed", "versions", "of", "reference", "sources", "with", "delays", "between", "0", "and", "flen", "-", "1" ]
python
train
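`_project` is internal; the public entry point that exercises it is `mir_eval.separation.bss_eval_sources`. A runnable toy sketch:

import numpy as np
import mir_eval

rng = np.random.RandomState(0)
reference = rng.randn(2, 8000)                      # two reference sources
estimated = reference + 0.05 * rng.randn(2, 8000)   # slightly corrupted copies
sdr, sir, sar, perm = mir_eval.separation.bss_eval_sources(reference, estimated)
print(sdr, perm)   # high SDR values and an identity permutation are expected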
ome/omego
omego/db.py
https://github.com/ome/omego/blob/2dadbf3c6342b6c995f9e0dceaf3c0b7fab030fb/omego/db.py#L206-L218
def dump(self): """ Dump the database using the postgres custom format """ dumpfile = self.args.dumpfile if not dumpfile: db, env = self.get_db_args_env() dumpfile = fileutils.timestamp_filename( 'omero-database-%s' % db['name'], 'pgdump') log.info('Dumping database to %s', dumpfile) if not self.args.dry_run: self.pgdump('-Fc', '-f', dumpfile)
[ "def", "dump", "(", "self", ")", ":", "dumpfile", "=", "self", ".", "args", ".", "dumpfile", "if", "not", "dumpfile", ":", "db", ",", "env", "=", "self", ".", "get_db_args_env", "(", ")", "dumpfile", "=", "fileutils", ".", "timestamp_filename", "(", "'...
Dump the database using the postgres custom format
[ "Dump", "the", "database", "using", "the", "postgres", "custom", "format" ]
python
train
polyaxon/polyaxon
polyaxon/streams/consumers/consumers.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/streams/consumers/consumers.py#L157-L165
def setup_queue(self, queue_name): """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare. """ _logger.debug('Declaring queue %s', queue_name) self._channel.queue_declare(self.on_queue_declareok, queue_name)
[ "def", "setup_queue", "(", "self", ",", "queue_name", ")", ":", "_logger", ".", "debug", "(", "'Declaring queue %s'", ",", "queue_name", ")", "self", ".", "_channel", ".", "queue_declare", "(", "self", ".", "on_queue_declareok", ",", "queue_name", ")" ]
Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare.
[ "Setup", "the", "queue", "on", "RabbitMQ", "by", "invoking", "the", "Queue", ".", "Declare", "RPC", "command", ".", "When", "it", "is", "complete", "the", "on_queue_declareok", "method", "will", "be", "invoked", "by", "pika", "." ]
python
train
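The snippet above uses pika's asynchronous style, where `queue_declare` takes the completion callback first (a pika 0.x signature). For a quick equivalent check, the blocking API declares the same queue synchronously; the broker address and queue name are assumptions.

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
result = channel.queue_declare(queue='events')   # synchronous Queue.Declare
print('Declared', result.method.queue)
connection.close()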
serge-sans-paille/pythran
pythran/analyses/range_values.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/analyses/range_values.py#L326-L368
def visit_Compare(self, node): """ Boolean are possible index. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... c = a < b ... d = b < 3 ... e = b == 4''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=1, high=1) >>> res['d'] Interval(low=0, high=0) >>> res['e'] Interval(low=0, high=1) """ if any(isinstance(op, (ast.In, ast.NotIn, ast.Is, ast.IsNot)) for op in node.ops): self.generic_visit(node) return self.add(node, Interval(0, 1)) curr = self.visit(node.left) res = [] for op, comparator in zip(node.ops, node.comparators): comparator = self.visit(comparator) fake = ast.Compare(ast.Name('x', ast.Load(), None), [op], [ast.Name('y', ast.Load(), None)]) fake = ast.Expression(fake) ast.fix_missing_locations(fake) expr = compile(ast.gast_to_ast(fake), '<range_values>', 'eval') res.append(eval(expr, {'x': curr, 'y': comparator})) if all(res): return self.add(node, Interval(1, 1)) elif any(r.low == r.high == 0 for r in res): return self.add(node, Interval(0, 0)) else: return self.add(node, Interval(0, 1))
[ "def", "visit_Compare", "(", "self", ",", "node", ")", ":", "if", "any", "(", "isinstance", "(", "op", ",", "(", "ast", ".", "In", ",", "ast", ".", "NotIn", ",", "ast", ".", "Is", ",", "ast", ".", "IsNot", ")", ")", "for", "op", "in", "node", ...
Booleans are possible indices. >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse(''' ... def foo(): ... a = 2 or 3 ... b = 4 or 5 ... c = a < b ... d = b < 3 ... e = b == 4''') >>> pm = passmanager.PassManager("test") >>> res = pm.gather(RangeValues, node) >>> res['c'] Interval(low=1, high=1) >>> res['d'] Interval(low=0, high=0) >>> res['e'] Interval(low=0, high=1)
[ "Booleans", "are", "possible", "indices", "." ]
python
train
hwmrocker/smtplibaio
smtplibaio/smtp.py
https://github.com/hwmrocker/smtplibaio/blob/84ce8e45b7e706476739d0efcb416c18ecabbbb6/smtplibaio/smtp.py#L846-L859
async def close(self): """ Cleans up after the connection to the SMTP server has been closed (voluntarily or not). """ if self.writer is not None: # Close the transport: try: self.writer.close() except OSError as exc: if exc.errno != errno.ENOTCONN: raise self.reset_state()
[ "async", "def", "close", "(", "self", ")", ":", "if", "self", ".", "writer", "is", "not", "None", ":", "# Close the transport:", "try", ":", "self", ".", "writer", ".", "close", "(", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errn...
Cleans up after the connection to the SMTP server has been closed (voluntarily or not).
[ "Cleans", "up", "after", "the", "connection", "to", "the", "SMTP", "server", "has", "been", "closed", "(", "voluntarily", "or", "not", ")", "." ]
python
train
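`close()` above is normally reached through the client's async context manager; a hedged sketch (hostname is a placeholder and the constructor keywords are assumed from smtplibaio's documented usage).

import asyncio
from smtplibaio import SMTP

async def main():
    async with SMTP(hostname='smtp.example.org', port=25) as client:
        await client.ehlo()
    # __aexit__ ends in close(), which tolerates an already-dropped
    # transport (the ENOTCONN branch above).

asyncio.run(main())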
JnyJny/Geometry
Geometry/point.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/point.py#L1420-L1455
def rotate2d(self, theta, origin=None, axis='z', radians=False): ''' :theta: float radians to rotate self around origin :origin: optional Point, defaults to 0,0,0 Returns a Point rotated by :theta: around :origin:. ''' origin = Point._convert(origin) delta = self - origin p = Point(origin) if not radians: theta = math.radians(theta) cosT = math.cos(theta) sinT = math.sin(theta) if axis == 'z': p.x += (cosT * delta.x) - (sinT * delta.y) p.y += (sinT * delta.x) + (cosT * delta.y) return p if axis == 'y': p.z += (cosT * delta.z) - (sinT * delta.x) p.x += (sinT * delta.z) + (cosT * delta.x) return p if axis == 'x': p.y += (cosT * delta.y) - (sinT * delta.z) p.z += (sinT * delta.y) + (cosT * delta.z) return p raise KeyError('unknown axis {}, expecting x, y or z'.format(axis))
[ "def", "rotate2d", "(", "self", ",", "theta", ",", "origin", "=", "None", ",", "axis", "=", "'z'", ",", "radians", "=", "False", ")", ":", "origin", "=", "Point", ".", "_convert", "(", "origin", ")", "delta", "=", "self", "-", "origin", "p", "=", ...
:theta: float angle to rotate self around origin, in degrees by default (pass radians=True to supply radians) :origin: optional Point, defaults to 0,0,0 :axis: optional str, one of 'x', 'y' or 'z' (default 'z') Returns a Point rotated by :theta: around :origin:.
[ ":", "theta", ":", "float", "angle", "to", "rotate", "self", "around", "origin", "in", "degrees", "by", "default", ":", "origin", ":", "optional", "Point", "defaults", "to", "0", "0", "0" ]
python
train
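Worked check for `rotate2d` above: rotating the unit x vector 90 degrees about the default z axis lands on the unit y vector. The `Point(x, y, z)` constructor form is an assumption.

from Geometry import Point

p = Point(1, 0, 0)
q = p.rotate2d(90)   # degrees by default; pass radians=True to supply radians
# cos(90 deg) = 0 and sin(90 deg) = 1, so x' = -y = 0 and y' = x = 1.
print(q)             # approximately (0, 1, 0)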
ChargePoint/pydnp3
examples/outstation.py
https://github.com/ChargePoint/pydnp3/blob/5bcd8240d1fc0aa1579e71f2efcab63b4c61c547/examples/outstation.py#L284-L291
def main(): """The Outstation has been started from the command line. Execute ad-hoc tests if desired.""" app = OutstationApplication() _log.debug('Initialization complete. In command loop.') # Ad-hoc tests can be inserted here if desired. See outstation_cmd.py for examples. app.shutdown() _log.debug('Exiting.') exit()
[ "def", "main", "(", ")", ":", "app", "=", "OutstationApplication", "(", ")", "_log", ".", "debug", "(", "'Initialization complete. In command loop.'", ")", "# Ad-hoc tests can be inserted here if desired. See outstation_cmd.py for examples.", "app", ".", "shutdown", "(", ")...
The Outstation has been started from the command line. Execute ad-hoc tests if desired.
[ "The", "Outstation", "has", "been", "started", "from", "the", "command", "line", ".", "Execute", "ad", "-", "hoc", "tests", "if", "desired", "." ]
python
valid
mikedh/trimesh
trimesh/path/simplify.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/simplify.py#L364-L411
def simplify_spline(path, smooth=None, verbose=False): """ Replace discrete curves with b-spline or Arc and return the result as a new Path2D object. Parameters ------------ path : trimesh.path.Path2D Input geometry smooth : float Distance to smooth Returns ------------ simplified : Path2D Consists of Arc and BSpline entities """ new_vertices = [] new_entities = [] scale = path.scale for discrete in path.discrete: circle = is_circle(discrete, scale=scale, verbose=verbose) if circle is not None: # the points are circular enough for our high standards # so replace them with a closed Arc entity new_entities.append(entities.Arc(points=np.arange(3) + len(new_vertices), closed=True)) new_vertices.extend(circle) continue # entities for this path entity, vertices = points_to_spline_entity(discrete, smooth=smooth) # reindex returned control points entity.points += len(new_vertices) # save entity and vertices new_vertices.extend(vertices) new_entities.append(entity) # create the Path2D object for the result simplified = type(path)(entities=new_entities, vertices=new_vertices) return simplified
[ "def", "simplify_spline", "(", "path", ",", "smooth", "=", "None", ",", "verbose", "=", "False", ")", ":", "new_vertices", "=", "[", "]", "new_entities", "=", "[", "]", "scale", "=", "path", ".", "scale", "for", "discrete", "in", "path", ".", "discrete...
Replace discrete curves with b-spline or Arc and return the result as a new Path2D object. Parameters ------------ path : trimesh.path.Path2D Input geometry smooth : float Distance to smooth Returns ------------ simplified : Path2D Consists of Arc and BSpline entities
[ "Replace", "discrete", "curves", "with", "b", "-", "spline", "or", "Arc", "and", "return", "the", "result", "as", "a", "new", "Path2D", "object", "." ]
python
train
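Hedged sketch for `simplify_spline` above; the input file name is a placeholder and `trimesh.load_path` is assumed to return a Path2D for it.

import trimesh
from trimesh.path.simplify import simplify_spline

path = trimesh.load_path('drawing.dxf')        # hypothetical 2D drawing
simplified = simplify_spline(path, smooth=1e-4)
print(len(path.entities), '->', len(simplified.entities))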
DLR-RM/RAFCON
source/rafcon/gui/controllers/states_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/states_editor.py#L491-L514
def activate_state_tab(self, state_m): """Opens the tab for the specified state model The tab with the given state model is opened or set to foreground. :param state_m: The desired state model (the selected state) """ # The current shown state differs from the desired one current_state_m = self.get_current_state_m() if current_state_m is not state_m: state_identifier = self.get_state_identifier(state_m) # The desired state is not open, yet if state_identifier not in self.tabs: # add tab for desired state page_id = self.add_state_editor(state_m) self.view.notebook.set_current_page(page_id) # bring tab for desired state into foreground else: page = self.tabs[state_identifier]['page'] page_id = self.view.notebook.page_num(page) self.view.notebook.set_current_page(page_id) self.keep_only_sticked_and_selected_tabs()
[ "def", "activate_state_tab", "(", "self", ",", "state_m", ")", ":", "# The current shown state differs from the desired one", "current_state_m", "=", "self", ".", "get_current_state_m", "(", ")", "if", "current_state_m", "is", "not", "state_m", ":", "state_identifier", ...
Opens the tab for the specified state model The tab with the given state model is opened or set to foreground. :param state_m: The desired state model (the selected state)
[ "Opens", "the", "tab", "for", "the", "specified", "state", "model" ]
python
train
PythonCharmers/python-future
src/libfuturize/fixer_util.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/libfuturize/fixer_util.py#L96-L110
def indentation_step(node): """ Dirty little trick to get the difference between each indentation level Implemented by finding the shortest indentation string (technically, the "least" of all of the indentation strings, but tabs and spaces mixed won't get this far, so those are synonymous.) """ r = find_root(node) # Collect all indentations into one set. all_indents = set(i.value for i in r.pre_order() if i.type == token.INDENT) if not all_indents: # nothing is indented anywhere, so we get to pick what we want return u" " # four spaces is a popular convention else: return min(all_indents)
[ "def", "indentation_step", "(", "node", ")", ":", "r", "=", "find_root", "(", "node", ")", "# Collect all indentations into one set.", "all_indents", "=", "set", "(", "i", ".", "value", "for", "i", "in", "r", ".", "pre_order", "(", ")", "if", "i", ".", "...
Dirty little trick to get the difference between each indentation level Implemented by finding the shortest indentation string (technically, the "least" of all of the indentation strings, but tabs and spaces mixed won't get this far, so those are synonymous.)
[ "Dirty", "little", "trick", "to", "get", "the", "difference", "between", "each", "indentation", "level", "Implemented", "by", "finding", "the", "shortest", "indentation", "string", "(", "technically", "the", "least", "of", "all", "of", "the", "indentation", "str...
python
train
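A hedged lib2to3 sketch that drives `indentation_step` above; the fixer_util import path follows the file shown.

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver
from libfuturize.fixer_util import indentation_step

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("def f():\n    return 1\n")
print(repr(indentation_step(tree)))   # "'    '", i.e. four spaces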
wangwenpei/cliez
cliez/conf/__init__.py
https://github.com/wangwenpei/cliez/blob/d6fe775544cd380735c56c8a4a79bc2ad22cb6c4/cliez/conf/__init__.py#L46-L59
def settings(path=None, with_path=None): """ Get or set `Settings._wrapped` :param str path: a python module file, if user set it,write config to `Settings._wrapped` :param str with_path: search path :return: A instance of `Settings` """ if path: Settings.bind(path, with_path=with_path) return Settings._wrapped
[ "def", "settings", "(", "path", "=", "None", ",", "with_path", "=", "None", ")", ":", "if", "path", ":", "Settings", ".", "bind", "(", "path", ",", "with_path", "=", "with_path", ")", "return", "Settings", ".", "_wrapped" ]
Get or set `Settings._wrapped` :param str path: a python module file, if user set it,write config to `Settings._wrapped` :param str with_path: search path :return: A instance of `Settings`
[ "Get", "or", "set", "Settings", ".", "_wrapped" ]
python
valid
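Hedged usage sketch for the `settings` record above; the module path is a placeholder.

from cliez.conf import settings

settings(path='myapp/settings.py')   # bind once: writes Settings._wrapped
conf = settings()                    # later calls return the bound object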
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1328-L1347
def update_file_ext(filename, ext='txt', sep='.'): r"""Force the file or path str to end with the indicated extension Note: a dot (".") is assumed to delimit the extension >>> from __future__ import unicode_literals >>> update_file_ext('/home/hobs/extremofile', 'bac') '/home/hobs/extremofile.bac' >>> update_file_ext('/home/hobs/piano.file/', 'music') '/home/hobs/piano.file/.music' >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk') '/home/ninja.hobs/Anglofile.uk' >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-') '/home/ninja-corsi/audio-file' """ path, filename = os.path.split(filename) if ext and ext[0] == sep: ext = ext[1:] return os.path.join(path, sep.join(filename.split(sep)[:-1 if filename.count(sep) > 1 else 1] + [ext]))
[ "def", "update_file_ext", "(", "filename", ",", "ext", "=", "'txt'", ",", "sep", "=", "'.'", ")", ":", "path", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "if", "ext", "and", "ext", "[", "0", "]", "==", "sep", ":"...
r"""Force the file or path str to end with the indicated extension Note: a dot (".") is assumed to delimit the extension >>> from __future__ import unicode_literals >>> update_file_ext('/home/hobs/extremofile', 'bac') '/home/hobs/extremofile.bac' >>> update_file_ext('/home/hobs/piano.file/', 'music') '/home/hobs/piano.file/.music' >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk') '/home/ninja.hobs/Anglofile.uk' >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-') '/home/ninja-corsi/audio-file'
[ "r", "Force", "the", "file", "or", "path", "str", "to", "end", "with", "the", "indicated", "extension" ]
python
train
cltk/cltk
cltk/utils/philology.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/utils/philology.py#L196-L211
def return_concordance_all(self, tokens: List[str]) -> List[List[str]]: """Take a list of tokens, iteratively run each word through return_concordance_word and build a list of all. This returns a list of lists. """ coll = pyuca.Collator() # type: pyuca.Collator tokens = sorted(tokens, key=coll.sort_key) #! is the list order preserved? concordance_list = [] # type: List[List[str]] for token in tokens: concordance_list_for_word = self.return_concordance_word(token) # List[str] if concordance_list_for_word: concordance_list.append(concordance_list_for_word) return concordance_list
[ "def", "return_concordance_all", "(", "self", ",", "tokens", ":", "List", "[", "str", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "coll", "=", "pyuca", ".", "Collator", "(", ")", "# type: pyuca.Collator", "tokens", "=", "sorted", "(...
Take a list of tokens, iteratively run each word through return_concordance_word and build a list of all. This returns a list of lists.
[ "Take", "a", "list", "of", "tokens", "iteratively", "run", "each", "word", "through", "return_concordance_word", "and", "build", "a", "list", "of", "all", ".", "This", "returns", "a", "list", "of", "lists", "." ]
python
train
secure-systems-lab/securesystemslib
securesystemslib/util.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/util.py#L743-L783
def get_target_hash(target_filepath): """ <Purpose> Compute the hash of 'target_filepath'. This is useful in conjunction with the "path_hash_prefixes" attribute in a delegated targets role, which tells us which paths it is implicitly responsible for. The repository may optionally organize targets into hashed bins to ease target delegations and role metadata management. The use of consistent hashing allows for a uniform distribution of targets into bins. <Arguments> target_filepath: The path to the target file on the repository. This will be relative to the 'targets' (or equivalent) directory on a given mirror. <Exceptions> None. <Side Effects> None. <Returns> The hash of 'target_filepath'. """ # Does 'target_filepath' have the correct format? # Ensure the arguments have the appropriate number of objects and object # types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if there is a mismatch. securesystemslib.formats.RELPATH_SCHEMA.check_match(target_filepath) # Calculate the hash of the filepath to determine which bin to find the # target. The client currently assumes the repository uses # 'HASH_FUNCTION' to generate hashes and 'utf-8'. digest_object = securesystemslib.hash.digest(HASH_FUNCTION) encoded_target_filepath = target_filepath.encode('utf-8') digest_object.update(encoded_target_filepath) target_filepath_hash = digest_object.hexdigest() return target_filepath_hash
[ "def", "get_target_hash", "(", "target_filepath", ")", ":", "# Does 'target_filepath' have the correct format?", "# Ensure the arguments have the appropriate number of objects and object", "# types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if ...
<Purpose> Compute the hash of 'target_filepath'. This is useful in conjunction with the "path_hash_prefixes" attribute in a delegated targets role, which tells us which paths it is implicitly responsible for. The repository may optionally organize targets into hashed bins to ease target delegations and role metadata management. The use of consistent hashing allows for a uniform distribution of targets into bins. <Arguments> target_filepath: The path to the target file on the repository. This will be relative to the 'targets' (or equivalent) directory on a given mirror. <Exceptions> None. <Side Effects> None. <Returns> The hash of 'target_filepath'.
[ "<Purpose", ">", "Compute", "the", "hash", "of", "target_filepath", ".", "This", "is", "useful", "in", "conjunction", "with", "the", "path_hash_prefixes", "attribute", "in", "a", "delegated", "targets", "role", "which", "tells", "us", "which", "paths", "it", "...
python
train
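The helper above reduces to a single digest call; a standalone equivalent, assuming `HASH_FUNCTION` is 'sha256' (its usual default in securesystemslib).

import hashlib

def target_hash(target_filepath):
    # hash of the UTF-8 encoded, repository-relative target path
    return hashlib.sha256(target_filepath.encode('utf-8')).hexdigest()

print(target_hash('packages/foo-1.0.tar.gz'))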
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L313-L324
def pars_in_groups(self): """ return a dictionary of parameter names in each parameter group. Returns: dictionary """ pargp = self.par_groups allpars = dict() for cpg in pargp: allpars[cpg] = [i for i in self.parameter_data.loc[self.parameter_data.pargp == cpg, 'parnme']] return allpars
[ "def", "pars_in_groups", "(", "self", ")", ":", "pargp", "=", "self", ".", "par_groups", "allpars", "=", "dict", "(", ")", "for", "cpg", "in", "pargp", ":", "allpars", "[", "cpg", "]", "=", "[", "i", "for", "i", "in", "self", ".", "parameter_data", ...
return a dictionary of parameter names in each parameter group. Returns: dictionary
[ "return", "a", "dictionary", "of", "parameter", "names", "in", "each", "parameter", "group", "." ]
python
train
splunk/splunk-sdk-python
examples/analytics/bottle.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/examples/analytics/bottle.py#L734-L760
def wsgi(self, environ, start_response): """ The bottle WSGI-interface. """ try: environ['bottle.app'] = self request.bind(environ) response.bind() out = self._handle(environ) out = self._cast(out, request, response) # rfc2616 section 4.3 if response.status in (100, 101, 204, 304) or request.method == 'HEAD': if hasattr(out, 'close'): out.close() out = [] status = '%d %s' % (response.status, HTTP_CODES[response.status]) start_response(status, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception as e: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % environ.get('PATH_INFO', '/') if DEBUG: err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e) err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10) environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) return [tob(err)]
[ "def", "wsgi", "(", "self", ",", "environ", ",", "start_response", ")", ":", "try", ":", "environ", "[", "'bottle.app'", "]", "=", "self", "request", ".", "bind", "(", "environ", ")", "response", ".", "bind", "(", ")", "out", "=", "self", ".", "_hand...
The bottle WSGI-interface.
[ "The", "bottle", "WSGI", "-", "interface", "." ]
python
train
eaton-lab/toytree
toytree/Coords.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Coords.py#L40-L53
def update(self): "Updates cartesian coordinates for drawing tree graph" # get new shape and clear for attrs self.edges = np.zeros((self.ttree.nnodes - 1, 2), dtype=int) self.verts = np.zeros((self.ttree.nnodes, 2), dtype=float) self.lines = [] self.coords = [] # fill with updates self.update_idxs() # get dimensions of tree self.update_fixed_order() # in case ntips changed self.assign_vertices() # get node locations self.assign_coordinates() # get edge locations self.reorient_coordinates()
[ "def", "update", "(", "self", ")", ":", "# get new shape and clear for attrs", "self", ".", "edges", "=", "np", ".", "zeros", "(", "(", "self", ".", "ttree", ".", "nnodes", "-", "1", ",", "2", ")", ",", "dtype", "=", "int", ")", "self", ".", "verts",...
Updates cartesian coordinates for drawing tree graph
[ "Updates", "cartesian", "coordinates", "for", "drawing", "tree", "graph" ]
python
train
chrisspen/burlap
burlap/vm.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/vm.py#L139-L212
def list_instances(show=1, name=None, group=None, release=None, except_release=None): """ Retrieves all virtual machines instances in the current environment. """ from burlap.common import shelf, OrderedDict, get_verbose verbose = get_verbose() require('vm_type', 'vm_group') assert env.vm_type, 'No VM type specified.' env.vm_type = (env.vm_type or '').lower() _name = name _group = group _release = release if verbose: print('name=%s, group=%s, release=%s' % (_name, _group, _release)) env.vm_elastic_ip_mappings = shelf.get('vm_elastic_ip_mappings') data = type(env)() if env.vm_type == EC2: if verbose: print('Checking EC2...') for instance in get_all_running_ec2_instances(): name = instance.tags.get(env.vm_name_tag) group = instance.tags.get(env.vm_group_tag) release = instance.tags.get(env.vm_release_tag) if env.vm_group and env.vm_group != group: if verbose: print(('Skipping instance %s because its group "%s" ' 'does not match env.vm_group "%s".') \ % (instance.public_dns_name, group, env.vm_group)) continue if _group and group != _group: if verbose: print(('Skipping instance %s because its group "%s" ' 'does not match local group "%s".') \ % (instance.public_dns_name, group, _group)) continue if _name and name != _name: if verbose: print(('Skipping instance %s because its name "%s" ' 'does not match name "%s".') \ % (instance.public_dns_name, name, _name)) continue if _release and release != _release: if verbose: print(('Skipping instance %s because its release "%s" ' 'does not match release "%s".') \ % (instance.public_dns_name, release, _release)) continue if except_release and release == except_release: continue if verbose: print('Adding instance %s (%s).' \ % (name, instance.public_dns_name)) data.setdefault(name, type(env)()) data[name]['id'] = instance.id data[name]['public_dns_name'] = instance.public_dns_name if verbose: print('Public DNS: %s' % instance.public_dns_name) if env.vm_elastic_ip_mappings and name in env.vm_elastic_ip_mappings: data[name]['ip'] = env.vm_elastic_ip_mappings[name] else: data[name]['ip'] = socket.gethostbyname(instance.public_dns_name) if int(show): pprint(data, indent=4) return data elif env.vm_type == KVM: #virsh list pass else: raise NotImplementedError
[ "def", "list_instances", "(", "show", "=", "1", ",", "name", "=", "None", ",", "group", "=", "None", ",", "release", "=", "None", ",", "except_release", "=", "None", ")", ":", "from", "burlap", ".", "common", "import", "shelf", ",", "OrderedDict", ",",...
Retrieves all virtual machines instances in the current environment.
[ "Retrieves", "all", "virtual", "machines", "instances", "in", "the", "current", "environment", "." ]
python
valid
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L1128-L1159
def _initialize(): """Initializes loggers and handlers.""" global _absl_logger, _absl_handler if _absl_logger: return original_logger_class = logging.getLoggerClass() logging.setLoggerClass(ABSLLogger) _absl_logger = logging.getLogger('absl') logging.setLoggerClass(original_logger_class) python_logging_formatter = PythonFormatter() _absl_handler = ABSLHandler(python_logging_formatter) # The absl handler logs to stderr by default. To prevent double logging to # stderr, the following code tries its best to remove other handlers that emit # to stderr. Those handlers are most commonly added when logging.info/debug is # called before importing this module. handlers = [ h for h in logging.root.handlers if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr] for h in handlers: logging.root.removeHandler(h) # The absl handler will always be attached to root, not the absl logger. if not logging.root.handlers: # Attach the absl handler at import time when there are no other handlers. # Otherwise it means users have explicitly configured logging, and the absl # handler will only be attached later in app.run(). For App Engine apps, # the absl handler is not used. logging.root.addHandler(_absl_handler)
[ "def", "_initialize", "(", ")", ":", "global", "_absl_logger", ",", "_absl_handler", "if", "_absl_logger", ":", "return", "original_logger_class", "=", "logging", ".", "getLoggerClass", "(", ")", "logging", ".", "setLoggerClass", "(", "ABSLLogger", ")", "_absl_log...
Initializes loggers and handlers.
[ "Initializes", "loggers", "and", "handlers", "." ]
python
train
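`_initialize()` above runs at import time; the user-facing side needs nothing beyond absl's public entry point.

from absl import app, logging

def main(argv):
    logging.info('hello from absl')   # emitted through the ABSLHandler

if __name__ == '__main__':
    app.run(main)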