repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Linaro/squad
squad/core/management/commands/users.py
https://github.com/Linaro/squad/blob/27da5375e119312a86f231df95f99c979b9f48f0/squad/core/management/commands/users.py#L194-L206
def handle_details(self, username):
    """Print the given user's flags and group memberships to stdout.

    Raises CommandError if no user with that username exists.
    """
    try:
        account = User.objects.get(username=username)
    except User.DoesNotExist:
        raise CommandError("Unable to find user '%s'" % username)

    group_names = [grp.name for grp in account.groups.all().order_by("name")]
    report = (
        "username : %s" % username,
        "is_active : %s" % account.is_active,
        "is_staff : %s" % account.is_staff,
        "is_superuser: %s" % account.is_superuser,
        "groups : [%s]" % ", ".join(group_names),
    )
    for line in report:
        self.stdout.write(line)
[ "def", "handle_details", "(", "self", ",", "username", ")", ":", "try", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "except", "User", ".", "DoesNotExist", ":", "raise", "CommandError", "(", "\"Unable to find...
Print user details
[ "Print", "user", "details" ]
python
train
pantsbuild/pants
src/python/pants/pantsd/service/fs_event_service.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/service/fs_event_service.py#L86-L99
def register_handler(self, name, metadata, callback):
    """Register subscriptions and their event handlers.

    :param str name: the subscription name as used by watchman
    :param dict metadata: a dictionary of metadata to be serialized and
        passed to the watchman subscribe command. this should include the
        match expression as well as any required callback fields.
    :param func callback: the callback to execute on each matching
        filesystem event
    """
    # Each subscription name may be registered only once.
    assert name not in self._handlers, 'duplicate handler name: {}'.format(name)
    metadata_is_valid = (
        isinstance(metadata, dict)
        and 'fields' in metadata
        and 'expression' in metadata
    )
    assert metadata_is_valid, 'invalid handler metadata!'
    handler = Watchman.EventHandler(name=name, metadata=metadata, callback=callback)
    self._handlers[name] = handler
[ "def", "register_handler", "(", "self", ",", "name", ",", "metadata", ",", "callback", ")", ":", "assert", "name", "not", "in", "self", ".", "_handlers", ",", "'duplicate handler name: {}'", ".", "format", "(", "name", ")", "assert", "(", "isinstance", "(", ...
Register subscriptions and their event handlers. :param str name: the subscription name as used by watchman :param dict metadata: a dictionary of metadata to be serialized and passed to the watchman subscribe command. this should include the match expression as well as any required callback fields. :param func callback: the callback to execute on each matching filesystem event
[ "Register", "subscriptions", "and", "their", "event", "handlers", "." ]
python
train
numenta/htmresearch
projects/l2_pooling/topology_experiments.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/topology_experiments.py#L40-L116
def runExperimentPool(numObjects, numLocations, numFeatures, numColumns, networkType=["MultipleL4L2Columns"], longDistanceConnectionsRange = [0.0], numWorkers=7, nTrials=1, pointRange=1, numPoints=10, numInferenceRpts=1, l2Params=None, l4Params=None, resultsName="convergence_results.pkl"): """ Allows you to run a number of experiments using multiple processes. For each parameter except numWorkers, pass in a list containing valid values for that parameter. The cross product of everything is run, and each combination is run nTrials times. Returns a list of dict containing detailed results from each experiment. Also pickles and saves the results in resultsName for later analysis. Example: results = runExperimentPool( numObjects=[10], numLocations=[5], numFeatures=[5], numColumns=[2,3,4,5,6], numWorkers=8, nTrials=5) """ # Create function arguments for every possibility args = [] for c in reversed(numColumns): for o in reversed(numObjects): for l in numLocations: for f in numFeatures: for n in networkType: for p in longDistanceConnectionsRange: for t in range(nTrials): args.append( {"numObjects": o, "numLocations": l, "numFeatures": f, "numColumns": c, "trialNum": t, "pointRange": pointRange, "numPoints": numPoints, "networkType" : n, "longDistanceConnections" : p, "plotInferenceStats": False, "settlingTime": 3, "numInferenceRpts": numInferenceRpts, "l2Params": l2Params, "l4Params": l4Params } ) print "{} experiments to run, {} workers".format(len(args), numWorkers) # Run the pool if numWorkers > 1: pool = Pool(processes=numWorkers) result = pool.map(runExperiment, args) else: result = [] for arg in args: result.append(runExperiment(arg)) # print "Full results:" # pprint.pprint(result, width=150) # Pickle results for later use with open(resultsName,"wb") as f: cPickle.dump(result,f) return result
[ "def", "runExperimentPool", "(", "numObjects", ",", "numLocations", ",", "numFeatures", ",", "numColumns", ",", "networkType", "=", "[", "\"MultipleL4L2Columns\"", "]", ",", "longDistanceConnectionsRange", "=", "[", "0.0", "]", ",", "numWorkers", "=", "7", ",", ...
Allows you to run a number of experiments using multiple processes. For each parameter except numWorkers, pass in a list containing valid values for that parameter. The cross product of everything is run, and each combination is run nTrials times. Returns a list of dict containing detailed results from each experiment. Also pickles and saves the results in resultsName for later analysis. Example: results = runExperimentPool( numObjects=[10], numLocations=[5], numFeatures=[5], numColumns=[2,3,4,5,6], numWorkers=8, nTrials=5)
[ "Allows", "you", "to", "run", "a", "number", "of", "experiments", "using", "multiple", "processes", ".", "For", "each", "parameter", "except", "numWorkers", "pass", "in", "a", "list", "containing", "valid", "values", "for", "that", "parameter", ".", "The", "...
python
train
kiwiz/gkeepapi
gkeepapi/__init__.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/__init__.py#L431-L444
def history(self, storage_version):
    """Get reminder changes.

    :param storage_version: version token identifying the client's last
        known reminder state.
    """
    # static_params take precedence over the two per-call keys on conflict,
    # matching the original update() ordering.
    payload = dict(
        {
            "storageVersion": storage_version,
            "includeSnoozePresetUpdates": True,
        },
        **self.static_params
    )
    return self.send(
        url=self._base_url + 'history',
        method='POST',
        json=payload,
    )
[ "def", "history", "(", "self", ",", "storage_version", ")", ":", "params", "=", "{", "\"storageVersion\"", ":", "storage_version", ",", "\"includeSnoozePresetUpdates\"", ":", "True", ",", "}", "params", ".", "update", "(", "self", ".", "static_params", ")", "r...
Get reminder changes.
[ "Get", "reminder", "changes", "." ]
python
train
projectshift/shift-schema
shiftschema/schema.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/schema.py#L199-L222
def filter_properties(self, model, context=None):
    """
    Filter simple properties

    Runs filters on simple properties, changing them in place on the model.

    :param model: object or dict
    :param context: object, dict or None
    :return: None
    """
    if model is None:
        return

    for name, prop in self.properties.items():
        current = self.get(model, name)
        # None values are left untouched.
        if current is None:
            continue
        new_value = prop.filter(
            value=current,
            model=model,
            context=context
        )
        # Only write back when the filter actually changed something.
        if current != new_value:
            self.set(model, name, new_value)
[ "def", "filter_properties", "(", "self", ",", "model", ",", "context", "=", "None", ")", ":", "if", "model", "is", "None", ":", "return", "for", "property_name", "in", "self", ".", "properties", ":", "prop", "=", "self", ".", "properties", "[", "property...
Filter simple properties Runs filters on simple properties changing them in place. :param model: object or dict :param context: object, dict or None :return: None
[ "Filter", "simple", "properties", "Runs", "filters", "on", "simple", "properties", "changing", "them", "in", "place", ".", ":", "param", "model", ":", "object", "or", "dict", ":", "param", "context", ":", "object", "dict", "or", "None", ":", "return", ":",...
python
train
MycroftAI/mycroft-precise
precise/util.py
https://github.com/MycroftAI/mycroft-precise/blob/e17cebdd171906dbd8a16e282d8a7966fba2eeba/precise/util.py#L98-L101
def find_wavs(folder: str) -> Tuple[List[str], List[str]]:
    """Finds wake-word and not-wake-word wavs in folder"""
    wake = glob_all(join(folder, 'wake-word'), '*.wav')
    not_wake = glob_all(join(folder, 'not-wake-word'), '*.wav')
    return wake, not_wake
[ "def", "find_wavs", "(", "folder", ":", "str", ")", "->", "Tuple", "[", "List", "[", "str", "]", ",", "List", "[", "str", "]", "]", ":", "return", "(", "glob_all", "(", "join", "(", "folder", ",", "'wake-word'", ")", ",", "'*.wav'", ")", ",", "gl...
Finds wake-word and not-wake-word wavs in folder
[ "Finds", "wake", "-", "word", "and", "not", "-", "wake", "-", "word", "wavs", "in", "folder" ]
python
train
GiulioRossetti/dynetx
dynetx/readwrite/edgelist.py
https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/readwrite/edgelist.py#L71-L91
def read_interactions(path, comments="#", directed=False, delimiter=None,
                      nodetype=None, timestamptype=None, encoding='utf-8', keys=False):
    """Read a DyNetx graph from interaction list format.

    Parameters
    ----------
    path : file handle
        Open (binary) file to read from.  NOTE(review): despite the original
        description of "the desired output filename", the code iterates
        `path` line by line and reads `path.name`, so it expects a file
        object, not a path string — confirm against callers.
    comments : str
        Marker for comment lines
    directed : bool
        Whether to build a directed graph
    delimiter : character
        Column delimiter
    nodetype : callable
        Type conversion applied to node identifiers
    timestamptype : callable
        Type conversion applied to timestamps
    encoding : str
        Encoding used to decode each line
    keys : bool
        If True, also read interaction ids via read_ids() from path.name
    """
    ids = None
    # Lazily decode each raw line; consumed by parse_interactions below.
    lines = (line.decode(encoding) for line in path)
    if keys:
        # Interaction ids come from a second pass over the same file path.
        ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype)

    return parse_interactions(lines, comments=comments, directed=directed, delimiter=delimiter,
                              nodetype=nodetype, timestamptype=timestamptype, keys=ids)
[ "def", "read_interactions", "(", "path", ",", "comments", "=", "\"#\"", ",", "directed", "=", "False", ",", "delimiter", "=", "None", ",", "nodetype", "=", "None", ",", "timestamptype", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "keys", "=", "Fal...
Read a DyNetx graph from interaction list format. Parameters ---------- path : basestring The desired output filename delimiter : character Column delimiter
[ "Read", "a", "DyNetx", "graph", "from", "interaction", "list", "format", "." ]
python
train
chop-dbhi/varify
varify/context_processors.py
https://github.com/chop-dbhi/varify/blob/5dc721e49ed9bd3582f4b117785fdd1a8b6ba777/varify/context_processors.py#L8-L16
def static(request):
    "Shorthand static URLs. In debug mode, the JavaScript is not minified."
    base = settings.STATIC_URL
    # Serve un-minified sources while debugging.
    js_variant = 'src' if settings.DEBUG else 'min'
    urls = {}
    urls['CSS_URL'] = os.path.join(base, 'stylesheets/css')
    urls['IMAGES_URL'] = os.path.join(base, 'images')
    urls['JAVASCRIPT_URL'] = os.path.join(base, 'js', js_variant)
    return urls
[ "def", "static", "(", "request", ")", ":", "static_url", "=", "settings", ".", "STATIC_URL", "prefix", "=", "'src'", "if", "settings", ".", "DEBUG", "else", "'min'", "return", "{", "'CSS_URL'", ":", "os", ".", "path", ".", "join", "(", "static_url", ",",...
Shorthand static URLs. In debug mode, the JavaScript is not minified.
[ "Shorthand", "static", "URLs", ".", "In", "debug", "mode", "the", "JavaScript", "is", "not", "minified", "." ]
python
train
learningequality/ricecooker
ricecooker/utils/metadata_provider.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L144-L149
def get_metadata_file_path(channeldir, filename):
    """
    Return the path to the metadata file named `filename` that is a sibling
    of `channeldir`.
    """
    # os.path.dirname(channeldir) equals os.path.split(channeldir)[0]; the
    # previous implementation unpacked a `channeldirname` component it never
    # used.
    return os.path.join(os.path.dirname(channeldir), filename)
[ "def", "get_metadata_file_path", "(", "channeldir", ",", "filename", ")", ":", "channelparentdir", ",", "channeldirname", "=", "os", ".", "path", ".", "split", "(", "channeldir", ")", "return", "os", ".", "path", ".", "join", "(", "channelparentdir", ",", "f...
Return the path to the metadata file named `filename` that is a sibling of `channeldir`.
[ "Return", "the", "path", "to", "the", "metadata", "file", "named", "filename", "that", "is", "a", "sibling", "of", "channeldir", "." ]
python
train
nickpandolfi/Cyther
cyther/launcher.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L54-L62
def getOutput(self):
    """
    Returns the combined output of stdout and stderr, separated by a CRLF
    when stdout is non-empty.
    """
    separator = '\r\n' if self.stdout else ''
    return self.stdout + separator + self.stderr
[ "def", "getOutput", "(", "self", ")", ":", "output", "=", "self", ".", "stdout", "if", "self", ".", "stdout", ":", "output", "+=", "'\\r\\n'", "output", "+=", "self", ".", "stderr", "return", "output" ]
Returns the combined output of stdout and stderr
[ "Returns", "the", "combined", "output", "of", "stdout", "and", "stderr" ]
python
train
Clinical-Genomics/scout
scout/commands/update/institute.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/commands/update/institute.py#L30-L49
def institute(context, institute_id, sanger_recipient, coverage_cutoff,
              frequency_cutoff, display_name, remove_sanger):
    """
    Update an institute
    """
    adapter = context.obj['adapter']
    LOG.info("Running scout update institute")
    try:
        adapter.update_institute(
            internal_id=institute_id,
            sanger_recipient=sanger_recipient,
            coverage_cutoff=coverage_cutoff,
            frequency_cutoff=frequency_cutoff,
            display_name=display_name,
            remove_sanger=remove_sanger,
        )
    except Exception as err:
        # Log the failure and abort the CLI invocation instead of crashing.
        LOG.warning(err)
        context.abort()
[ "def", "institute", "(", "context", ",", "institute_id", ",", "sanger_recipient", ",", "coverage_cutoff", ",", "frequency_cutoff", ",", "display_name", ",", "remove_sanger", ")", ":", "adapter", "=", "context", ".", "obj", "[", "'adapter'", "]", "LOG", ".", "i...
Update an institute
[ "Update", "an", "institute" ]
python
test
docker/docker-py
docker/api/image.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/image.py#L421-L480
def push(self, repository, tag=None, stream=False, auth_config=None,
         decode=False):
    """
    Push an image or a repository to the registry. Similar to the ``docker
    push`` command.

    Args:
        repository (str): The repository to push to
        tag (str): An optional tag to push
        stream (bool): Stream the output as a blocking generator
        auth_config (dict): Override the credentials that are found in the
            config for this request. ``auth_config`` should contain the
            ``username`` and ``password`` keys to be valid.
        decode (bool): Decode the JSON data from the server into dicts.
            Only applies with ``stream=True``

    Returns:
        (generator or str): The output from the server.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # The tag may be embedded in the repository string ("repo:tag").
    if not tag:
        repository, tag = utils.parse_repository_tag(repository)
    registry, _ = auth.resolve_repository_name(repository)

    headers = {}
    if auth_config is not None:
        # Caller-supplied credentials take precedence over stored config.
        log.debug('Sending supplied auth config')
        headers['X-Registry-Auth'] = auth.encode_header(auth_config)
    else:
        stored_header = auth.get_config_header(self, registry)
        if stored_header:
            headers['X-Registry-Auth'] = stored_header

    response = self._post_json(
        self._url("/images/{0}/push", repository),
        None,
        headers=headers,
        stream=stream,
        params={'tag': tag},
    )
    self._raise_for_status(response)

    if stream:
        return self._stream_helper(response, decode=decode)
    return self._result(response)
[ "def", "push", "(", "self", ",", "repository", ",", "tag", "=", "None", ",", "stream", "=", "False", ",", "auth_config", "=", "None", ",", "decode", "=", "False", ")", ":", "if", "not", "tag", ":", "repository", ",", "tag", "=", "utils", ".", "pars...
Push an image or a repository to the registry. Similar to the ``docker push`` command. Args: repository (str): The repository to push to tag (str): An optional tag to push stream (bool): Stream the output as a blocking generator auth_config (dict): Override the credentials that are found in the config for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` Returns: (generator or str): The output from the server. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> for line in cli.push('yourname/app', stream=True, decode=True): ... print(line) {'status': 'Pushing repository yourname/app (1 tags)'} {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} {'status': 'Image already pushed, skipping', 'progressDetail':{}, 'id': '511136ea3c5a'} ...
[ "Push", "an", "image", "or", "a", "repository", "to", "the", "registry", ".", "Similar", "to", "the", "docker", "push", "command", "." ]
python
train
numenta/nupic
examples/opf/tools/sp_plotter.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/tools/sp_plotter.py#L194-L206
def getRandomWithMods(inputSpace, maxChanges):
  """ Returns a random selection from the inputSpace with randomly modified
  up to maxChanges number of bits.
  """
  size = len(inputSpace)
  # np.random.random_integers is deprecated (and removed in modern NumPy);
  # randint with an exclusive upper bound is the supported equivalent of
  # random_integers(0, size - 1).
  ind = np.random.randint(0, size)

  # Deep-copy so that modifications never leak back into inputSpace.
  value = copy.deepcopy(inputSpace[ind])

  if maxChanges == 0:
    return value

  return modifyBits(value, maxChanges)
[ "def", "getRandomWithMods", "(", "inputSpace", ",", "maxChanges", ")", ":", "size", "=", "len", "(", "inputSpace", ")", "ind", "=", "np", ".", "random", ".", "random_integers", "(", "0", ",", "size", "-", "1", ",", "1", ")", "[", "0", "]", "value", ...
Returns a random selection from the inputSpace with randomly modified up to maxChanges number of bits.
[ "Returns", "a", "random", "selection", "from", "the", "inputSpace", "with", "randomly", "modified", "up", "to", "maxChanges", "number", "of", "bits", "." ]
python
valid
summa-tx/riemann
riemann/encoding/cashaddr.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/cashaddr.py#L29-L45
def encode(data):
    ''' bytes -> str '''
    prefix = riemann.network.CASHADDR_PREFIX
    if prefix is None:
        raise ValueError('Network {} does not support cashaddresses.'
                         .format(riemann.get_current_network_name()))
    # Regroup the payload from 8-bit bytes into 5-bit groups for base32.
    regrouped = convertbits(data, 8, 5)
    checksum = calculate_checksum(prefix, regrouped)
    return '{prefix}:{payload}'.format(
        prefix=prefix,
        payload=b32encode(regrouped + checksum))
[ "def", "encode", "(", "data", ")", ":", "if", "riemann", ".", "network", ".", "CASHADDR_PREFIX", "is", "None", ":", "raise", "ValueError", "(", "'Network {} does not support cashaddresses.'", ".", "format", "(", "riemann", ".", "get_current_network_name", "(", ")"...
bytes -> str
[ "bytes", "-", ">", "str" ]
python
train
Gandi/gandi.cli
gandi/cli/modules/network.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/network.py#L46-L53
def update(cls, resource, params, background=False):
    """ Update this IP """
    cls.echo('Updating your IP')
    result = cls.call('hosting.ip.update', cls.usable_id(resource), params)
    if background:
        # Caller wants the raw operation handle immediately.
        return result
    cls.display_progress(result)
    return result
[ "def", "update", "(", "cls", ",", "resource", ",", "params", ",", "background", "=", "False", ")", ":", "cls", ".", "echo", "(", "'Updating your IP'", ")", "result", "=", "cls", ".", "call", "(", "'hosting.ip.update'", ",", "cls", ".", "usable_id", "(", ...
Update this IP
[ "Update", "this", "IP" ]
python
train
smartfile/client-python
smartfile/sync.py
https://github.com/smartfile/client-python/blob/f9ccc40a2870df447c65b53dc0747e37cab62d63/smartfile/sync.py#L67-L72
def signature(self, block_size=None):
    "Requests a signature for remote file via API."
    # Only forward block_size when it is truthy, matching the API default
    # otherwise.
    extra = {'block_size': block_size} if block_size else {}
    return self.api.get('path/sync/signature', self.path, **extra)
[ "def", "signature", "(", "self", ",", "block_size", "=", "None", ")", ":", "kwargs", "=", "{", "}", "if", "block_size", ":", "kwargs", "[", "'block_size'", "]", "=", "block_size", "return", "self", ".", "api", ".", "get", "(", "'path/sync/signature'", ",...
Requests a signature for remote file via API.
[ "Requests", "a", "signature", "for", "remote", "file", "via", "API", "." ]
python
train
maxalbert/tohu
tohu/v6/base.py
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v6/base.py#L121-L129
def reset(self, seed):
    """
    Reset this generator's seed generator and any clones.
    """
    logger.debug(f'Resetting {self} (seed={seed})')
    # Reset ourselves first, then propagate the same seed to every clone.
    self.seed_generator.reset(seed)
    for clone in self.clones:
        clone.reset(seed)
[ "def", "reset", "(", "self", ",", "seed", ")", ":", "logger", ".", "debug", "(", "f'Resetting {self} (seed={seed})'", ")", "self", ".", "seed_generator", ".", "reset", "(", "seed", ")", "for", "c", "in", "self", ".", "clones", ":", "c", ".", "reset", "...
Reset this generator's seed generator and any clones.
[ "Reset", "this", "generator", "s", "seed", "generator", "and", "any", "clones", "." ]
python
train
dswah/pyGAM
pygam/terms.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1275-L1298
def build_columns(self, X, verbose=False):
    """construct the model matrix columns for the term

    Parameters
    ----------
    X : array-like
        Input dataset with n rows

    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    # Fold the marginal bases of the sub-terms together, left to right,
    # via repeated tensor products.
    columns = self._terms[0].build_columns(X, verbose=verbose)
    for marginal_term in self._terms[1:]:
        marginal_columns = marginal_term.build_columns(X, verbose=verbose)
        columns = tensor_product(columns, marginal_columns)

    # A 'by' variable scales every column row-wise.
    if self.by is not None:
        columns = columns * X[:, self.by][:, np.newaxis]

    return sp.sparse.csc_matrix(columns)
[ "def", "build_columns", "(", "self", ",", "X", ",", "verbose", "=", "False", ")", ":", "splines", "=", "self", ".", "_terms", "[", "0", "]", ".", "build_columns", "(", "X", ",", "verbose", "=", "verbose", ")", "for", "term", "in", "self", ".", "_te...
construct the model matrix columns for the term Parameters ---------- X : array-like Input dataset with n rows verbose : bool whether to show warnings Returns ------- scipy sparse array with n rows
[ "construct", "the", "model", "matrix", "columns", "for", "the", "term" ]
python
train
robotools/fontParts
Lib/fontParts/ui.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/ui.py#L87-L99
def Message(message, title='FontParts', informativeText=""):
    """
    A message dialog.

    Optionally a `message`, `title` and `informativeText` can be provided. ::

        from fontParts.ui import Message
        print(Message("This is a message"))
    """
    show = dispatcher["Message"]
    return show(message=message, title=title, informativeText=informativeText)
[ "def", "Message", "(", "message", ",", "title", "=", "'FontParts'", ",", "informativeText", "=", "\"\"", ")", ":", "return", "dispatcher", "[", "\"Message\"", "]", "(", "message", "=", "message", ",", "title", "=", "title", ",", "informativeText", "=", "in...
An message dialog. Optionally a `message`, `title` and `informativeText` can be provided. :: from fontParts.ui import Message print(Message("This is a message"))
[ "An", "message", "dialog", ".", "Optionally", "a", "message", "title", "and", "informativeText", "can", "be", "provided", "." ]
python
train
adamziel/python_translate
python_translate/loaders.py
https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/loaders.py#L63-L77
def assert_valid_path(self, path):
    """
    Ensure that ``path`` is a string naming an existing file.

    @type path: str
    @param path: path to check
    """
    # Reject non-string arguments before touching the filesystem.
    if not isinstance(path, str):
        raise NotFoundResourceException(
            "Resource passed to load() method must be a file path")

    if not os.path.isfile(path):
        raise NotFoundResourceException(
            'File "{0}" does not exist'.format(path))
[ "def", "assert_valid_path", "(", "self", ",", "path", ")", ":", "if", "not", "isinstance", "(", "path", ",", "str", ")", ":", "raise", "NotFoundResourceException", "(", "\"Resource passed to load() method must be a file path\"", ")", "if", "not", "os", ".", "path"...
Ensures that the path represents an existing file @type path: str @param path: path to check
[ "Ensures", "that", "the", "path", "represents", "an", "existing", "file" ]
python
train
wmayner/pyphi
pyphi/validate.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/validate.py#L238-L258
def blackbox_and_coarse_grain(blackbox, coarse_grain):
    """Validate that a coarse-graining properly combines the outputs of a
    blackboxing.
    """
    if blackbox is None:
        return

    all_outputs = set(blackbox.output_indices)
    for box in blackbox.partition:
        # Output elements exposed by this particular box.
        box_outputs = set(box) & all_outputs

        if coarse_grain is None and len(box_outputs) > 1:
            raise ValueError(
                'A blackboxing with multiple outputs per box must be '
                'coarse-grained.')

        # All outputs of one box must land in a single macro-element.
        if coarse_grain and not any(box_outputs.issubset(part)
                                    for part in coarse_grain.partition):
            raise ValueError(
                'Multiple outputs from a blackbox must be partitioned into '
                'the same macro-element of the coarse-graining')
[ "def", "blackbox_and_coarse_grain", "(", "blackbox", ",", "coarse_grain", ")", ":", "if", "blackbox", "is", "None", ":", "return", "for", "box", "in", "blackbox", ".", "partition", ":", "# Outputs of the box", "outputs", "=", "set", "(", "box", ")", "&", "se...
Validate that a coarse-graining properly combines the outputs of a blackboxing.
[ "Validate", "that", "a", "coarse", "-", "graining", "properly", "combines", "the", "outputs", "of", "a", "blackboxing", "." ]
python
train
divio/aldryn-apphooks-config
aldryn_apphooks_config/admin.py
https://github.com/divio/aldryn-apphooks-config/blob/5b8dfc7516982a8746fc08cf919c6ab116335d62/aldryn_apphooks_config/admin.py#L140-L164
def get_form(self, request, obj=None, **kwargs):
    """
    Provides a flexible way to get the right form according to the context

    For the add view it checks whether the app_config is set; if not, a
    special form to select the namespace is shown, which is reloaded after
    namespace selection. If only one namespace exists, the current is
    selected and the normal form is used.
    """
    form = super(ModelAppHookConfig, self).get_form(request, obj, **kwargs)
    # Nothing to do when the form does not expose the app_config field.
    if self.app_config_attribute not in form.base_fields:
        return form
    app_config_default = self._app_config_select(request, obj)
    if app_config_default:
        # A single (or already chosen) namespace: preselect it on the form
        # and mirror the choice into request.GET so downstream code sees it.
        form.base_fields[self.app_config_attribute].initial = app_config_default
        get = copy.copy(request.GET)
        get[self.app_config_attribute] = app_config_default.pk
        request.GET = get
    elif app_config_default is None and request.method == 'GET':
        # Multiple namespaces and none selected yet: show a reduced form
        # containing only the app_config selector.
        class InitialForm(form):
            class Meta(form.Meta):
                fields = (self.app_config_attribute,)
        form = InitialForm
    form = self._set_config_defaults(request, form, obj)
    return form
[ "def", "get_form", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "form", "=", "super", "(", "ModelAppHookConfig", ",", "self", ")", ".", "get_form", "(", "request", ",", "obj", ",", "*", "*", "kwargs", ")"...
Provides a flexible way to get the right form according to the context For the add view it checks whether the app_config is set; if not, a special form to select the namespace is shown, which is reloaded after namespace selection. If only one namespace exists, the current is selected and the normal form is used.
[ "Provides", "a", "flexible", "way", "to", "get", "the", "right", "form", "according", "to", "the", "context" ]
python
train
opencast/pyCA
pyca/ui/jsonapi.py
https://github.com/opencast/pyCA/blob/c89b168d4780d157e1b3f7676628c1b131956a88/pyca/ui/jsonapi.py#L69-L78
def event(uid):
    '''Return a specific events JSON
    '''
    db = get_session()
    # Look among recorded events first, then upcoming ones.
    match = (db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
             or db.query(UpcomingEvent).filter(UpcomingEvent.uid == uid).first())
    if match:
        return make_data_response(match.serialize())
    return make_error_response('No event with specified uid', 404)
[ "def", "event", "(", "uid", ")", ":", "db", "=", "get_session", "(", ")", "event", "=", "db", ".", "query", "(", "RecordedEvent", ")", ".", "filter", "(", "RecordedEvent", ".", "uid", "==", "uid", ")", ".", "first", "(", ")", "or", "db", ".", "qu...
Return a specific events JSON
[ "Return", "a", "specific", "events", "JSON" ]
python
test
tariqdaouda/pyGeno
pyGeno/SNP.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/SNP.py#L18-L25
def getSNPSetsList() :
    """Return the names of all imported snp sets"""
    import rabaDB.filters as rfilt
    query = rfilt.RabaQuery(SNPMaster)
    return [master.setName for master in query.iterRun()]
[ "def", "getSNPSetsList", "(", ")", ":", "import", "rabaDB", ".", "filters", "as", "rfilt", "f", "=", "rfilt", ".", "RabaQuery", "(", "SNPMaster", ")", "names", "=", "[", "]", "for", "g", "in", "f", ".", "iterRun", "(", ")", ":", "names", ".", "appe...
Return the names of all imported snp sets
[ "Return", "the", "names", "of", "all", "imported", "snp", "sets" ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6083-L6093
def getSkeletalBoneDataCompressed(self, action, eMotionRange, pvCompressedData, unCompressedSize):
    """Reads the state of the skeletal bone data in a compressed form that is suitable for
    sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2).
    Usually the size will be much smaller.
    """
    fn = self.function_table.getSkeletalBoneDataCompressed
    required_size = c_uint32()
    # The API reports how many bytes it actually needs via an out-param.
    error_code = fn(action, eMotionRange, pvCompressedData, unCompressedSize,
                    byref(required_size))
    return error_code, required_size.value
[ "def", "getSkeletalBoneDataCompressed", "(", "self", ",", "action", ",", "eMotionRange", ",", "pvCompressedData", ",", "unCompressedSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getSkeletalBoneDataCompressed", "punRequiredCompressedSize", "=", "c_uint...
Reads the state of the skeletal bone data in a compressed form that is suitable for sending over the network. The required buffer size will never exceed ( sizeof(VR_BoneTransform_t)*boneCount + 2). Usually the size will be much smaller.
[ "Reads", "the", "state", "of", "the", "skeletal", "bone", "data", "in", "a", "compressed", "form", "that", "is", "suitable", "for", "sending", "over", "the", "network", ".", "The", "required", "buffer", "size", "will", "never", "exceed", "(", "sizeof", "("...
python
train
Alignak-monitoring/alignak
alignak/dependencynode.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/dependencynode.py#L602-L655
def eval_simple_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False): """Parse and build recursively a tree of DependencyNode from a simple pattern :param pattern: pattern to parse :type pattern: str :param hosts: hosts list, used to find a specific host :type hosts: alignak.objects.host.Host :param services: services list, used to find a specific service :type services: alignak.objects.service.Service :param running: rules are evaluated at run time and parsing. True means runtime :type running: bool :return: root node of parsed tree :rtype: alignak.dependencynode.DependencyNode """ node = DependencyNode() pattern = self.eval_xof_pattern(node, pattern) # If it's a not value, tag the node and find # the name without this ! operator if pattern.startswith('!'): node.not_value = True pattern = pattern[1:] # Is the pattern an expression to be expanded? if re.search(r"^([%s]+|\*):" % self.host_flags, pattern) or \ re.search(r",\s*([%s]+:.*|\*)$" % self.service_flags, pattern): # o is just extracted its attributes, then trashed. son = self.expand_expression(pattern, hosts, services, hostgroups, servicegroups, running) if node.operand != 'of:': node.operand = '&' node.sons.extend(son.sons) node.configuration_errors.extend(son.configuration_errors) node.switch_zeros_of_values() else: node.operand = 'object' obj, error = self.find_object(pattern, hosts, services) # here we have Alignak SchedulingItem object (Host/Service) if obj is not None: # Set host or service # pylint: disable=E1101 node.operand = obj.__class__.my_type node.sons.append(obj.uuid) # Only store the uuid, not the full object. else: if running is False: node.configuration_errors.append(error) else: # As business rules are re-evaluated at run time on # each scheduling loop, if the rule becomes invalid # because of a badly written macro modulation, it # should be notified upper for the error to be # displayed in the check output. raise Exception(error) return node
[ "def", "eval_simple_cor_pattern", "(", "self", ",", "pattern", ",", "hosts", ",", "services", ",", "hostgroups", ",", "servicegroups", ",", "running", "=", "False", ")", ":", "node", "=", "DependencyNode", "(", ")", "pattern", "=", "self", ".", "eval_xof_pat...
Parse and build recursively a tree of DependencyNode from a simple pattern :param pattern: pattern to parse :type pattern: str :param hosts: hosts list, used to find a specific host :type hosts: alignak.objects.host.Host :param services: services list, used to find a specific service :type services: alignak.objects.service.Service :param running: rules are evaluated at run time and parsing. True means runtime :type running: bool :return: root node of parsed tree :rtype: alignak.dependencynode.DependencyNode
[ "Parse", "and", "build", "recursively", "a", "tree", "of", "DependencyNode", "from", "a", "simple", "pattern" ]
python
train
sirfoga/pyhal
hal/strings/models.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/strings/models.py#L43-L53
def convert_accents(self): """Removes accents from text :return: input with converted accents chars """ nkfd_form = unicodedata.normalize('NFKD', self.string) return "".join([ char for char in nkfd_form if not unicodedata.combining(char) ])
[ "def", "convert_accents", "(", "self", ")", ":", "nkfd_form", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "self", ".", "string", ")", "return", "\"\"", ".", "join", "(", "[", "char", "for", "char", "in", "nkfd_form", "if", "not", "unicodeda...
Removes accents from text :return: input with converted accents chars
[ "Removes", "accents", "from", "text" ]
python
train
linkedin/luminol
src/luminol/utils.py
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/utils.py#L66-L82
def to_epoch(t_str): """ Covert a timestamp string to an epoch number. :param str t_str: a timestamp string. :return int: epoch number of the timestamp. """ try: t = float(t_str) return t except ValueError: for format in constants.TIMESTAMP_STR_FORMATS: try: t = datetime.datetime.strptime(t_str, format) return float(time.mktime(t.utctimetuple()) * 1000.0 + t.microsecond / 1000.0) except ValueError: pass raise exceptions.InvalidDataFormat
[ "def", "to_epoch", "(", "t_str", ")", ":", "try", ":", "t", "=", "float", "(", "t_str", ")", "return", "t", "except", "ValueError", ":", "for", "format", "in", "constants", ".", "TIMESTAMP_STR_FORMATS", ":", "try", ":", "t", "=", "datetime", ".", "date...
Covert a timestamp string to an epoch number. :param str t_str: a timestamp string. :return int: epoch number of the timestamp.
[ "Covert", "a", "timestamp", "string", "to", "an", "epoch", "number", ".", ":", "param", "str", "t_str", ":", "a", "timestamp", "string", ".", ":", "return", "int", ":", "epoch", "number", "of", "the", "timestamp", "." ]
python
train
mcs07/MolVS
molvs/standardize.py
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L281-L286
def canonicalize_tautomer(self): """ :returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance. """ return TautomerCanonicalizer(transforms=self.tautomer_transforms, scores=self.tautomer_scores, max_tautomers=self.max_tautomers)
[ "def", "canonicalize_tautomer", "(", "self", ")", ":", "return", "TautomerCanonicalizer", "(", "transforms", "=", "self", ".", "tautomer_transforms", ",", "scores", "=", "self", ".", "tautomer_scores", ",", "max_tautomers", "=", "self", ".", "max_tautomers", ")" ]
:returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
[ ":", "returns", ":", "A", "callable", ":", "class", ":", "~molvs", ".", "tautomer", ".", "TautomerCanonicalizer", "instance", "." ]
python
test
zhanglab/psamm
psamm/command.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/command.py#L248-L268
def _create_executor(self, handler, args, cpus_per_worker=1): """Return a new :class:`.Executor` instance.""" if self._args.parallel > 0: workers = self._args.parallel else: try: workers = mp.cpu_count() // cpus_per_worker except NotImplementedError: workers = 1 if workers != 1: logger.info('Using {} parallel worker processes...'.format( workers)) executor = ProcessPoolExecutor( processes=workers, handler_init=handler, handler_args=args) else: logger.info('Using single worker...') executor = SequentialExecutor( handler_init=handler, handler_args=args) return executor
[ "def", "_create_executor", "(", "self", ",", "handler", ",", "args", ",", "cpus_per_worker", "=", "1", ")", ":", "if", "self", ".", "_args", ".", "parallel", ">", "0", ":", "workers", "=", "self", ".", "_args", ".", "parallel", "else", ":", "try", ":...
Return a new :class:`.Executor` instance.
[ "Return", "a", "new", ":", "class", ":", ".", "Executor", "instance", "." ]
python
train
dhermes/bezier
docs/make_images.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/docs/make_images.py#L1002-L1033
def classify_intersection7(s, curve1a, curve1b, curve2): """Image for :func:`._surface_helpers.classify_intersection` docstring.""" if NO_IMAGES: return surface1 = bezier.Surface.from_nodes( np.asfortranarray( [ [0.0, 4.5, 9.0, 0.0, 4.5, 0.0], [0.0, 0.0, 2.25, 1.25, 2.375, 2.5], ] ) ) surface2 = bezier.Surface.from_nodes( np.asfortranarray( [ [11.25, 9.0, 2.75, 8.125, 3.875, 5.0], [0.0, 4.5, 1.0, -0.75, -0.25, -1.5], ] ) ) figure, (ax1, ax2) = plt.subplots(2, 1) classify_help(s, curve1a, surface1, curve2, surface2, None, ax=ax1) surface1._nodes = np.asfortranarray(surface1._nodes[:, (2, 4, 5, 1, 3, 0)]) surface1._edges = None classify_help(0.0, curve1b, surface1, curve2, surface2, 0, ax=ax2) for ax in (ax1, ax2): ax.set_xlim(-0.125, 11.5) ax.set_ylim(-0.125, 2.625) plt.setp(ax1.get_xticklabels(), visible=False) figure.tight_layout(h_pad=-5.0) save_image(figure, "classify_intersection7.png")
[ "def", "classify_intersection7", "(", "s", ",", "curve1a", ",", "curve1b", ",", "curve2", ")", ":", "if", "NO_IMAGES", ":", "return", "surface1", "=", "bezier", ".", "Surface", ".", "from_nodes", "(", "np", ".", "asfortranarray", "(", "[", "[", "0.0", ",...
Image for :func:`._surface_helpers.classify_intersection` docstring.
[ "Image", "for", ":", "func", ":", ".", "_surface_helpers", ".", "classify_intersection", "docstring", "." ]
python
train
woolfson-group/isambard
isambard/add_ons/knobs_into_holes.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/add_ons/knobs_into_holes.py#L655-L677
def tag_residues_with_heptad_register(helices): """ tags Residues in input helices with heptad register. (Helices not required to be the same length). Parameters ---------- helices : [Polypeptide] Returns ------- None """ base_reg = 'abcdefg' start, end = start_and_end_of_reference_axis(helices) for h in helices: ref_axis = gen_reference_primitive(h, start=start, end=end) crangles = crick_angles(h, reference_axis=ref_axis, tag=False)[:-1] reg_fit = fit_heptad_register(crangles) exp_base = base_reg * (len(h) // 7 + 2) hep_pos = reg_fit[0][0] register_string = exp_base[hep_pos:hep_pos + len(h)] for i, register in enumerate(register_string): h[i].tags['register'] = register return
[ "def", "tag_residues_with_heptad_register", "(", "helices", ")", ":", "base_reg", "=", "'abcdefg'", "start", ",", "end", "=", "start_and_end_of_reference_axis", "(", "helices", ")", "for", "h", "in", "helices", ":", "ref_axis", "=", "gen_reference_primitive", "(", ...
tags Residues in input helices with heptad register. (Helices not required to be the same length). Parameters ---------- helices : [Polypeptide] Returns ------- None
[ "tags", "Residues", "in", "input", "helices", "with", "heptad", "register", ".", "(", "Helices", "not", "required", "to", "be", "the", "same", "length", ")", "." ]
python
train
benfred/implicit
implicit/datasets/movielens.py
https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/implicit/datasets/movielens.py#L67-L74
def _read_dataframes_20M(path): """ reads in the movielens 20M""" import pandas ratings = pandas.read_csv(os.path.join(path, "ratings.csv")) movies = pandas.read_csv(os.path.join(path, "movies.csv")) return ratings, movies
[ "def", "_read_dataframes_20M", "(", "path", ")", ":", "import", "pandas", "ratings", "=", "pandas", ".", "read_csv", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"ratings.csv\"", ")", ")", "movies", "=", "pandas", ".", "read_csv", "(", "os", ...
reads in the movielens 20M
[ "reads", "in", "the", "movielens", "20M" ]
python
train
ioos/pyoos
pyoos/parsers/hads.py
https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/hads.py#L160-L246
def _parse_metadata(self, metadata): """ Transforms raw HADS metadata into a dictionary (station code -> props) """ retval = {} # these are the first keys, afterwards follows a var-len list of variables/props # first key always blank so skip it field_keys = [ "nesdis_id", "nwsli", "location_text", "latitude", "longitude", "hsa", "state", "owner", "manufacturer", "channel", "init_transmit", # HHMM "trans_interval", ] # min # repeat in blocks of 7 after field_keys var_keys = [ "pe_code", "data_interval", # min "coefficient", "constant", "time_offset", # min "base_elevation", # ft "gauge_correction", ] # ft lines = metadata.splitlines() for line in lines: if len(line) == 0: continue raw_fields = line.split("|") fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)])) # how many blocks of var_keys after initial fields var_offset = len(field_keys) + 1 var_blocks = (len(raw_fields) - var_offset) // len( var_keys ) # how many variables vars_only = raw_fields[var_offset:] variables = {} for offset in range(var_blocks): var_dict = dict( zip( var_keys, vars_only[ offset * len(var_keys) : (offset + 1) * len(var_keys) ], ) ) variables[var_dict["pe_code"]] = var_dict var_dict["base_elevation"] = float(var_dict["base_elevation"]) var_dict["gauge_correction"] = float( var_dict["gauge_correction"] ) del var_dict["pe_code"] # no need to duplicate line_val = {"variables": variables} line_val.update(fields) # conversions def dms_to_dd(dms): parts = dms.split(" ") sec = int(parts[1]) * 60 + int(parts[2]) return float(parts[0]) + ( sec / 3600.0 ) # negative already in first portion line_val["latitude"] = dms_to_dd(line_val["latitude"]) line_val["longitude"] = dms_to_dd(line_val["longitude"]) retval[line_val["nesdis_id"]] = line_val return retval
[ "def", "_parse_metadata", "(", "self", ",", "metadata", ")", ":", "retval", "=", "{", "}", "# these are the first keys, afterwards follows a var-len list of variables/props", "# first key always blank so skip it", "field_keys", "=", "[", "\"nesdis_id\"", ",", "\"nwsli\"", ","...
Transforms raw HADS metadata into a dictionary (station code -> props)
[ "Transforms", "raw", "HADS", "metadata", "into", "a", "dictionary", "(", "station", "code", "-", ">", "props", ")" ]
python
train
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L838-L844
def read_utf(self, offset, len): """Reads a UTF-8 string of a given length from the packet""" try: result = self.data[offset:offset + len].decode('utf-8') except UnicodeDecodeError: result = str('') return result
[ "def", "read_utf", "(", "self", ",", "offset", ",", "len", ")", ":", "try", ":", "result", "=", "self", ".", "data", "[", "offset", ":", "offset", "+", "len", "]", ".", "decode", "(", "'utf-8'", ")", "except", "UnicodeDecodeError", ":", "result", "="...
Reads a UTF-8 string of a given length from the packet
[ "Reads", "a", "UTF", "-", "8", "string", "of", "a", "given", "length", "from", "the", "packet" ]
python
train
yunojuno-archive/django-inbound-email
inbound_email/backends/mandrill.py
https://github.com/yunojuno-archive/django-inbound-email/blob/c0c1186fc2ced56b43d6b223e73cd5e8700dfc48/inbound_email/backends/mandrill.py#L90-L99
def _get_recipients(self, array): """Returns an iterator of objects in the form ["Name <address@example.com", ...] from the array [["address@example.com", "Name"]] """ for address, name in array: if not name: yield address else: yield "\"%s\" <%s>" % (name, address)
[ "def", "_get_recipients", "(", "self", ",", "array", ")", ":", "for", "address", ",", "name", "in", "array", ":", "if", "not", "name", ":", "yield", "address", "else", ":", "yield", "\"\\\"%s\\\" <%s>\"", "%", "(", "name", ",", "address", ")" ]
Returns an iterator of objects in the form ["Name <address@example.com", ...] from the array [["address@example.com", "Name"]]
[ "Returns", "an", "iterator", "of", "objects", "in", "the", "form", "[", "Name", "<address" ]
python
train
huge-success/sanic
sanic/router.py
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/router.py#L398-L415
def get(self, request): """Get a request handler based on the URL of the request, or raises an error :param request: Request object :return: handler, arguments, keyword arguments """ # No virtual hosts specified; default behavior if not self.hosts: return self._get(request.path, request.method, "") # virtual hosts specified; try to match route to the host header try: return self._get( request.path, request.method, request.headers.get("Host", "") ) # try default hosts except NotFound: return self._get(request.path, request.method, "")
[ "def", "get", "(", "self", ",", "request", ")", ":", "# No virtual hosts specified; default behavior", "if", "not", "self", ".", "hosts", ":", "return", "self", ".", "_get", "(", "request", ".", "path", ",", "request", ".", "method", ",", "\"\"", ")", "# v...
Get a request handler based on the URL of the request, or raises an error :param request: Request object :return: handler, arguments, keyword arguments
[ "Get", "a", "request", "handler", "based", "on", "the", "URL", "of", "the", "request", "or", "raises", "an", "error" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/serving/query.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/serving/query.py#L63-L76
def make_request_fn(): """Returns a request function.""" if FLAGS.cloud_mlengine_model_name: request_fn = serving_utils.make_cloud_mlengine_request_fn( credentials=GoogleCredentials.get_application_default(), model_name=FLAGS.cloud_mlengine_model_name, version=FLAGS.cloud_mlengine_model_version) else: request_fn = serving_utils.make_grpc_request_fn( servable_name=FLAGS.servable_name, server=FLAGS.server, timeout_secs=FLAGS.timeout_secs) return request_fn
[ "def", "make_request_fn", "(", ")", ":", "if", "FLAGS", ".", "cloud_mlengine_model_name", ":", "request_fn", "=", "serving_utils", ".", "make_cloud_mlengine_request_fn", "(", "credentials", "=", "GoogleCredentials", ".", "get_application_default", "(", ")", ",", "mode...
Returns a request function.
[ "Returns", "a", "request", "function", "." ]
python
train
wright-group/WrightTools
WrightTools/data/_channel.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_channel.py#L87-L89
def minor_extent(self) -> complex: """Minimum deviation from null.""" return min((self.max() - self.null, self.null - self.min()))
[ "def", "minor_extent", "(", "self", ")", "->", "complex", ":", "return", "min", "(", "(", "self", ".", "max", "(", ")", "-", "self", ".", "null", ",", "self", ".", "null", "-", "self", ".", "min", "(", ")", ")", ")" ]
Minimum deviation from null.
[ "Minimum", "deviation", "from", "null", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L386-L400
def imagetransformer_base_8l_8h_big_cond_dr03_dan(): """big 1d model for conditional image generation.2.99 on cifar10.""" hparams = imagetransformer_sep_channels_8l() hparams.block_width = 256 hparams.block_length = 256 hparams.hidden_size = 512 hparams.num_heads = 8 hparams.filter_size = 2048 hparams.batch_size = 4 hparams.max_length = 3075 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.num_decoder_layers = 8 hparams.layer_prepostprocess_dropout = 0.3 return hparams
[ "def", "imagetransformer_base_8l_8h_big_cond_dr03_dan", "(", ")", ":", "hparams", "=", "imagetransformer_sep_channels_8l", "(", ")", "hparams", ".", "block_width", "=", "256", "hparams", ".", "block_length", "=", "256", "hparams", ".", "hidden_size", "=", "512", "hp...
big 1d model for conditional image generation.2.99 on cifar10.
[ "big", "1d", "model", "for", "conditional", "image", "generation", ".", "2", ".", "99", "on", "cifar10", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/database/database.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/database/database.py#L53-L64
def get(self, key, index=None): """Retrieves a value associated with a key from the database Args: key (str): The key to retrieve """ records = self.get_multi([key], index=index) try: return records[0][1] # return the value from the key/value tuple except IndexError: return None
[ "def", "get", "(", "self", ",", "key", ",", "index", "=", "None", ")", ":", "records", "=", "self", ".", "get_multi", "(", "[", "key", "]", ",", "index", "=", "index", ")", "try", ":", "return", "records", "[", "0", "]", "[", "1", "]", "# retur...
Retrieves a value associated with a key from the database Args: key (str): The key to retrieve
[ "Retrieves", "a", "value", "associated", "with", "a", "key", "from", "the", "database" ]
python
train
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/download.py
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/download.py#L103-L123
def get_response(self, url, username=None, password=None): """ does the dirty work of actually getting the rsponse object using urllib2 and its HTTP auth builtins. """ scheme, netloc, path, query, frag = urlparse.urlsplit(url) req = self.get_request(url) stored_username, stored_password = self.passman.find_user_password(None, netloc) # see if we have a password stored if stored_username is None: if username is None and self.prompting: username = urllib.quote(raw_input('User for %s: ' % netloc)) password = urllib.quote(getpass.getpass('Password: ')) if username and password: self.passman.add_password(None, netloc, username, password) stored_username, stored_password = self.passman.find_user_password(None, netloc) authhandler = urllib2.HTTPBasicAuthHandler(self.passman) opener = urllib2.build_opener(authhandler) # FIXME: should catch a 401 and offer to let the user reenter credentials return opener.open(req)
[ "def", "get_response", "(", "self", ",", "url", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "frag", "=", "urlparse", ".", "urlsplit", "(", "url", ")", "req", "=", "s...
does the dirty work of actually getting the rsponse object using urllib2 and its HTTP auth builtins.
[ "does", "the", "dirty", "work", "of", "actually", "getting", "the", "rsponse", "object", "using", "urllib2", "and", "its", "HTTP", "auth", "builtins", "." ]
python
train
projectatomic/osbs-client
osbs/build/build_request.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/build_request.py#L814-L841
def render_koji(self): """ if there is yum repo specified, don't pick stuff from koji """ phase = 'prebuild_plugins' plugin = 'koji' if not self.dj.dock_json_has_plugin_conf(phase, plugin): return if self.spec.yum_repourls.value: logger.info("removing koji from request " "because there is yum repo specified") self.dj.remove_plugin(phase, plugin) elif not (self.spec.koji_target.value and self.spec.kojiroot.value and self.spec.kojihub.value): logger.info("removing koji from request as not specified") self.dj.remove_plugin(phase, plugin) else: self.dj.dock_json_set_arg(phase, plugin, "target", self.spec.koji_target.value) self.dj.dock_json_set_arg(phase, plugin, "root", self.spec.kojiroot.value) self.dj.dock_json_set_arg(phase, plugin, "hub", self.spec.kojihub.value) if self.spec.proxy.value: self.dj.dock_json_set_arg(phase, plugin, "proxy", self.spec.proxy.value)
[ "def", "render_koji", "(", "self", ")", ":", "phase", "=", "'prebuild_plugins'", "plugin", "=", "'koji'", "if", "not", "self", ".", "dj", ".", "dock_json_has_plugin_conf", "(", "phase", ",", "plugin", ")", ":", "return", "if", "self", ".", "spec", ".", "...
if there is yum repo specified, don't pick stuff from koji
[ "if", "there", "is", "yum", "repo", "specified", "don", "t", "pick", "stuff", "from", "koji" ]
python
train
waqasbhatti/astrobase
astrobase/lcproc/lcpfeatures.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/lcpfeatures.py#L573-L776
def serial_periodicfeatures(pfpkl_list, lcbasedir, outdir, starfeaturesdir=None, fourierorder=5, # these are depth, duration, ingress duration transitparams=(-0.01,0.1,0.1), # these are depth, duration, depth ratio, secphase ebparams=(-0.2,0.3,0.7,0.5), pdiff_threshold=1.0e-4, sidereal_threshold=1.0e-4, sampling_peak_multiplier=5.0, sampling_startp=None, sampling_endp=None, starfeatures=None, timecols=None, magcols=None, errcols=None, lcformat='hat-sql', lcformatdir=None, sigclip=10.0, verbose=False, maxobjects=None): '''This drives the periodicfeatures collection for a list of periodfinding pickles. Parameters ---------- pfpkl_list : list of str The list of period-finding pickles to use. lcbasedir : str The base directory where the associated light curves are located. outdir : str The directory where the results will be written. starfeaturesdir : str or None The directory containing the `starfeatures-<objectid>.pkl` files for each object to use calculate neighbor proximity light curve features. fourierorder : int The Fourier order to use to generate sinusoidal function and fit that to the phased light curve. transitparams : list of floats The transit depth, duration, and ingress duration to use to generate a trapezoid planet transit model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. ebparams : list of floats The primary eclipse depth, eclipse duration, the primary-secondary depth ratio, and the phase of the secondary eclipse to use to generate an eclipsing binary model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. pdiff_threshold : float This is the max difference between periods to consider them the same. 
sidereal_threshold : float This is the max difference between any of the 'best' periods and the sidereal day periods to consider them the same. sampling_peak_multiplier : float This is the minimum multiplicative factor of a 'best' period's normalized periodogram peak over the sampling periodogram peak at the same period required to accept the 'best' period as possibly real. sampling_startp, sampling_endp : float If the `pgramlist` doesn't have a time-sampling Lomb-Scargle periodogram, it will be obtained automatically. Use these kwargs to control the minimum and maximum period interval to be searched when generating this periodogram. timecols : list of str or None The timecol keys to use from the lcdict in calculating the features. magcols : list of str or None The magcol keys to use from the lcdict in calculating the features. errcols : list of str or None The errcol keys to use from the lcdict in calculating the features. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. 
The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If True, will indicate progress while working. maxobjects : int The total number of objects to process from `pfpkl_list`. Returns ------- Nothing. ''' try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # make sure to make the output directory if it doesn't exist if not os.path.exists(outdir): os.makedirs(outdir) if maxobjects: pfpkl_list = pfpkl_list[:maxobjects] LOGINFO('%s periodfinding pickles to process' % len(pfpkl_list)) # if the starfeaturedir is provided, try to find a starfeatures pickle for # each periodfinding pickle in pfpkl_list if starfeaturesdir and os.path.exists(starfeaturesdir): starfeatures_list = [] LOGINFO('collecting starfeatures pickles...') for pfpkl in pfpkl_list: sfpkl1 = os.path.basename(pfpkl).replace('periodfinding', 'starfeatures') sfpkl2 = sfpkl1.replace('.gz','') sfpath1 = os.path.join(starfeaturesdir, sfpkl1) sfpath2 = os.path.join(starfeaturesdir, sfpkl2) if os.path.exists(sfpath1): starfeatures_list.append(sfpkl1) elif os.path.exists(sfpath2): starfeatures_list.append(sfpkl2) else: starfeatures_list.append(None) else: starfeatures_list = [None 
for x in pfpkl_list] # generate the task list kwargs = {'fourierorder':fourierorder, 'transitparams':transitparams, 'ebparams':ebparams, 'pdiff_threshold':pdiff_threshold, 'sidereal_threshold':sidereal_threshold, 'sampling_peak_multiplier':sampling_peak_multiplier, 'sampling_startp':sampling_startp, 'sampling_endp':sampling_endp, 'timecols':timecols, 'magcols':magcols, 'errcols':errcols, 'lcformat':lcformat, 'lcformatdir':lcformatdir, 'sigclip':sigclip, 'verbose':verbose} tasks = [(x, lcbasedir, outdir, y, kwargs) for (x,y) in zip(pfpkl_list, starfeatures_list)] LOGINFO('processing periodfinding pickles...') for task in tqdm(tasks): _periodicfeatures_worker(task)
[ "def", "serial_periodicfeatures", "(", "pfpkl_list", ",", "lcbasedir", ",", "outdir", ",", "starfeaturesdir", "=", "None", ",", "fourierorder", "=", "5", ",", "# these are depth, duration, ingress duration", "transitparams", "=", "(", "-", "0.01", ",", "0.1", ",", ...
This drives the periodicfeatures collection for a list of periodfinding pickles. Parameters ---------- pfpkl_list : list of str The list of period-finding pickles to use. lcbasedir : str The base directory where the associated light curves are located. outdir : str The directory where the results will be written. starfeaturesdir : str or None The directory containing the `starfeatures-<objectid>.pkl` files for each object to use calculate neighbor proximity light curve features. fourierorder : int The Fourier order to use to generate sinusoidal function and fit that to the phased light curve. transitparams : list of floats The transit depth, duration, and ingress duration to use to generate a trapezoid planet transit model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. ebparams : list of floats The primary eclipse depth, eclipse duration, the primary-secondary depth ratio, and the phase of the secondary eclipse to use to generate an eclipsing binary model fit to the phased light curve. The period used is the one provided in `period`, while the epoch is automatically obtained from a spline fit to the phased light curve. pdiff_threshold : float This is the max difference between periods to consider them the same. sidereal_threshold : float This is the max difference between any of the 'best' periods and the sidereal day periods to consider them the same. sampling_peak_multiplier : float This is the minimum multiplicative factor of a 'best' period's normalized periodogram peak over the sampling periodogram peak at the same period required to accept the 'best' period as possibly real. sampling_startp, sampling_endp : float If the `pgramlist` doesn't have a time-sampling Lomb-Scargle periodogram, it will be obtained automatically. Use these kwargs to control the minimum and maximum period interval to be searched when generating this periodogram. 
timecols : list of str or None The timecol keys to use from the lcdict in calculating the features. magcols : list of str or None The magcol keys to use from the lcdict in calculating the features. errcols : list of str or None The errcol keys to use from the lcdict in calculating the features. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. verbose : bool If True, will indicate progress while working. maxobjects : int The total number of objects to process from `pfpkl_list`. Returns ------- Nothing.
[ "This", "drives", "the", "periodicfeatures", "collection", "for", "a", "list", "of", "periodfinding", "pickles", "." ]
python
valid
pkkid/python-plexapi
plexapi/video.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/video.py#L69-L73
def markWatched(self): """ Mark video as watched. """ key = '/:/scrobble?key=%s&identifier=com.plexapp.plugins.library' % self.ratingKey self._server.query(key) self.reload()
[ "def", "markWatched", "(", "self", ")", ":", "key", "=", "'/:/scrobble?key=%s&identifier=com.plexapp.plugins.library'", "%", "self", ".", "ratingKey", "self", ".", "_server", ".", "query", "(", "key", ")", "self", ".", "reload", "(", ")" ]
Mark video as watched.
[ "Mark", "video", "as", "watched", "." ]
python
train
Yelp/swagger_zipkin
swagger_zipkin/decorate_client.py
https://github.com/Yelp/swagger_zipkin/blob/8159b5d04f2c1089ae0a60851a5f097d7e09a40e/swagger_zipkin/decorate_client.py#L33-L71
def decorate_client(api_client, func, name): """A helper for decorating :class:`bravado.client.SwaggerClient`. :class:`bravado.client.SwaggerClient` can be extended by creating a class which wraps all calls to it. This helper is used in a :func:`__getattr__` to check if the attr exists on the api_client. If the attr does not exist raise :class:`AttributeError`, if it exists and is not callable return it, and if it is callable return a partial function calling `func` with `name`. Example usage: .. code-block:: python class SomeClientDecorator(object): def __init__(self, api_client, ...): self.api_client = api_client # First arg should be suffiently unique to not conflict with any of # the kwargs def wrap_call(self, client_call_name, *args, **kwargs): ... def __getattr__(self, name): return decorate_client(self.api_client, self.wrap_call, name) :param api_client: the client which is being decorated :type api_client: :class:`bravado.client.SwaggerClient` :param func: a callable which accepts `name`, `*args`, `**kwargs` :type func: callable :param name: the attribute being accessed :type name: string :returns: the attribute from the `api_client` or a partial of `func` :raises: :class:`AttributeError` """ client_attr = getattr(api_client, name) if not callable(client_attr): return client_attr return OperationDecorator(client_attr, functools.partial(func, name))
[ "def", "decorate_client", "(", "api_client", ",", "func", ",", "name", ")", ":", "client_attr", "=", "getattr", "(", "api_client", ",", "name", ")", "if", "not", "callable", "(", "client_attr", ")", ":", "return", "client_attr", "return", "OperationDecorator",...
A helper for decorating :class:`bravado.client.SwaggerClient`. :class:`bravado.client.SwaggerClient` can be extended by creating a class which wraps all calls to it. This helper is used in a :func:`__getattr__` to check if the attr exists on the api_client. If the attr does not exist raise :class:`AttributeError`, if it exists and is not callable return it, and if it is callable return a partial function calling `func` with `name`. Example usage: .. code-block:: python class SomeClientDecorator(object): def __init__(self, api_client, ...): self.api_client = api_client # First arg should be suffiently unique to not conflict with any of # the kwargs def wrap_call(self, client_call_name, *args, **kwargs): ... def __getattr__(self, name): return decorate_client(self.api_client, self.wrap_call, name) :param api_client: the client which is being decorated :type api_client: :class:`bravado.client.SwaggerClient` :param func: a callable which accepts `name`, `*args`, `**kwargs` :type func: callable :param name: the attribute being accessed :type name: string :returns: the attribute from the `api_client` or a partial of `func` :raises: :class:`AttributeError`
[ "A", "helper", "for", "decorating", ":", "class", ":", "bravado", ".", "client", ".", "SwaggerClient", ".", ":", "class", ":", "bravado", ".", "client", ".", "SwaggerClient", "can", "be", "extended", "by", "creating", "a", "class", "which", "wraps", "all",...
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L746-L752
def angle(vec1, vec2): """Returns the angle between two vectors""" dot_vec = dot(vec1, vec2) mag1 = vec1.length() mag2 = vec2.length() result = dot_vec / (mag1 * mag2) return math.acos(result)
[ "def", "angle", "(", "vec1", ",", "vec2", ")", ":", "dot_vec", "=", "dot", "(", "vec1", ",", "vec2", ")", "mag1", "=", "vec1", ".", "length", "(", ")", "mag2", "=", "vec2", ".", "length", "(", ")", "result", "=", "dot_vec", "/", "(", "mag1", "*...
Returns the angle between two vectors
[ "Returns", "the", "angle", "between", "two", "vectors" ]
python
train
schettino72/import-deps
import_deps/__init__.py
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L65-L75
def _get_fqn(cls, path): """get full qualified name as list of strings :return: (list - str) of path segments from top package to given path """ name_list = [path.stem] current_path = path # move to parent path until parent path is a python package while cls.is_pkg(current_path.parent): name_list.append(current_path.parent.name) current_path = current_path.parent return list(reversed(name_list))
[ "def", "_get_fqn", "(", "cls", ",", "path", ")", ":", "name_list", "=", "[", "path", ".", "stem", "]", "current_path", "=", "path", "# move to parent path until parent path is a python package", "while", "cls", ".", "is_pkg", "(", "current_path", ".", "parent", ...
get full qualified name as list of strings :return: (list - str) of path segments from top package to given path
[ "get", "full", "qualified", "name", "as", "list", "of", "strings", ":", "return", ":", "(", "list", "-", "str", ")", "of", "path", "segments", "from", "top", "package", "to", "given", "path" ]
python
train
paulgb/penkit
penkit/preview.py
https://github.com/paulgb/penkit/blob/da451a30f62cfefbab285b9d6979b8dec3ac4957/penkit/preview.py#L28-L39
def show_plot(plot, width=PREVIEW_WIDTH, height=PREVIEW_HEIGHT): """Preview a plot in a jupyter notebook. Args: plot (list): the plot to display (list of layers) width (int): the width of the preview height (int): the height of the preview Returns: An object that renders in Jupyter as the provided plot """ return SVG(data=plot_to_svg(plot, width, height))
[ "def", "show_plot", "(", "plot", ",", "width", "=", "PREVIEW_WIDTH", ",", "height", "=", "PREVIEW_HEIGHT", ")", ":", "return", "SVG", "(", "data", "=", "plot_to_svg", "(", "plot", ",", "width", ",", "height", ")", ")" ]
Preview a plot in a jupyter notebook. Args: plot (list): the plot to display (list of layers) width (int): the width of the preview height (int): the height of the preview Returns: An object that renders in Jupyter as the provided plot
[ "Preview", "a", "plot", "in", "a", "jupyter", "notebook", "." ]
python
train
ANTsX/ANTsPy
ants/utils/multi_label_morphology.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/multi_label_morphology.py#L9-L98
def multi_label_morphology(image, operation, radius, dilation_mask=None, label_list=None, force=False): """ Morphology on multi label images. Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve pre-existing labels. The choices of operation are: Dilation: dilates all labels sequentially, but does not overwrite original labels. This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence can still arise if two or more labels dilate into the same space - in this case, the label with the lowest intensity is retained. With a mask, dilated labels are multiplied by the mask and then added to the original label, thus restricting dilation to the mask region. Erosion: Erodes labels independently, equivalent to calling iMath iteratively. Closing: Close holes in each label sequentially, but does not overwrite original labels. Opening: Opens each label independently, equivalent to calling iMath iteratively. Arguments --------- image : ANTsImage Input image should contain only 0 for background and positive integers for labels. operation : string One of MD, ME, MC, MO, passed to iMath. radius : integer radius of the morphological operation. dilation_mask : ANTsImage Optional binary mask to constrain dilation only (eg dilate cortical label into WM). label_list : list or tuple or numpy.ndarray Optional list of labels, to perform operation upon. Defaults to all unique intensities in image. 
Returns ------- ANTsImage Example ------- >>> import ants >>> img = ants.image_read(ants.get_data('r16')) >>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2 >>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2) >>> # should see original label regions preserved in dilated version >>> # label N should have mean N and 0 variance >>> print(ants.label_stats(labels_dilated, labels)) """ if (label_list is None) or (len(label_list) == 1): label_list = np.sort(np.unique(image[image > 0])) if (len(label_list) > 200) and (not force): raise ValueError('More than 200 labels... Make sure the image is discrete' ' and call this function again with `force=True` if you' ' really want to do this.') image_binary = image.clone() image_binary[image_binary > 1] = 1 # Erosion / opening is simply a case of looping over the input labels if (operation == 'ME') or (operation == 'MO'): output = image.clone() for current_label in label_list: output = output.iMath(operation, radius, current_label) return output if dilation_mask is not None: if int(dilation_mask.max()) != 1: raise ValueError('Mask is either empty or not binary') output = image.clone() for current_label in label_list: current_label_region = image.threshold_image(current_label, current_label) other_labels = output - current_label_region clab_binary_morphed = current_label_region.iMath(operation, radius, 1) if (operation == 'MD') and (dilation_mask is not None): clab_binary_morphed_nooverlap = current_label_region + dilation_mask * clab_binary_morphed - other_labels clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 2) else: clab_binary_morphed_nooverlap = clab_binary_morphed - other_labels clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 1) output = output + clab_binary_morphed_nooverlap * current_label return output
[ "def", "multi_label_morphology", "(", "image", ",", "operation", ",", "radius", ",", "dilation_mask", "=", "None", ",", "label_list", "=", "None", ",", "force", "=", "False", ")", ":", "if", "(", "label_list", "is", "None", ")", "or", "(", "len", "(", ...
Morphology on multi label images. Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve pre-existing labels. The choices of operation are: Dilation: dilates all labels sequentially, but does not overwrite original labels. This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence can still arise if two or more labels dilate into the same space - in this case, the label with the lowest intensity is retained. With a mask, dilated labels are multiplied by the mask and then added to the original label, thus restricting dilation to the mask region. Erosion: Erodes labels independently, equivalent to calling iMath iteratively. Closing: Close holes in each label sequentially, but does not overwrite original labels. Opening: Opens each label independently, equivalent to calling iMath iteratively. Arguments --------- image : ANTsImage Input image should contain only 0 for background and positive integers for labels. operation : string One of MD, ME, MC, MO, passed to iMath. radius : integer radius of the morphological operation. dilation_mask : ANTsImage Optional binary mask to constrain dilation only (eg dilate cortical label into WM). label_list : list or tuple or numpy.ndarray Optional list of labels, to perform operation upon. Defaults to all unique intensities in image. Returns ------- ANTsImage Example ------- >>> import ants >>> img = ants.image_read(ants.get_data('r16')) >>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2 >>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2) >>> # should see original label regions preserved in dilated version >>> # label N should have mean N and 0 variance >>> print(ants.label_stats(labels_dilated, labels))
[ "Morphology", "on", "multi", "label", "images", ".", "Wraps", "calls", "to", "iMath", "binary", "morphology", ".", "Additionally", "dilation", "and", "closing", "operations", "preserve", "pre", "-", "existing", "labels", ".", "The", "choices", "of", "operation",...
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/system.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/system.py#L1218-L1237
def start_service(name, argv = None): """ Start the service given by name. @warn: This method requires UAC elevation in Windows Vista and above. @see: L{stop_service}, L{pause_service}, L{resume_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}. """ with win32.OpenSCManager( dwDesiredAccess = win32.SC_MANAGER_CONNECT ) as hSCManager: with win32.OpenService(hSCManager, name, dwDesiredAccess = win32.SERVICE_START ) as hService: win32.StartService(hService)
[ "def", "start_service", "(", "name", ",", "argv", "=", "None", ")", ":", "with", "win32", ".", "OpenSCManager", "(", "dwDesiredAccess", "=", "win32", ".", "SC_MANAGER_CONNECT", ")", "as", "hSCManager", ":", "with", "win32", ".", "OpenService", "(", "hSCManag...
Start the service given by name. @warn: This method requires UAC elevation in Windows Vista and above. @see: L{stop_service}, L{pause_service}, L{resume_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}.
[ "Start", "the", "service", "given", "by", "name", "." ]
python
train
ethereum/py-evm
eth/db/chain.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/chain.py#L302-L313
def add_transaction(self, block_header: BlockHeader, index_key: int, transaction: 'BaseTransaction') -> Hash32: """ Adds the given transaction to the provided block header. Returns the updated `transactions_root` for updated block header. """ transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root) transaction_db[index_key] = rlp.encode(transaction) return transaction_db.root_hash
[ "def", "add_transaction", "(", "self", ",", "block_header", ":", "BlockHeader", ",", "index_key", ":", "int", ",", "transaction", ":", "'BaseTransaction'", ")", "->", "Hash32", ":", "transaction_db", "=", "HexaryTrie", "(", "self", ".", "db", ",", "root_hash",...
Adds the given transaction to the provided block header. Returns the updated `transactions_root` for updated block header.
[ "Adds", "the", "given", "transaction", "to", "the", "provided", "block", "header", "." ]
python
train
nerdvegas/rez
src/rez/utils/formatting.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/formatting.py#L376-L403
def get_epoch_time_from_str(s): """Convert a string into epoch time. Examples of valid strings: 1418350671 # already epoch time -12s # 12 seconds ago -5.4m # 5.4 minutes ago """ try: return int(s) except: pass try: if s.startswith('-'): chars = {'d': 24 * 60 * 60, 'h': 60 * 60, 'm': 60, 's': 1} m = chars.get(s[-1]) if m: n = float(s[1:-1]) secs = int(n * m) now = int(time.time()) return max((now - secs), 0) except: pass raise ValueError("'%s' is an unrecognised time format." % s)
[ "def", "get_epoch_time_from_str", "(", "s", ")", ":", "try", ":", "return", "int", "(", "s", ")", "except", ":", "pass", "try", ":", "if", "s", ".", "startswith", "(", "'-'", ")", ":", "chars", "=", "{", "'d'", ":", "24", "*", "60", "*", "60", ...
Convert a string into epoch time. Examples of valid strings: 1418350671 # already epoch time -12s # 12 seconds ago -5.4m # 5.4 minutes ago
[ "Convert", "a", "string", "into", "epoch", "time", ".", "Examples", "of", "valid", "strings", ":" ]
python
train
googledatalab/pydatalab
google/datalab/contrib/mlworkbench/_prediction_explainer.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L301-L332
def _image_gradients(self, input_csvlines, label, image_column_name): """Compute gradients from prob of label to image. Used by integrated gradients (probe).""" with tf.Graph().as_default() as g, tf.Session() as sess: logging_level = tf.logging.get_verbosity() try: tf.logging.set_verbosity(tf.logging.ERROR) meta_graph_pb = tf.saved_model.loader.load( sess=sess, tags=[tf.saved_model.tag_constants.SERVING], export_dir=self._model_dir) finally: tf.logging.set_verbosity(logging_level) signature = meta_graph_pb.signature_def['serving_default'] input_alias_map = {name: tensor_info_proto.name for (name, tensor_info_proto) in signature.inputs.items()} output_alias_map = {name: tensor_info_proto.name for (name, tensor_info_proto) in signature.outputs.items()} csv_tensor_name = list(input_alias_map.values())[0] # The image tensor is already built into ML Workbench graph. float_image = g.get_tensor_by_name("import/gradients_%s:0" % image_column_name) if label not in output_alias_map: raise ValueError('The label "%s" does not exist in output map.' % label) prob = g.get_tensor_by_name(output_alias_map[label]) grads = tf.gradients(prob, float_image)[0] grads_values = sess.run(fetches=grads, feed_dict={csv_tensor_name: input_csvlines}) return grads_values
[ "def", "_image_gradients", "(", "self", ",", "input_csvlines", ",", "label", ",", "image_column_name", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", "as", "g", ",", "tf", ".", "Session", "(", ")", "as", "sess", ":", "lo...
Compute gradients from prob of label to image. Used by integrated gradients (probe).
[ "Compute", "gradients", "from", "prob", "of", "label", "to", "image", ".", "Used", "by", "integrated", "gradients", "(", "probe", ")", "." ]
python
train
google/grr
grr/server/grr_response_server/databases/mem_cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_cronjobs.py#L84-L94
def DeleteCronJob(self, cronjob_id): """Deletes a cronjob along with all its runs.""" if cronjob_id not in self.cronjobs: raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id) del self.cronjobs[cronjob_id] try: del self.cronjob_leases[cronjob_id] except KeyError: pass for job_run in self.ReadCronJobRuns(cronjob_id): del self.cronjob_runs[(cronjob_id, job_run.run_id)]
[ "def", "DeleteCronJob", "(", "self", ",", "cronjob_id", ")", ":", "if", "cronjob_id", "not", "in", "self", ".", "cronjobs", ":", "raise", "db", ".", "UnknownCronJobError", "(", "\"Cron job %s not known.\"", "%", "cronjob_id", ")", "del", "self", ".", "cronjobs...
Deletes a cronjob along with all its runs.
[ "Deletes", "a", "cronjob", "along", "with", "all", "its", "runs", "." ]
python
train
apache/airflow
airflow/contrib/hooks/datastore_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L102-L121
def commit(self, body): """ Commit a transaction, optionally creating, deleting or modifying some entities. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit :param body: the body of the commit request. :type body: dict :return: the response body of the commit request. :rtype: dict """ conn = self.get_conn() resp = (conn .projects() .commit(projectId=self.project_id, body=body) .execute(num_retries=self.num_retries)) return resp
[ "def", "commit", "(", "self", ",", "body", ")", ":", "conn", "=", "self", ".", "get_conn", "(", ")", "resp", "=", "(", "conn", ".", "projects", "(", ")", ".", "commit", "(", "projectId", "=", "self", ".", "project_id", ",", "body", "=", "body", "...
Commit a transaction, optionally creating, deleting or modifying some entities. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit :param body: the body of the commit request. :type body: dict :return: the response body of the commit request. :rtype: dict
[ "Commit", "a", "transaction", "optionally", "creating", "deleting", "or", "modifying", "some", "entities", "." ]
python
test
pantsbuild/pants
src/python/pants/util/contextutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/contextutil.py#L50-L74
def environment_as(**kwargs): """Update the environment to the supplied values, for example: with environment_as(PYTHONPATH='foo:bar:baz', PYTHON='/usr/bin/python2.7'): subprocess.Popen(foo).wait() """ new_environment = kwargs old_environment = {} def setenv(key, val): if val is not None: os.environ[key] = val if PY3 else _os_encode(val) else: if key in os.environ: del os.environ[key] for key, val in new_environment.items(): old_environment[key] = os.environ.get(key) setenv(key, val) try: yield finally: for key, val in old_environment.items(): setenv(key, val)
[ "def", "environment_as", "(", "*", "*", "kwargs", ")", ":", "new_environment", "=", "kwargs", "old_environment", "=", "{", "}", "def", "setenv", "(", "key", ",", "val", ")", ":", "if", "val", "is", "not", "None", ":", "os", ".", "environ", "[", "key"...
Update the environment to the supplied values, for example: with environment_as(PYTHONPATH='foo:bar:baz', PYTHON='/usr/bin/python2.7'): subprocess.Popen(foo).wait()
[ "Update", "the", "environment", "to", "the", "supplied", "values", "for", "example", ":" ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/utils/types.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/utils/types.py#L30-L43
def is_estimator(model): """ Determines if a model is an estimator using issubclass and isinstance. Parameters ---------- estimator : class or instance The object to test if it is a Scikit-Learn clusterer, especially a Scikit-Learn estimator or Yellowbrick visualizer """ if inspect.isclass(model): return issubclass(model, BaseEstimator) return isinstance(model, BaseEstimator)
[ "def", "is_estimator", "(", "model", ")", ":", "if", "inspect", ".", "isclass", "(", "model", ")", ":", "return", "issubclass", "(", "model", ",", "BaseEstimator", ")", "return", "isinstance", "(", "model", ",", "BaseEstimator", ")" ]
Determines if a model is an estimator using issubclass and isinstance. Parameters ---------- estimator : class or instance The object to test if it is a Scikit-Learn clusterer, especially a Scikit-Learn estimator or Yellowbrick visualizer
[ "Determines", "if", "a", "model", "is", "an", "estimator", "using", "issubclass", "and", "isinstance", "." ]
python
train
SCIP-Interfaces/PySCIPOpt
examples/tutorial/logical.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/tutorial/logical.py#L69-L78
def xorc_constraint(v=0, sense="maximize"): """ XOR (r as variable) custom constraint""" assert v in [0,1], "v must be 0 or 1 instead of %s" % v.__repr__() model, x, y, z = _init() r = model.addVar("r", "B") n = model.addVar("n", "I") # auxiliary model.addCons(r+quicksum([x,y,z]) == 2*n) model.addCons(x==v) model.setObjective(r, sense=sense) _optimize("Custom XOR (as variable)", model)
[ "def", "xorc_constraint", "(", "v", "=", "0", ",", "sense", "=", "\"maximize\"", ")", ":", "assert", "v", "in", "[", "0", ",", "1", "]", ",", "\"v must be 0 or 1 instead of %s\"", "%", "v", ".", "__repr__", "(", ")", "model", ",", "x", ",", "y", ",",...
XOR (r as variable) custom constraint
[ "XOR", "(", "r", "as", "variable", ")", "custom", "constraint" ]
python
train
mikedh/trimesh
trimesh/path/entities.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/entities.py#L395-L412
def discrete(self, vertices, scale=1.0): """ Discretize into a world- space path. Parameters ------------ vertices: (n, dimension) float Points in space scale : float Size of overall scene for numerical comparisons Returns ------------- discrete: (m, dimension) float Path in space composed of line segments """ discrete = self._orient(vertices[self.points]) return discrete
[ "def", "discrete", "(", "self", ",", "vertices", ",", "scale", "=", "1.0", ")", ":", "discrete", "=", "self", ".", "_orient", "(", "vertices", "[", "self", ".", "points", "]", ")", "return", "discrete" ]
Discretize into a world- space path. Parameters ------------ vertices: (n, dimension) float Points in space scale : float Size of overall scene for numerical comparisons Returns ------------- discrete: (m, dimension) float Path in space composed of line segments
[ "Discretize", "into", "a", "world", "-", "space", "path", "." ]
python
train
pettarin/ipapy
ipapy/__main__.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L183-L199
def command_u2a(string, vargs): """ Print the ARPABEY ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = ARPABETMapper().map_unicode_string( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"], return_as_list=True ) print(vargs["separator"].join(l)) except ValueError as exc: print_error(str(exc))
[ "def", "command_u2a", "(", "string", ",", "vargs", ")", ":", "try", ":", "l", "=", "ARPABETMapper", "(", ")", ".", "map_unicode_string", "(", "unicode_string", "=", "string", ",", "ignore", "=", "vargs", "[", "\"ignore\"", "]", ",", "single_char_parsing", ...
Print the ARPABEY ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Print", "the", "ARPABEY", "ASCII", "string", "corresponding", "to", "the", "given", "Unicode", "IPA", "string", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/bsc.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L708-L710
def start(self): ''' Starts the server. ''' self._app.run(host=self._host, port=self._port)
[ "def", "start", "(", "self", ")", ":", "self", ".", "_app", ".", "run", "(", "host", "=", "self", ".", "_host", ",", "port", "=", "self", ".", "_port", ")" ]
Starts the server.
[ "Starts", "the", "server", "." ]
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/bases.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L399-L420
def from_files(cls, ID, datafiles, parser, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs): """ Create a Collection of measurements from a set of data files. Parameters ---------- {_bases_ID} {_bases_data_files} {_bases_filename_parser} {_bases_ID_kwargs} """ d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs) measurements = [] for sID, dfile in d.items(): try: measurements.append(cls._measurement_class(sID, datafile=dfile, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs)) except: msg = 'Error occurred while trying to parse file: %s' % dfile raise IOError(msg) return cls(ID, measurements)
[ "def", "from_files", "(", "cls", ",", "ID", ",", "datafiles", ",", "parser", ",", "readdata_kwargs", "=", "{", "}", ",", "readmeta_kwargs", "=", "{", "}", ",", "*", "*", "ID_kwargs", ")", ":", "d", "=", "_assign_IDS_to_datafiles", "(", "datafiles", ",", ...
Create a Collection of measurements from a set of data files. Parameters ---------- {_bases_ID} {_bases_data_files} {_bases_filename_parser} {_bases_ID_kwargs}
[ "Create", "a", "Collection", "of", "measurements", "from", "a", "set", "of", "data", "files", "." ]
python
train
googleapis/google-cloud-python
error_reporting/google/cloud/error_reporting/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/error_reporting/google/cloud/error_reporting/client.py#L335-L361
def report_exception(self, http_context=None, user=None): """ Reports the details of the latest exceptions to Stackdriver Error Reporting. :type http_context: :class`google.cloud.error_reporting.HTTPContext` :param http_context: The HTTP request which was processed when the error was triggered. :type user: str :param user: The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. Example:: >>> try: >>> raise NameError >>> except Exception: >>> client.report_exception() """ self._send_error_report( traceback.format_exc(), http_context=http_context, user=user )
[ "def", "report_exception", "(", "self", ",", "http_context", "=", "None", ",", "user", "=", "None", ")", ":", "self", ".", "_send_error_report", "(", "traceback", ".", "format_exc", "(", ")", ",", "http_context", "=", "http_context", ",", "user", "=", "use...
Reports the details of the latest exceptions to Stackdriver Error Reporting. :type http_context: :class`google.cloud.error_reporting.HTTPContext` :param http_context: The HTTP request which was processed when the error was triggered. :type user: str :param user: The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. Example:: >>> try: >>> raise NameError >>> except Exception: >>> client.report_exception()
[ "Reports", "the", "details", "of", "the", "latest", "exceptions", "to", "Stackdriver", "Error", "Reporting", "." ]
python
train
xhtml2pdf/xhtml2pdf
xhtml2pdf/util.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/util.py#L529-L542
def getvalue(self): """ Get value of file. Work around for second strategy. Always returns bytes """ if self.strategy == 0: return self._delegate.getvalue() self._delegate.flush() self._delegate.seek(0) value = self._delegate.read() if not isinstance(value, six.binary_type): value = value.encode('utf-8') return value
[ "def", "getvalue", "(", "self", ")", ":", "if", "self", ".", "strategy", "==", "0", ":", "return", "self", ".", "_delegate", ".", "getvalue", "(", ")", "self", ".", "_delegate", ".", "flush", "(", ")", "self", ".", "_delegate", ".", "seek", "(", "0...
Get value of file. Work around for second strategy. Always returns bytes
[ "Get", "value", "of", "file", ".", "Work", "around", "for", "second", "strategy", ".", "Always", "returns", "bytes" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L34-L54
def mac_address_table_static_forward(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") mac_address_table = ET.SubElement(config, "mac-address-table", xmlns="urn:brocade.com:mgmt:brocade-mac-address-table") static = ET.SubElement(mac_address_table, "static") mac_address_key = ET.SubElement(static, "mac-address") mac_address_key.text = kwargs.pop('mac_address') interface_type_key = ET.SubElement(static, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(static, "interface-name") interface_name_key.text = kwargs.pop('interface_name') vlan_key = ET.SubElement(static, "vlan") vlan_key.text = kwargs.pop('vlan') vlanid_key = ET.SubElement(static, "vlanid") vlanid_key.text = kwargs.pop('vlanid') forward = ET.SubElement(static, "forward") forward.text = kwargs.pop('forward') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "mac_address_table_static_forward", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "mac_address_table", "=", "ET", ".", "SubElement", "(", "config", ",", "\"mac-address-table\"", ",", "xmlns"...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
OpenKMIP/PyKMIP
kmip/core/primitives.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/primitives.py#L772-L784
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the encoding of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. """ super(Boolean, self).write(ostream, kmip_version=kmip_version) self.write_value(ostream, kmip_version=kmip_version)
[ "def", "write", "(", "self", ",", "ostream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "Boolean", ",", "self", ")", ".", "write", "(", "ostream", ",", "kmip_version", "=", "kmip_version", ")", "self",...
Write the encoding of the Boolean object to the output stream. Args: ostream (Stream): A buffer to contain the encoded bytes of a Boolean object. Usually a BytearrayStream object. Required. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
[ "Write", "the", "encoding", "of", "the", "Boolean", "object", "to", "the", "output", "stream", "." ]
python
test
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L605-L613
def letter(): '''Parse a letter in alphabet.''' @Parser def letter_parser(text, index=0): if index < len(text) and text[index].isalpha(): return Value.success(index + 1, text[index]) else: return Value.failure(index, 'a letter') return letter_parser
[ "def", "letter", "(", ")", ":", "@", "Parser", "def", "letter_parser", "(", "text", ",", "index", "=", "0", ")", ":", "if", "index", "<", "len", "(", "text", ")", "and", "text", "[", "index", "]", ".", "isalpha", "(", ")", ":", "return", "Value",...
Parse a letter in alphabet.
[ "Parse", "a", "letter", "in", "alphabet", "." ]
python
train
PyThaiNLP/pythainlp
pythainlp/ulmfit/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/ulmfit/__init__.py#L169-L200
def merge_wgts(em_sz, wgts, itos_pre, itos_new): """ :meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab; use average if vocab not in pretrained vocab :param int em_sz: embedding size :param wgts: torch model weights :param list itos_pre: pretrained list of vocab :param list itos_new: list of new vocab :return: merged torch model weights """ vocab_size = len(itos_new) enc_wgts = wgts["0.encoder.weight"].numpy() # Average weight of encoding row_m = enc_wgts.mean(0) stoi_pre = collections.defaultdict( lambda: -1, {v: k for k, v in enumerate(itos_pre)} ) # New embedding based on classification dataset new_w = np.zeros((vocab_size, em_sz), dtype=np.float32) for i, w in enumerate(itos_new): r = stoi_pre[w] # Use pretrianed embedding if present; else use the average new_w[i] = enc_wgts[r] if r >= 0 else row_m wgts["0.encoder.weight"] = torch.tensor(new_w) wgts["0.encoder_dp.emb.weight"] = torch.tensor(np.copy(new_w)) wgts["1.decoder.weight"] = torch.tensor(np.copy(new_w)) return wgts
[ "def", "merge_wgts", "(", "em_sz", ",", "wgts", ",", "itos_pre", ",", "itos_new", ")", ":", "vocab_size", "=", "len", "(", "itos_new", ")", "enc_wgts", "=", "wgts", "[", "\"0.encoder.weight\"", "]", ".", "numpy", "(", ")", "# Average weight of encoding", "ro...
:meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab; use average if vocab not in pretrained vocab :param int em_sz: embedding size :param wgts: torch model weights :param list itos_pre: pretrained list of vocab :param list itos_new: list of new vocab :return: merged torch model weights
[ ":", "meth", ":", "merge_wgts", "insert", "pretrained", "weights", "and", "vocab", "into", "a", "new", "set", "of", "weights", "and", "vocab", ";", "use", "average", "if", "vocab", "not", "in", "pretrained", "vocab", ":", "param", "int", "em_sz", ":", "e...
python
train
Xion/callee
callee/operators.py
https://github.com/Xion/callee/blob/58740f73ff9a76f5fe0075bf18d7345a0f9d961c/callee/operators.py#L104-L109
def _get_placeholder_repr(self): """Return the placeholder part of matcher's ``__repr__``.""" placeholder = '...' if self.TRANSFORM is not None: placeholder = '%s(%s)' % (self.TRANSFORM.__name__, placeholder) return placeholder
[ "def", "_get_placeholder_repr", "(", "self", ")", ":", "placeholder", "=", "'...'", "if", "self", ".", "TRANSFORM", "is", "not", "None", ":", "placeholder", "=", "'%s(%s)'", "%", "(", "self", ".", "TRANSFORM", ".", "__name__", ",", "placeholder", ")", "ret...
Return the placeholder part of matcher's ``__repr__``.
[ "Return", "the", "placeholder", "part", "of", "matcher", "s", "__repr__", "." ]
python
train
markovmodel/PyEMMA
pyemma/thermo/api.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L283-L458
def estimate_multi_temperature( energy_trajs, temp_trajs, dtrajs, energy_unit='kcal/mol', temp_unit='K', reference_temperature=None, maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, estimator='wham', lag=1, dt_traj='1 step', init=None, init_maxiter=10000, init_maxerr=1e-8, **kwargs): r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from multi-temperature simulations. Parameters ---------- energy_trajs : list of N arrays, each of shape (T_i,) List of arrays, each having T_i rows, one for each time step, containing the potential energies time series in units of kT, kcal/mol or kJ/mol. temp_trajs : list of N int arrays, each of shape (T_i,) List of arrays, each having T_i rows, one for each time step, containing the heat bath temperature time series (at which temperatures the frames were created) in units of K or C. Alternatively, these trajectories may contain kT values instead of temperatures. dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the trajectory is in at any time. energy_unit: str, optional, default='kcal/mol' The physical unit used for energies. Current options: kcal/mol, kJ/mol, kT. temp_unit : str, optional, default='K' The physical unit used for the temperature. Current options: K, C, kT reference_temperature : float or None, optional, default=None Reference temperature against which the bias energies are computed. If not given, the lowest temperature or kT value is used. If given, this parameter must have the same unit as the temp_trajs. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. 
maxerr : float, optional, default=1E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. **kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_multi_temperature()``. 
Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at 1D simulations at two different kT values 1.0 and 2.0, already clustered data, and we use TRAM for the estimation: >>> from pyemma.thermo import estimate_multi_temperature as estimate_mt >>> import numpy as np >>> energy_trajs = [np.array([1.6, 1.4, 1.0, 1.0, 1.2, 1.0, 1.0]), np.array([0.8, 0.7, 0.5, 0.6, 0.7, 0.8, 0.7])] >>> temp_trajs = [np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0])] >>> dtrajs = [np.array([0, 1, 2, 2, 2, 2, 2]), np.array([0, 1, 2, 2, 1, 0, 1])] >>> tram = estimate_mt(energy_trajs, temp_trajs, dtrajs, energy_unit='kT', temp_unit='kT', estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 2.90..., 1.72..., 0.26...]) Note that alhough we only used one temperature per trajectory, ``estimate_multi_temperature()`` can handle temperature changes as well. See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. 
autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes: """ if estimator not in ['wham', 'mbar', 'dtram', 'tram']: ValueError("unsupported estimator: %s" % estimator) from .util import get_multi_temperature_data as _get_multi_temperature_data ttrajs, btrajs, temperatures, unbiased_state = _get_multi_temperature_data( energy_trajs, temp_trajs, energy_unit, temp_unit, reference_temperature=reference_temperature) estimator_obj = None if estimator == 'wham': estimator_obj = wham( ttrajs, dtrajs, _get_averaged_bias_matrix(btrajs, dtrajs), maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj) elif estimator == 'mbar': allowed_keys = ['direct_space'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = mbar( ttrajs, dtrajs, btrajs, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, **parsed_kwargs) elif estimator == 'dtram': allowed_keys = ['count_mode', 'connectivity'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = dtram( ttrajs, dtrajs, _get_averaged_bias_matrix(btrajs, dtrajs), lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) elif estimator == 'tram': allowed_keys = [ 'count_mode', 'connectivity', 'connectivity_factor','nn', 'direct_space', 'N_dtram_accelerations', 'equilibrium', 'overcounting_factor', 'callback'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = tram( ttrajs, dtrajs, btrajs, lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) try: estimator_obj.temperatures = temperatures except AttributeError: for obj in estimator_obj: 
obj.temperatures = temperatures return estimator_obj
[ "def", "estimate_multi_temperature", "(", "energy_trajs", ",", "temp_trajs", ",", "dtrajs", ",", "energy_unit", "=", "'kcal/mol'", ",", "temp_unit", "=", "'K'", ",", "reference_temperature", "=", "None", ",", "maxiter", "=", "10000", ",", "maxerr", "=", "1.0E-15...
r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from multi-temperature simulations. Parameters ---------- energy_trajs : list of N arrays, each of shape (T_i,) List of arrays, each having T_i rows, one for each time step, containing the potential energies time series in units of kT, kcal/mol or kJ/mol. temp_trajs : list of N int arrays, each of shape (T_i,) List of arrays, each having T_i rows, one for each time step, containing the heat bath temperature time series (at which temperatures the frames were created) in units of K or C. Alternatively, these trajectories may contain kT values instead of temperatures. dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the trajectory is in at any time. energy_unit: str, optional, default='kcal/mol' The physical unit used for energies. Current options: kcal/mol, kJ/mol, kT. temp_unit : str, optional, default='K' The physical unit used for the temperature. Current options: K, C, kT reference_temperature : float or None, optional, default=None Reference temperature against which the bias energies are computed. If not given, the lowest temperature or kT value is used. If given, this parameter must have the same unit as the temp_trajs. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. maxerr : float, optional, default=1E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. 
estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. **kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_multi_temperature()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). 
Example ------- We look at 1D simulations at two different kT values 1.0 and 2.0, already clustered data, and we use TRAM for the estimation: >>> from pyemma.thermo import estimate_multi_temperature as estimate_mt >>> import numpy as np >>> energy_trajs = [np.array([1.6, 1.4, 1.0, 1.0, 1.2, 1.0, 1.0]), np.array([0.8, 0.7, 0.5, 0.6, 0.7, 0.8, 0.7])] >>> temp_trajs = [np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]), np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0])] >>> dtrajs = [np.array([0, 1, 2, 2, 2, 2, 2]), np.array([0, 1, 2, 2, 1, 0, 1])] >>> tram = estimate_mt(energy_trajs, temp_trajs, dtrajs, energy_unit='kT', temp_unit='kT', estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 2.90..., 1.72..., 0.26...]) Note that alhough we only used one temperature per trajectory, ``estimate_multi_temperature()`` can handle temperature changes as well. See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes:
[ "r", "This", "function", "acts", "as", "a", "wrapper", "for", "tram", "()", "dtram", "()", "mbar", "and", "wham", "()", "and", "handles", "the", "calculation", "of", "bias", "energies", "(", "bias", ")", "and", "thermodynamic", "state", "trajectories", "("...
python
train
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L252-L256
def put_readme(self, content): """Store the readme descriptive metadata.""" logger.debug("Putting readme") key = self.get_readme_key() self.put_text(key, content)
[ "def", "put_readme", "(", "self", ",", "content", ")", ":", "logger", ".", "debug", "(", "\"Putting readme\"", ")", "key", "=", "self", ".", "get_readme_key", "(", ")", "self", ".", "put_text", "(", "key", ",", "content", ")" ]
Store the readme descriptive metadata.
[ "Store", "the", "readme", "descriptive", "metadata", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/policy/traffic_engineering/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/policy/traffic_engineering/__init__.py#L126-L147
def _set_traffic_eng_ospf(self, v, load=False): """ Setter method for traffic_eng_ospf, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/traffic_engineering/traffic_eng_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_traffic_eng_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_traffic_eng_ospf() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=traffic_eng_ospf.traffic_eng_ospf, is_container='container', presence=False, yang_name="traffic-eng-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise via OSPF', u'alt-name': u'ospf', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """traffic_eng_ospf must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=traffic_eng_ospf.traffic_eng_ospf, is_container='container', presence=False, yang_name="traffic-eng-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Advertise via OSPF', u'alt-name': u'ospf', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__traffic_eng_ospf = t if hasattr(self, '_set'): self._set()
[ "def", "_set_traffic_eng_ospf", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for traffic_eng_ospf, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/traffic_engineering/traffic_eng_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_traffic_eng_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_traffic_eng_ospf() directly.
[ "Setter", "method", "for", "traffic_eng_ospf", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "policy", "/", "traffic_engineering", "/", "traffic_eng_ospf", "(", "container", ")", "If", "this", ...
python
train
horazont/aioxmpp
aioxmpp/security_layer.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/security_layer.py#L1142-L1204
def negotiate_sasl(transport, xmlstream, sasl_providers, negotiation_timeout, jid, features): """ Perform SASL authentication on the given :class:`.protocol.XMLStream` `stream`. `transport` must be the :class:`asyncio.Transport` over which the `stream` runs. It is used to detect whether TLS is used and may be required by some SASL mechanisms. `sasl_providers` must be an iterable of :class:`SASLProvider` objects. They will be tried in iteration order to authenticate against the server. If one of the `sasl_providers` fails with a :class:`aiosasl.AuthenticationFailure` exception, the other providers are still tried; only if all providers fail, the last :class:`aiosasl.AuthenticationFailure` exception is re-raised. If no mechanism was able to authenticate but not due to authentication failures (other failures include no matching mechanism on the server side), :class:`aiosasl.SASLUnavailable` is raised. Return the :class:`.nonza.StreamFeatures` obtained after resetting the stream after successful SASL authentication. .. versionadded:: 0.6 .. deprecated:: 0.10 The `negotiation_timeout` argument is ignored. The timeout is controlled using the :attr:`~.XMLStream.deadtime_hard_limit` timeout of the stream. The argument will be removed in version 1.0. To prepare for this, please pass `jid` and `features` as keyword arguments. """ if not transport.get_extra_info("sslcontext"): transport = None last_auth_error = None for sasl_provider in sasl_providers: try: result = yield from sasl_provider.execute( jid, features, xmlstream, transport) except ValueError as err: raise errors.StreamNegotiationFailure( "invalid credentials: {}".format(err) ) from err except aiosasl.AuthenticationFailure as err: last_auth_error = err continue if result: features = yield from protocol.reset_stream_and_get_features( xmlstream ) break else: if last_auth_error: raise last_auth_error else: raise errors.SASLUnavailable("No common mechanisms") return features
[ "def", "negotiate_sasl", "(", "transport", ",", "xmlstream", ",", "sasl_providers", ",", "negotiation_timeout", ",", "jid", ",", "features", ")", ":", "if", "not", "transport", ".", "get_extra_info", "(", "\"sslcontext\"", ")", ":", "transport", "=", "None", "...
Perform SASL authentication on the given :class:`.protocol.XMLStream` `stream`. `transport` must be the :class:`asyncio.Transport` over which the `stream` runs. It is used to detect whether TLS is used and may be required by some SASL mechanisms. `sasl_providers` must be an iterable of :class:`SASLProvider` objects. They will be tried in iteration order to authenticate against the server. If one of the `sasl_providers` fails with a :class:`aiosasl.AuthenticationFailure` exception, the other providers are still tried; only if all providers fail, the last :class:`aiosasl.AuthenticationFailure` exception is re-raised. If no mechanism was able to authenticate but not due to authentication failures (other failures include no matching mechanism on the server side), :class:`aiosasl.SASLUnavailable` is raised. Return the :class:`.nonza.StreamFeatures` obtained after resetting the stream after successful SASL authentication. .. versionadded:: 0.6 .. deprecated:: 0.10 The `negotiation_timeout` argument is ignored. The timeout is controlled using the :attr:`~.XMLStream.deadtime_hard_limit` timeout of the stream. The argument will be removed in version 1.0. To prepare for this, please pass `jid` and `features` as keyword arguments.
[ "Perform", "SASL", "authentication", "on", "the", "given", ":", "class", ":", ".", "protocol", ".", "XMLStream", "stream", ".", "transport", "must", "be", "the", ":", "class", ":", "asyncio", ".", "Transport", "over", "which", "the", "stream", "runs", ".",...
python
train
Azure/azure-cosmos-python
azure/cosmos/cosmos_client.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L1709-L1734
def UpsertAttachment(self, document_link, attachment, options=None): """Upserts an attachment in a document. :param str document_link: The link to the document. :param dict attachment: The Azure Cosmos attachment to upsert. :param dict options: The request options for the request. :return: The upserted Attachment. :rtype: dict """ if options is None: options = {} document_id, path = self._GetItemIdWithPathForAttachment(attachment, document_link) return self.Upsert(attachment, path, 'attachments', document_id, None, options)
[ "def", "UpsertAttachment", "(", "self", ",", "document_link", ",", "attachment", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "document_id", ",", "path", "=", "self", ".", "_GetItemIdWithPathForAttachme...
Upserts an attachment in a document. :param str document_link: The link to the document. :param dict attachment: The Azure Cosmos attachment to upsert. :param dict options: The request options for the request. :return: The upserted Attachment. :rtype: dict
[ "Upserts", "an", "attachment", "in", "a", "document", "." ]
python
train
emc-openstack/storops
storops/unity/client.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/client.py#L63-L84
def get_all(self, type_name, base_fields=None, the_filter=None, nested_fields=None): """Get the resource by resource id. :param nested_fields: nested resource fields :param base_fields: fields of this resource :param the_filter: dictionary of filter like `{'name': 'abc'}` :param type_name: Resource type. For example, pool, lun, nasServer. :return: List of resource class objects """ fields = self.get_fields(type_name, base_fields, nested_fields) the_filter = self.dict_to_filter_string(the_filter) url = '/api/types/{}/instances'.format(type_name) resp = self.rest_get(url, fields=fields, filter=the_filter) ret = resp while resp.has_next_page: resp = self.rest_get(url, fields=fields, filter=the_filter, page=resp.next_page) ret.entries.extend(resp.entries) return ret
[ "def", "get_all", "(", "self", ",", "type_name", ",", "base_fields", "=", "None", ",", "the_filter", "=", "None", ",", "nested_fields", "=", "None", ")", ":", "fields", "=", "self", ".", "get_fields", "(", "type_name", ",", "base_fields", ",", "nested_fiel...
Get the resource by resource id. :param nested_fields: nested resource fields :param base_fields: fields of this resource :param the_filter: dictionary of filter like `{'name': 'abc'}` :param type_name: Resource type. For example, pool, lun, nasServer. :return: List of resource class objects
[ "Get", "the", "resource", "by", "resource", "id", "." ]
python
train
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L1432-L1443
def set_id(self, id='$'): """ Set the last-read message id for each stream in the consumer group. By default, this will be the special "$" identifier, meaning all messages are marked as having been read. :param id: id of last-read message (or "$"). """ accum = {} for key in self.keys: accum[key] = self.database.xgroup_setid(key, self.name, id) return accum
[ "def", "set_id", "(", "self", ",", "id", "=", "'$'", ")", ":", "accum", "=", "{", "}", "for", "key", "in", "self", ".", "keys", ":", "accum", "[", "key", "]", "=", "self", ".", "database", ".", "xgroup_setid", "(", "key", ",", "self", ".", "nam...
Set the last-read message id for each stream in the consumer group. By default, this will be the special "$" identifier, meaning all messages are marked as having been read. :param id: id of last-read message (or "$").
[ "Set", "the", "last", "-", "read", "message", "id", "for", "each", "stream", "in", "the", "consumer", "group", ".", "By", "default", "this", "will", "be", "the", "special", "$", "identifier", "meaning", "all", "messages", "are", "marked", "as", "having", ...
python
train
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L460-L469
def array_2d_from_array_1d(self, array_1d): """ Map a 1D array the same dimension as the grid to its original masked 2D array. Parameters ----------- array_1d : ndarray The 1D array which is mapped to its masked 2D array. """ return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two( array_1d=array_1d, shape=self.mask.shape, one_to_two=self.mask.masked_grid_index_to_pixel)
[ "def", "array_2d_from_array_1d", "(", "self", ",", "array_1d", ")", ":", "return", "mapping_util", ".", "map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two", "(", "array_1d", "=", "array_1d", ",", "shape", "=", "self", ".", "mask", ".", "shape", ",", ...
Map a 1D array the same dimension as the grid to its original masked 2D array. Parameters ----------- array_1d : ndarray The 1D array which is mapped to its masked 2D array.
[ "Map", "a", "1D", "array", "the", "same", "dimension", "as", "the", "grid", "to", "its", "original", "masked", "2D", "array", "." ]
python
valid
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L7-L13
def take_along_axis(large_array, indexes): """ Take along axis """ # Reshape indexes into the right shape if len(large_array.shape) > len(indexes.shape): indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape)))) return np.take_along_axis(large_array, indexes, axis=0)
[ "def", "take_along_axis", "(", "large_array", ",", "indexes", ")", ":", "# Reshape indexes into the right shape", "if", "len", "(", "large_array", ".", "shape", ")", ">", "len", "(", "indexes", ".", "shape", ")", ":", "indexes", "=", "indexes", ".", "reshape",...
Take along axis
[ "Take", "along", "axis" ]
python
train
yoavaviram/python-amazon-simple-product-api
amazon/api.py
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L358-L372
def cart_clear(self, CartId=None, HMAC=None, **kwargs): """CartClear. Removes all items from cart :param CartId: Id of cart :param HMAC: HMAC of cart. Do not use url encoded :return: An :class:`~.AmazonCart`. """ if not CartId or not HMAC: raise CartException('CartId required for CartClear call') response = self.api.CartClear(CartId=CartId, HMAC=HMAC, **kwargs) root = objectify.fromstring(response) new_cart = AmazonCart(root) self._check_for_cart_error(new_cart) return new_cart
[ "def", "cart_clear", "(", "self", ",", "CartId", "=", "None", ",", "HMAC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "CartId", "or", "not", "HMAC", ":", "raise", "CartException", "(", "'CartId required for CartClear call'", ")", "respons...
CartClear. Removes all items from cart :param CartId: Id of cart :param HMAC: HMAC of cart. Do not use url encoded :return: An :class:`~.AmazonCart`.
[ "CartClear", ".", "Removes", "all", "items", "from", "cart", ":", "param", "CartId", ":", "Id", "of", "cart", ":", "param", "HMAC", ":", "HMAC", "of", "cart", ".", "Do", "not", "use", "url", "encoded", ":", "return", ":", "An", ":", "class", ":", "...
python
train
cloudtools/stacker
stacker/blueprints/raw.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/blueprints/raw.py#L194-L215
def rendered(self): """Return (generating first if needed) rendered template.""" if not self._rendered: template_path = get_template_path(self.raw_template_path) if template_path: with open(template_path, 'r') as template: if len(os.path.splitext(template_path)) == 2 and ( os.path.splitext(template_path)[1] == '.j2'): self._rendered = Template(template.read()).render( context=self.context, mappings=self.mappings, name=self.name, variables=self.resolved_variables ) else: self._rendered = template.read() else: raise InvalidConfig( 'Could not find template %s' % self.raw_template_path ) return self._rendered
[ "def", "rendered", "(", "self", ")", ":", "if", "not", "self", ".", "_rendered", ":", "template_path", "=", "get_template_path", "(", "self", ".", "raw_template_path", ")", "if", "template_path", ":", "with", "open", "(", "template_path", ",", "'r'", ")", ...
Return (generating first if needed) rendered template.
[ "Return", "(", "generating", "first", "if", "needed", ")", "rendered", "template", "." ]
python
train
cstockton/py-gensend
gensend/providers/common.py
https://github.com/cstockton/py-gensend/blob/8c8e911f8e8c386bea42967350beb4636fc19240/gensend/providers/common.py#L102-L114
def istrue(self, *args): """Strict test for 'true' value test. If multiple args are provided it will test them all. ISTRUE:true %{ISTRUE:true} -> 'True' """ def is_true(val): if val is True: return True val = str(val).lower().strip() return val in ('true', 'yes', '1') return all(self._arg_factory(is_true, args))
[ "def", "istrue", "(", "self", ",", "*", "args", ")", ":", "def", "is_true", "(", "val", ")", ":", "if", "val", "is", "True", ":", "return", "True", "val", "=", "str", "(", "val", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", "return", ...
Strict test for 'true' value test. If multiple args are provided it will test them all. ISTRUE:true %{ISTRUE:true} -> 'True'
[ "Strict", "test", "for", "true", "value", "test", ".", "If", "multiple", "args", "are", "provided", "it", "will", "test", "them", "all", ".", "ISTRUE", ":", "true" ]
python
train
tBuLi/symfit
symfit/contrib/interactive_guess/interactive_guess.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L211-L219
def _eval_model(self): """ Convenience method for evaluating the model with the current parameters :return: named tuple with results """ arguments = self._x_grid.copy() arguments.update({param: param.value for param in self.model.params}) return self.model(**key2str(arguments))
[ "def", "_eval_model", "(", "self", ")", ":", "arguments", "=", "self", ".", "_x_grid", ".", "copy", "(", ")", "arguments", ".", "update", "(", "{", "param", ":", "param", ".", "value", "for", "param", "in", "self", ".", "model", ".", "params", "}", ...
Convenience method for evaluating the model with the current parameters :return: named tuple with results
[ "Convenience", "method", "for", "evaluating", "the", "model", "with", "the", "current", "parameters" ]
python
train
Zitrax/nose-dep
nosedep.py
https://github.com/Zitrax/nose-dep/blob/fd29c95e0e5eb2dbd821f6566b72dfcf42631226/nosedep.py#L268-L279
def prepare_suite(self, suite): """Prepare suite and determine test ordering""" all_tests = {} for s in suite: m = re.match(r'(\w+)\s+\(.+\)', str(s)) if m: name = m.group(1) else: name = str(s).split('.')[-1] all_tests[name] = s return self.orderTests(all_tests, suite)
[ "def", "prepare_suite", "(", "self", ",", "suite", ")", ":", "all_tests", "=", "{", "}", "for", "s", "in", "suite", ":", "m", "=", "re", ".", "match", "(", "r'(\\w+)\\s+\\(.+\\)'", ",", "str", "(", "s", ")", ")", "if", "m", ":", "name", "=", "m",...
Prepare suite and determine test ordering
[ "Prepare", "suite", "and", "determine", "test", "ordering" ]
python
train
quintusdias/glymur
glymur/jp2box.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2box.py#L116-L123
def _str_superbox(self): """__str__ method for all superboxes.""" msg = Jp2kBox.__str__(self) for box in self.box: boxstr = str(box) # Indent the child boxes to make the association clear. msg += '\n' + self._indent(boxstr) return msg
[ "def", "_str_superbox", "(", "self", ")", ":", "msg", "=", "Jp2kBox", ".", "__str__", "(", "self", ")", "for", "box", "in", "self", ".", "box", ":", "boxstr", "=", "str", "(", "box", ")", "# Indent the child boxes to make the association clear.", "msg", "+="...
__str__ method for all superboxes.
[ "__str__", "method", "for", "all", "superboxes", "." ]
python
train
jmgilman/Neolib
neolib/pyamf/alias.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/alias.py#L353-L418
def getEncodableAttributes(self, obj, codec=None): """ Must return a C{dict} of attributes to be encoded, even if its empty. @param codec: An optional argument that will contain the encoder instance calling this function. @since: 0.5 """ if not self._compiled: self.compile() if self.is_dict: return dict(obj) if self.shortcut_encode and self.dynamic: return obj.__dict__.copy() attrs = {} if self.static_attrs: for attr in self.static_attrs: attrs[attr] = getattr(obj, attr, pyamf.Undefined) if not self.dynamic: if self.non_static_encodable_properties: for attr in self.non_static_encodable_properties: attrs[attr] = getattr(obj, attr) return attrs dynamic_props = util.get_properties(obj) if not self.shortcut_encode: dynamic_props = set(dynamic_props) if self.encodable_properties: dynamic_props.update(self.encodable_properties) if self.static_attrs: dynamic_props.difference_update(self.static_attrs) if self.exclude_attrs: dynamic_props.difference_update(self.exclude_attrs) for attr in dynamic_props: attrs[attr] = getattr(obj, attr) if self.proxy_attrs is not None and attrs and codec: context = codec.context for k, v in attrs.copy().iteritems(): if k in self.proxy_attrs: attrs[k] = context.getProxyForObject(v) if self.synonym_attrs: missing = object() for k, v in self.synonym_attrs.iteritems(): value = attrs.pop(k, missing) if value is missing: continue attrs[v] = value return attrs
[ "def", "getEncodableAttributes", "(", "self", ",", "obj", ",", "codec", "=", "None", ")", ":", "if", "not", "self", ".", "_compiled", ":", "self", ".", "compile", "(", ")", "if", "self", ".", "is_dict", ":", "return", "dict", "(", "obj", ")", "if", ...
Must return a C{dict} of attributes to be encoded, even if its empty. @param codec: An optional argument that will contain the encoder instance calling this function. @since: 0.5
[ "Must", "return", "a", "C", "{", "dict", "}", "of", "attributes", "to", "be", "encoded", "even", "if", "its", "empty", "." ]
python
train
moonso/vcftoolbox
vcftoolbox/add_variant_information.py
https://github.com/moonso/vcftoolbox/blob/438fb1d85a83812c389774b94802eb5921c89e3a/vcftoolbox/add_variant_information.py#L140-L185
def add_vcf_info(keyword, variant_line=None, variant_dict=None, annotation=None): """ Add information to the info field of a vcf variant line. Arguments: variant_line (str): A vcf formatted variant line keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: fixed_variant : str if variant line, or dict if variant_dict """ logger = logging.getLogger(__name__) if annotation: new_info = '{0}={1}'.format(keyword, annotation) else: new_info = keyword logger.debug("Adding new variant information {0}".format(new_info)) fixed_variant = None if variant_line: logger.debug("Adding information to a variant line") splitted_variant = variant_line.rstrip('\n').split('\t') logger.debug("Adding information to splitted variant line") old_info = splitted_variant[7] if old_info == '.': splitted_variant[7] = new_info else: splitted_variant[7] = "{0};{1}".format(splitted_variant[7], new_info) fixed_variant = '\t'.join(splitted_variant) elif variant_dict: logger.debug("Adding information to a variant dict") old_info = variant_dict['INFO'] if old_info == '.': variant_dict['INFO'] = new_info else: variant_dict['INFO'] = "{0};{1}".format(old_info, new_info) fixed_variant = variant_dict return fixed_variant
[ "def", "add_vcf_info", "(", "keyword", ",", "variant_line", "=", "None", ",", "variant_dict", "=", "None", ",", "annotation", "=", "None", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "if", "annotation", ":", "new_info", "=",...
Add information to the info field of a vcf variant line. Arguments: variant_line (str): A vcf formatted variant line keyword (str): The info field key annotation (str): If the annotation is a key, value pair this is the string that represents the value Returns: fixed_variant : str if variant line, or dict if variant_dict
[ "Add", "information", "to", "the", "info", "field", "of", "a", "vcf", "variant", "line", ".", "Arguments", ":", "variant_line", "(", "str", ")", ":", "A", "vcf", "formatted", "variant", "line", "keyword", "(", "str", ")", ":", "The", "info", "field", "...
python
train
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L710-L739
def when(string, timezone='UTC', prefer_dates_from='current_period'): """"Returns a MayaDT instance for the human moment specified. Powered by dateparser. Useful for scraping websites. Examples: 'next week', 'now', 'tomorrow', '300 years ago', 'August 14, 2015' Keyword Arguments: string -- string to be parsed timezone -- timezone referenced from (default: 'UTC') prefer_dates_from -- what dates are prefered when `string` is ambigous. options are 'past', 'future', and 'current_period' (default: 'current_period'). see: [1] Reference: [1] dateparser.readthedocs.io/en/latest/usage.html#handling-incomplete-dates """ settings = { 'TIMEZONE': timezone, 'RETURN_AS_TIMEZONE_AWARE': True, 'TO_TIMEZONE': 'UTC', 'PREFER_DATES_FROM': prefer_dates_from, } dt = dateparser.parse(string, settings=settings) if dt is None: raise ValueError('invalid datetime input specified.') return MayaDT.from_datetime(dt)
[ "def", "when", "(", "string", ",", "timezone", "=", "'UTC'", ",", "prefer_dates_from", "=", "'current_period'", ")", ":", "settings", "=", "{", "'TIMEZONE'", ":", "timezone", ",", "'RETURN_AS_TIMEZONE_AWARE'", ":", "True", ",", "'TO_TIMEZONE'", ":", "'UTC'", "...
Returns a MayaDT instance for the human moment specified. Powered by dateparser. Useful for scraping websites. Examples: 'next week', 'now', 'tomorrow', '300 years ago', 'August 14, 2015' Keyword Arguments: string -- string to be parsed timezone -- timezone referenced from (default: 'UTC') prefer_dates_from -- what dates are prefered when `string` is ambigous. options are 'past', 'future', and 'current_period' (default: 'current_period'). see: [1] Reference: [1] dateparser.readthedocs.io/en/latest/usage.html#handling-incomplete-dates
[ "Returns", "a", "MayaDT", "instance", "for", "the", "human", "moment", "specified", "." ]
python
train
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_macros.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_macros.py#L1463-L1500
def highlightNextMatch(self): """ Select and highlight the next match in the set of matches. """ # If this method was called on an empty input field (ie. # if the user hit <ctrl>+s again) then pick the default # selection. if self.qteText.toPlainText() == '': self.qteText.setText(self.defaultChoice) return # If the mathIdx variable is out of bounds (eg. the last possible # match is already selected) then wrap it around. if self.selMatchIdx < 0: self.selMatchIdx = 0 return if self.selMatchIdx >= len(self.matchList): self.selMatchIdx = 0 return # Shorthand. SCI = self.qteWidget # Undo the highlighting of the previously selected match. start, stop = self.matchList[self.selMatchIdx - 1] line, col = SCI.lineIndexFromPosition(start) SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF) SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 30) # Highlight the next match. start, stop = self.matchList[self.selMatchIdx] SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF) SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 31) # Place the cursor at the start of the currently selected match. line, col = SCI.lineIndexFromPosition(start) SCI.setCursorPosition(line, col) self.selMatchIdx += 1
[ "def", "highlightNextMatch", "(", "self", ")", ":", "# If this method was called on an empty input field (ie.", "# if the user hit <ctrl>+s again) then pick the default", "# selection.", "if", "self", ".", "qteText", ".", "toPlainText", "(", ")", "==", "''", ":", "self", "....
Select and highlight the next match in the set of matches.
[ "Select", "and", "highlight", "the", "next", "match", "in", "the", "set", "of", "matches", "." ]
python
train
Kronuz/pyScss
scss/util.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/util.py#L108-L122
def make_filename_hash(key): """Convert the given key (a simple Python object) to a unique-ish hash suitable for a filename. """ key_repr = repr(key).replace(BASE_DIR, '').encode('utf8') # This is really stupid but necessary for making the repr()s be the same on # Python 2 and 3 and thus allowing the test suite to run on both. # TODO better solutions include: not using a repr, not embedding hashes in # the expected test results if sys.platform == 'win32': # this is to make sure the hash is the same on win and unix platforms key_repr = key_repr.replace(b'\\\\', b'/') key_repr = re.sub(b"\\bu'", b"'", key_repr) key_hash = hashlib.md5(key_repr).digest() return base64.b64encode(key_hash, b'__').decode('ascii').rstrip('=')
[ "def", "make_filename_hash", "(", "key", ")", ":", "key_repr", "=", "repr", "(", "key", ")", ".", "replace", "(", "BASE_DIR", ",", "''", ")", ".", "encode", "(", "'utf8'", ")", "# This is really stupid but necessary for making the repr()s be the same on", "# Python ...
Convert the given key (a simple Python object) to a unique-ish hash suitable for a filename.
[ "Convert", "the", "given", "key", "(", "a", "simple", "Python", "object", ")", "to", "a", "unique", "-", "ish", "hash", "suitable", "for", "a", "filename", "." ]
python
train
gccxml/pygccxml
pygccxml/declarations/namespace.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/namespace.py#L94-L104
def remove_declaration(self, decl): """ Removes declaration from members list. :param decl: declaration to be removed :type decl: :class:`declaration_t` """ del self.declarations[self.declarations.index(decl)] decl.cache.reset()
[ "def", "remove_declaration", "(", "self", ",", "decl", ")", ":", "del", "self", ".", "declarations", "[", "self", ".", "declarations", ".", "index", "(", "decl", ")", "]", "decl", ".", "cache", ".", "reset", "(", ")" ]
Removes declaration from members list. :param decl: declaration to be removed :type decl: :class:`declaration_t`
[ "Removes", "declaration", "from", "members", "list", "." ]
python
train
chrisrink10/basilisp
src/basilisp/lang/util.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/util.py#L56-L67
def demunge(s: str) -> str: """Replace munged string components with their original representation.""" def demunge_replacer(match: Match) -> str: full_match = match.group(0) replacement = _DEMUNGE_REPLACEMENTS.get(full_match, None) if replacement: return replacement return full_match return re.sub(_DEMUNGE_PATTERN, demunge_replacer, s).replace("_", "-")
[ "def", "demunge", "(", "s", ":", "str", ")", "->", "str", ":", "def", "demunge_replacer", "(", "match", ":", "Match", ")", "->", "str", ":", "full_match", "=", "match", ".", "group", "(", "0", ")", "replacement", "=", "_DEMUNGE_REPLACEMENTS", ".", "get...
Replace munged string components with their original representation.
[ "Replace", "munged", "string", "components", "with", "their", "original", "representation", "." ]
python
test
wavefrontHQ/python-client
wavefront_api_client/api/saved_search_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/saved_search_api.py#L226-L248
def get_all_entity_type_saved_searches(self, entitytype, **kwargs): # noqa: E501 """Get all saved searches for a specific entity type for a user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_entity_type_saved_searches(entitytype, async_req=True) >>> result = thread.get() :param async_req bool :param str entitytype: (required) :param int offset: :param int limit: :return: ResponseContainerPagedSavedSearch If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_entity_type_saved_searches_with_http_info(entitytype, **kwargs) # noqa: E501 else: (data) = self.get_all_entity_type_saved_searches_with_http_info(entitytype, **kwargs) # noqa: E501 return data
[ "def", "get_all_entity_type_saved_searches", "(", "self", ",", "entitytype", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "se...
Get all saved searches for a specific entity type for a user # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_entity_type_saved_searches(entitytype, async_req=True) >>> result = thread.get() :param async_req bool :param str entitytype: (required) :param int offset: :param int limit: :return: ResponseContainerPagedSavedSearch If the method is called asynchronously, returns the request thread.
[ "Get", "all", "saved", "searches", "for", "a", "specific", "entity", "type", "for", "a", "user", "#", "noqa", ":", "E501" ]
python
train
secdev/scapy
scapy/arch/windows/__init__.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/windows/__init__.py#L283-L287
def get_ip_from_name(ifname, v6=False): """Backward compatibility: indirectly calls get_ips Deprecated.""" iface = IFACES.dev_from_name(ifname) return get_ips(v6=v6).get(iface, [""])[0]
[ "def", "get_ip_from_name", "(", "ifname", ",", "v6", "=", "False", ")", ":", "iface", "=", "IFACES", ".", "dev_from_name", "(", "ifname", ")", "return", "get_ips", "(", "v6", "=", "v6", ")", ".", "get", "(", "iface", ",", "[", "\"\"", "]", ")", "["...
Backward compatibility: indirectly calls get_ips Deprecated.
[ "Backward", "compatibility", ":", "indirectly", "calls", "get_ips", "Deprecated", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/lib/tzlocal/unix.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/lib/tzlocal/unix.py#L39-L146
def _get_localzone(_root='/'): """Tries to find the local timezone configuration. This method prefers finding the timezone name and passing that to pytz, over passing in the localtime file, as in the later case the zoneinfo name is unknown. The parameter _root makes the function look for files like /etc/localtime beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters.""" tzenv = _try_tz_from_env() if tzenv: return tzenv # Now look for distribution specific configuration files # that contain the timezone name. for configfile in ('etc/timezone', 'var/db/zoneinfo'): tzpath = os.path.join(_root, configfile) try: with open(tzpath, 'rb') as tzfile: data = tzfile.read() # Issue #3 was that /etc/timezone was a zoneinfo file. # That's a misconfiguration, but we need to handle it gracefully: if data[:5] == b'TZif2': continue etctz = data.strip().decode() if not etctz: # Empty file, skip continue for etctz in data.decode().splitlines(): # Get rid of host definitions and comments: if ' ' in etctz: etctz, dummy = etctz.split(' ', 1) if '#' in etctz: etctz, dummy = etctz.split('#', 1) if not etctz: continue return pytz.timezone(etctz.replace(' ', '_')) except IOError: # File doesn't exist or is a directory continue # CentOS has a ZONE setting in /etc/sysconfig/clock, # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and # Gentoo has a TIMEZONE setting in /etc/conf.d/clock # We look through these files for a timezone: zone_re = re.compile(r'\s*ZONE\s*=\s*\"') timezone_re = re.compile(r'\s*TIMEZONE\s*=\s*\"') end_re = re.compile('\"') for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): tzpath = os.path.join(_root, filename) try: with open(tzpath, 'rt') as tzfile: data = tzfile.readlines() for line in data: # Look for the ZONE= setting. match = zone_re.match(line) if match is None: # No ZONE= setting. Look for the TIMEZONE= setting. 
match = timezone_re.match(line) if match is not None: # Some setting existed line = line[match.end():] etctz = line[:end_re.search(line).start()] # We found a timezone return pytz.timezone(etctz.replace(' ', '_')) except IOError: # File doesn't exist or is a directory continue # systemd distributions use symlinks that include the zone name, # see manpage of localtime(5) and timedatectl(1) tzpath = os.path.join(_root, 'etc/localtime') if os.path.exists(tzpath) and os.path.islink(tzpath): tzpath = os.path.realpath(tzpath) start = tzpath.find("/")+1 while start is not 0: tzpath = tzpath[start:] try: return pytz.timezone(tzpath) except pytz.UnknownTimeZoneError: pass start = tzpath.find("/")+1 # Are we under Termux on Android? It's not officially supported, because # there is no reasonable way to run tests for this, but let's make an effort. if os.path.exists('/system/bin/getprop'): import subprocess androidtz = subprocess.check_output(['getprop', 'persist.sys.timezone']) return pytz.timezone(androidtz.strip().decode()) # No explicit setting existed. Use localtime for filename in ('etc/localtime', 'usr/local/etc/localtime'): tzpath = os.path.join(_root, filename) if not os.path.exists(tzpath): continue with open(tzpath, 'rb') as tzfile: return pytz.tzfile.build_tzinfo('local', tzfile) raise pytz.UnknownTimeZoneError('Can not find any timezone configuration')
[ "def", "_get_localzone", "(", "_root", "=", "'/'", ")", ":", "tzenv", "=", "_try_tz_from_env", "(", ")", "if", "tzenv", ":", "return", "tzenv", "# Now look for distribution specific configuration files", "# that contain the timezone name.", "for", "configfile", "in", "(...
Tries to find the local timezone configuration. This method prefers finding the timezone name and passing that to pytz, over passing in the localtime file, as in the later case the zoneinfo name is unknown. The parameter _root makes the function look for files like /etc/localtime beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters.
[ "Tries", "to", "find", "the", "local", "timezone", "configuration", "." ]
python
train
tchx84/grestful
grestful/helpers.py
https://github.com/tchx84/grestful/blob/5f7ee7eb358cf260c97d41f8680e8f168ef5d843/grestful/helpers.py#L19-L26
def param_upload(field, path): """ Pack upload metadata. """ if not path: return None param = {} param['field'] = field param['path'] = path return param
[ "def", "param_upload", "(", "field", ",", "path", ")", ":", "if", "not", "path", ":", "return", "None", "param", "=", "{", "}", "param", "[", "'field'", "]", "=", "field", "param", "[", "'path'", "]", "=", "path", "return", "param" ]
Pack upload metadata.
[ "Pack", "upload", "metadata", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L431-L437
def config_profile_list(self): """Return config profile list from DCNM.""" these_profiles = self._config_profile_list() or [] profile_list = [q for p in these_profiles for q in [p.get('profileName')]] return profile_list
[ "def", "config_profile_list", "(", "self", ")", ":", "these_profiles", "=", "self", ".", "_config_profile_list", "(", ")", "or", "[", "]", "profile_list", "=", "[", "q", "for", "p", "in", "these_profiles", "for", "q", "in", "[", "p", ".", "get", "(", "...
Return config profile list from DCNM.
[ "Return", "config", "profile", "list", "from", "DCNM", "." ]
python
train
ace0/pyrelic
pyrelic/common.py
https://github.com/ace0/pyrelic/blob/f23d4e6586674675f72304d5938548267d6413bf/pyrelic/common.py#L14-L21
def assertSameType(a, b): """ Raises an exception if @b is not an instance of type(@a) """ if not isinstance(b, type(a)): raise NotImplementedError("This operation is only supported for " \ "elements of the same type. Instead found {} and {}". format(type(a), type(b)))
[ "def", "assertSameType", "(", "a", ",", "b", ")", ":", "if", "not", "isinstance", "(", "b", ",", "type", "(", "a", ")", ")", ":", "raise", "NotImplementedError", "(", "\"This operation is only supported for \"", "\"elements of the same type. Instead found {} and {}\""...
Raises an exception if @b is not an instance of type(@a)
[ "Raises", "an", "exception", "if" ]
python
train
tanghaibao/jcvi
jcvi/formats/bed.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/bed.py#L580-L602
def density(args): """ %prog density bedfile ref.fasta Calculates density of features per seqid. """ p = OptionParser(density.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) bedfile, fastafile = args bed = Bed(bedfile) sizes = Sizes(fastafile).mapping header = "seqid features size density_per_Mb".split() print("\t".join(header)) for seqid, bb in bed.sub_beds(): nfeats = len(bb) size = sizes[seqid] ds = nfeats * 1e6 / size print("\t".join(str(x) for x in \ (seqid, nfeats, size, "{0:.1f}".format(ds))))
[ "def", "density", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "density", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", ...
%prog density bedfile ref.fasta Calculates density of features per seqid.
[ "%prog", "density", "bedfile", "ref", ".", "fasta" ]
python
train
ChrisCummins/labm8
fs.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/fs.py#L422-L428
def mkopen(p, *args, **kwargs): """ A wrapper for the open() builtin which makes parent directories if needed. """ dir = os.path.dirname(p) mkdir(dir) return open(p, *args, **kwargs)
[ "def", "mkopen", "(", "p", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dir", "=", "os", ".", "path", ".", "dirname", "(", "p", ")", "mkdir", "(", "dir", ")", "return", "open", "(", "p", ",", "*", "args", ",", "*", "*", "kwargs", "...
A wrapper for the open() builtin which makes parent directories if needed.
[ "A", "wrapper", "for", "the", "open", "()", "builtin", "which", "makes", "parent", "directories", "if", "needed", "." ]
python
train