repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L2866-L2876
def _synchronize_controls(self):
    """Update the GUI state to match the current button configuration."""
    # Show or hide the script grid depending on the script button.
    show_script = self.button_script.get_value()
    self.grid_script._widget.setVisible(show_script)

    # The script is only editable when the autoscript combo is on entry 0.
    if self.combo_autoscript.get_index() == 0:
        self.script.enable()
    else:
        self.script.disable()
[ "def", "_synchronize_controls", "(", "self", ")", ":", "# whether the script is visible", "self", ".", "grid_script", ".", "_widget", ".", "setVisible", "(", "self", ".", "button_script", ".", "get_value", "(", ")", ")", "# whether we should be able to edit it.", "if"...
Updates the gui based on button configs.
[ "Updates", "the", "gui", "based", "on", "button", "configs", "." ]
python
train
phfaist/pylatexenc
pylatexenc/latexwalker.py
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1296-L1309
def get_latex_braced_group(s, pos, brace_type='{', **parse_flags):
    """
    Read a latex expression enclosed in braces ``{...}``.  The first token of
    `s[pos:]` must be an opening brace.

    Returns a tuple `(node, pos, len)`.  `pos` is the first char of the
    expression (which has to be an opening brace), and `len` is its length,
    including the closing brace.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_braced_group()` instead.
    """
    # Thin compatibility wrapper around the LatexWalker method of the same name.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_braced_group(pos=pos, brace_type=brace_type)
[ "def", "get_latex_braced_group", "(", "s", ",", "pos", ",", "brace_type", "=", "'{'", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_braced_group", "(", "pos", "=", "pos", ","...
Reads a latex expression enclosed in braces {...}. The first token of `s[pos:]` must be an opening brace. Returns a tuple `(node, pos, len)`. `pos` is the first char of the expression (which has to be an opening brace), and `len` is its length, including the closing brace. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_braced_group()` instead.
[ "Reads", "a", "latex", "expression", "enclosed", "in", "braces", "{", "...", "}", ".", "The", "first", "token", "of", "s", "[", "pos", ":", "]", "must", "be", "an", "opening", "brace", "." ]
python
test
mwouts/jupytext
jupytext/cell_to_text.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_to_text.py#L66-L72
def is_code(self):
    """Is this cell a code cell?"""
    if self.cell_type == 'code':
        return True
    # Raw cells carrying an 'active' metadata entry also count as code.
    return self.cell_type == 'raw' and 'active' in self.metadata
[ "def", "is_code", "(", "self", ")", ":", "if", "self", ".", "cell_type", "==", "'code'", ":", "return", "True", "if", "self", ".", "cell_type", "==", "'raw'", "and", "'active'", "in", "self", ".", "metadata", ":", "return", "True", "return", "False" ]
Is this cell a code cell?
[ "Is", "this", "cell", "a", "code", "cell?" ]
python
train
utapyngo/django-render-partial
django_render_partial/templatetags/render_partial.py
https://github.com/utapyngo/django-render-partial/blob/37256e32a365adefddd88a58a809db3743790120/django_render_partial/templatetags/render_partial.py#L49-L80
def render_partial(parser, token):
    """
    Inserts the output of a view, using fully qualified view name,
    or view name from urls.py.

        {% render_partial view_name arg[ arg2] k=v [k2=v2...] %}

    IMPORTANT: the calling template must receive a context variable called
    'request' containing the original HttpRequest. This means you should be OK
    with permissions and other session state.

    (Note that every argument will be evaluated against context except for the
    names of any keyword arguments.)
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            '%r tag requires one or more arguments' % token.contents.split()[0]
        )

    bits.pop(0)  # drop the tag name itself
    view_name = bits.pop(0)

    args = []
    kwargs = {}
    for bit in bits:
        # "k=v" becomes a keyword argument; anything else is positional.
        eq = bit.find('=')
        if eq == -1:
            args.append(bit)
        else:
            kwargs[str(bit[:eq])] = bit[eq + 1:]

    return ViewNode(view_name, args, kwargs)
[ "def", "render_partial", "(", "parser", ",", "token", ")", ":", "args", "=", "[", "]", "kwargs", "=", "{", "}", "tokens", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "tokens", ")", "<", "2", ":", "raise", "TemplateSyntaxError", "...
Inserts the output of a view, using fully qualified view name, or view name from urls.py. {% render_partial view_name arg[ arg2] k=v [k2=v2...] %} IMPORTANT: the calling template must receive a context variable called 'request' containing the original HttpRequest. This means you should be OK with permissions and other session state. (Note that every argument will be evaluated against context except for the names of any keyword arguments.)
[ "Inserts", "the", "output", "of", "a", "view", "using", "fully", "qualified", "view", "name", "or", "view", "name", "from", "urls", ".", "py", "." ]
python
train
networks-lab/metaknowledge
metaknowledge/mkCollection.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/mkCollection.py#L420-L438
def containsID(self, idVal):
    """Checks if the collection contains an item with the given _idVal_

    # Parameters

    _idVal_ : `str`

    > The queried id string

    # Returns

    `bool`

    > `True` if the item is in the collection
    """
    return any(item.id == idVal for item in self)
[ "def", "containsID", "(", "self", ",", "idVal", ")", ":", "for", "i", "in", "self", ":", "if", "i", ".", "id", "==", "idVal", ":", "return", "True", "return", "False" ]
Checks if the collected items contains the give _idVal_ # Parameters _idVal_ : `str` > The queried id string # Returns `bool` > `True` if the item is in the collection
[ "Checks", "if", "the", "collected", "items", "contains", "the", "give", "_idVal_" ]
python
train
saltstack/salt
salt/proxy/cimc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/cimc.py#L278-L291
def grains():
    '''
    Get the grains from the proxied device
    '''
    # Populate the cache once; subsequent calls return the cached value.
    if not DETAILS.get('grains_cache', {}):
        DETAILS['grains_cache'] = GRAINS_CACHE
        try:
            rack_unit = get_config_resolver_class('computeRackUnit', False)
            DETAILS['grains_cache'] = rack_unit['outConfigs']['computeRackUnit']
        except salt.exceptions.CommandExecutionError:
            # Device query failed; fall back to the default cache contents.
            pass
        except Exception as err:
            log.error(err)
    return DETAILS['grains_cache']
[ "def", "grains", "(", ")", ":", "if", "not", "DETAILS", ".", "get", "(", "'grains_cache'", ",", "{", "}", ")", ":", "DETAILS", "[", "'grains_cache'", "]", "=", "GRAINS_CACHE", "try", ":", "compute_rack", "=", "get_config_resolver_class", "(", "'computeRackUn...
Get the grains from the proxied device
[ "Get", "the", "grains", "from", "the", "proxied", "device" ]
python
train
mlperf/training
object_detection/pytorch/maskrcnn_benchmark/modeling/utils.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/utils.py#L9-L16
def cat(tensors, dim=0):
    """
    Efficient version of torch.cat that avoids a copy if there is only
    a single element in a list
    """
    assert isinstance(tensors, (list, tuple))
    # A one-element sequence needs no concatenation (and no copy) at all.
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors, dim)
[ "def", "cat", "(", "tensors", ",", "dim", "=", "0", ")", ":", "assert", "isinstance", "(", "tensors", ",", "(", "list", ",", "tuple", ")", ")", "if", "len", "(", "tensors", ")", "==", "1", ":", "return", "tensors", "[", "0", "]", "return", "torch...
Efficient version of torch.cat that avoids a copy if there is only a single element in a list
[ "Efficient", "version", "of", "torch", ".", "cat", "that", "avoids", "a", "copy", "if", "there", "is", "only", "a", "single", "element", "in", "a", "list" ]
python
train
aholkner/bacon
native/Vendor/FreeType/src/tools/docmaker/content.py
https://github.com/aholkner/bacon/blob/edf3810dcb211942d392a8637945871399b0650d/native/Vendor/FreeType/src/tools/docmaker/content.py#L389-L418
def process_content( self, content ):
    """process a block content and return a list of DocMarkup objects
       corresponding to it"""
    # Removed the unused locals `markup`/`markup_lines` from the original:
    # this method accumulates into self.markup / self.markup_lines instead.
    first = 1

    for line in content:
        found = None
        for t in re_markup_tags:
            m = t.match( line )
            if m:
                # str methods instead of the Python-2-only `string` module
                # functions; behavior is identical and also works on Python 3.
                found  = m.group( 1 ).lower()
                prefix = len( m.group( 0 ) )
                # remove markup from line, preserving column alignment
                line   = " " * prefix + line[prefix:]
                break

        # is it the start of a new markup section ?
        if found:
            first = 0
            self.add_markup()  # add current markup content
            self.markup = found
            if len( line.strip() ) > 0:
                self.markup_lines.append( line )
        elif first == 0:
            # lines before the first tag are ignored; everything after is kept
            self.markup_lines.append( line )

    self.add_markup()

    return self.markups
[ "def", "process_content", "(", "self", ",", "content", ")", ":", "markup", "=", "None", "markup_lines", "=", "[", "]", "first", "=", "1", "for", "line", "in", "content", ":", "found", "=", "None", "for", "t", "in", "re_markup_tags", ":", "m", "=", "t...
process a block content and return a list of DocMarkup objects corresponding to it
[ "process", "a", "block", "content", "and", "return", "a", "list", "of", "DocMarkup", "objects", "corresponding", "to", "it" ]
python
test
shinux/PyTime
pytime/pytime.py
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L306-L318
def thanks(year=None):
    """
    4th Thursday in Nov

    :param year: int
    :return: Thanksgiving Day
    """
    # _year is presumably the module-level current year used when no year is
    # given — TODO confirm against module globals.
    nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
    # date.weekday(): Monday == 0 ... Thursday == 3.
    weekday_seq = nov_first.weekday()
    if weekday_seq > 3:
        # Nov 1 falls after Thursday: first Thursday is day 11 - weekday_seq,
        # so the fourth Thursday is 21 days later -> 32 - weekday_seq.
        current_day = 32 - weekday_seq
    else:
        # Nov 1 is Thursday or earlier: first Thursday is day 4 - weekday_seq,
        # fourth Thursday is 25 - weekday_seq.
        current_day = 25 - weekday_seq
    return datetime.date(nov_first.year, 11, current_day)
[ "def", "thanks", "(", "year", "=", "None", ")", ":", "nov_first", "=", "datetime", ".", "date", "(", "_year", ",", "11", ",", "1", ")", "if", "not", "year", "else", "datetime", ".", "date", "(", "int", "(", "year", ")", ",", "11", ",", "1", ")"...
4rd Thursday in Nov :param year: int :return: Thanksgiving Day
[ "4rd", "Thursday", "in", "Nov", ":", "param", "year", ":", "int", ":", "return", ":", "Thanksgiving", "Day" ]
python
train
datastax/python-driver
cassandra/cqlengine/models.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/models.py#L711-L745
def save(self): """ Saves an object to the database. .. code-block:: python #create a person instance person = Person(first_name='Kimberly', last_name='Eggleston') #saves it to Cassandra person.save() """ # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: raise PolymorphicModelException('cannot save polymorphic base model') else: setattr(self, self._discriminator_column_name, self.__discriminator_value__) self.validate() self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, if_not_exists=self._if_not_exists, conditional=self._conditional, timeout=self._timeout, if_exists=self._if_exists).save() self._set_persisted() self._timestamp = None return self
[ "def", "save", "(", "self", ")", ":", "# handle polymorphic models", "if", "self", ".", "_is_polymorphic", ":", "if", "self", ".", "_is_polymorphic_base", ":", "raise", "PolymorphicModelException", "(", "'cannot save polymorphic base model'", ")", "else", ":", "setatt...
Saves an object to the database. .. code-block:: python #create a person instance person = Person(first_name='Kimberly', last_name='Eggleston') #saves it to Cassandra person.save()
[ "Saves", "an", "object", "to", "the", "database", "." ]
python
train
senaite/senaite.core
bika/lims/subscribers/auditlog.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/subscribers/auditlog.py#L72-L85
def ObjectInitializedEventHandler(obj, event):
    """Object has been created
    """
    # Only take a snapshot for supported objects that do not already
    # have a snapshot history.
    if supports_snapshots(obj) and not has_snapshots(obj):
        take_snapshot(obj, action="create")
[ "def", "ObjectInitializedEventHandler", "(", "obj", ",", "event", ")", ":", "# only snapshot supported objects", "if", "not", "supports_snapshots", "(", "obj", ")", ":", "return", "# object has already snapshots", "if", "has_snapshots", "(", "obj", ")", ":", "return",...
Object has been created
[ "Object", "has", "been", "created" ]
python
train
HewlettPackard/python-hpOneView
hpOneView/image_streamer/resources/deployment_plans.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/image_streamer/resources/deployment_plans.py#L131-L142
def get_osdp(self, id_or_uri):
    """
    Retrieves facts about Server Profiles and Server Profile Templates that are using
    Deployment Plan based on the ID or URI provided.

    Args:
        id_or_uri: ID or URI of the Deployment Plan.

    Returns:
        dict: Server Profiles and Server Profile Templates
    """
    # Build the ".../osdp" subresource URI for this Deployment Plan and GET it.
    osdp_uri = self._client.build_subresource_uri(
        resource_id_or_uri=id_or_uri, subresource_path="osdp")
    return self._client.get(osdp_uri)
[ "def", "get_osdp", "(", "self", ",", "id_or_uri", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_subresource_uri", "(", "resource_id_or_uri", "=", "id_or_uri", ",", "subresource_path", "=", "\"osdp\"", ")", "return", "self", ".", "_client", ".", "...
Retrieves facts about Server Profiles and Server Profile Templates that are using Deployment Plan based on the ID or URI provided. Args: id_or_uri: ID or URI of the Deployment Plan. Returns: dict: Server Profiles and Server Profile Templates
[ "Retrieves", "facts", "about", "Server", "Profiles", "and", "Server", "Profile", "Templates", "that", "are", "using", "Deployment", "Plan", "based", "on", "the", "ID", "or", "URI", "provided", "." ]
python
train
nuagenetworks/bambou
bambou/utils/nuremote_attribute.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/utils/nuremote_attribute.py#L115-L141
def get_default_value(self):
    """ Get a default value of the attribute_type """
    # Constrained attributes default to their first allowed choice.
    if self.choices:
        return self.choices[0]

    attr_type = self.attribute_type
    value = attr_type()

    if attr_type is time:
        value = int(value)
    elif attr_type is str:
        value = "A"

    # Pad strings / pick an integer so the value satisfies the length bounds
    # (min_length takes precedence over max_length).
    if self.min_length:
        if attr_type is str:
            value = value.ljust(self.min_length, 'a')
        elif attr_type is int:
            value = self.min_length
    elif self.max_length:
        if attr_type is str:
            value = value.ljust(self.max_length, 'a')
        elif attr_type is int:
            value = self.max_length

    return value
[ "def", "get_default_value", "(", "self", ")", ":", "if", "self", ".", "choices", ":", "return", "self", ".", "choices", "[", "0", "]", "value", "=", "self", ".", "attribute_type", "(", ")", "if", "self", ".", "attribute_type", "is", "time", ":", "value...
Get a default value of the attribute_type
[ "Get", "a", "default", "value", "of", "the", "attribute_type" ]
python
train
chrlie/frogsay
src/frogsay/__init__.py
https://github.com/chrlie/frogsay/blob/1c21e1401dc24719732218af830d34b842ab10b9/src/frogsay/__init__.py#L17-L30
def cli():
    """\
    Frogsay generates an ASCII picture of a FROG spouting a FROG tip.

    FROG tips are fetched from frog.tips's API endpoint when needed, otherwise
    they are cached locally in an application-specific folder.
    """
    with open_client(cache_dir=get_cache_dir()) as client:
        tip = client.frog_tip()
        # Fit the fresco to the current terminal width.
        width = click.termui.get_terminal_size()[0]
        fresco = make_frog_fresco(tip, width=width)
        click.echo(fresco)
[ "def", "cli", "(", ")", ":", "with", "open_client", "(", "cache_dir", "=", "get_cache_dir", "(", ")", ")", "as", "client", ":", "tip", "=", "client", ".", "frog_tip", "(", ")", "terminal_width", "=", "click", ".", "termui", ".", "get_terminal_size", "(",...
\ Frogsay generates an ASCII picture of a FROG spouting a FROG tip. FROG tips are fetched from frog.tips's API endpoint when needed, otherwise they are cached locally in an application-specific folder.
[ "\\", "Frogsay", "generates", "an", "ASCII", "picture", "of", "a", "FROG", "spouting", "a", "FROG", "tip", "." ]
python
train
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L97-L107
def same_sentence(c):
    """Return True if all Mentions in the given candidate are from the same Sentence.

    :param c: The candidate whose Mentions are being compared
    :rtype: boolean
    """
    # Every mention must have a sentence, and it must equal the first
    # mention's sentence.
    reference = _to_span(c[0]).sentence
    return all(
        _to_span(mention).sentence is not None
        and _to_span(mention).sentence == reference
        for mention in c
    )
[ "def", "same_sentence", "(", "c", ")", ":", "return", "all", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", "is", "not", "None", "and", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", "==", "_to_span", "(", "c", "[", ...
Return True if all Mentions in the given candidate are from the same Sentence. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Sentence", "." ]
python
train
tommyod/streprogen
streprogen/utils.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/utils.py#L276-L306
def spread(iterable):
    """Returns the maximal spread of a sorted list of numbers.

    Parameters
    ----------
    iterable
        A list (or any iterable) of numbers.

    Returns
    -------
    max_diff
        The maximal difference between consecutive values when the iterable
        is sorted.  Returns 0 for inputs with fewer than two elements
        (the original implementation crashed on an empty input).

    Examples
    -------
    >>> spread([1, 11, 13, 15])
    10

    >>> spread([1, 15, 11, 13])
    10

    >>> spread([])
    0
    """
    # sorted() builds a fresh list, so the input is never mutated and any
    # iterable (tuple, generator, ...) is accepted, not just lists.
    values = sorted(iterable)

    # Fewer than two values -> no gap to measure.
    if len(values) < 2:
        return 0

    # Adjacent differences of a sorted sequence are non-negative.
    return max(b - a for (a, b) in zip(values[:-1], values[1:]))
[ "def", "spread", "(", "iterable", ")", ":", "if", "len", "(", "iterable", ")", "==", "1", ":", "return", "0", "iterable", "=", "iterable", ".", "copy", "(", ")", "iterable", ".", "sort", "(", ")", "max_diff", "=", "max", "(", "abs", "(", "i", "-"...
Returns the maximal spread of a sorted list of numbers. Parameters ---------- iterable A list of numbers. Returns ------- max_diff The maximal difference when the iterable is sorted. Examples ------- >>> spread([1, 11, 13, 15]) 10 >>> spread([1, 15, 11, 13]) 10
[ "Returns", "the", "maximal", "spread", "of", "a", "sorted", "list", "of", "numbers", "." ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/uniquifier_computer.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/uniquifier_computer.py#L37-L117
def ComputeApplicationUniquifier(hash_obj):
  """Computes hash of application files.

  Application files can be anywhere on the disk. The application is free to
  import a Python module from an arbitrary path on the disk. It is also
  impossible to distinguish application files from third party libraries.
  Third party libraries are typically installed with "pip" and there is not
  a good way to guarantee that all instances of the application are going to
  have exactly the same version of each package. There is also a huge amount
  of files in all sys.path directories and it will take too much time to
  traverse them all.

  We therefore make an assumption that application files are only located
  in sys.path[0].

  When traversing files in sys.path, we can expect both .py and .pyc files.
  For source deployment, we will find both .py and .pyc files. In this case
  we will only index .py files and ignore .pyc files. In case of binary
  deployment, only the .pyc file will be there.

  The naive way to hash files would be to read the file content and compute
  some sort of a hash (e.g. SHA1). This can be expensive as well, so instead
  we just hash file name and file size. It is a good enough heuristic to
  identify modified files across different deployments.

  Args:
    hash_obj: hash aggregator to update with application uniquifier.
  """

  def ProcessDirectory(path, relative_path, depth=1):
    """Recursively computes application uniquifier for a particular directory.

    Args:
      path: absolute path of the directory to start.
      relative_path: path relative to sys.path[0]
      depth: current recursion depth.
    """
    # Bound the recursion so pathological directory trees cannot stall us.
    if depth > _MAX_DEPTH:
      return

    try:
      names = os.listdir(path)
    except BaseException:
      # Unreadable directory (permissions, races): skip it silently.
      return

    # Sort file names to ensure consistent hash regardless of order returned
    # by os.listdir. This will also put .py files before .pyc and .pyo files.
    modules = set()
    for name in sorted(names):
      current_path = os.path.join(path, name)
      if not os.path.isdir(current_path):
        file_name, ext = os.path.splitext(name)
        if ext not in ('.py', '.pyc', '.pyo'):
          continue  # This is not an application file.

        if file_name in modules:
          continue  # This is a .pyc file and we already indexed .py file.

        modules.add(file_name)

        ProcessApplicationFile(current_path,
                               os.path.join(relative_path, name))
      elif IsPackage(current_path):
        # Only recurse into directories that are actual Python packages.
        ProcessDirectory(current_path,
                         os.path.join(relative_path, name),
                         depth + 1)

  def IsPackage(path):
    """Checks if the specified directory is a valid Python package."""
    init_base_path = os.path.join(path, '__init__.py')
    # Accept __init__.py, __init__.pyc or __init__.pyo.
    return (os.path.isfile(init_base_path) or
            os.path.isfile(init_base_path + 'c') or
            os.path.isfile(init_base_path + 'o'))

  def ProcessApplicationFile(path, relative_path):
    """Updates the hash with the specified application file."""
    # Hash "relative_path:size\n"; file content is deliberately not read
    # (see the function docstring for the rationale).
    hash_obj.update(relative_path.encode())
    hash_obj.update(':'.encode())
    try:
      hash_obj.update(str(os.stat(path).st_size).encode())
    except BaseException:
      # File vanished or is unreadable; hash the name alone.
      pass
    hash_obj.update('\n'.encode())

  # Application files are assumed to live under sys.path[0] only.
  ProcessDirectory(sys.path[0], '')
[ "def", "ComputeApplicationUniquifier", "(", "hash_obj", ")", ":", "def", "ProcessDirectory", "(", "path", ",", "relative_path", ",", "depth", "=", "1", ")", ":", "\"\"\"Recursively computes application uniquifier for a particular directory.\n\n Args:\n path: absolute path...
Computes hash of application files. Application files can be anywhere on the disk. The application is free to import a Python module from an arbitrary path ok the disk. It is also impossible to distinguish application files from third party libraries. Third party libraries are typically installed with "pip" and there is not a good way to guarantee that all instances of the application are going to have exactly the same version of each package. There is also a huge amount of files in all sys.path directories and it will take too much time to traverse them all. We therefore make an assumption that application files are only located in sys.path[0]. When traversing files in sys.path, we can expect both .py and .pyc files. For source deployment, we will find both .py and .pyc files. In this case we will only index .py files and ignored .pyc file. In case of binary deployment, only .pyc file will be there. The naive way to hash files would be to read the file content and compute some sort of a hash (e.g. SHA1). This can be expensive as well, so instead we just hash file name and file size. It is a good enough heuristics to identify modified files across different deployments. Args: hash_obj: hash aggregator to update with application uniquifier.
[ "Computes", "hash", "of", "application", "files", "." ]
python
train
tamasgal/km3pipe
km3pipe/math.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L445-L474
def qeuler(yaw, pitch, roll):
    """Convert Euler angle to quaternion.

    Parameters
    ----------
    yaw: number
    pitch: number
    roll: number

    Returns
    -------
    np.array
    """
    # Work with half-angles in radians (inputs are degrees).
    half_yaw = np.radians(yaw) * 0.5
    half_pitch = np.radians(pitch) * 0.5
    half_roll = np.radians(roll) * 0.5

    cy, sy = np.cos(half_yaw), np.sin(half_yaw)
    cp, sp = np.cos(half_pitch), np.sin(half_pitch)
    cr, sr = np.cos(half_roll), np.sin(half_roll)

    return np.array((
        cy * cr * cp + sy * sr * sp,
        cy * sr * cp - sy * cr * sp,
        cy * cr * sp + sy * sr * cp,
        sy * cr * cp - cy * sr * sp,
    ))
[ "def", "qeuler", "(", "yaw", ",", "pitch", ",", "roll", ")", ":", "yaw", "=", "np", ".", "radians", "(", "yaw", ")", "pitch", "=", "np", ".", "radians", "(", "pitch", ")", "roll", "=", "np", ".", "radians", "(", "roll", ")", "cy", "=", "np", ...
Convert Euler angle to quaternion. Parameters ---------- yaw: number pitch: number roll: number Returns ------- np.array
[ "Convert", "Euler", "angle", "to", "quaternion", "." ]
python
train
DeepHorizons/iarm
iarm/arm_instructions/data_movement.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/data_movement.py#L27-L57
def MOVS(self, params):
    """
    MOVS Ra, Rb
    MOVS Ra, #imm8

    Move the value of Rb or imm8 into Ra

    Ra and Rb must be low registers
    """
    # Split "Ra, Rb" / "Ra, #imm8" into the two operand strings.
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)

    # The returned closure performs the move when executed; parsing and
    # argument checking happen up front, at decode time.
    if self.is_immediate(Rb):
        self.check_arguments(low_registers=[Ra], imm8=[Rb])

        def MOVS_func():
            # Rb[1:] strips the leading '#' of the immediate literal.
            self.register[Ra] = self.convert_to_integer(Rb[1:])

            # Set N and Z status flags
            self.set_NZ_flags(self.register[Ra])

        return MOVS_func
    elif self.is_register(Rb):
        self.check_arguments(low_registers=(Ra, Rb))

        def MOVS_func():
            # Register-to-register move; flags updated from the result.
            self.register[Ra] = self.register[Rb]
            self.set_NZ_flags(self.register[Ra])

        return MOVS_func
    else:
        raise iarm.exceptions.ParsingError("Unknown parameter: {}".format(Rb))
[ "def", "MOVS", "(", "self", ",", "params", ")", ":", "Ra", ",", "Rb", "=", "self", ".", "get_two_parameters", "(", "self", ".", "TWO_PARAMETER_COMMA_SEPARATED", ",", "params", ")", "if", "self", ".", "is_immediate", "(", "Rb", ")", ":", "self", ".", "c...
MOVS Ra, Rb MOVS Ra, #imm8 Move the value of Rb or imm8 into Ra Ra and Rb must be low registers
[ "MOVS", "Ra", "Rb", "MOVS", "Ra", "#imm8" ]
python
train
sergiocorreia/panflute
panflute/tools.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/tools.py#L518-L587
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """
    fetch an option variable, from either a local (element) level
    option/attribute tag, document level metadata tag, or a default

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``

    The order of preference is local > document > default; a level whose
    value is None falls through to the next level down.  If error_on_none=True
    and every level came up None, a ValueError is raised.

    In this manner you can set global variables which can be optionally
    overriden at a local level (e.g. applying per-Div styles in a docx
    filter while keeping a document-wide default).
    """
    # element level: a present, non-None attribute wins outright
    if options is not None and local_tag is not None and local_tag in options:
        local_value = options[local_tag]
        if local_value is not None:
            return local_value

    # document level: metadata tag
    if doc is not None and doc_tag is not None:
        doc_value = doc.get_metadata(doc_tag, None)
        if doc_value is not None:
            return doc_value

    # default level
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
[ "def", "get_option", "(", "options", "=", "None", ",", "local_tag", "=", "None", ",", "doc", "=", "None", ",", "doc_tag", "=", "None", ",", "default", "=", "None", ",", "error_on_none", "=", "True", ")", ":", "variable", "=", "None", "# element level", ...
fetch an option variable, from either a local (element) level option/attribute tag, document level metadata tag, or a default :type options: ``dict`` :type local_tag: ``str`` :type doc: :class:`Doc` :type doc_tag: ``str`` :type default: ``any`` :type error_on_none: ``bool`` The order of preference is local > document > default, although if a local or document tag returns None, then the next level down is used. Also, if error_on_none=True and the final variable is None, then a ValueError will be raised In this manner you can set global variables, which can be optionally overriden at a local level. For example, to apply different styles to docx text main.md: ------------------ style-div: name: MyStyle ------------------ :::style some text ::: ::: {.style name=MyOtherStyle} some more text ::: style_filter.py: import panflute as pf def action(elem, doc): if type(elem) == pf.Div: style = pf.get_option(elem.attributes, "name", doc, "style-div.name") elem.attributes["custom-style"] = style def main(doc=None): return run_filter(action, doc=doc) if __name__ == "__main__": main()
[ "fetch", "an", "option", "variable", "from", "either", "a", "local", "(", "element", ")", "level", "option", "/", "attribute", "tag", "document", "level", "metadata", "tag", "or", "a", "default" ]
python
train
IdentityPython/pysaml2
src/saml2/client_base.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/client_base.py#L197-L216
def add_vo_information_about_user(self, name_id):
    """ Add information to the knowledge I have about the user.
    This is for Virtual organizations.

    :param name_id: The subject identifier
    :return: A possibly extended knowledge.
    """
    try:
        ava, _ = self.users.get_identity(name_id)
    except KeyError:
        # Unknown subject: start from an empty attribute-value assertion.
        ava = {}

    # In a Virtual Organization situation, aggregation may extend the identity.
    if self.vorg and self.vorg.do_aggregation(name_id):
        ava = self.users.get_identity(name_id)[0]

    return ava
[ "def", "add_vo_information_about_user", "(", "self", ",", "name_id", ")", ":", "ava", "=", "{", "}", "try", ":", "(", "ava", ",", "_", ")", "=", "self", ".", "users", ".", "get_identity", "(", "name_id", ")", "except", "KeyError", ":", "pass", "# is th...
Add information to the knowledge I have about the user. This is for Virtual organizations. :param name_id: The subject identifier :return: A possibly extended knowledge.
[ "Add", "information", "to", "the", "knowledge", "I", "have", "about", "the", "user", ".", "This", "is", "for", "Virtual", "organizations", "." ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L604-L627
def build_dependencies(self):
    '''build_dependencies

    High-level api: Briefly compile all yang files and find out dependency
    infomation of all modules.

    Returns
    -------

    None
        Nothing returns.
    '''
    # Run pyang with the "pyimport" plugin over every yang file in dir_yang.
    command = ' '.join([
        'pyang', '--plugindir', self.pyang_plugins,
        '-p', self.dir_yang,
        '-f', 'pyimport',
        self.dir_yang + '/*.yang',
    ])
    logger.info('Building dependencies: {}'.format(command))

    # shell=True is required so the glob in the last argument expands.
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    logger.info('pyang return code is {}'.format(process.returncode))
    logger.debug(stderr.decode())

    # pyimport emits XML describing inter-module dependencies.
    xml_parser = etree.XMLParser(remove_blank_text=True)
    self.dependencies = etree.XML(stdout.decode(), xml_parser)
[ "def", "build_dependencies", "(", "self", ")", ":", "cmd_list", "=", "[", "'pyang'", ",", "'--plugindir'", ",", "self", ".", "pyang_plugins", "]", "cmd_list", "+=", "[", "'-p'", ",", "self", ".", "dir_yang", "]", "cmd_list", "+=", "[", "'-f'", ",", "'pyi...
build_dependencies High-level api: Briefly compile all yang files and find out dependency infomation of all modules. Returns ------- None Nothing returns.
[ "build_dependencies" ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_HIL.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_HIL.py#L115-L122
def check_apm_out(self): '''check if we should send new data to the APM''' now = time.time() if now - self.last_apm_send_time < 0.02: return self.last_apm_send_time = now if self.hil_state_msg is not None: self.master.mav.send(self.hil_state_msg)
[ "def", "check_apm_out", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "now", "-", "self", ".", "last_apm_send_time", "<", "0.02", ":", "return", "self", ".", "last_apm_send_time", "=", "now", "if", "self", ".", "hil_state_msg", ...
check if we should send new data to the APM
[ "check", "if", "we", "should", "send", "new", "data", "to", "the", "APM" ]
python
train
jobovy/galpy
galpy/snapshot/directnbody.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/snapshot/directnbody.py#L58-L71
def _direct_nbody_step(q,p,m,t,dt,pot,softening,softening_args): """One N-body step: drift-kick-drift""" #drift q12= [symplecticode.leapfrog_leapq(q[ii],p[ii],dt/2.) \ for ii in range(len(q))] #kick force= _direct_nbody_force(q12,m,t+dt/2.,pot,softening,softening_args) #print(force) p= [symplecticode.leapfrog_leapp(p[ii],dt,force[ii]) \ for ii in range(len(p))] #drift q= [symplecticode.leapfrog_leapq(q12[ii],p[ii],dt/2.) \ for ii in range(len(q12))] return (q,p)
[ "def", "_direct_nbody_step", "(", "q", ",", "p", ",", "m", ",", "t", ",", "dt", ",", "pot", ",", "softening", ",", "softening_args", ")", ":", "#drift", "q12", "=", "[", "symplecticode", ".", "leapfrog_leapq", "(", "q", "[", "ii", "]", ",", "p", "[...
One N-body step: drift-kick-drift
[ "One", "N", "-", "body", "step", ":", "drift", "-", "kick", "-", "drift" ]
python
train
wbond/oscrypto
oscrypto/_osx/_core_foundation_ctypes.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/_core_foundation_ctypes.py#L316-L331
def native(cls, value): """ Converts a CF* object into its python equivalent :param value: The CF* object to convert :return: The native python object """ type_id = CoreFoundation.CFGetTypeID(value) if type_id in cls._native_map: return cls._native_map[type_id](value) else: return value
[ "def", "native", "(", "cls", ",", "value", ")", ":", "type_id", "=", "CoreFoundation", ".", "CFGetTypeID", "(", "value", ")", "if", "type_id", "in", "cls", ".", "_native_map", ":", "return", "cls", ".", "_native_map", "[", "type_id", "]", "(", "value", ...
Converts a CF* object into its python equivalent :param value: The CF* object to convert :return: The native python object
[ "Converts", "a", "CF", "*", "object", "into", "its", "python", "equivalent" ]
python
valid
intel-analytics/BigDL
pyspark/bigdl/nn/layer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/nn/layer.py#L263-L284
def backward(self, input, grad_output): """ NB: It's for debug only, please use optimizer.optimize() in production. Performs a back-propagation step through the module, with respect to the given input. In general this method makes the assumption forward(input) has been called before, with the same input. This is necessary for optimization reasons. If you do not respect this rule, backward() will compute incorrect gradients. :param input: ndarray or list of ndarray or JTensor or list of JTensor. :param grad_output: ndarray or list of ndarray or JTensor or list of JTensor. :return: ndarray or list of ndarray """ jinput, input_is_table = self.check_input(input) jgrad_output, grad_output_is_table = self.check_input(grad_output) output = callBigDlFunc(self.bigdl_type, "modelBackward", self.value, jinput, input_is_table, jgrad_output, grad_output_is_table) return self.convert_output(output)
[ "def", "backward", "(", "self", ",", "input", ",", "grad_output", ")", ":", "jinput", ",", "input_is_table", "=", "self", ".", "check_input", "(", "input", ")", "jgrad_output", ",", "grad_output_is_table", "=", "self", ".", "check_input", "(", "grad_output", ...
NB: It's for debug only, please use optimizer.optimize() in production. Performs a back-propagation step through the module, with respect to the given input. In general this method makes the assumption forward(input) has been called before, with the same input. This is necessary for optimization reasons. If you do not respect this rule, backward() will compute incorrect gradients. :param input: ndarray or list of ndarray or JTensor or list of JTensor. :param grad_output: ndarray or list of ndarray or JTensor or list of JTensor. :return: ndarray or list of ndarray
[ "NB", ":", "It", "s", "for", "debug", "only", "please", "use", "optimizer", ".", "optimize", "()", "in", "production", ".", "Performs", "a", "back", "-", "propagation", "step", "through", "the", "module", "with", "respect", "to", "the", "given", "input", ...
python
test
atztogo/phonopy
phonopy/interface/dftbp.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/interface/dftbp.py#L205-L219
def write_supercells_with_displacements(supercell, cells_with_disps, filename="geo.gen"): """Writes perfect supercell and supercells with displacements Args: supercell: perfect supercell cells_with_disps: supercells with displaced atoms filename: root-filename """ # original cell write_dftbp(filename + "S", supercell) # displaced cells for ii in range(len(cells_with_disps)): write_dftbp(filename + "S-{:03d}".format(ii+1), cells_with_disps[ii])
[ "def", "write_supercells_with_displacements", "(", "supercell", ",", "cells_with_disps", ",", "filename", "=", "\"geo.gen\"", ")", ":", "# original cell", "write_dftbp", "(", "filename", "+", "\"S\"", ",", "supercell", ")", "# displaced cells", "for", "ii", "in", "r...
Writes perfect supercell and supercells with displacements Args: supercell: perfect supercell cells_with_disps: supercells with displaced atoms filename: root-filename
[ "Writes", "perfect", "supercell", "and", "supercells", "with", "displacements" ]
python
train
criteo/gourde
gourde/gourde.py
https://github.com/criteo/gourde/blob/9a274e534a2af5d2b2a5e99f10c59010adb94863/gourde/gourde.py#L271-L281
def run(self, **options): """Run the application.""" if not self.is_setup: self.setup() if self.twisted: self.run_with_twisted(**options) elif self.gunicorn: self.run_with_gunicorn(**options) else: self.run_with_werkzeug(**options)
[ "def", "run", "(", "self", ",", "*", "*", "options", ")", ":", "if", "not", "self", ".", "is_setup", ":", "self", ".", "setup", "(", ")", "if", "self", ".", "twisted", ":", "self", ".", "run_with_twisted", "(", "*", "*", "options", ")", "elif", "...
Run the application.
[ "Run", "the", "application", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/layers/util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L219-L257
def deserialize_function(serial, function_type): """Deserializes the Keras-serialized function. (De)serializing Python functions from/to bytecode is unsafe. Therefore we also use the function's type as an anonymous function ('lambda') or named function in the Python environment ('function'). In the latter case, this lets us use the Python scope to obtain the function rather than reload it from bytecode. (Note that both cases are brittle!) Keras-deserialized functions do not perform lexical scoping. Any modules that the function requires must be imported within the function itself. This serialization mimicks the implementation in `tf.keras.layers.Lambda`. Args: serial: Serialized Keras object: typically a dict, string, or bytecode. function_type: Python string denoting 'function' or 'lambda'. Returns: function: Function the serialized Keras object represents. #### Examples ```python serial, function_type = serialize_function(lambda x: x) function = deserialize_function(serial, function_type) assert function(2.3) == 2.3 # function is identity ``` """ if function_type == 'function': # Simple lookup in custom objects function = tf.keras.utils.deserialize_keras_object(serial) elif function_type == 'lambda': # Unsafe deserialization from bytecode function = generic_utils.func_load(serial) else: raise TypeError('Unknown function type:', function_type) return function
[ "def", "deserialize_function", "(", "serial", ",", "function_type", ")", ":", "if", "function_type", "==", "'function'", ":", "# Simple lookup in custom objects", "function", "=", "tf", ".", "keras", ".", "utils", ".", "deserialize_keras_object", "(", "serial", ")",...
Deserializes the Keras-serialized function. (De)serializing Python functions from/to bytecode is unsafe. Therefore we also use the function's type as an anonymous function ('lambda') or named function in the Python environment ('function'). In the latter case, this lets us use the Python scope to obtain the function rather than reload it from bytecode. (Note that both cases are brittle!) Keras-deserialized functions do not perform lexical scoping. Any modules that the function requires must be imported within the function itself. This serialization mimicks the implementation in `tf.keras.layers.Lambda`. Args: serial: Serialized Keras object: typically a dict, string, or bytecode. function_type: Python string denoting 'function' or 'lambda'. Returns: function: Function the serialized Keras object represents. #### Examples ```python serial, function_type = serialize_function(lambda x: x) function = deserialize_function(serial, function_type) assert function(2.3) == 2.3 # function is identity ```
[ "Deserializes", "the", "Keras", "-", "serialized", "function", "." ]
python
test
CamDavidsonPilon/lifelines
lifelines/fitters/coxph_fitter.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/coxph_fitter.py#L984-L1069
def _compute_schoenfeld_within_strata(self, X, T, E, weights): """ A positive value of the residual shows an X value that is higher than expected at that death time. """ # TODO: the diff_against is gross # This uses Efron ties. n, d = X.shape if not np.any(E): # sometimes strata have no deaths. This means nothing is returned # in the below code. return np.zeros((n, d)) # Init risk and tie sums to zero risk_phi, tie_phi = 0, 0 risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d)) # Init number of ties and weights weight_count = 0.0 tie_count = 0 scores = weights * np.exp(np.dot(X, self.hazards_)) diff_against = [] schoenfeld_residuals = np.empty((0, d)) # Iterate backwards to utilize recursive relationship for i in range(n - 1, -1, -1): # Doing it like this to preserve shape ti = T[i] ei = E[i] xi = X[i : i + 1] score = scores[i : i + 1] w = weights[i] # Calculate phi values phi_i = score phi_x_i = phi_i * xi # Calculate sums of Risk set risk_phi = risk_phi + phi_i risk_phi_x = risk_phi_x + phi_x_i # Calculate sums of Ties, if this is an event diff_against.append((xi, ei)) if ei: tie_phi = tie_phi + phi_i tie_phi_x = tie_phi_x + phi_x_i # Keep track of count tie_count += 1 # aka death counts weight_count += w if i > 0 and T[i - 1] == ti: # There are more ties/members of the risk set continue elif tie_count == 0: for _ in diff_against: schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0) diff_against = [] continue # There was atleast one event and no more ties remain. Time to sum. weighted_mean = np.zeros((1, d)) for l in range(tie_count): numer = risk_phi_x - l * tie_phi_x / tie_count denom = risk_phi - l * tie_phi / tie_count weighted_mean += numer / (denom * tie_count) for xi, ei in diff_against: schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0) # reset tie values tie_count = 0 weight_count = 0.0 tie_phi = 0 tie_phi_x = np.zeros((1, d)) diff_against = [] return schoenfeld_residuals[::-1]
[ "def", "_compute_schoenfeld_within_strata", "(", "self", ",", "X", ",", "T", ",", "E", ",", "weights", ")", ":", "# TODO: the diff_against is gross", "# This uses Efron ties.", "n", ",", "d", "=", "X", ".", "shape", "if", "not", "np", ".", "any", "(", "E", ...
A positive value of the residual shows an X value that is higher than expected at that death time.
[ "A", "positive", "value", "of", "the", "residual", "shows", "an", "X", "value", "that", "is", "higher", "than", "expected", "at", "that", "death", "time", "." ]
python
train
uw-it-aca/uw-restclients
restclients/bridge/user.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/bridge/user.py#L107-L115
def get_user(uwnetid, include_course_summary=True): """ Return a list of BridgeUsers objects with custom fields """ url = author_uid_url(uwnetid) + "?%s" % CUSTOM_FIELD if include_course_summary: url = "%s&%s" % (url, COURSE_SUMMARY) resp = get_resource(url) return _process_json_resp_data(resp)
[ "def", "get_user", "(", "uwnetid", ",", "include_course_summary", "=", "True", ")", ":", "url", "=", "author_uid_url", "(", "uwnetid", ")", "+", "\"?%s\"", "%", "CUSTOM_FIELD", "if", "include_course_summary", ":", "url", "=", "\"%s&%s\"", "%", "(", "url", ",...
Return a list of BridgeUsers objects with custom fields
[ "Return", "a", "list", "of", "BridgeUsers", "objects", "with", "custom", "fields" ]
python
train
Azure/azure-cosmos-python
azure/cosmos/cosmos_client.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L2532-L2564
def Read(self, path, type, id, initial_headers, options=None): """Reads a Azure Cosmos resource and returns it. :param str path: :param str type: :param str id: :param dict initial_headers: :param dict options: The request options for the request. :return: The upserted Azure Cosmos resource. :rtype: dict """ if options is None: options = {} initial_headers = initial_headers or self.default_headers headers = base.GetHeaders(self, initial_headers, 'get', path, id, type, options) # Read will use ReadEndpoint since it uses GET operation request = request_object._RequestObject(type, documents._OperationType.Read) result, self.last_response_headers = self.__Get(path, request, headers) return result
[ "def", "Read", "(", "self", ",", "path", ",", "type", ",", "id", ",", "initial_headers", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "initial_headers", "=", "initial_headers", "or", "self", ".", ...
Reads a Azure Cosmos resource and returns it. :param str path: :param str type: :param str id: :param dict initial_headers: :param dict options: The request options for the request. :return: The upserted Azure Cosmos resource. :rtype: dict
[ "Reads", "a", "Azure", "Cosmos", "resource", "and", "returns", "it", "." ]
python
train
grantmcconnaughey/django-lazy-tags
lazy_tags/templatetags/lazy_tags.py
https://github.com/grantmcconnaughey/django-lazy-tags/blob/c24872c1d9f198abd20669c77380a923092374c2/lazy_tags/templatetags/lazy_tags.py#L12-L35
def lazy_tag(tag, *args, **kwargs): """ Lazily loads a template tag after the page has loaded. Requires jQuery (for now). Usage: {% load lazy_tags %} {% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %} Args: tag (str): the tag library and tag name separated by a period. For a template tag named `do_thing` in a tag library named `thing_tags` the `tag` argument would be `'thing_tags.doc_thing'`. *args: arguments to be passed to the template tag. **kwargs: keyword arguments to be passed to the template tag. """ tag_id = get_tag_id() set_lazy_tag_data(tag_id, tag, args, kwargs) return render_to_string('lazy_tags/lazy_tag.html', { 'tag_id': tag_id, 'STATIC_URL': settings.STATIC_URL, })
[ "def", "lazy_tag", "(", "tag", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tag_id", "=", "get_tag_id", "(", ")", "set_lazy_tag_data", "(", "tag_id", ",", "tag", ",", "args", ",", "kwargs", ")", "return", "render_to_string", "(", "'lazy_tags/laz...
Lazily loads a template tag after the page has loaded. Requires jQuery (for now). Usage: {% load lazy_tags %} {% lazy_tag 'tag_lib.tag_name' arg1 arg2 kw1='test' kw2='hello' %} Args: tag (str): the tag library and tag name separated by a period. For a template tag named `do_thing` in a tag library named `thing_tags` the `tag` argument would be `'thing_tags.doc_thing'`. *args: arguments to be passed to the template tag. **kwargs: keyword arguments to be passed to the template tag.
[ "Lazily", "loads", "a", "template", "tag", "after", "the", "page", "has", "loaded", ".", "Requires", "jQuery", "(", "for", "now", ")", "." ]
python
train
instacart/lore
lore/ansi.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/ansi.py#L157-L167
def foreground(color, content, readline=False): """ Color the text of the content :param color: pick a constant, any constant :type color: int :param content: Whatever you want to say... :type content: unicode :return: ansi string :rtype: unicode """ return encode(color, readline=readline) + content + encode(DEFAULT, readline=readline)
[ "def", "foreground", "(", "color", ",", "content", ",", "readline", "=", "False", ")", ":", "return", "encode", "(", "color", ",", "readline", "=", "readline", ")", "+", "content", "+", "encode", "(", "DEFAULT", ",", "readline", "=", "readline", ")" ]
Color the text of the content :param color: pick a constant, any constant :type color: int :param content: Whatever you want to say... :type content: unicode :return: ansi string :rtype: unicode
[ "Color", "the", "text", "of", "the", "content" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L294-L312
def compress_encoder_1d(x, hparams, name=None): """Encoder that compresses 1-D inputs by 2**num_compress_steps. Args: x: Tensor of shape [batch, length, channels]. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, latent_length, hparams.hidden_size], where latent_length is hparams.num_latents * length / 2**hparams.num_compress_steps. """ x = tf.expand_dims(x, axis=2) return compress_encoder(x, hparams, strides=(2, 1), kernel_size=(hparams.kernel_size, 1), name=name)
[ "def", "compress_encoder_1d", "(", "x", ",", "hparams", ",", "name", "=", "None", ")", ":", "x", "=", "tf", ".", "expand_dims", "(", "x", ",", "axis", "=", "2", ")", "return", "compress_encoder", "(", "x", ",", "hparams", ",", "strides", "=", "(", ...
Encoder that compresses 1-D inputs by 2**num_compress_steps. Args: x: Tensor of shape [batch, length, channels]. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, latent_length, hparams.hidden_size], where latent_length is hparams.num_latents * length / 2**hparams.num_compress_steps.
[ "Encoder", "that", "compresses", "1", "-", "D", "inputs", "by", "2", "**", "num_compress_steps", "." ]
python
train
lextoumbourou/txstripe
stripe/api_requestor.py
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/stripe/api_requestor.py#L176-L268
def request_raw(self, method, url, params=None, supplied_headers=None): """ Mechanism for issuing an API call """ from stripe import api_version if self.api_key: my_api_key = self.api_key else: from stripe import api_key my_api_key = api_key if my_api_key is None: raise error.AuthenticationError( 'No API key provided. (HINT: set your API key using ' '"stripe.api_key = <API-KEY>"). You can generate API keys ' 'from the Stripe web interface. See https://stripe.com/api ' 'for details, or email support@stripe.com if you have any ' 'questions.') abs_url = '%s%s' % (self.api_base, url) encoded_params = urllib.urlencode(list(_api_encode(params or {}))) if method == 'get' or method == 'delete': if params: abs_url = _build_api_url(abs_url, encoded_params) post_data = None elif method == 'post': if supplied_headers is not None and \ supplied_headers.get("Content-Type") == \ "multipart/form-data": generator = MultipartDataGenerator() generator.add_params(params or {}) post_data = generator.get_post_data() supplied_headers["Content-Type"] = \ "multipart/form-data; boundary=%s" % (generator.boundary,) else: post_data = encoded_params else: raise error.APIConnectionError( 'Unrecognized HTTP method %r. This may indicate a bug in the ' 'Stripe bindings. Please contact support@stripe.com for ' 'assistance.' % (method,)) ua = { 'bindings_version': version.VERSION, 'lang': 'python', 'publisher': 'stripe', 'httplib': self._client.name, } for attr, func in [['lang_version', platform.python_version], ['platform', platform.platform], ['uname', lambda: ' '.join(platform.uname())]]: try: val = func() except Exception as e: val = "!! 
%s" % (e,) ua[attr] = val headers = { 'X-Stripe-Client-User-Agent': util.json.dumps(ua), 'User-Agent': 'Stripe/v1 PythonBindings/%s' % (version.VERSION,), 'Authorization': 'Bearer %s' % (my_api_key,) } if self.stripe_account: headers['Stripe-Account'] = self.stripe_account if method == 'post': headers['Content-Type'] = 'application/x-www-form-urlencoded' if api_version is not None: headers['Stripe-Version'] = api_version if supplied_headers is not None: for key, value in supplied_headers.items(): headers[key] = value util.log_info('Request to Stripe api', method=method, path=abs_url) util.log_debug( 'Post details', post_data=post_data, api_version=api_version) rbody, rcode, rheaders = self._client.request( method, abs_url, headers, post_data) util.log_info( 'Stripe API response', path=abs_url, response_code=rcode) util.log_debug('API response body', body=rbody) if 'Request-Id' in rheaders: util.log_debug('Dashboard link for request', link=util.dashboard_link(rheaders['Request-Id'])) return rbody, rcode, rheaders, my_api_key
[ "def", "request_raw", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "supplied_headers", "=", "None", ")", ":", "from", "stripe", "import", "api_version", "if", "self", ".", "api_key", ":", "my_api_key", "=", "self", ".", "api_k...
Mechanism for issuing an API call
[ "Mechanism", "for", "issuing", "an", "API", "call" ]
python
train
mcieslik-mctp/papy
src/papy/core.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L618-L629
def save(self, filename): """ Saves pipeline as a Python source code file. Arguments: - filename(``path``) Path to save the pipeline source code. """ handle = open(filename, 'wb') handle.write(P_LAY % self._code()) handle.close()
[ "def", "save", "(", "self", ",", "filename", ")", ":", "handle", "=", "open", "(", "filename", ",", "'wb'", ")", "handle", ".", "write", "(", "P_LAY", "%", "self", ".", "_code", "(", ")", ")", "handle", ".", "close", "(", ")" ]
Saves pipeline as a Python source code file. Arguments: - filename(``path``) Path to save the pipeline source code.
[ "Saves", "pipeline", "as", "a", "Python", "source", "code", "file", ".", "Arguments", ":", "-", "filename", "(", "path", ")", "Path", "to", "save", "the", "pipeline", "source", "code", "." ]
python
train
KelSolaar/Manager
manager/components_manager.py
https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L262-L273
def require(self, value): """ Setter for **self.__require** attribute. :param value: Attribute value. :type value: tuple or list """ if value is not None: assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format( "require", value) self.__require = value
[ "def", "require", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "in", "(", "tuple", ",", "list", ")", ",", "\"'{0}' attribute: '{1}' type is not 'tuple' or 'list'!\"", ".", "format", "(", ...
Setter for **self.__require** attribute. :param value: Attribute value. :type value: tuple or list
[ "Setter", "for", "**", "self", ".", "__require", "**", "attribute", "." ]
python
train
xtuml/pyxtuml
bridgepoint/oal.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L955-L958
def t_TICKED_PHRASE(self, t): r"\'[^\']*\'" t.endlexpos = t.lexpos + len(t.value) return t
[ "def", "t_TICKED_PHRASE", "(", "self", ",", "t", ")", ":", "t", ".", "endlexpos", "=", "t", ".", "lexpos", "+", "len", "(", "t", ".", "value", ")", "return", "t" ]
r"\'[^\']*\
[ "r", "\\", "[", "^", "\\", "]", "*", "\\" ]
python
test
AguaClara/aguaclara
aguaclara/design/floc.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/floc.py#L278-L292
def design(self): """Returns the designed values. :returns: list of designed values (G, t, channel_W, obstacle_n) :rtype: int """ floc_dict = {'channel_n': self.channel_n, 'channel_L': self.channel_L, 'channel_W': self.channel_W, 'baffle_S': self.baffle_S, 'obstacle_n': self.obstacle_n, 'G': self.vel_grad_avg, 't': self.retention_time, 'expansion_max_H': self.expansion_max_H, 'drain_ND': self.drain_ND} return floc_dict
[ "def", "design", "(", "self", ")", ":", "floc_dict", "=", "{", "'channel_n'", ":", "self", ".", "channel_n", ",", "'channel_L'", ":", "self", ".", "channel_L", ",", "'channel_W'", ":", "self", ".", "channel_W", ",", "'baffle_S'", ":", "self", ".", "baffl...
Returns the designed values. :returns: list of designed values (G, t, channel_W, obstacle_n) :rtype: int
[ "Returns", "the", "designed", "values", ".", ":", "returns", ":", "list", "of", "designed", "values", "(", "G", "t", "channel_W", "obstacle_n", ")", ":", "rtype", ":", "int" ]
python
train
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1444-L1452
def union(self, other): """ Return a new :class:`DataFrame` containing union of rows in this and another frame. This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. Also as standard in SQL, this function resolves columns by position (not by name). """ return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
[ "def", "union", "(", "self", ",", "other", ")", ":", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "union", "(", "other", ".", "_jdf", ")", ",", "self", ".", "sql_ctx", ")" ]
Return a new :class:`DataFrame` containing union of rows in this and another frame. This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. Also as standard in SQL, this function resolves columns by position (not by name).
[ "Return", "a", "new", ":", "class", ":", "DataFrame", "containing", "union", "of", "rows", "in", "this", "and", "another", "frame", "." ]
python
train
jstitch/MambuPy
MambuPy/rest/mambuuser.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambuuser.py#L76-L97
def setRoles(self, *args, **kwargs): """Adds the role assigned to this user to a 'role' field. Depends on the 'role' field that comes with a fullDetails=True build of the MambuUser. Returns the number of requests done to Mambu. """ try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 except AttributeError as ae: from .mamburoles import MambuRole self.mamburoleclass = MambuRole try: role = self.mamburoleclass(entid=self['role']['encodedKey'], *args, **kwargs) except KeyError: return 0 self['role']['role'] = role return 1
[ "def", "setRoles", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "role", "=", "self", ".", "mamburoleclass", "(", "entid", "=", "self", "[", "'role'", "]", "[", "'encodedKey'", "]", ",", "*", "args", ",", "*", "*",...
Adds the role assigned to this user to a 'role' field. Depends on the 'role' field that comes with a fullDetails=True build of the MambuUser. Returns the number of requests done to Mambu.
[ "Adds", "the", "role", "assigned", "to", "this", "user", "to", "a", "role", "field", "." ]
python
train
kemingy/cnprep
cnprep/extractor.py
https://github.com/kemingy/cnprep/blob/076ea185167adb7e652bea3b81fb6830e162e880/cnprep/extractor.py#L85-L96
def _clear(self): """ clear attr """ self._email = [] self._telephone = [] self._QQ = [] self._wechat = [] self._url = [] self._emoji = [] self._tex = [] self._blur = []
[ "def", "_clear", "(", "self", ")", ":", "self", ".", "_email", "=", "[", "]", "self", ".", "_telephone", "=", "[", "]", "self", ".", "_QQ", "=", "[", "]", "self", ".", "_wechat", "=", "[", "]", "self", ".", "_url", "=", "[", "]", "self", ".",...
clear attr
[ "clear", "attr" ]
python
train
NarrativeScience/lsi
src/lsi/utils/table.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/table.py#L28-L53
def render_columns(columns, write_borders=True, column_colors=None): """ Renders a list of columns. :param columns: A list of columns, where each column is a list of strings. :type columns: [[``str``]] :param write_borders: Whether to write the top and bottom borders. :type write_borders: ``bool`` :param column_colors: A list of coloring functions, one for each column. Optional. :type column_colors: [``str`` -> ``str``] or ``NoneType`` :return: The rendered columns. :rtype: ``str`` """ if column_colors is not None and len(column_colors) != len(columns): raise ValueError('Wrong number of column colors') widths = [max(len(cell) for cell in column) for column in columns] max_column_length = max(len(column) for column in columns) result = '\n'.join(render_row(i, columns, widths, column_colors) for i in range(max_column_length)) if write_borders: border = '+%s+' % '|'.join('-' * (w + 2) for w in widths) return '%s\n%s\n%s' % (border, result, border) else: return result
[ "def", "render_columns", "(", "columns", ",", "write_borders", "=", "True", ",", "column_colors", "=", "None", ")", ":", "if", "column_colors", "is", "not", "None", "and", "len", "(", "column_colors", ")", "!=", "len", "(", "columns", ")", ":", "raise", ...
Renders a list of columns. :param columns: A list of columns, where each column is a list of strings. :type columns: [[``str``]] :param write_borders: Whether to write the top and bottom borders. :type write_borders: ``bool`` :param column_colors: A list of coloring functions, one for each column. Optional. :type column_colors: [``str`` -> ``str``] or ``NoneType`` :return: The rendered columns. :rtype: ``str``
[ "Renders", "a", "list", "of", "columns", "." ]
python
test
bitesofcode/projexui
projexui/widgets/xnodewidget/xnodescene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnodescene.py#L487-L501
def calculateBoundingRect(self, nodes): """ Returns the bounding rectangle for the inputed nodes. :param nodes | [<XNode>, ..] """ out = QRectF() for node in nodes: rect = node.rect() pos = node.pos() bounding = QRectF(pos.x(), pos.y(), rect.width(), rect.height()) out = out.united(bounding) return out
[ "def", "calculateBoundingRect", "(", "self", ",", "nodes", ")", ":", "out", "=", "QRectF", "(", ")", "for", "node", "in", "nodes", ":", "rect", "=", "node", ".", "rect", "(", ")", "pos", "=", "node", ".", "pos", "(", ")", "bounding", "=", "QRectF",...
Returns the bounding rectangle for the inputed nodes. :param nodes | [<XNode>, ..]
[ "Returns", "the", "bounding", "rectangle", "for", "the", "inputed", "nodes", ".", ":", "param", "nodes", "|", "[", "<XNode", ">", "..", "]" ]
python
train
apache/spark
python/pyspark/sql/column.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L121-L129
def _reverse_op(name, doc="binary operator"): """ Create a method for binary operator (this object is on right side) """ def _(self, other): jother = _create_column_from_literal(other) jc = getattr(jother, name)(self._jc) return Column(jc) _.__doc__ = doc return _
[ "def", "_reverse_op", "(", "name", ",", "doc", "=", "\"binary operator\"", ")", ":", "def", "_", "(", "self", ",", "other", ")", ":", "jother", "=", "_create_column_from_literal", "(", "other", ")", "jc", "=", "getattr", "(", "jother", ",", "name", ")", ...
Create a method for binary operator (this object is on right side)
[ "Create", "a", "method", "for", "binary", "operator", "(", "this", "object", "is", "on", "right", "side", ")" ]
python
train
dmlc/gluon-nlp
src/gluonnlp/data/transforms.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L960-L1007
def _tokenize_wordpiece(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens. """ output_tokens = [] for token in self.basic_tokenizer._whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.vocab.unknown_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.vocab.unknown_token) else: output_tokens.extend(sub_tokens) return output_tokens
[ "def", "_tokenize_wordpiece", "(", "self", ",", "text", ")", ":", "output_tokens", "=", "[", "]", "for", "token", "in", "self", ".", "basic_tokenizer", ".", "_whitespace_tokenize", "(", "text", ")", ":", "chars", "=", "list", "(", "token", ")", "if", "le...
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BERTBasicTokenizer. Returns: A list of wordpiece tokens.
[ "Tokenizes", "a", "piece", "of", "text", "into", "its", "word", "pieces", "." ]
python
train
Equitable/trump
trump/orm.py
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1205-L1257
def check_validity(self, checks=None, report=True): """ Runs a Symbol's validity checks. Parameters ---------- checks : str, [str,], optional Only run certain checks. report : bool, optional If set to False, the method will return only the result of the check checks (True/False). Set to True, to have a SymbolReport returned as well. Returns ------- Bool, or a Tuple of the form (Bool, SymbolReport) """ if report: reportpoints = [] allchecks = [] checks_specified=False if isinstance(checks, (str, unicode)): checks = [checks] checks_specified = True elif isinstance(checks, (list, tuple)): checks_specified = True else: checks = [] for val in self.validity: if (val.validator in checks) or (not checks_specified): ValCheck = validitychecks[val.validator] anum = ValCheck.__init__.func_code.co_argcount - 2 args = [] for arg in SymbolValidity.argnames: args.append(getattr(val, arg)) valid = ValCheck(self.datatable_df, *args[:anum]) res = valid.result allchecks.append(res) rp = ReportPoint('validation', val.validator, res, str(args[:anum])) reportpoints.append(rp) if report: return all(allchecks), reportpoints else: return all(allchecks)
[ "def", "check_validity", "(", "self", ",", "checks", "=", "None", ",", "report", "=", "True", ")", ":", "if", "report", ":", "reportpoints", "=", "[", "]", "allchecks", "=", "[", "]", "checks_specified", "=", "False", "if", "isinstance", "(", "checks", ...
Runs a Symbol's validity checks. Parameters ---------- checks : str, [str,], optional Only run certain checks. report : bool, optional If set to False, the method will return only the result of the check checks (True/False). Set to True, to have a SymbolReport returned as well. Returns ------- Bool, or a Tuple of the form (Bool, SymbolReport)
[ "Runs", "a", "Symbol", "s", "validity", "checks", ".", "Parameters", "----------", "checks", ":", "str", "[", "str", "]", "optional", "Only", "run", "certain", "checks", ".", "report", ":", "bool", "optional", "If", "set", "to", "False", "the", "method", ...
python
train
pandas-dev/pandas
pandas/core/tools/datetimes.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L97-L132
def _return_parsed_timezone_results(result, timezones, box, tz, name): """ Return results from array_strptime if a %z or %Z directive was passed. Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False """ if tz is not None: raise ValueError("Cannot pass a tz argument when " "parsing strings with timezone " "information.") tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]) if box: from pandas import Index return Index(tz_results, name=name) return tz_results
[ "def", "_return_parsed_timezone_results", "(", "result", ",", "timezones", ",", "box", ",", "tz", ",", "name", ")", ":", "if", "tz", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass a tz argument when \"", "\"parsing strings with timezone \"", "\...
Return results from array_strptime if a %z or %Z directive was passed. Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False
[ "Return", "results", "from", "array_strptime", "if", "a", "%z", "or", "%Z", "directive", "was", "passed", "." ]
python
train
jongracecox/anybadge
anybadge.py
https://github.com/jongracecox/anybadge/blob/1850a9580697e019c601d09f5de490056fed2bab/anybadge.py#L183-L191
def value_is_int(self): """Identify whether the value text is an int.""" try: a = float(self.value) b = int(a) except ValueError: return False else: return a == b
[ "def", "value_is_int", "(", "self", ")", ":", "try", ":", "a", "=", "float", "(", "self", ".", "value", ")", "b", "=", "int", "(", "a", ")", "except", "ValueError", ":", "return", "False", "else", ":", "return", "a", "==", "b" ]
Identify whether the value text is an int.
[ "Identify", "whether", "the", "value", "text", "is", "an", "int", "." ]
python
train
timofurrer/colorful
examples/solarized.py
https://github.com/timofurrer/colorful/blob/919fa6da17865cc5e01e6b16119193a97d180dc9/examples/solarized.py#L17-L49
def show(): """ Show the modifiers and colors """ with colorful.with_style('solarized') as c: # modifiers sys.stdout.write(c.bold('bold') + ' ') sys.stdout.write(c.dimmed('dimmed') + ' ') sys.stdout.write(c.italic('italic') + ' ') sys.stdout.write(c.underlined('underlined') + ' ') sys.stdout.write(c.inversed('inversed') + ' ') sys.stdout.write(c.concealed('concealed') + ' ') sys.stdout.write(c.struckthrough('struckthrough') + '\n') # foreground colors sys.stdout.write(c.yellow('yellow') + ' ') sys.stdout.write(c.red('orange') + ' ') sys.stdout.write(c.red('red') + ' ') sys.stdout.write(c.magenta('magenta') + ' ') sys.stdout.write(c.magenta('violet') + ' ') sys.stdout.write(c.blue('blue') + ' ') sys.stdout.write(c.cyan('cyan') + ' ') sys.stdout.write(c.green('green') + '\n') # background colors sys.stdout.write(c.on_yellow('yellow') + ' ') sys.stdout.write(c.on_red('orange') + ' ') sys.stdout.write(c.on_red('red') + ' ') sys.stdout.write(c.on_magenta('magenta') + ' ') sys.stdout.write(c.on_magenta('violet') + ' ') sys.stdout.write(c.on_blue('blue') + ' ') sys.stdout.write(c.on_cyan('cyan') + ' ') sys.stdout.write(c.on_green('green') + '\n')
[ "def", "show", "(", ")", ":", "with", "colorful", ".", "with_style", "(", "'solarized'", ")", "as", "c", ":", "# modifiers", "sys", ".", "stdout", ".", "write", "(", "c", ".", "bold", "(", "'bold'", ")", "+", "' '", ")", "sys", ".", "stdout", ".", ...
Show the modifiers and colors
[ "Show", "the", "modifiers", "and", "colors" ]
python
valid
tanghaibao/jcvi
jcvi/formats/sbt.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/sbt.py#L63-L97
def parse_names(lstfile): """ This is the alternative format `lstfile`. In this format, there are two sections, starting with [Sequence] and [Manuscript], respectively, then followed by authors separated by comma. """ from jcvi.formats.base import read_block fp = open(lstfile) all_authors = [] for header, seq in read_block(fp, "["): seq = " ".join(seq) authors = [] for au in seq.split(","): au = au.strip() if not au: continue au = string.translate(au, None, string.digits) #au = au.replace("-", '') authors.append(au) all_authors.append(authors) out = [] for authors in all_authors: blocks = [] for au in authors: last, first, initials = get_name_parts(au) suffix = "" nameblock = NameTemplate.format(last=last, first=first, initials=initials, suffix=suffix) blocks.append(nameblock) bigblock = ",\n".join(blocks) out.append(bigblock) return out
[ "def", "parse_names", "(", "lstfile", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "read_block", "fp", "=", "open", "(", "lstfile", ")", "all_authors", "=", "[", "]", "for", "header", ",", "seq", "in", "read_block", "(", "fp", ",", ...
This is the alternative format `lstfile`. In this format, there are two sections, starting with [Sequence] and [Manuscript], respectively, then followed by authors separated by comma.
[ "This", "is", "the", "alternative", "format", "lstfile", ".", "In", "this", "format", "there", "are", "two", "sections", "starting", "with", "[", "Sequence", "]", "and", "[", "Manuscript", "]", "respectively", "then", "followed", "by", "authors", "separated", ...
python
train
qacafe/cdrouter.py
cdrouter/filters.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/filters.py#L190-L200
def contains(self, *args): """Construct an array contains (``@>``) filter. :param args: Filter values :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field """ self.op = '@>' self.negate_op = None self.value = self._array_value(args) return self
[ "def", "contains", "(", "self", ",", "*", "args", ")", ":", "self", ".", "op", "=", "'@>'", "self", ".", "negate_op", "=", "None", "self", ".", "value", "=", "self", ".", "_array_value", "(", "args", ")", "return", "self" ]
Construct an array contains (``@>``) filter. :param args: Filter values :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field
[ "Construct", "an", "array", "contains", "(", "@", ">", ")", "filter", "." ]
python
train
bjmorgan/lattice_mc
lattice_mc/lattice.py
https://github.com/bjmorgan/lattice_mc/blob/7fa7be85f2f23a2d8dfd0830ecdb89d0dbf2bfd5/lattice_mc/lattice.py#L407-L447
def connected_sites( self, site_labels=None ): """ Searches the lattice to find sets of sites that are contiguously neighbouring. Mutually exclusive sets of contiguous sites are returned as Cluster objects. Args: site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search. This can be a list:: [ 'A', 'B' ] a set:: ( 'A', 'B' ) or a string:: 'A'. Returns: (List(Cluster)): List of Cluster objects for groups of contiguous sites. """ if site_labels: selected_sites = self.select_sites( site_labels ) else: selected_sites = self.sites initial_clusters = [ cluster.Cluster( [ site ] ) for site in selected_sites ] if site_labels: blocking_sites = self.site_labels - set( site_labels ) for c in initial_clusters: c.remove_sites_from_neighbours( blocking_sites ) final_clusters = [] while initial_clusters: # loop until initial_clusters is empty this_cluster = initial_clusters.pop(0) while this_cluster.neighbours: neighbouring_clusters = [ c for c in initial_clusters if this_cluster.is_neighbouring( c ) ] for nc in neighbouring_clusters: initial_clusters.remove( nc ) this_cluster = this_cluster.merge( nc ) final_clusters.append( this_cluster ) return final_clusters
[ "def", "connected_sites", "(", "self", ",", "site_labels", "=", "None", ")", ":", "if", "site_labels", ":", "selected_sites", "=", "self", ".", "select_sites", "(", "site_labels", ")", "else", ":", "selected_sites", "=", "self", ".", "sites", "initial_clusters...
Searches the lattice to find sets of sites that are contiguously neighbouring. Mutually exclusive sets of contiguous sites are returned as Cluster objects. Args: site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search. This can be a list:: [ 'A', 'B' ] a set:: ( 'A', 'B' ) or a string:: 'A'. Returns: (List(Cluster)): List of Cluster objects for groups of contiguous sites.
[ "Searches", "the", "lattice", "to", "find", "sets", "of", "sites", "that", "are", "contiguously", "neighbouring", ".", "Mutually", "exclusive", "sets", "of", "contiguous", "sites", "are", "returned", "as", "Cluster", "objects", "." ]
python
train
dschreij/python-mediadecoder
mediadecoder/timer.py
https://github.com/dschreij/python-mediadecoder/blob/f01b02d790f2abc52d9792e43076cf4cb7d3ce51/mediadecoder/timer.py#L139-L152
def fps(self,value): """ Sets the frames per second of the current movie the clock is used for. Parameters ---------- value : float The fps value. """ if not value is None: if not type(value) == float: raise ValueError("fps needs to be specified as a float") if value<1.0: raise ValueError("fps needs to be greater than 1.0") self.__fps = value
[ "def", "fps", "(", "self", ",", "value", ")", ":", "if", "not", "value", "is", "None", ":", "if", "not", "type", "(", "value", ")", "==", "float", ":", "raise", "ValueError", "(", "\"fps needs to be specified as a float\"", ")", "if", "value", "<", "1.0"...
Sets the frames per second of the current movie the clock is used for. Parameters ---------- value : float The fps value.
[ "Sets", "the", "frames", "per", "second", "of", "the", "current", "movie", "the", "clock", "is", "used", "for", "." ]
python
train
isislovecruft/python-gnupg
pretty_bad_protocol/_trust.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_trust.py#L33-L40
def _create_trustdb(cls): """Create the trustdb file in our homedir, if it doesn't exist.""" trustdb = os.path.join(cls.homedir, 'trustdb.gpg') if not os.path.isfile(trustdb): log.info("GnuPG complained that your trustdb file was missing. %s" % "This is likely due to changing to a new homedir.") log.info("Creating trustdb.gpg file in your GnuPG homedir.") cls.fix_trustdb(trustdb)
[ "def", "_create_trustdb", "(", "cls", ")", ":", "trustdb", "=", "os", ".", "path", ".", "join", "(", "cls", ".", "homedir", ",", "'trustdb.gpg'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "trustdb", ")", ":", "log", ".", "info", "(",...
Create the trustdb file in our homedir, if it doesn't exist.
[ "Create", "the", "trustdb", "file", "in", "our", "homedir", "if", "it", "doesn", "t", "exist", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/ceph.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L1429-L1459
def is_request_complete_for_rid(request, rid): """Check if a given request has been completed on the given relation @param request: A CephBrokerRq object @param rid: Relation ID """ broker_key = get_broker_rsp_key() for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) if rdata.get(broker_key): rsp = CephBrokerRsp(rdata.get(broker_key)) if rsp.request_id == request.request_id: if not rsp.exit_code: return True else: # The remote unit sent no reply targeted at this unit so either the # remote ceph cluster does not support unit targeted replies or it # has not processed our request yet. if rdata.get('broker_rsp'): request_data = json.loads(rdata['broker_rsp']) if request_data.get('request-id'): log('Ignoring legacy broker_rsp without unit key as remote ' 'service supports unit specific replies', level=DEBUG) else: log('Using legacy broker_rsp as remote service does not ' 'supports unit specific replies', level=DEBUG) rsp = CephBrokerRsp(rdata['broker_rsp']) if not rsp.exit_code: return True return False
[ "def", "is_request_complete_for_rid", "(", "request", ",", "rid", ")", ":", "broker_key", "=", "get_broker_rsp_key", "(", ")", "for", "unit", "in", "related_units", "(", "rid", ")", ":", "rdata", "=", "relation_get", "(", "rid", "=", "rid", ",", "unit", "=...
Check if a given request has been completed on the given relation @param request: A CephBrokerRq object @param rid: Relation ID
[ "Check", "if", "a", "given", "request", "has", "been", "completed", "on", "the", "given", "relation" ]
python
train
pmacosta/peng
peng/wave_functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L290-L329
def average(wave, indep_min=None, indep_max=None): r""" Return the running average of a waveform's dependent variable vector. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.average :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) .. [[[end]]] """ ret = copy.copy(wave) _bound_waveform(ret, indep_min, indep_max) area = _running_area(ret._indep_vector, ret._dep_vector) area[0] = ret._dep_vector[0] deltas = ret._indep_vector - ret._indep_vector[0] deltas[0] = 1.0 ret._dep_vector = np.divide(area, deltas) ret.dep_name = "average({0})".format(ret._dep_name) return ret
[ "def", "average", "(", "wave", ",", "indep_min", "=", "None", ",", "indep_max", "=", "None", ")", ":", "ret", "=", "copy", ".", "copy", "(", "wave", ")", "_bound_waveform", "(", "ret", ",", "indep_min", ",", "indep_max", ")", "area", "=", "_running_are...
r""" Return the running average of a waveform's dependent variable vector. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.average :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) .. [[[end]]]
[ "r", "Return", "the", "running", "average", "of", "a", "waveform", "s", "dependent", "variable", "vector", "." ]
python
test
justquick/django-native-tags
native_tags/contrib/generic_markup.py
https://github.com/justquick/django-native-tags/blob/d40b976ee1cb13faeb04f0dedf02933d4274abf2/native_tags/contrib/generic_markup.py#L12-L21
def apply_markup(value, arg=None): """ Applies text-to-HTML conversion. Takes an optional argument to specify the name of a filter to use. """ if arg is not None: return formatter(value, filter_name=arg) return formatter(value)
[ "def", "apply_markup", "(", "value", ",", "arg", "=", "None", ")", ":", "if", "arg", "is", "not", "None", ":", "return", "formatter", "(", "value", ",", "filter_name", "=", "arg", ")", "return", "formatter", "(", "value", ")" ]
Applies text-to-HTML conversion. Takes an optional argument to specify the name of a filter to use.
[ "Applies", "text", "-", "to", "-", "HTML", "conversion", ".", "Takes", "an", "optional", "argument", "to", "specify", "the", "name", "of", "a", "filter", "to", "use", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/service_endpoint/service_endpoint_client.py#L243-L259
def get_service_endpoint_types(self, type=None, scheme=None): """GetServiceEndpointTypes. [Preview API] Get service endpoint types. :param str type: Type of service endpoint. :param str scheme: Scheme of service endpoint. :rtype: [ServiceEndpointType] """ query_parameters = {} if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') if scheme is not None: query_parameters['scheme'] = self._serialize.query('scheme', scheme, 'str') response = self._send(http_method='GET', location_id='5a7938a4-655e-486c-b562-b78c54a7e87b', version='5.0-preview.1', query_parameters=query_parameters) return self._deserialize('[ServiceEndpointType]', self._unwrap_collection(response))
[ "def", "get_service_endpoint_types", "(", "self", ",", "type", "=", "None", ",", "scheme", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "type", "is", "not", "None", ":", "query_parameters", "[", "'type'", "]", "=", "self", ".", "_seria...
GetServiceEndpointTypes. [Preview API] Get service endpoint types. :param str type: Type of service endpoint. :param str scheme: Scheme of service endpoint. :rtype: [ServiceEndpointType]
[ "GetServiceEndpointTypes", ".", "[", "Preview", "API", "]", "Get", "service", "endpoint", "types", ".", ":", "param", "str", "type", ":", "Type", "of", "service", "endpoint", ".", ":", "param", "str", "scheme", ":", "Scheme", "of", "service", "endpoint", "...
python
train
viniciuschiele/flask-io
flask_io/actions.py
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/actions.py#L48-L59
def perform_authorization(self): """ Check if the request should be permitted. Raises an appropriate exception if the request is not permitted. """ for permission in self.permissions: if not permission.has_permission(): if request.user: raise errors.PermissionDenied() else: raise errors.NotAuthenticated()
[ "def", "perform_authorization", "(", "self", ")", ":", "for", "permission", "in", "self", ".", "permissions", ":", "if", "not", "permission", ".", "has_permission", "(", ")", ":", "if", "request", ".", "user", ":", "raise", "errors", ".", "PermissionDenied",...
Check if the request should be permitted. Raises an appropriate exception if the request is not permitted.
[ "Check", "if", "the", "request", "should", "be", "permitted", ".", "Raises", "an", "appropriate", "exception", "if", "the", "request", "is", "not", "permitted", "." ]
python
train
influxdata/influxdb-python
influxdb/resultset.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/resultset.py#L182-L192
def _get_points_for_series(self, series): """Return generator of dict from columns and values of a series. :param series: One series :return: Generator of dicts """ for point in series.get('values', []): yield self.point_from_cols_vals( series['columns'], point )
[ "def", "_get_points_for_series", "(", "self", ",", "series", ")", ":", "for", "point", "in", "series", ".", "get", "(", "'values'", ",", "[", "]", ")", ":", "yield", "self", ".", "point_from_cols_vals", "(", "series", "[", "'columns'", "]", ",", "point",...
Return generator of dict from columns and values of a series. :param series: One series :return: Generator of dicts
[ "Return", "generator", "of", "dict", "from", "columns", "and", "values", "of", "a", "series", "." ]
python
train
StellarCN/py-stellar-base
stellar_base/horizon.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/horizon.py#L600-L618
def ledger_transactions(self, ledger_id, cursor=None, order='asc', include_failed=False, limit=10): """This endpoint represents all transactions in a given ledger. `GET /ledgers/{id}/transactions{?cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-ledger.html>`_ :param int ledger_id: The id of the ledger to look up. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool include_failed: Set to `True` to include failed transactions in results. :return: The transactions contained in a single ledger. :rtype: dict """ endpoint = '/ledgers/{ledger_id}/transactions'.format( ledger_id=ledger_id) params = self.__query_params(cursor=cursor, order=order, limit=limit, include_failed=include_failed) return self.query(endpoint, params)
[ "def", "ledger_transactions", "(", "self", ",", "ledger_id", ",", "cursor", "=", "None", ",", "order", "=", "'asc'", ",", "include_failed", "=", "False", ",", "limit", "=", "10", ")", ":", "endpoint", "=", "'/ledgers/{ledger_id}/transactions'", ".", "format", ...
This endpoint represents all transactions in a given ledger. `GET /ledgers/{id}/transactions{?cursor,limit,order} <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-ledger.html>`_ :param int ledger_id: The id of the ledger to look up. :param int cursor: A paging token, specifying where to start returning records from. :param str order: The order in which to return rows, "asc" or "desc". :param int limit: Maximum number of records to return. :param bool include_failed: Set to `True` to include failed transactions in results. :return: The transactions contained in a single ledger. :rtype: dict
[ "This", "endpoint", "represents", "all", "transactions", "in", "a", "given", "ledger", "." ]
python
train
apache/incubator-heron
heronpy/connectors/textfiles/textfilesgenerator.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/connectors/textfiles/textfilesgenerator.py#L35-L41
def setup(self, context): """Implements TextFile Generator's setup method""" myindex = context.get_partition_index() self._files_to_consume = self._files[myindex::context.get_num_partitions()] self.logger.info("TextFileSpout files to consume %s" % self._files_to_consume) self._lines_to_consume = self._get_next_lines() self._emit_count = 0
[ "def", "setup", "(", "self", ",", "context", ")", ":", "myindex", "=", "context", ".", "get_partition_index", "(", ")", "self", ".", "_files_to_consume", "=", "self", ".", "_files", "[", "myindex", ":", ":", "context", ".", "get_num_partitions", "(", ")", ...
Implements TextFile Generator's setup method
[ "Implements", "TextFile", "Generator", "s", "setup", "method" ]
python
valid
PyconUK/ConferenceScheduler
src/conference_scheduler/lp_problem/constraints.py
https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/lp_problem/constraints.py#L75-L87
def _upper_bound_on_event_overflow( events, slots, X, beta, summation_type=None, **kwargs ): """ This is an artificial constraint that is used by the objective function aiming to minimise the maximum overflow in a slot. """ label = 'Artificial upper bound constraint' for row, event in enumerate(events): for col, slot in enumerate(slots): yield Constraint( f'{label} - slot: {col} and event: {row}', event.demand * X[row, col] - slot.capacity <= beta)
[ "def", "_upper_bound_on_event_overflow", "(", "events", ",", "slots", ",", "X", ",", "beta", ",", "summation_type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "label", "=", "'Artificial upper bound constraint'", "for", "row", ",", "event", "in", "enumerat...
This is an artificial constraint that is used by the objective function aiming to minimise the maximum overflow in a slot.
[ "This", "is", "an", "artificial", "constraint", "that", "is", "used", "by", "the", "objective", "function", "aiming", "to", "minimise", "the", "maximum", "overflow", "in", "a", "slot", "." ]
python
train
lucastheis/django-publications
publications/templatetags/publication_extras.py
https://github.com/lucastheis/django-publications/blob/5a75cf88cf794937711b6850ff2acb07fe005f08/publications/templatetags/publication_extras.py#L31-L47
def get_publications(context, template='publications/publications.html'): """ Get all publications. """ types = Type.objects.filter(hidden=False) publications = Publication.objects.select_related() publications = publications.filter(external=False, type__in=types) publications = publications.order_by('-year', '-month', '-id') if not publications: return '' # load custom links and files populate(publications) return render_template(template, context['request'], {'publications': publications})
[ "def", "get_publications", "(", "context", ",", "template", "=", "'publications/publications.html'", ")", ":", "types", "=", "Type", ".", "objects", ".", "filter", "(", "hidden", "=", "False", ")", "publications", "=", "Publication", ".", "objects", ".", "sele...
Get all publications.
[ "Get", "all", "publications", "." ]
python
valid
juju/charm-helpers
charmhelpers/contrib/network/ip.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/network/ip.py#L542-L554
def port_has_listener(address, port): """ Returns True if the address:port is open and being listened to, else False. @param address: an IP address or hostname @param port: integer port Note calls 'zc' via a subprocess shell """ cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) return not(bool(result))
[ "def", "port_has_listener", "(", "address", ",", "port", ")", ":", "cmd", "=", "[", "'nc'", ",", "'-z'", ",", "address", ",", "str", "(", "port", ")", "]", "result", "=", "subprocess", ".", "call", "(", "cmd", ")", "return", "not", "(", "bool", "("...
Returns True if the address:port is open and being listened to, else False. @param address: an IP address or hostname @param port: integer port Note calls 'zc' via a subprocess shell
[ "Returns", "True", "if", "the", "address", ":", "port", "is", "open", "and", "being", "listened", "to", "else", "False", "." ]
python
train
wbond/oscrypto
oscrypto/_osx/symmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/symmetric.py#L271-L312
def rc2_cbc_pkcs5_encrypt(key, data, iv): """ Encrypts plaintext using RC2 with a 64 bit key :param key: The encryption key - a byte string 8 bytes long :param data: The plaintext - a byte string :param iv: The 8-byte initialization vector to use - a byte string - set as None to generate an appropriate one :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A tuple of two byte strings (iv, ciphertext) """ if len(key) < 5 or len(key) > 16: raise ValueError(pretty_message( ''' key must be 5 to 16 bytes (40 to 128 bits) long - is %s ''', len(key) )) if not iv: iv = rand_bytes(8) elif len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return (iv, _encrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key))
[ "def", "rc2_cbc_pkcs5_encrypt", "(", "key", ",", "data", ",", "iv", ")", ":", "if", "len", "(", "key", ")", "<", "5", "or", "len", "(", "key", ")", ">", "16", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n key must be 5 to 16 byt...
Encrypts plaintext using RC2 with a 64 bit key :param key: The encryption key - a byte string 8 bytes long :param data: The plaintext - a byte string :param iv: The 8-byte initialization vector to use - a byte string - set as None to generate an appropriate one :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A tuple of two byte strings (iv, ciphertext)
[ "Encrypts", "plaintext", "using", "RC2", "with", "a", "64", "bit", "key" ]
python
valid
quantopian/zipline
zipline/pipeline/loaders/blaze/core.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/blaze/core.py#L847-L879
def register_dataset(self, dataset, expr, deltas=None, checkpoints=None, odo_kwargs=None): """Explicitly map a datset to a collection of blaze expressions. Parameters ---------- dataset : DataSet The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze` """ expr_data = ExprData( expr, deltas, checkpoints, odo_kwargs, ) for column in dataset.columns: self._table_expressions[column] = expr_data
[ "def", "register_dataset", "(", "self", ",", "dataset", ",", "expr", ",", "deltas", "=", "None", ",", "checkpoints", "=", "None", ",", "odo_kwargs", "=", "None", ")", ":", "expr_data", "=", "ExprData", "(", "expr", ",", "deltas", ",", "checkpoints", ",",...
Explicitly map a datset to a collection of blaze expressions. Parameters ---------- dataset : DataSet The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze`
[ "Explicitly", "map", "a", "datset", "to", "a", "collection", "of", "blaze", "expressions", "." ]
python
train
fracpete/python-weka-wrapper3
python/weka/classifiers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/classifiers.py#L1221-L1237
def summary(self, title=None, complexity=False): """ Generates a summary. :param title: optional title :type title: str :param complexity: whether to print the complexity information as well :type complexity: bool :return: the summary :rtype: str """ if title is None: return javabridge.call( self.jobject, "toSummaryString", "()Ljava/lang/String;") else: return javabridge.call( self.jobject, "toSummaryString", "(Ljava/lang/String;Z)Ljava/lang/String;", title, complexity)
[ "def", "summary", "(", "self", ",", "title", "=", "None", ",", "complexity", "=", "False", ")", ":", "if", "title", "is", "None", ":", "return", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"toSummaryString\"", ",", "\"()Ljava/lang/String...
Generates a summary. :param title: optional title :type title: str :param complexity: whether to print the complexity information as well :type complexity: bool :return: the summary :rtype: str
[ "Generates", "a", "summary", "." ]
python
train
codingjoe/ssdp
ssdp/__init__.py
https://github.com/codingjoe/ssdp/blob/84ff667c792608b221aa726cfd106b554884063d/ssdp/__init__.py#L39-L50
def parse_headers(cls, msg): """ Parse HTTP headers. Args: msg (str): HTTP message. Returns: (List[Tuple[str, str]): List of header tuples. """ return list(email.parser.Parser().parsestr(msg).items())
[ "def", "parse_headers", "(", "cls", ",", "msg", ")", ":", "return", "list", "(", "email", ".", "parser", ".", "Parser", "(", ")", ".", "parsestr", "(", "msg", ")", ".", "items", "(", ")", ")" ]
Parse HTTP headers. Args: msg (str): HTTP message. Returns: (List[Tuple[str, str]): List of header tuples.
[ "Parse", "HTTP", "headers", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L11951-L11998
def create(cls, request_inquiries, total_amount_inquired, monetary_account_id=None, status=None, event_id=None, custom_headers=None): """ Create a request batch by sending an array of single request objects, that will become part of the batch. :type user_id: int :type monetary_account_id: int :param request_inquiries: The list of request inquiries we want to send in 1 batch. :type request_inquiries: list[RequestInquiry] :param total_amount_inquired: The total amount originally inquired for this batch. :type total_amount_inquired: object_.Amount :param status: The status of the request. :type status: str :param event_id: The ID of the associated event if the request batch was made using 'split the bill'. :type event_id: int :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_REQUEST_INQUIRIES: request_inquiries, cls.FIELD_STATUS: status, cls.FIELD_TOTAL_AMOUNT_INQUIRED: total_amount_inquired, cls.FIELD_EVENT_ID: event_id } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
[ "def", "create", "(", "cls", ",", "request_inquiries", ",", "total_amount_inquired", ",", "monetary_account_id", "=", "None", ",", "status", "=", "None", ",", "event_id", "=", "None", ",", "custom_headers", "=", "None", ")", ":", "if", "custom_headers", "is", ...
Create a request batch by sending an array of single request objects, that will become part of the batch. :type user_id: int :type monetary_account_id: int :param request_inquiries: The list of request inquiries we want to send in 1 batch. :type request_inquiries: list[RequestInquiry] :param total_amount_inquired: The total amount originally inquired for this batch. :type total_amount_inquired: object_.Amount :param status: The status of the request. :type status: str :param event_id: The ID of the associated event if the request batch was made using 'split the bill'. :type event_id: int :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
[ "Create", "a", "request", "batch", "by", "sending", "an", "array", "of", "single", "request", "objects", "that", "will", "become", "part", "of", "the", "batch", "." ]
python
train
MartijnBraam/pyElectronics
electronics/devices/mcp23017.py
https://github.com/MartijnBraam/pyElectronics/blob/a20878c9fa190135f1e478e9ea0b54ca43ff308e/electronics/devices/mcp23017.py#L201-L213
def write_port(self, port, value): """ Use a whole port as a bus and write a byte to it. :param port: Name of the port ('A' or 'B') :param value: Value to write (0-255) """ if port == 'A': self.GPIOA = value elif port == 'B': self.GPIOB = value else: raise AttributeError('Port {} does not exist, use A or B'.format(port)) self.sync()
[ "def", "write_port", "(", "self", ",", "port", ",", "value", ")", ":", "if", "port", "==", "'A'", ":", "self", ".", "GPIOA", "=", "value", "elif", "port", "==", "'B'", ":", "self", ".", "GPIOB", "=", "value", "else", ":", "raise", "AttributeError", ...
Use a whole port as a bus and write a byte to it. :param port: Name of the port ('A' or 'B') :param value: Value to write (0-255)
[ "Use", "a", "whole", "port", "as", "a", "bus", "and", "write", "a", "byte", "to", "it", "." ]
python
train
denniskempin/safetynet
safetynet.py
https://github.com/denniskempin/safetynet/blob/fbcc4a112370fc20696f003d901114b4fe26d984/safetynet.py#L85-L90
def FindTypecheckParent(cls, parents): """Find parent class that uses this metaclass.""" for parent in parents: if hasattr(parent, "__metaclass__") and parent.__metaclass__ == cls: return parent return None
[ "def", "FindTypecheckParent", "(", "cls", ",", "parents", ")", ":", "for", "parent", "in", "parents", ":", "if", "hasattr", "(", "parent", ",", "\"__metaclass__\"", ")", "and", "parent", ".", "__metaclass__", "==", "cls", ":", "return", "parent", "return", ...
Find parent class that uses this metaclass.
[ "Find", "parent", "class", "that", "uses", "this", "metaclass", "." ]
python
train
LuminosoInsight/langcodes
langcodes/build_data.py
https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/build_data.py#L192-L199
def read_cldr_names(path, language, category): """ Read CLDR's names for things in a particular language. """ filename = data_filename('{}/{}/{}.json'.format(path, language, category)) fulldata = json.load(open(filename, encoding='utf-8')) data = fulldata['main'][language]['localeDisplayNames'][category] return data
[ "def", "read_cldr_names", "(", "path", ",", "language", ",", "category", ")", ":", "filename", "=", "data_filename", "(", "'{}/{}/{}.json'", ".", "format", "(", "path", ",", "language", ",", "category", ")", ")", "fulldata", "=", "json", ".", "load", "(", ...
Read CLDR's names for things in a particular language.
[ "Read", "CLDR", "s", "names", "for", "things", "in", "a", "particular", "language", "." ]
python
train
hollenstein/maspy
maspy/isobar.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/isobar.py#L356-L400
def _extractReporterIons(ionArrays, reporterMz, mzTolerance): """Find and a list of reporter ions and return mz and intensity values. Expected reporter mz values are searched in "ionArray['mz']" and reported if the observed relative deviation is less than specified by "mzTolerance". In the case of multiple matches, the one with the minimal deviation is picked. If no matching entries are found numpy.nan is returned for the mz value and an intensity of 0. The returned arrays are in the order of "reporterMz" values. :param ionArrays: a dictionary containing two numpy arrays of equal size, {"i": an array of ion intensities, "mz" an array of ion mz values} :param reporterMz: a list of reporter mz values :param mzTolerance: maximum allowed relative mz deviation :returns: {'mz': numpy.array(), 'i': numpy.array()} """ reporterIons = {'mz': [], 'i': []} for reporterMzValue in reporterMz: limHi = reporterMzValue * (1+mzTolerance) limLo = reporterMzValue * (1-mzTolerance) loPos = bisect.bisect_left(ionArrays['mz'], limLo) upPos = bisect.bisect_right(ionArrays['mz'], limHi) matchingValues = ionArrays['mz'][loPos:upPos] if matchingValues.size == 0: reporterIons['i'].append(0) reporterIons['mz'].append(numpy.nan) elif matchingValues.size == 1: reporterIons['i'].append(ionArrays['i'][loPos]) reporterIons['mz'].append(ionArrays['mz'][loPos]) else: mzDeviations = numpy.abs(matchingValues-reporterMzValue) minDeviationPos = numpy.argmin(mzDeviations) bestMatchArrayPos = range(loPos, upPos)[minDeviationPos] reporterIons['i'].append(ionArrays['i'][bestMatchArrayPos]) reporterIons['mz'].append(ionArrays['mz'][bestMatchArrayPos]) reporterIons['mz'] = numpy.array(reporterIons['mz'], dtype=ionArrays['mz'].dtype ) reporterIons['i'] = numpy.array(reporterIons['i'], dtype=ionArrays['i'].dtype ) return reporterIons
[ "def", "_extractReporterIons", "(", "ionArrays", ",", "reporterMz", ",", "mzTolerance", ")", ":", "reporterIons", "=", "{", "'mz'", ":", "[", "]", ",", "'i'", ":", "[", "]", "}", "for", "reporterMzValue", "in", "reporterMz", ":", "limHi", "=", "reporterMzV...
Find and a list of reporter ions and return mz and intensity values. Expected reporter mz values are searched in "ionArray['mz']" and reported if the observed relative deviation is less than specified by "mzTolerance". In the case of multiple matches, the one with the minimal deviation is picked. If no matching entries are found numpy.nan is returned for the mz value and an intensity of 0. The returned arrays are in the order of "reporterMz" values. :param ionArrays: a dictionary containing two numpy arrays of equal size, {"i": an array of ion intensities, "mz" an array of ion mz values} :param reporterMz: a list of reporter mz values :param mzTolerance: maximum allowed relative mz deviation :returns: {'mz': numpy.array(), 'i': numpy.array()}
[ "Find", "and", "a", "list", "of", "reporter", "ions", "and", "return", "mz", "and", "intensity", "values", "." ]
python
train
portfors-lab/sparkle
sparkle/stim/auto_parameter_model.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L124-L129
def numSteps(self, row): """Gets the number of steps for the parameter at index *row* will yeild """ param = self._parameters[row] return self.nStepsForParam(param)
[ "def", "numSteps", "(", "self", ",", "row", ")", ":", "param", "=", "self", ".", "_parameters", "[", "row", "]", "return", "self", ".", "nStepsForParam", "(", "param", ")" ]
Gets the number of steps for the parameter at index *row* will yeild
[ "Gets", "the", "number", "of", "steps", "for", "the", "parameter", "at", "index", "*", "row", "*", "will", "yeild" ]
python
train
webadmin87/midnight
midnight_main/services.py
https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/services.py#L27-L38
def get_comment_init(request, obj): """ Возвращает словарь для инициализации начальных значений модели комментария :param request: запрос :param obj: объект к которому добавляется комментарий :return: """ if request.user.is_authenticated(): init = {'obj': obj, 'username': request.user.username, 'email': request.user.email} else: init = {'obj': obj} return init
[ "def", "get_comment_init", "(", "request", ",", "obj", ")", ":", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "init", "=", "{", "'obj'", ":", "obj", ",", "'username'", ":", "request", ".", "user", ".", "username", ",", "'email'...
Возвращает словарь для инициализации начальных значений модели комментария :param request: запрос :param obj: объект к которому добавляется комментарий :return:
[ "Возвращает", "словарь", "для", "инициализации", "начальных", "значений", "модели", "комментария", ":", "param", "request", ":", "запрос", ":", "param", "obj", ":", "объект", "к", "которому", "добавляется", "комментарий", ":", "return", ":" ]
python
train
molmod/molmod
molmod/pairff.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L113-L120
def gradient_component(self, index1): """Compute the gradient of the energy for one atom""" result = np.zeros(3, float) for index2 in range(self.numc): if self.scaling[index1, index2] > 0: for (se, ve), (sg, vg) in zip(self.yield_pair_energies(index1, index2), self.yield_pair_gradients(index1, index2)): result += (sg*self.directions[index1, index2]*ve + se*vg)*self.scaling[index1, index2] return result
[ "def", "gradient_component", "(", "self", ",", "index1", ")", ":", "result", "=", "np", ".", "zeros", "(", "3", ",", "float", ")", "for", "index2", "in", "range", "(", "self", ".", "numc", ")", ":", "if", "self", ".", "scaling", "[", "index1", ",",...
Compute the gradient of the energy for one atom
[ "Compute", "the", "gradient", "of", "the", "energy", "for", "one", "atom" ]
python
train
chrislit/abydos
abydos/distance/_ncd_lzma.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_ncd_lzma.py#L51-L103
def dist(self, src, tar): """Return the NCD between two strings using LZMA compression. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Raises ------ ValueError Install the PylibLZMA module in order to use LZMA Examples -------- >>> cmp = NCDlzma() >>> cmp.dist('cat', 'hat') 0.08695652173913043 >>> cmp.dist('Niall', 'Neil') 0.16 >>> cmp.dist('aluminum', 'Catalan') 0.16 >>> cmp.dist('ATCG', 'TAGC') 0.08695652173913043 """ if src == tar: return 0.0 src = src.encode('utf-8') tar = tar.encode('utf-8') if lzma is not None: src_comp = lzma.compress(src)[14:] tar_comp = lzma.compress(tar)[14:] concat_comp = lzma.compress(src + tar)[14:] concat_comp2 = lzma.compress(tar + src)[14:] else: # pragma: no cover raise ValueError( 'Install the PylibLZMA module in order to use LZMA' ) return ( min(len(concat_comp), len(concat_comp2)) - min(len(src_comp), len(tar_comp)) ) / max(len(src_comp), len(tar_comp))
[ "def", "dist", "(", "self", ",", "src", ",", "tar", ")", ":", "if", "src", "==", "tar", ":", "return", "0.0", "src", "=", "src", ".", "encode", "(", "'utf-8'", ")", "tar", "=", "tar", ".", "encode", "(", "'utf-8'", ")", "if", "lzma", "is", "not...
Return the NCD between two strings using LZMA compression. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Compression distance Raises ------ ValueError Install the PylibLZMA module in order to use LZMA Examples -------- >>> cmp = NCDlzma() >>> cmp.dist('cat', 'hat') 0.08695652173913043 >>> cmp.dist('Niall', 'Neil') 0.16 >>> cmp.dist('aluminum', 'Catalan') 0.16 >>> cmp.dist('ATCG', 'TAGC') 0.08695652173913043
[ "Return", "the", "NCD", "between", "two", "strings", "using", "LZMA", "compression", "." ]
python
valid
StagPython/StagPy
stagpy/stagyydata.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyydata.py#L251-L261
def bind(self, isnap, istep): """Register the isnap / istep correspondence. Users of :class:`StagyyData` should not use this method. Args: isnap (int): snapshot index. istep (int): time step index. """ self._isteps[isnap] = istep self.sdat.steps[istep].isnap = isnap
[ "def", "bind", "(", "self", ",", "isnap", ",", "istep", ")", ":", "self", ".", "_isteps", "[", "isnap", "]", "=", "istep", "self", ".", "sdat", ".", "steps", "[", "istep", "]", ".", "isnap", "=", "isnap" ]
Register the isnap / istep correspondence. Users of :class:`StagyyData` should not use this method. Args: isnap (int): snapshot index. istep (int): time step index.
[ "Register", "the", "isnap", "/", "istep", "correspondence", "." ]
python
train
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L723-L744
def all(self, **kwargs): """List all the members, included inherited ones. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of members """ path = '%s/all' % self.path obj = self.gitlab.http_list(path, **kwargs) return [self._obj_cls(self, item) for item in obj]
[ "def", "all", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/all'", "%", "self", ".", "path", "obj", "=", "self", ".", "gitlab", ".", "http_list", "(", "path", ",", "*", "*", "kwargs", ")", "return", "[", "self", ".", "_obj_cls...
List all the members, included inherited ones. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of members
[ "List", "all", "the", "members", "included", "inherited", "ones", "." ]
python
train
cloudendpoints/endpoints-python
endpoints/parameter_converter.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/parameter_converter.py#L30-L55
def _check_enum(parameter_name, value, parameter_config): """Checks if an enum value is valid. This is called by the transform_parameter_value function and shouldn't be called directly. This verifies that the value of an enum parameter is valid. Args: parameter_name: A string containing the name of the parameter, which is either just a variable name or the name with the index appended. For example 'var' or 'var[2]'. value: A string containing the value passed in for the parameter. parameter_config: The dictionary containing information specific to the parameter in question. This is retrieved from request.parameters in the method config. Raises: EnumRejectionError: If the given value is not among the accepted enum values in the field parameter. """ enum_values = [enum['backendValue'] for enum in parameter_config['enum'].values() if 'backendValue' in enum] if value not in enum_values: raise errors.EnumRejectionError(parameter_name, value, enum_values)
[ "def", "_check_enum", "(", "parameter_name", ",", "value", ",", "parameter_config", ")", ":", "enum_values", "=", "[", "enum", "[", "'backendValue'", "]", "for", "enum", "in", "parameter_config", "[", "'enum'", "]", ".", "values", "(", ")", "if", "'backendVa...
Checks if an enum value is valid. This is called by the transform_parameter_value function and shouldn't be called directly. This verifies that the value of an enum parameter is valid. Args: parameter_name: A string containing the name of the parameter, which is either just a variable name or the name with the index appended. For example 'var' or 'var[2]'. value: A string containing the value passed in for the parameter. parameter_config: The dictionary containing information specific to the parameter in question. This is retrieved from request.parameters in the method config. Raises: EnumRejectionError: If the given value is not among the accepted enum values in the field parameter.
[ "Checks", "if", "an", "enum", "value", "is", "valid", "." ]
python
train
agamdua/mixtures
mixtures/field_values.py
https://github.com/agamdua/mixtures/blob/9c67f3684ddac53d8a636a4353a266e98d09e54c/mixtures/field_values.py#L22-L41
def get_random_value(field): """ Calls the dispatch method (``get_factory_func``) and passes the field obj argument to the callable returned. Returns: random value depending on field type and constraints in the field object """ func = get_factory_func(field) if field.default is not None: if callable(field.default): return field.default() return field.default if field.choices: return random.choice(field.choices) return func(field)
[ "def", "get_random_value", "(", "field", ")", ":", "func", "=", "get_factory_func", "(", "field", ")", "if", "field", ".", "default", "is", "not", "None", ":", "if", "callable", "(", "field", ".", "default", ")", ":", "return", "field", ".", "default", ...
Calls the dispatch method (``get_factory_func``) and passes the field obj argument to the callable returned. Returns: random value depending on field type and constraints in the field object
[ "Calls", "the", "dispatch", "method", "(", "get_factory_func", ")", "and", "passes", "the", "field", "obj", "argument", "to", "the", "callable", "returned", "." ]
python
train
user-cont/conu
conu/backend/origin/backend.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/origin/backend.py#L224-L241
def get_image_registry_url(self, image_name): """ Helper function for obtain registry url of image from it's name :param image_name: str, short name of an image, example: - conu:0.5.0 :return: str, image registry url, example: - 172.30.1.1:5000/myproject/conu:0.5.0 """ c = self._oc_command(["get", "is", image_name, "--output=jsonpath=\'{ .status.dockerImageRepository }\'"]) try: internal_registry_name = run_cmd(c, return_output=True) except subprocess.CalledProcessError as ex: raise ConuException("oc get is failed: %s" % ex) logger.info("Image registry url: %s", internal_registry_name) return internal_registry_name.replace("'", "").replace('"', '')
[ "def", "get_image_registry_url", "(", "self", ",", "image_name", ")", ":", "c", "=", "self", ".", "_oc_command", "(", "[", "\"get\"", ",", "\"is\"", ",", "image_name", ",", "\"--output=jsonpath=\\'{ .status.dockerImageRepository }\\'\"", "]", ")", "try", ":", "int...
Helper function for obtain registry url of image from it's name :param image_name: str, short name of an image, example: - conu:0.5.0 :return: str, image registry url, example: - 172.30.1.1:5000/myproject/conu:0.5.0
[ "Helper", "function", "for", "obtain", "registry", "url", "of", "image", "from", "it", "s", "name", ":", "param", "image_name", ":", "str", "short", "name", "of", "an", "image", "example", ":", "-", "conu", ":", "0", ".", "5", ".", "0", ":", "return"...
python
train
LIVVkit/LIVVkit
livvkit/components/verification.py
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/verification.py#L100-L111
def _print_summary(case, summary): """ Show some statistics from the run """ for dof, data in summary.items(): b4b = data["Bit for Bit"] conf = data["Configurations"] stdout = data["Std. Out Files"] print(" " + case + " " + str(dof)) print(" --------------------") print(" Bit for bit matches : " + str(b4b[0]) + " of " + str(b4b[1])) print(" Configuration matches : " + str(conf[0]) + " of " + str(conf[1])) print(" Std. Out files parsed : " + str(stdout)) print("")
[ "def", "_print_summary", "(", "case", ",", "summary", ")", ":", "for", "dof", ",", "data", "in", "summary", ".", "items", "(", ")", ":", "b4b", "=", "data", "[", "\"Bit for Bit\"", "]", "conf", "=", "data", "[", "\"Configurations\"", "]", "stdout", "="...
Show some statistics from the run
[ "Show", "some", "statistics", "from", "the", "run" ]
python
train
eaton-lab/toytree
toytree/Toytree.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Toytree.py#L268-L313
def get_node_values( self, feature=None, show_root=False, show_tips=False, ): """ Returns node values from tree object in node plot order. To modify values you must modify the .treenode object directly by setting new 'features'. For example for node in ttree.treenode.traverse(): node.add_feature("PP", 100) By default node and tip values are hidden (set to "") so that they are not shown on the tree plot. To include values for these nodes use the 'show_root'=True, or 'show_tips'=True arguments. tree.get_node_values("support", True, True) """ # access nodes in the order they will be plotted ndict = self.get_node_dict(return_internal=True, return_nodes=True) nodes = [ndict[i] for i in range(self.nnodes)[::-1]] # get features if feature: vals = [i.__getattribute__(feature) if hasattr(i, feature) else "" for i in nodes] else: vals = [" " for i in nodes] # apply hiding rules if not show_root: vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)] if not show_tips: vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)] # convert float to ints for prettier printing unless all floats # raise exception and skip if there are true strings (names) try: if all([Decimal(str(i)) % 1 == 0 for i in vals if i]): vals = [int(i) if isinstance(i, float) else i for i in vals] except Exception: pass return vals
[ "def", "get_node_values", "(", "self", ",", "feature", "=", "None", ",", "show_root", "=", "False", ",", "show_tips", "=", "False", ",", ")", ":", "# access nodes in the order they will be plotted", "ndict", "=", "self", ".", "get_node_dict", "(", "return_internal...
Returns node values from tree object in node plot order. To modify values you must modify the .treenode object directly by setting new 'features'. For example for node in ttree.treenode.traverse(): node.add_feature("PP", 100) By default node and tip values are hidden (set to "") so that they are not shown on the tree plot. To include values for these nodes use the 'show_root'=True, or 'show_tips'=True arguments. tree.get_node_values("support", True, True)
[ "Returns", "node", "values", "from", "tree", "object", "in", "node", "plot", "order", ".", "To", "modify", "values", "you", "must", "modify", "the", ".", "treenode", "object", "directly", "by", "setting", "new", "features", ".", "For", "example" ]
python
train
O365/python-o365
O365/utils/windows_tz.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/windows_tz.py#L500-L512
def get_windows_tz(iana_tz): """ Returns a valid windows TimeZone from a given pytz TimeZone (Iana/Olson Timezones) Note: Windows Timezones are SHIT!... no ... really THEY ARE HOLY FUCKING SHIT!. """ timezone = IANA_TO_WIN.get( iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz) if timezone is None: raise pytz.UnknownTimeZoneError( "Can't find Iana TimeZone " + iana_tz.zone) return timezone
[ "def", "get_windows_tz", "(", "iana_tz", ")", ":", "timezone", "=", "IANA_TO_WIN", ".", "get", "(", "iana_tz", ".", "zone", "if", "isinstance", "(", "iana_tz", ",", "tzinfo", ")", "else", "iana_tz", ")", "if", "timezone", "is", "None", ":", "raise", "pyt...
Returns a valid windows TimeZone from a given pytz TimeZone (Iana/Olson Timezones) Note: Windows Timezones are SHIT!... no ... really THEY ARE HOLY FUCKING SHIT!.
[ "Returns", "a", "valid", "windows", "TimeZone", "from", "a", "given", "pytz", "TimeZone", "(", "Iana", "/", "Olson", "Timezones", ")", "Note", ":", "Windows", "Timezones", "are", "SHIT!", "...", "no", "...", "really", "THEY", "ARE", "HOLY", "FUCKING", "SHI...
python
train
20c/grainy
grainy/core.py
https://github.com/20c/grainy/blob/cd956fd4144044993abc967974a127aab07a8ef6/grainy/core.py#L274-L319
def update_index(self): """ Regenerates the permission index for this set Called everytime a rule is added / removed / modified in the set """ # update index idx = {} for _, p in sorted(self.permissions.items(), key=lambda x: str(x[0])): branch = idx parent_p = const.PERM_DENY for k in p.namespace.keys: if not k in branch: branch[k] = {"__": parent_p} branch[k].update(__implicit=True) branch = branch[k] parent_p = branch["__"] branch["__"] = p.value branch["__implicit"] = False self.index = idx # update read access map ramap = {} def update_ramap(branch_idx): r = {"__": False} for k, v in list(branch_idx.items()): if k != "__" and k != "__implicit": r[k] = update_ramap(v) if branch_idx["__"] is not None and (branch_idx["__"] & const.PERM_READ) != 0: r["__"] = True return r for k, v in list(idx.items()): ramap[k] = update_ramap(v) self.read_access_map = ramap return self.index
[ "def", "update_index", "(", "self", ")", ":", "# update index", "idx", "=", "{", "}", "for", "_", ",", "p", "in", "sorted", "(", "self", ".", "permissions", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "str", "(", "x", "[", "0", ...
Regenerates the permission index for this set Called everytime a rule is added / removed / modified in the set
[ "Regenerates", "the", "permission", "index", "for", "this", "set" ]
python
train
delfick/harpoon
harpoon/layers.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/layers.py#L36-L44
def layered(self): """Yield list of [[(name, image), ...], [(name, image), ...], ...]""" result = [] for layer in self._layered: nxt = [] for name in layer: nxt.append((name, self.all_images[name])) result.append(nxt) return result
[ "def", "layered", "(", "self", ")", ":", "result", "=", "[", "]", "for", "layer", "in", "self", ".", "_layered", ":", "nxt", "=", "[", "]", "for", "name", "in", "layer", ":", "nxt", ".", "append", "(", "(", "name", ",", "self", ".", "all_images",...
Yield list of [[(name, image), ...], [(name, image), ...], ...]
[ "Yield", "list", "of", "[[", "(", "name", "image", ")", "...", "]", "[", "(", "name", "image", ")", "...", "]", "...", "]" ]
python
train
scanny/python-pptx
pptx/oxml/chart/series.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/chart/series.py#L150-L158
def get_dLbl(self, idx): """ Return the `c:dLbl` element representing the label for the data point at offset *idx* in this series, or |None| if not present. """ dLbls = self.dLbls if dLbls is None: return None return dLbls.get_dLbl_for_point(idx)
[ "def", "get_dLbl", "(", "self", ",", "idx", ")", ":", "dLbls", "=", "self", ".", "dLbls", "if", "dLbls", "is", "None", ":", "return", "None", "return", "dLbls", ".", "get_dLbl_for_point", "(", "idx", ")" ]
Return the `c:dLbl` element representing the label for the data point at offset *idx* in this series, or |None| if not present.
[ "Return", "the", "c", ":", "dLbl", "element", "representing", "the", "label", "for", "the", "data", "point", "at", "offset", "*", "idx", "*", "in", "this", "series", "or", "|None|", "if", "not", "present", "." ]
python
train
dj-stripe/dj-stripe
djstripe/models/base.py
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/base.py#L104-L112
def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs): """ Call the stripe API's create operation for this model. :param api_key: The api key to use for this request. Defaults to djstripe_settings.STRIPE_SECRET_KEY. :type api_key: string """ return cls.stripe_class.create(api_key=api_key, **kwargs)
[ "def", "_api_create", "(", "cls", ",", "api_key", "=", "djstripe_settings", ".", "STRIPE_SECRET_KEY", ",", "*", "*", "kwargs", ")", ":", "return", "cls", ".", "stripe_class", ".", "create", "(", "api_key", "=", "api_key", ",", "*", "*", "kwargs", ")" ]
Call the stripe API's create operation for this model. :param api_key: The api key to use for this request. Defaults to djstripe_settings.STRIPE_SECRET_KEY. :type api_key: string
[ "Call", "the", "stripe", "API", "s", "create", "operation", "for", "this", "model", "." ]
python
train
librosa/librosa
librosa/core/spectrum.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L193-L343
def istft(stft_matrix, hop_length=None, win_length=None, window='hann', center=True, dtype=np.float32, length=None): """ Inverse short-time Fourier transform (ISTFT). Converts a complex-valued spectrogram `stft_matrix` to time-series `y` by minimizing the mean squared error between `stft_matrix` and STFT of `y` as described in [1]_ up to Section 2 (reconstruction from MSTFT). In general, window function, hop length and other parameters should be same as in stft, which mostly leads to perfect reconstruction of a signal from unmodified `stft_matrix`. .. [1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform," IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Parameters ---------- stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)] STFT matrix from `stft` hop_length : int > 0 [scalar] Number of frames between STFT columns. If unspecified, defaults to `win_length / 4`. win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1) When reconstructing the time series, each frame is windowed and each sample is normalized by the sum of squared window according to the `window` function (see below). If unspecified, defaults to `n_fft`. window : string, tuple, number, function, np.ndarray [shape=(n_fft,)] - a window specification (string, tuple, or number); see `scipy.signal.get_window` - a window function, such as `scipy.signal.hanning` - a user-specified window vector of length `n_fft` .. see also:: `filters.get_window` center : boolean - If `True`, `D` is assumed to have centered frames. - If `False`, `D` is assumed to have left-aligned frames. dtype : numeric type Real numeric type for `y`. Default is 32-bit float. length : int > 0, optional If provided, the output `y` is zero-padded or clipped to exactly `length` samples. Returns ------- y : np.ndarray [shape=(n,)] time domain signal reconstructed from `stft_matrix` See Also -------- stft : Short-time Fourier Transform Notes ----- This function caches at level 30. 
Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> D = librosa.stft(y) >>> y_hat = librosa.istft(D) >>> y_hat array([ -4.812e-06, -4.267e-06, ..., 6.271e-06, 2.827e-07], dtype=float32) Exactly preserving length of the input signal requires explicit padding. Otherwise, a partial frame at the end of `y` will not be represented. >>> n = len(y) >>> n_fft = 2048 >>> y_pad = librosa.util.fix_length(y, n + n_fft // 2) >>> D = librosa.stft(y_pad, n_fft=n_fft) >>> y_out = librosa.istft(D, length=n) >>> np.max(np.abs(y - y_out)) 1.4901161e-07 """ n_fft = 2 * (stft_matrix.shape[0] - 1) # By default, use the entire frame if win_length is None: win_length = n_fft # Set the default hop, if it's not already specified if hop_length is None: hop_length = int(win_length // 4) ifft_window = get_window(window, win_length, fftbins=True) # Pad out to match n_fft, and add a broadcasting axis ifft_window = util.pad_center(ifft_window, n_fft)[:, np.newaxis] n_frames = stft_matrix.shape[1] expected_signal_len = n_fft + hop_length * (n_frames - 1) y = np.zeros(expected_signal_len, dtype=dtype) n_columns = int(util.MAX_MEM_BLOCK // (stft_matrix.shape[0] * stft_matrix.itemsize)) fft = get_fftlib() frame = 0 for bl_s in range(0, n_frames, n_columns): bl_t = min(bl_s + n_columns, n_frames) # invert the block and apply the window function ytmp = ifft_window * fft.irfft(stft_matrix[:, bl_s:bl_t], axis=0) # Overlap-add the istft block starting at the i'th frame __overlap_add(y[frame * hop_length:], ytmp, hop_length) frame += (bl_t - bl_s) # Normalize by sum of squared window ifft_window_sum = window_sumsquare(window, n_frames, win_length=win_length, n_fft=n_fft, hop_length=hop_length, dtype=dtype) approx_nonzero_indices = ifft_window_sum > util.tiny(ifft_window_sum) y[approx_nonzero_indices] /= ifft_window_sum[approx_nonzero_indices] if length is None: # If we don't need to control length, just do the usual center trimming # to eliminate padded data if center: y = 
y[int(n_fft // 2):-int(n_fft // 2)] else: if center: # If we're centering, crop off the first n_fft//2 samples # and then trim/pad to the target length. # We don't trim the end here, so that if the signal is zero-padded # to a longer duration, the decay is smooth by windowing start = int(n_fft // 2) else: # If we're not centering, start at 0 and trim/pad as necessary start = 0 y = util.fix_length(y[start:], length) return y
[ "def", "istft", "(", "stft_matrix", ",", "hop_length", "=", "None", ",", "win_length", "=", "None", ",", "window", "=", "'hann'", ",", "center", "=", "True", ",", "dtype", "=", "np", ".", "float32", ",", "length", "=", "None", ")", ":", "n_fft", "=",...
Inverse short-time Fourier transform (ISTFT). Converts a complex-valued spectrogram `stft_matrix` to time-series `y` by minimizing the mean squared error between `stft_matrix` and STFT of `y` as described in [1]_ up to Section 2 (reconstruction from MSTFT). In general, window function, hop length and other parameters should be same as in stft, which mostly leads to perfect reconstruction of a signal from unmodified `stft_matrix`. .. [1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform," IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Parameters ---------- stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)] STFT matrix from `stft` hop_length : int > 0 [scalar] Number of frames between STFT columns. If unspecified, defaults to `win_length / 4`. win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1) When reconstructing the time series, each frame is windowed and each sample is normalized by the sum of squared window according to the `window` function (see below). If unspecified, defaults to `n_fft`. window : string, tuple, number, function, np.ndarray [shape=(n_fft,)] - a window specification (string, tuple, or number); see `scipy.signal.get_window` - a window function, such as `scipy.signal.hanning` - a user-specified window vector of length `n_fft` .. see also:: `filters.get_window` center : boolean - If `True`, `D` is assumed to have centered frames. - If `False`, `D` is assumed to have left-aligned frames. dtype : numeric type Real numeric type for `y`. Default is 32-bit float. length : int > 0, optional If provided, the output `y` is zero-padded or clipped to exactly `length` samples. Returns ------- y : np.ndarray [shape=(n,)] time domain signal reconstructed from `stft_matrix` See Also -------- stft : Short-time Fourier Transform Notes ----- This function caches at level 30. 
Examples -------- >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> D = librosa.stft(y) >>> y_hat = librosa.istft(D) >>> y_hat array([ -4.812e-06, -4.267e-06, ..., 6.271e-06, 2.827e-07], dtype=float32) Exactly preserving length of the input signal requires explicit padding. Otherwise, a partial frame at the end of `y` will not be represented. >>> n = len(y) >>> n_fft = 2048 >>> y_pad = librosa.util.fix_length(y, n + n_fft // 2) >>> D = librosa.stft(y_pad, n_fft=n_fft) >>> y_out = librosa.istft(D, length=n) >>> np.max(np.abs(y - y_out)) 1.4901161e-07
[ "Inverse", "short", "-", "time", "Fourier", "transform", "(", "ISTFT", ")", "." ]
python
test
hawkowl/txctools
txctools/reports/hotspot.py
https://github.com/hawkowl/txctools/blob/14cab033ea179211a7bfd88dc202d576fc336ddc/txctools/reports/hotspot.py#L39-L51
def process(self): """ Process the warnings. """ for filename, warnings in self.warnings.iteritems(): self.fileCounts[filename] = {} fc = self.fileCounts[filename] fc["warning_count"] = len(warnings) fc["warning_breakdown"] = self._warnCount(warnings) self.warningCounts = self._warnCount(warnings, warningCount=self.warningCounts)
[ "def", "process", "(", "self", ")", ":", "for", "filename", ",", "warnings", "in", "self", ".", "warnings", ".", "iteritems", "(", ")", ":", "self", ".", "fileCounts", "[", "filename", "]", "=", "{", "}", "fc", "=", "self", ".", "fileCounts", "[", ...
Process the warnings.
[ "Process", "the", "warnings", "." ]
python
train
batiste/django-page-cms
pages/managers.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/managers.py#L208-L218
def get_content_object(self, page, language, ctype): """Gets the latest published :class:`Content <pages.models.Content>` for a particular page, language and placeholder type.""" params = { 'language': language, 'type': ctype, 'page': None if page is fake_page else page } if page.freeze_date: params['creation_date__lte'] = page.freeze_date return self.filter(**params).latest()
[ "def", "get_content_object", "(", "self", ",", "page", ",", "language", ",", "ctype", ")", ":", "params", "=", "{", "'language'", ":", "language", ",", "'type'", ":", "ctype", ",", "'page'", ":", "None", "if", "page", "is", "fake_page", "else", "page", ...
Gets the latest published :class:`Content <pages.models.Content>` for a particular page, language and placeholder type.
[ "Gets", "the", "latest", "published", ":", "class", ":", "Content", "<pages", ".", "models", ".", "Content", ">", "for", "a", "particular", "page", "language", "and", "placeholder", "type", "." ]
python
train
flatangle/flatlib
flatlib/object.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/object.py#L82-L87
def antiscia(self): """ Returns antiscia object. """ obj = self.copy() obj.type = const.OBJ_GENERIC obj.relocate(360 - obj.lon + 180) return obj
[ "def", "antiscia", "(", "self", ")", ":", "obj", "=", "self", ".", "copy", "(", ")", "obj", ".", "type", "=", "const", ".", "OBJ_GENERIC", "obj", ".", "relocate", "(", "360", "-", "obj", ".", "lon", "+", "180", ")", "return", "obj" ]
Returns antiscia object.
[ "Returns", "antiscia", "object", "." ]
python
train
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/gapic/spanner_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/gapic/spanner_client.py#L203-L291
def create_session( self, database, session=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive transactions. Sessions can only execute one transaction at a time. To execute multiple concurrent read-write/write-only transactions, create multiple sessions. Note that standalone reads and queries use a transaction internally, and count toward the one transaction limit. Cloud Spanner limits the number of sessions that can exist at any given time; thus, it is a good idea to delete idle and/or unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. Idle sessions can be kept alive by sending a trivial SQL query periodically, e.g., ``"SELECT 1"``. Example: >>> from google.cloud import spanner_v1 >>> >>> client = spanner_v1.SpannerClient() >>> >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') >>> >>> response = client.create_session(database) Args: database (str): Required. The database in which the new session is created. session (Union[dict, ~google.cloud.spanner_v1.types.Session]): The session to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Session` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. 
Returns: A :class:`~google.cloud.spanner_v1.types.Session` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_session" not in self._inner_api_calls: self._inner_api_calls[ "create_session" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_session, default_retry=self._method_configs["CreateSession"].retry, default_timeout=self._method_configs["CreateSession"].timeout, client_info=self._client_info, ) request = spanner_pb2.CreateSessionRequest(database=database, session=session) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("database", database)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_session"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "create_session", "(", "self", ",", "database", ",", "session", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "metho...
Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner database. Sessions are meant to be reused for many consecutive transactions. Sessions can only execute one transaction at a time. To execute multiple concurrent read-write/write-only transactions, create multiple sessions. Note that standalone reads and queries use a transaction internally, and count toward the one transaction limit. Cloud Spanner limits the number of sessions that can exist at any given time; thus, it is a good idea to delete idle and/or unneeded sessions. Aside from explicit deletes, Cloud Spanner can delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. Idle sessions can be kept alive by sending a trivial SQL query periodically, e.g., ``"SELECT 1"``. Example: >>> from google.cloud import spanner_v1 >>> >>> client = spanner_v1.SpannerClient() >>> >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') >>> >>> response = client.create_session(database) Args: database (str): Required. The database in which the new session is created. session (Union[dict, ~google.cloud.spanner_v1.types.Session]): The session to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Session` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.spanner_v1.types.Session` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "a", "new", "session", ".", "A", "session", "can", "be", "used", "to", "perform", "transactions", "that", "read", "and", "/", "or", "modify", "data", "in", "a", "Cloud", "Spanner", "database", ".", "Sessions", "are", "meant", "to", "be", "reus...
python
train
kgiusti/pyngus
examples/rpc-server.py
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/examples/rpc-server.py#L92-L100
def process_input(self): """Called when socket is read-ready""" try: pyngus.read_socket_input(self.connection, self.socket) except Exception as e: LOG.error("Exception on socket read: %s", str(e)) self.connection.close_input() self.connection.close() self.connection.process(time.time())
[ "def", "process_input", "(", "self", ")", ":", "try", ":", "pyngus", ".", "read_socket_input", "(", "self", ".", "connection", ",", "self", ".", "socket", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Exception on socket read: %s\"...
Called when socket is read-ready
[ "Called", "when", "socket", "is", "read", "-", "ready" ]
python
test
EventTeam/beliefs
src/beliefs/belief_utils.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/belief_utils.py#L59-L68
def list_diff(list1, list2): """ Ssymetric list difference """ diff_list = [] for item in list1: if not item in list2: diff_list.append(item) for item in list2: if not item in list1: diff_list.append(item) return diff_list
[ "def", "list_diff", "(", "list1", ",", "list2", ")", ":", "diff_list", "=", "[", "]", "for", "item", "in", "list1", ":", "if", "not", "item", "in", "list2", ":", "diff_list", ".", "append", "(", "item", ")", "for", "item", "in", "list2", ":", "if",...
Ssymetric list difference
[ "Ssymetric", "list", "difference" ]
python
train
Robpol86/libnl
example_list_network_interfaces.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/example_list_network_interfaces.py#L89-L111
def main(): """Main function called upon script execution.""" # First open a socket to the kernel. Same one used for sending and receiving. sk = nl_socket_alloc() # Creates an `nl_sock` instance. ret = nl_connect(sk, NETLINK_ROUTE) # Create file descriptor and bind socket. if ret < 0: reason = errmsg[abs(ret)] return error('nl_connect() returned {0} ({1})'.format(ret, reason)) # Next we send the request to the kernel. rt_hdr = rtgenmsg(rtgen_family=socket.AF_PACKET) ret = nl_send_simple(sk, RTM_GETLINK, NLM_F_REQUEST | NLM_F_DUMP, rt_hdr, rt_hdr.SIZEOF) if ret < 0: reason = errmsg[abs(ret)] return error('nl_send_simple() returned {0} ({1})'.format(ret, reason)) print('Sent {0} bytes to the kernel.'.format(ret)) # Finally we'll retrieve the kernel's answer, process it, and call any callbacks attached to the `nl_sock` instance. nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, callback, None) # Add callback to the `nl_sock` instance. ret = nl_recvmsgs_default(sk) # Get kernel's answer, and call attached callbacks. if ret < 0: reason = errmsg[abs(ret)] return error('nl_recvmsgs_default() returned {0} ({1})'.format(ret, reason))
[ "def", "main", "(", ")", ":", "# First open a socket to the kernel. Same one used for sending and receiving.", "sk", "=", "nl_socket_alloc", "(", ")", "# Creates an `nl_sock` instance.", "ret", "=", "nl_connect", "(", "sk", ",", "NETLINK_ROUTE", ")", "# Create file descriptor...
Main function called upon script execution.
[ "Main", "function", "called", "upon", "script", "execution", "." ]
python
train