repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L714-L740
def data(self, index, role):
    """
    Get the data for the header.

    This is used when a header has levels.
    """
    # Reject invalid or out-of-range indexes up front.
    if not (index.isValid()
            and index.row() < self._shape[0]
            and index.column() < self._shape[1]):
        return None
    if self.axis == 0:
        logical_row, logical_col = index.row(), index.column()
    else:
        logical_row, logical_col = index.column(), index.row()
    if role != Qt.DisplayRole:
        return None
    if self.axis == 0 and self._shape[0] <= 1:
        return None
    header = self.model.header(self.axis, logical_col, logical_row)
    # Don't perform any conversion on strings
    # because it leads to differences between
    # the data present in the dataframe and
    # what is shown by Spyder
    if not is_type_text_string(header):
        header = to_text_string(header)
    return header
[ "def", "data", "(", "self", ",", "index", ",", "role", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", "or", "index", ".", "row", "(", ")", ">=", "self", ".", "_shape", "[", "0", "]", "or", "index", ".", "column", "(", ")", ">=", "s...
Get the data for the header. This is used when a header has levels.
[ "Get", "the", "data", "for", "the", "header", ".", "This", "is", "used", "when", "a", "header", "has", "levels", "." ]
python
train
OCHA-DAP/hdx-python-country
src/hdx/location/country.py
https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L226-L245
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    # Lookup is case-insensitive on the ISO3 side.
    lookup = cls.countriesdata(use_live=use_live)['iso2iso3']
    result = lookup.get(iso3.upper())
    if result is None and exception is not None:
        raise exception
    return result
[ "def", "get_iso2_from_iso3", "(", "cls", ",", "iso3", ",", "use_live", "=", "True", ",", "exception", "=", "None", ")", ":", "# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]", "countriesdata", "=", "cls", ".", "countriesdata", "(", "use_live", "=",...
Get ISO2 from ISO3 code Args: iso3 (str): ISO3 code for which to get ISO2 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO2 code
[ "Get", "ISO2", "from", "ISO3", "code" ]
python
train
Gandi/gandi.cli
gandi/cli/core/base.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/base.py#L275-L304
def update_progress(cls, progress, starttime):
    """Display an ascii progress bar while processing operation.

    :param progress: fraction completed, expected as a float in [0, 1]
    :param starttime: ``datetime`` the operation started (UTC, naive)
    """
    width, _height = click.get_terminal_size()
    if not width:
        return

    duration = datetime.utcnow() - starttime
    hours, remainder = divmod(duration.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)

    size = int(width * .6)
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        # BUGFIX: report the offending type *before* clobbering the value;
        # the original echoed type(progress) after `progress = 0`, so it
        # always printed <class 'int'> instead of what the caller passed.
        cls.echo(type(progress))
        progress = 0
        status = 'error: progress var must be float\n'
    if progress < 0:
        progress = 0
        status = 'Halt...\n'
    if progress >= 1:
        progress = 1
        # status = 'Done...\n'
    block = int(round(size * progress))
    text = ('\rProgress: [{0}] {1:.2%} {2} {3:0>2}:{4:0>2}:{5:0>2} '
            ''.format('#' * block + '-' * (size - block),
                      progress, status, hours, minutes, seconds))
    sys.stdout.write(text)
    sys.stdout.flush()
[ "def", "update_progress", "(", "cls", ",", "progress", ",", "starttime", ")", ":", "width", ",", "_height", "=", "click", ".", "get_terminal_size", "(", ")", "if", "not", "width", ":", "return", "duration", "=", "datetime", ".", "utcnow", "(", ")", "-", ...
Display an ascii progress bar while processing operation.
[ "Display", "an", "ascii", "progress", "bar", "while", "processing", "operation", "." ]
python
train
hbldh/dlxsudoku
dlxsudoku/sudoku.py
https://github.com/hbldh/dlxsudoku/blob/8d774e0883eb615533d04f07e58a95db716226e0/dlxsudoku/sudoku.py#L301-L353
def _fill_hidden_singles(self):
    """Look for hidden singles, i.e. cells with only one unique possible
    value in row, column or box.

    :return: If any Hidden Single has been found.
    :rtype: bool
    """
    for i in utils.range_(self.side):
        box_i = (i // self.order) * self.order
        for j in utils.range_(self.side):
            box_j = (j // self.order) * self.order

            # Skip cells whose value is already determined.
            if self[i][j] > 0:
                continue

            # Row: subtract the possibles of every other cell in row i.
            candidates = self._possibles[i][j]
            for col in utils.range_(self.side):
                if col != j:
                    candidates = candidates.difference(self._possibles[i][col])
            if len(candidates) == 1:
                # Found a hidden single in a row!
                self.set_cell(i, j, candidates.pop())
                self.solution_steps.append(
                    self._format_step("HIDDEN-ROW", (i, j), self[i][j]))
                return True

            # Column: subtract the possibles of every other cell in column j.
            candidates = self._possibles[i][j]
            for row in utils.range_(self.side):
                if row != i:
                    candidates = candidates.difference(self._possibles[row][j])
            if len(candidates) == 1:
                # Found a hidden single in a column!
                self.set_cell(i, j, candidates.pop())
                self.solution_steps.append(
                    self._format_step("HIDDEN-COL", (i, j), self[i][j]))
                return True

            # Box: subtract the possibles of every other cell in this box.
            candidates = self._possibles[i][j]
            for row in utils.range_(box_i, box_i + self.order):
                for col in utils.range_(box_j, box_j + self.order):
                    if not (row == i and col == j):
                        candidates = candidates.difference(
                            self._possibles[row][col])
            if len(candidates) == 1:
                # Found a hidden single in a box!
                self.set_cell(i, j, candidates.pop())
                self.solution_steps.append(
                    self._format_step("HIDDEN-BOX", (i, j), self[i][j]))
                return True
    return False
[ "def", "_fill_hidden_singles", "(", "self", ")", ":", "for", "i", "in", "utils", ".", "range_", "(", "self", ".", "side", ")", ":", "box_i", "=", "(", "i", "//", "self", ".", "order", ")", "*", "self", ".", "order", "for", "j", "in", "utils", "."...
Look for hidden singles, i.e. cells with only one unique possible value in row, column or box. :return: If any Hidden Single has been found. :rtype: bool
[ "Look", "for", "hidden", "singles", "i", ".", "e", ".", "cells", "with", "only", "one", "unique", "possible", "value", "in", "row", "column", "or", "box", "." ]
python
train
spyder-ide/spyder
spyder/plugins/projects/projecttypes/python.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/projecttypes/python.py#L29-L32
def _set_relative_pythonpath(self, value): """Set PYTHONPATH list relative paths""" self.pythonpath = [osp.abspath(osp.join(self.root_path, path)) for path in value]
[ "def", "_set_relative_pythonpath", "(", "self", ",", "value", ")", ":", "self", ".", "pythonpath", "=", "[", "osp", ".", "abspath", "(", "osp", ".", "join", "(", "self", ".", "root_path", ",", "path", ")", ")", "for", "path", "in", "value", "]" ]
Set PYTHONPATH list relative paths
[ "Set", "PYTHONPATH", "list", "relative", "paths" ]
python
train
hearsaycorp/normalize
normalize/visitor.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/visitor.py#L469-L478
def produce(cls, mapped_props, aggregated, value_type, visitor):
    """Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but
    constructs instances rather than returning plain dicts.
    """
    if mapped_props:
        kwargs = {prop.name: val for prop, val in mapped_props}
    else:
        kwargs = {}
    # Collections additionally receive the aggregated member values.
    if issubclass(value_type, Collection):
        kwargs['values'] = aggregated
    return value_type(**kwargs)
[ "def", "produce", "(", "cls", ",", "mapped_props", ",", "aggregated", ",", "value_type", ",", "visitor", ")", ":", "kwargs", "=", "{", "}", "if", "not", "mapped_props", "else", "dict", "(", "(", "k", ".", "name", ",", "v", ")", "for", "k", ",", "v"...
Like :py:meth:`normalize.visitor.VisitorPattern.reduce`, but constructs instances rather than returning plain dicts.
[ "Like", ":", "py", ":", "meth", ":", "normalize", ".", "visitor", ".", "VisitorPattern", ".", "reduce", "but", "constructs", "instances", "rather", "than", "returning", "plain", "dicts", "." ]
python
train
corydodt/Crosscap
crosscap/openapi.py
https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L141-L151
def representCleanOpenAPIOperation(dumper, data):
    """
    Unpack nonstandard attributes while representing an OpenAPIOperation
    """
    dct = _orderedCleanDict(data)
    if '_extended' in dct:
        # Hoist every extension entry to the top level, then drop the holder.
        for key, value in list(data._extended.items()):
            dct[key] = value
        del dct['_extended']
    return dumper.yaml_representers[type(dct)](dumper, dct)
[ "def", "representCleanOpenAPIOperation", "(", "dumper", ",", "data", ")", ":", "dct", "=", "_orderedCleanDict", "(", "data", ")", "if", "'_extended'", "in", "dct", ":", "for", "k", ",", "ext", "in", "list", "(", "data", ".", "_extended", ".", "items", "(...
Unpack nonstandard attributes while representing an OpenAPIOperation
[ "Unpack", "nonstandard", "attributes", "while", "representing", "an", "OpenAPIOperation" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/model.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/model.py#L126-L137
def created(self):
    """Union[datetime.datetime, None]: Datetime at which the model was
    created (:data:`None` until set from the server). Read-only.
    """
    millis = self._proto.creation_time
    if millis is None or millis == 0:
        return None
    # value will be in milliseconds; the helper expects microseconds.
    return google.cloud._helpers._datetime_from_microseconds(
        1000.0 * float(millis)
    )
[ "def", "created", "(", "self", ")", ":", "value", "=", "self", ".", "_proto", ".", "creation_time", "if", "value", "is", "not", "None", "and", "value", "!=", "0", ":", "# value will be in milliseconds.", "return", "google", ".", "cloud", ".", "_helpers", "...
Union[datetime.datetime, None]: Datetime at which the model was created (:data:`None` until set from the server). Read-only.
[ "Union", "[", "datetime", ".", "datetime", "None", "]", ":", "Datetime", "at", "which", "the", "model", "was", "created", "(", ":", "data", ":", "None", "until", "set", "from", "the", "server", ")", "." ]
python
train
devassistant/devassistant
devassistant/lang.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/lang.py#L260-L283
def parse_for(control_line):
    """Returns name of loop control variable(s), iteration type (in/word_in)
    and expression to iterate on.

    For example:
    - given "for $i in $foo", returns (['i'], '$foo')
    - given "for ${i} in $(ls $foo)", returns (['i'], '$(ls $foo)')
    - given "for $k, $v in $foo", returns (['k', 'v'], '$foo')
    """
    error = 'For loop call must be in form \'for $var in expression\', got: ' + control_line
    regex = re.compile(r'for\s+(\${?\S+}?)(?:\s*,\s+(\${?\S+}?))?\s+(in|word_in)\s+(\S.+)')
    match = regex.match(control_line)
    if not match:
        raise exceptions.YamlSyntaxError(error)

    first_var, second_var, iter_type, expr = match.groups()
    control_vars = [get_var_name(first_var)]
    # Second control variable is optional ("for $k, $v in ...").
    if second_var:
        control_vars.append(get_var_name(second_var))
    return (control_vars, iter_type, expr)
[ "def", "parse_for", "(", "control_line", ")", ":", "error", "=", "'For loop call must be in form \\'for $var in expression\\', got: '", "+", "control_line", "regex", "=", "re", ".", "compile", "(", "r'for\\s+(\\${?\\S+}?)(?:\\s*,\\s+(\\${?\\S+}?))?\\s+(in|word_in)\\s+(\\S.+)'", "...
Returns name of loop control variable(s), iteration type (in/word_in) and expression to iterate on. For example: - given "for $i in $foo", returns (['i'], '$foo') - given "for ${i} in $(ls $foo)", returns (['i'], '$(ls $foo)') - given "for $k, $v in $foo", returns (['k', 'v'], '$foo')
[ "Returns", "name", "of", "loop", "control", "variable", "(", "s", ")", "iteration", "type", "(", "in", "/", "word_in", ")", "and", "expression", "to", "iterate", "on", "." ]
python
train
libfuse/python-fuse
fuseparts/subbedopts.py
https://github.com/libfuse/python-fuse/blob/2c088b657ad71faca6975b456f80b7d2c2cea2a7/fuseparts/subbedopts.py#L78-L96
def add(self, opt, val=None):
    """Add a suboption.

    ``opt`` may be ``name`` or ``name=value``.  An inline ``=value``
    conflicts with an explicit ``val`` argument.  A ``False`` value drops
    the option, ``None``/``True`` add a bare flag, anything else stores a
    keyed value.
    """
    name, _sep, inline = opt.partition('=')
    if inline:
        if val is not None:
            raise AttributeError("ambiguous option value")
        val = inline
    # NOTE(review): `val == False` also matches 0/0.0 — behaviour kept
    # from the original implementation.
    if val == False:
        return
    if val in (None, True):
        self.optlist.add(name)
    else:
        self.optdict[name] = val
[ "def", "add", "(", "self", ",", "opt", ",", "val", "=", "None", ")", ":", "ov", "=", "opt", ".", "split", "(", "'='", ",", "1", ")", "o", "=", "ov", "[", "0", "]", "v", "=", "len", "(", "ov", ")", ">", "1", "and", "ov", "[", "1", "]", ...
Add a suboption.
[ "Add", "a", "suboption", "." ]
python
train
prompt-toolkit/pymux
pymux/commands/commands.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L658-L663
def show_buffer(pymux, variables):
    """
    Display the clipboard content.
    """
    clipboard_text = get_app().clipboard.get_data().text
    layout = pymux.get_client_state().layout_manager
    layout.display_popup('show-buffer', clipboard_text)
[ "def", "show_buffer", "(", "pymux", ",", "variables", ")", ":", "text", "=", "get_app", "(", ")", ".", "clipboard", ".", "get_data", "(", ")", ".", "text", "pymux", ".", "get_client_state", "(", ")", ".", "layout_manager", ".", "display_popup", "(", "'sh...
Display the clipboard content.
[ "Display", "the", "clipboard", "content", "." ]
python
train
kajala/django-jutil
jutil/cache.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/cache.py#L40-L46
def update_cached_fields_pre_save(self, update_fields: list):
    """
    Call on pre_save signal for objects (to automatically refresh on save).
    :param update_fields: list of fields to update
    """
    # Refresh only persisted objects being saved without an explicit
    # field list; a targeted save leaves cached fields alone.
    if update_fields is None and self.id:
        self.update_cached_fields(commit=False, exceptions=False)
[ "def", "update_cached_fields_pre_save", "(", "self", ",", "update_fields", ":", "list", ")", ":", "if", "self", ".", "id", "and", "update_fields", "is", "None", ":", "self", ".", "update_cached_fields", "(", "commit", "=", "False", ",", "exceptions", "=", "F...
Call on pre_save signal for objects (to automatically refresh on save). :param update_fields: list of fields to update
[ "Call", "on", "pre_save", "signal", "for", "objects", "(", "to", "automatically", "refresh", "on", "save", ")", ".", ":", "param", "update_fields", ":", "list", "of", "fields", "to", "update" ]
python
train
miyakogi/wdom
wdom/element.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L175-L186
def html(self) -> str:
    """Return string representation of this.

    Used in start tag of HTML representation of the Element node.
    """
    # Boolean attributes are serialized as a bare name with no value.
    if self._owner and self.name in self._owner._special_attr_boolean:
        return self.name
    value = self.value
    if isinstance(value, str):
        value = html_.escape(value)
    return '{name}="{value}"'.format(name=self.name, value=value)
[ "def", "html", "(", "self", ")", "->", "str", ":", "if", "self", ".", "_owner", "and", "self", ".", "name", "in", "self", ".", "_owner", ".", "_special_attr_boolean", ":", "return", "self", ".", "name", "else", ":", "value", "=", "self", ".", "value"...
Return string representation of this. Used in start tag of HTML representation of the Element node.
[ "Return", "string", "representation", "of", "this", "." ]
python
train
briancappello/flask-unchained
flask_unchained/clips_pattern.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/clips_pattern.py#L344-L391
def pluralize(word, pos=NOUN, custom=None, classical=True):
    """
    Returns the plural of a given word, e.g., child => children.
    Handles nouns and adjectives, using classical inflection by default
    (i.e., where "matrix" pluralizes to "matrices" and not "matrixes").
    The custom dictionary is for user-defined replacements.
    """
    if custom and word in custom:
        return custom[word]

    # Recurse genitives.
    # Remove the apostrophe and any trailing -s,
    # form the plural of the resultant noun, and then append an apostrophe (dog's => dogs').
    if word.endswith(("'", "'s")):
        # BUGFIX: the original used word.rstrip("'s"), which strips *any*
        # trailing run of "'" and "s" characters ("boss's" -> "bo").
        # Strip exactly the genitive suffix instead.
        if word.endswith("'s"):
            w = word[:-2]
        else:
            w = word[:-1]
            if w.endswith("s"):
                w = w[:-1]
        w = pluralize(w, pos, custom, classical)
        if w.endswith("s"):
            return w + "'"
        else:
            return w + "'s"

    # Recurse compound words
    # (e.g., Postmasters General, mothers-in-law, Roman deities).
    w = word.replace("-", " ").split(" ")
    if len(w) > 1:
        # NOTE(review): Python precedence makes this parse as
        # `general or (General and not-in-category)` — preserved from the
        # original (and upstream inflect); confirm before "fixing".
        if w[1] == "general" or \
           w[1] == "General" and \
           w[0] not in plural_categories["general-generals"]:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        elif w[1] in plural_prepositions:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        else:
            return word.replace(w[-1], pluralize(w[-1], pos, custom, classical))

    # Only a very few number of adjectives inflect.
    n = range(len(plural_rules))
    if pos.startswith(ADJECTIVE):
        n = [0, 1]

    # Apply pluralization rules.
    for i in n:
        for suffix, inflection, category, classic in plural_rules[i]:
            # A general rule, or a classic rule in classical mode.
            if category is None:
                if not classic or (classic and classical):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
            # A rule pertaining to a specific category of words.
            if category is not None:
                if word in plural_categories[category] and (not classic or (classic and classical)):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
    return word
[ "def", "pluralize", "(", "word", ",", "pos", "=", "NOUN", ",", "custom", "=", "None", ",", "classical", "=", "True", ")", ":", "if", "custom", "and", "word", "in", "custom", ":", "return", "custom", "[", "word", "]", "# Recurse genitives.", "# Remove the...
Returns the plural of a given word, e.g., child => children. Handles nouns and adjectives, using classical inflection by default (i.e., where "matrix" pluralizes to "matrices" and not "matrixes"). The custom dictionary is for user-defined replacements.
[ "Returns", "the", "plural", "of", "a", "given", "word", "e", ".", "g", ".", "child", "=", ">", "children", ".", "Handles", "nouns", "and", "adjectives", "using", "classical", "inflection", "by", "default", "(", "i", ".", "e", ".", "where", "matrix", "p...
python
train
spacetelescope/drizzlepac
drizzlepac/imgclasses.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imgclasses.py#L340-L368
def buildSkyCatalog(self):
    """ Convert sky catalog for all chips into a single catalog for
        the entire field-of-view of this image.
    """
    self.all_radec = None
    self.all_radec_orig = None
    ralist, declist, fluxlist, idlist = [], [], [], []
    for scichip in self.chip_catalogs:
        chip_cat = self.chip_catalogs[scichip]['catalog']
        skycat = chip_cat.radec
        xycat = chip_cat.xypos
        if skycat is None:
            continue
        ralist.append(skycat[0])
        declist.append(skycat[1])
        if xycat is not None and len(xycat) > 2:
            fluxlist.append(xycat[2])
            idlist.append(xycat[3])
        elif len(skycat) > 2:
            fluxlist.append(skycat[2])
            idlist.append(skycat[3])
        else:
            # No flux information: use a sentinel flux and serial ids.
            fluxlist.append([999.0] * len(skycat[0]))
            idlist.append(np.arange(len(skycat[0])))
    self.all_radec = [np.concatenate(ralist), np.concatenate(declist),
                      np.concatenate(fluxlist), np.concatenate(idlist)]
    self.all_radec_orig = copy.deepcopy(self.all_radec)
[ "def", "buildSkyCatalog", "(", "self", ")", ":", "self", ".", "all_radec", "=", "None", "self", ".", "all_radec_orig", "=", "None", "ralist", "=", "[", "]", "declist", "=", "[", "]", "fluxlist", "=", "[", "]", "idlist", "=", "[", "]", "for", "scichip...
Convert sky catalog for all chips into a single catalog for the entire field-of-view of this image.
[ "Convert", "sky", "catalog", "for", "all", "chips", "into", "a", "single", "catalog", "for", "the", "entire", "field", "-", "of", "-", "view", "of", "this", "image", "." ]
python
train
kejbaly2/metrique
metrique/cubes/sqldata/generic.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/cubes/sqldata/generic.py#L228-L253
def get_changed_oids(self, last_update=None):
    '''
    Returns a list of object ids of those objects that have changed since
    `mtime`. This method expects that the changed objects can be
    determined based on the `delta_mtime` property of the cube which
    specifies the field name that carries the time of the last change.

    This method is expected to be overriden in the cube if it is not
    possible to use a single field to determine the time of the change and
    if another approach of determining the oids is available. In such
    cubes the `delta_mtime` property is expected to be set to `True`.

    If `delta_mtime` evaluates to False then this method is not expected
    to be used.

    :param mtime: datetime string used as 'change since date'
    '''
    mtime_columns = self.lconfig.get('delta_mtime', [])
    # Without both a configured mtime column and a reference time there
    # is nothing to filter on.
    if not (mtime_columns and last_update):
        return []
    where = ["%s >= %s" % (column, last_update)
             for column in str2list(mtime_columns)]
    return self.sql_get_oids(where)
[ "def", "get_changed_oids", "(", "self", ",", "last_update", "=", "None", ")", ":", "mtime_columns", "=", "self", ".", "lconfig", ".", "get", "(", "'delta_mtime'", ",", "[", "]", ")", "if", "not", "(", "mtime_columns", "and", "last_update", ")", ":", "ret...
Returns a list of object ids of those objects that have changed since `mtime`. This method expects that the changed objects can be determined based on the `delta_mtime` property of the cube which specifies the field name that carries the time of the last change. This method is expected to be overriden in the cube if it is not possible to use a single field to determine the time of the change and if another approach of determining the oids is available. In such cubes the `delta_mtime` property is expected to be set to `True`. If `delta_mtime` evaluates to False then this method is not expected to be used. :param mtime: datetime string used as 'change since date'
[ "Returns", "a", "list", "of", "object", "ids", "of", "those", "objects", "that", "have", "changed", "since", "mtime", ".", "This", "method", "expects", "that", "the", "changed", "objects", "can", "be", "determined", "based", "on", "the", "delta_mtime", "prop...
python
train
xtrementl/focus
focus/parser/lexer.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/lexer.py#L174-L197
def _tokenize(self, stream): """ Tokenizes data from the provided string. ``stream`` ``File``-like object. """ self._tokens = [] self._reset_token() self._state = self.ST_TOKEN for chunk in iter(lambda: stream.read(8192), ''): for char in chunk: if char in self.NEWLINES: self._process_newline(char) else: state = self._state if state == self.ST_STRING: self._process_string(char) elif state == self.ST_TOKEN: self._process_tokens(char)
[ "def", "_tokenize", "(", "self", ",", "stream", ")", ":", "self", ".", "_tokens", "=", "[", "]", "self", ".", "_reset_token", "(", ")", "self", ".", "_state", "=", "self", ".", "ST_TOKEN", "for", "chunk", "in", "iter", "(", "lambda", ":", "stream", ...
Tokenizes data from the provided string. ``stream`` ``File``-like object.
[ "Tokenizes", "data", "from", "the", "provided", "string", "." ]
python
train
canonical-ols/acceptable
acceptable/lint.py
https://github.com/canonical-ols/acceptable/blob/6ccbe969078166a5315d857da38b59b43b29fadc/acceptable/lint.py#L72-L95
def metadata_lint(old, new, locations):
    """Run the linter over the new metadata, comparing to the old."""
    # Work on copies so the callers' metadata is never mutated.
    old = old.copy()
    new = new.copy()
    # Version entries are bookkeeping, not API surface — ignore them.
    old.pop('$version', None)
    new.pop('$version', None)

    for removed_group in old:
        if removed_group not in new:
            yield LintError('', 'api group removed', api_name=removed_group)

    for group_name, new_group in new.items():
        old_group = old.get(group_name, {'apis': {}})
        for name, api in new_group['apis'].items():
            old_api = old_group['apis'].get(name, {})
            api_locations = locations[name]
            for message in lint_api(name, old_api, api, api_locations):
                message.api_name = name
                if message.location is None:
                    message.location = api_locations['api']
                yield message
[ "def", "metadata_lint", "(", "old", ",", "new", ",", "locations", ")", ":", "# ensure we don't modify the metadata", "old", "=", "old", ".", "copy", "(", ")", "new", "=", "new", ".", "copy", "(", ")", "# remove version info", "old", ".", "pop", "(", "'$ver...
Run the linter over the new metadata, comparing to the old.
[ "Run", "the", "linter", "over", "the", "new", "metadata", "comparing", "to", "the", "old", "." ]
python
train
rmed/flask-waffleconf
flask_waffleconf/core.py
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/core.py#L104-L144
def update_db(self, new_values):
    """Update database values and application configuration.

    The provided keys must be defined in the ``WAFFLE_CONFS`` setting.

    Arguments:
        new_values (dict): dict of configuration variables and their
            values

        The dict has the following structure:

            {
                'MY_CONFIG_VAR' : <CONFIG_VAL>,
                'MY_CONFIG_VAR1' : <CONFIG_VAL1>
            }
    """
    confs = self.app.config.get('WAFFLE_CONFS', {})
    to_update = {}

    for key, value in new_values.items():
        # Reserved WAFFLE_* keys and keys not declared in WAFFLE_CONFS
        # are silently skipped.
        if key.startswith('WAFFLE_') or key not in confs.keys():
            continue
        self.configstore.put(key, util.serialize(value))
        self.configstore.commit()
        to_update[key] = value

    if not to_update:
        return
    self.app.config.update(to_update)

    # Notify other processes
    if self.app.config.get('WAFFLE_MULTIPROC', False):
        self.notify(self)
[ "def", "update_db", "(", "self", ",", "new_values", ")", ":", "confs", "=", "self", ".", "app", ".", "config", ".", "get", "(", "'WAFFLE_CONFS'", ",", "{", "}", ")", "to_update", "=", "{", "}", "for", "key", "in", "new_values", ".", "keys", "(", ")...
Update database values and application configuration. The provided keys must be defined in the ``WAFFLE_CONFS`` setting. Arguments: new_values (dict): dict of configuration variables and their values The dict has the following structure: { 'MY_CONFIG_VAR' : <CONFIG_VAL>, 'MY_CONFIG_VAR1' : <CONFIG_VAL1> }
[ "Update", "database", "values", "and", "application", "configuration", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/structure_matcher.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_matcher.py#L1033-L1074
def get_s2_like_s1(self, struct1, struct2, include_ignored_species=True):
    """
    Performs transformations on struct2 to put it in a basis similar to
    struct1 (without changing any of the inter-site distances)

    Args:
        struct1 (Structure): Reference structure
        struct2 (Structure): Structure to transform.
        include_ignored_species (bool): Defaults to True,
            the ignored_species is also transformed to the struct1
            lattice orientation, though obviously there is no direct
            matching to existing sites.

    Returns:
        A structure object similar to struct1, obtained by making a
        supercell, sorting, and translating struct2.
    """
    s1, s2 = self._process_species([struct1, struct2])
    trans = self.get_transformation(s1, s2)
    if trans is None:
        return None
    sc, t, mapping = trans

    # Matched sites first, then any sites that were ignored for matching.
    sites = list(s2)
    sites.extend(site for site in struct2 if site not in s2)

    temp = Structure.from_sites(sites)
    temp.make_supercell(sc)
    temp.translate_sites(list(range(len(temp))), t)

    # translate sites to correct unit cell
    for i, j in enumerate(mapping[:len(s1)]):
        if j is not None:
            vec = np.round(struct1[i].frac_coords - temp[j].frac_coords)
            temp.translate_sites(j, vec, to_unit_cell=False)

    sites = [temp.sites[i] for i in mapping if i is not None]

    if include_ignored_species:
        start = int(round(len(temp) / len(struct2) * len(s2)))
        sites.extend(temp.sites[start:])

    return Structure.from_sites(sites)
[ "def", "get_s2_like_s1", "(", "self", ",", "struct1", ",", "struct2", ",", "include_ignored_species", "=", "True", ")", ":", "s1", ",", "s2", "=", "self", ".", "_process_species", "(", "[", "struct1", ",", "struct2", "]", ")", "trans", "=", "self", ".", ...
Performs transformations on struct2 to put it in a basis similar to struct1 (without changing any of the inter-site distances) Args: struct1 (Structure): Reference structure struct2 (Structure): Structure to transform. include_ignored_species (bool): Defaults to True, the ignored_species is also transformed to the struct1 lattice orientation, though obviously there is no direct matching to existing sites. Returns: A structure object similar to struct1, obtained by making a supercell, sorting, and translating struct2.
[ "Performs", "transformations", "on", "struct2", "to", "put", "it", "in", "a", "basis", "similar", "to", "struct1", "(", "without", "changing", "any", "of", "the", "inter", "-", "site", "distances", ")" ]
python
train
tanghaibao/jcvi
jcvi/assembly/goldenpath.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/goldenpath.py#L1103-L1138
def agp(args):
    """
    %prog agp tpffile certificatefile agpfile

    Build agpfile from overlap certificates.

    Tiling Path File (tpf) is a file that lists the component and the gaps.
    It is a three-column file similar to below, also see jcvi.formats.agp.tpf():

    telomere chr1 na
    AC229737.8 chr1 +
    AC202463.29 chr1 +

    Note: the orientation of the component is only used as a guide. If the
    orientation is derivable from a terminal overlap, it will use it
    regardless of what the tpf says.

    See jcvi.assembly.goldenpath.certificate() which generates a list of
    certificates based on agpfile. At first, it seems counter-productive to
    convert first agp to certificates then certificates back to agp.

    The certificates provide a way to edit the overlap information, so that
    the agpfile can be corrected (without changing agpfile directly).
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(agp.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    tpffile, certificatefile, agpfile = args
    # tpf column 3 carries the suggested orientation for each component.
    orientationguide = DictFile(tpffile, valuepos=2)
    certificate = Certificate(certificatefile)
    certificate.write_AGP(agpfile, orientationguide=orientationguide)
[ "def", "agp", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "agp", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(",...
%prog agp tpffile certificatefile agpfile Build agpfile from overlap certificates. Tiling Path File (tpf) is a file that lists the component and the gaps. It is a three-column file similar to below, also see jcvi.formats.agp.tpf(): telomere chr1 na AC229737.8 chr1 + AC202463.29 chr1 + Note: the orientation of the component is only used as a guide. If the orientation is derivable from a terminal overlap, it will use it regardless of what the tpf says. See jcvi.assembly.goldenpath.certificate() which generates a list of certificates based on agpfile. At first, it seems counter-productive to convert first agp to certificates then certificates back to agp. The certificates provide a way to edit the overlap information, so that the agpfile can be corrected (without changing agpfile directly).
[ "%prog", "agp", "tpffile", "certificatefile", "agpfile" ]
python
train
controversial/livejson
livejson.py
https://github.com/controversial/livejson/blob/91021de60903d2d8b2cfb7d8d8910bcf27ec003b/livejson.py#L214-L233
def data(self, data): """Overwrite the file with new data. You probably shouldn't do this yourself, it's easy to screw up your whole file with this.""" if self.is_caching: self.cache = data else: fcontents = self.file_contents with open(self.path, "w") as f: try: # Write the file. Keep user settings about indentation, etc indent = self.indent if self.pretty else None json.dump(data, f, sort_keys=self.sort_keys, indent=indent) except Exception as e: # Rollback to prevent data loss f.seek(0) f.truncate() f.write(fcontents) # And re-raise the exception raise e self._updateType()
[ "def", "data", "(", "self", ",", "data", ")", ":", "if", "self", ".", "is_caching", ":", "self", ".", "cache", "=", "data", "else", ":", "fcontents", "=", "self", ".", "file_contents", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "a...
Overwrite the file with new data. You probably shouldn't do this yourself, it's easy to screw up your whole file with this.
[ "Overwrite", "the", "file", "with", "new", "data", ".", "You", "probably", "shouldn", "t", "do", "this", "yourself", "it", "s", "easy", "to", "screw", "up", "your", "whole", "file", "with", "this", "." ]
python
valid
mbj4668/pyang
pyang/statements.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/statements.py#L3107-L3139
def mk_path_str(stmt, with_prefixes=False, prefix_onchange=False, prefix_to_module=False, resolve_top_prefix_to_module=False): """Returns the XPath path of the node. with_prefixes indicates whether or not to prefix every node. prefix_onchange modifies the behavior of with_prefixes and only adds prefixes when the prefix changes mid-XPath. prefix_to_module replaces prefixes with the module name of the prefix. resolve_top_prefix_to_module resolves the module-level prefix to the module name. Prefixes may be included in the path if the prefix changes mid-path. """ resolved_names = mk_path_list(stmt) xpath_elements = [] last_prefix = None for index, resolved_name in enumerate(resolved_names): module_name, prefix, node_name = resolved_name xpath_element = node_name if with_prefixes or (prefix_onchange and prefix != last_prefix): new_prefix = prefix if (prefix_to_module or (index == 0 and resolve_top_prefix_to_module)): new_prefix = module_name xpath_element = '%s:%s' % (new_prefix, node_name) xpath_elements.append(xpath_element) last_prefix = prefix return '/%s' % '/'.join(xpath_elements)
[ "def", "mk_path_str", "(", "stmt", ",", "with_prefixes", "=", "False", ",", "prefix_onchange", "=", "False", ",", "prefix_to_module", "=", "False", ",", "resolve_top_prefix_to_module", "=", "False", ")", ":", "resolved_names", "=", "mk_path_list", "(", "stmt", "...
Returns the XPath path of the node. with_prefixes indicates whether or not to prefix every node. prefix_onchange modifies the behavior of with_prefixes and only adds prefixes when the prefix changes mid-XPath. prefix_to_module replaces prefixes with the module name of the prefix. resolve_top_prefix_to_module resolves the module-level prefix to the module name. Prefixes may be included in the path if the prefix changes mid-path.
[ "Returns", "the", "XPath", "path", "of", "the", "node", ".", "with_prefixes", "indicates", "whether", "or", "not", "to", "prefix", "every", "node", "." ]
python
train
fatihsucu/pyzomato
pyzomato/pyzomato.py
https://github.com/fatihsucu/pyzomato/blob/91c805bac8a49c808d497b7b0b6222a48f2d1324/pyzomato/pyzomato.py#L79-L97
def getEstablishments(self, city_id, **kwargs): """ :param city_id: id of the city for which collections are needed :param lat: latitude :param lon: longitude Get a list of restaurant types in a city. The location/City input can be provided in the following ways - Using Zomato City ID - Using coordinates of any location within a city List of all restaurants categorized under a particular restaurant type can obtained using /Search API with Establishment ID and location details as inputs """ params = {"city_id": city_id} optional_params = ["lat", "lon"] for key in optional_params: if key in kwargs: params[key] = kwargs[key] establishments = self.api.get("/establishments", params) return establishments
[ "def", "getEstablishments", "(", "self", ",", "city_id", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "\"city_id\"", ":", "city_id", "}", "optional_params", "=", "[", "\"lat\"", ",", "\"lon\"", "]", "for", "key", "in", "optional_params", ":", "...
:param city_id: id of the city for which collections are needed :param lat: latitude :param lon: longitude Get a list of restaurant types in a city. The location/City input can be provided in the following ways - Using Zomato City ID - Using coordinates of any location within a city List of all restaurants categorized under a particular restaurant type can obtained using /Search API with Establishment ID and location details as inputs
[ ":", "param", "city_id", ":", "id", "of", "the", "city", "for", "which", "collections", "are", "needed", ":", "param", "lat", ":", "latitude", ":", "param", "lon", ":", "longitude", "Get", "a", "list", "of", "restaurant", "types", "in", "a", "city", "....
python
test
andreikop/qutepart
qutepart/syntax/parser.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/syntax/parser.py#L468-L477
def _matchPattern(regExp, string): """Try to match pattern. Returns tuple (whole match, groups) or (None, None) Python function, used by C code """ match = regExp.match(string) if match is not None and match.group(0): return match.group(0), (match.group(0), ) + match.groups() else: return None, None
[ "def", "_matchPattern", "(", "regExp", ",", "string", ")", ":", "match", "=", "regExp", ".", "match", "(", "string", ")", "if", "match", "is", "not", "None", "and", "match", ".", "group", "(", "0", ")", ":", "return", "match", ".", "group", "(", "0...
Try to match pattern. Returns tuple (whole match, groups) or (None, None) Python function, used by C code
[ "Try", "to", "match", "pattern", ".", "Returns", "tuple", "(", "whole", "match", "groups", ")", "or", "(", "None", "None", ")", "Python", "function", "used", "by", "C", "code" ]
python
train
zhanglab/psamm
psamm/lpsolver/cplex.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/lpsolver/cplex.py#L288-L341
def _reset_problem_type(self): """Reset problem type to whatever is appropriate.""" # Only need to reset the type after the first solve. This also works # around a bug in Cplex where get_num_binary() is some rare cases # causes a segfault. if self._solve_count > 0: integer_count = 0 for func in (self._cp.variables.get_num_binary, self._cp.variables.get_num_integer, self._cp.variables.get_num_semicontinuous, self._cp.variables.get_num_semiinteger): integer_count += func() integer = integer_count > 0 quad_constr = self._cp.quadratic_constraints.get_num() > 0 quad_obj = self._cp.objective.get_num_quadratic_variables() > 0 if not integer: if quad_constr: new_type = self._cp.problem_type.QCP elif quad_obj: new_type = self._cp.problem_type.QP else: new_type = self._cp.problem_type.LP else: if quad_constr: new_type = self._cp.problem_type.MIQCP elif quad_obj: new_type = self._cp.problem_type.MIQP else: new_type = self._cp.problem_type.MILP logger.debug('Setting problem type to {}...'.format( self._cp.problem_type[new_type])) self._cp.set_problem_type(new_type) else: logger.debug('Problem type is {}'.format( self._cp.problem_type[self._cp.get_problem_type()])) # Force QP/MIQP solver to look for global optimum. We set it here only # for QP/MIQP problems to avoid the warnings generated for other # problem types when this parameter is set. quad_obj = self._cp.objective.get_num_quadratic_variables() > 0 if hasattr(self._cp.parameters, 'optimalitytarget'): target_param = self._cp.parameters.optimalitytarget else: target_param = self._cp.parameters.solutiontarget if quad_obj: target_param.set(target_param.values.optimal_global) else: target_param.set(target_param.values.auto)
[ "def", "_reset_problem_type", "(", "self", ")", ":", "# Only need to reset the type after the first solve. This also works", "# around a bug in Cplex where get_num_binary() is some rare cases", "# causes a segfault.", "if", "self", ".", "_solve_count", ">", "0", ":", "integer_count",...
Reset problem type to whatever is appropriate.
[ "Reset", "problem", "type", "to", "whatever", "is", "appropriate", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L464-L477
def get_game_for_worker(map_name, directory_id): """Get game for the given worker (directory) id.""" if map_name == "v100unfriendly": games = ["chopper_command", "boxing", "asterix", "seaquest"] worker_per_game = 5 elif map_name == "human_nice": games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE worker_per_game = 5 else: raise ValueError("Unknown worker to game map name: %s" % map_name) games.sort() game_id = (directory_id - 1) // worker_per_game tf.logging.info("Getting game %d from %s." % (game_id, games)) return games[game_id]
[ "def", "get_game_for_worker", "(", "map_name", ",", "directory_id", ")", ":", "if", "map_name", "==", "\"v100unfriendly\"", ":", "games", "=", "[", "\"chopper_command\"", ",", "\"boxing\"", ",", "\"asterix\"", ",", "\"seaquest\"", "]", "worker_per_game", "=", "5",...
Get game for the given worker (directory) id.
[ "Get", "game", "for", "the", "given", "worker", "(", "directory", ")", "id", "." ]
python
train
astrocatalogs/astrocats
astrocats/catalog/entry.py
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/entry.py#L703-L710
def add_photometry(self, compare_to_existing=True, **kwargs): """Add a `Photometry` instance to this entry.""" self._add_cat_dict( Photometry, self._KEYS.PHOTOMETRY, compare_to_existing=compare_to_existing, **kwargs) return
[ "def", "add_photometry", "(", "self", ",", "compare_to_existing", "=", "True", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_add_cat_dict", "(", "Photometry", ",", "self", ".", "_KEYS", ".", "PHOTOMETRY", ",", "compare_to_existing", "=", "compare_to_existi...
Add a `Photometry` instance to this entry.
[ "Add", "a", "Photometry", "instance", "to", "this", "entry", "." ]
python
train
kakwa/ldapcherry
ldapcherry/roles.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/roles.py#L64-L83
def _flatten(self, roles=None, groups=None): """ flatten a (semi) nest roles structure""" if roles is None: roles_in = copy.deepcopy(self.roles_raw) else: roles_in = roles for roleid in roles_in: role = roles_in[roleid] if groups is not None: role['backends_groups'] = self._merge_groups( [role['backends_groups'], groups], ) if 'subroles' in role: self._flatten( role['subroles'], role['backends_groups'], ) del role['subroles'] self.flatten[roleid] = role
[ "def", "_flatten", "(", "self", ",", "roles", "=", "None", ",", "groups", "=", "None", ")", ":", "if", "roles", "is", "None", ":", "roles_in", "=", "copy", ".", "deepcopy", "(", "self", ".", "roles_raw", ")", "else", ":", "roles_in", "=", "roles", ...
flatten a (semi) nest roles structure
[ "flatten", "a", "(", "semi", ")", "nest", "roles", "structure" ]
python
train
KelSolaar/Foundations
foundations/parsers.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/parsers.py#L1317-L1349
def filter_values(self, pattern, flags=0): """ | Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list """ values = [] if not self.__elements: return values for item in foundations.walkers.dictionaries_walker(self.__elements): path, element, value = item if re.search(pattern, element, flags): values.append(value) return values
[ "def", "filter_values", "(", "self", ",", "pattern", ",", "flags", "=", "0", ")", ":", "values", "=", "[", "]", "if", "not", "self", ".", "__elements", ":", "return", "values", "for", "item", "in", "foundations", ".", "walkers", ".", "dictionaries_walker...
| Filters the :meth:`PlistFileParser.elements` class property elements using given pattern. | Will return a list of matching elements values, if you want to get only one element value, use the :meth:`PlistFileParser.get_value` method instead. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.filter_values(r"String A") [u'My Value A'] >>> plist_file_parser.filter_values(r"String.*") [u'My Value C', u'My Value B', u'My Value A'] :param pattern: Regex filtering pattern. :type pattern: unicode :param flags: Regex flags. :type flags: int :return: Values. :rtype: list
[ "|", "Filters", "the", ":", "meth", ":", "PlistFileParser", ".", "elements", "class", "property", "elements", "using", "given", "pattern", ".", "|", "Will", "return", "a", "list", "of", "matching", "elements", "values", "if", "you", "want", "to", "get", "o...
python
train
C4ptainCrunch/ics.py
ics/timeline.py
https://github.com/C4ptainCrunch/ics.py/blob/bd918ec7453a7cf73a906cdcc78bd88eb4bab71b/ics/timeline.py#L55-L67
def overlapping(self, start, stop): """Iterates (in chronological order) over every event that has an intersection with the timespan between `start` and `stop` Args: start : (Arrow object) stop : (Arrow object) """ for event in self: if ((start <= event.begin <= stop # if start is between the bonds or start <= event.end <= stop) # or stop is between the bonds or event.begin <= start and event.end >= stop): # or event is a superset of [start,stop] yield event
[ "def", "overlapping", "(", "self", ",", "start", ",", "stop", ")", ":", "for", "event", "in", "self", ":", "if", "(", "(", "start", "<=", "event", ".", "begin", "<=", "stop", "# if start is between the bonds", "or", "start", "<=", "event", ".", "end", ...
Iterates (in chronological order) over every event that has an intersection with the timespan between `start` and `stop` Args: start : (Arrow object) stop : (Arrow object)
[ "Iterates", "(", "in", "chronological", "order", ")", "over", "every", "event", "that", "has", "an", "intersection", "with", "the", "timespan", "between", "start", "and", "stop" ]
python
train
PaulHancock/Aegean
AegeanTools/catalogs.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L145-L194
def save_catalog(filename, catalog, meta=None, prefix=None): """ Save a catalogue of sources using filename as a model. Meta data can be written to some file types (fits, votable). Each type of source will be in a separate file: - base_comp.ext :class:`AegeanTools.models.OutputSource` - base_isle.ext :class:`AegeanTools.models.IslandSource` - base_simp.ext :class:`AegeanTools.models.SimpleSource` Where filename = `base.ext` Parameters ---------- filename : str Name of file to write, format is determined by extension. catalog : list A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict Meta data to be written to the output file. Support for metadata depends on file type. Returns ------- None """ ascii_table_formats = {'csv': 'csv', 'tab': 'tab', 'tex': 'latex', 'html': 'html'} # .ann and .reg are handled by me meta = update_meta_data(meta) extension = os.path.splitext(filename)[1][1:].lower() if extension in ['ann', 'reg']: writeAnn(filename, catalog, extension) elif extension in ['db', 'sqlite']: writeDB(filename, catalog, meta) elif extension in ['hdf5', 'fits', 'vo', 'vot', 'xml']: write_catalog(filename, catalog, extension, meta, prefix=prefix) elif extension in ascii_table_formats.keys(): write_catalog(filename, catalog, fmt=ascii_table_formats[extension], meta=meta, prefix=prefix) else: log.warning("extension not recognised {0}".format(extension)) log.warning("You get tab format") write_catalog(filename, catalog, fmt='tab', prefix=prefix) return
[ "def", "save_catalog", "(", "filename", ",", "catalog", ",", "meta", "=", "None", ",", "prefix", "=", "None", ")", ":", "ascii_table_formats", "=", "{", "'csv'", ":", "'csv'", ",", "'tab'", ":", "'tab'", ",", "'tex'", ":", "'latex'", ",", "'html'", ":"...
Save a catalogue of sources using filename as a model. Meta data can be written to some file types (fits, votable). Each type of source will be in a separate file: - base_comp.ext :class:`AegeanTools.models.OutputSource` - base_isle.ext :class:`AegeanTools.models.IslandSource` - base_simp.ext :class:`AegeanTools.models.SimpleSource` Where filename = `base.ext` Parameters ---------- filename : str Name of file to write, format is determined by extension. catalog : list A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict Meta data to be written to the output file. Support for metadata depends on file type. Returns ------- None
[ "Save", "a", "catalogue", "of", "sources", "using", "filename", "as", "a", "model", ".", "Meta", "data", "can", "be", "written", "to", "some", "file", "types", "(", "fits", "votable", ")", "." ]
python
train
pvizeli/pydroid-ipcam
pydroid_ipcam.py
https://github.com/pvizeli/pydroid-ipcam/blob/42275a77fafa01e027d0752664f382b813a6f9b0/pydroid_ipcam.py#L238-L246
def set_orientation(self, orientation='landscape'): """Set the video orientation. Return a coroutine. """ if orientation not in ALLOWED_ORIENTATIONS: _LOGGER.debug('%s is not a valid orientation', orientation) return False return self.change_setting('orientation', orientation)
[ "def", "set_orientation", "(", "self", ",", "orientation", "=", "'landscape'", ")", ":", "if", "orientation", "not", "in", "ALLOWED_ORIENTATIONS", ":", "_LOGGER", ".", "debug", "(", "'%s is not a valid orientation'", ",", "orientation", ")", "return", "False", "re...
Set the video orientation. Return a coroutine.
[ "Set", "the", "video", "orientation", "." ]
python
train
sliem/barrett
barrett/util.py
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/barrett/util.py#L6-L45
def threenum(h5file, var, post_col='mult'): """ Calculates the three number summary for a variable. The three number summary is the minimum, maximum and the mean of the data. Traditionally one would summerise data with the five number summary: max, min, 1st, 2nd (median), 3rd quartile. But quantiles are hard to calculate without sorting the data which hard to do out-of-core. """ f = h5py.File(h5file, 'r') d = f[var] w = f[post_col] s = d.chunks[0] n = d.shape[0] maxval = -np.abs(d[0]) minval = np.abs(d[0]) total = 0 wsum = 0 for x in range(0, n, s): aN = ~np.logical_or(np.isnan(d[x:x+s]), np.isinf(d[x:x+s])) d_c = d[x:x+s][aN] w_c = w[x:x+s][aN] chunk_max = np.max(d_c) chunk_min = np.min(d_c) maxval = chunk_max if chunk_max > maxval else maxval minval = chunk_min if chunk_min < minval else minval total += np.sum(w_c*d_c) wsum += np.sum(w_c) f.close() mean = total/float(wsum) return (minval, maxval, mean)
[ "def", "threenum", "(", "h5file", ",", "var", ",", "post_col", "=", "'mult'", ")", ":", "f", "=", "h5py", ".", "File", "(", "h5file", ",", "'r'", ")", "d", "=", "f", "[", "var", "]", "w", "=", "f", "[", "post_col", "]", "s", "=", "d", ".", ...
Calculates the three number summary for a variable. The three number summary is the minimum, maximum and the mean of the data. Traditionally one would summerise data with the five number summary: max, min, 1st, 2nd (median), 3rd quartile. But quantiles are hard to calculate without sorting the data which hard to do out-of-core.
[ "Calculates", "the", "three", "number", "summary", "for", "a", "variable", "." ]
python
train
ultrabug/py3status
py3status/parse_config.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/parse_config.py#L507-L520
def make_dict(self): """ We are in a dict so get key value pairs until the end token. """ out = {} while True: try: key = self.dict_key() self.separator(separator=":") value = self.value_assign(end_token="]") out[key] = value self.separator(end_token="}") except self.ParseEnd: return out
[ "def", "make_dict", "(", "self", ")", ":", "out", "=", "{", "}", "while", "True", ":", "try", ":", "key", "=", "self", ".", "dict_key", "(", ")", "self", ".", "separator", "(", "separator", "=", "\":\"", ")", "value", "=", "self", ".", "value_assig...
We are in a dict so get key value pairs until the end token.
[ "We", "are", "in", "a", "dict", "so", "get", "key", "value", "pairs", "until", "the", "end", "token", "." ]
python
train
googleapis/gax-python
google/gax/bundling.py
https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gax/bundling.py#L57-L72
def _str_dotted_getattr(obj, name): """Expands extends getattr to allow dots in x to indicate nested objects. Args: obj (object): an object. name (str): a name for a field in the object. Returns: Any: the value of named attribute. Raises: AttributeError: if the named attribute does not exist. """ for part in name.split('.'): obj = getattr(obj, part) return str(obj) if obj else None
[ "def", "_str_dotted_getattr", "(", "obj", ",", "name", ")", ":", "for", "part", "in", "name", ".", "split", "(", "'.'", ")", ":", "obj", "=", "getattr", "(", "obj", ",", "part", ")", "return", "str", "(", "obj", ")", "if", "obj", "else", "None" ]
Expands extends getattr to allow dots in x to indicate nested objects. Args: obj (object): an object. name (str): a name for a field in the object. Returns: Any: the value of named attribute. Raises: AttributeError: if the named attribute does not exist.
[ "Expands", "extends", "getattr", "to", "allow", "dots", "in", "x", "to", "indicate", "nested", "objects", "." ]
python
train
mitsei/dlkit
dlkit/json_/logging_/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/objects.py#L564-L578
def get_parent_log_nodes(self): """Gets the parents of this log. return: (osid.logging.LogNodeList) - the parents of this log *compliance: mandatory -- This method must be implemented.* """ parent_log_nodes = [] for node in self._my_map['parentNodes']: parent_log_nodes.append(LogNode( node._my_map, runtime=self._runtime, proxy=self._proxy, lookup_session=self._lookup_session)) return LogNodeList(parent_log_nodes)
[ "def", "get_parent_log_nodes", "(", "self", ")", ":", "parent_log_nodes", "=", "[", "]", "for", "node", "in", "self", ".", "_my_map", "[", "'parentNodes'", "]", ":", "parent_log_nodes", ".", "append", "(", "LogNode", "(", "node", ".", "_my_map", ",", "runt...
Gets the parents of this log. return: (osid.logging.LogNodeList) - the parents of this log *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "parents", "of", "this", "log", "." ]
python
train
bwohlberg/sporco
sporco/common.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/common.py#L59-L83
def _fix_dynamic_class_lookup(cls, pstfx): """Fix name lookup problem that prevents pickling of dynamically defined classes. Parameters ---------- cls : class Dynamically generated class to which fix is to be applied pstfx : string Postfix that can be used to identify dynamically generated classes that are equivalent by construction """ # Extended name for the class that will be added to the module namespace extnm = '_' + cls.__name__ + '_' + pstfx # Get the module in which the dynamic class is defined mdl = sys.modules[cls.__module__] # Allow lookup of the dynamically generated class within the module via # its extended name setattr(mdl, extnm, cls) # Change the dynamically generated class name to the extended name if hasattr(cls, '__qualname__'): cls.__qualname__ = extnm else: cls.__name__ = extnm
[ "def", "_fix_dynamic_class_lookup", "(", "cls", ",", "pstfx", ")", ":", "# Extended name for the class that will be added to the module namespace", "extnm", "=", "'_'", "+", "cls", ".", "__name__", "+", "'_'", "+", "pstfx", "# Get the module in which the dynamic class is defi...
Fix name lookup problem that prevents pickling of dynamically defined classes. Parameters ---------- cls : class Dynamically generated class to which fix is to be applied pstfx : string Postfix that can be used to identify dynamically generated classes that are equivalent by construction
[ "Fix", "name", "lookup", "problem", "that", "prevents", "pickling", "of", "dynamically", "defined", "classes", "." ]
python
train
deepmind/pysc2
pysc2/bin/agent_remote.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/bin/agent_remote.py#L155-L229
def human(): """Run a host which expects one player to connect remotely.""" run_config = run_configs.get() map_inst = maps.get(FLAGS.map) if not FLAGS.rgb_screen_size or not FLAGS.rgb_minimap_size: logging.info("Use --rgb_screen_size and --rgb_minimap_size if you want rgb " "observations.") ports = portspicker.pick_contiguous_unused_ports(4) # 2 * num_players host_proc = run_config.start(extra_ports=ports, host=FLAGS.host, timeout_seconds=FLAGS.timeout_seconds, window_loc=(50, 50)) client_proc = run_config.start(extra_ports=ports, host=FLAGS.host, connect=False, window_loc=(700, 50)) create = sc_pb.RequestCreateGame( realtime=FLAGS.realtime, local_map=sc_pb.LocalMap(map_path=map_inst.path)) create.player_setup.add(type=sc_pb.Participant) create.player_setup.add(type=sc_pb.Participant) controller = host_proc.controller controller.save_map(map_inst.path, map_inst.data(run_config)) controller.create_game(create) print("-" * 80) print("Join host: agent_remote --map %s --host %s --host_port %s " "--lan_port %s" % (FLAGS.map, FLAGS.host, client_proc.port, ports[0])) print("-" * 80) sys.stdout.flush() join = sc_pb.RequestJoinGame() join.shared_port = 0 # unused join.server_ports.game_port = ports.pop(0) join.server_ports.base_port = ports.pop(0) join.client_ports.add(game_port=ports.pop(0), base_port=ports.pop(0)) join.race = sc2_env.Race[FLAGS.user_race] join.player_name = FLAGS.user_name if FLAGS.render: join.options.raw = True join.options.score = True if FLAGS.feature_screen_size and FLAGS.feature_minimap_size: fl = join.options.feature_layer fl.width = 24 FLAGS.feature_screen_size.assign_to(fl.resolution) FLAGS.feature_minimap_size.assign_to(fl.minimap_resolution) if FLAGS.rgb_screen_size and FLAGS.rgb_minimap_size: FLAGS.rgb_screen_size.assign_to(join.options.render.resolution) FLAGS.rgb_minimap_size.assign_to(join.options.render.minimap_resolution) controller.join_game(join) if FLAGS.render: renderer = renderer_human.RendererHuman( fps=FLAGS.fps, 
render_feature_grid=False) renderer.run(run_configs.get(), controller, max_episodes=1) else: # Still step forward so the Mac/Windows renderer works. try: while True: frame_start_time = time.time() if not FLAGS.realtime: controller.step() obs = controller.observe() if obs.player_result: break time.sleep(max(0, frame_start_time - time.time() + 1 / FLAGS.fps)) except KeyboardInterrupt: pass for p in [host_proc, client_proc]: p.close() portspicker.return_ports(ports)
[ "def", "human", "(", ")", ":", "run_config", "=", "run_configs", ".", "get", "(", ")", "map_inst", "=", "maps", ".", "get", "(", "FLAGS", ".", "map", ")", "if", "not", "FLAGS", ".", "rgb_screen_size", "or", "not", "FLAGS", ".", "rgb_minimap_size", ":",...
Run a host which expects one player to connect remotely.
[ "Run", "a", "host", "which", "expects", "one", "player", "to", "connect", "remotely", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L772-L783
def logical_chassis_fwdl_sanity_output_fwdl_cmd_msg(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity") config = logical_chassis_fwdl_sanity output = ET.SubElement(logical_chassis_fwdl_sanity, "output") fwdl_cmd_msg = ET.SubElement(output, "fwdl-cmd-msg") fwdl_cmd_msg.text = kwargs.pop('fwdl_cmd_msg') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "logical_chassis_fwdl_sanity_output_fwdl_cmd_msg", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "logical_chassis_fwdl_sanity", "=", "ET", ".", "Element", "(", "\"logical_chassis_fwdl_sanity\"", "...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L167-L182
def get_ordered_attributes(self): """ Returns the ordered list of attributes :return: list of strings """ attrs = self.get_attributes() attr_arr = np.array(attrs) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return attr_arr[idx_sort].tolist()
[ "def", "get_ordered_attributes", "(", "self", ")", ":", "attrs", "=", "self", ".", "get_attributes", "(", ")", "attr_arr", "=", "np", ".", "array", "(", "attrs", ")", "poss", "=", "[", "self", ".", "chrPos", ",", "self", ".", "startPos", ",", "self", ...
Returns the ordered list of attributes :return: list of strings
[ "Returns", "the", "ordered", "list", "of", "attributes" ]
python
train
nens/turn
turn/console.py
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/console.py#L43-L60
def get_parser(): """ Return argument parser. """ parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, description=__doc__, ) # connection to redis server parser.add_argument('--host', default='localhost') parser.add_argument('--port', default=6379, type=int) parser.add_argument('--db', default=0, type=int) # tools parser.add_argument('command', choices=(str('follow'), str('lock'), str('reset'), str('status'))) parser.add_argument('resources', nargs='*', metavar='RESOURCE') return parser
[ "def", "get_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ",", "description", "=", "__doc__", ",", ")", "# connection to redis server", "parser", ".", "add_argument", ...
Return argument parser.
[ "Return", "argument", "parser", "." ]
python
train
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L792-L872
def _run2(self): """Workhorse for do_run_2""" if self.check_update_J(): self.update_J() else: if self.check_Broyden_J(): self.update_Broyden_J() if self.check_update_eig_J(): self.update_eig_J() #0. Find _last_residuals, _last_error, etc: _last_residuals = self.calc_residuals().copy() _last_error = 1*self.error _last_vals = self.param_vals.copy() #1. Calculate 2 possible steps delta_params_1 = self.find_LM_updates(self.calc_grad(), do_correct_damping=False) self.decrease_damping() delta_params_2 = self.find_LM_updates(self.calc_grad(), do_correct_damping=False) self.decrease_damping(undo_decrease=True) #2. Check which step is best: er1 = self.update_function(self.param_vals + delta_params_1) er2 = self.update_function(self.param_vals + delta_params_2) triplet = (self.error, er1, er2) best_step = find_best_step(triplet) if best_step == 0: #Both bad steps, put back & increase damping: _ = self.update_function(self.param_vals.copy()) grad = self.calc_grad() CLOG.debug('Bad step, increasing damping') CLOG.debug('%f\t%f\t%f' % triplet) for _try in range(self._max_inner_loop): self.increase_damping() delta_vals = self.find_LM_updates(grad) er_new = self.update_function(self.param_vals + delta_vals) good_step = er_new < self.error if good_step: #Update params, error, break: self.update_param_vals(delta_vals, incremental=True) self.error = er_new CLOG.debug('Sufficiently increased damping') CLOG.debug('%f\t%f' % (triplet[0], self.error)) break else: #for-break-else #Throw a warning, put back the parameters CLOG.warn('Stuck!') self.error = self.update_function(self.param_vals.copy()) elif best_step == 1: #er1 <= er2: good_step = True CLOG.debug('Good step, same damping') CLOG.debug('%f\t%f\t%f' % triplet) #Update to er1 params: er1_1 = self.update_function(self.param_vals + delta_params_1) if np.abs(er1_1 - er1) > 1e-6: raise RuntimeError('Function updates are not exact.') self.update_param_vals(delta_params_1, incremental=True) self.error = er1 elif best_step == 2: #er2 < 
er1: good_step = True self.error = er2 CLOG.debug('Good step, decreasing damping') CLOG.debug('%f\t%f\t%f' % triplet) #-we're already at the correct parameters self.update_param_vals(delta_params_2, incremental=True) self.decrease_damping() #3. Run with current J, damping; update what we need to:: if good_step: self._last_residuals = _last_residuals self._last_error = _last_error self._last_vals = _last_vals self.error self.do_internal_run(initial_count=1)
[ "def", "_run2", "(", "self", ")", ":", "if", "self", ".", "check_update_J", "(", ")", ":", "self", ".", "update_J", "(", ")", "else", ":", "if", "self", ".", "check_Broyden_J", "(", ")", ":", "self", ".", "update_Broyden_J", "(", ")", "if", "self", ...
Workhorse for do_run_2
[ "Workhorse", "for", "do_run_2" ]
python
valid
brentp/cruzdb
cruzdb/__init__.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/__init__.py#L355-L441
def knearest(self, table, chrom_or_feat, start=None, end=None, k=1, _direction=None): """ Return k-nearest features Parameters ---------- table : str or table table against which to query chrom_or_feat : str or feat either a chromosome, e.g. 'chr3' or a feature with .chrom, .start, .end attributes start : int if `chrom_or_feat` is a chrom, then this must be the integer start end : int if `chrom_or_feat` is a chrom, then this must be the integer end k : int number of downstream neighbors to return _direction : (None, "up", "down") internal (don't use this) """ assert _direction in (None, "up", "down") # they sent in a feature if start is None: assert end is None chrom, start, end = chrom_or_feat.chrom, chrom_or_feat.start, chrom_or_feat.end # if the query is directional and the feature as a strand, # adjust... if _direction in ("up", "down") and getattr(chrom_or_feat, "strand", None) == "-": _direction = "up" if _direction == "down" else "up" else: chrom = chrom_or_feat qstart, qend = long(start), long(end) res = self.bin_query(table, chrom, qstart, qend) i, change = 1, 350 try: while res.count() < k: if _direction in (None, "up"): if qstart == 0 and _direction == "up": break qstart = max(0, qstart - change) if _direction in (None, "down"): qend += change i += 1 change *= (i + 5) res = self.bin_query(table, chrom, qstart, qend) except BigException: return [] def dist(f): d = 0 if start > f.end: d = start - f.end elif f.start > end: d = f.start - end # add dist as an attribute to the feature return d dists = sorted([(dist(f), f) for f in res]) if len(dists) == 0: return [] dists, res = zip(*dists) if len(res) == k: return res if k > len(res): # had to break because of end of chrom if k == 0: return [] k = len(res) ndist = dists[k - 1] # include all features that are the same distance as the nth closest # feature (accounts for ties). while k < len(res) and dists[k] == ndist: k = k + 1 return res[:k]
[ "def", "knearest", "(", "self", ",", "table", ",", "chrom_or_feat", ",", "start", "=", "None", ",", "end", "=", "None", ",", "k", "=", "1", ",", "_direction", "=", "None", ")", ":", "assert", "_direction", "in", "(", "None", ",", "\"up\"", ",", "\"...
Return k-nearest features Parameters ---------- table : str or table table against which to query chrom_or_feat : str or feat either a chromosome, e.g. 'chr3' or a feature with .chrom, .start, .end attributes start : int if `chrom_or_feat` is a chrom, then this must be the integer start end : int if `chrom_or_feat` is a chrom, then this must be the integer end k : int number of downstream neighbors to return _direction : (None, "up", "down") internal (don't use this)
[ "Return", "k", "-", "nearest", "features" ]
python
train
ciena/afkak
afkak/client.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/client.py#L727-L762
def _update_brokers(self, brokers, remove=False): """ Update `self._brokers` and `self.clients` Update our self.clients based on brokers in received metadata Take the received dict of brokers and reconcile it with our current list of brokers (self.clients). If there is a new one, bring up a new connection to it, and if remove is True, and any in our current list aren't in the metadata returned, disconnect from it. :param brokers: Iterable of `BrokerMetadata`. A client will be created for every broker given if it doesn't yet exist. :param bool remove: Is this metadata for *all* brokers? If so, clients for brokers which are no longer found in the metadata will be closed. """ log.debug("%r: _update_brokers(%r, remove=%r)", self, brokers, remove) brokers_by_id = {bm.node_id: bm for bm in brokers} self._brokers.update(brokers_by_id) # Update the metadata of broker clients that already exist. for node_id, broker_meta in brokers_by_id.items(): if node_id not in self.clients: continue self.clients[node_id].updateMetadata(broker_meta) # Remove any clients for brokers which no longer exist. if remove: to_close = [ self.clients.pop(node_id) for node_id in set(self.clients) - set(brokers_by_id) ] if to_close: self._close_brokerclients(to_close)
[ "def", "_update_brokers", "(", "self", ",", "brokers", ",", "remove", "=", "False", ")", ":", "log", ".", "debug", "(", "\"%r: _update_brokers(%r, remove=%r)\"", ",", "self", ",", "brokers", ",", "remove", ")", "brokers_by_id", "=", "{", "bm", ".", "node_id"...
Update `self._brokers` and `self.clients` Update our self.clients based on brokers in received metadata Take the received dict of brokers and reconcile it with our current list of brokers (self.clients). If there is a new one, bring up a new connection to it, and if remove is True, and any in our current list aren't in the metadata returned, disconnect from it. :param brokers: Iterable of `BrokerMetadata`. A client will be created for every broker given if it doesn't yet exist. :param bool remove: Is this metadata for *all* brokers? If so, clients for brokers which are no longer found in the metadata will be closed.
[ "Update", "self", ".", "_brokers", "and", "self", ".", "clients" ]
python
train
django-userena-ce/django-userena-ce
userena/models.py
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/models.py#L24-L39
def upload_to_mugshot(instance, filename): """ Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saving it under unique hash for the image. This is for privacy reasons so others can't just browse through the mugshot directory. """ extension = filename.split('.')[-1].lower() salt, hash = generate_sha1(instance.pk) path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username, 'id': instance.user.id, 'date': instance.user.date_joined, 'date_now': get_datetime_now().date()} return '%(path)s%(hash)s.%(extension)s' % {'path': path, 'hash': hash[:10], 'extension': extension}
[ "def", "upload_to_mugshot", "(", "instance", ",", "filename", ")", ":", "extension", "=", "filename", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "salt", ",", "hash", "=", "generate_sha1", "(", "instance", ".", "pk", ")...
Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saving it under unique hash for the image. This is for privacy reasons so others can't just browse through the mugshot directory.
[ "Uploads", "a", "mugshot", "for", "a", "user", "to", "the", "USERENA_MUGSHOT_PATH", "and", "saving", "it", "under", "unique", "hash", "for", "the", "image", ".", "This", "is", "for", "privacy", "reasons", "so", "others", "can", "t", "just", "browse", "thro...
python
train
hydpy-dev/hydpy
hydpy/auxs/xmltools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L1155-L1187
def load_series(self) -> None: """Load time series data as defined by the actual XML `reader` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() ... interface.update_timegrids() ... series_io = interface.series_io ... series_io.prepare_series() ... series_io.load_series() >>> from hydpy import print_values >>> print_values( ... hp.elements.land_dill.model.sequences.inputs.t.series[:3]) -0.298846, -0.811539, -2.493848 """ kwargs = {} for keyword in ('flattennetcdf', 'isolatenetcdf', 'timeaxisnetcdf'): argument = getattr(hydpy.pub.options, keyword, None) if argument is not None: kwargs[keyword[:-6]] = argument hydpy.pub.sequencemanager.open_netcdf_reader(**kwargs) self.prepare_sequencemanager() for sequence in self._iterate_sequences(): sequence.load_ext() hydpy.pub.sequencemanager.close_netcdf_reader()
[ "def", "load_series", "(", "self", ")", "->", "None", ":", "kwargs", "=", "{", "}", "for", "keyword", "in", "(", "'flattennetcdf'", ",", "'isolatenetcdf'", ",", "'timeaxisnetcdf'", ")", ":", "argument", "=", "getattr", "(", "hydpy", ".", "pub", ".", "opt...
Load time series data as defined by the actual XML `reader` element. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... hp.init_models() ... interface = XMLInterface('single_run.xml') ... interface.update_options() ... interface.update_timegrids() ... series_io = interface.series_io ... series_io.prepare_series() ... series_io.load_series() >>> from hydpy import print_values >>> print_values( ... hp.elements.land_dill.model.sequences.inputs.t.series[:3]) -0.298846, -0.811539, -2.493848
[ "Load", "time", "series", "data", "as", "defined", "by", "the", "actual", "XML", "reader", "element", "." ]
python
train
Jammy2211/PyAutoLens
autolens/model/inversion/util/pixelization_util.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/pixelization_util.py#L5-L30
def rectangular_neighbors_from_shape(shape): """Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with. The uniformity of the rectangular grid's geometry is used to compute this. """ pixels = shape[0]*shape[1] pixel_neighbors = -1 * np.ones(shape=(pixels, 4)) pixel_neighbors_size = np.zeros(pixels) pixel_neighbors, pixel_neighbors_size = compute_corner_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) pixel_neighbors, pixel_neighbors_size = compute_top_edge_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) pixel_neighbors, pixel_neighbors_size = compute_left_edge_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) pixel_neighbors, pixel_neighbors_size = compute_right_edge_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) pixel_neighbors, pixel_neighbors_size = compute_bottom_edge_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) pixel_neighbors, pixel_neighbors_size = compute_central_neighbors(pixel_neighbors, pixel_neighbors_size, shape, pixels) return pixel_neighbors, pixel_neighbors_size
[ "def", "rectangular_neighbors_from_shape", "(", "shape", ")", ":", "pixels", "=", "shape", "[", "0", "]", "*", "shape", "[", "1", "]", "pixel_neighbors", "=", "-", "1", "*", "np", ".", "ones", "(", "shape", "=", "(", "pixels", ",", "4", ")", ")", "...
Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with. The uniformity of the rectangular grid's geometry is used to compute this.
[ "Compute", "the", "neighbors", "of", "every", "pixel", "as", "a", "list", "of", "the", "pixel", "index", "s", "each", "pixel", "shares", "a", "vertex", "with", "." ]
python
valid
saltstack/salt
salt/states/pip_state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pip_state.py#L276-L297
def _pep440_version_cmp(pkg1, pkg2, ignore_epoch=False): ''' Compares two version strings using pkg_resources.parse_version. Return -1 if version1 < version2, 0 if version1 ==version2, and 1 if version1 > version2. Return None if there was a problem making the comparison. ''' normalize = lambda x: six.text_type(x).split('!', 1)[-1] \ if ignore_epoch else six.text_type(x) pkg1 = normalize(pkg1) pkg2 = normalize(pkg2) try: if pkg_resources.parse_version(pkg1) < pkg_resources.parse_version(pkg2): return -1 if pkg_resources.parse_version(pkg1) == pkg_resources.parse_version(pkg2): return 0 if pkg_resources.parse_version(pkg1) > pkg_resources.parse_version(pkg2): return 1 except Exception as exc: log.exception(exc) return None
[ "def", "_pep440_version_cmp", "(", "pkg1", ",", "pkg2", ",", "ignore_epoch", "=", "False", ")", ":", "normalize", "=", "lambda", "x", ":", "six", ".", "text_type", "(", "x", ")", ".", "split", "(", "'!'", ",", "1", ")", "[", "-", "1", "]", "if", ...
Compares two version strings using pkg_resources.parse_version. Return -1 if version1 < version2, 0 if version1 ==version2, and 1 if version1 > version2. Return None if there was a problem making the comparison.
[ "Compares", "two", "version", "strings", "using", "pkg_resources", ".", "parse_version", ".", "Return", "-", "1", "if", "version1", "<", "version2", "0", "if", "version1", "==", "version2", "and", "1", "if", "version1", ">", "version2", ".", "Return", "None"...
python
train
python-beaver/python-beaver
beaver/transports/redis_transport.py
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L146-L151
def _raise_server_index(self): """Round robin magic: Raises the current redis server index and returns it""" self._current_server_index = (self._current_server_index + 1) % len(self._servers) return self._current_server_index
[ "def", "_raise_server_index", "(", "self", ")", ":", "self", ".", "_current_server_index", "=", "(", "self", ".", "_current_server_index", "+", "1", ")", "%", "len", "(", "self", ".", "_servers", ")", "return", "self", ".", "_current_server_index" ]
Round robin magic: Raises the current redis server index and returns it
[ "Round", "robin", "magic", ":", "Raises", "the", "current", "redis", "server", "index", "and", "returns", "it" ]
python
train
stephenmcd/django-socketio
django_socketio/example_project/chat/events.py
https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/example_project/chat/events.py#L41-L52
def finish(request, socket, context): """ Event handler for a socket session ending in a room. Broadcast the user leaving and delete them from the DB. """ try: user = context["user"] except KeyError: return left = {"action": "leave", "name": user.name, "id": user.id} socket.broadcast_channel(left) user.delete()
[ "def", "finish", "(", "request", ",", "socket", ",", "context", ")", ":", "try", ":", "user", "=", "context", "[", "\"user\"", "]", "except", "KeyError", ":", "return", "left", "=", "{", "\"action\"", ":", "\"leave\"", ",", "\"name\"", ":", "user", "."...
Event handler for a socket session ending in a room. Broadcast the user leaving and delete them from the DB.
[ "Event", "handler", "for", "a", "socket", "session", "ending", "in", "a", "room", ".", "Broadcast", "the", "user", "leaving", "and", "delete", "them", "from", "the", "DB", "." ]
python
train
Rapptz/discord.py
discord/message.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/message.py#L387-L391
def raw_role_mentions(self): """A property that returns an array of role IDs matched with the syntax of <@&role_id> in the message content. """ return [int(x) for x in re.findall(r'<@&([0-9]+)>', self.content)]
[ "def", "raw_role_mentions", "(", "self", ")", ":", "return", "[", "int", "(", "x", ")", "for", "x", "in", "re", ".", "findall", "(", "r'<@&([0-9]+)>'", ",", "self", ".", "content", ")", "]" ]
A property that returns an array of role IDs matched with the syntax of <@&role_id> in the message content.
[ "A", "property", "that", "returns", "an", "array", "of", "role", "IDs", "matched", "with", "the", "syntax", "of", "<" ]
python
train
BoboTiG/python-mss
mss/__main__.py
https://github.com/BoboTiG/python-mss/blob/56347f781edb38a0e7a5104080bd683f49c6f074/mss/__main__.py#L20-L87
def main(args=None): # type: (Optional[List[str]]) -> int """ Main logic. """ cli_args = ArgumentParser() cli_args.add_argument( "-c", "--coordinates", default="", type=str, help="the part of the screen to capture: top, left, width, height", ) cli_args.add_argument( "-l", "--level", default=6, type=int, choices=list(range(10)), help="the PNG compression level", ) cli_args.add_argument( "-m", "--monitor", default=0, type=int, help="the monitor to screen shot" ) cli_args.add_argument( "-o", "--output", default="monitor-{mon}.png", help="the output file name" ) cli_args.add_argument( "-q", "--quiet", default=False, action="store_true", help="do not print created files", ) cli_args.add_argument("-v", "--version", action="version", version=__version__) options = cli_args.parse_args(args) kwargs = {"mon": options.monitor, "output": options.output} if options.coordinates: try: top, left, width, height = options.coordinates.split(",") except ValueError: print("Coordinates syntax: top, left, width, height") return 2 kwargs["mon"] = { "top": int(top), "left": int(left), "width": int(width), "height": int(height), } if options.output == "monitor-{mon}.png": kwargs["output"] = "sct-{top}x{left}_{width}x{height}.png" try: with mss() as sct: if options.coordinates: output = kwargs["output"].format(**kwargs["mon"]) sct_img = sct.grab(kwargs["mon"]) to_png(sct_img.rgb, sct_img.size, level=options.level, output=output) if not options.quiet: print(os.path.realpath(output)) else: for file_name in sct.save(**kwargs): if not options.quiet: print(os.path.realpath(file_name)) return 0 except ScreenShotError: return 1
[ "def", "main", "(", "args", "=", "None", ")", ":", "# type: (Optional[List[str]]) -> int", "cli_args", "=", "ArgumentParser", "(", ")", "cli_args", ".", "add_argument", "(", "\"-c\"", ",", "\"--coordinates\"", ",", "default", "=", "\"\"", ",", "type", "=", "st...
Main logic.
[ "Main", "logic", "." ]
python
train
h2non/pook
pook/response.py
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/response.py#L148-L161
def body(self, body): """ Defines response body data. Arguments: body (str|bytes): response body to use. Returns: self: ``pook.Response`` current instance. """ if isinstance(body, bytes): body = body.decode('utf-8') self._body = body
[ "def", "body", "(", "self", ",", "body", ")", ":", "if", "isinstance", "(", "body", ",", "bytes", ")", ":", "body", "=", "body", ".", "decode", "(", "'utf-8'", ")", "self", ".", "_body", "=", "body" ]
Defines response body data. Arguments: body (str|bytes): response body to use. Returns: self: ``pook.Response`` current instance.
[ "Defines", "response", "body", "data", "." ]
python
test
openpermissions/perch
perch/organisation.py
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/organisation.py#L199-L219
def user_organisations(cls, user_id, state=None, include_deactivated=False): """ Get organisations that the user has joined :param user_id: the user ID :param state: the user's "join" state :param include_deactivated: Include deactivated resources in response :returns: list of Organisation instances :raises: SocketError, CouchException """ if state and state not in validators.VALID_STATES: raise exceptions.ValidationError('Invalid "state"') if include_deactivated: organisations = yield views.joined_organisations.get( key=[user_id, state], include_docs=True) else: organisations = yield views.active_joined_organisations.get( key=[user_id, state], include_docs=True) raise Return([cls(**org['doc']) for org in organisations['rows']])
[ "def", "user_organisations", "(", "cls", ",", "user_id", ",", "state", "=", "None", ",", "include_deactivated", "=", "False", ")", ":", "if", "state", "and", "state", "not", "in", "validators", ".", "VALID_STATES", ":", "raise", "exceptions", ".", "Validatio...
Get organisations that the user has joined :param user_id: the user ID :param state: the user's "join" state :param include_deactivated: Include deactivated resources in response :returns: list of Organisation instances :raises: SocketError, CouchException
[ "Get", "organisations", "that", "the", "user", "has", "joined" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/incoming_phone_number/local.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/incoming_phone_number/local.py#L248-L257
def get_instance(self, payload): """ Build an instance of LocalInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.local.LocalInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.local.LocalInstance """ return LocalInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "LocalInstance", "(", "self", ".", "_version", ",", "payload", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")" ]
Build an instance of LocalInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.local.LocalInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.local.LocalInstance
[ "Build", "an", "instance", "of", "LocalInstance" ]
python
train
KrishnaswamyLab/PHATE
Python/phate/utils.py
https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/utils.py#L55-L74
def check_in(choices, **params): """Checks parameters are in a list of allowed parameters Parameters ---------- choices : array-like, accepted values params : object Named arguments, parameters to be checked Raises ------ ValueError : unacceptable choice of parameters """ for p in params: if params[p] not in choices: raise ValueError( "{} value {} not recognized. Choose from {}".format( p, params[p], choices))
[ "def", "check_in", "(", "choices", ",", "*", "*", "params", ")", ":", "for", "p", "in", "params", ":", "if", "params", "[", "p", "]", "not", "in", "choices", ":", "raise", "ValueError", "(", "\"{} value {} not recognized. Choose from {}\"", ".", "format", ...
Checks parameters are in a list of allowed parameters Parameters ---------- choices : array-like, accepted values params : object Named arguments, parameters to be checked Raises ------ ValueError : unacceptable choice of parameters
[ "Checks", "parameters", "are", "in", "a", "list", "of", "allowed", "parameters" ]
python
train
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L140-L147
def randomArray(size, bound): """ Returns an array initialized to random values between -max and max. """ if type(size) == type(1): size = (size,) temp = Numeric.array( ndim(*size) ) * (2.0 * bound) return temp - bound
[ "def", "randomArray", "(", "size", ",", "bound", ")", ":", "if", "type", "(", "size", ")", "==", "type", "(", "1", ")", ":", "size", "=", "(", "size", ",", ")", "temp", "=", "Numeric", ".", "array", "(", "ndim", "(", "*", "size", ")", ")", "*...
Returns an array initialized to random values between -max and max.
[ "Returns", "an", "array", "initialized", "to", "random", "values", "between", "-", "max", "and", "max", "." ]
python
train
dw/mitogen
ansible_mitogen/mixins.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/mixins.py#L193-L203
def _make_tmp_path(self, remote_user=None): """ Create a temporary subdirectory as a child of the temporary directory managed by the remote interpreter. """ LOG.debug('_make_tmp_path(remote_user=%r)', remote_user) path = self._generate_tmp_path() LOG.debug('Temporary directory: %r', path) self._connection.get_chain().call_no_reply(os.mkdir, path) self._connection._shell.tmpdir = path return path
[ "def", "_make_tmp_path", "(", "self", ",", "remote_user", "=", "None", ")", ":", "LOG", ".", "debug", "(", "'_make_tmp_path(remote_user=%r)'", ",", "remote_user", ")", "path", "=", "self", ".", "_generate_tmp_path", "(", ")", "LOG", ".", "debug", "(", "'Temp...
Create a temporary subdirectory as a child of the temporary directory managed by the remote interpreter.
[ "Create", "a", "temporary", "subdirectory", "as", "a", "child", "of", "the", "temporary", "directory", "managed", "by", "the", "remote", "interpreter", "." ]
python
train
projectatomic/atomic-reactor
atomic_reactor/plugins/post_group_manifests.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/plugins/post_group_manifests.py#L212-L244
def check_existing_vr_tag(self): """ Checks if version-release tag (primary not floating tag) exists already, and fails plugin if it does. """ primary_images = get_primary_images(self.workflow) if not primary_images: return vr_image = None for image in primary_images: if '-' in image.tag: vr_image = image break if not vr_image: return should_fail = False for registry_name, registry in self.registries.items(): pullspec = vr_image.copy() pullspec.registry = registry_name insecure = registry.get('insecure', False) secret = registry.get('secret', None) manifest_list = get_manifest_list(pullspec, registry_name, insecure, secret) if manifest_list: self.log.error("Primary tag already exists in registry: %s", pullspec) should_fail = True if should_fail: raise RuntimeError("Primary tag already exists in registry")
[ "def", "check_existing_vr_tag", "(", "self", ")", ":", "primary_images", "=", "get_primary_images", "(", "self", ".", "workflow", ")", "if", "not", "primary_images", ":", "return", "vr_image", "=", "None", "for", "image", "in", "primary_images", ":", "if", "'-...
Checks if version-release tag (primary not floating tag) exists already, and fails plugin if it does.
[ "Checks", "if", "version", "-", "release", "tag", "(", "primary", "not", "floating", "tag", ")", "exists", "already", "and", "fails", "plugin", "if", "it", "does", "." ]
python
train
blazelibs/blazeutils
blazeutils/helpers.py
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/helpers.py#L147-L162
def unique(seq, preserve_order=True): """ Take a sequence and make it unique. Not preserving order is faster, but that won't matter so much for most uses. copied from: http://www.peterbe.com/plog/uniqifiers-benchmark/uniqifiers_benchmark.py """ if preserve_order: # f8 by Dave Kirby # Order preserving seen = set() seen_add = seen.add # lookup method only once return [x for x in seq if x not in seen and not seen_add(x)] # f9 # Not order preserving return list({}.fromkeys(seq).keys())
[ "def", "unique", "(", "seq", ",", "preserve_order", "=", "True", ")", ":", "if", "preserve_order", ":", "# f8 by Dave Kirby", "# Order preserving", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "# lookup method only once", "return", "[", "...
Take a sequence and make it unique. Not preserving order is faster, but that won't matter so much for most uses. copied from: http://www.peterbe.com/plog/uniqifiers-benchmark/uniqifiers_benchmark.py
[ "Take", "a", "sequence", "and", "make", "it", "unique", ".", "Not", "preserving", "order", "is", "faster", "but", "that", "won", "t", "matter", "so", "much", "for", "most", "uses", "." ]
python
train
log2timeline/dfvfs
dfvfs/vfs/apfs_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/apfs_file_entry.py#L172-L175
def change_time(self): """dfdatetime.DateTimeValues: change time or None if not available.""" timestamp = self._fsapfs_file_entry.get_inode_change_time_as_integer() return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
[ "def", "change_time", "(", "self", ")", ":", "timestamp", "=", "self", ".", "_fsapfs_file_entry", ".", "get_inode_change_time_as_integer", "(", ")", "return", "dfdatetime_apfs_time", ".", "APFSTime", "(", "timestamp", "=", "timestamp", ")" ]
dfdatetime.DateTimeValues: change time or None if not available.
[ "dfdatetime", ".", "DateTimeValues", ":", "change", "time", "or", "None", "if", "not", "available", "." ]
python
train
manns/pyspread
pyspread/src/gui/_grid.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1520-L1552
def OnColSize(self, event): """Column size event handler""" col = event.GetRowOrCol() tab = self.grid.current_table colsize = self.grid.GetColSize(col) / self.grid.grid_renderer.zoom # Detect for resizing group of cols cols = self.grid.GetSelectedCols() if len(cols) == 0: cols = [col, ] # Detect for selection of rows spanning all columns selection = self.grid.selection num_rows = self.grid.code_array.shape[0]-1 for box in zip(selection.block_tl, selection.block_br): top_row = box[0][0] bottom_row = box[1][0] if top_row == 0 and bottom_row == num_rows: cols += range(box[0][1], box[1][1]+1) # All column resizing is undone in one click with undo.group(_("Resize Columns")): for col in cols: self.grid.code_array.set_col_width(col, tab, colsize) zoomed_colsize = colsize * self.grid.grid_renderer.zoom self.grid.SetColSize(col, zoomed_colsize) # Mark content as changed post_command_event(self.grid.main_window, self.grid.ContentChangedMsg) event.Skip() self.grid.ForceRefresh()
[ "def", "OnColSize", "(", "self", ",", "event", ")", ":", "col", "=", "event", ".", "GetRowOrCol", "(", ")", "tab", "=", "self", ".", "grid", ".", "current_table", "colsize", "=", "self", ".", "grid", ".", "GetColSize", "(", "col", ")", "/", "self", ...
Column size event handler
[ "Column", "size", "event", "handler" ]
python
train
mugurbil/gnm
gnm/gnm.py
https://github.com/mugurbil/gnm/blob/4f9711fb9d78cc02820c25234bc3ab9615014f11/gnm/gnm.py#L533-L576
def _proposal_params(self, state): """ Proposal parameters Calculate parameters needed for the proposal. Inputs : state : x : the present sample, the place to linearize around f : f(x), function value at x J : f'(x), the jacobian of the function evaluated at x Outputs : state : mu : the mean vector L : the lower triangular cholesky factor of P log_p : log(p(x)) log of the posterior density """ x = state['x'] f = state['f'] J = state['J'] JJ = np.dot(J.T,J) if self._prior: m = self._m H = self._H Hm = self._Hm # LL' = P = H+J'J L = la.cholesky(H+JJ) # mu = (P^-1)(Hm-J'f+J'Jx) mu = la.solve(L.T,la.solve(L,Hm-np.dot(J.T,f)+np.dot(JJ,x))) else: # P = J'J L = la.cholesky(JJ) # mu = x-(P^-1)J'f mu = x-la.solve(L.T,la.solve(L,np.dot(J.T,f))) state['L'] = L state['mu'] = mu state['log_p'] = self._log_post(x,f) return state
[ "def", "_proposal_params", "(", "self", ",", "state", ")", ":", "x", "=", "state", "[", "'x'", "]", "f", "=", "state", "[", "'f'", "]", "J", "=", "state", "[", "'J'", "]", "JJ", "=", "np", ".", "dot", "(", "J", ".", "T", ",", "J", ")", "if"...
Proposal parameters Calculate parameters needed for the proposal. Inputs : state : x : the present sample, the place to linearize around f : f(x), function value at x J : f'(x), the jacobian of the function evaluated at x Outputs : state : mu : the mean vector L : the lower triangular cholesky factor of P log_p : log(p(x)) log of the posterior density
[ "Proposal", "parameters", "Calculate", "parameters", "needed", "for", "the", "proposal", ".", "Inputs", ":", "state", ":", "x", ":", "the", "present", "sample", "the", "place", "to", "linearize", "around", "f", ":", "f", "(", "x", ")", "function", "value",...
python
train
google/grr
grr/core/grr_response_core/lib/utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L973-L1016
def WriteFileFooter(self): """Writes the file footer (finished the file).""" if not self._stream: raise ArchiveAlreadyClosedError( "Attempting to write to a ZIP archive that was already closed.") if self.cur_cmpr: buf = self.cur_cmpr.flush() self.cur_compress_size += len(buf) self.cur_zinfo.compress_size = self.cur_compress_size self._stream.write(buf) else: self.cur_zinfo.compress_size = self.cur_file_size self.cur_zinfo.CRC = self.cur_crc self.cur_zinfo.file_size = self.cur_file_size # The zip footer has a 8 bytes limit for sizes so if we compress a # file larger than 4 GB, the code below will not work. The ZIP64 # convention is to write 0xffffffff for compressed and # uncompressed size in those cases. The actual size is written by # the library for us anyways so those fields are redundant. cur_file_size = min(0xffffffff, self.cur_file_size) cur_compress_size = min(0xffffffff, self.cur_compress_size) # Writing data descriptor ZIP64-way by default. We never know how large # the archive may become as we're generating it dynamically. # # crc-32 8 bytes (little endian) # compressed size 8 bytes (little endian) # uncompressed size 8 bytes (little endian) self._stream.write( struct.pack("<LLL", self.cur_crc, cur_compress_size, cur_file_size)) # Register the file in the zip file, so that central directory gets # written correctly. self._zip_fd.filelist.append(self.cur_zinfo) self._zip_fd.NameToInfo[self.cur_zinfo.filename] = self.cur_zinfo self._ResetState() return self._stream.GetValueAndReset()
[ "def", "WriteFileFooter", "(", "self", ")", ":", "if", "not", "self", ".", "_stream", ":", "raise", "ArchiveAlreadyClosedError", "(", "\"Attempting to write to a ZIP archive that was already closed.\"", ")", "if", "self", ".", "cur_cmpr", ":", "buf", "=", "self", "....
Writes the file footer (finished the file).
[ "Writes", "the", "file", "footer", "(", "finished", "the", "file", ")", "." ]
python
train
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2814-L2824
def cublasSsyr(handle, uplo, n, alpha, x, incx, A, lda): """ Rank-1 operation on real symmetric matrix. """ status = _libcublas.cublasSsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n, ctypes.byref(ctypes.c_float(alpha)), int(x), incx, int(A), lda) cublasCheckStatus(status)
[ "def", "cublasSsyr", "(", "handle", ",", "uplo", ",", "n", ",", "alpha", ",", "x", ",", "incx", ",", "A", ",", "lda", ")", ":", "status", "=", "_libcublas", ".", "cublasSsyr_v2", "(", "handle", ",", "_CUBLAS_FILL_MODE", "[", "uplo", "]", ",", "n", ...
Rank-1 operation on real symmetric matrix.
[ "Rank", "-", "1", "operation", "on", "real", "symmetric", "matrix", "." ]
python
train
jessevdk/cldoc
cldoc/clang/cindex.py
https://github.com/jessevdk/cldoc/blob/fc7f59405c4a891b8367c80a700f5aa3c5c9230c/cldoc/clang/cindex.py#L2737-L2809
def from_source(cls, filename, args=None, unsaved_files=None, options=0, index=None): """Create a TranslationUnit by parsing source. This is capable of processing source code both from files on the filesystem as well as in-memory contents. Command-line arguments that would be passed to clang are specified as a list via args. These can be used to specify include paths, warnings, etc. e.g. ["-Wall", "-I/path/to/include"]. In-memory file content can be provided via unsaved_files. This is an iterable of 2-tuples. The first element is the str filename. The second element defines the content. Content can be provided as str source code or as file objects (anything with a read() method). If a file object is being used, content will be read until EOF and the read cursor will not be reset to its original position. options is a bitwise or of TranslationUnit.PARSE_XXX flags which will control parsing behavior. index is an Index instance to utilize. If not provided, a new Index will be created for this TranslationUnit. To parse source from the filesystem, the filename of the file to parse is specified by the filename argument. Or, filename could be None and the args list would contain the filename(s) to parse. To parse source from an in-memory buffer, set filename to the virtual filename you wish to associate with this source (e.g. "test.c"). The contents of that file are then provided in unsaved_files. If an error occurs, a TranslationUnitLoadError is raised. Please note that a TranslationUnit with parser errors may be returned. It is the caller's responsibility to check tu.diagnostics for errors. Also note that Clang infers the source language from the extension of the input filename. If you pass in source code containing a C++ class declaration with the filename "test.c" parsing will fail. 
""" if args is None: args = [] if unsaved_files is None: unsaved_files = [] if index is None: index = Index.create() args_array = None if len(args) > 0: args_array = (c_char_p * len(args))(*[b(x) for x in args]) unsaved_array = None if len(unsaved_files) > 0: unsaved_array = (_CXUnsavedFile * len(unsaved_files))() for i, (name, contents) in enumerate(unsaved_files): if hasattr(contents, "read"): contents = contents.read() unsaved_array[i].name = b(name) unsaved_array[i].contents = b(contents) unsaved_array[i].length = len(contents) ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array, len(args), unsaved_array, len(unsaved_files), options) if not ptr: raise TranslationUnitLoadError("Error parsing translation unit.") return cls(ptr, index=index)
[ "def", "from_source", "(", "cls", ",", "filename", ",", "args", "=", "None", ",", "unsaved_files", "=", "None", ",", "options", "=", "0", ",", "index", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "]", "if", "unsaved_fi...
Create a TranslationUnit by parsing source. This is capable of processing source code both from files on the filesystem as well as in-memory contents. Command-line arguments that would be passed to clang are specified as a list via args. These can be used to specify include paths, warnings, etc. e.g. ["-Wall", "-I/path/to/include"]. In-memory file content can be provided via unsaved_files. This is an iterable of 2-tuples. The first element is the str filename. The second element defines the content. Content can be provided as str source code or as file objects (anything with a read() method). If a file object is being used, content will be read until EOF and the read cursor will not be reset to its original position. options is a bitwise or of TranslationUnit.PARSE_XXX flags which will control parsing behavior. index is an Index instance to utilize. If not provided, a new Index will be created for this TranslationUnit. To parse source from the filesystem, the filename of the file to parse is specified by the filename argument. Or, filename could be None and the args list would contain the filename(s) to parse. To parse source from an in-memory buffer, set filename to the virtual filename you wish to associate with this source (e.g. "test.c"). The contents of that file are then provided in unsaved_files. If an error occurs, a TranslationUnitLoadError is raised. Please note that a TranslationUnit with parser errors may be returned. It is the caller's responsibility to check tu.diagnostics for errors. Also note that Clang infers the source language from the extension of the input filename. If you pass in source code containing a C++ class declaration with the filename "test.c" parsing will fail.
[ "Create", "a", "TranslationUnit", "by", "parsing", "source", "." ]
python
train
ministryofjustice/django-moj-irat
moj_irat/healthchecks.py
https://github.com/ministryofjustice/django-moj-irat/blob/c1588426fffce783bef6d8b9d73395a5e9a833c9/moj_irat/healthchecks.py#L162-L169
def load_healthchecks(self): """ Loads healthchecks. """ self.load_default_healthchecks() if getattr(settings, 'AUTODISCOVER_HEALTHCHECKS', True): self.autodiscover_healthchecks() self._registry_loaded = True
[ "def", "load_healthchecks", "(", "self", ")", ":", "self", ".", "load_default_healthchecks", "(", ")", "if", "getattr", "(", "settings", ",", "'AUTODISCOVER_HEALTHCHECKS'", ",", "True", ")", ":", "self", ".", "autodiscover_healthchecks", "(", ")", "self", ".", ...
Loads healthchecks.
[ "Loads", "healthchecks", "." ]
python
train
sirfoga/pyhal
hal/files/models/system.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/system.py#L67-L82
def fix_raw_path(path): """Prettify name of path :param path: path to fix :return: Good name for path """ double_path_separator = PATH_SEPARATOR + PATH_SEPARATOR while path.find( double_path_separator) >= 0: # there are double separators path = path.replace(double_path_separator, PATH_SEPARATOR) # remove double path separator if is_folder(path) and not path.endswith("/"): path = path + "/" return path
[ "def", "fix_raw_path", "(", "path", ")", ":", "double_path_separator", "=", "PATH_SEPARATOR", "+", "PATH_SEPARATOR", "while", "path", ".", "find", "(", "double_path_separator", ")", ">=", "0", ":", "# there are double separators", "path", "=", "path", ".", "replac...
Prettify name of path :param path: path to fix :return: Good name for path
[ "Prettify", "name", "of", "path" ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/queue.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/queue.py#L164-L177
def offer(self, item, timeout=0): """ Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions. Returns ``true`` upon success. If there is no space currently available: * If a timeout is provided, it waits until this timeout elapses and returns the result. * If a timeout is not provided, returns ``false`` immediately. :param item: (object), the item to be added. :param timeout: (long), maximum time in seconds to wait for addition (optional). :return: (bool), ``true`` if the element was added to this queue, ``false`` otherwise. """ check_not_none(item, "Value can't be None") element_data = self._to_data(item) return self._encode_invoke(queue_offer_codec, value=element_data, timeout_millis=to_millis(timeout))
[ "def", "offer", "(", "self", ",", "item", ",", "timeout", "=", "0", ")", ":", "check_not_none", "(", "item", ",", "\"Value can't be None\"", ")", "element_data", "=", "self", ".", "_to_data", "(", "item", ")", "return", "self", ".", "_encode_invoke", "(", ...
Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions. Returns ``true`` upon success. If there is no space currently available: * If a timeout is provided, it waits until this timeout elapses and returns the result. * If a timeout is not provided, returns ``false`` immediately. :param item: (object), the item to be added. :param timeout: (long), maximum time in seconds to wait for addition (optional). :return: (bool), ``true`` if the element was added to this queue, ``false`` otherwise.
[ "Inserts", "the", "specified", "element", "into", "this", "queue", "if", "it", "is", "possible", "to", "do", "so", "immediately", "without", "violating", "capacity", "restrictions", ".", "Returns", "true", "upon", "success", ".", "If", "there", "is", "no", "...
python
train
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L1624-L1640
def move_notes(self, noteids, folderid): """Move notes to a folder :param noteids: The noteids to move :param folderid: The folderid to move notes to """ if self.standard_grant_type is not "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req('/notes/move', post_data={ 'noteids[]' : noteids, 'folderid' : folderid }) return response
[ "def", "move_notes", "(", "self", ",", "noteids", ",", "folderid", ")", ":", "if", "self", ".", "standard_grant_type", "is", "not", "\"authorization_code\"", ":", "raise", "DeviantartError", "(", "\"Authentication through Authorization Code (Grant Type) is required in order...
Move notes to a folder :param noteids: The noteids to move :param folderid: The folderid to move notes to
[ "Move", "notes", "to", "a", "folder" ]
python
train
signaturit/python-sdk
signaturit_sdk/signaturit_client.py
https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L594-L609
def create_group(self, name): """ Create group :param name: Group name """ parameters = { 'name': name } url = self.TEAM_GROUPS_URL connection = Connection(self.token) connection.set_url(self.production, url) connection.add_params(parameters) return connection.post_request()
[ "def", "create_group", "(", "self", ",", "name", ")", ":", "parameters", "=", "{", "'name'", ":", "name", "}", "url", "=", "self", ".", "TEAM_GROUPS_URL", "connection", "=", "Connection", "(", "self", ".", "token", ")", "connection", ".", "set_url", "(",...
Create group :param name: Group name
[ "Create", "group", ":", "param", "name", ":", "Group", "name" ]
python
train
astropy/regions
ah_bootstrap.py
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/ah_bootstrap.py#L486-L519
def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): # We use subprocess instead of run_setup from setuptools to # avoid segmentation faults - see the following for more details: # https://github.com/cython/cython/issues/2104 sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path) for dist in pkg_resources.find_distributions(path, True): # There should be only one... return dist return dist
[ "def", "_directory_import", "(", "self", ")", ":", "# Return True on success, False on failure but download is allowed, and", "# otherwise raise SystemExit", "path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "path", ")", "# Use an empty WorkingSet rather than ...
Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise.
[ "Import", "astropy_helpers", "from", "the", "given", "path", "which", "will", "be", "added", "to", "sys", ".", "path", "." ]
python
train
saltstack/salt
salt/modules/cyg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cyg.py#L242-L270
def uninstall(packages, cyg_arch='x86_64', mirrors=None): ''' Uninstall one or several packages. packages The packages to uninstall. cyg_arch : x86_64 Specify the architecture to remove the package from Current options are x86 and x86_64 CLI Example: .. code-block:: bash salt '*' cyg.uninstall dos2unix salt '*' cyg.uninstall dos2unix mirrors="[{'http://mirror': 'http://url/to/public/key}]" ''' args = [] if packages is not None: args.append('--remove-packages {pkgs}'.format(pkgs=packages)) LOG.debug('args: %s', args) if not _check_cygwin_installed(cyg_arch): LOG.debug('We\'re convinced cygwin isn\'t installed') return True return _run_silent_cygwin(cyg_arch=cyg_arch, args=args, mirrors=mirrors)
[ "def", "uninstall", "(", "packages", ",", "cyg_arch", "=", "'x86_64'", ",", "mirrors", "=", "None", ")", ":", "args", "=", "[", "]", "if", "packages", "is", "not", "None", ":", "args", ".", "append", "(", "'--remove-packages {pkgs}'", ".", "format", "(",...
Uninstall one or several packages. packages The packages to uninstall. cyg_arch : x86_64 Specify the architecture to remove the package from Current options are x86 and x86_64 CLI Example: .. code-block:: bash salt '*' cyg.uninstall dos2unix salt '*' cyg.uninstall dos2unix mirrors="[{'http://mirror': 'http://url/to/public/key}]"
[ "Uninstall", "one", "or", "several", "packages", "." ]
python
train
koszullab/metaTOR
metator/scripts/hicstuff.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1005-L1031
def positions_to_contigs(positions): """Flattens and converts a positions array to a contigs array, if applicable. """ if isinstance(positions, np.ndarray): flattened_positions = positions.flatten() else: try: flattened_positions = np.array( [pos for contig in positions for pos in contig]) except TypeError: flattened_positions = np.array(positions) if (np.diff(positions) == 0).any() and not (0 in set(positions)): warnings.warn("I detected identical consecutive nonzero values.") return positions n = len(flattened_positions) contigs = np.ones(n) counter = 0 for i in range(1, n): if positions[i] == 0: counter += 1 contigs[i] += counter else: contigs[i] = contigs[i - 1] return contigs
[ "def", "positions_to_contigs", "(", "positions", ")", ":", "if", "isinstance", "(", "positions", ",", "np", ".", "ndarray", ")", ":", "flattened_positions", "=", "positions", ".", "flatten", "(", ")", "else", ":", "try", ":", "flattened_positions", "=", "np"...
Flattens and converts a positions array to a contigs array, if applicable.
[ "Flattens", "and", "converts", "a", "positions", "array", "to", "a", "contigs", "array", "if", "applicable", "." ]
python
train
chovanecm/sacredboard
sacredboard/app/data/filestorage/rundao.py
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/filestorage/rundao.py#L20-L49
def get_runs(self, sort_by=None, sort_direction=None, start=0, limit=None, query={"type": "and", "filters": []}): """ Return all runs in the file store. If a run is corrupt, e.g. missing files, it is skipped. :param sort_by: NotImplemented :param sort_direction: NotImplemented :param start: NotImplemented :param limit: NotImplemented :param query: NotImplemented :return: FileStoreCursor """ all_run_ids = os.listdir(self.directory) def run_iterator(): blacklist = set(["_sources"]) for id in all_run_ids: if id in blacklist: continue try: yield self.get(id) except FileNotFoundError: # An incomplete experiment is a corrupt experiment. # Skip it for now. # TODO pass count = len(all_run_ids) return FileStoreCursor(count, run_iterator())
[ "def", "get_runs", "(", "self", ",", "sort_by", "=", "None", ",", "sort_direction", "=", "None", ",", "start", "=", "0", ",", "limit", "=", "None", ",", "query", "=", "{", "\"type\"", ":", "\"and\"", ",", "\"filters\"", ":", "[", "]", "}", ")", ":"...
Return all runs in the file store. If a run is corrupt, e.g. missing files, it is skipped. :param sort_by: NotImplemented :param sort_direction: NotImplemented :param start: NotImplemented :param limit: NotImplemented :param query: NotImplemented :return: FileStoreCursor
[ "Return", "all", "runs", "in", "the", "file", "store", "." ]
python
train
nerdvegas/rez
src/rez/wrapper.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/wrapper.py#L66-L81
def run(self, *args): """Invoke the wrapped script. Returns: Return code of the command, or 0 if the command is not run. """ if self.prefix_char is None: prefix_char = config.suite_alias_prefix_char else: prefix_char = self.prefix_char if prefix_char == '': # empty prefix char means we don't support the '+' args return self._run_no_args(args) else: return self._run(prefix_char, args)
[ "def", "run", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "prefix_char", "is", "None", ":", "prefix_char", "=", "config", ".", "suite_alias_prefix_char", "else", ":", "prefix_char", "=", "self", ".", "prefix_char", "if", "prefix_char", "=="...
Invoke the wrapped script. Returns: Return code of the command, or 0 if the command is not run.
[ "Invoke", "the", "wrapped", "script", "." ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3153-L3185
def _finish_remove(self, num_bytes_to_remove, is_partition): # type: (int, bool) -> None ''' An internal method to do all of the accounting needed whenever something is removed from the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_remove - The number of additional bytes to remove from the descriptors. is_partition - Whether these bytes are part of a UDF partition. Returns: Nothing. ''' for pvd in self.pvds: pvd.remove_from_space_size(num_bytes_to_remove) if self.joliet_vd is not None: self.joliet_vd.remove_from_space_size(num_bytes_to_remove) if self.enhanced_vd is not None: self.enhanced_vd.copy_sizes(self.pvd) if self.udf_root is not None and is_partition: num_extents_to_remove = utils.ceiling_div(num_bytes_to_remove, self.pvd.logical_block_size()) self.udf_main_descs.partition.part_length -= num_extents_to_remove self.udf_reserve_descs.partition.part_length -= num_extents_to_remove self.udf_logical_volume_integrity.size_table -= num_extents_to_remove if self._always_consistent: self._reshuffle_extents() else: self._needs_reshuffle = True
[ "def", "_finish_remove", "(", "self", ",", "num_bytes_to_remove", ",", "is_partition", ")", ":", "# type: (int, bool) -> None", "for", "pvd", "in", "self", ".", "pvds", ":", "pvd", ".", "remove_from_space_size", "(", "num_bytes_to_remove", ")", "if", "self", ".", ...
An internal method to do all of the accounting needed whenever something is removed from the ISO. This method should only be called by public API implementations. Parameters: num_bytes_to_remove - The number of additional bytes to remove from the descriptors. is_partition - Whether these bytes are part of a UDF partition. Returns: Nothing.
[ "An", "internal", "method", "to", "do", "all", "of", "the", "accounting", "needed", "whenever", "something", "is", "removed", "from", "the", "ISO", ".", "This", "method", "should", "only", "be", "called", "by", "public", "API", "implementations", "." ]
python
train
Ezhil-Language-Foundation/open-tamil
tamil/tweetparser.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/tweetparser.py#L71-L76
def isTamilPredicate(word): """ is Tamil word : boolean True/False""" for c in word: if unicodedata.name(c).split()[0] != u'TAMIL' : return False return True
[ "def", "isTamilPredicate", "(", "word", ")", ":", "for", "c", "in", "word", ":", "if", "unicodedata", ".", "name", "(", "c", ")", ".", "split", "(", ")", "[", "0", "]", "!=", "u'TAMIL'", ":", "return", "False", "return", "True" ]
is Tamil word : boolean True/False
[ "is", "Tamil", "word", ":", "boolean", "True", "/", "False" ]
python
train
saltstack/salt
salt/utils/event.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L356-L370
def unsubscribe(self, tag, match_type=None): ''' Un-subscribe to events matching the passed tag. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.remove([tag, match_func]) old_events = self.pending_events self.pending_events = [] for evt in old_events: if any(pmatch_func(evt['tag'], ptag) for ptag, pmatch_func in self.pending_tags): self.pending_events.append(evt)
[ "def", "unsubscribe", "(", "self", ",", "tag", ",", "match_type", "=", "None", ")", ":", "if", "tag", "is", "None", ":", "return", "match_func", "=", "self", ".", "_get_match_func", "(", "match_type", ")", "self", ".", "pending_tags", ".", "remove", "(",...
Un-subscribe to events matching the passed tag.
[ "Un", "-", "subscribe", "to", "events", "matching", "the", "passed", "tag", "." ]
python
train
leancloud/python-sdk
leancloud/relation.py
https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/relation.py#L21-L32
def reverse_query(cls, parent_class, relation_key, child): """ 创建一个新的 Query 对象,反向查询所有指向此 Relation 的父对象。 :param parent_class: 父类名称 :param relation_key: 父类中 Relation 的字段名 :param child: 子类对象 :return: leancloud.Query """ q = leancloud.Query(parent_class) q.equal_to(relation_key, child._to_pointer()) return q
[ "def", "reverse_query", "(", "cls", ",", "parent_class", ",", "relation_key", ",", "child", ")", ":", "q", "=", "leancloud", ".", "Query", "(", "parent_class", ")", "q", ".", "equal_to", "(", "relation_key", ",", "child", ".", "_to_pointer", "(", ")", ")...
创建一个新的 Query 对象,反向查询所有指向此 Relation 的父对象。 :param parent_class: 父类名称 :param relation_key: 父类中 Relation 的字段名 :param child: 子类对象 :return: leancloud.Query
[ "创建一个新的", "Query", "对象,反向查询所有指向此", "Relation", "的父对象。" ]
python
train
NeuroML/NeuroMLlite
neuromllite/SonataReader.py
https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L136-L606
def parse(self, handler): """ Main method to parse the Sonata files and call the appropriate methods in the handler """ ######################################################################## # load the main configuration scripts main_config_filename = os.path.abspath(self.parameters['filename']) config = load_json(main_config_filename) self.init_substitutes = {'.':'%s/'%os.path.dirname(main_config_filename), '../':'%s/'%os.path.dirname(os.path.dirname(main_config_filename))} self.substitutes = {'${configdir}':'%s'%os.path.dirname(main_config_filename)} if 'network' in config: self.network_config = load_json(self.subs(config['network'])) else: self.network_config = config if 'simulation' in config: self.simulation_config = load_json(self.subs(config['simulation'])) else: self.simulation_config = None for m in self.network_config['manifest']: path = self.subs(self.network_config['manifest'][m]) self.substitutes[m] = path if 'id' in self.parameters: id = self.parameters['id'] else: id = 'SonataNetwork' if id[0].isdigit(): # id like 9_cells is not a valid id for NeuroML id='NML2_%s'%id ######################################################################## # Feed the handler the info on the network self.handler = handler notes = "Network read in from Sonata: %s"%main_config_filename handler.handle_document_start(id, notes) handler.handle_network(id, notes) self.node_types = {} ######################################################################## # Get info from nodes files for n in self.network_config['networks']['nodes']: nodes_file = self.subs(n['nodes_file']) node_types_file = self.subs(n['node_types_file']) print_v("\nLoading nodes from %s and %s"%(nodes_file,node_types_file)) h5file=tables.open_file(nodes_file,mode='r') print_v("Opened HDF5 file: %s"%(h5file.filename)) self.parse_group(h5file.root.nodes) h5file.close() self.node_types[self.current_sonata_pop] = load_csv_props(node_types_file) self.current_sonata_pop = None 
######################################################################## # Get info from edges files self.edges_info = {} self.conn_info = {} if 'edges' in self.network_config['networks']: for e in self.network_config['networks']['edges']: edges_file = self.subs(e['edges_file']) edge_types_file = self.subs(e['edge_types_file']) print_v("\nLoading edges from %s and %s"%(edges_file,edge_types_file)) h5file=tables.open_file(edges_file,mode='r') print_v("Opened HDF5 file: %s"%(h5file.filename)) self.parse_group(h5file.root.edges) h5file.close() self.edges_info[self.current_edge] = load_csv_props(edge_types_file) self.current_edge = None ######################################################################## # Use extracted node/cell info to create populations for sonata_pop in self.cell_info: types_vs_pops = {} for type in self.cell_info[sonata_pop]['type_count']: node_type_info = self.node_types[sonata_pop][type] model_name_type = node_type_info['model_name'] if 'model_name' in node_type_info \ else (node_type_info['pop_name'] if 'pop_name' in node_type_info else node_type_info['model_type']) model_type = node_type_info['model_type'] model_template = node_type_info['model_template'] if 'model_template' in node_type_info else '- None -' nml_pop_id = '%s_%s_%s'%(sonata_pop,model_name_type,type) print_v(" - Adding population: %s which has model info: %s"%(nml_pop_id, node_type_info)) size = self.cell_info[sonata_pop]['type_count'][type] if model_type=='point_process' and model_template=='nrn:IntFire1': raise Exception('Point process model not currently supported: %s\nTry expressing the I&F cell in NEST format with nest:iaf_psc_alpha'%model_template) pop_comp = 'cell_%s'%nml_pop_id #model_template.replace(':','_') self.pop_comp_info[pop_comp] = {} self.pop_comp_info[pop_comp]['model_type'] = model_type dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params'] 
self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file) elif model_type=='point_process' and model_template=='nest:iaf_psc_alpha': pop_comp = 'cell_%s'%nml_pop_id # = model_template.replace(':','_') self.pop_comp_info[pop_comp] = {} self.pop_comp_info[pop_comp]['model_type'] = model_type self.pop_comp_info[pop_comp]['model_template'] = model_template dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params'] self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file) else: pop_comp = DUMMY_CELL self.pop_comp_info[pop_comp] = {} self.pop_comp_info[pop_comp]['model_type'] = pop_comp self.nml_pop_vs_comps[nml_pop_id] = pop_comp properties = {} properties['type_id']=type properties['sonata_population']=sonata_pop properties['region']=sonata_pop for i in node_type_info: properties[i]=node_type_info[i] if i=='ei': properties['type']=node_type_info[i].upper() color = '%s %s %s'%(self.myrandom.random(),self.myrandom.random(),self.myrandom.random()) try: import opencortex.utils.color as occ interneuron = 'SOM' in nml_pop_id or 'PV' in nml_pop_id if 'L23' in nml_pop_id: color = occ.L23_INTERNEURON if interneuron else occ.L23_PRINCIPAL_CELL pop.properties.append(neuroml.Property('region','L23')) if 'L4' in nml_pop_id: color = occ.L4_INTERNEURON if interneuron else occ.L4_PRINCIPAL_CELL pop.properties.append(neuroml.Property('region','L4')) if 'L5' in nml_pop_id: color = occ.L5_INTERNEURON if interneuron else occ.L5_PRINCIPAL_CELL pop.properties.append(neuroml.Property('region','L5')) if 'L6' in nml_pop_id: color = occ.L6_INTERNEURON if interneuron else occ.L6_PRINCIPAL_CELL pop.properties.append(neuroml.Property('region','L6')) except: pass # Don't specify a particular color, use random, not a problem... properties['color']=color if True or not 'locations' in self.cell_info[sonata_pop]['0']: properties={} ############# temp for LEMS... 
if model_type != 'virtual': self.handler.handle_population(nml_pop_id, pop_comp, size, component_obj=None, properties=properties) types_vs_pops[type] = nml_pop_id self.cell_info[sonata_pop]['pop_count'] = {} self.cell_info[sonata_pop]['pop_map'] = {} for i in self.cell_info[sonata_pop]['types']: pop = types_vs_pops[self.cell_info[sonata_pop]['types'][i]] if not pop in self.cell_info[sonata_pop]['pop_count']: self.cell_info[sonata_pop]['pop_count'][pop] = 0 index = self.cell_info[sonata_pop]['pop_count'][pop] self.cell_info[sonata_pop]['pop_map'][i] = (pop, index) if not pop in self.nml_ids_vs_gids: self.nml_ids_vs_gids[pop] = {} self.nml_ids_vs_gids[pop][index] = (sonata_pop, i) if i in self.cell_info[sonata_pop]['0']['locations']: if not pop in self.nml_pops_having_locations: self.nml_pops_having_locations.append(pop) pos = self.cell_info[sonata_pop]['0']['locations'][i] #print('Adding pos %i: %s'%(i,pos)) self.handler.handle_location(index, pop, pop_comp, pos['x'] if 'x' in pos and pos['x'] is not None else 0, pos['y'] if 'y' in pos and pos['y'] is not None else 0, pos['z'] if 'z' in pos and pos['z'] is not None else 0) self.cell_info[sonata_pop]['pop_count'][pop]+=1 ######################################################################## # Load simulation info into self.simulation_config if self.simulation_config: if self.simulation_config: for m in self.simulation_config['manifest']: path = self.subs(self.simulation_config['manifest'][m]) self.substitutes[m] = path for s1 in ['output']: for k in self.simulation_config[s1]: self.simulation_config[s1][k] = self.subs(self.simulation_config[s1][k]) for s1 in ['inputs']: for s2 in self.simulation_config[s1]: for k in self.simulation_config[s1][s2]: self.simulation_config[s1][s2][k] = self.subs(self.simulation_config[s1][s2][k]) if 'node_sets_file' in self.simulation_config: node_sets = load_json(self.subs(self.simulation_config['node_sets_file'])) self.simulation_config['node_sets'] = node_sets if not 'node_sets' in 
self.simulation_config: self.simulation_config['node_sets'] = {} for sonata_pop in self.cell_info: self.node_set_mappings[sonata_pop] = {} for sindex in self.cell_info[sonata_pop]['pop_map']: nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0] nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1] # Add all in this sonata_pop to a 'node_set' named after the sonata_pop if not nml_pop in self.node_set_mappings[sonata_pop]: self.node_set_mappings[sonata_pop][nml_pop] = [] self.node_set_mappings[sonata_pop][nml_pop].append(nml_index) #pp.pprint(self.simulation_config) #pp.pprint(self.pop_comp_info) for node_set in self.simulation_config['node_sets']: self.node_set_mappings[node_set] = {} node_set_props = self.simulation_config['node_sets'][node_set] #print_v('===========Checking which cells in pops match node_set: %s = %s'%(node_set,node_set_props)) for sonata_pop in self.cell_info: for sindex in self.cell_info[sonata_pop]['pop_map']: #print('Does %s %s match %s?'%(sonata_pop, sindex, node_set_props)) type = self.cell_info[sonata_pop]['types'][sindex] type_info = self.node_types[sonata_pop][type] nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0] nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1] if 'population' in node_set_props and node_set_props['population'] == sonata_pop: if 'node_id' in node_set_props and sindex in node_set_props['node_id']: if not nml_pop in self.node_set_mappings[node_set]: self.node_set_mappings[node_set][nml_pop] = [] self.node_set_mappings[node_set][nml_pop].append(nml_index) matches = _matches_node_set_props(type_info, node_set_props) #print_v('Node %i in %s (NML: %s[%i]) has type %s (%s); matches: %s'%(sindex, sonata_pop, nml_pop, nml_index, type, type_info, matches)) if matches: if not nml_pop in self.node_set_mappings[node_set]: self.node_set_mappings[node_set][nml_pop] = [] self.node_set_mappings[node_set][nml_pop].append(nml_index) ##pp.pprint(self.node_set_mappings) 
######################################################################## # Extract info from inputs in simulation_config #pp.pprint(self.simulation_config) for input in self.simulation_config['inputs']: info = self.simulation_config['inputs'][input] #print_v(" - Adding input: %s which has info: %s"%(input, info)) self.input_comp_info[input] = {} self.input_comp_info[input][info['input_type']] = {} node_set = info['node_set'] if info['input_type'] == 'current_clamp': comp = 'PG_%s'%input self.input_comp_info[input][info['input_type']][comp] = {'amp':info['amp'],'delay':info['delay'],'duration':info['duration']} for nml_pop_id in self.node_set_mappings[node_set]: input_list_id = 'il_%s_%s'%(input,nml_pop_id) indices = self.node_set_mappings[node_set][nml_pop_id] self.handler.handle_input_list(input_list_id, nml_pop_id, comp, len(indices)) count = 0 for index in indices: self.handler.handle_single_input(input_list_id, count, cellId = index, segId = 0, fract = 0.5) count+=1 elif info['input_type'] == 'spikes': node_info = self.cell_info[node_set] from pyneuroml.plot.PlotSpikes import read_sonata_spikes_hdf5_file ids_times = read_sonata_spikes_hdf5_file(self.subs(info['input_file'])) for id in ids_times: times = ids_times[id] if id in node_info['pop_map']: nml_pop_id, cell_id = node_info['pop_map'][id] print_v("Cell %i in Sonata node set %s (cell %s in nml pop %s) has %i spikes"%(id, node_set, nml_pop_id, cell_id, len(times))) component = '%s__%i'%(nml_pop_id,cell_id) self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times} ''' input_list_id = 'il_%s_%i'%(input,cell_id) self.handler.handle_input_list(input_list_id, nml_pop_id, component, 1) self.handler.handle_single_input(input_list_id, 0, cellId = cell_id, segId = 0, fract = 0.5) ''' else: print_v("Cell %i in Sonata node set %s NOT FOUND!"%(id, node_set)) else: raise Exception("Sonata input type not yet supported: %s"%(info['input_type'])) 
######################################################################## # Use extracted edge info to create connections projections_created = [] for conn in self.conn_info: pre_node = self.conn_info[conn]['pre_node'] post_node = self.conn_info[conn]['post_node'] for i in range(len(self.conn_info[conn]['pre_id'])): pre_id = self.conn_info[conn]['pre_id'][i] post_id = self.conn_info[conn]['post_id'][i] nsyns = self.conn_info[conn]['nsyns'][i] if 'nsyns' in self.conn_info[conn] else 1 type = self.conn_info[conn]['edge_type_id'][i] #print_v(' Conn with %i syns, type %s: %s(%s) -> %s(%s)'%(nsyns,type,pre_node,pre_id,post_node,post_id)) pre_pop,pre_i = self.cell_info[pre_node]['pop_map'][pre_id] post_pop,post_i = self.cell_info[post_node]['pop_map'][post_id] #print_v(' Mapped: Conn %s(%s) -> %s(%s)'%(pre_pop,pre_i,post_pop,post_i)) # print self.edges_info[conn][type] #print self.cell_info[pre_node] #print 11 #print self.node_types[pre_node] #print 22 cell_type_pre = self.cell_info[pre_node]['types'][pre_id] #print cell_type_pre #print 444 pop_type_pre = self.node_types[pre_node][cell_type_pre]['model_type'] #print pop_type_pre #print 333 synapse = self.edges_info[conn][type]['dynamics_params'].split('.')[0] self.syn_comp_info[synapse] = {} #print self.edges_info[conn][type] #pp.pprint(self.init_substitutes) #pp.pprint(self.substitutes) dynamics_params_file = self.subs(self.network_config['components']['synaptic_models_dir']) +'/'+self.edges_info[conn][type]['dynamics_params'] #print_v('Adding syn %s (at %s)'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file)) #TODO: don't load this file every connection!!! 
self.syn_comp_info[synapse]['dynamics_params'] = load_json(dynamics_params_file) proj_id = '%s_%s_%s'%(pre_pop,post_pop,synapse) sign = self.syn_comp_info[synapse]['dynamics_params']['sign'] if 'sign' in self.syn_comp_info[synapse]['dynamics_params'] else 1 weight = self.edges_info[conn][type]['syn_weight'] if 'syn_weight' in self.edges_info[conn][type] else 1.0 syn_weight_edge_group_0 = self.conn_info[conn]['syn_weight_edge_group_0'][i] if 'syn_weight_edge_group_0' in self.conn_info[conn] else None # Assume this overrides value from csv file... if syn_weight_edge_group_0: weight = syn_weight_edge_group_0 #print_v('Adding syn %s (at %s), weight: %s, sign: %s, nsyns: %s'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file, weight, sign, nsyns)) weight_scale = 0.001 if 'level_of_detail' in self.syn_comp_info[synapse]['dynamics_params']: weight_scale = 1 weight=weight_scale * sign * weight * nsyns delay = self.edges_info[conn][type]['delay'] if 'delay' in self.edges_info[conn][type] else 0 if not pop_type_pre == 'virtual': if not proj_id in projections_created: self.handler.handle_projection(proj_id, pre_pop, post_pop, synapse) projections_created.append(proj_id) self.handler.handle_connection(proj_id, i, pre_pop, post_pop, synapse, \ pre_i, \ post_i, \ weight=weight, \ delay=delay) else: component = '%s__%i'%(pre_pop,pre_i) #print_v(' --- Connecting %s to %s[%s]'%(component, post_pop, post_i)) #self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times} input_list_id = 'il_%s_%s_%i_%i'%(component,post_pop,post_i,i) self.handler.handle_input_list(input_list_id, post_pop, component, 1) self.handler.handle_single_input(input_list_id, 0, cellId = post_i, segId = 0, fract = 0.5, weight=weight) """ print('~~~~~~~~~~~~~~~') print('node_types:') pp.pprint(self.node_types) print('~~~~~~~~~~~~~~~') print('cell_info:') pp.pprint(self.cell_info) print('================')"""
[ "def", "parse", "(", "self", ",", "handler", ")", ":", "########################################################################", "# load the main configuration scripts ", "main_config_filename", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "parameters", "...
Main method to parse the Sonata files and call the appropriate methods in the handler
[ "Main", "method", "to", "parse", "the", "Sonata", "files", "and", "call", "the", "appropriate", "methods", "in", "the", "handler" ]
python
train
ThreatConnect-Inc/tcex
tcex/tcex_ti/mappings/task.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/task.py#L114-L126
def reminder_date(self, reminder_date): """ Sets the task reminder_date Args: reminder_date: Converted to %Y-%m-%dT%H:%M:%SZ date format """ if not self.can_update(): self._tcex.handle_error(910, [self.type]) reminder_date = self._utils.format_datetime(reminder_date, date_format='%Y-%m-%dT%H:%M:%SZ') self._data['reminderDate'] = reminder_date request = {'reminderDate': reminder_date} return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
[ "def", "reminder_date", "(", "self", ",", "reminder_date", ")", ":", "if", "not", "self", ".", "can_update", "(", ")", ":", "self", ".", "_tcex", ".", "handle_error", "(", "910", ",", "[", "self", ".", "type", "]", ")", "reminder_date", "=", "self", ...
Sets the task reminder_date Args: reminder_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
[ "Sets", "the", "task", "reminder_date", "Args", ":", "reminder_date", ":", "Converted", "to", "%Y", "-", "%m", "-", "%dT%H", ":", "%M", ":", "%SZ", "date", "format" ]
python
train
blockstack/blockstack-core
blockstack/lib/nameset/namedb.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/namedb.py#L1443-L1459
def is_name_owner( self, name, sender_script_pubkey ): """ Given the fully-qualified name and a sender's script pubkey, determine if the sender owns the name. The name must exist and not be revoked or expired at the current block. """ if not self.is_name_registered( name ): # no one owns it return False owner = self.get_name_owner( name ) if owner != sender_script_pubkey: return False else: return True
[ "def", "is_name_owner", "(", "self", ",", "name", ",", "sender_script_pubkey", ")", ":", "if", "not", "self", ".", "is_name_registered", "(", "name", ")", ":", "# no one owns it ", "return", "False", "owner", "=", "self", ".", "get_name_owner", "(", "name", ...
Given the fully-qualified name and a sender's script pubkey, determine if the sender owns the name. The name must exist and not be revoked or expired at the current block.
[ "Given", "the", "fully", "-", "qualified", "name", "and", "a", "sender", "s", "script", "pubkey", "determine", "if", "the", "sender", "owns", "the", "name", "." ]
python
train
kivy/python-for-android
pythonforandroid/toolchain.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/toolchain.py#L1075-L1087
def distributions(self, _args): """Lists all distributions currently available (i.e. that have already been built).""" ctx = self.ctx dists = Distribution.get_distributions(ctx) if dists: print('{Style.BRIGHT}Distributions currently installed are:' '{Style.RESET_ALL}'.format(Style=Out_Style, Fore=Out_Fore)) pretty_log_dists(dists, print) else: print('{Style.BRIGHT}There are no dists currently built.' '{Style.RESET_ALL}'.format(Style=Out_Style))
[ "def", "distributions", "(", "self", ",", "_args", ")", ":", "ctx", "=", "self", ".", "ctx", "dists", "=", "Distribution", ".", "get_distributions", "(", "ctx", ")", "if", "dists", ":", "print", "(", "'{Style.BRIGHT}Distributions currently installed are:'", "'{S...
Lists all distributions currently available (i.e. that have already been built).
[ "Lists", "all", "distributions", "currently", "available", "(", "i", ".", "e", ".", "that", "have", "already", "been", "built", ")", "." ]
python
train
NoviceLive/intellicoder
intellicoder/synthesizers.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/synthesizers.py#L231-L243
def c_module_relocs(self): """Build relocation for the module variable.""" if self.opts.no_structs or self.opts.windll: return '', '' x86 = reloc_var( self.name, self._c_struct_names()[1], self.opts.reloc_delta, self._c_uses_pointer() ) x64 = '{0} *{1} = &_{1};\n'.format( self._c_struct_names()[1], self.name ) if self._c_uses_pointer() else '' return x86, x64
[ "def", "c_module_relocs", "(", "self", ")", ":", "if", "self", ".", "opts", ".", "no_structs", "or", "self", ".", "opts", ".", "windll", ":", "return", "''", ",", "''", "x86", "=", "reloc_var", "(", "self", ".", "name", ",", "self", ".", "_c_struct_n...
Build relocation for the module variable.
[ "Build", "relocation", "for", "the", "module", "variable", "." ]
python
train
swistakm/graceful
src/graceful/validators.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/validators.py#L48-L62
def choices_validator(choices): """Return validator function that will check if ``value in choices``. Args: max_value (list, set, tuple): allowed choices for new validator """ def validator(value): if value not in choices: # note: make it a list for consistent representation raise ValidationError( "{} is not in {}".format(value, list(choices)) ) return validator
[ "def", "choices_validator", "(", "choices", ")", ":", "def", "validator", "(", "value", ")", ":", "if", "value", "not", "in", "choices", ":", "# note: make it a list for consistent representation", "raise", "ValidationError", "(", "\"{} is not in {}\"", ".", "format",...
Return validator function that will check if ``value in choices``. Args: max_value (list, set, tuple): allowed choices for new validator
[ "Return", "validator", "function", "that", "will", "check", "if", "value", "in", "choices", "." ]
python
train
ic-labs/django-icekit
icekit_events/utils/timeutils.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/utils/timeutils.py#L31-L67
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST): """ Round a datetime object to a time that matches the given precision. when (datetime), default now The datetime object to be rounded. precision (int, timedelta, str), default 60 The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta object to which the datetime object should be rounded. rounding (str), default ROUND_NEAREST The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP). """ when = when or djtz.now() weekday = WEEKDAYS.get(precision, WEEKDAYS['MON']) if precision in WEEKDAYS: precision = int(timedelta(days=7).total_seconds()) elif isinstance(precision, timedelta): precision = int(precision.total_seconds()) # Get delta between the beginning of time and the given datetime object. # If precision is a weekday, the beginning of time must be that same day. when_min = when.min + timedelta(days=weekday) if djtz.is_aware(when): # It doesn't seem to be possible to localise `datetime.min` without # raising `OverflowError`, so create a timezone aware object manually. when_min = datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3]) delta = when - when_min remainder = int(delta.total_seconds()) % precision # First round down and strip microseconds. when -= timedelta(seconds=remainder, microseconds=when.microsecond) # Then add precision to round up. if rounding == ROUND_UP or ( rounding == ROUND_NEAREST and remainder >= precision / 2): when += timedelta(seconds=precision) return when
[ "def", "round_datetime", "(", "when", "=", "None", ",", "precision", "=", "60", ",", "rounding", "=", "ROUND_NEAREST", ")", ":", "when", "=", "when", "or", "djtz", ".", "now", "(", ")", "weekday", "=", "WEEKDAYS", ".", "get", "(", "precision", ",", "...
Round a datetime object to a time that matches the given precision. when (datetime), default now The datetime object to be rounded. precision (int, timedelta, str), default 60 The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta object to which the datetime object should be rounded. rounding (str), default ROUND_NEAREST The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
[ "Round", "a", "datetime", "object", "to", "a", "time", "that", "matches", "the", "given", "precision", "." ]
python
train
ska-sa/purr
Purr/Plugins/local_pychart/area.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/area.py#L118-L122
def y_pos(self, yval): "Return the y position (on the canvas) corresponding to YVAL." off = self.y_coord.get_canvas_pos(self.size[1], yval, self.y_range[0], self.y_range[1]) return self.loc[1] + off
[ "def", "y_pos", "(", "self", ",", "yval", ")", ":", "off", "=", "self", ".", "y_coord", ".", "get_canvas_pos", "(", "self", ".", "size", "[", "1", "]", ",", "yval", ",", "self", ".", "y_range", "[", "0", "]", ",", "self", ".", "y_range", "[", "...
Return the y position (on the canvas) corresponding to YVAL.
[ "Return", "the", "y", "position", "(", "on", "the", "canvas", ")", "corresponding", "to", "YVAL", "." ]
python
train
randomir/plucky
plucky/__init__.py
https://github.com/randomir/plucky/blob/16b7b59aa19d619d8e619dc15dc7eeffc9fe078a/plucky/__init__.py#L144-L200
def merge(a, b, op=None, recurse_list=False, max_depth=None): """Immutable merge ``a`` structure with ``b`` using binary operator ``op`` on leaf nodes. All nodes at, or below, ``max_depth`` are considered to be leaf nodes. Merged structure is returned, input data structures are not modified. If ``recurse_list=True``, leaf lists of equal length will be merged on a list-element level. Lists are considered to be leaf nodes by default (``recurse_list=False``), and they are merged with user-provided ``op``. Note the difference:: merge([1, 2], [3, 4]) ==> [1, 2, 3, 4] merge([1, 2], [3, 4], recurse_list=True) ==> [4, 6] """ if op is None: op = operator.add if max_depth is not None: if max_depth < 1: return op(a, b) else: max_depth -= 1 if isinstance(a, dict) and isinstance(b, dict): result = {} for key in set(chain(a.keys(), b.keys())): if key in a and key in b: result[key] = merge(a[key], b[key], op=op, recurse_list=recurse_list, max_depth=max_depth) elif key in a: result[key] = deepcopy(a[key]) elif key in b: result[key] = deepcopy(b[key]) return result elif isinstance(a, list) and isinstance(b, list): if recurse_list and len(a) == len(b): # merge subelements result = [] for idx in range(len(a)): result.append(merge(a[idx], b[idx], op=op, recurse_list=recurse_list, max_depth=max_depth)) return result else: # merge lists return op(a, b) # all other merge ops should be handled by ``op``. # default ``operator.add`` will handle addition of numeric types, but fail # with TypeError for incompatible types (eg. str + None, etc.) return op(a, b)
[ "def", "merge", "(", "a", ",", "b", ",", "op", "=", "None", ",", "recurse_list", "=", "False", ",", "max_depth", "=", "None", ")", ":", "if", "op", "is", "None", ":", "op", "=", "operator", ".", "add", "if", "max_depth", "is", "not", "None", ":",...
Immutable merge ``a`` structure with ``b`` using binary operator ``op`` on leaf nodes. All nodes at, or below, ``max_depth`` are considered to be leaf nodes. Merged structure is returned, input data structures are not modified. If ``recurse_list=True``, leaf lists of equal length will be merged on a list-element level. Lists are considered to be leaf nodes by default (``recurse_list=False``), and they are merged with user-provided ``op``. Note the difference:: merge([1, 2], [3, 4]) ==> [1, 2, 3, 4] merge([1, 2], [3, 4], recurse_list=True) ==> [4, 6]
[ "Immutable", "merge", "a", "structure", "with", "b", "using", "binary", "operator", "op", "on", "leaf", "nodes", ".", "All", "nodes", "at", "or", "below", "max_depth", "are", "considered", "to", "be", "leaf", "nodes", "." ]
python
train
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9697-L9712
def squeeze_axes(shape, axes, skip=None): """Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX') """ if len(shape) != len(axes): raise ValueError('dimensions of axes and shape do not match') if skip is None: skip = 'XY' shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) return tuple(shape), ''.join(axes)
[ "def", "squeeze_axes", "(", "shape", ",", "axes", ",", "skip", "=", "None", ")", ":", "if", "len", "(", "shape", ")", "!=", "len", "(", "axes", ")", ":", "raise", "ValueError", "(", "'dimensions of axes and shape do not match'", ")", "if", "skip", "is", ...
Return shape and axes with single-dimensional entries removed. Remove unused dimensions unless their axes are listed in 'skip'. >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') ((5, 2, 1), 'TYX')
[ "Return", "shape", "and", "axes", "with", "single", "-", "dimensional", "entries", "removed", "." ]
python
train
KelSolaar/Foundations
foundations/namespace.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/namespace.py#L98-L123
def remove_namespace(attribute, namespace_splitter=NAMESPACE_SPLITTER, root_only=False): """ Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode """ attribute_tokens = attribute.split(namespace_splitter) stripped_attribute = root_only and namespace_splitter.join(attribute_tokens[1:]) or \ attribute_tokens[len(attribute_tokens) - 1] LOGGER.debug("> Attribute: '{0}', stripped attribute: '{1}'.".format(attribute, stripped_attribute)) return stripped_attribute
[ "def", "remove_namespace", "(", "attribute", ",", "namespace_splitter", "=", "NAMESPACE_SPLITTER", ",", "root_only", "=", "False", ")", ":", "attribute_tokens", "=", "attribute", ".", "split", "(", "namespace_splitter", ")", "stripped_attribute", "=", "root_only", "...
Returns attribute with stripped foundations.namespace. Usage:: >>> remove_namespace("grandParent|parent|child") u'child' >>> remove_namespace("grandParent|parent|child", root_only=True) u'parent|child' :param attribute: Attribute. :type attribute: unicode :param namespace_splitter: Namespace splitter character. :type namespace_splitter: unicode :param root_only: Remove only root foundations.namespace. :type root_only: bool :return: Attribute without foundations.namespace. :rtype: unicode
[ "Returns", "attribute", "with", "stripped", "foundations", ".", "namespace", "." ]
python
train
orb-framework/orb
orb/core/query.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/query.py#L614-L631
def columns(self, model=None): """ Returns a generator that loops through the columns that are associated with this query. :return <generator>(orb.Column) """ column = self.column(model=model) if column: yield column check = self.__value if not isinstance(check, (list, set, tuple)): check = (check,) for val in check: if isinstance(val, (Query, QueryCompound)): for col in val.columns(model): yield col
[ "def", "columns", "(", "self", ",", "model", "=", "None", ")", ":", "column", "=", "self", ".", "column", "(", "model", "=", "model", ")", "if", "column", ":", "yield", "column", "check", "=", "self", ".", "__value", "if", "not", "isinstance", "(", ...
Returns a generator that loops through the columns that are associated with this query. :return <generator>(orb.Column)
[ "Returns", "a", "generator", "that", "loops", "through", "the", "columns", "that", "are", "associated", "with", "this", "query", ".", ":", "return", "<generator", ">", "(", "orb", ".", "Column", ")" ]
python
train
ppaquette/gym-pull
gym_pull/envs/registration.py
https://github.com/ppaquette/gym-pull/blob/5b2797fd081ba5be26544983d1eba764e6d9f73b/gym_pull/envs/registration.py#L63-L75
def make(self): """Instantiates an instance of the environment with appropriate kwargs""" if self._entry_point is None: raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id)) cls = load(self._entry_point) env = cls(**self._kwargs) # Make the enviroment aware of which spec it came from. env.spec = self env = env.build(extra_wrappers=self._wrappers) return env
[ "def", "make", "(", "self", ")", ":", "if", "self", ".", "_entry_point", "is", "None", ":", "raise", "error", ".", "Error", "(", "'Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'", ".", "format", "(", "self", ".", "id", ...
Instantiates an instance of the environment with appropriate kwargs
[ "Instantiates", "an", "instance", "of", "the", "environment", "with", "appropriate", "kwargs" ]
python
train
pytroll/trollimage
trollimage/image.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/image.py#L963-L982
def stretch_logarithmic(self, ch_nb, factor=100.): """Move data into range [1:factor] and do a normalized logarithmic enhancement. """ logger.debug("Perform a logarithmic contrast stretch.") if ((self.channels[ch_nb].size == np.ma.count_masked(self.channels[ch_nb])) or (self.channels[ch_nb].min() == self.channels[ch_nb].max())): logger.warning("Nothing to stretch !") return crange = (0., 1.0) arr = self.channels[ch_nb] b__ = float(crange[1] - crange[0]) / np.log(factor) c__ = float(crange[0]) slope = (factor - 1.) / float(arr.max() - arr.min()) arr = 1. + (arr - arr.min()) * slope arr = c__ + b__ * np.log(arr) self.channels[ch_nb] = arr
[ "def", "stretch_logarithmic", "(", "self", ",", "ch_nb", ",", "factor", "=", "100.", ")", ":", "logger", ".", "debug", "(", "\"Perform a logarithmic contrast stretch.\"", ")", "if", "(", "(", "self", ".", "channels", "[", "ch_nb", "]", ".", "size", "==", "...
Move data into range [1:factor] and do a normalized logarithmic enhancement.
[ "Move", "data", "into", "range", "[", "1", ":", "factor", "]", "and", "do", "a", "normalized", "logarithmic", "enhancement", "." ]
python
train
ladybug-tools/ladybug
ladybug/datatype/temperaturetime.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/temperaturetime.py#L46-L53
def to_si(self, values, from_unit): """Return values in SI and the units to which the values have been converted.""" if from_unit in self._si_units: return values, from_unit elif from_unit == 'degF-hours': return self.to_unit(values, 'degC-hours', from_unit), 'degC-hours' else: return self.to_unit(values, 'degC-days', from_unit), 'degC-days'
[ "def", "to_si", "(", "self", ",", "values", ",", "from_unit", ")", ":", "if", "from_unit", "in", "self", ".", "_si_units", ":", "return", "values", ",", "from_unit", "elif", "from_unit", "==", "'degF-hours'", ":", "return", "self", ".", "to_unit", "(", "...
Return values in SI and the units to which the values have been converted.
[ "Return", "values", "in", "SI", "and", "the", "units", "to", "which", "the", "values", "have", "been", "converted", "." ]
python
train
Esri/ArcREST
src/arcrest/manageorg/_community.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_community.py#L318-L330
def __init(self): """loads the property data into the class""" if self._portalId is None: from .administration import Administration portalSelf = Administration(url=self._securityHandler.org_url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port).portals.portalSelf self._portalId = portalSelf.id self._currentUser = portalSelf.user['username']
[ "def", "__init", "(", "self", ")", ":", "if", "self", ".", "_portalId", "is", "None", ":", "from", ".", "administration", "import", "Administration", "portalSelf", "=", "Administration", "(", "url", "=", "self", ".", "_securityHandler", ".", "org_url", ",", ...
loads the property data into the class
[ "loads", "the", "property", "data", "into", "the", "class" ]
python
train
acutesoftware/AIKIF
aikif/dataTools/cls_datatable.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/dataTools/cls_datatable.py#L68-L74
def describe_contents(self): """ describes various contents of data table """ print('======================================================================') print(self) print('Table = ', str(len(self.header)) + ' cols x ' + str(len(self.arr)) + ' rows') print('HEADER = ', self.get_header()) print('arr = ', self.arr[0:2])
[ "def", "describe_contents", "(", "self", ")", ":", "print", "(", "'======================================================================'", ")", "print", "(", "self", ")", "print", "(", "'Table = '", ",", "str", "(", "len", "(", "self", ".", "header", ")", ")", ...
describes various contents of data table
[ "describes", "various", "contents", "of", "data", "table" ]
python
train
Mxit/python-mxit
mxit/oauth.py
https://github.com/Mxit/python-mxit/blob/6b18a54ef6fbfe1f9d94755ba3d4ad77743c8b0c/mxit/oauth.py#L113-L147
def get_app_token(self, scope): """Gets the app auth token""" app_token = self.__get_app_token(scope) if app_token: return app_token if self.__cache is not None: token = self.__cache.get(self.__app_token_cache_key(scope)) if token: return token self.__app_token = None payload = { 'grant_type': 'client_credentials', 'scope': scope } url = settings.AUTH_ENDPOINT + '/token' r = post(url, data=payload, auth=HTTPBasicAuth(self.__client_id, self.__client_secret), verify=self.__verify_cert) if r.status_code == 200: data = r.json() self.__set_app_token(scope, data[u'access_token']) if self.__cache is not None: self.__cache.set(self.__app_token_cache_key(scope), str(data[u'access_token']), data[u'expires_in'] - 300) app_token = self.__get_app_token(scope) if not app_token: raise MxitAPIException("Failed to retrieve app token for '%s' scope" % scope) return app_token
[ "def", "get_app_token", "(", "self", ",", "scope", ")", ":", "app_token", "=", "self", ".", "__get_app_token", "(", "scope", ")", "if", "app_token", ":", "return", "app_token", "if", "self", ".", "__cache", "is", "not", "None", ":", "token", "=", "self",...
Gets the app auth token
[ "Gets", "the", "app", "auth", "token" ]
python
train
twilio/twilio-python
twilio/rest/sync/v1/service/sync_stream/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/sync_stream/__init__.py#L350-L364
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncStreamContext for this SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext """ if self._context is None: self._context = SyncStreamContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "SyncStreamContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", ...
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncStreamContext for this SyncStreamInstance :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train