repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
sdispater/orator
orator/dbal/index.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/index.py#L103-L113
def has_column_at_position(self, column_name, pos=0): """ :type column_name: str :type pos: int :rtype: bool """ column_name = self._trim_quotes(column_name.lower()) index_columns = [c.lower() for c in self.get_unquoted_columns()] return index_columns.index(column_name) == pos
[ "def", "has_column_at_position", "(", "self", ",", "column_name", ",", "pos", "=", "0", ")", ":", "column_name", "=", "self", ".", "_trim_quotes", "(", "column_name", ".", "lower", "(", ")", ")", "index_columns", "=", "[", "c", ".", "lower", "(", ")", ...
:type column_name: str :type pos: int :rtype: bool
[ ":", "type", "column_name", ":", "str", ":", "type", "pos", ":", "int" ]
python
train
mdscruggs/ga
ga/chromosomes.py
https://github.com/mdscruggs/ga/blob/adac7a004e5e22d888e44ab39f313064c3803b38/ga/chromosomes.py#L53-L72
def dna(self, dna): """ Replace this chromosome's DNA with new DNA of equal length, assigning the new DNA to the chromosome's genes sequentially. For example, if a chromosome contains these genes... 1. 100100 2. 011011 ...and the new DNA is 111111000000, the genes become: 1. 111111 2. 000000 """ assert self.length == len(dna) i = 0 for gene in self.genes: gene.dna = dna[i:i + gene.length] i += gene.length
[ "def", "dna", "(", "self", ",", "dna", ")", ":", "assert", "self", ".", "length", "==", "len", "(", "dna", ")", "i", "=", "0", "for", "gene", "in", "self", ".", "genes", ":", "gene", ".", "dna", "=", "dna", "[", "i", ":", "i", "+", "gene", ...
Replace this chromosome's DNA with new DNA of equal length, assigning the new DNA to the chromosome's genes sequentially. For example, if a chromosome contains these genes... 1. 100100 2. 011011 ...and the new DNA is 111111000000, the genes become: 1. 111111 2. 000000
[ "Replace", "this", "chromosome", "s", "DNA", "with", "new", "DNA", "of", "equal", "length", "assigning", "the", "new", "DNA", "to", "the", "chromosome", "s", "genes", "sequentially", ".", "For", "example", "if", "a", "chromosome", "contains", "these", "genes...
python
train
Kentzo/git-archive-all
git_archive_all.py
https://github.com/Kentzo/git-archive-all/blob/fed1f48f1287c84220be08d63181a2816bde7a64/git_archive_all.py#L456-L480
def get_git_version(cls): """ Return version of git current shell points to. If version cannot be parsed None is returned. @rtype: tuple or None """ try: output = cls.run_git_shell('git version') except CalledProcessError: cls.LOG.warning("Unable to get Git version.") return None try: version = output.split()[2] except IndexError: cls.LOG.warning("Unable to parse Git version \"%s\".", output) return None try: return tuple(int(v) for v in version.split('.')) except ValueError: cls.LOG.warning("Unable to parse Git version \"%s\".", version) return None
[ "def", "get_git_version", "(", "cls", ")", ":", "try", ":", "output", "=", "cls", ".", "run_git_shell", "(", "'git version'", ")", "except", "CalledProcessError", ":", "cls", ".", "LOG", ".", "warning", "(", "\"Unable to get Git version.\"", ")", "return", "No...
Return version of git current shell points to. If version cannot be parsed None is returned. @rtype: tuple or None
[ "Return", "version", "of", "git", "current", "shell", "points", "to", "." ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/parcels.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/parcels.py#L24-L33
def get_parcel(resource_root, product, version, cluster_name="default"): """ Lookup a parcel by name @param resource_root: The root Resource object. @param product: Parcel product name @param version: Parcel version @param cluster_name: Cluster name @return: An ApiService object """ return _get_parcel(resource_root, PARCEL_PATH % (cluster_name, product, version))
[ "def", "get_parcel", "(", "resource_root", ",", "product", ",", "version", ",", "cluster_name", "=", "\"default\"", ")", ":", "return", "_get_parcel", "(", "resource_root", ",", "PARCEL_PATH", "%", "(", "cluster_name", ",", "product", ",", "version", ")", ")" ...
Lookup a parcel by name @param resource_root: The root Resource object. @param product: Parcel product name @param version: Parcel version @param cluster_name: Cluster name @return: An ApiService object
[ "Lookup", "a", "parcel", "by", "name" ]
python
train
airspeed-velocity/asv
asv/extern/asizeof.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/extern/asizeof.py#L1020-L1029
def format(self): '''Return format dict. ''' c = n = '' if not self.both: c = ' (code only)' if self.leng: n = ' (%s)' % _nameof(self.leng) return _kwds(base=self.base, item=self.item, leng=n, code=c, kind=self.kind)
[ "def", "format", "(", "self", ")", ":", "c", "=", "n", "=", "''", "if", "not", "self", ".", "both", ":", "c", "=", "' (code only)'", "if", "self", ".", "leng", ":", "n", "=", "' (%s)'", "%", "_nameof", "(", "self", ".", "leng", ")", "return", "...
Return format dict.
[ "Return", "format", "dict", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L2952-L2982
def LEAVE(cpu): """ High level procedure exit. Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure's stack frame. A RET instruction is commonly executed following a LEAVE instruction to return program control to the calling procedure:: IF Stackaddress_bit_size = 32 THEN ESP = EBP; ELSE (* Stackaddress_bit_size = 16*) SP = BP; FI; IF OperandSize = 32 THEN EBP = Pop(); ELSE (* OperandSize = 16*) BP = Pop(); FI; :param cpu: current CPU. """ cpu.STACK = cpu.FRAME cpu.FRAME = cpu.pop(cpu.address_bit_size)
[ "def", "LEAVE", "(", "cpu", ")", ":", "cpu", ".", "STACK", "=", "cpu", ".", "FRAME", "cpu", ".", "FRAME", "=", "cpu", ".", "pop", "(", "cpu", ".", "address_bit_size", ")" ]
High level procedure exit. Releases the stack frame set up by an earlier ENTER instruction. The LEAVE instruction copies the frame pointer (in the EBP register) into the stack pointer register (ESP), which releases the stack space allocated to the stack frame. The old frame pointer (the frame pointer for the calling procedure that was saved by the ENTER instruction) is then popped from the stack into the EBP register, restoring the calling procedure's stack frame. A RET instruction is commonly executed following a LEAVE instruction to return program control to the calling procedure:: IF Stackaddress_bit_size = 32 THEN ESP = EBP; ELSE (* Stackaddress_bit_size = 16*) SP = BP; FI; IF OperandSize = 32 THEN EBP = Pop(); ELSE (* OperandSize = 16*) BP = Pop(); FI; :param cpu: current CPU.
[ "High", "level", "procedure", "exit", "." ]
python
valid
consbio/ncdjango
ncdjango/interfaces/arcgis/form_fields.py
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/form_fields.py#L47-L92
def to_python(self, value): """ This assumes the value has been preprocessed into a dictionary of the form: {'type': <geometry_type>, 'geometry': <raw_geometry>} """ if not value or isinstance(value, BaseGeometry): return value geometry_type = value['type'] geometry = value['geometry'] try: if geometry_type == 'esriGeometryPoint': if 'x' in geometry: data = json.loads(geometry) x, y = data['x'], data['y'] else: x, y = [float(val) for val in geometry.split(',')] return Point(x, y) elif geometry_type == 'esriGeometryMultipoint': data = json.loads(geometry) return MultiPoint([(p['0'], p['1']) for p in data['points']]) elif geometry_type == 'esriGeometryPolyline': data = json.loads(geometry) return MultiLineString([((l[0][0], l[0][1]), (l[1][0], l[1][1])) for l in data['paths']]) elif geometry_type == 'esriGeometryPolygon': data = json.loads(geometry) rings = [LinearRing([(p[0], p[1]) for p in r]) for r in data['rings']] return Polygon([r for r in rings if not r.is_ccw], interiors=[r for r in rings if r.is_ccw]) elif geometry_type == 'esriGeometryEnvelope': if 'xmin' in geometry: data = json.loads(geometry) xmin, ymin, xmax, ymax = [data[k] for k in ('xmin', 'ymin', 'xmax', 'ymax')] else: xmin, ymin, xmax, ymax = [float(val) for val in geometry.split(',')] return MultiPoint([(xmin, ymin), (xmax, ymax)]).envelope else: raise ValueError except ValueError: raise ValidationError('Invalid geometry')
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "not", "value", "or", "isinstance", "(", "value", ",", "BaseGeometry", ")", ":", "return", "value", "geometry_type", "=", "value", "[", "'type'", "]", "geometry", "=", "value", "[", "'geometr...
This assumes the value has been preprocessed into a dictionary of the form: {'type': <geometry_type>, 'geometry': <raw_geometry>}
[ "This", "assumes", "the", "value", "has", "been", "preprocessed", "into", "a", "dictionary", "of", "the", "form", ":", "{", "type", ":", "<geometry_type", ">", "geometry", ":", "<raw_geometry", ">", "}" ]
python
train
Azure/azure-cli-extensions
src/botservice/azext_bot/custom.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/botservice/azext_bot/custom.py#L21-L38
def __install_node_dependencies(kudu_client): """Installs Node.js dependencies at `site/wwwroot/` for Node.js bots. This method is only called when the detected bot is a Node.js bot. :return: Dictionary with results of the HTTP Kudu request """ if not kudu_client._KuduClient__initialized: # pylint:disable=protected-access kudu_client._KuduClient__initialize() # pylint:disable=protected-access payload = { 'command': 'npm install', 'dir': r'site\wwwroot' } response = requests.post(kudu_client._KuduClient__scm_url + '/api/command', data=json.dumps(payload), # pylint:disable=protected-access headers=kudu_client._KuduClient__get_application_json_headers()) # pylint:disable=protected-access HttpResponseValidator.check_response_status(response) return response.json()
[ "def", "__install_node_dependencies", "(", "kudu_client", ")", ":", "if", "not", "kudu_client", ".", "_KuduClient__initialized", ":", "# pylint:disable=protected-access", "kudu_client", ".", "_KuduClient__initialize", "(", ")", "# pylint:disable=protected-access", "payload", ...
Installs Node.js dependencies at `site/wwwroot/` for Node.js bots. This method is only called when the detected bot is a Node.js bot. :return: Dictionary with results of the HTTP Kudu request
[ "Installs", "Node", ".", "js", "dependencies", "at", "site", "/", "wwwroot", "/", "for", "Node", ".", "js", "bots", "." ]
python
train
jgorset/django-respite
respite/decorators.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/decorators.py#L24-L52
def route(regex, method, name): """ Route the decorated view. :param regex: A string describing a regular expression to which the request path will be matched. :param method: A string describing the HTTP method that this view accepts. :param name: A string describing the name of the URL pattern. ``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns a string describing a regular expression to which the request path will be matched. ``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns a string describing the name of the URL pattern. """ def decorator(function): function.route = routes.route( regex = regex, view = function.__name__, method = method, name = name ) @wraps(function) def wrapper(self, *args, **kwargs): return function(self, *args, **kwargs) return wrapper return decorator
[ "def", "route", "(", "regex", ",", "method", ",", "name", ")", ":", "def", "decorator", "(", "function", ")", ":", "function", ".", "route", "=", "routes", ".", "route", "(", "regex", "=", "regex", ",", "view", "=", "function", ".", "__name__", ",", ...
Route the decorated view. :param regex: A string describing a regular expression to which the request path will be matched. :param method: A string describing the HTTP method that this view accepts. :param name: A string describing the name of the URL pattern. ``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns a string describing a regular expression to which the request path will be matched. ``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns a string describing the name of the URL pattern.
[ "Route", "the", "decorated", "view", "." ]
python
train
nornir-automation/nornir
nornir/core/connections.py
https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/core/connections.py#L30-L44
def open( self, hostname: Optional[str], username: Optional[str], password: Optional[str], port: Optional[int], platform: Optional[str], extras: Optional[Dict[str, Any]] = None, configuration: Optional[Config] = None, ) -> None: """ Connect to the device and populate the attribute :attr:`connection` with the underlying connection """ pass
[ "def", "open", "(", "self", ",", "hostname", ":", "Optional", "[", "str", "]", ",", "username", ":", "Optional", "[", "str", "]", ",", "password", ":", "Optional", "[", "str", "]", ",", "port", ":", "Optional", "[", "int", "]", ",", "platform", ":"...
Connect to the device and populate the attribute :attr:`connection` with the underlying connection
[ "Connect", "to", "the", "device", "and", "populate", "the", "attribute", ":", "attr", ":", "connection", "with", "the", "underlying", "connection" ]
python
train
nickjj/ansigenome
ansigenome/export.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L182-L189
def exit_if_missing_graphviz(self): """ Detect the presence of the dot utility to make a png graph. """ (out, err) = utils.capture_shell("which dot") if "dot" not in out: ui.error(c.MESSAGES["dot_missing"])
[ "def", "exit_if_missing_graphviz", "(", "self", ")", ":", "(", "out", ",", "err", ")", "=", "utils", ".", "capture_shell", "(", "\"which dot\"", ")", "if", "\"dot\"", "not", "in", "out", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"dot_m...
Detect the presence of the dot utility to make a png graph.
[ "Detect", "the", "presence", "of", "the", "dot", "utility", "to", "make", "a", "png", "graph", "." ]
python
train
idlesign/django-sitetree
sitetree/admin.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/admin.py#L207-L214
def get_tree(self, request, tree_id, item_id=None): """Fetches Tree for current or given TreeItem.""" if tree_id is None: tree_id = self.get_object(request, item_id).tree_id self.tree = MODEL_TREE_CLASS._default_manager.get(pk=tree_id) self.tree.verbose_name_plural = self.tree._meta.verbose_name_plural self.tree.urls = _TREE_URLS return self.tree
[ "def", "get_tree", "(", "self", ",", "request", ",", "tree_id", ",", "item_id", "=", "None", ")", ":", "if", "tree_id", "is", "None", ":", "tree_id", "=", "self", ".", "get_object", "(", "request", ",", "item_id", ")", ".", "tree_id", "self", ".", "t...
Fetches Tree for current or given TreeItem.
[ "Fetches", "Tree", "for", "current", "or", "given", "TreeItem", "." ]
python
test
secdev/scapy
scapy/contrib/dce_rpc.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/dce_rpc.py#L145-L152
def dispatch_hook(cls, _pkt, _underlayer=None, *args, **kargs): """dispatch_hook to choose among different registered payloads""" for klass in cls._payload_class: if hasattr(klass, "can_handle") and \ klass.can_handle(_pkt, _underlayer): return klass print("DCE/RPC payload class not found or undefined (using Raw)") return Raw
[ "def", "dispatch_hook", "(", "cls", ",", "_pkt", ",", "_underlayer", "=", "None", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "for", "klass", "in", "cls", ".", "_payload_class", ":", "if", "hasattr", "(", "klass", ",", "\"can_handle\"", ")", ...
dispatch_hook to choose among different registered payloads
[ "dispatch_hook", "to", "choose", "among", "different", "registered", "payloads" ]
python
train
shidenggui/easytrader
easytrader/webtrader.py
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/webtrader.py#L40-L54
def prepare(self, config_file=None, user=None, password=None, **kwargs): """登录的统一接口 :param config_file 登录数据文件,若无则选择参数登录模式 :param user: 各家券商的账号或者雪球的用户名 :param password: 密码, 券商为加密后的密码,雪球为明文密码 :param account: [雪球登录需要]雪球手机号(邮箱手机二选一) :param portfolio_code: [雪球登录需要]组合代码 :param portfolio_market: [雪球登录需要]交易市场, 可选['cn', 'us', 'hk'] 默认 'cn' """ if config_file is not None: self.read_config(config_file) else: self._prepare_account(user, password, **kwargs) self.autologin()
[ "def", "prepare", "(", "self", ",", "config_file", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "config_file", "is", "not", "None", ":", "self", ".", "read_config", "(", "config_file", ...
登录的统一接口 :param config_file 登录数据文件,若无则选择参数登录模式 :param user: 各家券商的账号或者雪球的用户名 :param password: 密码, 券商为加密后的密码,雪球为明文密码 :param account: [雪球登录需要]雪球手机号(邮箱手机二选一) :param portfolio_code: [雪球登录需要]组合代码 :param portfolio_market: [雪球登录需要]交易市场, 可选['cn', 'us', 'hk'] 默认 'cn'
[ "登录的统一接口", ":", "param", "config_file", "登录数据文件,若无则选择参数登录模式", ":", "param", "user", ":", "各家券商的账号或者雪球的用户名", ":", "param", "password", ":", "密码", "券商为加密后的密码,雪球为明文密码", ":", "param", "account", ":", "[", "雪球登录需要", "]", "雪球手机号", "(", "邮箱手机二选一", ")", ":", "param", ...
python
train
tallforasmurf/byteplay
byteplay3.py
https://github.com/tallforasmurf/byteplay/blob/f6189df69dea763c83e857150244a8a89c082cc2/byteplay3.py#L243-L253
def object_attributes( thing, all=False ) : ''' Return a sorted list of names defined by thing that are not also names in a standard object, except include __doc__. ''' standard_names = set( dir( object() ) ) things_names = set( dir( thing ) ) if not all : things_names -= standard_names things_names |= set( ['__doc__'] ) return sorted( things_names )
[ "def", "object_attributes", "(", "thing", ",", "all", "=", "False", ")", ":", "standard_names", "=", "set", "(", "dir", "(", "object", "(", ")", ")", ")", "things_names", "=", "set", "(", "dir", "(", "thing", ")", ")", "if", "not", "all", ":", "thi...
Return a sorted list of names defined by thing that are not also names in a standard object, except include __doc__.
[ "Return", "a", "sorted", "list", "of", "names", "defined", "by", "thing", "that", "are", "not", "also", "names", "in", "a", "standard", "object", "except", "include", "__doc__", "." ]
python
train
pypa/pipenv
pipenv/vendor/semver.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/semver.py#L41-L73
def parse(version): """Parse version to major, minor, patch, pre-release, build parts. :param version: version string :return: dictionary with the keys 'build', 'major', 'minor', 'patch', and 'prerelease'. The prerelease or build keys can be None if not provided :rtype: dict >>> import semver >>> ver = semver.parse('3.4.5-pre.2+build.4') >>> ver['major'] 3 >>> ver['minor'] 4 >>> ver['patch'] 5 >>> ver['prerelease'] 'pre.2' >>> ver['build'] 'build.4' """ match = _REGEX.match(version) if match is None: raise ValueError('%s is not valid SemVer string' % version) version_parts = match.groupdict() version_parts['major'] = int(version_parts['major']) version_parts['minor'] = int(version_parts['minor']) version_parts['patch'] = int(version_parts['patch']) return version_parts
[ "def", "parse", "(", "version", ")", ":", "match", "=", "_REGEX", ".", "match", "(", "version", ")", "if", "match", "is", "None", ":", "raise", "ValueError", "(", "'%s is not valid SemVer string'", "%", "version", ")", "version_parts", "=", "match", ".", "...
Parse version to major, minor, patch, pre-release, build parts. :param version: version string :return: dictionary with the keys 'build', 'major', 'minor', 'patch', and 'prerelease'. The prerelease or build keys can be None if not provided :rtype: dict >>> import semver >>> ver = semver.parse('3.4.5-pre.2+build.4') >>> ver['major'] 3 >>> ver['minor'] 4 >>> ver['patch'] 5 >>> ver['prerelease'] 'pre.2' >>> ver['build'] 'build.4'
[ "Parse", "version", "to", "major", "minor", "patch", "pre", "-", "release", "build", "parts", "." ]
python
train
cronofy/pycronofy
pycronofy/pagination.py
https://github.com/cronofy/pycronofy/blob/3d807603029478fa9387a9dfb6c3acd9faa4f08e/pycronofy/pagination.py#L27-L37
def all(self): """Return all results as a list by automatically fetching all pages. :return: All results. :rtype: ``list`` """ results = self.data[self.data_type] while self.current < self.total: self.fetch_next_page() results.extend(self.data[self.data_type]) return results
[ "def", "all", "(", "self", ")", ":", "results", "=", "self", ".", "data", "[", "self", ".", "data_type", "]", "while", "self", ".", "current", "<", "self", ".", "total", ":", "self", ".", "fetch_next_page", "(", ")", "results", ".", "extend", "(", ...
Return all results as a list by automatically fetching all pages. :return: All results. :rtype: ``list``
[ "Return", "all", "results", "as", "a", "list", "by", "automatically", "fetching", "all", "pages", "." ]
python
train
csparpa/pyowm
pyowm/weatherapi25/parsers/stationlistparser.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/parsers/stationlistparser.py#L20-L39
def parse_JSON(self, JSON_string): """ Parses a list of *Station* instances out of raw JSON data. Only certain properties of the data are used: if these properties are not found or cannot be parsed, an error is issued. :param JSON_string: a raw JSON string :type JSON_string: str :returns: a list of *Station* instances or ``None`` if no data is available :raises: *ParseResponseError* if it is impossible to find or parse the data needed to build the result, *APIResponseError* if the OWM API returns a HTTP status error """ if JSON_string is None: raise ParseResponseError('JSON data is None') d = json.loads(JSON_string) station_parser = StationParser() return [station_parser.parse_JSON(json.dumps(item)) for item in d]
[ "def", "parse_JSON", "(", "self", ",", "JSON_string", ")", ":", "if", "JSON_string", "is", "None", ":", "raise", "ParseResponseError", "(", "'JSON data is None'", ")", "d", "=", "json", ".", "loads", "(", "JSON_string", ")", "station_parser", "=", "StationPars...
Parses a list of *Station* instances out of raw JSON data. Only certain properties of the data are used: if these properties are not found or cannot be parsed, an error is issued. :param JSON_string: a raw JSON string :type JSON_string: str :returns: a list of *Station* instances or ``None`` if no data is available :raises: *ParseResponseError* if it is impossible to find or parse the data needed to build the result, *APIResponseError* if the OWM API returns a HTTP status error
[ "Parses", "a", "list", "of", "*", "Station", "*", "instances", "out", "of", "raw", "JSON", "data", ".", "Only", "certain", "properties", "of", "the", "data", "are", "used", ":", "if", "these", "properties", "are", "not", "found", "or", "cannot", "be", ...
python
train
davenquinn/Attitude
attitude/error/axes.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/error/axes.py#L108-L146
def statistical_axes(fit, **kw): """ Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data. """ method = kw.pop('method', 'noise') confidence_level = kw.pop('confidence_level', 0.95) dof = kw.pop('dof',2) nominal = fit.eigenvalues if method == 'sampling': cov = sampling_covariance(fit,**kw) elif method == 'noise': cov = noise_covariance(fit,**kw) if kw.pop('chisq', False): # Model the incorrect behaviour of using the # Chi2 distribution instead of the Fisher # distribution (which is a measure of the # ratio between the two). z = chi2.ppf(confidence_level,dof) else: z = fisher_statistic(fit.n,confidence_level,dof=dof) # Apply two fisher F parameters (one along each axis) # Since we apply to each axis without division, # it is as if we are applying N.sqrt(2*F) to the entire # distribution, aligning us with (Francq, 2014) err = z*N.sqrt(cov) return apply_error_scaling(nominal, err, n=fit.n, **kw)
[ "def", "statistical_axes", "(", "fit", ",", "*", "*", "kw", ")", ":", "method", "=", "kw", ".", "pop", "(", "'method'", ",", "'noise'", ")", "confidence_level", "=", "kw", ".", "pop", "(", "'confidence_level'", ",", "0.95", ")", "dof", "=", "kw", "."...
Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data.
[ "Hyperbolic", "error", "using", "a", "statistical", "process", "(", "either", "sampling", "or", "noise", "errors", ")" ]
python
train
broadinstitute/fiss
firecloud/supervisor.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/supervisor.py#L78-L128
def validate_monitor_tasks(dependencies, args): """ Validate that all entries in the supervisor are valid task configurations and that all permissions requirements are satisfied. """ # Make a list of all task configurations needed to supervise sup_configs = sorted(dependencies.keys()) try: logging.info("Validating supervisor data...") # Make an api call to list task configurations in the workspace r = fapi.list_workspace_configs(args['project'], args['workspace']) fapi._check_response_code(r, 200) space_configs = r.json() # Make a dict for easy lookup later space_configs = { c["name"]: c for c in space_configs} # Also make an api call to list methods you have view permissions for r = fapi.list_repository_methods() fapi._check_response_code(r, 200) repo_methods = r.json() ## Put in a form that is more easily searchable: namespace/name:snapshot repo_methods = {m['namespace'] + '/' + m['name'] + ':' + str(m['snapshotId']) for m in repo_methods if m['entityType'] == 'Workflow'} valid = True for config in sup_configs: # ensure config exists in the workspace if config not in space_configs: logging.error("No task configuration for " + config + " found in " + args['project'] + "/" + args['workspace']) valid = False else: # Check access permissions for the referenced method m = space_configs[config]['methodRepoMethod'] ref_method = m['methodNamespace'] + "/" + m['methodName'] + ":" + str(m['methodVersion']) if ref_method not in repo_methods: logging.error(config+ " -- You don't have permisson to run the referenced method: " + ref_method) valid = False except Exception as e: logging.error("Exception occurred while validating supervisor: " + str(e)) raise return False return valid
[ "def", "validate_monitor_tasks", "(", "dependencies", ",", "args", ")", ":", "# Make a list of all task configurations needed to supervise", "sup_configs", "=", "sorted", "(", "dependencies", ".", "keys", "(", ")", ")", "try", ":", "logging", ".", "info", "(", "\"Va...
Validate that all entries in the supervisor are valid task configurations and that all permissions requirements are satisfied.
[ "Validate", "that", "all", "entries", "in", "the", "supervisor", "are", "valid", "task", "configurations", "and", "that", "all", "permissions", "requirements", "are", "satisfied", "." ]
python
train
pydata/xarray
xarray/backends/file_manager.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/file_manager.py#L151-L178
def acquire(self, needs_lock=True): """Acquiring a file object from the manager. A new file is only opened if it has expired from the least-recently-used cache. This method uses a lock, which ensures that it is thread-safe. You can safely acquire a file in multiple threads at the same time, as long as the underlying file object is thread-safe. Returns ------- An open file object, as returned by ``opener(*args, **kwargs)``. """ with self._optional_lock(needs_lock): try: file = self._cache[self._key] except KeyError: kwargs = self._kwargs if self._mode is not _DEFAULT_MODE: kwargs = kwargs.copy() kwargs['mode'] = self._mode file = self._opener(*self._args, **kwargs) if self._mode == 'w': # ensure file doesn't get overriden when opened again self._mode = 'a' self._cache[self._key] = file return file
[ "def", "acquire", "(", "self", ",", "needs_lock", "=", "True", ")", ":", "with", "self", ".", "_optional_lock", "(", "needs_lock", ")", ":", "try", ":", "file", "=", "self", ".", "_cache", "[", "self", ".", "_key", "]", "except", "KeyError", ":", "kw...
Acquiring a file object from the manager. A new file is only opened if it has expired from the least-recently-used cache. This method uses a lock, which ensures that it is thread-safe. You can safely acquire a file in multiple threads at the same time, as long as the underlying file object is thread-safe. Returns ------- An open file object, as returned by ``opener(*args, **kwargs)``.
[ "Acquiring", "a", "file", "object", "from", "the", "manager", "." ]
python
train
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L1067-L1098
def hotkey(*args, **kwargs): """Performs key down presses on the arguments passed in order, then performs key releases in reverse order. The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a "Ctrl-Shift-C" hotkey/keyboard shortcut press. Args: key(s) (str): The series of keys to press, in order. This can also be a list of key strings to press. interval (float, optional): The number of seconds in between each press. 0.0 by default, for no pause in between presses. Returns: None """ interval = float(kwargs.get('interval', 0.0)) _failSafeCheck() for c in args: if len(c) > 1: c = c.lower() platformModule._keyDown(c) time.sleep(interval) for c in reversed(args): if len(c) > 1: c = c.lower() platformModule._keyUp(c) time.sleep(interval) _autoPause(kwargs.get('pause', None), kwargs.get('_pause', True))
[ "def", "hotkey", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "interval", "=", "float", "(", "kwargs", ".", "get", "(", "'interval'", ",", "0.0", ")", ")", "_failSafeCheck", "(", ")", "for", "c", "in", "args", ":", "if", "len", "(", "c", ...
Performs key down presses on the arguments passed in order, then performs key releases in reverse order. The effect is that calling hotkey('ctrl', 'shift', 'c') would perform a "Ctrl-Shift-C" hotkey/keyboard shortcut press. Args: key(s) (str): The series of keys to press, in order. This can also be a list of key strings to press. interval (float, optional): The number of seconds in between each press. 0.0 by default, for no pause in between presses. Returns: None
[ "Performs", "key", "down", "presses", "on", "the", "arguments", "passed", "in", "order", "then", "performs", "key", "releases", "in", "reverse", "order", "." ]
python
train
avirshup/DockerMake
dockermake/builds.py
https://github.com/avirshup/DockerMake/blob/2173199904f086353ef539ea578788b99f6fea0a/dockermake/builds.py#L64-L119
def build(self, client, nobuild=False, usecache=True, pull=False): """ Drives the build of the final image - get the list of steps and execute them. Args: client (docker.Client): docker client object that will build the image nobuild (bool): just create dockerfiles, don't actually build the image usecache (bool): use docker cache, or rebuild everything from scratch? pull (bool): try to pull new versions of repository images? """ if not nobuild: self.update_source_images(client, usecache=usecache, pull=pull) width = utils.get_console_width() cprint('\n' + '='*width, color='white', attrs=['bold']) line = 'STARTING BUILD for "%s" (image definition "%s" from %s)\n' % ( self.targetname, self.imagename, self.steps[-1].sourcefile) cprint(_centered(line, width), color='blue', attrs=['bold']) for istep, step in enumerate(self.steps): print(colored('* Step','blue'), colored('%d/%d' % (istep+1, len(self.steps)), 'blue', attrs=['bold']), colored('for image', color='blue'), colored(self.imagename, color='blue', attrs=['bold'])) if not nobuild: if step.bust_cache: stackkey = self._get_stack_key(istep) if stackkey in _rebuilt: step.bust_cache = False step.build(client, usecache=usecache) print(colored("* Created intermediate image", 'green'), colored(step.buildname, 'green', attrs=['bold']), end='\n\n') if step.bust_cache: _rebuilt.add(stackkey) finalimage = step.buildname if not nobuild: self.finalizenames(client, finalimage) line = 'FINISHED BUILDING "%s" (image definition "%s" from %s)'%( self.targetname, self.imagename, self.steps[-1].sourcefile) cprint(_centered(line, width), color='green', attrs=['bold']) cprint('=' * width, color='white', attrs=['bold'], end='\n\n')
[ "def", "build", "(", "self", ",", "client", ",", "nobuild", "=", "False", ",", "usecache", "=", "True", ",", "pull", "=", "False", ")", ":", "if", "not", "nobuild", ":", "self", ".", "update_source_images", "(", "client", ",", "usecache", "=", "usecach...
Drives the build of the final image - get the list of steps and execute them. Args: client (docker.Client): docker client object that will build the image nobuild (bool): just create dockerfiles, don't actually build the image usecache (bool): use docker cache, or rebuild everything from scratch? pull (bool): try to pull new versions of repository images?
[ "Drives", "the", "build", "of", "the", "final", "image", "-", "get", "the", "list", "of", "steps", "and", "execute", "them", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L4035-L4057
def connect_post_namespaced_pod_proxy(self, name, namespace, **kwargs): # noqa: E501 """connect_post_namespaced_pod_proxy # noqa: E501 connect POST requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_post_namespaced_pod_proxy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_post_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.connect_post_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
[ "def", "connect_post_namespaced_pod_proxy", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", ...
connect_post_namespaced_pod_proxy # noqa: E501 connect POST requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_post_namespaced_pod_proxy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread.
[ "connect_post_namespaced_pod_proxy", "#", "noqa", ":", "E501" ]
python
train
cons3rt/pycons3rt
pycons3rt/asset.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/asset.py#L122-L253
def validate_asset_structure(asset_dir_path): """Checks asset structure validity :param asset_dir_path: (str) path to the directory containing the asset :return: (str) Asset name :raises: Cons3rtAssetStructureError """ log = logging.getLogger(mod_logger + '.validate_asset_structure') log.info('Validating asset directory: {d}'.format(d=asset_dir_path)) # Ensure there is an asset.properties file asset_props = os.path.join(asset_dir_path, 'asset.properties') if not os.path.isfile(asset_props): raise Cons3rtAssetStructureError('Asset properties file not found: {f}'.format(f=asset_props)) # Props to find install_script_rel_path = None doc_file_rel_path = None license_file_rel_path = None asset_type = None license_file_path = '' doc_file_path = '' asset_name = None log.info('Reading asset properties file: {f}'.format(f=asset_props)) with open(asset_props, 'r') as f: for line in f: if line.strip().startswith('installScript='): install_script_name = line.strip().split('=')[1] install_script_rel_path = os.path.join('scripts', install_script_name) elif line.strip().startswith('documentationFile='): doc_file_rel_path = line.strip().split('=')[1] elif line.strip().startswith('licenseFile='): license_file_rel_path = line.strip().split('=')[1] elif line.strip().startswith('assetType='): asset_type = line.strip().split('=')[1] asset_type = asset_type.lower() elif line.strip().startswith('name='): asset_name = line.strip().split('=')[1] # Ensure a name was provided if asset_name is None: raise Cons3rtAssetStructureError('Required property [name] not found in asset properties file: {f}'.format( f=asset_props)) if asset_name == '': raise Cons3rtAssetStructureError('Required property [name] found blank in asset properties file: {f}'.format( f=asset_props)) # Ensure asset_type was provided if asset_type is None: raise Cons3rtAssetStructureError('Required property [asset_type] not found in asset properties ' 'file: {f}'.format(f=asset_props)) if asset_type == '': raise 
Cons3rtAssetStructureError('Required property [asset_type] found blank in asset properties ' 'file: {f}'.format(f=asset_props)) log.info('Found installScript={f}'.format(f=install_script_rel_path)) log.info('Found assetType={f}'.format(f=asset_type)) # Verify the doc file exists if specified if doc_file_rel_path: log.info('Found documentationFile={f}'.format(f=doc_file_rel_path)) doc_file_path = os.path.join(asset_dir_path, doc_file_rel_path) if not os.path.isfile(doc_file_path): raise Cons3rtAssetStructureError('Documentation file not found: {f}'.format(f=doc_file_path)) else: log.info('Verified documentation file: {f}'.format(f=doc_file_path)) else: log.info('The documentationFile property was not specified in asset.properties') # Verify the license file exists if specified if license_file_rel_path: log.info('Found licenseFile={f}'.format(f=license_file_rel_path)) license_file_path = os.path.join(asset_dir_path, license_file_rel_path) if not os.path.isfile(license_file_path): raise Cons3rtAssetStructureError('License file not found: {f}'.format(f=license_file_path)) else: log.info('Verified license file: {f}'.format(f=license_file_path)) else: log.info('The licenseFile property was not specified in asset.properties') if asset_type == 'software': if not install_script_rel_path: raise Cons3rtAssetStructureError('Software asset has an asset.properties missing the installScript ' 'prop: {f}'.format(f=asset_props)) else: install_script_path = os.path.join(asset_dir_path, install_script_rel_path) if not os.path.isfile(install_script_path): raise Cons3rtAssetStructureError('Install script file not found: {f}'.format(f=install_script_path)) else: log.info('Verified install script for software asset: {f}'.format(f=install_script_path)) log.info('Checking items at the root of the asset directory...') for item in os.listdir(asset_dir_path): log.info('Checking item: {i}'.format(i=item)) item_path = os.path.join(asset_dir_path, item) if item_path == license_file_path: 
continue elif item_path == doc_file_path: continue elif item_path == asset_props: continue elif item in ignore_items: continue elif ignore_by_extension(item_path=item_path): continue elif item in acceptable_dirs and os.path.isdir(item_path): continue else: if item == 'VERSION': os.remove(item_path) log.warn('Deleted file: {f}'.format(f=item_path)) elif item == 'doc': raise Cons3rtAssetStructureError('Found a doc directory at the asset root, this is not allowed') elif item in potential_doc_files: if not doc_file_rel_path: raise Cons3rtAssetStructureError('Documentation file found but not specified in ' 'asset.properties: {f}'.format(f=item_path)) else: raise Cons3rtAssetStructureError('Extra documentation file found: {f}'.format(f=item_path)) elif item in potential_license_files: if not license_file_rel_path: raise Cons3rtAssetStructureError('License file found but not specified in ' 'asset.properties: {f}'.format(f=item_path)) else: raise Cons3rtAssetStructureError('Extra license file found: {f}'.format(f=item_path)) else: raise Cons3rtAssetStructureError('Found illegal item at the asset root dir: {i}'.format(i=item)) log.info('Validated asset directory successfully: {d}'.format(d=asset_dir_path)) return asset_name
[ "def", "validate_asset_structure", "(", "asset_dir_path", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.validate_asset_structure'", ")", "log", ".", "info", "(", "'Validating asset directory: {d}'", ".", "format", "(", "d", "=", "as...
Checks asset structure validity :param asset_dir_path: (str) path to the directory containing the asset :return: (str) Asset name :raises: Cons3rtAssetStructureError
[ "Checks", "asset", "structure", "validity" ]
python
train
inveniosoftware/invenio-access
invenio_access/cli.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/cli.py#L130-L134
def process_allow_action(processors, action, argument): """Process allow action.""" for processor in processors: processor(action, argument) db.session.commit()
[ "def", "process_allow_action", "(", "processors", ",", "action", ",", "argument", ")", ":", "for", "processor", "in", "processors", ":", "processor", "(", "action", ",", "argument", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Process allow action.
[ "Process", "allow", "action", "." ]
python
train
OLC-Bioinformatics/sipprverse
sixteenS/sixteens_full.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteens_full.py#L31-L38
def main(self): """ Run the required methods in the appropriate order """ self.targets() self.bait(k=49) self.reversebait(maskmiddle='t', k=19) self.subsample_reads()
[ "def", "main", "(", "self", ")", ":", "self", ".", "targets", "(", ")", "self", ".", "bait", "(", "k", "=", "49", ")", "self", ".", "reversebait", "(", "maskmiddle", "=", "'t'", ",", "k", "=", "19", ")", "self", ".", "subsample_reads", "(", ")" ]
Run the required methods in the appropriate order
[ "Run", "the", "required", "methods", "in", "the", "appropriate", "order" ]
python
train
edeposit/edeposit.amqp
bin/edeposit_amqp_tool.py
https://github.com/edeposit/edeposit.amqp/blob/7804b52028b90ab96302d54bc2430f88dc2ebf64/bin/edeposit_amqp_tool.py#L158-L183
def send_message(host, data, timeout=None, properties=None): """ Send message to given `host`. Args: host (str): Specified host: aleph/ftp/whatever available host. data (str): JSON data. timeout (int, default None): How much time wait for connection. """ channel = _get_channel(host, timeout) if not properties: properties = pika.BasicProperties( content_type="application/json", delivery_mode=2, headers={"UUID": str(uuid.uuid4())} ) parameters = settings.get_amqp_settings()[host] channel.basic_publish( exchange=parameters["exchange"], routing_key=parameters["in_key"], properties=properties, body=data )
[ "def", "send_message", "(", "host", ",", "data", ",", "timeout", "=", "None", ",", "properties", "=", "None", ")", ":", "channel", "=", "_get_channel", "(", "host", ",", "timeout", ")", "if", "not", "properties", ":", "properties", "=", "pika", ".", "B...
Send message to given `host`. Args: host (str): Specified host: aleph/ftp/whatever available host. data (str): JSON data. timeout (int, default None): How much time wait for connection.
[ "Send", "message", "to", "given", "host", "." ]
python
train
soravux/scoop
scoop/futures.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/futures.py#L259-L279
def _createFuture(func, *args, **kwargs): """Helper function to create a future.""" assert callable(func), ( "The provided func parameter is not a callable." ) if scoop.IS_ORIGIN and "SCOOP_WORKER" not in sys.modules: sys.modules["SCOOP_WORKER"] = sys.modules["__main__"] # If function is a lambda or class method, share it (or its parent object) # beforehand lambdaType = type(lambda: None) funcIsLambda = isinstance(func, lambdaType) and func.__name__ == '<lambda>' # Determine if function is a method. Methods derived from external # languages such as C++ aren't detected by ismethod. funcIsMethod = ismethod(func) if funcIsLambda or funcIsMethod: from .shared import SharedElementEncapsulation func = SharedElementEncapsulation(func) return Future(control.current.id, func, *args, **kwargs)
[ "def", "_createFuture", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "callable", "(", "func", ")", ",", "(", "\"The provided func parameter is not a callable.\"", ")", "if", "scoop", ".", "IS_ORIGIN", "and", "\"SCOOP_WORKER\"", "...
Helper function to create a future.
[ "Helper", "function", "to", "create", "a", "future", "." ]
python
train
booktype/python-ooxml
ooxml/parse.py
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/parse.py#L446-L468
def parse_document(xmlcontent): """Parse document with content. Content is placed in file 'document.xml'. """ document = etree.fromstring(xmlcontent) body = document.xpath('.//w:body', namespaces=NAMESPACES)[0] document = doc.Document() for elem in body: if elem.tag == _name('{{{w}}}p'): document.elements.append(parse_paragraph(document, elem)) if elem.tag == _name('{{{w}}}tbl'): document.elements.append(parse_table(document, elem)) if elem.tag == _name('{{{w}}}sdt'): document.elements.append(doc.TOC()) return document
[ "def", "parse_document", "(", "xmlcontent", ")", ":", "document", "=", "etree", ".", "fromstring", "(", "xmlcontent", ")", "body", "=", "document", ".", "xpath", "(", "'.//w:body'", ",", "namespaces", "=", "NAMESPACES", ")", "[", "0", "]", "document", "=",...
Parse document with content. Content is placed in file 'document.xml'.
[ "Parse", "document", "with", "content", "." ]
python
train
msiemens/PyGitUp
PyGitUp/gitup.py
https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L555-L576
def check_bundler(self): """ Run the bundler check. """ def get_config(name): return name if self.config('bundler.' + name) else '' from pkg_resources import Requirement, resource_filename relative_path = os.path.join('PyGitUp', 'check-bundler.rb') bundler_script = resource_filename(Requirement.parse('git-up'), relative_path) assert os.path.exists(bundler_script), 'check-bundler.rb doesn\'t ' \ 'exist!' return_value = subprocess.call( ['ruby', bundler_script, get_config('autoinstall'), get_config('local'), get_config('rbenv')] ) if self.testing: assert return_value == 0, 'Errors while executing check-bundler.rb'
[ "def", "check_bundler", "(", "self", ")", ":", "def", "get_config", "(", "name", ")", ":", "return", "name", "if", "self", ".", "config", "(", "'bundler.'", "+", "name", ")", "else", "''", "from", "pkg_resources", "import", "Requirement", ",", "resource_fi...
Run the bundler check.
[ "Run", "the", "bundler", "check", "." ]
python
train
ConsenSys/mythril-classic
mythril/laser/smt/__init__.py
https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/laser/smt/__init__.py#L138-L147
def BitVecFuncVal( value: int, func_name: str, size: int, annotations: Annotations = None, input_: Union[int, "BitVec"] = None, ) -> BitVecFunc: """Creates a new bit vector function with a concrete value.""" raw = z3.BitVecVal(value, size) return BitVecFunc(raw, func_name, input_, annotations)
[ "def", "BitVecFuncVal", "(", "value", ":", "int", ",", "func_name", ":", "str", ",", "size", ":", "int", ",", "annotations", ":", "Annotations", "=", "None", ",", "input_", ":", "Union", "[", "int", ",", "\"BitVec\"", "]", "=", "None", ",", ")", "->"...
Creates a new bit vector function with a concrete value.
[ "Creates", "a", "new", "bit", "vector", "function", "with", "a", "concrete", "value", "." ]
python
train
reillysiemens/layabout
examples/early-connection/example.py
https://github.com/reillysiemens/layabout/blob/a146c47f2558e66bb51cf708d39909b93eaea7f4/examples/early-connection/example.py#L15-L28
def channel_to_id(slack, channel): """ Surely there's a better way to do this... """ channels = slack.api_call('channels.list').get('channels') or [] groups = slack.api_call('groups.list').get('groups') or [] if not channels and not groups: raise RuntimeError("Couldn't get channels and groups.") ids = [c['id'] for c in channels + groups if c['name'] == channel] if not ids: raise ValueError(f"Couldn't find #{channel}") return ids[0]
[ "def", "channel_to_id", "(", "slack", ",", "channel", ")", ":", "channels", "=", "slack", ".", "api_call", "(", "'channels.list'", ")", ".", "get", "(", "'channels'", ")", "or", "[", "]", "groups", "=", "slack", ".", "api_call", "(", "'groups.list'", ")"...
Surely there's a better way to do this...
[ "Surely", "there", "s", "a", "better", "way", "to", "do", "this", "..." ]
python
train
datacamp/antlr-ast
antlr_ast/marshalling.py
https://github.com/datacamp/antlr-ast/blob/d08d5eb2e663bd40501d0eeddc8a731ac7e96b11/antlr_ast/marshalling.py#L27-L38
def decode_ast(registry, ast_json): """JSON decoder for BaseNodes""" if ast_json.get("@type"): subclass = registry.get_cls(ast_json["@type"], tuple(ast_json["@fields"])) return subclass( ast_json["children"], ast_json["field_references"], ast_json["label_references"], position=ast_json["@position"], ) else: return ast_json
[ "def", "decode_ast", "(", "registry", ",", "ast_json", ")", ":", "if", "ast_json", ".", "get", "(", "\"@type\"", ")", ":", "subclass", "=", "registry", ".", "get_cls", "(", "ast_json", "[", "\"@type\"", "]", ",", "tuple", "(", "ast_json", "[", "\"@fields...
JSON decoder for BaseNodes
[ "JSON", "decoder", "for", "BaseNodes" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/token.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/token.py#L35-L52
def create(self, ttl=values.unset): """ Create a new TokenInstance :param unicode ttl: The duration in seconds the credentials are valid :returns: Newly created TokenInstance :rtype: twilio.rest.api.v2010.account.token.TokenInstance """ data = values.of({'Ttl': ttl, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TokenInstance(self._version, payload, account_sid=self._solution['account_sid'], )
[ "def", "create", "(", "self", ",", "ttl", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Ttl'", ":", "ttl", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ...
Create a new TokenInstance :param unicode ttl: The duration in seconds the credentials are valid :returns: Newly created TokenInstance :rtype: twilio.rest.api.v2010.account.token.TokenInstance
[ "Create", "a", "new", "TokenInstance" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py#L260-L275
def get_port_profile_status_output_port_profile_mac_association_mac(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_profile_status = ET.Element("get_port_profile_status") config = get_port_profile_status output = ET.SubElement(get_port_profile_status, "output") port_profile = ET.SubElement(output, "port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop('name') mac_association = ET.SubElement(port_profile, "mac-association") mac = ET.SubElement(mac_association, "mac") mac.text = kwargs.pop('mac') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_port_profile_status_output_port_profile_mac_association_mac", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_profile_status", "=", "ET", ".", "Element", "(", "\"get_port_profile_status...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/muppy.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/muppy.py#L136-L154
def get_referents(object, level=1): """Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents considered. This function is recursive. """ res = gc.get_referents(object) level -= 1 if level > 0: for o in res: res.extend(get_referents(o, level)) res = _remove_duplicates(res) return res
[ "def", "get_referents", "(", "object", ",", "level", "=", "1", ")", ":", "res", "=", "gc", ".", "get_referents", "(", "object", ")", "level", "-=", "1", "if", "level", ">", "0", ":", "for", "o", "in", "res", ":", "res", ".", "extend", "(", "get_r...
Get all referents of an object up to a certain level. The referents will not be returned in a specific order and will not contain duplicate objects. Duplicate objects will be removed. Keyword arguments: level -- level of indirection to which referents considered. This function is recursive.
[ "Get", "all", "referents", "of", "an", "object", "up", "to", "a", "certain", "level", "." ]
python
train
adrn/gala
gala/coordinates/greatcircle.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/coordinates/greatcircle.py#L34-L70
def reference_to_greatcircle(reference_frame, greatcircle_frame): """Convert a reference coordinate to a great circle frame.""" # Define rotation matrices along the position angle vector, and # relative to the origin. pole = greatcircle_frame.pole.transform_to(coord.ICRS) ra0 = greatcircle_frame.ra0 center = greatcircle_frame.center R_rot = rotation_matrix(greatcircle_frame.rotation, 'z') if not np.isnan(ra0): xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.]) zaxis = pole.cartesian.xyz.value if np.abs(zaxis[2]) >= 1e-15: xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2] # what? else: xaxis[2] = 0. xaxis = xaxis / np.sqrt(np.sum(xaxis**2)) yaxis = np.cross(zaxis, xaxis) R = np.stack((xaxis, yaxis, zaxis)) elif center is not None: R1 = rotation_matrix(pole.ra, 'z') R2 = rotation_matrix(90*u.deg - pole.dec, 'y') Rtmp = matrix_product(R2, R1) rot = center.cartesian.transform(Rtmp) rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon R3 = rotation_matrix(rot_lon, 'z') R = matrix_product(R3, R2, R1) else: R1 = rotation_matrix(pole.ra, 'z') R2 = rotation_matrix(pole.dec, 'y') R = matrix_product(R2, R1) return matrix_product(R_rot, R)
[ "def", "reference_to_greatcircle", "(", "reference_frame", ",", "greatcircle_frame", ")", ":", "# Define rotation matrices along the position angle vector, and", "# relative to the origin.", "pole", "=", "greatcircle_frame", ".", "pole", ".", "transform_to", "(", "coord", ".", ...
Convert a reference coordinate to a great circle frame.
[ "Convert", "a", "reference", "coordinate", "to", "a", "great", "circle", "frame", "." ]
python
train
angr/angr
angr/exploration_techniques/director.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/exploration_techniques/director.py#L434-L456
def _peek_forward(self, simgr): """ Make sure all current basic block on each state shows up in the CFG. For blocks that are not in the CFG, start CFG recovery from them with a maximum basic block depth of 100. :param simgr: :return: """ if self._cfg is None: starts = list(simgr.active) self._cfg_kb = KnowledgeBase(self.project) self._cfg = self.project.analyses.CFGEmulated(kb=self._cfg_kb, starts=starts, max_steps=self._peek_blocks, keep_state=self._cfg_keep_states ) else: starts = list(simgr.active) self._cfg.resume(starts=starts, max_steps=self._peek_blocks)
[ "def", "_peek_forward", "(", "self", ",", "simgr", ")", ":", "if", "self", ".", "_cfg", "is", "None", ":", "starts", "=", "list", "(", "simgr", ".", "active", ")", "self", ".", "_cfg_kb", "=", "KnowledgeBase", "(", "self", ".", "project", ")", "self"...
Make sure all current basic block on each state shows up in the CFG. For blocks that are not in the CFG, start CFG recovery from them with a maximum basic block depth of 100. :param simgr: :return:
[ "Make", "sure", "all", "current", "basic", "block", "on", "each", "state", "shows", "up", "in", "the", "CFG", ".", "For", "blocks", "that", "are", "not", "in", "the", "CFG", "start", "CFG", "recovery", "from", "them", "with", "a", "maximum", "basic", "...
python
train
eventbrite/eventbrite-sdk-python
eventbrite/access_methods.py
https://github.com/eventbrite/eventbrite-sdk-python/blob/f2e5dc5aa1aa3e45766de13f16fd65722163d91a/eventbrite/access_methods.py#L475-L484
def post_series_cancel(self, id, **data): """ POST /series/:id/cancel/ Cancels a repeating event series and all of its occurrences that are not already canceled or deleted. In order for cancel to be permitted, there must be no pending or completed orders for any dates in the series. Returns a boolean indicating success or failure of the cancel. .. _delete-series-by-id: """ return self.post("/series/{0}/cancel/".format(id), data=data)
[ "def", "post_series_cancel", "(", "self", ",", "id", ",", "*", "*", "data", ")", ":", "return", "self", ".", "post", "(", "\"/series/{0}/cancel/\"", ".", "format", "(", "id", ")", ",", "data", "=", "data", ")" ]
POST /series/:id/cancel/ Cancels a repeating event series and all of its occurrences that are not already canceled or deleted. In order for cancel to be permitted, there must be no pending or completed orders for any dates in the series. Returns a boolean indicating success or failure of the cancel. .. _delete-series-by-id:
[ "POST", "/", "series", "/", ":", "id", "/", "cancel", "/", "Cancels", "a", "repeating", "event", "series", "and", "all", "of", "its", "occurrences", "that", "are", "not", "already", "canceled", "or", "deleted", ".", "In", "order", "for", "cancel", "to", ...
python
train
mlavin/argyle
argyle/system.py
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/system.py#L91-L107
def add_sources_from_file(file_name, update=True): """ Add source urls from a file list. The file should contain the source line to add followed by the key url, if any, enclosed in parentheses. Ex: deb http://example.com/deb lucid main (http://example.com/key) """ key_regex = re.compile(r'(?P<source>[^()]*)(\s+\((?P<key>.*)\))?$') for line in _read_lines_from_file(file_name): kwargs = key_regex.match(line).groupdict() kwargs['update'] = False add_apt_source(**kwargs) if update: update_apt_sources()
[ "def", "add_sources_from_file", "(", "file_name", ",", "update", "=", "True", ")", ":", "key_regex", "=", "re", ".", "compile", "(", "r'(?P<source>[^()]*)(\\s+\\((?P<key>.*)\\))?$'", ")", "for", "line", "in", "_read_lines_from_file", "(", "file_name", ")", ":", "k...
Add source urls from a file list. The file should contain the source line to add followed by the key url, if any, enclosed in parentheses. Ex: deb http://example.com/deb lucid main (http://example.com/key)
[ "Add", "source", "urls", "from", "a", "file", "list", ".", "The", "file", "should", "contain", "the", "source", "line", "to", "add", "followed", "by", "the", "key", "url", "if", "any", "enclosed", "in", "parentheses", "." ]
python
train
greenelab/PathCORE-T
pathcore/network.py
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L249-L276
def aggregate(self, merge): """Combine this network with another network. The aggregation step takes the union of the edges in the two networks, where we take the sum of weights for edges common to both networks. Parameters ----------- merge : CoNetwork the CoNetwork object being merged into the current network. """ self.features = set() self.n_features += merge.n_features vertex_id_conversion = self.convert_pathway_mapping(merge.pathways) for edge_id, edge in merge.edges.items(): edge_key = self.remapped_edge( vertex_id_conversion, edge_id) if edge_key in self.edges: if self.edges[edge_key].which_features: self.edges[edge_key].which_features = [] self.edges[edge_key].weight += edge.weight else: vertex0_id, vertex1_id = edge_key new_edge_obj = Edge(vertex0_id, vertex1_id, []) new_edge_obj.weight = edge.weight self.edges[edge_key] = new_edge_obj self._add_edge_to_vertex(vertex0_id, new_edge_obj) self._add_edge_to_vertex(vertex1_id, new_edge_obj)
[ "def", "aggregate", "(", "self", ",", "merge", ")", ":", "self", ".", "features", "=", "set", "(", ")", "self", ".", "n_features", "+=", "merge", ".", "n_features", "vertex_id_conversion", "=", "self", ".", "convert_pathway_mapping", "(", "merge", ".", "pa...
Combine this network with another network. The aggregation step takes the union of the edges in the two networks, where we take the sum of weights for edges common to both networks. Parameters ----------- merge : CoNetwork the CoNetwork object being merged into the current network.
[ "Combine", "this", "network", "with", "another", "network", ".", "The", "aggregation", "step", "takes", "the", "union", "of", "the", "edges", "in", "the", "two", "networks", "where", "we", "take", "the", "sum", "of", "weights", "for", "edges", "common", "t...
python
train
Fantomas42/django-blog-zinnia
zinnia/moderator.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/moderator.py#L68-L92
def do_email_notification(self, comment, entry, site):
    """
    Send an email notification about a newly posted comment
    to the configured site staff recipients.
    """
    recipients = self.mail_comment_notification_recipients
    if not recipients:
        return
    template = loader.get_template(
        'comments/zinnia/entry/email/notification.txt')
    subject = _('[%(site)s] New comment posted on "%(title)s"') % {
        'site': site.name, 'title': entry.title}
    message = template.render({
        'comment': comment,
        'entry': entry,
        'site': site,
        'protocol': PROTOCOL,
    })
    # Swallow SMTP errors in production, surface them while debugging.
    send_mail(subject, message,
              settings.DEFAULT_FROM_EMAIL,
              recipients,
              fail_silently=not settings.DEBUG)
[ "def", "do_email_notification", "(", "self", ",", "comment", ",", "entry", ",", "site", ")", ":", "if", "not", "self", ".", "mail_comment_notification_recipients", ":", "return", "template", "=", "loader", ".", "get_template", "(", "'comments/zinnia/entry/email/noti...
Send email notification of a new comment to site staff.
[ "Send", "email", "notification", "of", "a", "new", "comment", "to", "site", "staff", "." ]
python
train
gtaylor/python-colormath
colormath/color_diff_matrix.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_diff_matrix.py#L21-L56
def delta_e_cie1994(lab_color_vector, lab_color_matrix,
                    K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
    """
    Calculates the Delta E (CIE1994) of two colors.

    K_l:
      0.045 graphic arts
      0.048 textiles
    K_2:
      0.015 graphic arts
      0.014 textiles
    K_L:
      1 default
      2 textiles
    """
    # Chroma of the reference color and of every row of the matrix.
    chroma_ref = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
    chroma_rows = numpy.sqrt(
        numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))

    diff = lab_color_vector - lab_color_matrix
    delta_L = diff[:, 0].copy()
    delta_C = chroma_ref - chroma_rows

    # delta_H^2 = delta_a^2 + delta_b^2 - delta_C^2; clip tiny negative
    # values caused by floating-point error before the square root.
    delta_H_sq = (numpy.power(diff[:, 1], 2)
                  + numpy.power(diff[:, 2], 2)
                  - numpy.power(delta_C, 2))
    # noinspection PyArgumentList
    delta_H = numpy.sqrt(delta_H_sq.clip(min=0))

    # Weighting functions depend only on the reference color's chroma.
    S_L = 1
    S_C = 1 + K_1 * chroma_ref
    S_H = 1 + K_2 * chroma_ref

    terms = numpy.vstack([
        delta_L / (K_L * S_L),
        delta_C / (K_C * S_C),
        delta_H / (K_H * S_H),
    ])
    return numpy.sqrt(numpy.sum(numpy.power(terms, 2), axis=0))
[ "def", "delta_e_cie1994", "(", "lab_color_vector", ",", "lab_color_matrix", ",", "K_L", "=", "1", ",", "K_C", "=", "1", ",", "K_H", "=", "1", ",", "K_1", "=", "0.045", ",", "K_2", "=", "0.015", ")", ":", "C_1", "=", "numpy", ".", "sqrt", "(", "nump...
Calculates the Delta E (CIE1994) of two colors. K_l: 0.045 graphic arts 0.048 textiles K_2: 0.015 graphic arts 0.014 textiles K_L: 1 default 2 textiles
[ "Calculates", "the", "Delta", "E", "(", "CIE1994", ")", "of", "two", "colors", "." ]
python
train
rueckstiess/mtools
mtools/mplotqueries/plottypes/histogram_type.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/histogram_type.py#L153-L159
def clicked(self, event):
    """Print the group name and the number of items in the clicked bin."""
    artist = event.artist
    group = artist._mt_group
    num_events = artist._mt_n
    bin_start = num2date(artist._mt_bin)
    print("%4i %s events in %s sec beginning at %s"
          % (num_events, group, self.bucketsize,
             bin_start.strftime("%b %d %H:%M:%S")))
[ "def", "clicked", "(", "self", ",", "event", ")", ":", "group", "=", "event", ".", "artist", ".", "_mt_group", "n", "=", "event", ".", "artist", ".", "_mt_n", "dt", "=", "num2date", "(", "event", ".", "artist", ".", "_mt_bin", ")", "print", "(", "\...
Print group name and number of items in bin.
[ "Print", "group", "name", "and", "number", "of", "items", "in", "bin", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/table.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/table.py#L377-L411
def column(self, column, option=None, **kw):
    """
    Query or modify the options for the specified column.

    If `kw` is not given, returns a dict of the column option values. If
    `option` is specified then the value for that option is returned.
    Otherwise, sets the options to the corresponding values.

    :param id: the column's identifier (read-only option)
    :param anchor: "n", "ne", "e", "se", "s", "sw", "w", "nw", or "center":
                   alignment of the text in this column with respect
                   to the cell
    :param minwidth: minimum width of the column in pixels
    :type minwidth: int
    :param stretch: whether the column's width should be adjusted
                    when the widget is resized
    :type stretch: bool
    :param width: width of the column in pixels
    :type width: int
    :param type: column's content type (for sorting), default type is `str`
    :type type: type
    """
    config = False
    # 'type' is a Table-level extension that ttk.Treeview does not know
    # about, so it is intercepted here and removed from kw before the
    # remaining options are forwarded to ttk.
    if option == 'type':
        return self._column_types[column]
    elif 'type' in kw:
        config = True
        self._column_types[column] = kw.pop('type')
    if kw:
        # Mirror the configuration on the drag-preview widget so a dragged
        # column renders identically (NOTE(review): presumably — confirm
        # against _visual_drag's role elsewhere in the class).
        self._visual_drag.column(ttk.Treeview.column(self, column, 'id'),
                                 option, **kw)
    if kw or option:
        return ttk.Treeview.column(self, column, option, **kw)
    elif not config:
        # Full query with no changes requested: augment the ttk option
        # dict with the widget-level 'type' entry.
        res = ttk.Treeview.column(self, column, option, **kw)
        res['type'] = self._column_types[column]
        return res
[ "def", "column", "(", "self", ",", "column", ",", "option", "=", "None", ",", "*", "*", "kw", ")", ":", "config", "=", "False", "if", "option", "==", "'type'", ":", "return", "self", ".", "_column_types", "[", "column", "]", "elif", "'type'", "in", ...
Query or modify the options for the specified column. If `kw` is not given, returns a dict of the column option values. If `option` is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values. :param id: the column's identifier (read-only option) :param anchor: "n", "ne", "e", "se", "s", "sw", "w", "nw", or "center": alignment of the text in this column with respect to the cell :param minwidth: minimum width of the column in pixels :type minwidth: int :param stretch: whether the column's width should be adjusted when the widget is resized :type stretch: bool :param width: width of the column in pixels :type width: int :param type: column's content type (for sorting), default type is `str` :type type: type
[ "Query", "or", "modify", "the", "options", "for", "the", "specified", "column", "." ]
python
train
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L1616-L1634
def is_argument(self, name):
    """Check if the given name is defined in the arguments.

    :param name: The name to check for.
    :type name: str

    :returns: True if the given name is defined in the arguments,
        False otherwise.
    :rtype: bool
    """
    # The *args / **kwargs names count as arguments too.
    if name == self.vararg:
        return True
    if name == self.kwarg:
        return True
    # Search positional/keyword arguments recursively (rec=True descends
    # into tuple-unpacking arguments), then keyword-only arguments.
    # NOTE(review): when both lookups fail this expression can evaluate to
    # a falsy non-bool (e.g. an empty kwonlyargs list) rather than False —
    # callers must rely on truthiness, not identity.
    return (
        self.find_argname(name, True)[1] is not None
        or self.kwonlyargs
        and _find_arg(name, self.kwonlyargs, True)[1] is not None
    )
[ "def", "is_argument", "(", "self", ",", "name", ")", ":", "if", "name", "==", "self", ".", "vararg", ":", "return", "True", "if", "name", "==", "self", ".", "kwarg", ":", "return", "True", "return", "(", "self", ".", "find_argname", "(", "name", ",",...
Check if the given name is defined in the arguments. :param name: The name to check for. :type name: str :returns: True if the given name is defined in the arguments, False otherwise. :rtype: bool
[ "Check", "if", "the", "given", "name", "is", "defined", "in", "the", "arguments", "." ]
python
train
datadotworld/data.world-py
datadotworld/client/api.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/api.py#L834-L891
def create_project(self, owner_id, **kwargs):
    """Create a new project.

    :param owner_id: Username of the creator of a project.
    :type owner_id: str

    Keyword arguments mirror the data.world project-creation API:
    ``title`` (used to generate the project id on creation),
    ``visibility`` ('OPEN' or 'PRIVATE'), ``objective``, ``summary``,
    ``tags``, ``license``, ``files`` (dict of file name to source URL,
    description and labels) and ``linked_datasets`` (list of
    ``{'owner': ..., 'id': ...}`` dicts).

    :returns: Newly created project key (taken from the Location header).
    :rtype: str
    :raises RestApiError: If a server error occurs.

    Examples
    --------
    >>> import datadotworld as dw
    >>> api_client = dw.api_client()
    >>> api_client.create_project(
    ...     'username', title='project testing',
    ...     visibility='PRIVATE',
    ...     linked_datasets=[{'owner': 'someuser',
    ...                       'id': 'somedataset'}])  # doctest: +SKIP
    """
    def build_request():
        # Only title/visibility go through the constructor; the shared
        # builder fills in the remaining optional fields from kwargs.
        return _swagger.ProjectCreateRequest(
            title=kwargs.get('title'),
            visibility=kwargs.get('visibility'))

    def build_file(name, url, description, labels):
        return _swagger.FileCreateRequest(
            name=name,
            source=_swagger.FileSourceCreateRequest(url=url),
            description=description,
            labels=labels)

    request = self.__build_project_obj(build_request, build_file, kwargs)

    try:
        _, _, headers = self._projects_api.create_project_with_http_info(
            owner_id, body=request, _return_http_data_only=False)
        if 'Location' in headers:
            return headers['Location']
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
[ "def", "create_project", "(", "self", ",", "owner_id", ",", "*", "*", "kwargs", ")", ":", "request", "=", "self", ".", "__build_project_obj", "(", "lambda", ":", "_swagger", ".", "ProjectCreateRequest", "(", "title", "=", "kwargs", ".", "get", "(", "'title...
Create a new project :param owner_id: Username of the creator of a project. :type owner_id: str :param title: Project title (will be used to generate project id on creation) :type title: str :param objective: Short project objective. :type objective: str, optional :param summary: Long-form project summary. :type summary: str, optional :param tags: Project tags. Letters numbers and spaces :type tags: list, optional :param license: Project license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Project visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :param linked_datasets: Initial set of linked datasets. :type linked_datasets: list of object, optional :returns: Newly created project key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.create_project( ... 'username', title='project testing', ... visibility='PRIVATE', ... linked_datasets=[{'owner': 'someuser', ... 'id': 'somedataset'}]) # doctest: +SKIP
[ "Create", "a", "new", "project" ]
python
train
mandiant/ioc_writer
ioc_writer/ioc_api.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L227-L251
def add_parameter(self, indicator_id, content, name='comment', ptype='string'):
    """
    Add a parameter to the IOC.

    :param indicator_id: The unique Indicator/IndicatorItem id the
        parameter is associated with.
    :param content: The value of the parameter.
    :param name: The name of the parameter.
    :param ptype: The type of the parameter content.
    :return: True
    :raises: IOCParseError if the indicator_id is not associated with a
        Indicator or IndicatorItem in the IOC.
    """
    params_node = self.parameters
    criteria_node = self.top_level_indicator.getparent()
    # Duplicate (ref-id, name) pairs are legal in the schema; just note
    # that one is about to be inserted.
    dup_xpath = './/param[@ref-id="{}" and @name="{}"]'.format(
        indicator_id, name)
    if params_node.xpath(dup_xpath):
        log.info('Duplicate (id,name) parameter pair will be inserted '
                 '[{}][{}].'.format(indicator_id, name))
    # The referenced node must exist somewhere in the IOC logic tree.
    ref_xpath = './/IndicatorItem[@id="{}"]|.//Indicator[@id="{}"]'.format(
        indicator_id, indicator_id)
    if not criteria_node.xpath(ref_xpath):
        raise IOCParseError(
            'ID does not exist in the IOC [{}][{}].'.format(
                str(indicator_id), str(content)))
    params_node.append(
        ioc_et.make_param_node(indicator_id, content, name, ptype))
    return True
[ "def", "add_parameter", "(", "self", ",", "indicator_id", ",", "content", ",", "name", "=", "'comment'", ",", "ptype", "=", "'string'", ")", ":", "parameters_node", "=", "self", ".", "parameters", "criteria_node", "=", "self", ".", "top_level_indicator", ".", ...
Add a a parameter to the IOC. :param indicator_id: The unique Indicator/IndicatorItem id the parameter is associated with. :param content: The value of the parameter. :param name: The name of the parameter. :param ptype: The type of the parameter content. :return: True :raises: IOCParseError if the indicator_id is not associated with a Indicator or IndicatorItem in the IOC.
[ "Add", "a", "a", "parameter", "to", "the", "IOC", "." ]
python
train
kmadac/bitstamp-python-client
bitstamp/client.py
https://github.com/kmadac/bitstamp-python-client/blob/35b9a61f3892cc281de89963d210f7bd5757c717/bitstamp/client.py#L248-L269
def user_transactions(self, offset=0, limit=100, descending=True,
                      base=None, quote=None):
    """
    Return a list of the user's transactions, newest first by default.

    Every transaction (dictionary) contains::

        {u'usd': u'-39.25',
         u'datetime': u'2013-03-26 18:49:13',
         u'fee': u'0.20',
         u'btc': u'0.50000000',
         u'type': 2,
         u'id': 213642}

    Instead of the keys btc and usd, it can contain other currency codes.
    """
    url = self._construct_url("user_transactions/", base, quote)
    payload = {
        'offset': offset,
        'limit': limit,
        'sort': 'desc' if descending else 'asc',
    }
    return self._post(url, data=payload, return_json=True, version=2)
[ "def", "user_transactions", "(", "self", ",", "offset", "=", "0", ",", "limit", "=", "100", ",", "descending", "=", "True", ",", "base", "=", "None", ",", "quote", "=", "None", ")", ":", "data", "=", "{", "'offset'", ":", "offset", ",", "'limit'", ...
Returns descending list of transactions. Every transaction (dictionary) contains:: {u'usd': u'-39.25', u'datetime': u'2013-03-26 18:49:13', u'fee': u'0.20', u'btc': u'0.50000000', u'type': 2, u'id': 213642} Instead of the keys btc and usd, it can contain other currency codes
[ "Returns", "descending", "list", "of", "transactions", ".", "Every", "transaction", "(", "dictionary", ")", "contains", "::" ]
python
train
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2741-L2755
def open_file(self, fname, external=False):
    """
    Open filename with the appropriate application.

    Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
    or open file outside Spyder (if extension is not supported).
    """
    fname = to_text_string(fname)
    extension = osp.splitext(fname)[1]
    # Text files go to the editor.
    if encoding.is_text_file(fname):
        self.editor.load(fname)
        return
    # Importable data files go to the variable explorer, if available.
    if self.variableexplorer is not None and extension in IMPORT_EXT:
        self.variableexplorer.import_data(fname)
        return
    # Everything else is handed to the OS, unless the request already
    # came from an external source.
    if not external:
        programs.start_file(file_uri(fname))
[ "def", "open_file", "(", "self", ",", "fname", ",", "external", "=", "False", ")", ":", "fname", "=", "to_text_string", "(", "fname", ")", "ext", "=", "osp", ".", "splitext", "(", "fname", ")", "[", "1", "]", "if", "encoding", ".", "is_text_file", "(...
Open filename with the appropriate application Redirect to the right widget (txt -> editor, spydata -> workspace, ...) or open file outside Spyder (if extension is not supported)
[ "Open", "filename", "with", "the", "appropriate", "application", "Redirect", "to", "the", "right", "widget", "(", "txt", "-", ">", "editor", "spydata", "-", ">", "workspace", "...", ")", "or", "open", "file", "outside", "Spyder", "(", "if", "extension", "i...
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L597-L604
def AddMethod(self, function, name=None):
    """
    Add *function* as a method of this construction environment under
    *name*.  If *name* is omitted, the function's own name is used.
    """
    wrapper = MethodWrapper(self, function, name)
    self.added_methods.append(wrapper)
[ "def", "AddMethod", "(", "self", ",", "function", ",", "name", "=", "None", ")", ":", "method", "=", "MethodWrapper", "(", "self", ",", "function", ",", "name", ")", "self", ".", "added_methods", ".", "append", "(", "method", ")" ]
Adds the specified function as a method of this construction environment with the specified name. If the name is omitted, the default name is the name of the function itself.
[ "Adds", "the", "specified", "function", "as", "a", "method", "of", "this", "construction", "environment", "with", "the", "specified", "name", ".", "If", "the", "name", "is", "omitted", "the", "default", "name", "is", "the", "name", "of", "the", "function", ...
python
train
log2timeline/dfvfs
dfvfs/file_io/file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/file_io.py#L55-L84
def open(self, path_spec=None, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (Optional[PathSpec]): path specification.
      mode (Optional[str]): file access mode.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file-like object was already opened or the open failed.
      OSError: if the file-like object was already opened or the open failed.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification or mode is invalid.
    """
    # A cached object may legitimately be opened again by another
    # consumer; only a non-cached double open is an error.
    if self._is_open and not self._is_cached:
        raise IOError('Already open.')

    # Only binary read access is supported.
    if mode != 'rb':
        raise ValueError('Unsupported mode: {0:s}.'.format(mode))

    if not self._is_open:
        self._Open(path_spec=path_spec, mode=mode)
        self._is_open = True

        # First successful open for this path spec: register the object in
        # the resolver cache so later opens reuse it.
        if path_spec and not self._resolver_context.GetFileObject(path_spec):
            self._resolver_context.CacheFileObject(path_spec, self)
            self._is_cached = True

    if self._is_cached:
        # Increase the cache reference count for this consumer.
        self._resolver_context.GrabFileObject(path_spec)
[ "def", "open", "(", "self", ",", "path_spec", "=", "None", ",", "mode", "=", "'rb'", ")", ":", "if", "self", ".", "_is_open", "and", "not", "self", ".", "_is_cached", ":", "raise", "IOError", "(", "'Already open.'", ")", "if", "mode", "!=", "'rb'", "...
Opens the file-like object defined by path specification. Args: path_spec (Optional[PathSpec]): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object was already opened or the open failed. OSError: if the file-like object was already opened or the open failed. PathSpecError: if the path specification is incorrect. ValueError: if the path specification or mode is invalid.
[ "Opens", "the", "file", "-", "like", "object", "defined", "by", "path", "specification", "." ]
python
train
saltstack/salt
salt/modules/yumpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/yumpkg.py#L3252-L3286
def complete_transaction(cleanup_only=False, recursive=False, max_attempts=3):
    '''
    .. versionadded:: Fluorine

    Execute ``yum-complete-transaction``, which is provided by the
    ``yum-utils`` package.

    cleanup_only
        Specify if the ``--cleanup-only`` option should be supplied.

    recursive
        Specify if ``yum-complete-transaction`` should be called recursively
        (it only completes one transaction at a time).

    max_attempts
        If ``recursive`` is ``True``, the maximum times
        ``yum-complete-transaction`` should be called.

        .. note::
            Recursive calls will stop once ``No unfinished transactions
            left.`` is in the returned output.

    .. note::
        ``yum-utils`` will already be installed on the minion if the package
        was installed from the Fedora / EPEL repositories.

    CLI example:

    .. code-block:: bash

        salt '*' pkg.complete_transaction
        salt '*' pkg.complete_transaction cleanup_only=True
        salt '*' pkg.complete_transaction recursive=True max_attempts=5
    '''
    # Delegate to the recursive helper, starting at attempt 1 with an
    # empty accumulator (NOTE(review): presumably the per-attempt results
    # list — confirm against _complete_transaction's signature).
    return _complete_transaction(cleanup_only, recursive, max_attempts, 1, [])
[ "def", "complete_transaction", "(", "cleanup_only", "=", "False", ",", "recursive", "=", "False", ",", "max_attempts", "=", "3", ")", ":", "return", "_complete_transaction", "(", "cleanup_only", ",", "recursive", ",", "max_attempts", ",", "1", ",", "[", "]", ...
.. versionadded:: Fluorine Execute ``yum-complete-transaction``, which is provided by the ``yum-utils`` package. cleanup_only Specify if the ``--cleanup-only`` option should be supplied. recursive Specify if ``yum-complete-transaction`` should be called recursively (it only completes one transaction at a time). max_attempts If ``recursive`` is ``True``, the maximum times ``yum-complete-transaction`` should be called. .. note:: Recursive calls will stop once ``No unfinished transactions left.`` is in the returned output. .. note:: ``yum-utils`` will already be installed on the minion if the package was installed from the Fedora / EPEL repositories. CLI example: .. code-block:: bash salt '*' pkg.complete_transaction salt '*' pkg.complete_transaction cleanup_only=True salt '*' pkg.complete_transaction recursive=True max_attempts=5
[ "..", "versionadded", "::", "Fluorine" ]
python
train
alpacahq/alpaca-trade-api-python
alpaca_trade_api/rest.py
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L220-L227
def list_assets(self, status=None, asset_class=None):
    '''Get a list of assets.

    :param status: filter by asset status, e.g. 'active' (optional)
    :param asset_class: filter by asset class, e.g. 'us_equity' (optional)
    :returns: list of Asset entities matching the filters
    '''
    params = {
        'status': status,
        # BUG FIX: the key was misspelled 'assert_class', so the API
        # silently ignored it and the asset_class filter never applied.
        'asset_class': asset_class,
    }
    resp = self.get('/assets', params)
    return [Asset(o) for o in resp]
[ "def", "list_assets", "(", "self", ",", "status", "=", "None", ",", "asset_class", "=", "None", ")", ":", "params", "=", "{", "'status'", ":", "status", ",", "'assert_class'", ":", "asset_class", ",", "}", "resp", "=", "self", ".", "get", "(", "'/asset...
Get a list of assets
[ "Get", "a", "list", "of", "assets" ]
python
test
jaraco/svg.charts
svg/charts/graph.py
https://github.com/jaraco/svg.charts/blob/23053497b3f1af4e760f355050107ae3bc05909d/svg/charts/graph.py#L470-L480
def draw_y_guidelines(self, label_height, count):
    "Draw the Y-axis guidelines"
    if not self.show_y_guidelines:
        return
    # One horizontal line per label row, skipping the bottom axis itself.
    for row in range(1, count):
        y = self.graph_height - label_height * row
        move = 'M 0 {start} h{stop}'.format(start=y, stop=self.graph_width)
        etree.SubElement(self.graph, 'path',
                         {'d': move, 'class': 'guideLines'})
[ "def", "draw_y_guidelines", "(", "self", ",", "label_height", ",", "count", ")", ":", "if", "not", "self", ".", "show_y_guidelines", ":", "return", "for", "count", "in", "range", "(", "1", ",", "count", ")", ":", "move", "=", "'M 0 {start} h{stop}'", ".", ...
Draw the Y-axis guidelines
[ "Draw", "the", "Y", "-", "axis", "guidelines" ]
python
test
DiamondLightSource/python-workflows
workflows/services/common_service.py
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/services/common_service.py#L435-L456
def process_uncaught_exception(self, e):
    """Handle an otherwise uncaught exception from the service.

    The service will terminate either way, but here we can gather useful
    environment information and log it for posterity.
    """
    # Locate the file, line and piece of code causing the exception so it
    # can be attached to the log record as structured fields.
    (exc_file_fullpath, exc_file, exc_lineno,
     exc_func, exc_line) = workflows.logging.get_exception_source()
    extra = {
        "workflows_exc_lineno": exc_lineno,
        "workflows_exc_funcName": exc_func,
        "workflows_exc_line": exc_line,
        "workflows_exc_pathname": exc_file_fullpath,
        "workflows_exc_filename": exc_file,
    }
    # The exception itself may carry additional context in
    # 'workflows_log_*' attributes; strip the prefix and pass them along.
    extra.update(
        {attr[14:]: getattr(e, attr, None)
         for attr in dir(e) if attr.startswith("workflows_log_")}
    )
    # exc_info=True adds the full stack trace to the log message.
    self.log.critical(
        "Unhandled service exception: %s", e, exc_info=True, extra=extra
    )
[ "def", "process_uncaught_exception", "(", "self", ",", "e", ")", ":", "# Add information about the actual exception to the log message", "# This includes the file, line and piece of code causing the exception.", "# exc_info=True adds the full stack trace to the log message.", "exc_file_fullpat...
This is called to handle otherwise uncaught exceptions from the service. The service will terminate either way, but here we can do things such as gathering useful environment information and logging for posterity.
[ "This", "is", "called", "to", "handle", "otherwise", "uncaught", "exceptions", "from", "the", "service", ".", "The", "service", "will", "terminate", "either", "way", "but", "here", "we", "can", "do", "things", "such", "as", "gathering", "useful", "environment"...
python
train
portfors-lab/sparkle
sparkle/gui/plotting/pyqtgraph_widgets.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/pyqtgraph_widgets.py#L483-L496
def editColormap(self):
    """Prompts the user with a dialog to change colormap"""
    self.editor = editor = pg.ImageView()
    # Hide the ROI and Norm buttons: only the colormap controls are wanted.
    editor.ui.roiBtn.setVisible(False)
    editor.ui.menuBtn.setVisible(False)
    editor.setImage(self.imageArray)
    # Restore any previously saved gradient state and levels.
    saved_state = self.imgArgs['state']
    if saved_state is not None:
        histogram_item = editor.getHistogramWidget().item
        histogram_item.gradient.restoreState(saved_state)
        histogram_item.setLevels(*self.imgArgs['levels'])
    editor.closeEvent = self._editor_close
    editor.setWindowModality(QtCore.Qt.ApplicationModal)
    editor.show()
[ "def", "editColormap", "(", "self", ")", ":", "self", ".", "editor", "=", "pg", ".", "ImageView", "(", ")", "# remove the ROI and Norm buttons", "self", ".", "editor", ".", "ui", ".", "roiBtn", ".", "setVisible", "(", "False", ")", "self", ".", "editor", ...
Prompts the user with a dialog to change colormap
[ "Prompts", "the", "user", "with", "a", "dialog", "to", "change", "colormap" ]
python
train
pytroll/pyspectral
pyspectral/rayleigh.py
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/rayleigh.py#L195-L208
def get_reflectance_lut(self):
    """Read the LUT with reflectances as a function of wavelength,
    satellite zenith secant, azimuth difference angle, and sun zenith
    secant.  The file is read once and the result cached on the instance.
    """
    if self._rayl is None:
        # Note: this calls the module-level get_reflectance_lut function,
        # not this method (the method name shadows it only via attributes).
        lut = get_reflectance_lut(self.reflectance_lut_filename)
        (self._rayl, self._wvl_coord, self._azid_coord,
         self._satz_sec_coord, self._sunz_sec_coord) = lut[:5]
    return (self._rayl, self._wvl_coord, self._azid_coord,
            self._satz_sec_coord, self._sunz_sec_coord)
[ "def", "get_reflectance_lut", "(", "self", ")", ":", "if", "self", ".", "_rayl", "is", "None", ":", "lut_vars", "=", "get_reflectance_lut", "(", "self", ".", "reflectance_lut_filename", ")", "self", ".", "_rayl", "=", "lut_vars", "[", "0", "]", "self", "."...
Read the LUT with reflectances as a function of wavelength, satellite zenith secant, azimuth difference angle, and sun zenith secant
[ "Read", "the", "LUT", "with", "reflectances", "as", "a", "function", "of", "wavelength", "satellite", "zenith", "secant", "azimuth", "difference", "angle", "and", "sun", "zenith", "secant" ]
python
train
acsone/bobtemplates.odoo
bobtemplates/odoo/hooks.py
https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L58-L68
def _insert_manifest_item(configurator, key, item):
    """ Insert an item in the list of an existing manifest key.

    Does nothing if the item is already present in that list.
    """
    with _open_manifest(configurator) as f:
        manifest = f.read()
    if item in ast.literal_eval(manifest).get(key, []):
        return
    pattern = """(["']{}["']:\\s*\\[)""".format(key)
    repl = """\\1\n    '{}',""".format(item)
    # BUG FIX: re.MULTILINE was passed as re.sub's positional `count`
    # argument (count=8), not as a flag; pass it via flags= instead.
    manifest = re.sub(pattern, repl, manifest, flags=re.MULTILINE)
    with _open_manifest(configurator, "w") as f:
        f.write(manifest)
[ "def", "_insert_manifest_item", "(", "configurator", ",", "key", ",", "item", ")", ":", "with", "_open_manifest", "(", "configurator", ")", "as", "f", ":", "manifest", "=", "f", ".", "read", "(", ")", "if", "item", "in", "ast", ".", "literal_eval", "(", ...
Insert an item in the list of an existing manifest key
[ "Insert", "an", "item", "in", "the", "list", "of", "an", "existing", "manifest", "key" ]
python
train
pyviz/holoviews
holoviews/plotting/mpl/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/mpl/util.py#L35-L47
def is_color(color):
    """
    Checks if supplied object is a valid color spec: a hex RGB string,
    a known color alias, or a named color.
    """
    # Non-strings can never be color specs.
    if not isinstance(color, basestring):
        return False
    return (RGB_HEX_REGEX.match(color) is not None
            or color in COLOR_ALIASES
            or color in cnames)
[ "def", "is_color", "(", "color", ")", ":", "if", "not", "isinstance", "(", "color", ",", "basestring", ")", ":", "return", "False", "elif", "RGB_HEX_REGEX", ".", "match", "(", "color", ")", ":", "return", "True", "elif", "color", "in", "COLOR_ALIASES", "...
Checks if supplied object is a valid color spec.
[ "Checks", "if", "supplied", "object", "is", "a", "valid", "color", "spec", "." ]
python
train
saltstack/salt
salt/template.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/template.py#L153-L186
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
    '''
    Check the template shebang line and return the list of renderers specified
    in the pipe.

    Example shebang lines::

      #!yaml_jinja
      #!yaml_mako
      #!mako|yaml
      #!jinja|yaml
      #!jinja|mako|yaml
      #!mako|yaml|stateconf
      #!jinja|yaml|stateconf
      #!mako|yaml_odict
      #!mako|yaml_odict|stateconf

    '''
    # Grab the first line of the template (or of the inline string data).
    if template == ':string:':
        line = input_data.split()[0]
    else:
        with salt.utils.files.fopen(template, 'r') as ifile:
            line = salt.utils.stringutils.to_unicode(ifile.readline())

    # A shebang that is not a path (#!/...) selects the render pipe; if the
    # pipe contains no recognized/whitelisted renderers we do NOT fall back
    # to the default pipe.
    if line.startswith('#!') and not line.startswith('#!/'):
        pipe = line.strip()[2:]
    else:
        pipe = default
    return check_render_pipe_str(pipe, renderers, blacklist, whitelist)
[ "def", "template_shebang", "(", "template", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ",", "input_data", ")", ":", "line", "=", "''", "# Open up the first line of the sls template", "if", "template", "==", "':string:'", ":", "line", "="...
Check the template shebang line and return the list of renderers specified in the pipe. Example shebang lines:: #!yaml_jinja #!yaml_mako #!mako|yaml #!jinja|yaml #!jinja|mako|yaml #!mako|yaml|stateconf #!jinja|yaml|stateconf #!mako|yaml_odict #!mako|yaml_odict|stateconf
[ "Check", "the", "template", "shebang", "line", "and", "return", "the", "list", "of", "renderers", "specified", "in", "the", "pipe", "." ]
python
train
casastorta/python-sar
sar/parser.py
https://github.com/casastorta/python-sar/blob/e6d8bb86524102d677f37e985302fad34e3297c1/sar/parser.py#L216-L272
def _parse_file(self, sar_parts): """ Parses splitted file to get proper information from split parts. :param sar_parts: Array of SAR file parts :return: ``Dictionary``-style info (but still non-parsed) \ from SAR file, split into sections we want to check """ usage = {} output = {} # If sar_parts is a list if type(sar_parts) is list: restart_pattern = re.compile(PATTERN_RESTART) """ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ """ ********** ATTENTION ******* """ """ THERE CAN BE MORE THAN ONE SAME SECTION IN ONE FILE """ """ IF SYSTEM WAS REBOOTED DURING THE DAY """ """ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ for PATTERNSNAME in ALL_PATTERNS: patterns = ALL_PATTERNS[PATTERNSNAME] rgxpattern = re.compile(patterns['PATTERN']) for part in sar_parts: if rgxpattern.search(part): if PATTERNSNAME in usage: usage[PATTERNSNAME] += '\n' + part else: usage[PATTERNSNAME] = part try: first_line = part.split('\n')[0] except IndexError: first_line = part self.__fields[PATTERNSNAME] = self.__find_column(patterns['FIELDS'], first_line) # Try to match restart time if restart_pattern.search(part): pieces = part.split() self.__restart_times.append(pieces[0]) del pieces del sar_parts # Now we have parts pulled out and combined, do further # processing. for PATTERNSNAME in ALL_PATTERNS: patterns = ALL_PATTERNS[PATTERNSNAME] output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME], PATTERNSNAME, patterns) del usage return output return output
[ "def", "_parse_file", "(", "self", ",", "sar_parts", ")", ":", "usage", "=", "{", "}", "output", "=", "{", "}", "# If sar_parts is a list", "if", "type", "(", "sar_parts", ")", "is", "list", ":", "restart_pattern", "=", "re", ".", "compile", "(", "PATTER...
Parses splitted file to get proper information from split parts. :param sar_parts: Array of SAR file parts :return: ``Dictionary``-style info (but still non-parsed) \ from SAR file, split into sections we want to check
[ "Parses", "splitted", "file", "to", "get", "proper", "information", "from", "split", "parts", ".", ":", "param", "sar_parts", ":", "Array", "of", "SAR", "file", "parts", ":", "return", ":", "Dictionary", "-", "style", "info", "(", "but", "still", "non", ...
python
train
sods/ods
pods/datasets.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L519-L540
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4): """Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run.""" import gpxpy import gpxpy.gpx if not data_available(data_set): download_data(data_set) files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet'] X = [] for file in files: gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r') gpx = gpxpy.parse(gpx_file) segment = gpx.tracks[0].segments[0] points = [point for track in gpx.tracks for segment in track.segments for point in segment.points] data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points] X.append(np.asarray(data)[::sample_every, :]) gpx_file.close() if pandas_available: X = pd.DataFrame(X[0], columns=['seconds', 'latitude', 'longitude', 'elevation']) X.set_index(keys='seconds', inplace=True) return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
[ "def", "epomeo_gpx", "(", "data_set", "=", "'epomeo_gpx'", ",", "sample_every", "=", "4", ")", ":", "import", "gpxpy", "import", "gpxpy", ".", "gpx", "if", "not", "data_available", "(", "data_set", ")", ":", "download_data", "(", "data_set", ")", "files", ...
Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run.
[ "Data", "set", "of", "three", "GPS", "traces", "of", "the", "same", "movement", "on", "Mt", "Epomeo", "in", "Ischia", ".", "Requires", "gpxpy", "to", "run", "." ]
python
train
C-Pro/pgdocgen
pgdocgen/utils.py
https://github.com/C-Pro/pgdocgen/blob/b5d95c1bc1b38e3c7977aeddc20793a7b0f5d0fe/pgdocgen/utils.py#L41-L56
def init_logging(settings): '''Set up logger''' lg_format = '%(asctime)s : - %(message)s' lg_dateformat = '%Y.%m.%d %H:%M:%S' logging.basicConfig(format=lg_format, datefmt=lg_dateformat) log = get_logger() handler = logging.handlers.WatchedFileHandler( filename=settings['log_file'] \ if 'log_file' in settings.keys() else None, encoding='utf-8') formatter = logging.Formatter(fmt=lg_format, datefmt=lg_dateformat) handler.setFormatter(formatter) log.addHandler(handler) return log
[ "def", "init_logging", "(", "settings", ")", ":", "lg_format", "=", "'%(asctime)s : - %(message)s'", "lg_dateformat", "=", "'%Y.%m.%d %H:%M:%S'", "logging", ".", "basicConfig", "(", "format", "=", "lg_format", ",", "datefmt", "=", "lg_dateformat", ")", "log", "=", ...
Set up logger
[ "Set", "up", "logger" ]
python
train
alephdata/pantomime
pantomime/__init__.py
https://github.com/alephdata/pantomime/blob/818fe5d799ba045c1d908935f24c94a8438c3a60/pantomime/__init__.py#L19-L26
def useful_mimetype(text): """Check to see if the given mime type is a MIME type which is useful in terms of how to treat this file. """ if text is None: return False mimetype = normalize_mimetype(text) return mimetype not in [DEFAULT, PLAIN, None]
[ "def", "useful_mimetype", "(", "text", ")", ":", "if", "text", "is", "None", ":", "return", "False", "mimetype", "=", "normalize_mimetype", "(", "text", ")", "return", "mimetype", "not", "in", "[", "DEFAULT", ",", "PLAIN", ",", "None", "]" ]
Check to see if the given mime type is a MIME type which is useful in terms of how to treat this file.
[ "Check", "to", "see", "if", "the", "given", "mime", "type", "is", "a", "MIME", "type", "which", "is", "useful", "in", "terms", "of", "how", "to", "treat", "this", "file", "." ]
python
train
HumanBrainProject/hbp-service-client
hbp_service_client/storage_service/client.py
https://github.com/HumanBrainProject/hbp-service-client/blob/b338fb41a7f0e7b9d654ff28fcf13a56d03bff4d/hbp_service_client/storage_service/client.py#L167-L185
def mkdir(self, path): '''Create a folder in the storage service pointed by the given path. Args: path (str): The path of the folder to be created Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes ''' self.__validate_storage_path(path, projects_allowed=False) parent_metadata = self.get_parent(path) self.api_client.create_folder(path.split('/')[-1], parent_metadata['uuid'])
[ "def", "mkdir", "(", "self", ",", "path", ")", ":", "self", ".", "__validate_storage_path", "(", "path", ",", "projects_allowed", "=", "False", ")", "parent_metadata", "=", "self", ".", "get_parent", "(", "path", ")", "self", ".", "api_client", ".", "creat...
Create a folder in the storage service pointed by the given path. Args: path (str): The path of the folder to be created Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
[ "Create", "a", "folder", "in", "the", "storage", "service", "pointed", "by", "the", "given", "path", "." ]
python
test
pivotal-energy-solutions/django-datatable-view
datatableview/utils.py
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/utils.py#L72-L88
def get_model_at_related_field(model, attr): """ Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is not a relationship field, ``ValueError`` is raised. """ field = model._meta.get_field(attr) if hasattr(field, 'related_model'): return field.related_model raise ValueError("{model}.{attr} ({klass}) is not a relationship field.".format(**{ 'model': model.__name__, 'attr': attr, 'klass': field.__class__.__name__, }))
[ "def", "get_model_at_related_field", "(", "model", ",", "attr", ")", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "attr", ")", "if", "hasattr", "(", "field", ",", "'related_model'", ")", ":", "return", "field", ".", "related_model", "ra...
Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is not a relationship field, ``ValueError`` is raised.
[ "Looks", "up", "attr", "as", "a", "field", "of", "model", "and", "returns", "the", "related", "model", "class", ".", "If", "attr", "is", "not", "a", "relationship", "field", "ValueError", "is", "raised", "." ]
python
train
poldracklab/niworkflows
niworkflows/utils/bids.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/bids.py#L217-L306
def group_multiecho(bold_sess): """ Multiplexes multi-echo EPIs into arrays. Dual-echo is a special case of multi-echo, which is treated as single-echo data. >>> bold_sess = ["sub-01_task-rest_echo-1_run-01_bold.nii.gz", ... "sub-01_task-rest_echo-2_run-01_bold.nii.gz", ... "sub-01_task-rest_echo-1_run-02_bold.nii.gz", ... "sub-01_task-rest_echo-2_run-02_bold.nii.gz", ... "sub-01_task-rest_echo-3_run-02_bold.nii.gz", ... "sub-01_task-rest_run-03_bold.nii.gz"] >>> group_multiecho(bold_sess) ['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz'] >>> bold_sess.insert(2, "sub-01_task-rest_echo-3_run-01_bold.nii.gz") >>> group_multiecho(bold_sess) [['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', 'sub-01_task-rest_echo-3_run-01_bold.nii.gz'], ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz'] >>> bold_sess += ["sub-01_task-beh_echo-1_run-01_bold.nii.gz", ... "sub-01_task-beh_echo-2_run-01_bold.nii.gz", ... "sub-01_task-beh_echo-1_run-02_bold.nii.gz", ... "sub-01_task-beh_echo-2_run-02_bold.nii.gz", ... "sub-01_task-beh_echo-3_run-02_bold.nii.gz", ... 
"sub-01_task-beh_run-03_bold.nii.gz"] >>> group_multiecho(bold_sess) [['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', 'sub-01_task-rest_echo-3_run-01_bold.nii.gz'], ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz', 'sub-01_task-beh_echo-1_run-01_bold.nii.gz', 'sub-01_task-beh_echo-2_run-01_bold.nii.gz', ['sub-01_task-beh_echo-1_run-02_bold.nii.gz', 'sub-01_task-beh_echo-2_run-02_bold.nii.gz', 'sub-01_task-beh_echo-3_run-02_bold.nii.gz'], 'sub-01_task-beh_run-03_bold.nii.gz'] Some tests from https://neurostars.org/t/fmriprep-from\ -singularity-unboundlocalerror/3299/7 >>> bold_sess = ['sub-01_task-AudLoc_echo-1_bold.nii', ... 'sub-01_task-AudLoc_echo-2_bold.nii', ... 'sub-01_task-FJT_echo-1_bold.nii', ... 'sub-01_task-FJT_echo-2_bold.nii', ... 'sub-01_task-LDT_echo-1_bold.nii', ... 'sub-01_task-LDT_echo-2_bold.nii', ... 'sub-01_task-MotLoc_echo-1_bold.nii', ... 'sub-01_task-MotLoc_echo-2_bold.nii'] >>> group_multiecho(bold_sess) == bold_sess True >>> bold_sess += ['sub-01_task-MotLoc_echo-3_bold.nii'] >>> groups = group_multiecho(bold_sess) >>> len(groups[:-1]) 6 >>> [isinstance(g, list) for g in groups] [False, False, False, False, False, False, True] >>> len(groups[-1]) 3 """ from itertools import groupby def _grp_echos(x): if '_echo-' not in x: return x echo = re.search("_echo-\\d*", x).group(0) return x.replace(echo, "_echo-?") ses_uids = [] for _, bold in groupby(bold_sess, key=_grp_echos): bold = list(bold) # If single- or dual-echo, flatten list; keep list otherwise. action = getattr(ses_uids, 'append' if len(bold) > 2 else 'extend') action(bold) return ses_uids
[ "def", "group_multiecho", "(", "bold_sess", ")", ":", "from", "itertools", "import", "groupby", "def", "_grp_echos", "(", "x", ")", ":", "if", "'_echo-'", "not", "in", "x", ":", "return", "x", "echo", "=", "re", ".", "search", "(", "\"_echo-\\\\d*\"", ",...
Multiplexes multi-echo EPIs into arrays. Dual-echo is a special case of multi-echo, which is treated as single-echo data. >>> bold_sess = ["sub-01_task-rest_echo-1_run-01_bold.nii.gz", ... "sub-01_task-rest_echo-2_run-01_bold.nii.gz", ... "sub-01_task-rest_echo-1_run-02_bold.nii.gz", ... "sub-01_task-rest_echo-2_run-02_bold.nii.gz", ... "sub-01_task-rest_echo-3_run-02_bold.nii.gz", ... "sub-01_task-rest_run-03_bold.nii.gz"] >>> group_multiecho(bold_sess) ['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz'] >>> bold_sess.insert(2, "sub-01_task-rest_echo-3_run-01_bold.nii.gz") >>> group_multiecho(bold_sess) [['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', 'sub-01_task-rest_echo-3_run-01_bold.nii.gz'], ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz'] >>> bold_sess += ["sub-01_task-beh_echo-1_run-01_bold.nii.gz", ... "sub-01_task-beh_echo-2_run-01_bold.nii.gz", ... "sub-01_task-beh_echo-1_run-02_bold.nii.gz", ... "sub-01_task-beh_echo-2_run-02_bold.nii.gz", ... "sub-01_task-beh_echo-3_run-02_bold.nii.gz", ... 
"sub-01_task-beh_run-03_bold.nii.gz"] >>> group_multiecho(bold_sess) [['sub-01_task-rest_echo-1_run-01_bold.nii.gz', 'sub-01_task-rest_echo-2_run-01_bold.nii.gz', 'sub-01_task-rest_echo-3_run-01_bold.nii.gz'], ['sub-01_task-rest_echo-1_run-02_bold.nii.gz', 'sub-01_task-rest_echo-2_run-02_bold.nii.gz', 'sub-01_task-rest_echo-3_run-02_bold.nii.gz'], 'sub-01_task-rest_run-03_bold.nii.gz', 'sub-01_task-beh_echo-1_run-01_bold.nii.gz', 'sub-01_task-beh_echo-2_run-01_bold.nii.gz', ['sub-01_task-beh_echo-1_run-02_bold.nii.gz', 'sub-01_task-beh_echo-2_run-02_bold.nii.gz', 'sub-01_task-beh_echo-3_run-02_bold.nii.gz'], 'sub-01_task-beh_run-03_bold.nii.gz'] Some tests from https://neurostars.org/t/fmriprep-from\ -singularity-unboundlocalerror/3299/7 >>> bold_sess = ['sub-01_task-AudLoc_echo-1_bold.nii', ... 'sub-01_task-AudLoc_echo-2_bold.nii', ... 'sub-01_task-FJT_echo-1_bold.nii', ... 'sub-01_task-FJT_echo-2_bold.nii', ... 'sub-01_task-LDT_echo-1_bold.nii', ... 'sub-01_task-LDT_echo-2_bold.nii', ... 'sub-01_task-MotLoc_echo-1_bold.nii', ... 'sub-01_task-MotLoc_echo-2_bold.nii'] >>> group_multiecho(bold_sess) == bold_sess True >>> bold_sess += ['sub-01_task-MotLoc_echo-3_bold.nii'] >>> groups = group_multiecho(bold_sess) >>> len(groups[:-1]) 6 >>> [isinstance(g, list) for g in groups] [False, False, False, False, False, False, True] >>> len(groups[-1]) 3
[ "Multiplexes", "multi", "-", "echo", "EPIs", "into", "arrays", ".", "Dual", "-", "echo", "is", "a", "special", "case", "of", "multi", "-", "echo", "which", "is", "treated", "as", "single", "-", "echo", "data", "." ]
python
train
jeffbuttars/upkg
upkg/conf.py
https://github.com/jeffbuttars/upkg/blob/7d65a0b2eb4469aac5856b963ef2d429f2920dae/upkg/conf.py#L7-L19
def _clean_path(p): """todo: Docstring for _clen_path :param p: arg description :type p: type description :return: :rtype: """ np = os.path.expanduser(p) np = os.path.abspath(np) return np
[ "def", "_clean_path", "(", "p", ")", ":", "np", "=", "os", ".", "path", ".", "expanduser", "(", "p", ")", "np", "=", "os", ".", "path", ".", "abspath", "(", "np", ")", "return", "np" ]
todo: Docstring for _clen_path :param p: arg description :type p: type description :return: :rtype:
[ "todo", ":", "Docstring", "for", "_clen_path" ]
python
train
PMEAL/OpenPNM
openpnm/topotools/topotools.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L292-L326
def find_connecting_bonds(sites, am): r""" Given pairs of sites, finds the bonds which connects each pair. Parameters ---------- sites : array_like A 2-column vector containing pairs of site indices on each row. am : scipy.sparse matrix The adjacency matrix of the network. Must be symmetrical such that if sites *i* and *j* are connected, the matrix contains non-zero values at locations (i, j) and (j, i). Returns ------- Returns a list the same length as P1 (and P2) with each element containing the throat number that connects the corresponding pores, or `None`` if pores are not connected. Notes ----- The returned list can be converted to an ND-array, which will convert the ``None`` values to ``nan``. These can then be found using ``scipy.isnan``. """ if am.format != 'dok': am = am.todok(copy=False) sites = sp.array(sites, ndmin=2) if sites.size == 0: return [] z = tuple(zip(sites[:, 0], sites[:, 1])) neighbors = [am.get(z[i], None) for i in range(len(z))] return neighbors
[ "def", "find_connecting_bonds", "(", "sites", ",", "am", ")", ":", "if", "am", ".", "format", "!=", "'dok'", ":", "am", "=", "am", ".", "todok", "(", "copy", "=", "False", ")", "sites", "=", "sp", ".", "array", "(", "sites", ",", "ndmin", "=", "2...
r""" Given pairs of sites, finds the bonds which connects each pair. Parameters ---------- sites : array_like A 2-column vector containing pairs of site indices on each row. am : scipy.sparse matrix The adjacency matrix of the network. Must be symmetrical such that if sites *i* and *j* are connected, the matrix contains non-zero values at locations (i, j) and (j, i). Returns ------- Returns a list the same length as P1 (and P2) with each element containing the throat number that connects the corresponding pores, or `None`` if pores are not connected. Notes ----- The returned list can be converted to an ND-array, which will convert the ``None`` values to ``nan``. These can then be found using ``scipy.isnan``.
[ "r", "Given", "pairs", "of", "sites", "finds", "the", "bonds", "which", "connects", "each", "pair", "." ]
python
train
PythonCharmers/python-future
src/future/standard_library/__init__.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/standard_library/__init__.py#L429-L440
def restore_sys_modules(scrubbed): """ Add any previously scrubbed modules back to the sys.modules cache, but only if it's safe to do so. """ clash = set(sys.modules) & set(scrubbed) if len(clash) != 0: # If several, choose one arbitrarily to raise an exception about first = list(clash)[0] raise ImportError('future module {} clashes with Py2 module' .format(first)) sys.modules.update(scrubbed)
[ "def", "restore_sys_modules", "(", "scrubbed", ")", ":", "clash", "=", "set", "(", "sys", ".", "modules", ")", "&", "set", "(", "scrubbed", ")", "if", "len", "(", "clash", ")", "!=", "0", ":", "# If several, choose one arbitrarily to raise an exception about", ...
Add any previously scrubbed modules back to the sys.modules cache, but only if it's safe to do so.
[ "Add", "any", "previously", "scrubbed", "modules", "back", "to", "the", "sys", ".", "modules", "cache", "but", "only", "if", "it", "s", "safe", "to", "do", "so", "." ]
python
train
openstates/billy
billy/utils/__init__.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/utils/__init__.py#L147-L158
def cd(path): '''Creates the path if it doesn't exist''' old_dir = os.getcwd() try: os.makedirs(path) except OSError: pass os.chdir(path) try: yield finally: os.chdir(old_dir)
[ "def", "cd", "(", "path", ")", ":", "old_dir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", ":", "pass", "os", ".", "chdir", "(", "path", ")", "try", ":", "yield", "finally", ":", ...
Creates the path if it doesn't exist
[ "Creates", "the", "path", "if", "it", "doesn", "t", "exist" ]
python
train
CI-WATER/gsshapy
gsshapy/modeling/event.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L497-L506
def _update_gmt(self): """ Based on timezone and start date, the GMT card is updated """ if self.simulation_start is not None: # NOTE: Because of daylight savings time, # offset result depends on time of the year offset_string = str(self.simulation_start.replace(tzinfo=self.tz) .utcoffset().total_seconds()/3600.) self._update_card('GMT', offset_string)
[ "def", "_update_gmt", "(", "self", ")", ":", "if", "self", ".", "simulation_start", "is", "not", "None", ":", "# NOTE: Because of daylight savings time,", "# offset result depends on time of the year", "offset_string", "=", "str", "(", "self", ".", "simulation_start", "...
Based on timezone and start date, the GMT card is updated
[ "Based", "on", "timezone", "and", "start", "date", "the", "GMT", "card", "is", "updated" ]
python
train
tanghaibao/jcvi
jcvi/assembly/kmer.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/kmer.py#L611-L632
def bin(args): """ %prog bin filename filename.bin Serialize counts to bitarrays. """ from bitarray import bitarray p = OptionParser(bin.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) inp, outp = args fp = must_open(inp) fw = must_open(outp, "w") a = bitarray() for row in fp: c = row.split()[-1] a.append(int(c)) a.tofile(fw) fw.close()
[ "def", "bin", "(", "args", ")", ":", "from", "bitarray", "import", "bitarray", "p", "=", "OptionParser", "(", "bin", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ...
%prog bin filename filename.bin Serialize counts to bitarrays.
[ "%prog", "bin", "filename", "filename", ".", "bin" ]
python
train
user-cont/conu
conu/backend/k8s/pod.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/pod.py#L79-L95
def delete(self): """ delete pod from the Kubernetes cluster :return: None """ body = client.V1DeleteOptions() try: status = self.core_api.delete_namespaced_pod(self.name, self.namespace, body) logger.info("Deleting Pod %s in namespace %s", self.name, self.namespace) self.phase = PodPhase.TERMINATING except ApiException as e: raise ConuException( "Exception when calling Kubernetes API - delete_namespaced_pod: %s\n" % e) if status.status == 'Failure': raise ConuException("Deletion of Pod failed")
[ "def", "delete", "(", "self", ")", ":", "body", "=", "client", ".", "V1DeleteOptions", "(", ")", "try", ":", "status", "=", "self", ".", "core_api", ".", "delete_namespaced_pod", "(", "self", ".", "name", ",", "self", ".", "namespace", ",", "body", ")"...
delete pod from the Kubernetes cluster :return: None
[ "delete", "pod", "from", "the", "Kubernetes", "cluster", ":", "return", ":", "None" ]
python
train
cohorte/cohorte-herald
python/snippets/herald_irc/client.py
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/client.py#L117-L122
def wait(self, timeout=None): """ Waits for the client to stop its loop """ self.__stopped.wait(timeout) return self.__stopped.is_set()
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "self", ".", "__stopped", ".", "wait", "(", "timeout", ")", "return", "self", ".", "__stopped", ".", "is_set", "(", ")" ]
Waits for the client to stop its loop
[ "Waits", "for", "the", "client", "to", "stop", "its", "loop" ]
python
train
rootpy/rootpy
rootpy/plotting/root2matplotlib.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/root2matplotlib.py#L141-L265
def hist(hists, stacked=True, reverse=False, xpadding=0, ypadding=.1, yerror_in_padding=True, logy=None, snap=True, axes=None, **kwargs): """ Make a matplotlib hist plot from a ROOT histogram, stack or list of histograms. Parameters ---------- hists : Hist, list of Hist, HistStack The histogram(s) to be plotted stacked : bool, optional (default=True) If True then stack the histograms with the first histogram on the bottom, otherwise overlay them with the first histogram in the background. reverse : bool, optional (default=False) If True then reverse the order of the stack or overlay. xpadding : float or 2-tuple of floats, optional (default=0) Padding to add on the left and right sides of the plot as a fraction of the axes width after the padding has been added. Specify unique left and right padding with a 2-tuple. ypadding : float or 2-tuple of floats, optional (default=.1) Padding to add on the top and bottom of the plot as a fraction of the axes height after the padding has been added. Specify unique top and bottom padding with a 2-tuple. yerror_in_padding : bool, optional (default=True) If True then make the padding inclusive of the y errors otherwise only pad around the y values. logy : bool, optional (default=None) Apply special treatment of a log-scale y-axis to display the histogram correctly. If None (the default) then automatically determine if the y-axis is log-scale. snap : bool, optional (default=True) If True (the default) then the origin is an implicit lower bound of the histogram unless the histogram has both positive and negative bins. axes : matplotlib Axes instance, optional (default=None) The axes to plot on. If None then use the global current axes. kwargs : additional keyword arguments, optional All additional keyword arguments are passed to matplotlib's fill_between for the filled regions and matplotlib's step function for the edges. 
Returns ------- The return value from matplotlib's hist function, or list of such return values if a stack or list of histograms was plotted. """ if axes is None: axes = plt.gca() if logy is None: logy = axes.get_yscale() == 'log' curr_xlim = axes.get_xlim() curr_ylim = axes.get_ylim() was_empty = not axes.has_data() returns = [] if isinstance(hists, _Hist): # This is a single plottable object. returns = _hist(hists, axes=axes, logy=logy, **kwargs) _set_bounds(hists, axes=axes, was_empty=was_empty, prev_xlim=curr_xlim, prev_ylim=curr_ylim, xpadding=xpadding, ypadding=ypadding, yerror_in_padding=yerror_in_padding, snap=snap, logy=logy) elif stacked: # draw the top histogram first so its edges don't cover the histograms # beneath it in the stack if not reverse: hists = list(hists)[::-1] for i, h in enumerate(hists): kwargs_local = kwargs.copy() if i == len(hists) - 1: low = h.Clone() low.Reset() else: low = sum(hists[i + 1:]) high = h + low high.alpha = getattr(h, 'alpha', None) proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs) returns.append(proxy) if not reverse: returns = returns[::-1] _set_bounds(sum(hists), axes=axes, was_empty=was_empty, prev_xlim=curr_xlim, prev_ylim=curr_ylim, xpadding=xpadding, ypadding=ypadding, yerror_in_padding=yerror_in_padding, snap=snap, logy=logy) else: for h in _maybe_reversed(hists, reverse): returns.append(_hist(h, axes=axes, logy=logy, **kwargs)) if reverse: returns = returns[::-1] _set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())], axes=axes, was_empty=was_empty, prev_xlim=curr_xlim, prev_ylim=curr_ylim, xpadding=xpadding, ypadding=ypadding, yerror_in_padding=yerror_in_padding, snap=snap, logy=logy) return returns
[ "def", "hist", "(", "hists", ",", "stacked", "=", "True", ",", "reverse", "=", "False", ",", "xpadding", "=", "0", ",", "ypadding", "=", ".1", ",", "yerror_in_padding", "=", "True", ",", "logy", "=", "None", ",", "snap", "=", "True", ",", "axes", "...
Make a matplotlib hist plot from a ROOT histogram, stack or list of histograms. Parameters ---------- hists : Hist, list of Hist, HistStack The histogram(s) to be plotted stacked : bool, optional (default=True) If True then stack the histograms with the first histogram on the bottom, otherwise overlay them with the first histogram in the background. reverse : bool, optional (default=False) If True then reverse the order of the stack or overlay. xpadding : float or 2-tuple of floats, optional (default=0) Padding to add on the left and right sides of the plot as a fraction of the axes width after the padding has been added. Specify unique left and right padding with a 2-tuple. ypadding : float or 2-tuple of floats, optional (default=.1) Padding to add on the top and bottom of the plot as a fraction of the axes height after the padding has been added. Specify unique top and bottom padding with a 2-tuple. yerror_in_padding : bool, optional (default=True) If True then make the padding inclusive of the y errors otherwise only pad around the y values. logy : bool, optional (default=None) Apply special treatment of a log-scale y-axis to display the histogram correctly. If None (the default) then automatically determine if the y-axis is log-scale. snap : bool, optional (default=True) If True (the default) then the origin is an implicit lower bound of the histogram unless the histogram has both positive and negative bins. axes : matplotlib Axes instance, optional (default=None) The axes to plot on. If None then use the global current axes. kwargs : additional keyword arguments, optional All additional keyword arguments are passed to matplotlib's fill_between for the filled regions and matplotlib's step function for the edges. Returns ------- The return value from matplotlib's hist function, or list of such return values if a stack or list of histograms was plotted.
[ "Make", "a", "matplotlib", "hist", "plot", "from", "a", "ROOT", "histogram", "stack", "or", "list", "of", "histograms", "." ]
python
train
quantmind/pulsar
pulsar/apps/wsgi/routers.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/routers.py#L326-L350
def _resolve(self, path, method, urlargs=None): '''Resolve a path and return a ``(handler, urlargs)`` tuple or ``None`` if the path could not be resolved. ''' match = self.route.match(path) if match is None: if not self.route.is_leaf: # no match return elif '__remaining__' in match: path = match.pop('__remaining__') urlargs = update_args(urlargs, match) else: handler = getattr(self, method, None) if handler is None: raise MethodNotAllowed response_wrapper = self.response_wrapper if response_wrapper: handler = partial(response_wrapper, handler) return Handler(self, handler, update_args(urlargs, match)) # for handler in self.routes: view_args = handler._resolve(path, method, urlargs) if view_args is None: continue return view_args
[ "def", "_resolve", "(", "self", ",", "path", ",", "method", ",", "urlargs", "=", "None", ")", ":", "match", "=", "self", ".", "route", ".", "match", "(", "path", ")", "if", "match", "is", "None", ":", "if", "not", "self", ".", "route", ".", "is_l...
Resolve a path and return a ``(handler, urlargs)`` tuple or ``None`` if the path could not be resolved.
[ "Resolve", "a", "path", "and", "return", "a", "(", "handler", "urlargs", ")", "tuple", "or", "None", "if", "the", "path", "could", "not", "be", "resolved", "." ]
python
train
RedHatInsights/insights-core
insights/contrib/soscleaner.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L570-L601
def _clean_file(self, f): '''this will take a given file path, scrub it accordingly, and save a new copy of the file in the same location''' if os.path.exists(f) and not os.path.islink(f): tmp_file = tempfile.TemporaryFile(mode='w+b') try: fh = open(f, 'r') data = fh.readlines() fh.close() if len(data) > 0: #if the file isn't empty: for l in data: new_l = self._clean_line(l) tmp_file.write(new_l.encode('utf-8')) tmp_file.seek(0) except Exception as e: # pragma: no cover self.logger.exception(e) raise Exception("CleanFile Error: Cannot Open File For Reading - %s" % f) try: if len(data) > 0: new_fh = open(f, 'wb') for line in tmp_file: new_fh.write(line) new_fh.close() except Exception as e: # pragma: no cover self.logger.exception(e) raise Exception("CleanFile Error: Cannot Write to New File - %s" % f) finally: tmp_file.close()
[ "def", "_clean_file", "(", "self", ",", "f", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "f", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "f", ")", ":", "tmp_file", "=", "tempfile", ".", "TemporaryFile", "(", "mode", "=",...
this will take a given file path, scrub it accordingly, and save a new copy of the file in the same location
[ "this", "will", "take", "a", "given", "file", "path", "scrub", "it", "accordingly", "and", "save", "a", "new", "copy", "of", "the", "file", "in", "the", "same", "location" ]
python
train
xenadevel/PyXenaManager
xenamanager/xena_app.py
https://github.com/xenadevel/PyXenaManager/blob/384ca265f73044b8a8b471f5dd7a6103fc54f4df/xenamanager/xena_app.py#L258-L269
def inventory(self, modules_inventory=False): """ Get chassis inventory. :param modules_inventory: True - read modules inventory, false - don't read. """ self.c_info = self.get_attributes() for m_index, m_portcounts in enumerate(self.c_info['c_portcounts'].split()): if int(m_portcounts): module = XenaModule(parent=self, index=m_index) if modules_inventory: module.inventory()
[ "def", "inventory", "(", "self", ",", "modules_inventory", "=", "False", ")", ":", "self", ".", "c_info", "=", "self", ".", "get_attributes", "(", ")", "for", "m_index", ",", "m_portcounts", "in", "enumerate", "(", "self", ".", "c_info", "[", "'c_portcount...
Get chassis inventory. :param modules_inventory: True - read modules inventory, false - don't read.
[ "Get", "chassis", "inventory", "." ]
python
train
lltk/lltk
lltk/scraping.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/scraping.py#L147-L154
def merge(self, elements): ''' Merges all scraping results to a list sorted by frequency of occurrence. ''' from collections import Counter from lltk.utils import list2tuple, tuple2list # The list2tuple conversion is necessary because mutable objects (e.g. lists) are not hashable merged = tuple2list([value for value, count in Counter(list2tuple(list(elements))).most_common()]) return merged
[ "def", "merge", "(", "self", ",", "elements", ")", ":", "from", "collections", "import", "Counter", "from", "lltk", ".", "utils", "import", "list2tuple", ",", "tuple2list", "# The list2tuple conversion is necessary because mutable objects (e.g. lists) are not hashable", "me...
Merges all scraping results to a list sorted by frequency of occurrence.
[ "Merges", "all", "scraping", "results", "to", "a", "list", "sorted", "by", "frequency", "of", "occurrence", "." ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2256-L2261
def _configure_manager(self): """ Creates a manager to handle interacting with Containers. """ self._manager = ContainerManager(self, resource_class=Container, response_key="", uri_base="")
[ "def", "_configure_manager", "(", "self", ")", ":", "self", ".", "_manager", "=", "ContainerManager", "(", "self", ",", "resource_class", "=", "Container", ",", "response_key", "=", "\"\"", ",", "uri_base", "=", "\"\"", ")" ]
Creates a manager to handle interacting with Containers.
[ "Creates", "a", "manager", "to", "handle", "interacting", "with", "Containers", "." ]
python
train
python-openxml/python-docx
docx/image/jpeg.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/jpeg.py#L128-L135
def app1(self): """ First APP1 marker in image markers. """ for m in self._markers: if m.marker_code == JPEG_MARKER_CODE.APP1: return m raise KeyError('no APP1 marker in image')
[ "def", "app1", "(", "self", ")", ":", "for", "m", "in", "self", ".", "_markers", ":", "if", "m", ".", "marker_code", "==", "JPEG_MARKER_CODE", ".", "APP1", ":", "return", "m", "raise", "KeyError", "(", "'no APP1 marker in image'", ")" ]
First APP1 marker in image markers.
[ "First", "APP1", "marker", "in", "image", "markers", "." ]
python
train
ARMmbed/autoversion
src/auto_version/semver.py
https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/semver.py#L53-L71
def make_new_semver(current_semver, all_triggers, **overrides): """Defines how to increment semver based on which significant figure is triggered""" new_semver = {} bumped = False for sig_fig in SemVerSigFig: # iterate sig figs in order of significance value = getattr(current_semver, sig_fig) override = overrides.get(sig_fig) if override is not None: new_semver[sig_fig] = override if int(override) > int(value): bumped = True elif bumped: new_semver[sig_fig] = "0" elif sig_fig in all_triggers: new_semver[sig_fig] = str(int(value) + 1) bumped = True else: new_semver[sig_fig] = value return SemVer(**new_semver)
[ "def", "make_new_semver", "(", "current_semver", ",", "all_triggers", ",", "*", "*", "overrides", ")", ":", "new_semver", "=", "{", "}", "bumped", "=", "False", "for", "sig_fig", "in", "SemVerSigFig", ":", "# iterate sig figs in order of significance", "value", "=...
Defines how to increment semver based on which significant figure is triggered
[ "Defines", "how", "to", "increment", "semver", "based", "on", "which", "significant", "figure", "is", "triggered" ]
python
train
newville/wxmplot
wxmplot/stackedplotframe.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/stackedplotframe.py#L38-L45
def plot(self, x, y, panel='top', xlabel=None, **kws): """plot after clearing current plot """ panel = self.get_panel(panel) panel.plot(x, y, **kws) if xlabel is not None: self.xlabel = xlabel if self.xlabel is not None: self.panel_bot.set_xlabel(self.xlabel)
[ "def", "plot", "(", "self", ",", "x", ",", "y", ",", "panel", "=", "'top'", ",", "xlabel", "=", "None", ",", "*", "*", "kws", ")", ":", "panel", "=", "self", ".", "get_panel", "(", "panel", ")", "panel", ".", "plot", "(", "x", ",", "y", ",", ...
plot after clearing current plot
[ "plot", "after", "clearing", "current", "plot" ]
python
train
hammerlab/cohorts
cohorts/cohort.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L572-L626
def _load_single_patient_variants(self, patient, filter_fn, use_cache=True, **kwargs): """ Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering. """ if filter_fn is None: use_filtered_cache = False else: filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading variants for patient {} with filter_fn {}".format(patient.id, filter_fn_name)) use_filtered_cache = use_cache ## confirm that we can get cache-name (else don't use filtered cache) if use_filtered_cache: logger.debug("... identifying filtered-cache file name") try: ## try to load filtered variants from cache filtered_cache_file_name = "%s-variants.%s.pkl" % (self.merge_type, self._hash_filter_fn(filter_fn, **kwargs)) except: logger.warning("... error identifying filtered-cache file name for patient {}: {}".format( patient.id, filter_fn_name)) use_filtered_cache = False else: logger.debug("... trying to load filtered variants from cache: {}".format(filtered_cache_file_name)) try: cached = self.load_from_cache(self.cache_names["variant"], patient.id, filtered_cache_file_name) if cached is not None: return cached except: logger.warning("Error loading variants from cache for patient: {}".format(patient.id)) pass ## get merged variants logger.debug("... getting merged variants for: {}".format(patient.id)) merged_variants = self._load_single_patient_merged_variants(patient, use_cache=use_cache) # Note None here is different from 0. We want to preserve None if merged_variants is None: logger.info("Variants did not exist for patient %s" % patient.id) return None logger.debug("... 
applying filters to variants for: {}".format(patient.id)) filtered_variants = filter_variants(variant_collection=merged_variants, patient=patient, filter_fn=filter_fn, **kwargs) if use_filtered_cache: logger.debug("... saving filtered variants to cache: {}".format(filtered_cache_file_name)) self.save_to_cache(filtered_variants, self.cache_names["variant"], patient.id, filtered_cache_file_name) return filtered_variants
[ "def", "_load_single_patient_variants", "(", "self", ",", "patient", ",", "filter_fn", ",", "use_cache", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "filter_fn", "is", "None", ":", "use_filtered_cache", "=", "False", "else", ":", "filter_fn_name", ...
Load filtered, merged variants for a single patient, optionally using cache Note that filtered variants are first merged before filtering, and each step is cached independently. Turn on debug statements for more details about cached files. Use `_load_single_patient_merged_variants` to see merged variants without filtering.
[ "Load", "filtered", "merged", "variants", "for", "a", "single", "patient", "optionally", "using", "cache" ]
python
train
openvax/datacache
datacache/cache.py
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/cache.py#L74-L104
def fetch( self, url, filename=None, decompress=False, force=False, timeout=None, use_wget_if_available=True): """ Return the local path to the downloaded copy of a given URL. Don't download the file again if it's already present, unless `force` is True. """ key = (url, decompress) if not force and key in self._local_paths: path = self._local_paths[key] if exists(path): return path else: del self._local_paths[key] path = download.fetch_file( url, filename=filename, decompress=decompress, subdir=self.subdir, force=force, timeout=timeout, use_wget_if_available=use_wget_if_available) self._local_paths[key] = path return path
[ "def", "fetch", "(", "self", ",", "url", ",", "filename", "=", "None", ",", "decompress", "=", "False", ",", "force", "=", "False", ",", "timeout", "=", "None", ",", "use_wget_if_available", "=", "True", ")", ":", "key", "=", "(", "url", ",", "decomp...
Return the local path to the downloaded copy of a given URL. Don't download the file again if it's already present, unless `force` is True.
[ "Return", "the", "local", "path", "to", "the", "downloaded", "copy", "of", "a", "given", "URL", ".", "Don", "t", "download", "the", "file", "again", "if", "it", "s", "already", "present", "unless", "force", "is", "True", "." ]
python
train
SBRG/ssbio
ssbio/protein/structure/homology/itasser/itasserprep.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprep.py#L107-L120
def prep_folder(self, seq): """Take in a sequence string and prepares the folder for the I-TASSER run.""" itasser_dir = op.join(self.root_dir, self.id) if not op.exists(itasser_dir): os.makedirs(itasser_dir) tmp = {self.id: seq} fasta.write_fasta_file_from_dict(indict=tmp, outname='seq', outext='.fasta', outdir=itasser_dir) return itasser_dir
[ "def", "prep_folder", "(", "self", ",", "seq", ")", ":", "itasser_dir", "=", "op", ".", "join", "(", "self", ".", "root_dir", ",", "self", ".", "id", ")", "if", "not", "op", ".", "exists", "(", "itasser_dir", ")", ":", "os", ".", "makedirs", "(", ...
Take in a sequence string and prepares the folder for the I-TASSER run.
[ "Take", "in", "a", "sequence", "string", "and", "prepares", "the", "folder", "for", "the", "I", "-", "TASSER", "run", "." ]
python
train
tilde-lab/tilde
tilde/core/settings.py
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/core/settings.py#L126-L177
def get_hierarchy(settings): ''' Gets main mapping source according to what a data classification is made Gets the hierarchy groups (only for GUI) Gets the hierarchy values ''' hierarchy, hierarchy_groups, hierarchy_values = [], [], {} hgroup_ids, enumerated_vals = {}, set() session = connect_database(settings) for item in session.query(model.Hierarchy_value).all(): try: hierarchy_values[item.cid].update({item.num: item.name}) except KeyError: hierarchy_values[item.cid] = {item.num: item.name} enumerated_vals.add(item.cid) try: for item in session.query(model.Hierarchy).all(): if item.has_facet and not item.has_topic: raise RuntimeError('Fatal error: "has_facet" implies "has_topic"') if item.slider and not '.' in item.slider: raise RuntimeError('Fatal error: "has_slider" must have a reference to some table field') hierarchy.append({ 'cid':item.cid, 'category':item.name, 'source':item.source, 'html':item.html, 'has_slider':item.slider, 'sort':item.sort, 'multiple':item.multiple, 'optional':item.optional, 'has_summary_contrb':item.has_summary_contrb, 'has_column':item.has_column, 'has_facet':item.has_facet, 'creates_topic':item.has_topic, 'is_chem_formula':item.chem_formula, 'plottable':item.plottable, 'enumerated':True if item.cid in enumerated_vals else False }) try: hgroup_ids[item.hgroup_id].append(item.cid) except KeyError: hgroup_ids[item.hgroup_id] = [item.cid] except RuntimeError as e: session.close() sys.exit(e) for item in session.query(model.Hierarchy_group).all(): hierarchy_groups.append({ 'id': item.hgroup_id, 'category': item.name, 'html_pocket': '', # specially for JavaScript client 'landing_group': item.landing_group, 'settings_group': item.settings_group, 'includes': hgroup_ids[item.hgroup_id] }) session.close() return hierarchy, hierarchy_groups, hierarchy_values
[ "def", "get_hierarchy", "(", "settings", ")", ":", "hierarchy", ",", "hierarchy_groups", ",", "hierarchy_values", "=", "[", "]", ",", "[", "]", ",", "{", "}", "hgroup_ids", ",", "enumerated_vals", "=", "{", "}", ",", "set", "(", ")", "session", "=", "c...
Gets main mapping source according to what a data classification is made Gets the hierarchy groups (only for GUI) Gets the hierarchy values
[ "Gets", "main", "mapping", "source", "according", "to", "what", "a", "data", "classification", "is", "made", "Gets", "the", "hierarchy", "groups", "(", "only", "for", "GUI", ")", "Gets", "the", "hierarchy", "values" ]
python
train
ui/django-post_office
post_office/utils.py
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/utils.py#L13-L34
def send_mail(subject, message, from_email, recipient_list, html_message='', scheduled_time=None, headers=None, priority=PRIORITY.medium): """ Add a new message to the mail queue. This is a replacement for Django's ``send_mail`` core email method. """ subject = force_text(subject) status = None if priority == PRIORITY.now else STATUS.queued emails = [] for address in recipient_list: emails.append( Email.objects.create( from_email=from_email, to=address, subject=subject, message=message, html_message=html_message, status=status, headers=headers, priority=priority, scheduled_time=scheduled_time ) ) if priority == PRIORITY.now: for email in emails: email.dispatch() return emails
[ "def", "send_mail", "(", "subject", ",", "message", ",", "from_email", ",", "recipient_list", ",", "html_message", "=", "''", ",", "scheduled_time", "=", "None", ",", "headers", "=", "None", ",", "priority", "=", "PRIORITY", ".", "medium", ")", ":", "subje...
Add a new message to the mail queue. This is a replacement for Django's ``send_mail`` core email method.
[ "Add", "a", "new", "message", "to", "the", "mail", "queue", ".", "This", "is", "a", "replacement", "for", "Django", "s", "send_mail", "core", "email", "method", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/TinyQuant/FutuDataEvent.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/FutuDataEvent.py#L332-L363
def process_quote(self, data): """报价推送""" for ix, row in data.iterrows(): symbol = row['code'] tick = self._tick_dict.get(symbol, None) if not tick: tick = TinyQuoteData() tick.symbol = symbol self._tick_dict[symbol] = tick tick.date = row['data_date'].replace('-', '') tick.time = row['data_time'] # with GLOBAL.dt_lock: if tick.date and tick.time: tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S') else: return tick.openPrice = row['open_price'] tick.highPrice = row['high_price'] tick.lowPrice = row['low_price'] tick.preClosePrice = row['prev_close_price'] # 1.25 新增摆盘价差,方便计算正确的订单提交价格 要求牛牛最低版本 v3.42.4961.125 if 'price_spread' in row: tick.priceSpread = row['price_spread'] tick.lastPrice = row['last_price'] tick.volume = row['volume'] new_tick = copy(tick) self._notify_new_tick_event(new_tick)
[ "def", "process_quote", "(", "self", ",", "data", ")", ":", "for", "ix", ",", "row", "in", "data", ".", "iterrows", "(", ")", ":", "symbol", "=", "row", "[", "'code'", "]", "tick", "=", "self", ".", "_tick_dict", ".", "get", "(", "symbol", ",", "...
报价推送
[ "报价推送" ]
python
train
openstack/networking-arista
networking_arista/common/db_lib.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/common/db_lib.py#L71-L78
def filter_network_type(query): """Filter unsupported segment types""" segment_model = segment_models.NetworkSegment query = (query .filter( segment_model.network_type.in_( utils.SUPPORTED_NETWORK_TYPES))) return query
[ "def", "filter_network_type", "(", "query", ")", ":", "segment_model", "=", "segment_models", ".", "NetworkSegment", "query", "=", "(", "query", ".", "filter", "(", "segment_model", ".", "network_type", ".", "in_", "(", "utils", ".", "SUPPORTED_NETWORK_TYPES", "...
Filter unsupported segment types
[ "Filter", "unsupported", "segment", "types" ]
python
train
hammerlab/cohorts
cohorts/varcode_utils.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/varcode_utils.py#L107-L157
def filter_effects(effect_collection, variant_collection, patient, filter_fn, all_effects, **kwargs): """Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter """ def top_priority_maybe(effect_collection): """ Always (unless all_effects=True) take the top priority effect per variant so we end up with a single effect per variant. """ if all_effects: return effect_collection return EffectCollection(list(effect_collection.top_priority_effect_per_variant().values())) def apply_filter_fn(filter_fn, effect): """ Return True if filter_fn is true for the effect or its alternate_effect. If no alternate_effect, then just return True if filter_fn is True. """ applied = filter_fn(FilterableEffect( effect=effect, variant_collection=variant_collection, patient=patient), **kwargs) if hasattr(effect, "alternate_effect"): applied_alternate = filter_fn(FilterableEffect( effect=effect.alternate_effect, variant_collection=variant_collection, patient=patient), **kwargs) return applied or applied_alternate return applied if filter_fn: return top_priority_maybe(EffectCollection([ effect for effect in effect_collection if apply_filter_fn(filter_fn, effect)])) else: return top_priority_maybe(effect_collection)
[ "def", "filter_effects", "(", "effect_collection", ",", "variant_collection", ",", "patient", ",", "filter_fn", ",", "all_effects", ",", "*", "*", "kwargs", ")", ":", "def", "top_priority_maybe", "(", "effect_collection", ")", ":", "\"\"\"\n Always (unless all_...
Filter variants from the Effect Collection Parameters ---------- effect_collection : varcode.EffectCollection variant_collection : varcode.VariantCollection patient : cohorts.Patient filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. all_effects : boolean Return the single, top-priority effect if False. If True, return all effects (don't filter to top-priority). Returns ------- varcode.EffectCollection Filtered effect collection, with only the variants passing the filter
[ "Filter", "variants", "from", "the", "Effect", "Collection" ]
python
train
zenodo/zenodo-accessrequests
zenodo_accessrequests/tokens.py
https://github.com/zenodo/zenodo-accessrequests/blob/ce2cf3f1425d02ba4f3ad3202cfca43a1892558a/zenodo_accessrequests/tokens.py#L98-L109
def engine(self): """Get cryptographic engine.""" if not hasattr(self, '_engine'): from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes digest = hashes.Hash(hashes.SHA256(), backend=default_backend()) digest.update(current_app.config['SECRET_KEY'].encode('utf8')) fernet_key = urlsafe_b64encode(digest.finalize()) self._engine = Fernet(fernet_key) return self._engine
[ "def", "engine", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_engine'", ")", ":", "from", "cryptography", ".", "fernet", "import", "Fernet", "from", "cryptography", ".", "hazmat", ".", "backends", "import", "default_backend", "from", ...
Get cryptographic engine.
[ "Get", "cryptographic", "engine", "." ]
python
test
Valassis-Digital-Media/spylon
spylon/spark/progress.py
https://github.com/Valassis-Digital-Media/spylon/blob/ac00e285fa1c790674606b793819c3e5baee0d48/spylon/spark/progress.py#L34-L59
def _pretty_time_delta(td): """Creates a string representation of a time delta. Parameters ---------- td : :class:`datetime.timedelta` Returns ------- pretty_formatted_datetime : str """ seconds = td.total_seconds() sign_string = '-' if seconds < 0 else '' seconds = abs(int(seconds)) days, seconds = divmod(seconds, 86400) hours, seconds = divmod(seconds, 3600) minutes, seconds = divmod(seconds, 60) d = dict(sign=sign_string, days=days, hours=hours, minutes=minutes, seconds=seconds) if days > 0: return '{sign}{days}d{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d) elif hours > 0: return '{sign}{hours:02d}h{minutes:02d}m:{seconds:02d}s'.format(**d) elif minutes > 0: return '{sign}{minutes:02d}m:{seconds:02d}s'.format(**d) else: return '{sign}{seconds:02d}s'.format(**d)
[ "def", "_pretty_time_delta", "(", "td", ")", ":", "seconds", "=", "td", ".", "total_seconds", "(", ")", "sign_string", "=", "'-'", "if", "seconds", "<", "0", "else", "''", "seconds", "=", "abs", "(", "int", "(", "seconds", ")", ")", "days", ",", "sec...
Creates a string representation of a time delta. Parameters ---------- td : :class:`datetime.timedelta` Returns ------- pretty_formatted_datetime : str
[ "Creates", "a", "string", "representation", "of", "a", "time", "delta", "." ]
python
train
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/salt/layers.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/salt/layers.py#L84-L105
def to_etree(self): """ creates an etree element of a ``SaltLayer`` that mimicks a SaltXMI <layers> element """ nodes_attrib_val = ' '.join('//@nodes.{}'.format(node_id) for node_id in self.nodes) edges_attrib_val = ' '.join('//@edges.{}'.format(edge_id) for edge_id in self.edges) attribs = { '{{{pre}}}type'.format(pre=NAMESPACES['xsi']): self.xsi_type, 'nodes': nodes_attrib_val, 'edges': edges_attrib_val} # a layer might have no nodes or edges attributed to it non_empty_attribs = {key: val for (key, val) in attribs.items() if val is not None} E = ElementMaker() layer = E('layers', non_empty_attribs) label_elements = (label.to_etree() for label in self.labels) layer.extend(label_elements) return layer
[ "def", "to_etree", "(", "self", ")", ":", "nodes_attrib_val", "=", "' '", ".", "join", "(", "'//@nodes.{}'", ".", "format", "(", "node_id", ")", "for", "node_id", "in", "self", ".", "nodes", ")", "edges_attrib_val", "=", "' '", ".", "join", "(", "'//@edg...
creates an etree element of a ``SaltLayer`` that mimicks a SaltXMI <layers> element
[ "creates", "an", "etree", "element", "of", "a", "SaltLayer", "that", "mimicks", "a", "SaltXMI", "<layers", ">", "element" ]
python
train
SFDO-Tooling/CumulusCI
cumulusci/tasks/release_notes/generator.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/tasks/release_notes/generator.py#L52-L59
def render(self): """ Returns the rendered release notes from all parsers as a string """ release_notes = [] for parser in self.parsers: parser_content = parser.render() if parser_content is not None: release_notes.append(parser_content) return u"\r\n\r\n".join(release_notes)
[ "def", "render", "(", "self", ")", ":", "release_notes", "=", "[", "]", "for", "parser", "in", "self", ".", "parsers", ":", "parser_content", "=", "parser", ".", "render", "(", ")", "if", "parser_content", "is", "not", "None", ":", "release_notes", ".", ...
Returns the rendered release notes from all parsers as a string
[ "Returns", "the", "rendered", "release", "notes", "from", "all", "parsers", "as", "a", "string" ]
python
train
F5Networks/f5-common-python
f5/bigip/tm/gtm/topology.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/gtm/topology.py#L149-L153
def exists(self, **kwargs): """Providing a partition is not necessary on topology; causes errors""" kwargs.pop('partition', None) kwargs['transform_name'] = True return self._exists(**kwargs)
[ "def", "exists", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "pop", "(", "'partition'", ",", "None", ")", "kwargs", "[", "'transform_name'", "]", "=", "True", "return", "self", ".", "_exists", "(", "*", "*", "kwargs", ")" ]
Providing a partition is not necessary on topology; causes errors
[ "Providing", "a", "partition", "is", "not", "necessary", "on", "topology", ";", "causes", "errors" ]
python
train
pycontribs/pyrax
pyrax/cloudmonitoring.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L82-L86
def update(self, agent=None, metadata=None): """ Only the agent_id and metadata are able to be updated via the API. """ self.manager.update_entity(self, agent=agent, metadata=metadata)
[ "def", "update", "(", "self", ",", "agent", "=", "None", ",", "metadata", "=", "None", ")", ":", "self", ".", "manager", ".", "update_entity", "(", "self", ",", "agent", "=", "agent", ",", "metadata", "=", "metadata", ")" ]
Only the agent_id and metadata are able to be updated via the API.
[ "Only", "the", "agent_id", "and", "metadata", "are", "able", "to", "be", "updated", "via", "the", "API", "." ]
python
train