repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/Graph.py#L279-L284
def describe_edge(self, edge):
    """Return a tuple of (edge, edge data, head, tail) for *edge*."""
    record = self.edges[edge]
    return edge, record[2], record[0], record[1]
[ "def", "describe_edge", "(", "self", ",", "edge", ")", ":", "head", ",", "tail", ",", "data", "=", "self", ".", "edges", "[", "edge", "]", "return", "edge", ",", "data", ",", "head", ",", "tail" ]
return edge, edge data, head, tail for edge
[ "return", "edge", "edge", "data", "head", "tail", "for", "edge" ]
python
train
mgedmin/findimports
findimports.py
https://github.com/mgedmin/findimports/blob/c20a50b497390fed15aa3835476f4fad57313e8a/findimports.py#L513-L560
def isModule(self, dotted_name, extrapath=None):
    """Is ``dotted_name`` the name of a module?

    Returns the resolved module name (a truthy string) when the dotted
    name can be located, or None otherwise.  Results are memoized in
    ``self._module_cache`` keyed by ``(dotted_name, extrapath)``.
    """
    # Fast path: answer already cached for this (name, extrapath) pair.
    try:
        return self._module_cache[(dotted_name, extrapath)]
    except KeyError:
        pass
    # Already-imported and built-in modules are accepted as-is.
    if dotted_name in sys.modules or dotted_name in self.builtin_modules:
        return dotted_name
    # Translate the dotted name to a relative file path (no extension yet).
    filename = dotted_name.replace('.', os.path.sep)
    if extrapath:
        # Look under the extra path first, trying each known extension.
        for ext in self._exts:
            candidate = os.path.join(extrapath, filename) + ext
            if os.path.exists(candidate):
                modname = self.filenameToModname(candidate)
                self._module_cache[(dotted_name, extrapath)] = modname
                return modname
    # Cached result for the extrapath-independent lookup, if any.
    try:
        return self._module_cache[(dotted_name, None)]
    except KeyError:
        pass
    for dir in self.path:
        if os.path.isfile(dir):
            # A file on the path is either an egg-info stub or a zip archive.
            if dir.endswith('.egg-info'):
                # distribute creates a setuptools-blah-blah.egg-info
                # that ends up in sys.path
                continue
            try:
                zf = zipfile.ZipFile(dir)
            except zipfile.BadZipfile:
                self.warn(dir, "%s: not a directory or zip file", dir)
                continue
            names = zf.namelist()
            for ext in self._exts:
                candidate = filename + ext
                if candidate in names:
                    # Inside a zip, the module name comes from the archive
                    # member path, not from a filesystem path.
                    modname = filename.replace(os.path.sep, '.')
                    self._module_cache[(dotted_name, extrapath)] = modname
                    self._module_cache[(dotted_name, None)] = modname
                    return modname
        else:
            # Ordinary directory on the path: probe each extension.
            for ext in self._exts:
                candidate = os.path.join(dir, filename) + ext
                if os.path.exists(candidate):
                    modname = self.filenameToModname(candidate)
                    self._module_cache[(dotted_name, extrapath)] = modname
                    self._module_cache[(dotted_name, None)] = modname
                    return modname
    # Not found anywhere.  NOTE: the negative result is not cached.
    return None
[ "def", "isModule", "(", "self", ",", "dotted_name", ",", "extrapath", "=", "None", ")", ":", "try", ":", "return", "self", ".", "_module_cache", "[", "(", "dotted_name", ",", "extrapath", ")", "]", "except", "KeyError", ":", "pass", "if", "dotted_name", ...
Is ``dotted_name`` the name of a module?
[ "Is", "dotted_name", "the", "name", "of", "a", "module?" ]
python
train
tanghaibao/jcvi
jcvi/formats/blast.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L884-L915
def condense(args):
    """
    %prog condense blastfile > blastfile.condensed

    Condense HSPs that belong to the same query-subject pair into one.
    """
    p = OptionParser(condense.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    blast = BlastSlow(blastfile)
    key = lambda x: x.query
    blast.sort(key=key)

    # Bucket HSPs first by query, then by (subject, orientation).
    clusters = []
    for query, hits in groupby(blast, key=key):
        buckets = defaultdict(list)
        for hsp in hits:
            buckets[(hsp.subject, hsp.orientation)].append(hsp)
        clusters.extend(buckets.values())

    # Merge each bucket into a single chained HSP and emit, best score first.
    chained_hsps = [combine_HSPs(cluster) for cluster in clusters]
    chained_hsps.sort(key=lambda x: (x.query, -x.score))
    for chained in chained_hsps:
        print(chained)
[ "def", "condense", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "condense", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", ...
%prog condense blastfile > blastfile.condensed Condense HSPs that belong to the same query-subject pair into one.
[ "%prog", "condense", "blastfile", ">", "blastfile", ".", "condensed" ]
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/isis_state/router_isis_config/is_address_family_v4/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/isis_state/router_isis_config/is_address_family_v4/__init__.py#L563-L586
def _set_redist_isis(self, v, load=False):
    """
    Setter method for redist_isis, mapped from YANG variable
    /isis_state/router_isis_config/is_address_family_v4/redist_isis (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_redist_isis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_redist_isis() directly.

    YANG Description: Redistribution config for IS-IS routes
    into IS-IS between levels
    """
    # Generated-bindings idiom: unwrap a value that carries its underlying type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the supplied value into the generated container class.
        t = YANGDynClass(v,base=redist_isis.redist_isis, is_container='container', presence=False, yang_name="redist-isis", rest_name="redist-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-to-isis-redistribution', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({
            'error-string': """redist_isis must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=redist_isis.redist_isis, is_container='container', presence=False, yang_name="redist-isis", rest_name="redist-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-isis-to-isis-redistribution', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })
    # Store the coerced value and notify the parent tree, if supported.
    self.__redist_isis = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_redist_isis", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "ba...
Setter method for redist_isis, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v4/redist_isis (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_isis is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_isis() directly. YANG Description: Redistribution config for IS-IS routes into IS-IS between levels
[ "Setter", "method", "for", "redist_isis", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "router_isis_config", "/", "is_address_family_v4", "/", "redist_isis", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "confi...
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/openflow_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/openflow_state/__init__.py#L151-L174
def _set_flow(self, v, load=False):
    """
    Setter method for flow, mapped from YANG variable /openflow_state/flow (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_flow is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_flow() directly.

    YANG Description: Flow details
    """
    # Generated-bindings idiom: unwrap a value that carries its underlying type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the supplied value into the generated container class.
        t = YANGDynClass(v,base=flow.flow, is_container='container', presence=False, yang_name="flow", rest_name="flow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({
            'error-string': """flow must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=flow.flow, is_container='container', presence=False, yang_name="flow", rest_name="flow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
        })
    # Store the coerced value and notify the parent tree, if supported.
    self.__flow = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_flow", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for flow, mapped from YANG variable /openflow_state/flow (container) If this variable is read-only (config: false) in the source YANG file, then _set_flow is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_flow() directly. YANG Description: Flow details
[ "Setter", "method", "for", "flow", "mapped", "from", "YANG", "variable", "/", "openflow_state", "/", "flow", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "fil...
python
train
johnnoone/json-spec
src/jsonspec/cli.py
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/cli.py#L45-L61
def format_output(func):
    """
    Format output.

    Decorator for CLI commands: prints the wrapped function's return
    value and exits with status 0, or prints the error in red to stderr
    and exits with status 1 when the function raises.
    """
    # BUG FIX: the original returned `func` before defining `wrapper`,
    # turning the decorator into a no-op and leaving the wrapper dead code.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            response = func(*args, **kwargs)
        except Exception as error:
            # Surface the failure to the user and signal a non-zero exit.
            print(colored(error, 'red'), file=sys.stderr)
            sys.exit(1)
        else:
            print(response)
            sys.exit(0)
    return wrapper
[ "def", "format_output", "(", "func", ")", ":", "return", "func", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "response", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs"...
Format output.
[ "Format", "output", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L645-L650
def _morph(self):
    """Morph this Null executor to a real Executor object."""
    # Keep the batch list across the class swap and re-initialisation.
    saved_batches = self.batches
    self.__class__ = Executor
    self.__init__([])
    self.batches = saved_batches
[ "def", "_morph", "(", "self", ")", ":", "batches", "=", "self", ".", "batches", "self", ".", "__class__", "=", "Executor", "self", ".", "__init__", "(", "[", "]", ")", "self", ".", "batches", "=", "batches" ]
Morph this Null executor to a real Executor object.
[ "Morph", "this", "Null", "executor", "to", "a", "real", "Executor", "object", "." ]
python
train
sony/nnabla
python/src/nnabla/experimental/parametric_function_class/module.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/parametric_function_class/module.py#L97-L120
def load_parameters(self, path):
    """Load parameters from a file with the specified format.

    Args:
        path : path or file object

    Raises:
        ValueError: if a module variable has no matching parameter in
            the loaded file.
    """
    nn.load_parameters(path)
    for entry in self.get_modules():
        if not isinstance(entry, tuple):
            continue
        prefix, module = entry
        for pname, variable in module.__dict__.items():
            if not isinstance(variable, nn.Variable):
                continue
            name = "{}/{}".format(prefix, pname)
            # Substitute: pop the freshly-loaded parameter and copy its
            # data into the module's variable.
            # BUG FIX: the original checked the module variable (which the
            # isinstance guard already proved non-None) instead of the
            # popped parameter, which pop_parameter returns as None when
            # the loaded file lacks `name`.
            loaded = nn.parameter.pop_parameter(name)
            if loaded is None:
                raise ValueError(
                    "Model does not have {} parameter.".format(name))
            variable.d = loaded.d.copy()
            # FIX: removed stray ')' from the original log message.
            nn.logger.info("`{}` loaded.".format(name))
[ "def", "load_parameters", "(", "self", ",", "path", ")", ":", "nn", ".", "load_parameters", "(", "path", ")", "for", "v", "in", "self", ".", "get_modules", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "tuple", ")", ":", "continue", "prefi...
Load parameters from a file with the specified format. Args: path : path or file object
[ "Load", "parameters", "from", "a", "file", "with", "the", "specified", "format", "." ]
python
train
googleapis/google-cloud-python
dns/google/cloud/dns/zone.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/zone.py#L165-L175
def name_server_set(self, value):
    """Update named set of DNS name servers.

    :type value: str
    :param value: (Optional) new title

    :raises: ValueError for invalid value types.
    """
    is_acceptable = value is None or isinstance(value, six.string_types)
    if not is_acceptable:
        raise ValueError("Pass a string, or None")
    self._properties["nameServerSet"] = value
[ "def", "name_server_set", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "value", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Pass a string, or None\"", ")", "self", "...
Update named set of DNS name servers. :type value: str :param value: (Optional) new title :raises: ValueError for invalid value types.
[ "Update", "named", "set", "of", "DNS", "name", "servers", "." ]
python
train
Kozea/wdb
client/wdb/__init__.py
https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L974-L984
def die(self):
    """Time to quit"""
    log.info('Time to die')
    if self.connected:
        # Best-effort notification; the remote end may already be gone.
        try:
            self.send('Die')
        except Exception:
            pass
    sock = self._socket
    if sock:
        sock.close()
    self.pop()
[ "def", "die", "(", "self", ")", ":", "log", ".", "info", "(", "'Time to die'", ")", "if", "self", ".", "connected", ":", "try", ":", "self", ".", "send", "(", "'Die'", ")", "except", "Exception", ":", "pass", "if", "self", ".", "_socket", ":", "sel...
Time to quit
[ "Time", "to", "quit" ]
python
train
goldmann/docker-squash
docker_squash/image.py
https://github.com/goldmann/docker-squash/blob/89e0297942be268791aff2098b7ebfa50d82f8e8/docker_squash/image.py#L501-L521
def _marker_files(self, tar, members): """ Searches for marker files in the specified archive. Docker marker files are files taht have the .wh. prefix in the name. These files mark the corresponding file to be removed (hidden) when we start a container from the image. """ marker_files = {} self.log.debug( "Searching for marker files in '%s' archive..." % tar.name) for member in members: if '.wh.' in member.name: self.log.debug("Found '%s' marker file" % member.name) marker_files[member] = tar.extractfile(member) self.log.debug("Done, found %s files" % len(marker_files)) return marker_files
[ "def", "_marker_files", "(", "self", ",", "tar", ",", "members", ")", ":", "marker_files", "=", "{", "}", "self", ".", "log", ".", "debug", "(", "\"Searching for marker files in '%s' archive...\"", "%", "tar", ".", "name", ")", "for", "member", "in", "member...
Searches for marker files in the specified archive. Docker marker files are files that have the .wh. prefix in the name. These files mark the corresponding file to be removed (hidden) when we start a container from the image.
[ "Searches", "for", "marker", "files", "in", "the", "specified", "archive", "." ]
python
train
aaugustin/websockets
src/websockets/headers.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/headers.py#L54-L66
def parse_OWS(header: str, pos: int) -> int:
    """
    Parse optional whitespace from ``header`` at the given position.

    Return the new position.

    The whitespace itself isn't returned because it isn't significant.
    """
    ows = _OWS_re.match(header, pos)
    # The OWS pattern also matches the empty string, so a match (possibly
    # empty) always exists and its content doesn't matter.
    assert ows is not None
    return ows.end()
[ "def", "parse_OWS", "(", "header", ":", "str", ",", "pos", ":", "int", ")", "->", "int", ":", "# There's always a match, possibly empty, whose content doesn't matter.", "match", "=", "_OWS_re", ".", "match", "(", "header", ",", "pos", ")", "assert", "match", "is...
Parse optional whitespace from ``header`` at the given position. Return the new position. The whitespace itself isn't returned because it isn't significant.
[ "Parse", "optional", "whitespace", "from", "header", "at", "the", "given", "position", "." ]
python
train
Titan-C/slaveparticles
slaveparticles/quantum/dos.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/quantum/dos.py#L11-L14
def bethe_lattice(energy, hopping):
    """Density of states of the Bethe lattice in infinite dimensions.

    Energies outside the band [-2*hopping, 2*hopping] are clipped to the
    band edge, where the density of states is zero.
    """
    half_bandwidth = 2 * hopping
    clipped = np.clip(np.asarray(energy), -half_bandwidth, half_bandwidth)
    return np.sqrt(half_bandwidth**2 - clipped**2) / (2 * np.pi * hopping**2)
[ "def", "bethe_lattice", "(", "energy", ",", "hopping", ")", ":", "energy", "=", "np", ".", "asarray", "(", "energy", ")", ".", "clip", "(", "-", "2", "*", "hopping", ",", "2", "*", "hopping", ")", "return", "np", ".", "sqrt", "(", "4", "*", "hopp...
Bethe lattice in inf dim density of states
[ "Bethe", "lattice", "in", "inf", "dim", "density", "of", "states" ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/export.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/export.py#L26-L85
def avi(self, path, filtered=True, override=False):
    """Exports filtered event images to an avi file

    Parameters
    ----------
    path: str
        Path to the output .avi file. The ending .avi is added
        automatically.
    filtered: bool
        If set to `True`, only the filtered data
        (index in ds._filter) are used.
    override: bool
        If set to `True`, an existing file ``path`` will be overridden.
        If set to `False`, raises `OSError` if ``path`` exists.

    Notes
    -----
    Raises OSError if current dataset does not contain image data
    """
    path = pathlib.Path(path)
    ds = self.rtdc_ds
    # Make sure that path ends with .avi
    if path.suffix != ".avi":
        path = path.with_name(path.name + ".avi")
    # Check if file already exist
    if not override and path.exists():
        raise OSError("File already exists: {}\n".format(
            str(path).encode("ascii", "ignore")) +
            "Please use the `override=True` option.")
    # Start exporting
    if "image" in ds:
        # Open video for writing
        vout = imageio.get_writer(uri=path,
                                  format="FFMPEG",
                                  fps=25,
                                  codec="rawvideo",
                                  pixelformat="yuv420p",
                                  macro_block_size=None,
                                  ffmpeg_log_level="error")
        # write the filtered frames to avi file
        for evid in np.arange(len(ds)):
            # skip frames that were filtered out
            if filtered and not ds._filter[evid]:
                continue
            try:
                image = ds["image"][evid]
            except BaseException:
                # Unreadable frame: warn and keep exporting the rest.
                warnings.warn("Could not read image {}!".format(evid),
                              NoImageWarning)
                continue
            else:
                if np.isnan(image[0, 0]):
                    # This is a nan-valued image
                    image = np.zeros_like(image, dtype=np.uint8)
                # Convert image to RGB
                # NOTE(review): assumes a 2-D grayscale frame; the single
                # channel is replicated three times — confirm upstream shape.
                image = image.reshape(image.shape[0], image.shape[1], 1)
                image = np.repeat(image, 3, axis=2)
                vout.append_data(image)
    else:
        msg = "No image data to export: dataset {} !".format(ds.title)
        raise OSError(msg)
[ "def", "avi", "(", "self", ",", "path", ",", "filtered", "=", "True", ",", "override", "=", "False", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "path", ")", "ds", "=", "self", ".", "rtdc_ds", "# Make sure that path ends with .avi", "if", "path",...
Exports filtered event images to an avi file Parameters ---------- path: str Path to a .tsv file. The ending .tsv is added automatically. filtered: bool If set to `True`, only the filtered data (index in ds._filter) are used. override: bool If set to `True`, an existing file ``path`` will be overridden. If set to `False`, raises `OSError` if ``path`` exists. Notes ----- Raises OSError if current dataset does not contain image data
[ "Exports", "filtered", "event", "images", "to", "an", "avi", "file" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L164-L174
def update_fw_local_result_str(self, os_result=None, dcnm_result=None,
                               dev_result=None):
    """Update the FW result in the dict.

    Only the statuses passed as non-None are written back.
    """
    fw_dict = self.get_fw_dict()
    status_updates = (('os_status', os_result),
                      ('dcnm_status', dcnm_result),
                      ('dev_status', dev_result))
    for status_key, result in status_updates:
        if result is not None:
            fw_dict[status_key] = result
    self.update_fw_dict(fw_dict)
[ "def", "update_fw_local_result_str", "(", "self", ",", "os_result", "=", "None", ",", "dcnm_result", "=", "None", ",", "dev_result", "=", "None", ")", ":", "fw_dict", "=", "self", ".", "get_fw_dict", "(", ")", "if", "os_result", "is", "not", "None", ":", ...
Update the FW result in the dict.
[ "Update", "the", "FW", "result", "in", "the", "dict", "." ]
python
train
klen/muffin-session
muffin_session.py
https://github.com/klen/muffin-session/blob/f1d14d12b7d09d8cc40be14b0dfa0b1e2f4ae8e9/muffin_session.py#L191-L197
def encrypt(self, value):
    """Encrypt session data.

    Returns a "payload|timestamp|signature" string, where payload is the
    base64-encoded value and the signature covers payload + timestamp.
    """
    now = str(int(time.time()))
    payload = base64.b64encode(value.encode(self.encoding))
    signature = create_signature(
        self.secret, payload + now.encode(), encoding=self.encoding)
    return "|".join([payload.decode(self.encoding), now, signature])
[ "def", "encrypt", "(", "self", ",", "value", ")", ":", "timestamp", "=", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", "value", "=", "base64", ".", "b64encode", "(", "value", ".", "encode", "(", "self", ".", "encoding", ")", ")...
Encrypt session data.
[ "Encrypt", "session", "data", "." ]
python
train
bennylope/django-organizations
organizations/base.py
https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/base.py#L311-L318
def name(self):
    """
    Returns the connected user's full name or string representation if
    the full name method is unavailable (e.g. on a custom user class).
    """
    user = self.user
    if hasattr(user, "get_full_name"):
        return user.get_full_name()
    return "{0}".format(user)
[ "def", "name", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "user", ",", "\"get_full_name\"", ")", ":", "return", "self", ".", "user", ".", "get_full_name", "(", ")", "return", "\"{0}\"", ".", "format", "(", "self", ".", "user", ")" ]
Returns the connected user's full name or string representation if the full name method is unavailable (e.g. on a custom user class).
[ "Returns", "the", "connected", "user", "s", "full", "name", "or", "string", "representation", "if", "the", "full", "name", "method", "is", "unavailable", "(", "e", ".", "g", ".", "on", "a", "custom", "user", "class", ")", "." ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/compiler_frontend.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/compiler_frontend.py#L341-L376
def _validate_recurse_directive_types(current_schema_type, field_schema_type, context): """Perform type checks on the enclosing type and the recursed type for a recurse directive. Args: current_schema_type: GraphQLType, the schema type at the current location field_schema_type: GraphQLType, the schema type at the inner scope context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function! """ # Get the set of all allowed types in the current scope. type_hints = context['type_equivalence_hints'].get(field_schema_type) type_hints_inverse = context['type_equivalence_hints_inverse'].get(field_schema_type) allowed_current_types = {field_schema_type} if type_hints and isinstance(type_hints, GraphQLUnionType): allowed_current_types.update(type_hints.types) if type_hints_inverse and isinstance(type_hints_inverse, GraphQLUnionType): allowed_current_types.update(type_hints_inverse.types) # The current scope must be of the same type as the field scope, or an acceptable subtype. current_scope_is_allowed = current_schema_type in allowed_current_types is_implemented_interface = ( isinstance(field_schema_type, GraphQLInterfaceType) and isinstance(current_schema_type, GraphQLObjectType) and field_schema_type in current_schema_type.interfaces ) if not any((current_scope_is_allowed, is_implemented_interface)): raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either ' u'be of the same type as their enclosing scope, a supertype ' u'of the enclosing scope, or be of an interface type that is ' u'implemented by the type of their enclosing scope. ' u'Enclosing scope type: {}, edge type: ' u'{}'.format(current_schema_type, field_schema_type))
[ "def", "_validate_recurse_directive_types", "(", "current_schema_type", ",", "field_schema_type", ",", "context", ")", ":", "# Get the set of all allowed types in the current scope.", "type_hints", "=", "context", "[", "'type_equivalence_hints'", "]", ".", "get", "(", "field_...
Perform type checks on the enclosing type and the recursed type for a recurse directive. Args: current_schema_type: GraphQLType, the schema type at the current location field_schema_type: GraphQLType, the schema type at the inner scope context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function!
[ "Perform", "type", "checks", "on", "the", "enclosing", "type", "and", "the", "recursed", "type", "for", "a", "recurse", "directive", "." ]
python
train
sdispater/pendulum
pendulum/duration.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/duration.py#L19-L36
def _divide_and_round(a, b): """divide a by b and round result to the nearest integer When the ratio is exactly half-way between two integers, the even integer is returned. """ # Based on the reference implementation for divmod_near # in Objects/longobject.c. q, r = divmod(a, b) # round up if either r / b > 0.5, or r / b == 0.5 and q is odd. # The expression r / b > 0.5 is equivalent to 2 * r > b if b is # positive, 2 * r < b if b negative. r *= 2 greater_than_half = r > b if b > 0 else r < b if greater_than_half or r == b and q % 2 == 1: q += 1 return q
[ "def", "_divide_and_round", "(", "a", ",", "b", ")", ":", "# Based on the reference implementation for divmod_near", "# in Objects/longobject.c.", "q", ",", "r", "=", "divmod", "(", "a", ",", "b", ")", "# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.", "# Th...
divide a by b and round result to the nearest integer When the ratio is exactly half-way between two integers, the even integer is returned.
[ "divide", "a", "by", "b", "and", "round", "result", "to", "the", "nearest", "integer" ]
python
train
phoebe-project/phoebe2
phoebe/backend/backends.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/backends.py#L349-L368
def get_packet_and_syns(self, b, compute, times=None, **kwargs):
    """
    get_packet is called by the master and must get all information necessary
    to send to all workers.  The returned packet will be passed on as
    _run_chunk(**packet) with the following exceptions:

    * b: the bundle will be included in the packet serialized
    * compute: the label of the compute options will be included in the packet
    * backend: the class name will be passed on in the packet so the worker
        can call the correct backend
    * all kwargs will be passed on verbatim
    """
    # FIX: replaced the mutable default argument `times=[]` with a None
    # sentinel (backward compatible: omitting `times` still means "no times").
    if times is None:
        times = []
    packet, new_syns = self._get_packet_and_syns(b, compute, times, **kwargs)

    # Forward every caller-supplied option verbatim.
    for k, v in kwargs.items():
        packet[k] = v

    # Serialize the bundle only when it must cross an MPI boundary.
    packet['b'] = b.to_json() if mpi.enabled else b
    packet['compute'] = compute
    packet['backend'] = self.__class__.__name__

    return packet, new_syns
[ "def", "get_packet_and_syns", "(", "self", ",", "b", ",", "compute", ",", "times", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "packet", ",", "new_syns", "=", "self", ".", "_get_packet_and_syns", "(", "b", ",", "compute", ",", "times", ",", "*"...
get_packet is called by the master and must get all information necessary to send to all workers. The returned packet will be passed on as _run_chunk(**packet) with the following exceptions: * b: the bundle will be included in the packet serialized * compute: the label of the compute options will be included in the packet * backend: the class name will be passed on in the packet so the worker can call the correct backend * all kwargs will be passed on verbatim
[ "get_packet", "is", "called", "by", "the", "master", "and", "must", "get", "all", "information", "necessary", "to", "send", "to", "all", "workers", ".", "The", "returned", "packet", "will", "be", "passed", "on", "as", "_run_chunk", "(", "**", "packet", ")"...
python
train
briney/abutils
abutils/core/pair.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/core/pair.py#L278-L291
def _refine_j(seq, species):
    '''
    Completes the 3' end of a truncated sequence with germline
    nucleotides. Input is a MongoDB dict (seq) and the species.
    Mutates seq['vdj_nt'] in place.
    '''
    jgerm = germlines.get_germline(seq['j_gene']['full'], species)
    aln = global_alignment(seq['vdj_nt'], jgerm)
    # Walk the alignment from the 3' end, collecting germline nucleotides
    # wherever the query shows a gap; stop at the first aligned query base.
    tail = []
    for query_nt, germ_nt in zip(aln.aligned_query[::-1],
                                 aln.aligned_target[::-1]):
        if query_nt != '-':
            break
        tail.append(germ_nt)
    seq['vdj_nt'] = seq['vdj_nt'] + ''.join(reversed(tail))
[ "def", "_refine_j", "(", "seq", ",", "species", ")", ":", "jgerm", "=", "germlines", ".", "get_germline", "(", "seq", "[", "'j_gene'", "]", "[", "'full'", "]", ",", "species", ")", "aln", "=", "global_alignment", "(", "seq", "[", "'vdj_nt'", "]", ",", ...
Completes the 3' end of a truncated sequence with germline nucleotides. Input is a MongoDB dict (seq) and the species.
[ "Completes", "the", "3", "end", "of", "a", "a", "truncated", "sequence", "with", "germline", "nucleotides", ".", "Input", "is", "a", "MongoDB", "dict", "(", "seq", ")", "and", "the", "species", "." ]
python
train
horazont/aioxmpp
aioxmpp/forms/fields.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/forms/fields.py#L384-L398
def value(self):
    """
    A tuple of values.

    This attribute can be set with any iterable; the iterable is then
    evaluated into a tuple and stored at the bound field.

    Whenever values are written to this attribute, they are passed
    through the :meth:`~.AbstractCDataType.coerce` method of the
    :attr:`~.AbstractField.type_` of the field.

    To revert the :attr:`value` to its default, use the ``del`` operator.
    """
    try:
        return self._value
    except AttributeError:
        # First access: lazily initialise from the field default.  The
        # assignment goes through this property's setter, which is what
        # stores ``self._value``.  NOTE(review): assumes the setter is
        # defined alongside this getter — confirm.
        self.value = self._field.default()
        return self._value
[ "def", "value", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_value", "except", "AttributeError", ":", "self", ".", "value", "=", "self", ".", "_field", ".", "default", "(", ")", "return", "self", ".", "_value" ]
A tuple of values. This attribute can be set with any iterable; the iterable is then evaluated into a tuple and stored at the bound field. Whenever values are written to this attribute, they are passed through the :meth:`~.AbstractCDataType.coerce` method of the :attr:`~.AbstractField.type_` of the field. To revert the :attr:`value` to its default, use the ``del`` operator.
[ "A", "tuple", "of", "values", ".", "This", "attribute", "can", "be", "set", "with", "any", "iterable", ";", "the", "iterable", "is", "then", "evaluated", "into", "a", "tuple", "and", "stored", "at", "the", "bound", "field", "." ]
python
train
django-fluent/django-fluent-dashboard
fluent_dashboard/items.py
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L101-L122
def get_edited_object(self, request): """ Return the object which is currently being edited. Returns ``None`` if the match could not be made. """ resolvermatch = urls.resolve(request.path_info) if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'): # In "appname_modelname_change" view of the admin. # Extract the appname and model from the url name. # For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view) match = RE_CHANGE_URL.match(resolvermatch.url_name) if not match: return None # object_id can be string (e.g. a country code as PK). try: object_id = resolvermatch.kwargs['object_id'] # Django 2.0+ except KeyError: object_id = resolvermatch.args[0] return self.get_object_by_natural_key(match.group(1), match.group(2), object_id) return None
[ "def", "get_edited_object", "(", "self", ",", "request", ")", ":", "resolvermatch", "=", "urls", ".", "resolve", "(", "request", ".", "path_info", ")", "if", "resolvermatch", ".", "namespace", "==", "'admin'", "and", "resolvermatch", ".", "url_name", "and", ...
Return the object which is currently being edited. Returns ``None`` if the match could not be made.
[ "Return", "the", "object", "which", "is", "currently", "being", "edited", ".", "Returns", "None", "if", "the", "match", "could", "not", "be", "made", "." ]
python
train
openstack/networking-arista
networking_arista/ml2/security_groups/security_group_sync.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/security_group_sync.py#L63-L88
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings): """Update ACL config on a switch to match expected config This is done as follows: 1. Get switch ACL config using show commands 2. Update expected bindings based on switch LAGs 3. Get commands to synchronize switch ACLs 4. Get commands to synchronize switch ACL bindings 5. Run sync commands on switch """ # Get ACL rules and interface mappings from the switch switch_acls, switch_bindings = self._get_dynamic_acl_info(switch_ip) # Adjust expected bindings for switch LAG config expected_bindings = self.adjust_bindings_for_lag(switch_ip, expected_bindings) # Get synchronization commands switch_cmds = list() switch_cmds.extend( self.get_sync_acl_cmds(switch_acls, expected_acls)) switch_cmds.extend( self.get_sync_binding_cmds(switch_bindings, expected_bindings)) # Update switch config self.run_openstack_sg_cmds(switch_cmds, self._switches.get(switch_ip))
[ "def", "synchronize_switch", "(", "self", ",", "switch_ip", ",", "expected_acls", ",", "expected_bindings", ")", ":", "# Get ACL rules and interface mappings from the switch", "switch_acls", ",", "switch_bindings", "=", "self", ".", "_get_dynamic_acl_info", "(", "switch_ip"...
Update ACL config on a switch to match expected config This is done as follows: 1. Get switch ACL config using show commands 2. Update expected bindings based on switch LAGs 3. Get commands to synchronize switch ACLs 4. Get commands to synchronize switch ACL bindings 5. Run sync commands on switch
[ "Update", "ACL", "config", "on", "a", "switch", "to", "match", "expected", "config" ]
python
train
jmbhughes/suvi-trainer
suvitrainer/gui.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L596-L648
def setup_multicolor(self): """ initial setup of multicolor options and variables""" # Setup the options for multicolor multicolormasterframe = tk.Frame(self.tab_configure) channel_choices = sorted(list(self.data.keys())) rgb = ['red', 'green', 'blue'] self.multicolorframes = {color: tk.Frame(multicolormasterframe, bg=color) for color in rgb} self.multicolorlabels = {color: tk.Label(self.multicolorframes[color], text=color, bg=color, width=10) for color in rgb} self.multicolorvars = {color: tk.StringVar() for color in rgb} self.multicolorpower = {color: tk.DoubleVar() for color in rgb} self.multicolormin = {color: tk.DoubleVar() for color in rgb} self.multicolormax = {color: tk.DoubleVar() for color in rgb} self.multicolordropdowns = {color: tk.OptionMenu(self.multicolorframes[color], self.multicolorvars[color], *channel_choices) for color in rgb} self.multicolorscales = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolorpower[color], orient=tk.HORIZONTAL, from_=self.config.ranges['multi_color_power_min'], to_=self.config.ranges['multi_color_power_max'], bg=color, resolution=self.config.ranges['multi_color_power_resolution'], length=200) for color in rgb} self.multicolorminscale = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolormin[color], orient=tk.HORIZONTAL, from_=0, to_=self.config.ranges['multi_color_vmin'], bg=color, resolution=self.config.ranges['multi_color_vresolution'], length=200) for color in rgb} self.multicolormaxscale = {color: tk.Scale(self.multicolorframes[color], variable=self.multicolormax[color], orient=tk.HORIZONTAL, from_=self.config.ranges['multi_color_vmax'], to_=100, bg=color, resolution=self.config.ranges['multi_color_vresolution'], length=200) for color in rgb} for color in rgb: self.multicolorvars[color].set(self.config.products_map[self.config.default[color]]) self.multicolorpower[color].set(self.config.default[color + "_power"]) self.multicolormin[color].set(0) 
self.multicolormax[color].set(100) self.multicolordropdowns[color].config(bg=color, width=10) self.multicolorlabels[color].pack(side=tk.LEFT) self.multicolorscales[color].pack(side=tk.RIGHT) self.multicolormaxscale[color].pack(side=tk.RIGHT) self.multicolorminscale[color].pack(side=tk.RIGHT) self.multicolordropdowns[color].pack() self.multicolorframes[color].pack(fill=tk.BOTH) multicolormasterframe.grid(row=1, column=0, columnspan=5, rowspan=3)
[ "def", "setup_multicolor", "(", "self", ")", ":", "# Setup the options for multicolor", "multicolormasterframe", "=", "tk", ".", "Frame", "(", "self", ".", "tab_configure", ")", "channel_choices", "=", "sorted", "(", "list", "(", "self", ".", "data", ".", "keys"...
initial setup of multicolor options and variables
[ "initial", "setup", "of", "multicolor", "options", "and", "variables" ]
python
train
Toilal/rebulk
rebulk/match.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/match.py#L791-L816
def split(self, seps, predicate=None, index=None): """ Split this match in multiple matches using given separators. :param seps: :type seps: string containing separator characters :return: list of new Match objects :rtype: list """ split_match = copy.deepcopy(self) current_match = split_match ret = [] for i in range(0, len(self.raw)): if self.raw[i] in seps: if not split_match: split_match = copy.deepcopy(current_match) current_match.end = self.start + i else: if split_match: split_match.start = self.start + i current_match = split_match ret.append(split_match) split_match = None return filter_index(ret, predicate, index)
[ "def", "split", "(", "self", ",", "seps", ",", "predicate", "=", "None", ",", "index", "=", "None", ")", ":", "split_match", "=", "copy", ".", "deepcopy", "(", "self", ")", "current_match", "=", "split_match", "ret", "=", "[", "]", "for", "i", "in", ...
Split this match in multiple matches using given separators. :param seps: :type seps: string containing separator characters :return: list of new Match objects :rtype: list
[ "Split", "this", "match", "in", "multiple", "matches", "using", "given", "separators", ".", ":", "param", "seps", ":", ":", "type", "seps", ":", "string", "containing", "separator", "characters", ":", "return", ":", "list", "of", "new", "Match", "objects", ...
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L639-L663
def create_jinja_environment(self): """Creates the Jinja2 environment based on :attr:`jinja_options` and :meth:`select_jinja_autoescape`. Since 0.7 this also adds the Jinja2 globals and filters after initialization. Override this function to customize the behavior. .. versionadded:: 0.5 """ options = dict(self.jinja_options) if 'autoescape' not in options: options['autoescape'] = self.select_jinja_autoescape rv = Environment(self, **options) rv.globals.update( url_for=url_for, get_flashed_messages=get_flashed_messages, config=self.config, # request, session and g are normally added with the # context processor for efficiency reasons but for imported # templates we also want the proxies in there. request=request, session=session, g=g ) rv.filters['tojson'] = json.tojson_filter return rv
[ "def", "create_jinja_environment", "(", "self", ")", ":", "options", "=", "dict", "(", "self", ".", "jinja_options", ")", "if", "'autoescape'", "not", "in", "options", ":", "options", "[", "'autoescape'", "]", "=", "self", ".", "select_jinja_autoescape", "rv",...
Creates the Jinja2 environment based on :attr:`jinja_options` and :meth:`select_jinja_autoescape`. Since 0.7 this also adds the Jinja2 globals and filters after initialization. Override this function to customize the behavior. .. versionadded:: 0.5
[ "Creates", "the", "Jinja2", "environment", "based", "on", ":", "attr", ":", "jinja_options", "and", ":", "meth", ":", "select_jinja_autoescape", ".", "Since", "0", ".", "7", "this", "also", "adds", "the", "Jinja2", "globals", "and", "filters", "after", "init...
python
test
hydpy-dev/hydpy
hydpy/core/timetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L1562-L1591
def assignrepr(self, prefix, style=None, utcoffset=None): """Return a |repr| string with an prefixed assignement. Without option arguments given, printing the returned string looks like: >>> from hydpy import Timegrid >>> timegrid = Timegrid('1996-11-01 00:00:00', ... '1997-11-01 00:00:00', ... '1d') >>> print(timegrid.assignrepr(prefix='timegrid = ')) timegrid = Timegrid('1996-11-01 00:00:00', '1997-11-01 00:00:00', '1d') The optional arguments are passed to method |Date.to_repr| without any modifications: >>> print(timegrid.assignrepr( ... prefix='', style='iso1', utcoffset=120)) Timegrid('1996-11-01T01:00:00+02:00', '1997-11-01T01:00:00+02:00', '1d') """ skip = len(prefix) + 9 blanks = ' ' * skip return (f"{prefix}Timegrid('" f"{self.firstdate.to_string(style, utcoffset)}',\n" f"{blanks}'{self.lastdate.to_string(style, utcoffset)}',\n" f"{blanks}'{str(self.stepsize)}')")
[ "def", "assignrepr", "(", "self", ",", "prefix", ",", "style", "=", "None", ",", "utcoffset", "=", "None", ")", ":", "skip", "=", "len", "(", "prefix", ")", "+", "9", "blanks", "=", "' '", "*", "skip", "return", "(", "f\"{prefix}Timegrid('\"", "f\"{sel...
Return a |repr| string with an prefixed assignement. Without option arguments given, printing the returned string looks like: >>> from hydpy import Timegrid >>> timegrid = Timegrid('1996-11-01 00:00:00', ... '1997-11-01 00:00:00', ... '1d') >>> print(timegrid.assignrepr(prefix='timegrid = ')) timegrid = Timegrid('1996-11-01 00:00:00', '1997-11-01 00:00:00', '1d') The optional arguments are passed to method |Date.to_repr| without any modifications: >>> print(timegrid.assignrepr( ... prefix='', style='iso1', utcoffset=120)) Timegrid('1996-11-01T01:00:00+02:00', '1997-11-01T01:00:00+02:00', '1d')
[ "Return", "a", "|repr|", "string", "with", "an", "prefixed", "assignement", "." ]
python
train
dmlc/gluon-nlp
src/gluonnlp/model/translation.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/translation.py#L129-L143
def encode(self, inputs, states=None, valid_length=None): """Encode the input sequence. Parameters ---------- inputs : NDArray states : list of NDArrays or None, default None valid_length : NDArray or None, default None Returns ------- outputs : list Outputs of the encoder. """ return self.encoder(self.src_embed(inputs), states, valid_length)
[ "def", "encode", "(", "self", ",", "inputs", ",", "states", "=", "None", ",", "valid_length", "=", "None", ")", ":", "return", "self", ".", "encoder", "(", "self", ".", "src_embed", "(", "inputs", ")", ",", "states", ",", "valid_length", ")" ]
Encode the input sequence. Parameters ---------- inputs : NDArray states : list of NDArrays or None, default None valid_length : NDArray or None, default None Returns ------- outputs : list Outputs of the encoder.
[ "Encode", "the", "input", "sequence", "." ]
python
train
heroku/sf-suds
suds/sax/__init__.py
https://github.com/heroku/sf-suds/blob/44b6743a45ff4447157605d6fecc9bf5922ce68a/suds/sax/__init__.py#L40-L53
def splitPrefix(name): """ Split the name into a tuple (I{prefix}, I{name}). The first element in the tuple is I{None} when the name does't have a prefix. @param name: A node name containing an optional prefix. @type name: basestring @return: A tuple containing the (2) parts of I{name} @rtype: (I{prefix}, I{name}) """ if isinstance(name, basestring) \ and ':' in name: return tuple(name.split(':', 1)) else: return (None, name)
[ "def", "splitPrefix", "(", "name", ")", ":", "if", "isinstance", "(", "name", ",", "basestring", ")", "and", "':'", "in", "name", ":", "return", "tuple", "(", "name", ".", "split", "(", "':'", ",", "1", ")", ")", "else", ":", "return", "(", "None",...
Split the name into a tuple (I{prefix}, I{name}). The first element in the tuple is I{None} when the name does't have a prefix. @param name: A node name containing an optional prefix. @type name: basestring @return: A tuple containing the (2) parts of I{name} @rtype: (I{prefix}, I{name})
[ "Split", "the", "name", "into", "a", "tuple", "(", "I", "{", "prefix", "}", "I", "{", "name", "}", ")", ".", "The", "first", "element", "in", "the", "tuple", "is", "I", "{", "None", "}", "when", "the", "name", "does", "t", "have", "a", "prefix", ...
python
train
mk-fg/feedjack
feedjack/models.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/models.py#L783-L814
def _filtering_result_checked(self, by_or): '''Check if post passes all / at_least_one (by_or parameter) filter(s). Filters are evaluated on only-if-necessary ("lazy") basis.''' filters, results = it.imap(set, ( self.feed.filters.all(), self.filtering_results.values_list('filter', flat=True) )) # Check if conclusion can already be made, based on cached results. if results.issubset(filters): # If at least one failed/passed test is already there, and/or outcome is defined. try: return self._filtering_result(by_or) except IndexError: # inconclusive until results are consistent if filters == results: return not by_or # Consistency check / update. if filters != results: # Drop obsolete (removed, unbound from feed) # filters' results (they WILL corrupt outcome). self.filtering_results.filter(filter__in=results.difference(filters)).delete() # One more try, now that results are only from feed filters' subset. try: return self._filtering_result(by_or) except IndexError: pass # Check if any filter-results are not cached yet, create them (perform actual filtering). # Note that independent filters applied first, since # crossrefs should be more resource-hungry in general. for filter_obj in sorted(filters.difference(results), key=op.attrgetter('base.crossref')): filter_op = FilterResult(filter=filter_obj, post=self, result=filter_obj.handler(self)) filter_op.save() if filter_op.result == by_or: return by_or # return as soon as first passed / failed # Final result try: return self._filtering_result(by_or) except IndexError: return not by_or
[ "def", "_filtering_result_checked", "(", "self", ",", "by_or", ")", ":", "filters", ",", "results", "=", "it", ".", "imap", "(", "set", ",", "(", "self", ".", "feed", ".", "filters", ".", "all", "(", ")", ",", "self", ".", "filtering_results", ".", "...
Check if post passes all / at_least_one (by_or parameter) filter(s). Filters are evaluated on only-if-necessary ("lazy") basis.
[ "Check", "if", "post", "passes", "all", "/", "at_least_one", "(", "by_or", "parameter", ")", "filter", "(", "s", ")", ".", "Filters", "are", "evaluated", "on", "only", "-", "if", "-", "necessary", "(", "lazy", ")", "basis", "." ]
python
train
a1ezzz/wasp-general
wasp_general/crypto/hash.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/crypto/hash.py#L161-L169
def generator_family(cls): """ :meth:`.WHashGeneratorProto.generator_family` implementation """ if cls.__generator_family__ is not None: if isinstance(cls.__generator_family__, str) is False: raise TypeError('"__generator_class__" if defined must be a str instance') if cls.__generator_family__ is not None: return cls.__generator_family__.upper()
[ "def", "generator_family", "(", "cls", ")", ":", "if", "cls", ".", "__generator_family__", "is", "not", "None", ":", "if", "isinstance", "(", "cls", ".", "__generator_family__", ",", "str", ")", "is", "False", ":", "raise", "TypeError", "(", "'\"__generator_...
:meth:`.WHashGeneratorProto.generator_family` implementation
[ ":", "meth", ":", ".", "WHashGeneratorProto", ".", "generator_family", "implementation" ]
python
train
Jaymon/captain
captain/__init__.py
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/__init__.py#L198-L202
def body(self): """get the contents of the script""" if not hasattr(self, '_body'): self._body = inspect.getsource(self.module) return self._body
[ "def", "body", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_body'", ")", ":", "self", ".", "_body", "=", "inspect", ".", "getsource", "(", "self", ".", "module", ")", "return", "self", ".", "_body" ]
get the contents of the script
[ "get", "the", "contents", "of", "the", "script" ]
python
valid
BlueBrain/NeuroM
neurom/fst/sectionfunc.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/sectionfunc.py#L101-L105
def section_meander_angles(section): '''Inter-segment opening angles in a section''' p = section.points return [mm.angle_3points(p[i - 1], p[i - 2], p[i]) for i in range(2, len(p))]
[ "def", "section_meander_angles", "(", "section", ")", ":", "p", "=", "section", ".", "points", "return", "[", "mm", ".", "angle_3points", "(", "p", "[", "i", "-", "1", "]", ",", "p", "[", "i", "-", "2", "]", ",", "p", "[", "i", "]", ")", "for",...
Inter-segment opening angles in a section
[ "Inter", "-", "segment", "opening", "angles", "in", "a", "section" ]
python
train
klavinslab/coral
coral/design/_oligo_synthesis/oligo_assembly.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/design/_oligo_synthesis/oligo_assembly.py#L412-L432
def _adjust_overlap(positions_list, index, direction): '''Increase overlap to the right or left of an index. :param positions_list: list of overlap positions :type positions_list: list :param index: index of the overlap to increase. :type index: int :param direction: which side of the overlap to increase - left or right. :type direction: str :returns: A list of overlap positions (2-element lists) :rtype: list :raises: ValueError if direction isn't \'left\' or \'right\'. ''' if direction == 'left': positions_list[index + 1] -= 1 elif direction == 'right': positions_list[index] += 1 else: raise ValueError('direction must be \'left\' or \'right\'.') return positions_list
[ "def", "_adjust_overlap", "(", "positions_list", ",", "index", ",", "direction", ")", ":", "if", "direction", "==", "'left'", ":", "positions_list", "[", "index", "+", "1", "]", "-=", "1", "elif", "direction", "==", "'right'", ":", "positions_list", "[", "...
Increase overlap to the right or left of an index. :param positions_list: list of overlap positions :type positions_list: list :param index: index of the overlap to increase. :type index: int :param direction: which side of the overlap to increase - left or right. :type direction: str :returns: A list of overlap positions (2-element lists) :rtype: list :raises: ValueError if direction isn't \'left\' or \'right\'.
[ "Increase", "overlap", "to", "the", "right", "or", "left", "of", "an", "index", "." ]
python
train
luckydonald/pytgbot
code_generation/code_generator_template.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/code_generator_template.py#L86-L115
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message"): """ Live template for pycharm: y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$") """ variables_needed = [] variables_optional = [] imports = set() if params_string: # WHITELISTED_FUNCS have no params for param in params_string.split("\n"): variable = parse_param_types(param) # any variable.types has always_is_value => lenght must be 1. assert (not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1) if variable.optional: variables_optional.append(variable) else: variables_needed.append(variable) # end if imports.update(variable.all_imports) # end for # end if imports = list(imports) imports.sort() returns = Variable(types=as_types(return_type, variable_name="return type"), description=returns) func_object = Function( imports=imports, api_name=command, link=link, description=description, returns=returns, parameters=variables_needed, keywords=variables_optional ) return func_object
[ "def", "func", "(", "command", ",", "description", ",", "link", ",", "params_string", ",", "returns", "=", "\"On success, the sent Message is returned.\"", ",", "return_type", "=", "\"Message\"", ")", ":", "variables_needed", "=", "[", "]", "variables_optional", "="...
Live template for pycharm: y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
[ "Live", "template", "for", "pycharm", ":" ]
python
train
jxtech/wechatpy
wechatpy/client/api/merchant/__init__.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/merchant/__init__.py#L398-L410
def set_delivery(self, order_id, delivery_data): """ 修改货架 :param order_id: 订单ID :param delivery_data: 商品物流信息 :return: 返回的 JSON 数据包 """ delivery_data['order_id'] = order_id return self._post( 'merchant/shelf/setdeliverymod', data=delivery_data )
[ "def", "set_delivery", "(", "self", ",", "order_id", ",", "delivery_data", ")", ":", "delivery_data", "[", "'order_id'", "]", "=", "order_id", "return", "self", ".", "_post", "(", "'merchant/shelf/setdeliverymod'", ",", "data", "=", "delivery_data", ")" ]
修改货架 :param order_id: 订单ID :param delivery_data: 商品物流信息 :return: 返回的 JSON 数据包
[ "修改货架" ]
python
train
Alignak-monitoring/alignak
alignak/dependencynode.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/dependencynode.py#L259-L354
def get_complex_xof_node_state(self, hosts, services): # pylint: disable=too-many-locals, too-many-return-statements, too-many-branches """Get state , handle X of aggregation :: * Count the number of OK, WARNING, CRITICAL * Try too apply, in this order, Critical, Warning, OK rule * Return the code for first match (2, 1, 0) * If no rule apply, return OK for simple X of and worst state for multiple X of :param hosts: host objects :param services: service objects :return: 0, 1 or 2 :rtype: int TODO: Looks like the last if does the opposite of what the comment says """ # First we get the state of all our sons states = [s.get_state(hosts, services) for s in self.sons] # We search for OK, WARNING or CRITICAL applications # And we will choice between them nb_search_ok = self.of_values[0] nb_search_warn = self.of_values[1] nb_search_crit = self.of_values[2] # We look for each application nb_sons = len(states) nb_ok = nb_warn = nb_crit = 0 for state in states: if state == 0: nb_ok += 1 elif state == 1: nb_warn += 1 elif state == 2: nb_crit += 1 def get_state_for(nb_tot, nb_real, nb_search): """Check if there is enough value to apply this rule :param nb_tot: total number of value :type nb_tot: int :param nb_real: number of value that apply for this rule :type nb_real: int :param nb_search: max value to apply this rule (can be a percent) :type nb_search: int :return: True if the rule is effective (roughly nb_real > nb_search), False otherwise :rtype: bool """ if nb_search.endswith('%'): nb_search = int(nb_search[:-1]) if nb_search < 0: # nb_search is negative, so + nb_search = max(100 + nb_search, 0) apply_for = float(nb_real) / nb_tot * 100 >= nb_search else: nb_search = int(nb_search) if nb_search < 0: # nb_search is negative, so + nb_search = max(nb_tot + nb_search, 0) apply_for = nb_real >= nb_search return apply_for ok_apply = get_state_for(nb_sons, nb_ok, nb_search_ok) warn_apply = get_state_for(nb_sons, nb_warn + nb_crit, nb_search_warn) crit_apply = 
get_state_for(nb_sons, nb_crit, nb_search_crit) # return the worst state that apply if crit_apply: if self.not_value: return self.get_reverse_state(2) return 2 if warn_apply: if self.not_value: return self.get_reverse_state(1) return 1 if ok_apply: if self.not_value: return self.get_reverse_state(0) return 0 # Maybe even OK is not possible, if so, it depends if the admin # ask a simple form Xof: or a multiple one A,B,Cof: # the simple should give OK, the mult should give the worst state if self.is_of_mul: if self.not_value: return self.get_reverse_state(0) return 0 if 2 in states: worst_state = 2 else: worst_state = max(states) if self.not_value: return self.get_reverse_state(worst_state) return worst_state
[ "def", "get_complex_xof_node_state", "(", "self", ",", "hosts", ",", "services", ")", ":", "# pylint: disable=too-many-locals, too-many-return-statements, too-many-branches", "# First we get the state of all our sons", "states", "=", "[", "s", ".", "get_state", "(", "hosts", ...
Get state , handle X of aggregation :: * Count the number of OK, WARNING, CRITICAL * Try too apply, in this order, Critical, Warning, OK rule * Return the code for first match (2, 1, 0) * If no rule apply, return OK for simple X of and worst state for multiple X of :param hosts: host objects :param services: service objects :return: 0, 1 or 2 :rtype: int TODO: Looks like the last if does the opposite of what the comment says
[ "Get", "state", "handle", "X", "of", "aggregation", "::" ]
python
train
ray-project/ray
python/ray/worker.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L759-L794
def _get_arguments_for_execution(self, function_name, serialized_args): """Retrieve the arguments for the remote function. This retrieves the values for the arguments to the remote function that were passed in as object IDs. Arguments that were passed by value are not changed. This is called by the worker that is executing the remote function. Args: function_name (str): The name of the remote function whose arguments are being retrieved. serialized_args (List): The arguments to the function. These are either strings representing serialized objects passed by value or they are ray.ObjectIDs. Returns: The retrieved arguments in addition to the arguments that were passed by value. Raises: RayError: This exception is raised if a task that created one of the arguments failed. """ arguments = [] for (i, arg) in enumerate(serialized_args): if isinstance(arg, ObjectID): # get the object from the local object store argument = self.get_object([arg])[0] if isinstance(argument, RayError): raise argument else: # pass the argument by value argument = arg arguments.append(argument) return arguments
[ "def", "_get_arguments_for_execution", "(", "self", ",", "function_name", ",", "serialized_args", ")", ":", "arguments", "=", "[", "]", "for", "(", "i", ",", "arg", ")", "in", "enumerate", "(", "serialized_args", ")", ":", "if", "isinstance", "(", "arg", "...
Retrieve the arguments for the remote function. This retrieves the values for the arguments to the remote function that were passed in as object IDs. Arguments that were passed by value are not changed. This is called by the worker that is executing the remote function. Args: function_name (str): The name of the remote function whose arguments are being retrieved. serialized_args (List): The arguments to the function. These are either strings representing serialized objects passed by value or they are ray.ObjectIDs. Returns: The retrieved arguments in addition to the arguments that were passed by value. Raises: RayError: This exception is raised if a task that created one of the arguments failed.
[ "Retrieve", "the", "arguments", "for", "the", "remote", "function", "." ]
python
train
janpipek/physt
physt/histogram_base.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/histogram_base.py#L522-L532
def has_same_bins(self, other: "HistogramBase") -> bool: """Whether two histograms share the same binning.""" if self.shape != other.shape: return False elif self.ndim == 1: return np.allclose(self.bins, other.bins) elif self.ndim > 1: for i in range(self.ndim): if not np.allclose(self.bins[i], other.bins[i]): return False return True
[ "def", "has_same_bins", "(", "self", ",", "other", ":", "\"HistogramBase\"", ")", "->", "bool", ":", "if", "self", ".", "shape", "!=", "other", ".", "shape", ":", "return", "False", "elif", "self", ".", "ndim", "==", "1", ":", "return", "np", ".", "a...
Whether two histograms share the same binning.
[ "Whether", "two", "histograms", "share", "the", "same", "binning", "." ]
python
train
cmap/cmapPy
cmapPy/set_io/gmt.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/gmt.py#L93-L109
def write(gmt, out_path): """ Write a GMT to a text file. Args: gmt (GMT object): list of dicts out_path (string): output path Returns: None """ with open(out_path, 'w') as f: for _, each_dict in enumerate(gmt): f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t') f.write(each_dict[SET_DESC_FIELD] + '\t') f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]])) f.write('\n')
[ "def", "write", "(", "gmt", ",", "out_path", ")", ":", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "f", ":", "for", "_", ",", "each_dict", "in", "enumerate", "(", "gmt", ")", ":", "f", ".", "write", "(", "each_dict", "[", "SET_IDENTIFIE...
Write a GMT to a text file. Args: gmt (GMT object): list of dicts out_path (string): output path Returns: None
[ "Write", "a", "GMT", "to", "a", "text", "file", "." ]
python
train
bukun/TorCMS
ext_script/autocrud/func_gen_html.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/ext_script/autocrud/func_gen_html.py#L69-L92
def gen_select_view(sig_dic): ''' HTML view, for selection. ''' option_str = '' dic_tmp = sig_dic['dic'] for key, val in dic_tmp.items(): tmp_str = ''' {{% if '{sig_en}' in postinfo.extinfo %}} {{% set tmp_var = postinfo.extinfo["{sig_en}"] %}} {{% if tmp_var == "{sig_key}" %}} {sig_dic} {{% end %}} {{% end %}} '''.format(sig_en=sig_dic['en'], sig_key=key, sig_dic=val) option_str += tmp_str return ''' <div class="row"> <div class="col-sm-4"><span class="des"><strong>{sig_zh}</strong></span></div> <div class="col-sm-8"> {option_str} </div></div> '''.format(sig_zh=sig_dic['zh'], option_str=option_str)
[ "def", "gen_select_view", "(", "sig_dic", ")", ":", "option_str", "=", "''", "dic_tmp", "=", "sig_dic", "[", "'dic'", "]", "for", "key", ",", "val", "in", "dic_tmp", ".", "items", "(", ")", ":", "tmp_str", "=", "'''\n {{% if '{sig_en}' in postinfo.exti...
HTML view, for selection.
[ "HTML", "view", "for", "selection", "." ]
python
train
ethereum/py-evm
eth/vm/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/base.py#L710-L727
def generate_block_from_parent_header_and_coinbase(cls, parent_header: BlockHeader, coinbase: Address) -> BaseBlock: """ Generate block from parent header and coinbase. """ block_header = generate_header_from_parent_header( cls.compute_difficulty, parent_header, coinbase, timestamp=parent_header.timestamp + 1, ) block = cls.get_block_class()( block_header, transactions=[], uncles=[], ) return block
[ "def", "generate_block_from_parent_header_and_coinbase", "(", "cls", ",", "parent_header", ":", "BlockHeader", ",", "coinbase", ":", "Address", ")", "->", "BaseBlock", ":", "block_header", "=", "generate_header_from_parent_header", "(", "cls", ".", "compute_difficulty", ...
Generate block from parent header and coinbase.
[ "Generate", "block", "from", "parent", "header", "and", "coinbase", "." ]
python
train
UCSBarchlab/PyRTL
pyrtl/compilesim.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/compilesim.py#L236-L242
def _makeini(self, w, v): """C initializer string for a wire with a given value.""" pieces = [] for n in range(self._limbs(w)): pieces.append(hex(v & ((1 << 64)-1))) v >>= 64 return ','.join(pieces).join('{}')
[ "def", "_makeini", "(", "self", ",", "w", ",", "v", ")", ":", "pieces", "=", "[", "]", "for", "n", "in", "range", "(", "self", ".", "_limbs", "(", "w", ")", ")", ":", "pieces", ".", "append", "(", "hex", "(", "v", "&", "(", "(", "1", "<<", ...
C initializer string for a wire with a given value.
[ "C", "initializer", "string", "for", "a", "wire", "with", "a", "given", "value", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L1452-L1459
def setCurrentIndex(self, index): """ Sets the current index on self and on the tab bar to keep the two insync. :param index | <int> """ super(XViewPanel, self).setCurrentIndex(index) self.tabBar().setCurrentIndex(index)
[ "def", "setCurrentIndex", "(", "self", ",", "index", ")", ":", "super", "(", "XViewPanel", ",", "self", ")", ".", "setCurrentIndex", "(", "index", ")", "self", ".", "tabBar", "(", ")", ".", "setCurrentIndex", "(", "index", ")" ]
Sets the current index on self and on the tab bar to keep the two insync. :param index | <int>
[ "Sets", "the", "current", "index", "on", "self", "and", "on", "the", "tab", "bar", "to", "keep", "the", "two", "insync", "." ]
python
train
versae/neo4j-rest-client
neo4jrestclient/client.py
https://github.com/versae/neo4j-rest-client/blob/b03c09c8f598fa4dbad8ea8998ffb1c885805074/neo4jrestclient/client.py#L1788-L1813
def query(self, *args): """ Query a fulltext index by key and query or just a plain Lucene query, i1 = gdb.nodes.indexes.get('people',type='fulltext', provider='lucene') i1.query('name','do*') i1.query('name:do*') In this example, the last two line are equivalent. """ if not args or len(args) > 2: raise TypeError('query() takes 2 or 3 arguments (a query or a key ' 'and a query) (%d given)' % (len(args) + 1)) elif len(args) == 1: query, = args return self.get('text').query(text_type(query)) else: key, query = args index_key = self.get(key) if isinstance(query, string_types): return index_key.query(query) else: if query.fielded: raise ValueError('Queries with an included key should ' 'not include a field.') return index_key.query(text_type(query))
[ "def", "query", "(", "self", ",", "*", "args", ")", ":", "if", "not", "args", "or", "len", "(", "args", ")", ">", "2", ":", "raise", "TypeError", "(", "'query() takes 2 or 3 arguments (a query or a key '", "'and a query) (%d given)'", "%", "(", "len", "(", "...
Query a fulltext index by key and query or just a plain Lucene query, i1 = gdb.nodes.indexes.get('people',type='fulltext', provider='lucene') i1.query('name','do*') i1.query('name:do*') In this example, the last two line are equivalent.
[ "Query", "a", "fulltext", "index", "by", "key", "and", "query", "or", "just", "a", "plain", "Lucene", "query" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_policystream/policystream.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_policystream/policystream.py#L298-L325
def delta_commits(self, baseline, target): """Show policies changes between arbitrary commits. The common use form is comparing the heads of two branches. """ baseline_files = self._get_policy_fents(baseline.tree) target_files = self._get_policy_fents(target.tree) baseline_policies = PolicyCollection() target_policies = PolicyCollection() # Added for f in set(target_files) - set(baseline_files): target_policies += self._policy_file_rev(f, target) # Removed for f in set(baseline_files) - set(target_files): baseline_policies += self._policy_file_rev(f, baseline) # Modified for f in set(baseline_files).intersection(target_files): if baseline_files[f].hex == target_files[f].hex: continue target_policies += self._policy_file_rev(f, target) baseline_policies += self._policy_file_rev(f, baseline) return CollectionDelta( baseline_policies, target_policies, target, self.repo_uri).delta()
[ "def", "delta_commits", "(", "self", ",", "baseline", ",", "target", ")", ":", "baseline_files", "=", "self", ".", "_get_policy_fents", "(", "baseline", ".", "tree", ")", "target_files", "=", "self", ".", "_get_policy_fents", "(", "target", ".", "tree", ")",...
Show policies changes between arbitrary commits. The common use form is comparing the heads of two branches.
[ "Show", "policies", "changes", "between", "arbitrary", "commits", "." ]
python
train
valency/deeputils
deeputils/common.py
https://github.com/valency/deeputils/blob/27efd91668de0223ed8b07cfadf2151632521520/deeputils/common.py#L85-L92
def dict_sort(d, k): """ Sort a dictionary list by key :param d: dictionary list :param k: key :return: sorted dictionary list """ return sorted(d.copy(), key=lambda i: i[k])
[ "def", "dict_sort", "(", "d", ",", "k", ")", ":", "return", "sorted", "(", "d", ".", "copy", "(", ")", ",", "key", "=", "lambda", "i", ":", "i", "[", "k", "]", ")" ]
Sort a dictionary list by key :param d: dictionary list :param k: key :return: sorted dictionary list
[ "Sort", "a", "dictionary", "list", "by", "key", ":", "param", "d", ":", "dictionary", "list", ":", "param", "k", ":", "key", ":", "return", ":", "sorted", "dictionary", "list" ]
python
valid
rcarmo/pngcanvas
pngcanvas.py
https://github.com/rcarmo/pngcanvas/blob/e2eaa0d5ba353005b3b658f6ee453c1956340670/pngcanvas.py#L133-L139
def vertical_gradient(self, x0, y0, x1, y1, start, end): """Draw a vertical gradient""" x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1) grad = gradient_list(start, end, y1 - y0) for x in range(x0, x1 + 1): for y in range(y0, y1 + 1): self.point(x, y, grad[y - y0])
[ "def", "vertical_gradient", "(", "self", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "start", ",", "end", ")", ":", "x0", ",", "y0", ",", "x1", ",", "y1", "=", "self", ".", "rect_helper", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")", ...
Draw a vertical gradient
[ "Draw", "a", "vertical", "gradient" ]
python
train
limix/numpy-sugar
numpy_sugar/_array.py
https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/_array.py#L132-L149
def unique(ar): r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements. """ import dask.array as da if isinstance(ar, da.core.Array): return da.unique(ar) return _unique(ar)
[ "def", "unique", "(", "ar", ")", ":", "import", "dask", ".", "array", "as", "da", "if", "isinstance", "(", "ar", ",", "da", ".", "core", ".", "Array", ")", ":", "return", "da", ".", "unique", "(", "ar", ")", "return", "_unique", "(", "ar", ")" ]
r"""Find the unique elements of an array. It uses ``dask.array.unique`` if necessary. Args: ar (array_like): Input array. Returns: array_like: the sorted unique elements.
[ "r", "Find", "the", "unique", "elements", "of", "an", "array", "." ]
python
train
angr/angr
angr/annocfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/annocfg.py#L262-L292
def successor_func(self, path): """ Callback routine that takes in a path, and returns all feasible successors to path group. This callback routine should be passed to the keyword argument "successor_func" of PathGroup.step(). :param path: A Path instance. :return: A list of all feasible Path successors. """ whitelist = self.get_whitelisted_statements(path.addr) last_stmt = self.get_last_statement_index(path.addr) # pass in those arguments successors = path.step( stmt_whitelist=whitelist, last_stmt=None ) # further filter successors based on the annotated CFG taken_successors = [ ] for suc in successors: try: taken = self.should_take_exit(path.addr, suc.addr) except AngrExitError: l.debug("Got an unknown exit that AnnotatedCFG does not know about: %#x -> %#x", path.addr, suc.addr) continue if taken: taken_successors.append(suc) return taken_successors
[ "def", "successor_func", "(", "self", ",", "path", ")", ":", "whitelist", "=", "self", ".", "get_whitelisted_statements", "(", "path", ".", "addr", ")", "last_stmt", "=", "self", ".", "get_last_statement_index", "(", "path", ".", "addr", ")", "# pass in those ...
Callback routine that takes in a path, and returns all feasible successors to path group. This callback routine should be passed to the keyword argument "successor_func" of PathGroup.step(). :param path: A Path instance. :return: A list of all feasible Path successors.
[ "Callback", "routine", "that", "takes", "in", "a", "path", "and", "returns", "all", "feasible", "successors", "to", "path", "group", ".", "This", "callback", "routine", "should", "be", "passed", "to", "the", "keyword", "argument", "successor_func", "of", "Path...
python
train
odlgroup/odl
odl/set/space.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L937-L946
def divide(self, other, out=None): """Return ``out = self / other``. If ``out`` is provided, the result is written to it. See Also -------- LinearSpace.divide """ return self.space.divide(self, other, out=out)
[ "def", "divide", "(", "self", ",", "other", ",", "out", "=", "None", ")", ":", "return", "self", ".", "space", ".", "divide", "(", "self", ",", "other", ",", "out", "=", "out", ")" ]
Return ``out = self / other``. If ``out`` is provided, the result is written to it. See Also -------- LinearSpace.divide
[ "Return", "out", "=", "self", "/", "other", "." ]
python
train
wandb/client
wandb/util.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/util.py#L637-L652
def downsample(values, target_length): """Downsamples 1d values to target_length, including start and end. Algorithm just rounds index down. Values can be any sequence, including a generator. """ assert target_length > 1 values = list(values) if len(values) < target_length: return values ratio = float(len(values) - 1) / (target_length - 1) result = [] for i in range(target_length): result.append(values[int(i * ratio)]) return result
[ "def", "downsample", "(", "values", ",", "target_length", ")", ":", "assert", "target_length", ">", "1", "values", "=", "list", "(", "values", ")", "if", "len", "(", "values", ")", "<", "target_length", ":", "return", "values", "ratio", "=", "float", "("...
Downsamples 1d values to target_length, including start and end. Algorithm just rounds index down. Values can be any sequence, including a generator.
[ "Downsamples", "1d", "values", "to", "target_length", "including", "start", "and", "end", "." ]
python
train
PyCQA/pyflakes
pyflakes/checker.py
https://github.com/PyCQA/pyflakes/blob/232cb1d27ee134bf96adc8f37e53589dc259b159/pyflakes/checker.py#L1309-L1337
def GLOBAL(self, node): """ Keep track of globals declarations. """ global_scope_index = 1 if self._in_doctest() else 0 global_scope = self.scopeStack[global_scope_index] # Ignore 'global' statement in global scope. if self.scope is not global_scope: # One 'global' statement can bind multiple (comma-delimited) names. for node_name in node.names: node_value = Assignment(node_name, node) # Remove UndefinedName messages already reported for this name. # TODO: if the global is not used in this scope, it does not # become a globally defined name. See test_unused_global. self.messages = [ m for m in self.messages if not isinstance(m, messages.UndefinedName) or m.message_args[0] != node_name] # Bind name to global scope if it doesn't exist already. global_scope.setdefault(node_name, node_value) # Bind name to non-global scopes, but as already "used". node_value.used = (global_scope, node) for scope in self.scopeStack[global_scope_index + 1:]: scope[node_name] = node_value
[ "def", "GLOBAL", "(", "self", ",", "node", ")", ":", "global_scope_index", "=", "1", "if", "self", ".", "_in_doctest", "(", ")", "else", "0", "global_scope", "=", "self", ".", "scopeStack", "[", "global_scope_index", "]", "# Ignore 'global' statement in global s...
Keep track of globals declarations.
[ "Keep", "track", "of", "globals", "declarations", "." ]
python
train
cenkalti/kuyruk
kuyruk/kuyruk.py
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/kuyruk.py#L65-L87
def connection(self) -> Iterator[amqp.Connection]: """Returns a new connection as a context manager.""" TCP_USER_TIMEOUT = 18 # constant is available on Python 3.6+. socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT} if sys.platform.startswith('darwin'): del socket_settings[TCP_USER_TIMEOUT] conn = amqp.Connection( host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT), userid=self.config.RABBIT_USER, password=self.config.RABBIT_PASSWORD, virtual_host=self.config.RABBIT_VIRTUAL_HOST, connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT, read_timeout=self.config.RABBIT_READ_TIMEOUT, write_timeout=self.config.RABBIT_WRITE_TIMEOUT, socket_settings=socket_settings, heartbeat=self.config.RABBIT_HEARTBEAT, ) conn.connect() logger.info('Connected to RabbitMQ') with _safe_close(conn): yield conn
[ "def", "connection", "(", "self", ")", "->", "Iterator", "[", "amqp", ".", "Connection", "]", ":", "TCP_USER_TIMEOUT", "=", "18", "# constant is available on Python 3.6+.", "socket_settings", "=", "{", "TCP_USER_TIMEOUT", ":", "self", ".", "config", ".", "TCP_USER...
Returns a new connection as a context manager.
[ "Returns", "a", "new", "connection", "as", "a", "context", "manager", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L14698-L14722
def vlcom(a, v1, b, v2): """ Compute a vector linear combination of two double precision, 3-dimensional vectors. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vlcom_c.html :param a: Coefficient of v1 :type a: float :param v1: Vector in 3-space :type v1: 3-Element Array of floats :param b: Coefficient of v2 :type b: float :param v2: Vector in 3-space :type v2: 3-Element Array of floats :return: Linear Vector Combination a*v1 + b*v2. :rtype: 3-Element Array of floats """ v1 = stypes.toDoubleVector(v1) v2 = stypes.toDoubleVector(v2) sumv = stypes.emptyDoubleVector(3) a = ctypes.c_double(a) b = ctypes.c_double(b) libspice.vlcom_c(a, v1, b, v2, sumv) return stypes.cVectorToPython(sumv)
[ "def", "vlcom", "(", "a", ",", "v1", ",", "b", ",", "v2", ")", ":", "v1", "=", "stypes", ".", "toDoubleVector", "(", "v1", ")", "v2", "=", "stypes", ".", "toDoubleVector", "(", "v2", ")", "sumv", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ...
Compute a vector linear combination of two double precision, 3-dimensional vectors. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vlcom_c.html :param a: Coefficient of v1 :type a: float :param v1: Vector in 3-space :type v1: 3-Element Array of floats :param b: Coefficient of v2 :type b: float :param v2: Vector in 3-space :type v2: 3-Element Array of floats :return: Linear Vector Combination a*v1 + b*v2. :rtype: 3-Element Array of floats
[ "Compute", "a", "vector", "linear", "combination", "of", "two", "double", "precision", "3", "-", "dimensional", "vectors", "." ]
python
train
twneale/uni
uni/checker.py
https://github.com/twneale/uni/blob/1d2f3ef2cb97f544e878b8a1cde37ca8420af4e5/uni/checker.py#L43-L63
def path_eval(self, obj, keypath): '''Given an object and a mongo-style dotted key path, return the object value referenced by that key path. ''' segs = keypath.split('.') this = obj for seg in segs: if isinstance(this, dict): try: this = this[seg] except KeyError: raise self.InvalidPath() elif isinstance(this, (list, tuple)): if seg.isdigit(): this = this[int(seg)] else: try: this = getattr(this, seg) except AttributeError: raise self.InvalidPath() return this
[ "def", "path_eval", "(", "self", ",", "obj", ",", "keypath", ")", ":", "segs", "=", "keypath", ".", "split", "(", "'.'", ")", "this", "=", "obj", "for", "seg", "in", "segs", ":", "if", "isinstance", "(", "this", ",", "dict", ")", ":", "try", ":",...
Given an object and a mongo-style dotted key path, return the object value referenced by that key path.
[ "Given", "an", "object", "and", "a", "mongo", "-", "style", "dotted", "key", "path", "return", "the", "object", "value", "referenced", "by", "that", "key", "path", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/streamsasl.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/streamsasl.py#L299-L331
def _process_sasl_success(self, stream, element): """Process incoming <sasl:success/> element. [initiating entity only] """ if not self.authenticator: logger.debug("Unexpected SASL response") return False content = element.text if content: data = a2b_base64(content.encode("us-ascii")) else: data = None ret = self.authenticator.finish(data) if isinstance(ret, sasl.Success): logger.debug("SASL authentication succeeded") authzid = ret.properties.get("authzid") if authzid: me = JID(authzid) elif "username" in ret.properties: # FIXME: other rules for server me = JID(ret.properties["username"], stream.peer.domain) else: me = None stream.set_authenticated(me, True) else: logger.debug("SASL authentication failed") raise SASLAuthenticationFailed("Additional success data" " procesing failed") return True
[ "def", "_process_sasl_success", "(", "self", ",", "stream", ",", "element", ")", ":", "if", "not", "self", ".", "authenticator", ":", "logger", ".", "debug", "(", "\"Unexpected SASL response\"", ")", "return", "False", "content", "=", "element", ".", "text", ...
Process incoming <sasl:success/> element. [initiating entity only]
[ "Process", "incoming", "<sasl", ":", "success", "/", ">", "element", "." ]
python
valid
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeadapter.py#L93-L106
def best_parent( self, node, tree_type=None ): """Choose the best parent for a given node""" parents = self.parents(node) selected_parent = None if node['type'] == 'type': module = ".".join( node['name'].split( '.' )[:-1] ) if module: for mod in parents: if mod['type'] == 'module' and mod['name'] == module: selected_parent = mod if parents and selected_parent is None: parents.sort( key = lambda x: self.value(node, x) ) return parents[-1] return selected_parent
[ "def", "best_parent", "(", "self", ",", "node", ",", "tree_type", "=", "None", ")", ":", "parents", "=", "self", ".", "parents", "(", "node", ")", "selected_parent", "=", "None", "if", "node", "[", "'type'", "]", "==", "'type'", ":", "module", "=", "...
Choose the best parent for a given node
[ "Choose", "the", "best", "parent", "for", "a", "given", "node" ]
python
train
boriel/zxbasic
arch/zx48k/backend/__init__.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L1600-L1605
def _paramf(ins): """ Pushes 40bit (float) param into the stack """ output = _float_oper(ins.quad[1]) output.extend(_fpush()) return output
[ "def", "_paramf", "(", "ins", ")", ":", "output", "=", "_float_oper", "(", "ins", ".", "quad", "[", "1", "]", ")", "output", ".", "extend", "(", "_fpush", "(", ")", ")", "return", "output" ]
Pushes 40bit (float) param into the stack
[ "Pushes", "40bit", "(", "float", ")", "param", "into", "the", "stack" ]
python
train
googleapis/gax-python
google/gax/__init__.py
https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gax/__init__.py#L601-L608
def exception(self, timeout=None): """Similar to result(), except returns the exception if any.""" # Check exceptional case: return none if no error if not self._poll(timeout).HasField('error'): return None # Return expected error return self._operation.error
[ "def", "exception", "(", "self", ",", "timeout", "=", "None", ")", ":", "# Check exceptional case: return none if no error", "if", "not", "self", ".", "_poll", "(", "timeout", ")", ".", "HasField", "(", "'error'", ")", ":", "return", "None", "# Return expected e...
Similar to result(), except returns the exception if any.
[ "Similar", "to", "result", "()", "except", "returns", "the", "exception", "if", "any", "." ]
python
train
saltstack/salt
salt/modules/netbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbox.py#L700-L766
def create_interface(device_name, interface_name, mac_address=None, description=None, enabled=None, lag=None, lag_parent=None, form_factor=None): ''' .. versionadded:: 2019.2.0 Attach an interface to a device. If not all arguments are provided, they will default to Netbox defaults. device_name The name of the device, e.g., ``edge_router`` interface_name The name of the interface, e.g., ``TenGigE0/0/0/0`` mac_address String of mac address, e.g., ``50:87:89:73:92:C8`` description String of interface description, e.g., ``NTT`` enabled String of boolean interface status, e.g., ``True`` lag: Boolean of interface lag status, e.g., ``True`` lag_parent String of interface lag parent name, e.g., ``ae13`` form_factor Integer of form factor id, obtained through _choices API endpoint, e.g., ``200`` CLI Example: .. code-block:: bash salt myminion netbox.create_interface edge_router ae13 description="Core uplink" ''' nb_device = get_('dcim', 'devices', name=device_name) if not nb_device: return False if lag_parent: lag_interface = get_('dcim', 'interfaces', device_id=nb_device['id'], name=lag_parent) if not lag_interface: return False if not description: description = '' if not enabled: enabled = 'false' # Set default form factor to 1200. This maps to SFP+ (10GE). This should be addressed by # the _choices endpoint. payload = {'device': nb_device['id'], 'name': interface_name, 'description': description, 'enabled': enabled, 'form_factor': 1200} if form_factor is not None: payload['form_factor'] = form_factor if lag: payload['form_factor'] = 200 if lag_parent: payload['lag'] = lag_interface['id'] if mac_address: payload['mac_address'] = mac_address nb_interface = get_('dcim', 'interfaces', device_id=nb_device['id'], name=interface_name) if not nb_interface: nb_interface = _add('dcim', 'interfaces', payload) if nb_interface: return {'dcim': {'interfaces': {nb_interface['id']: payload}}} else: return nb_interface
[ "def", "create_interface", "(", "device_name", ",", "interface_name", ",", "mac_address", "=", "None", ",", "description", "=", "None", ",", "enabled", "=", "None", ",", "lag", "=", "None", ",", "lag_parent", "=", "None", ",", "form_factor", "=", "None", "...
.. versionadded:: 2019.2.0 Attach an interface to a device. If not all arguments are provided, they will default to Netbox defaults. device_name The name of the device, e.g., ``edge_router`` interface_name The name of the interface, e.g., ``TenGigE0/0/0/0`` mac_address String of mac address, e.g., ``50:87:89:73:92:C8`` description String of interface description, e.g., ``NTT`` enabled String of boolean interface status, e.g., ``True`` lag: Boolean of interface lag status, e.g., ``True`` lag_parent String of interface lag parent name, e.g., ``ae13`` form_factor Integer of form factor id, obtained through _choices API endpoint, e.g., ``200`` CLI Example: .. code-block:: bash salt myminion netbox.create_interface edge_router ae13 description="Core uplink"
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
zaturox/glin
glin/app.py
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/app.py#L288-L292
def publish_scene_name(self, scene_id, name): """publish a changed scene name""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_name(self.sequence_number, scene_id, name)) return self.sequence_number
[ "def", "publish_scene_name", "(", "self", ",", "scene_id", ",", "name", ")", ":", "self", ".", "sequence_number", "+=", "1", "self", ".", "publisher", ".", "send_multipart", "(", "msgs", ".", "MessageBuilder", ".", "scene_name", "(", "self", ".", "sequence_n...
publish a changed scene name
[ "publish", "a", "changed", "scene", "name" ]
python
train
tcalmant/ipopo
pelix/shell/report.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/report.py#L307-L347
def os_details(): """ Returns a dictionary containing details about the operating system """ # Compute architecture and linkage bits, linkage = platform.architecture() results = { # Machine details "platform.arch.bits": bits, "platform.arch.linkage": linkage, "platform.machine": platform.machine(), "platform.process": platform.processor(), "sys.byteorder": sys.byteorder, # OS details "os.name": os.name, "host.name": socket.gethostname(), "sys.platform": sys.platform, "platform.system": platform.system(), "platform.release": platform.release(), "platform.version": platform.version(), "encoding.filesystem": sys.getfilesystemencoding(), } # Paths and line separators for name in "sep", "altsep", "pathsep", "linesep": results["os.{0}".format(name)] = getattr(os, name, None) try: # Available since Python 3.4 results["os.cpu_count"] = os.cpu_count() except AttributeError: results["os.cpu_count"] = None try: # Only for Unix # pylint: disable=E1101 results["sys.dlopenflags"] = sys.getdlopenflags() except AttributeError: results["sys.dlopenflags"] = None return results
[ "def", "os_details", "(", ")", ":", "# Compute architecture and linkage", "bits", ",", "linkage", "=", "platform", ".", "architecture", "(", ")", "results", "=", "{", "# Machine details", "\"platform.arch.bits\"", ":", "bits", ",", "\"platform.arch.linkage\"", ":", ...
Returns a dictionary containing details about the operating system
[ "Returns", "a", "dictionary", "containing", "details", "about", "the", "operating", "system" ]
python
train
saltstack/salt
salt/utils/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/state.py#L126-L161
def check_result(running, recurse=False, highstate=None): ''' Check the total return value of the run and determine if the running dict has any issues ''' if not isinstance(running, dict): return False if not running: return False ret = True for state_id, state_result in six.iteritems(running): expected_type = dict # The __extend__ state is a list if "__extend__" == state_id: expected_type = list if not recurse and not isinstance(state_result, expected_type): ret = False if ret and isinstance(state_result, dict): result = state_result.get('result', _empty) if result is False: ret = False # only override return value if we are not already failed elif result is _empty and isinstance(state_result, dict) and ret: ret = check_result( state_result, recurse=True, highstate=highstate) # if we detect a fail, check for onfail requisites if not ret: # ret can be None in case of no onfail reqs, recast it to bool ret = bool(check_onfail_requisites(state_id, state_result, running, highstate)) # return as soon as we got a failure if not ret: break return ret
[ "def", "check_result", "(", "running", ",", "recurse", "=", "False", ",", "highstate", "=", "None", ")", ":", "if", "not", "isinstance", "(", "running", ",", "dict", ")", ":", "return", "False", "if", "not", "running", ":", "return", "False", "ret", "=...
Check the total return value of the run and determine if the running dict has any issues
[ "Check", "the", "total", "return", "value", "of", "the", "run", "and", "determine", "if", "the", "running", "dict", "has", "any", "issues" ]
python
train
IRC-SPHERE/HyperStream
hyperstream/utils/time_utils.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/time_utils.py#L105-L124
def json_serial(obj): """JSON serializer for objects not serializable by default json code""" if isinstance(obj, (datetime, date)): serial = obj.isoformat() return serial from ..time_interval import TimeInterval, TimeIntervals if isinstance(obj, (TimeInterval, TimeIntervals)): return obj.to_json() from ..stream import StreamId if isinstance(obj, StreamId): return obj.to_json() from ..channels import BaseChannel if isinstance(obj, BaseChannel): return json.dumps({'channel_id': obj.channel_id}) raise TypeError("Type %s not serializable" % type(obj))
[ "def", "json_serial", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "datetime", ",", "date", ")", ")", ":", "serial", "=", "obj", ".", "isoformat", "(", ")", "return", "serial", "from", ".", ".", "time_interval", "import", "TimeInterv...
JSON serializer for objects not serializable by default json code
[ "JSON", "serializer", "for", "objects", "not", "serializable", "by", "default", "json", "code" ]
python
train
ekzhu/datasketch
datasketch/experimental/aio/lsh.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/experimental/aio/lsh.py#L275-L287
async def query(self, minhash): """ see :class:`datasketch.MinHashLSH`. """ if len(minhash) != self.h: raise ValueError("Expecting minhash with length %d, " "got %d" % (self.h, len(minhash))) fs = (hashtable.get(self._H(minhash.hashvalues[start:end])) for (start, end), hashtable in zip(self.hashranges, self.hashtables)) candidates = frozenset(chain.from_iterable(await asyncio.gather(*fs))) return list(candidates)
[ "async", "def", "query", "(", "self", ",", "minhash", ")", ":", "if", "len", "(", "minhash", ")", "!=", "self", ".", "h", ":", "raise", "ValueError", "(", "\"Expecting minhash with length %d, \"", "\"got %d\"", "%", "(", "self", ".", "h", ",", "len", "("...
see :class:`datasketch.MinHashLSH`.
[ "see", ":", "class", ":", "datasketch", ".", "MinHashLSH", "." ]
python
test
saltstack/salt
salt/utils/schedule.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L283-L304
def delete_job(self, name, persist=True): ''' Deletes a job from the scheduler. Ignore jobs from pillar ''' # ensure job exists, then delete it if name in self.opts['schedule']: del self.opts['schedule'][name] elif name in self._get_schedule(include_opts=False): log.warning("Cannot delete job %s, it's in the pillar!", name) # Fire the complete event back along with updated list of schedule evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True, 'schedule': self._get_schedule()}, tag='/salt/minion/minion_schedule_delete_complete') # remove from self.intervals if name in self.intervals: del self.intervals[name] if persist: self.persist()
[ "def", "delete_job", "(", "self", ",", "name", ",", "persist", "=", "True", ")", ":", "# ensure job exists, then delete it", "if", "name", "in", "self", ".", "opts", "[", "'schedule'", "]", ":", "del", "self", ".", "opts", "[", "'schedule'", "]", "[", "n...
Deletes a job from the scheduler. Ignore jobs from pillar
[ "Deletes", "a", "job", "from", "the", "scheduler", ".", "Ignore", "jobs", "from", "pillar" ]
python
train
sarugaku/vistir
tasks/__init__.py
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/tasks/__init__.py#L26-L33
def clean(ctx): """Clean previously built package artifacts. """ ctx.run(f"python setup.py clean") dist = ROOT.joinpath("dist") print(f"[clean] Removing {dist}") if dist.exists(): shutil.rmtree(str(dist))
[ "def", "clean", "(", "ctx", ")", ":", "ctx", ".", "run", "(", "f\"python setup.py clean\"", ")", "dist", "=", "ROOT", ".", "joinpath", "(", "\"dist\"", ")", "print", "(", "f\"[clean] Removing {dist}\"", ")", "if", "dist", ".", "exists", "(", ")", ":", "s...
Clean previously built package artifacts.
[ "Clean", "previously", "built", "package", "artifacts", "." ]
python
train
agoragames/haigha
haigha/connection.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connection.py#L314-L348
def channel(self, channel_id=None, synchronous=False): """ Fetch a Channel object identified by the numeric channel_id, or create that object if it doesn't already exist. If channel_id is not None but no channel exists for that id, will raise InvalidChannel. If there are already too many channels open, will raise TooManyChannels. If synchronous=True, then the channel will act synchronous in all cases where a protocol method supports `nowait=False`, or where there is an implied callback in the protocol. """ if channel_id is None: # adjust for channel 0 if len(self._channels) - 1 >= self._channel_max: raise Connection.TooManyChannels( "%d channels already open, max %d", len(self._channels) - 1, self._channel_max) channel_id = self._next_channel_id() while channel_id in self._channels: channel_id = self._next_channel_id() elif channel_id in self._channels: return self._channels[channel_id] else: raise Connection.InvalidChannel( "%s is not a valid channel id", channel_id) # Call open() here so that ConnectionChannel doesn't have it called. # Could also solve this other ways, but it's a HACK regardless. rval = Channel( self, channel_id, self._class_map, synchronous=synchronous) self._channels[channel_id] = rval rval.add_close_listener(self._channel_closed) rval.open() return rval
[ "def", "channel", "(", "self", ",", "channel_id", "=", "None", ",", "synchronous", "=", "False", ")", ":", "if", "channel_id", "is", "None", ":", "# adjust for channel 0", "if", "len", "(", "self", ".", "_channels", ")", "-", "1", ">=", "self", ".", "_...
Fetch a Channel object identified by the numeric channel_id, or create that object if it doesn't already exist. If channel_id is not None but no channel exists for that id, will raise InvalidChannel. If there are already too many channels open, will raise TooManyChannels. If synchronous=True, then the channel will act synchronous in all cases where a protocol method supports `nowait=False`, or where there is an implied callback in the protocol.
[ "Fetch", "a", "Channel", "object", "identified", "by", "the", "numeric", "channel_id", "or", "create", "that", "object", "if", "it", "doesn", "t", "already", "exist", ".", "If", "channel_id", "is", "not", "None", "but", "no", "channel", "exists", "for", "t...
python
train
collectiveacuity/jsonModel
jsonmodel/validators.py
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1327-L1347
def _ingest_number(self, input_number, path_to_root): ''' a helper method for ingesting a number :return: valid_number ''' valid_number = 0.0 try: valid_number = self._validate_number(input_number, path_to_root) except: rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root) if 'default_value' in self.keyMap[rules_path_to_root]: valid_number = self.keyMap[rules_path_to_root]['default_value'] elif 'integer_data' in self.keyMap[rules_path_to_root].keys(): if self.keyMap[rules_path_to_root]['integer_data']: valid_number = 0 return valid_number
[ "def", "_ingest_number", "(", "self", ",", "input_number", ",", "path_to_root", ")", ":", "valid_number", "=", "0.0", "try", ":", "valid_number", "=", "self", ".", "_validate_number", "(", "input_number", ",", "path_to_root", ")", "except", ":", "rules_path_to_r...
a helper method for ingesting a number :return: valid_number
[ "a", "helper", "method", "for", "ingesting", "a", "number" ]
python
train
bcbio/bcbio-nextgen
scripts/utils/hla_loh_comparison.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/scripts/utils/hla_loh_comparison.py#L294-L309
def get_hla_choice(h, hlas, normal_bam, tumor_bam): """Retrieve matching HLA with best read support in both tumor and normal """ def get_counts(bam_file): counts = {} for line in subprocess.check_output(["samtools", "idxstats", bam_file]).split("\n"): if line.startswith(h): name, _, count, _ = line.split() counts[name] = int(count) return counts tcounts = get_counts(tumor_bam) ncounts = get_counts(normal_bam) check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0] cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True) #print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0])) return cur_hlas[0]
[ "def", "get_hla_choice", "(", "h", ",", "hlas", ",", "normal_bam", ",", "tumor_bam", ")", ":", "def", "get_counts", "(", "bam_file", ")", ":", "counts", "=", "{", "}", "for", "line", "in", "subprocess", ".", "check_output", "(", "[", "\"samtools\"", ",",...
Retrieve matching HLA with best read support in both tumor and normal
[ "Retrieve", "matching", "HLA", "with", "best", "read", "support", "in", "both", "tumor", "and", "normal" ]
python
train
kstaniek/condoor
condoor/device.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/device.py#L92-L104
def clear_info(self): """Clear the device info.""" self._version_text = None self._inventory_text = None self._users_text = None self.os_version = None self.os_type = None self.family = None self.platform = None self.udi = None # self.is_console = None self.prompt = None self.prompt_re = None
[ "def", "clear_info", "(", "self", ")", ":", "self", ".", "_version_text", "=", "None", "self", ".", "_inventory_text", "=", "None", "self", ".", "_users_text", "=", "None", "self", ".", "os_version", "=", "None", "self", ".", "os_type", "=", "None", "sel...
Clear the device info.
[ "Clear", "the", "device", "info", "." ]
python
train
instagrambot/instabot
instabot/bot/bot_support.py
https://github.com/instagrambot/instabot/blob/d734f892ac4cc35d22746a4f2680425ffaff0927/instabot/bot/bot_support.py#L23-L39
def read_list_from_file(file_path, quiet=False): """ Reads list from file. One line - one item. Returns the list if file items. """ try: if not check_if_file_exists(file_path, quiet=quiet): return [] with codecs.open(file_path, "r", encoding="utf-8") as f: content = f.readlines() if sys.version_info[0] < 3: content = [str(item.encode('utf8')) for item in content] content = [item.strip() for item in content] return [i for i in content if i] except Exception as exception: print(str(exception)) return []
[ "def", "read_list_from_file", "(", "file_path", ",", "quiet", "=", "False", ")", ":", "try", ":", "if", "not", "check_if_file_exists", "(", "file_path", ",", "quiet", "=", "quiet", ")", ":", "return", "[", "]", "with", "codecs", ".", "open", "(", "file_p...
Reads list from file. One line - one item. Returns the list if file items.
[ "Reads", "list", "from", "file", ".", "One", "line", "-", "one", "item", ".", "Returns", "the", "list", "if", "file", "items", "." ]
python
test
BDNYC/astrodbkit
astrodbkit/astrodb.py
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrodb.py#L2124-L2141
def adapt_array(arr): """ Adapts a Numpy array into an ARRAY string to put into the database. Parameters ---------- arr: array The Numpy array to be adapted into an ARRAY type that can be inserted into a SQL file. Returns ------- ARRAY The adapted array object """ out = io.BytesIO() np.save(out, arr), out.seek(0) return buffer(out.read())
[ "def", "adapt_array", "(", "arr", ")", ":", "out", "=", "io", ".", "BytesIO", "(", ")", "np", ".", "save", "(", "out", ",", "arr", ")", ",", "out", ".", "seek", "(", "0", ")", "return", "buffer", "(", "out", ".", "read", "(", ")", ")" ]
Adapts a Numpy array into an ARRAY string to put into the database. Parameters ---------- arr: array The Numpy array to be adapted into an ARRAY type that can be inserted into a SQL file. Returns ------- ARRAY The adapted array object
[ "Adapts", "a", "Numpy", "array", "into", "an", "ARRAY", "string", "to", "put", "into", "the", "database", "." ]
python
train
ladybug-tools/ladybug
ladybug/datacollection.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L1207-L1256
def validate_analysis_period(self): """Get a collection where the header analysis_period aligns with datetimes. This means that checks for three criteria will be performed: 1) All months in the data collection are chronological starting from the analysis_period start month to the end month. 2) No duplicate months exist in the data collection. 3) There are no months that lie outside of the analysis_period range. Note that there is no need to run this check any time that a data collection has been derived from a continuous one or when the validated_a_period attribute of the collection is True. """ a_per = self.header.analysis_period n_ap = [a_per.st_month, a_per.end_month] # make sure that months are in chronological order. sort_datetimes, sort_values = zip(*sorted(zip(self.datetimes, self.values))) # check that no datetimes lie outside of the analysis_period if not a_per.is_reversed and not a_per.is_annual: if sort_datetimes[0] < a_per.st_month: n_ap[0] = sort_datetimes[0] if sort_datetimes[-1] > a_per.end_month: n_ap[1] = sort_datetimes[-1] elif a_per.is_reversed: last_ind = None for i, date_t in enumerate(sort_datetimes): last_ind = i if date_t <= a_per.end_time.month else last_ind if last_ind is not None: last_ind = last_ind + 1 sort_datetimes = sort_datetimes[last_ind:] + sort_datetimes[:last_ind] sort_values = sort_values[last_ind:] + sort_values[:last_ind] if sort_datetimes[0] > a_per.end_time.month and \ sort_datetimes[0] < a_per.st_time.month: n_ap = [1, 12] sort_datetimes, sort_values = zip(*sorted(zip( self.datetimes, self.values))) # check that there are no duplicate months. for i in xrange(len(sort_datetimes)): assert sort_datetimes[i] != sort_datetimes[i - 1], 'Duplicate month ' \ 'was found in the collection: {}'.format(sort_datetimes[i]) # build a validated collection. 
new_ap = AnalysisPeriod(st_month=n_ap[0], end_month=n_ap[1]) new_header = self.header.duplicate() new_header._analysis_period = new_ap new_coll = MonthlyCollection(new_header, sort_values, sort_datetimes) new_coll._validated_a_period = True return new_coll
[ "def", "validate_analysis_period", "(", "self", ")", ":", "a_per", "=", "self", ".", "header", ".", "analysis_period", "n_ap", "=", "[", "a_per", ".", "st_month", ",", "a_per", ".", "end_month", "]", "# make sure that months are in chronological order.", "sort_datet...
Get a collection where the header analysis_period aligns with datetimes. This means that checks for three criteria will be performed: 1) All months in the data collection are chronological starting from the analysis_period start month to the end month. 2) No duplicate months exist in the data collection. 3) There are no months that lie outside of the analysis_period range. Note that there is no need to run this check any time that a data collection has been derived from a continuous one or when the validated_a_period attribute of the collection is True.
[ "Get", "a", "collection", "where", "the", "header", "analysis_period", "aligns", "with", "datetimes", "." ]
python
train
pyca/pynacl
src/nacl/bindings/crypto_secretstream.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_secretstream.py#L117-L178
def crypto_secretstream_xchacha20poly1305_push( state, m, ad=None, tag=crypto_secretstream_xchacha20poly1305_TAG_MESSAGE, ): """ Add an encrypted message to the secret stream. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param m: the message to encrypt, the maximum length of an individual message is :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`. :type m: bytes :param ad: additional data to include in the authentication tag :type ad: bytes or None :param tag: the message tag, usually :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`. :type tag: int :return: ciphertext :rtype: bytes """ ensure( isinstance(state, crypto_secretstream_xchacha20poly1305_state), 'State must be a crypto_secretstream_xchacha20poly1305_state object', raising=exc.TypeError, ) ensure(isinstance(m, bytes), 'Message is not bytes', raising=exc.TypeError) ensure( len(m) <= crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX, 'Message is too long', raising=exc.ValueError, ) ensure( ad is None or isinstance(ad, bytes), 'Additional data must be bytes or None', raising=exc.TypeError, ) clen = len(m) + crypto_secretstream_xchacha20poly1305_ABYTES if state.rawbuf is None or len(state.rawbuf) < clen: state.rawbuf = ffi.new('unsigned char[]', clen) if ad is None: ad = ffi.NULL adlen = 0 else: adlen = len(ad) rc = lib.crypto_secretstream_xchacha20poly1305_push( state.statebuf, state.rawbuf, ffi.NULL, m, len(m), ad, adlen, tag, ) ensure(rc == 0, 'Unexpected failure', raising=exc.RuntimeError) return ffi.buffer(state.rawbuf, clen)[:]
[ "def", "crypto_secretstream_xchacha20poly1305_push", "(", "state", ",", "m", ",", "ad", "=", "None", ",", "tag", "=", "crypto_secretstream_xchacha20poly1305_TAG_MESSAGE", ",", ")", ":", "ensure", "(", "isinstance", "(", "state", ",", "crypto_secretstream_xchacha20poly13...
Add an encrypted message to the secret stream. :param state: a secretstream state object :type state: crypto_secretstream_xchacha20poly1305_state :param m: the message to encrypt, the maximum length of an individual message is :data:`.crypto_secretstream_xchacha20poly1305_MESSAGEBYTES_MAX`. :type m: bytes :param ad: additional data to include in the authentication tag :type ad: bytes or None :param tag: the message tag, usually :data:`.crypto_secretstream_xchacha20poly1305_TAG_MESSAGE` or :data:`.crypto_secretstream_xchacha20poly1305_TAG_FINAL`. :type tag: int :return: ciphertext :rtype: bytes
[ "Add", "an", "encrypted", "message", "to", "the", "secret", "stream", "." ]
python
train
HDI-Project/MLBlocks
mlblocks/datasets.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L237-L251
def load_usps(): """USPs Digits Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 9298 224x224 RGB photos of handwritten digits, and the target is a 1d numpy integer array containing the label of the digit represented in the image. """ dataset_path = _load('usps') df = _load_csv(dataset_path, 'data') X = _load_images(os.path.join(dataset_path, 'images'), df.image) y = df.label.values return Dataset(load_usps.__doc__, X, y, accuracy_score, stratify=True)
[ "def", "load_usps", "(", ")", ":", "dataset_path", "=", "_load", "(", "'usps'", ")", "df", "=", "_load_csv", "(", "dataset_path", ",", "'data'", ")", "X", "=", "_load_images", "(", "os", ".", "path", ".", "join", "(", "dataset_path", ",", "'images'", "...
USPs Digits Dataset. The data of this dataset is a 3d numpy array vector with shape (224, 224, 3) containing 9298 224x224 RGB photos of handwritten digits, and the target is a 1d numpy integer array containing the label of the digit represented in the image.
[ "USPs", "Digits", "Dataset", "." ]
python
train
cmheisel/basecampreporting
src/basecampreporting/basecamp.py
https://github.com/cmheisel/basecampreporting/blob/88ecfc6e835608650ff6be23cbf2421d224c122b/src/basecampreporting/basecamp.py#L239-L255
def update_message(self, message_id, category_id, title, body, extended_body, use_textile=False, private=False, notify=None): """ Updates an existing message, optionally sending notifications to a selected list of people. Note that you can also upload files using this function, but you have to format the request as multipart/form-data. (See the ruby Basecamp API wrapper for an example of how to do this.) """ path = '/msg/update/%u' % message_id req = ET.Element('request') req.append(self._create_message_post_elem(category_id, title, body, extended_body, use_textile=False, private=False)) if notify is not None: for person_id in notify: ET.SubElement(req, 'notify').text = str(int(person_id)) return self._request(path, req)
[ "def", "update_message", "(", "self", ",", "message_id", ",", "category_id", ",", "title", ",", "body", ",", "extended_body", ",", "use_textile", "=", "False", ",", "private", "=", "False", ",", "notify", "=", "None", ")", ":", "path", "=", "'/msg/update/%...
Updates an existing message, optionally sending notifications to a selected list of people. Note that you can also upload files using this function, but you have to format the request as multipart/form-data. (See the ruby Basecamp API wrapper for an example of how to do this.)
[ "Updates", "an", "existing", "message", "optionally", "sending", "notifications", "to", "a", "selected", "list", "of", "people", ".", "Note", "that", "you", "can", "also", "upload", "files", "using", "this", "function", "but", "you", "have", "to", "format", ...
python
train
trailofbits/manticore
manticore/core/smtlib/solver.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/solver.py#L383-L423
def get_all_values(self, constraints, expression, maxcnt=None, silent=False): """Returns a list with all the possible values for the symbol x""" if not isinstance(expression, Expression): return [expression] assert isinstance(constraints, ConstraintSet) assert isinstance(expression, Expression) expression = simplify(expression) if maxcnt is None: maxcnt = consts.maxsolutions with constraints as temp_cs: if isinstance(expression, Bool): var = temp_cs.new_bool() elif isinstance(expression, BitVec): var = temp_cs.new_bitvec(expression.size) elif isinstance(expression, Array): var = temp_cs.new_array(index_max=expression.index_max, value_bits=expression.value_bits, taint=expression.taint).array else: raise NotImplementedError(f"get_all_values only implemented for {type(expression)} expression type.") temp_cs.add(var == expression) self._reset(temp_cs.to_string(related_to=var)) result = [] while self._is_sat(): value = self._getvalue(var) result.append(value) self._assert(var != value) if len(result) >= maxcnt: if silent: # do not throw an exception if set to silent # Default is not silent, assume user knows # what they are doing and will check the size # of returned vals list (previous smtlib behavior) break else: raise TooManySolutions(result) return result
[ "def", "get_all_values", "(", "self", ",", "constraints", ",", "expression", ",", "maxcnt", "=", "None", ",", "silent", "=", "False", ")", ":", "if", "not", "isinstance", "(", "expression", ",", "Expression", ")", ":", "return", "[", "expression", "]", "...
Returns a list with all the possible values for the symbol x
[ "Returns", "a", "list", "with", "all", "the", "possible", "values", "for", "the", "symbol", "x" ]
python
valid
ibm-watson-iot/iot-python
src/wiotp/sdk/application/client.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/src/wiotp/sdk/application/client.py#L253-L264
def _onDeviceStatus(self, client, userdata, pahoMessage): """ Internal callback for device status messages, parses source device from topic string and passes the information on to the registerd device status callback """ try: status = Status(pahoMessage) self.logger.debug("Received %s action from %s" % (status.action, status.clientId)) if self.deviceStatusCallback: self.deviceStatusCallback(status) except InvalidEventException as e: self.logger.critical(str(e))
[ "def", "_onDeviceStatus", "(", "self", ",", "client", ",", "userdata", ",", "pahoMessage", ")", ":", "try", ":", "status", "=", "Status", "(", "pahoMessage", ")", "self", ".", "logger", ".", "debug", "(", "\"Received %s action from %s\"", "%", "(", "status",...
Internal callback for device status messages, parses source device from topic string and passes the information on to the registerd device status callback
[ "Internal", "callback", "for", "device", "status", "messages", "parses", "source", "device", "from", "topic", "string", "and", "passes", "the", "information", "on", "to", "the", "registerd", "device", "status", "callback" ]
python
test
QuantEcon/QuantEcon.py
quantecon/graph_tools.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/graph_tools.py#L375-L410
def random_tournament_graph(n, random_state=None): """ Return a random tournament graph [1]_ with n nodes. Parameters ---------- n : scalar(int) Number of nodes. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- DiGraph A DiGraph representing the tournament graph. References ---------- .. [1] `Tournament (graph theory) <https://en.wikipedia.org/wiki/Tournament_(graph_theory)>`_, Wikipedia. """ random_state = check_random_state(random_state) num_edges = n * (n-1) // 2 r = random_state.random_sample(num_edges) row = np.empty(num_edges, dtype=int) col = np.empty(num_edges, dtype=int) _populate_random_tournament_row_col(n, r, row, col) data = np.ones(num_edges, dtype=bool) adj_matrix = sparse.coo_matrix((data, (row, col)), shape=(n, n)) return DiGraph(adj_matrix)
[ "def", "random_tournament_graph", "(", "n", ",", "random_state", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "num_edges", "=", "n", "*", "(", "n", "-", "1", ")", "//", "2", "r", "=", "random_state", ".", "r...
Return a random tournament graph [1]_ with n nodes. Parameters ---------- n : scalar(int) Number of nodes. random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- DiGraph A DiGraph representing the tournament graph. References ---------- .. [1] `Tournament (graph theory) <https://en.wikipedia.org/wiki/Tournament_(graph_theory)>`_, Wikipedia.
[ "Return", "a", "random", "tournament", "graph", "[", "1", "]", "_", "with", "n", "nodes", "." ]
python
train
UDST/urbansim
urbansim/models/dcm.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L962-L984
def _iter_groups(self, data): """ Iterate over the groups in `data` after grouping by `segmentation_col`. Skips any groups for which there is no model stored. Yields tuples of (name, df) where name is the group key and df is the group DataFrame. Parameters ---------- data : pandas.DataFrame Must have a column with the same name as `segmentation_col`. """ groups = data.groupby(self.segmentation_col) for name, group in groups: if name not in self.models: continue logger.debug( 'returning group {} in LCM group {}'.format(name, self.name)) yield name, group
[ "def", "_iter_groups", "(", "self", ",", "data", ")", ":", "groups", "=", "data", ".", "groupby", "(", "self", ".", "segmentation_col", ")", "for", "name", ",", "group", "in", "groups", ":", "if", "name", "not", "in", "self", ".", "models", ":", "con...
Iterate over the groups in `data` after grouping by `segmentation_col`. Skips any groups for which there is no model stored. Yields tuples of (name, df) where name is the group key and df is the group DataFrame. Parameters ---------- data : pandas.DataFrame Must have a column with the same name as `segmentation_col`.
[ "Iterate", "over", "the", "groups", "in", "data", "after", "grouping", "by", "segmentation_col", ".", "Skips", "any", "groups", "for", "which", "there", "is", "no", "model", "stored", "." ]
python
train
fracpete/python-weka-wrapper3
python/weka/flow/sink.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/sink.py#L240-L257
def fix_config(self, options): """ Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict """ options = super(DumpFile, self).fix_config(options) opt = "append" if opt not in options: options[opt] = False if opt not in self.help: self.help[opt] = "Whether to append to the file or overwrite (bool)." return options
[ "def", "fix_config", "(", "self", ",", "options", ")", ":", "options", "=", "super", "(", "DumpFile", ",", "self", ")", ".", "fix_config", "(", "options", ")", "opt", "=", "\"append\"", "if", "opt", "not", "in", "options", ":", "options", "[", "opt", ...
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
[ "Fixes", "the", "options", "if", "necessary", ".", "I", ".", "e", ".", "it", "adds", "all", "required", "elements", "to", "the", "dictionary", "." ]
python
train
emilydolson/avida-spatial-tools
avidaspatial/transform_data.py
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/transform_data.py#L251-L277
def make_optimal_phenotype_grid(environment, phenotypes): """ Takes an EnvironmentFile object and a 2d array of phenotypes and returns a 2d array in which each location contains an index representing the distance between the phenotype in that location and the optimal phenotype for that location. This is acheived by using the task list in the EnvironmentFile to convert the phenotypes to sets of tasks, and comparing them to the sets of resources in the environment. So if the environment file that you created the EnvironmentFile object from for some reason doesn't contain all of the tasks, or doesn't contain them in the right order this won't work. If this is the environment file that you used for the run of Avida that generated this data, you should be fine. """ world_size = environment.size phenotypes = deepcopy(phenotypes) for i in range(world_size[1]): for j in range(world_size[0]): for k in range(len(phenotypes[i][j])): phenotype = phenotype_to_res_set(phenotypes[i][j][k], environment.tasks) diff = len(environment[i][j].symmetric_difference(phenotype)) phenotypes[i][j][k] = diff return phenotypes
[ "def", "make_optimal_phenotype_grid", "(", "environment", ",", "phenotypes", ")", ":", "world_size", "=", "environment", ".", "size", "phenotypes", "=", "deepcopy", "(", "phenotypes", ")", "for", "i", "in", "range", "(", "world_size", "[", "1", "]", ")", ":"...
Takes an EnvironmentFile object and a 2d array of phenotypes and returns a 2d array in which each location contains an index representing the distance between the phenotype in that location and the optimal phenotype for that location. This is acheived by using the task list in the EnvironmentFile to convert the phenotypes to sets of tasks, and comparing them to the sets of resources in the environment. So if the environment file that you created the EnvironmentFile object from for some reason doesn't contain all of the tasks, or doesn't contain them in the right order this won't work. If this is the environment file that you used for the run of Avida that generated this data, you should be fine.
[ "Takes", "an", "EnvironmentFile", "object", "and", "a", "2d", "array", "of", "phenotypes", "and", "returns", "a", "2d", "array", "in", "which", "each", "location", "contains", "an", "index", "representing", "the", "distance", "between", "the", "phenotype", "in...
python
train
python-openxml/python-docx
docx/opc/pkgwriter.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/pkgwriter.py#L100-L109
def _add_content_type(self, partname, content_type): """ Add a content type for the part with *partname* and *content_type*, using a default or override as appropriate. """ ext = partname.ext if (ext.lower(), content_type) in default_content_types: self._defaults[ext] = content_type else: self._overrides[partname] = content_type
[ "def", "_add_content_type", "(", "self", ",", "partname", ",", "content_type", ")", ":", "ext", "=", "partname", ".", "ext", "if", "(", "ext", ".", "lower", "(", ")", ",", "content_type", ")", "in", "default_content_types", ":", "self", ".", "_defaults", ...
Add a content type for the part with *partname* and *content_type*, using a default or override as appropriate.
[ "Add", "a", "content", "type", "for", "the", "part", "with", "*", "partname", "*", "and", "*", "content_type", "*", "using", "a", "default", "or", "override", "as", "appropriate", "." ]
python
train
OnroerendErfgoed/crabpy
crabpy/gateway/capakey.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/capakey.py#L135-L164
def get_gemeente_by_id(self, id): ''' Retrieve a `gemeente` by id (the NIScode). :rtype: :class:`Gemeente` ''' def creator(): url = self.base_url + '/municipality/%s' % id h = self.base_headers p = { 'geometry': 'full', 'srs': '31370' } res = capakey_rest_gateway_request(url, h, p).json() return Gemeente( res['municipalityCode'], res['municipalityName'], self._parse_centroid(res['geometry']['center']), self._parse_bounding_box(res['geometry']['boundingBox']), res['geometry']['shape'] ) if self.caches['long'].is_configured: key = 'get_gemeente_by_id_rest#%s' % id gemeente = self.caches['long'].get_or_create(key, creator) else: gemeente = creator() gemeente.set_gateway(self) return gemeente
[ "def", "get_gemeente_by_id", "(", "self", ",", "id", ")", ":", "def", "creator", "(", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/municipality/%s'", "%", "id", "h", "=", "self", ".", "base_headers", "p", "=", "{", "'geometry'", ":", "'full'"...
Retrieve a `gemeente` by id (the NIScode). :rtype: :class:`Gemeente`
[ "Retrieve", "a", "gemeente", "by", "id", "(", "the", "NIScode", ")", "." ]
python
train
saltstack/salt
salt/modules/proxy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/proxy.py#L306-L352
def set_ftp_proxy(server, port, user=None, password=None, network_service="Ethernet", bypass_hosts=None):
    '''
    Sets the ftp proxy settings

    server
        The proxy server to use

    port
        The port used by the proxy server

    user
        The username to use for the proxy server if required

    password
        The password to use if required by the server

    network_service
        The network service to apply the changes to, this only necessary on
        macOS

    bypass_hosts
        The hosts that are allowed to by pass the proxy. Only used on
        Windows for other OS's use set_proxy_bypass to edit the bypass hosts.

    CLI Example:

    .. code-block:: bash

        salt '*' proxy.set_ftp_proxy example.com 1080 user=proxy_user password=proxy_pass network_service=Ethernet
    '''
    # Dispatch on platform: Windows gets a registry-based setter, every
    # other platform goes through the macOS networksetup wrapper.
    if __grains__['os'] != 'Windows':
        return _set_proxy_osx(cmd_function="setftpproxy",
                              server=server,
                              port=port,
                              user=user,
                              password=password,
                              network_service=network_service)
    return _set_proxy_windows(server=server,
                              port=port,
                              types=['ftp'],
                              bypass_hosts=bypass_hosts)
[ "def", "set_ftp_proxy", "(", "server", ",", "port", ",", "user", "=", "None", ",", "password", "=", "None", ",", "network_service", "=", "\"Ethernet\"", ",", "bypass_hosts", "=", "None", ")", ":", "if", "__grains__", "[", "'os'", "]", "==", "'Windows'", ...
Sets the ftp proxy settings server The proxy server to use port The port used by the proxy server user The username to use for the proxy server if required password The password to use if required by the server network_service The network service to apply the changes to, this only necessary on macOS bypass_hosts The hosts that are allowed to by pass the proxy. Only used on Windows for other OS's use set_proxy_bypass to edit the bypass hosts. CLI Example: .. code-block:: bash salt '*' proxy.set_ftp_proxy example.com 1080 user=proxy_user password=proxy_pass network_service=Ethernet
[ "Sets", "the", "ftp", "proxy", "settings" ]
python
train
jtwhite79/pyemu
pyemu/mc.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/mc.py#L93-L121
def get_null_proj(self, nsing=None):
    """ get a null-space projection matrix of XTQX

    Parameters
    ----------
    nsing: int
        optional number of singular components to use
        If None, then nsing is determined from a call to
        MonteCarlo.get_nsing()

    Returns
    -------
    v2_proj : pyemu.Matrix
        the null-space projection matrix (V2V2^T)

    """
    if nsing is None:
        nsing = self.get_nsing()
        if nsing is None:
            raise Exception("nsing is None")
    print("using {0} singular components".format(nsing))

    # Same message brackets the work so the log records start and end.
    msg = "forming null space projection matrix with " +\
          "{0} of {1} singular components".format(nsing, self.jco.shape[1])
    self.log(msg)
    # V2 holds the trailing (null-space) right singular vectors of XTQX.
    v2 = self.xtqx.v[:, nsing:]
    v2_proj = v2 * v2.T
    self.log(msg)
    return v2_proj
[ "def", "get_null_proj", "(", "self", ",", "nsing", "=", "None", ")", ":", "if", "nsing", "is", "None", ":", "nsing", "=", "self", ".", "get_nsing", "(", ")", "if", "nsing", "is", "None", ":", "raise", "Exception", "(", "\"nsing is None\"", ")", "print"...
get a null-space projection matrix of XTQX Parameters ---------- nsing: int optional number of singular components to use If Nonte, then nsing is determined from call to MonteCarlo.get_nsing() Returns ------- v2_proj : pyemu.Matrix the null-space projection matrix (V2V2^T)
[ "get", "a", "null", "-", "space", "projection", "matrix", "of", "XTQX" ]
python
train
sassoftware/saspy
saspy/sasdata.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasdata.py#L929-L966
def score(self, file: str = '', code: str = '', out: 'SASdata' = None) -> 'SASdata':
    """
    This method is meant to update a SAS Data object with a model score file.

    :param file: a file reference to the SAS score code
    :param code: a string of the valid SAS score code
    :param out: Where to the write the file. Defaults to update in place
    :return: The Scored SAS Data object.
    """
    # Score in place unless an explicit output data set was given.
    if out is None:
        outTable, outLibref = self.table, self.libref
    else:
        outTable, outLibref = out.table, out.libref

    # Assemble the DATA step that reads this data set and applies the
    # score code (either %include'd from a file or inlined).
    stmts = ["data %s.%s%s;" % (outLibref, outTable, self._dsopts())]
    stmts.append("set %s.%s%s;" % (self.libref, self.table, self._dsopts()))
    if len(file) > 0:
        stmts.append('%%include "%s";' % file)
    else:
        stmts.append("%s;" % code)
    stmts.append("run;")
    submit_code = "".join(stmts)

    if self.sas.nosub:
        # nosub mode: show the generated code instead of running it.
        print(submit_code)
        return None

    ll = self._is_valid()
    if not ll:
        # Temporarily force HTML results while submitting, then restore.
        saved_html = self.HTML
        self.HTML = 1
        ll = self.sas._io.submit(submit_code)
        self.HTML = saved_html
    if not self.sas.batch:
        self.sas.DISPLAY(self.sas.HTML(ll['LST']))
    else:
        return ll
[ "def", "score", "(", "self", ",", "file", ":", "str", "=", "''", ",", "code", ":", "str", "=", "''", ",", "out", ":", "'SASdata'", "=", "None", ")", "->", "'SASdata'", ":", "if", "out", "is", "not", "None", ":", "outTable", "=", "out", ".", "ta...
This method is meant to update a SAS Data object with a model score file. :param file: a file reference to the SAS score code :param code: a string of the valid SAS score code :param out: Where to the write the file. Defaults to update in place :return: The Scored SAS Data object.
[ "This", "method", "is", "meant", "to", "update", "a", "SAS", "Data", "object", "with", "a", "model", "score", "file", "." ]
python
train
denisenkom/pytds
src/pytds/__init__.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/__init__.py#L421-L455
def cursor(self):
    """
    Return cursor object that can be used to make queries and fetch
    results from the database.
    """
    self._assert_open()
    if self.mars_enabled:
        in_tran = self._conn.tds72_transaction
        if in_tran and self._dirty:
            # Inside a dirty transaction a broken socket cannot be
            # recovered transparently: close the connection and let the
            # caller see the error.
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError) as e:
                self._conn.close()
                raise
        else:
            # Outside a transaction we may retry once: on a broken-pipe /
            # connection-reset error, close and reopen, then create the
            # session again.
            try:
                return _MarsCursor(self,
                                   self._conn.create_session(self._tzinfo_factory),
                                   self._tzinfo_factory)
            except (socket.error, OSError) as e:
                # Any other socket error is not retryable here.
                if e.errno not in (errno.EPIPE, errno.ECONNRESET):
                    raise
                self._conn.close()
            except ClosedConnectionError:
                # Connection already gone; fall through to the reopen path.
                pass
            # _assert_open re-establishes the connection if needed
            # (presumably — TODO confirm against Connection._assert_open).
            self._assert_open()
            return _MarsCursor(self,
                               self._conn.create_session(self._tzinfo_factory),
                               self._tzinfo_factory)
    else:
        # Non-MARS connections share the single main session.
        return Cursor(self,
                      self._conn.main_session,
                      self._tzinfo_factory)
[ "def", "cursor", "(", "self", ")", ":", "self", ".", "_assert_open", "(", ")", "if", "self", ".", "mars_enabled", ":", "in_tran", "=", "self", ".", "_conn", ".", "tds72_transaction", "if", "in_tran", "and", "self", ".", "_dirty", ":", "try", ":", "retu...
Return cursor object that can be used to make queries and fetch results from the database.
[ "Return", "cursor", "object", "that", "can", "be", "used", "to", "make", "queries", "and", "fetch", "results", "from", "the", "database", "." ]
python
train
miku/gluish
examples/newspapers.py
https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/examples/newspapers.py#L67-L70
def run(self):
    """ Download the resource quietly with wget, then move it into place. """
    downloaded = shellout('wget -q "{url}" -O {output}', url=self.url)
    luigi.LocalTarget(downloaded).move(self.output().path)
[ "def", "run", "(", "self", ")", ":", "output", "=", "shellout", "(", "'wget -q \"{url}\" -O {output}'", ",", "url", "=", "self", ".", "url", ")", "luigi", ".", "LocalTarget", "(", "output", ")", ".", "move", "(", "self", ".", "output", "(", ")", ".", ...
Just run wget quietly.
[ "Just", "run", "wget", "quietly", "." ]
python
train
nilp0inter/cpe
cpe/cpelang2_3.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpelang2_3.py#L81-L114
def _check_fact_ref_eval(cls, cpel_dom): """ Returns the result (True, False, Error) of performing the specified check, unless the check isn’t supported, in which case it returns False. Error is a catch-all for all results other than True and False. :param string cpel_dom: XML infoset for the check_fact_ref element. :returns: result of performing the specified check :rtype: boolean or error """ CHECK_SYSTEM = "check-system" CHECK_LOCATION = "check-location" CHECK_ID = "check-id" checksystemID = cpel_dom.getAttribute(CHECK_SYSTEM) if (checksystemID == "http://oval.mitre.org/XMLSchema/ovaldefinitions-5"): # Perform an OVAL check. # First attribute is the URI of an OVAL definitions file. # Second attribute is an OVAL definition ID. return CPELanguage2_3._ovalcheck(cpel_dom.getAttribute(CHECK_LOCATION), cpel_dom.getAttribute(CHECK_ID)) if (checksystemID == "http://scap.nist.gov/schema/ocil/2"): # Perform an OCIL check. # First attribute is the URI of an OCIL questionnaire file. # Second attribute is OCIL questionnaire ID. return CPELanguage2_3._ocilcheck(cpel_dom.getAttribute(CHECK_LOCATION), cpel_dom.getAttribute(CHECK_ID)) # Can add additional check systems here, with each returning a # True, False, or Error value return False
[ "def", "_check_fact_ref_eval", "(", "cls", ",", "cpel_dom", ")", ":", "CHECK_SYSTEM", "=", "\"check-system\"", "CHECK_LOCATION", "=", "\"check-location\"", "CHECK_ID", "=", "\"check-id\"", "checksystemID", "=", "cpel_dom", ".", "getAttribute", "(", "CHECK_SYSTEM", ")"...
Returns the result (True, False, Error) of performing the specified check, unless the check isn't supported, in which case it returns False. Error is a catch-all for all results other than True and False. :param string cpel_dom: XML infoset for the check_fact_ref element. :returns: result of performing the specified check :rtype: boolean or error
[ "Returns", "the", "result", "(", "True", "False", "Error", ")", "of", "performing", "the", "specified", "check", "unless", "the", "check", "isn’t", "supported", "in", "which", "case", "it", "returns", "False", ".", "Error", "is", "a", "catch", "-", "all", ...
python
train
zaturox/glin
glin/zmq/messages.py
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/zmq/messages.py#L19-L21
def animation_add(sequence_number, animation_id, name):
    """Create a animation.add message"""
    # Frame layout: command string, sequence number, animation id, name.
    msg = MessageWriter().string("animation.add")
    msg = msg.uint64(sequence_number).uint32(animation_id)
    return msg.string(name).get()
[ "def", "animation_add", "(", "sequence_number", ",", "animation_id", ",", "name", ")", ":", "return", "MessageWriter", "(", ")", ".", "string", "(", "\"animation.add\"", ")", ".", "uint64", "(", "sequence_number", ")", ".", "uint32", "(", "animation_id", ")", ...
Create a animation.add message
[ "Create", "a", "animation", ".", "add", "message" ]
python
train
slarse/clanimtk
clanimtk/core.py
https://github.com/slarse/clanimtk/blob/cb93d2e914c3ecc4e0007745ff4d546318cf3902/clanimtk/core.py#L61-L77
def _raise_if_annotated(self, func):
    """Raise TypeError if a function is decorated with Annotate, as such
    functions cause visual bugs when decorated with Animate.

    Animate should be wrapped by Annotate instead.

    Args:
        func (function): Any callable.
    Raises:
        TypeError
    """
    # getattr with a False default covers both "attribute missing" and
    # "attribute present but falsy".
    if getattr(func, ANNOTATED, False):
        msg = ('Functions decorated with {!r} '
               'should not be decorated with {!r}.\n'
               'Please reverse the order of the decorators!'.format(
                   self.__class__.__name__, Annotate.__name__))
        raise TypeError(msg)
[ "def", "_raise_if_annotated", "(", "self", ",", "func", ")", ":", "if", "hasattr", "(", "func", ",", "ANNOTATED", ")", "and", "getattr", "(", "func", ",", "ANNOTATED", ")", ":", "msg", "=", "(", "'Functions decorated with {!r} '", "'should not be decorated with ...
Raise TypeError if a function is decorated with Annotate, as such functions cause visual bugs when decorated with Animate. Animate should be wrapped by Annotate instead. Args: func (function): Any callable. Raises: TypeError
[ "Raise", "TypeError", "if", "a", "function", "is", "decorated", "with", "Annotate", "as", "such", "functions", "cause", "visual", "bugs", "when", "decorated", "with", "Animate", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/journal/consensus/batch_publisher.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/journal/consensus/batch_publisher.py#L38-L56
def send(self, transactions):
    """
    Package up transactions into a batch and send them to the
    network via the provided batch_sender.
    :param transactions: list of transactions to package and broadcast.
    :return: None
    """
    signer = self._identity_signer
    # The batch header records the signer and the ids of the contained
    # transactions; it is signed as serialized bytes.
    header_bytes = BatchHeader(
        signer_public_key=signer.get_public_key().as_hex(),
        transaction_ids=[txn.header_signature for txn in transactions]
    ).SerializeToString()
    batch = Batch(
        header=header_bytes,
        transactions=transactions,
        header_signature=signer.sign(header_bytes))
    self._batch_sender.send(batch)
[ "def", "send", "(", "self", ",", "transactions", ")", ":", "txn_signatures", "=", "[", "txn", ".", "header_signature", "for", "txn", "in", "transactions", "]", "header", "=", "BatchHeader", "(", "signer_public_key", "=", "self", ".", "_identity_signer", ".", ...
Package up transactions into a batch and send them to the network via the provided batch_sender. :param transactions: list of transactions to package and broadcast. :return: None
[ "Package", "up", "transactions", "into", "a", "batch", "and", "send", "them", "to", "the", "network", "via", "the", "provided", "batch_sender", ".", ":", "param", "transactions", ":", "list", "of", "transactions", "to", "package", "and", "broadcast", ".", ":...
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sizeformatter.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sizeformatter.py#L52-L113
def bytes2human(n: Union[int, float],
                format: str = '%(value).1f %(symbol)s',
                symbols: str = 'customary') -> str:
    """
    Converts a number of bytes into a human-readable format.
    From http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.

    Args:
        n: number of bytes
        format: a format specification string
        symbols: can be one of ``"customary"``, ``"customary_ext"``,
            ``"iec"`` or ``"iec_ext"``; see http://goo.gl/kTQMs

    Returns:
        the formatted number

    Examples:

        >>> bytes2human(0)
        '0.0 B'
        >>> bytes2human(0.9)
        '0.0 B'
        >>> bytes2human(1)
        '1.0 B'
        >>> bytes2human(1.9)
        '1.0 B'
        >>> bytes2human(1024)
        '1.0 K'
        >>> bytes2human(1048576)
        '1.0 M'
        >>> bytes2human(1099511627776127398123789121)
        '909.5 Y'
        >>> bytes2human(9856, symbols="customary")
        '9.6 K'
        >>> bytes2human(9856, symbols="customary_ext")
        '9.6 kilo'
        >>> bytes2human(9856, symbols="iec")
        '9.6 Ki'
        >>> bytes2human(9856, symbols="iec_ext")
        '9.6 kibi'
        >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
        '9.8 K/sec'
        >>> # precision can be adjusted by playing with %f operator
        >>> bytes2human(10000, format="%(value).5f %(symbol)s")
        '9.76562 K'
    """  # noqa
    # NOTE: the parameter name "format" shadows the builtin; kept for
    # backward compatibility with keyword callers.
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    prefixes = SYMBOLS[symbols]
    # Walk prefixes from largest to smallest; prefix i+1 covers values
    # >= 2 ** ((i + 1) * 10).
    for i, symbol in reversed(list(enumerate(prefixes[1:]))):
        factor = 1 << (i + 1) * 10
        if n >= factor:
            # Interpolate with an explicit mapping instead of the fragile
            # `format % locals()` idiom, which depends on the accidental
            # contents of the local namespace.
            return format % {'symbol': symbol, 'value': float(n) / factor}
    return format % {'symbol': prefixes[0], 'value': n}
[ "def", "bytes2human", "(", "n", ":", "Union", "[", "int", ",", "float", "]", ",", "format", ":", "str", "=", "'%(value).1f %(symbol)s'", ",", "symbols", ":", "str", "=", "'customary'", ")", "->", "str", ":", "# noqa", "n", "=", "int", "(", "n", ")", ...
Converts a number of bytes into a human-readable format. From http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/. Args: n: number of bytes format: a format specification string symbols: can be one of ``"customary"``, ``"customary_ext"``, ``"iec"`` or ``"iec_ext"``; see http://goo.gl/kTQMs Returns: the formatted number Examples: >>> bytes2human(0) '0.0 B' >>> bytes2human(0.9) '0.0 B' >>> bytes2human(1) '1.0 B' >>> bytes2human(1.9) '1.0 B' >>> bytes2human(1024) '1.0 K' >>> bytes2human(1048576) '1.0 M' >>> bytes2human(1099511627776127398123789121) '909.5 Y' >>> bytes2human(9856, symbols="customary") '9.6 K' >>> bytes2human(9856, symbols="customary_ext") '9.6 kilo' >>> bytes2human(9856, symbols="iec") '9.6 Ki' >>> bytes2human(9856, symbols="iec_ext") '9.6 kibi' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K'
[ "Converts", "a", "number", "of", "bytes", "into", "a", "human", "-", "readable", "format", ".", "From", "http", ":", "//", "code", ".", "activestate", ".", "com", "/", "recipes", "/", "578019", "-", "bytes", "-", "to", "-", "human", "-", "human", "-"...
python
train
dmlc/xgboost
python-package/xgboost/sklearn.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/sklearn.py#L282-L300
def load_model(self, fname):
    """
    Load the model from a file.

    The model is loaded from an XGBoost internal binary format which is
    universal among the various XGBoost interfaces. Auxiliary attributes of
    the Python Booster object (such as feature names) will not be loaded.
    Label encodings (text labels to numeric labels) will be also lost.
    **If you are using only the Python interface, we recommend pickling the
    model object for best results.**

    Parameters
    ----------
    fname : string or a memory buffer
        Input file name or memory buffer(see also save_raw)
    """
    booster = self._Booster
    if booster is None:
        # Lazily create the underlying Booster with this estimator's
        # thread count.
        booster = Booster({'nthread': self.n_jobs})
        self._Booster = booster
    booster.load_model(fname)
[ "def", "load_model", "(", "self", ",", "fname", ")", ":", "if", "self", ".", "_Booster", "is", "None", ":", "self", ".", "_Booster", "=", "Booster", "(", "{", "'nthread'", ":", "self", ".", "n_jobs", "}", ")", "self", ".", "_Booster", ".", "load_mode...
Load the model from a file. The model is loaded from an XGBoost internal binary format which is universal among the various XGBoost interfaces. Auxiliary attributes of the Python Booster object (such as feature names) will not be loaded. Label encodings (text labels to numeric labels) will be also lost. **If you are using only the Python interface, we recommend pickling the model object for best results.** Parameters ---------- fname : string or a memory buffer Input file name or memory buffer(see also save_raw)
[ "Load", "the", "model", "from", "a", "file", "." ]
python
train
aparo/pyes
pyes/es.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/es.py#L931-L936
def exists(self, index, doc_type, id, **query_params):
    """
    Return if a document exists
    """
    # A HEAD request on the document path answers existence without
    # transferring the document body.
    return self._send_request('HEAD',
                              make_path(index, doc_type, id),
                              params=query_params)
[ "def", "exists", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "*", "*", "query_params", ")", ":", "path", "=", "make_path", "(", "index", ",", "doc_type", ",", "id", ")", "return", "self", ".", "_send_request", "(", "'HEAD'", ",", "pat...
Return if a document exists
[ "Return", "if", "a", "document", "exists" ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/interface.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/interface.py#L363-L373
def _on_resize(self):
    """
    When the window size changes, we erase the current output and request
    again the cursor position. When the CPR answer arrives, the output is
    drawn again.
    """
    renderer = self.renderer
    # Order matters: erase first (while the cursor is at the start
    # position), then request the position, then redraw.
    renderer.erase(leave_alternate_screen=False, erase_title=False)
    renderer.request_absolute_cursor_position()
    self._redraw()
[ "def", "_on_resize", "(", "self", ")", ":", "# Erase, request position (when cursor is at the start position)", "# and redraw again. -- The order is important.", "self", ".", "renderer", ".", "erase", "(", "leave_alternate_screen", "=", "False", ",", "erase_title", "=", "Fals...
When the window size changes, we erase the current output and request again the cursor position. When the CPR answer arrives, the output is drawn again.
[ "When", "the", "window", "size", "changes", "we", "erase", "the", "current", "output", "and", "request", "again", "the", "cursor", "position", ".", "When", "the", "CPR", "answer", "arrives", "the", "output", "is", "drawn", "again", "." ]
python
train