repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
rmohr/static3
static.py
https://github.com/rmohr/static3/blob/e5f88c5e91789bd4db7fde0cf59e4a15c3326f11/static.py#L323-L330
def _conditions(self, full_path, environ):
    """Return Etag and Last-Modified values, defaulting to now for both."""
    handler = self._match_magic(full_path)
    if handler is None:
        # No magic handler matched: derive both values from the file's mtime.
        modified = stat(full_path).st_mtime
        return str(modified), rfc822.formatdate(modified)
    return handler.conditions(full_path, environ)
[ "def", "_conditions", "(", "self", ",", "full_path", ",", "environ", ")", ":", "magic", "=", "self", ".", "_match_magic", "(", "full_path", ")", "if", "magic", "is", "not", "None", ":", "return", "magic", ".", "conditions", "(", "full_path", ",", "enviro...
Return Etag and Last-Modified values defaults to now for both.
[ "Return", "Etag", "and", "Last", "-", "Modified", "values", "defaults", "to", "now", "for", "both", "." ]
python
train
sassoftware/saspy
saspy/sasproccommons.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasproccommons.py#L516-L551
def _input_stmt(self, stmt: object) -> tuple: """ takes the input key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements """ code = '' cls = '' if isinstance(stmt, str): code += "%s " % (stmt) elif isinstance(stmt, dict): try: if 'interval' in stmt.keys(): if isinstance(stmt['interval'], str): code += "%s " % stmt['interval'] if isinstance(stmt['interval'], list): code += "%s " % " ".join(stmt['interval']) if 'nominal' in stmt.keys(): if isinstance(stmt['nominal'], str): code += "%s " % stmt['nominal'] cls += "%s " % stmt['nominal'] if isinstance(stmt['nominal'], list): code += "%s " % " ".join(stmt['nominal']) cls += "%s " % " ".join(stmt['nominal']) except: raise SyntaxError("Proper Keys not found for INPUT dictionary: %s" % stmt.keys()) elif isinstance(stmt, list): if len(stmt) == 1: code += "%s" % str(stmt[0]) elif len(stmt) > 1: code += "%s" % " ".join(stmt) else: raise SyntaxError("The input list has no members") else: raise SyntaxError("INPUT is in an unknown format: %s" % str(stmt)) return (code, cls)
[ "def", "_input_stmt", "(", "self", ",", "stmt", ":", "object", ")", "->", "tuple", ":", "code", "=", "''", "cls", "=", "''", "if", "isinstance", "(", "stmt", ",", "str", ")", ":", "code", "+=", "\"%s \"", "%", "(", "stmt", ")", "elif", "isinstance"...
takes the input key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements
[ "takes", "the", "input", "key", "from", "kwargs", "and", "processes", "it", "to", "aid", "in", "the", "generation", "of", "a", "model", "statement", ":", "param", "stmt", ":", "str", "list", "or", "dict", "that", "contains", "the", "model", "information", ...
python
train
asphalt-framework/asphalt
asphalt/core/context.py
https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/context.py#L444-L462
def require_resource(self, type: Type[T_Resource], name: str = 'default') -> T_Resource:
    """
    Look up a resource in the chain of contexts and raise an exception if it is not found.

    This is like :meth:`get_resource` except that instead of returning ``None`` when a resource
    is not found, it will raise :exc:`~asphalt.core.context.ResourceNotFound`.

    :param type: type of the requested resource
    :param name: name of the requested resource
    :return: the requested resource
    :raises asphalt.core.context.ResourceNotFound: if a resource of the given type and name was
        not found

    """
    found = self.get_resource(type, name)
    if found is not None:
        return found
    raise ResourceNotFound(type, name)
[ "def", "require_resource", "(", "self", ",", "type", ":", "Type", "[", "T_Resource", "]", ",", "name", ":", "str", "=", "'default'", ")", "->", "T_Resource", ":", "resource", "=", "self", ".", "get_resource", "(", "type", ",", "name", ")", "if", "resou...
Look up a resource in the chain of contexts and raise an exception if it is not found. This is like :meth:`get_resource` except that instead of returning ``None`` when a resource is not found, it will raise :exc:`~asphalt.core.context.ResourceNotFound`. :param type: type of the requested resource :param name: name of the requested resource :return: the requested resource :raises asphalt.core.context.ResourceNotFound: if a resource of the given type and name was not found
[ "Look", "up", "a", "resource", "in", "the", "chain", "of", "contexts", "and", "raise", "an", "exception", "if", "it", "is", "not", "found", "." ]
python
train
CityOfZion/neo-python
neo/SmartContract/ContractParameter.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/SmartContract/ContractParameter.py#L81-L130
def ToJson(self, auto_hex=True):
    """
    Converts a ContractParameter instance to a json representation

    Args:
        auto_hex (bool): if True (default), ByteArray values are hex-encoded;
            if False, the raw value is emitted unchanged.

    Returns:
        dict: a dictionary representation of the contract parameter
    """
    jsn = {}
    jsn['type'] = str(ContractParameterType(self.Type))

    if self.Type == ContractParameterType.Signature:
        jsn['value'] = self.Value.hex()
    elif self.Type == ContractParameterType.ByteArray:
        if auto_hex:
            jsn['value'] = self.Value.hex()
        else:
            jsn['value'] = self.Value
    elif self.Type == ContractParameterType.Boolean:
        jsn['value'] = self.Value
    elif self.Type == ContractParameterType.String:
        jsn['value'] = str(self.Value)
    elif self.Type == ContractParameterType.Integer:
        jsn['value'] = self.Value
    # @TODO, see ``FromJson``, not sure if this is working properly
    elif self.Type == ContractParameterType.PublicKey:
        jsn['value'] = self.Value.ToString()
    elif self.Type in [ContractParameterType.Hash160, ContractParameterType.Hash256]:
        jsn['value'] = self.Value.ToString()
    elif self.Type == ContractParameterType.Array:
        res = []
        for item in self.Value:
            # Falsy items (e.g. None) are skipped rather than serialized.
            if item:
                res.append(item.ToJson(auto_hex=auto_hex))
        jsn['value'] = res
    elif self.Type == ContractParameterType.InteropInterface:
        try:
            jsn['value'] = self.Value.ToJson()
        except Exception:
            # Best-effort: interop values that cannot serialize simply
            # omit 'value'. (Unused `as e` binding removed.)
            pass
    return jsn
[ "def", "ToJson", "(", "self", ",", "auto_hex", "=", "True", ")", ":", "jsn", "=", "{", "}", "jsn", "[", "'type'", "]", "=", "str", "(", "ContractParameterType", "(", "self", ".", "Type", ")", ")", "if", "self", ".", "Type", "==", "ContractParameterTy...
Converts a ContractParameter instance to a json representation Returns: dict: a dictionary representation of the contract parameter
[ "Converts", "a", "ContractParameter", "instance", "to", "a", "json", "representation" ]
python
train
nschloe/pygmsh
pygmsh/opencascade/geometry.py
https://github.com/nschloe/pygmsh/blob/1a1a07481aebe6c161b60dd31e0fbe1ddf330d61/pygmsh/opencascade/geometry.py#L84-L161
def _boolean_operation(
    self, operation, input_entities, tool_entities, delete_first=True, delete_other=True,
):
    """Boolean operations, see
    https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations

    input_entity and tool_entity are called object and tool in gmsh
    documentation.

    :param operation: name of the gmsh Boolean operation to emit
        (e.g. ``BooleanUnion``) — assumption, confirm against callers.
    :param input_entities: "object" entities; all must share one dimension.
    :param tool_entities: "tool" entities; must match that dimension.
    :param delete_first: emit ``Delete;`` for the input entities.
    :param delete_other: emit ``Delete;`` for the tool entities.
    :return: a SurfaceBase or VolumeBase wrapping the named result list.
    """
    # Each Boolean result gets a unique sequential name (``bo<N>``).
    self._BOOLEAN_ID += 1

    # assert that all entities are of the same dimensionality
    dim = None
    legal_dim_types = {1: "Line", 2: "Surface", 3: "Volume"}
    for ldt in legal_dim_types:
        if input_entities[0].dimension == ldt:
            dim = ldt
            break
    assert dim is not None, "Illegal input type '{}' for Boolean operation.".format(
        type(input_entities[0])
    )
    for e in input_entities[1:] + tool_entities:
        assert (
            e.dimension == dim
        ), "Incompatible input type '{}' for Boolean operation.".format(type(e))

    name = "bo{}".format(self._BOOLEAN_ID)
    input_delete = "Delete;" if delete_first else ""
    tool_delete = "Delete;" if delete_other else ""
    legal_dim_type = legal_dim_types[dim]

    # gmsh syntax: each entity rendered as e.g. ``Surface{id};`` with a
    # trailing semicolon only when the list is non-empty.
    if input_entities:
        formatted_input_entities = (
            ";".join(["%s{%s}" % (legal_dim_type, e.id) for e in input_entities])
            + ";"
        )
    else:
        formatted_input_entities = ""
    if tool_entities:
        formatted_tool_entities = (
            ";".join(["%s{%s}" % (legal_dim_type, e.id) for e in tool_entities])
            + ";"
        )
    else:
        formatted_tool_entities = ""

    self._GMSH_CODE.append(
        # I wonder what this line does in Lisp. ;)
        # '{}[] = {}{{{} {{{}}}; {}}} {{{} {{{}}}; {}}};'
        # .format(
        #     name,
        #     operation,
        #     legal_dim_types[dim],
        #     ';'.join(e.id for e in input_entities),
        #     'Delete;' if delete_first else '',
        #     legal_dim_types[dim],
        #     ';'.join(e.id for e in tool_entities),
        #     'Delete;' if delete_other else ''
        #     ))
        "%(name)s[] = %(op)s{ %(ientities)s %(idelete)s } { %(tentities)s %(tdelete)s};"
        % {
            "name": name,
            "op": operation,
            "ientities": formatted_input_entities,
            "idelete": input_delete,
            "tentities": formatted_tool_entities,
            "tdelete": tool_delete,
        }
    )

    # Line results have no wrapper class (None) — calling this with dim==1
    # would raise TypeError; presumably only 2-D/3-D ops occur in practice.
    mapping = {"Line": None, "Surface": SurfaceBase, "Volume": VolumeBase}
    return mapping[legal_dim_types[dim]](id0=name, is_list=True)
[ "def", "_boolean_operation", "(", "self", ",", "operation", ",", "input_entities", ",", "tool_entities", ",", "delete_first", "=", "True", ",", "delete_other", "=", "True", ",", ")", ":", "self", ".", "_BOOLEAN_ID", "+=", "1", "# assert that all entities are of th...
Boolean operations, see https://gmsh.info/doc/texinfo/gmsh.html#Boolean-operations input_entity and tool_entity are called object and tool in gmsh documentation.
[ "Boolean", "operations", "see", "https", ":", "//", "gmsh", ".", "info", "/", "doc", "/", "texinfo", "/", "gmsh", ".", "html#Boolean", "-", "operations", "input_entity", "and", "tool_entity", "are", "called", "object", "and", "tool", "in", "gmsh", "documenta...
python
train
KennethWilke/PingdomLib
pingdomlib/pingdom.py
https://github.com/KennethWilke/PingdomLib/blob/3ed1e481f9c9d16b032558d62fb05c2166e162ed/pingdomlib/pingdom.py#L52-L97
def request(self, method, url, parameters=None):
    """Requests wrapper function.

    :param method: HTTP method — one of GET, POST, PUT, DELETE
        (case-insensitive).
    :param url: path appended to ``self.url`` to form the request URL.
    :param parameters: optional dict of query/body parameters.
    :return: the ``requests`` response object.
    :raises Exception: for an unrecognized HTTP method.
    :raises requests.HTTPError: via ``raise_for_status`` on non-200 replies.
    """
    # Avoid the mutable-default-argument pitfall: default was `dict()`.
    if parameters is None:
        parameters = dict()

    # The requests library uses urllib, which serializes to "True"/"False" while Pingdom requires lowercase
    parameters = self._serializeBooleans(parameters)

    headers = {'App-Key': self.apikey}

    if self.accountemail:
        headers.update({'Account-Email': self.accountemail})

    # Method selection handling
    if method.upper() == 'GET':
        response = requests.get(self.url + url, params=parameters,
                                auth=(self.username, self.password),
                                headers=headers)
    elif method.upper() == 'POST':
        response = requests.post(self.url + url, data=parameters,
                                 auth=(self.username, self.password),
                                 headers=headers)
    elif method.upper() == 'PUT':
        response = requests.put(self.url + url, data=parameters,
                                auth=(self.username, self.password),
                                headers=headers)
    elif method.upper() == 'DELETE':
        response = requests.delete(self.url + url, params=parameters,
                                   auth=(self.username, self.password),
                                   headers=headers)
    else:
        raise Exception("Invalid method in pingdom request")

    # Store pingdom api limits
    self.shortlimit = response.headers.get(
        'Req-Limit-Short', self.shortlimit)
    self.longlimit = response.headers.get(
        'Req-Limit-Long', self.longlimit)

    # Verify OK response
    if response.status_code != 200:
        # Added missing newline so the two messages don't run together.
        sys.stderr.write('ERROR from %s: %d\n' % (response.url,
                                                  response.status_code))
        sys.stderr.write('Returned data: %s\n' % response.json())
        response.raise_for_status()

    return response
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "parameters", "=", "dict", "(", ")", ")", ":", "# The requests library uses urllib, which serializes to \"True\"/\"False\" while Pingdom requires lowercase", "parameters", "=", "self", ".", "_serializeBooleans",...
Requests wrapper function
[ "Requests", "wrapper", "function" ]
python
train
davenquinn/Attitude
attitude/error/bootstrap.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/error/bootstrap.py#L6-L16
def bootstrap(array): """ Provides a bootstrap resampling of an array. Provides another statistical method to estimate the variance of a dataset. For a `PCA` object in this library, it should be applied to `Orientation.array` method. """ reg_func = lambda a: N.linalg.svd(a,full_matrices=False)[2][2] beta_boots = bootstrap(array, func=reg_func) return yhat, yhat_boots
[ "def", "bootstrap", "(", "array", ")", ":", "reg_func", "=", "lambda", "a", ":", "N", ".", "linalg", ".", "svd", "(", "a", ",", "full_matrices", "=", "False", ")", "[", "2", "]", "[", "2", "]", "beta_boots", "=", "bootstrap", "(", "array", ",", "...
Provides a bootstrap resampling of an array. Provides another statistical method to estimate the variance of a dataset. For a `PCA` object in this library, it should be applied to `Orientation.array` method.
[ "Provides", "a", "bootstrap", "resampling", "of", "an", "array", ".", "Provides", "another", "statistical", "method", "to", "estimate", "the", "variance", "of", "a", "dataset", "." ]
python
train
pystorm/pystorm
pystorm/component.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L504-L542
def run(self):
    """Main run loop for all components.

    Performs initial handshake with Storm and reads Tuples handing them off
    to subclasses.  Any exceptions are caught and logged back to Storm
    prior to the Python process exiting.

    .. warning::

        Subclasses should **not** override this method.
    """
    storm_conf, context = self.read_handshake()
    self._setup_component(storm_conf, context)
    self.initialize(storm_conf, context)
    while True:
        try:
            self._run()
        except StormWentAwayError:
            # Parent process gone: nothing left to report to, just exit.
            log.info("Exiting because parent Storm process went away.")
            self._exit(2)
        except Exception as e:
            log_msg = "Exception in {}.run()".format(self.__class__.__name__)
            # Capture exc_info now: the handlers below may raise and
            # replace sys.exc_info() with their own exception.
            exc_info = sys.exc_info()
            try:
                # Report the failure back to Storm via the component logger.
                self.logger.error(log_msg, exc_info=True)
                self._handle_run_exception(e)
            except StormWentAwayError:
                # Storm died while we were reporting — log locally and exit.
                log.error(log_msg, exc_info=exc_info)
                log.info("Exiting because parent Storm process went away.")
                self._exit(2)
            except:
                # The error handler itself failed; log both exceptions
                # locally so neither is lost.
                log.error(log_msg, exc_info=exc_info)
                log.error(
                    "While trying to handle previous exception...",
                    exc_info=sys.exc_info(),
                )
            if self.exit_on_exception:
                self._exit(1)
[ "def", "run", "(", "self", ")", ":", "storm_conf", ",", "context", "=", "self", ".", "read_handshake", "(", ")", "self", ".", "_setup_component", "(", "storm_conf", ",", "context", ")", "self", ".", "initialize", "(", "storm_conf", ",", "context", ")", "...
Main run loop for all components. Performs initial handshake with Storm and reads Tuples handing them off to subclasses. Any exceptions are caught and logged back to Storm prior to the Python process exiting. .. warning:: Subclasses should **not** override this method.
[ "Main", "run", "loop", "for", "all", "components", "." ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1313-L1328
def insertRnaQuantificationSet(self, rnaQuantificationSet):
    """
    Inserts the specified rnaQuantificationSet into this repository.
    """
    try:
        models.Rnaquantificationset.create(
            id=rnaQuantificationSet.getId(),
            datasetid=rnaQuantificationSet.getParentContainer().getId(),
            referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
            name=rnaQuantificationSet.getLocalId(),
            dataurl=rnaQuantificationSet.getDataUrl(),
            attributes=json.dumps(rnaQuantificationSet.getAttributes()))
    except Exception:
        # NOTE(review): every insert failure (connection errors, constraint
        # violations of any kind) is reported as a duplicate-name error —
        # confirm whether a narrower DB exception should be caught instead.
        raise exceptions.DuplicateNameException(
            rnaQuantificationSet.getLocalId(),
            rnaQuantificationSet.getParentContainer().getLocalId())
[ "def", "insertRnaQuantificationSet", "(", "self", ",", "rnaQuantificationSet", ")", ":", "try", ":", "models", ".", "Rnaquantificationset", ".", "create", "(", "id", "=", "rnaQuantificationSet", ".", "getId", "(", ")", ",", "datasetid", "=", "rnaQuantificationSet"...
Inserts a the specified rnaQuantificationSet into this repository.
[ "Inserts", "a", "the", "specified", "rnaQuantificationSet", "into", "this", "repository", "." ]
python
train
androguard/androguard
androguard/core/androconf.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/androconf.py#L280-L293
def make_color_tuple(color):
    """
    turn something like "#000000" into 0,0,0 or
    "#FFFFFF into "255,255,255"
    """
    # Parse each two-hex-digit channel straight out of the "#RRGGBB" string.
    red, green, blue = (int(color[i:i + 2], 16) for i in (1, 3, 5))
    return red, green, blue
[ "def", "make_color_tuple", "(", "color", ")", ":", "R", "=", "color", "[", "1", ":", "3", "]", "G", "=", "color", "[", "3", ":", "5", "]", "B", "=", "color", "[", "5", ":", "7", "]", "R", "=", "int", "(", "R", ",", "16", ")", "G", "=", ...
turn something like "#000000" into 0,0,0 or "#FFFFFF into "255,255,255"
[ "turn", "something", "like", "#000000", "into", "0", "0", "0", "or", "#FFFFFF", "into", "255", "255", "255" ]
python
train
postlund/pyatv
pyatv/airplay/auth.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/airplay/auth.py#L36-L58
async def finish_authentication(self, username, password):
    """Finish authentication process.

    A username (generated by new_credentials) and the PIN code shown on
    screen must be provided.
    """
    # Step 1: seed the SRP session and request the server's parameters
    self.srp.step1(username, password)
    step1_data = await self._send_plist(
        'step1', method='pin', user=username)
    step1_resp = plistlib.loads(step1_data)

    # Step 2: exchange our public key and proof with the device
    public_key, key_proof = self.srp.step2(
        step1_resp['pk'], step1_resp['salt'])
    await self._send_plist(
        'step2',
        pk=binascii.unhexlify(public_key),
        proof=binascii.unhexlify(key_proof))

    # Step 3: deliver the encrypted public key and authentication tag
    epk, tag = self.srp.step3()
    await self._send_plist('step3', epk=epk, authTag=tag)
    return True
[ "async", "def", "finish_authentication", "(", "self", ",", "username", ",", "password", ")", ":", "# Step 1", "self", ".", "srp", ".", "step1", "(", "username", ",", "password", ")", "data", "=", "await", "self", ".", "_send_plist", "(", "'step1'", ",", ...
Finish authentication process. A username (generated by new_credentials) and the PIN code shown on screen must be provided.
[ "Finish", "authentication", "process", "." ]
python
train
bmcfee/resampy
resampy/core.py
https://github.com/bmcfee/resampy/blob/e5238a7120dcf0d76813bcd3e277998ead8fc308/resampy/core.py#L14-L115
def resample(x, sr_orig, sr_new, axis=-1, filter='kaiser_best', **kwargs):
    '''Resample a signal x from sr_orig to sr_new along a given axis.

    Parameters
    ----------
    x : np.ndarray, dtype=np.float*
        The input signal(s) to resample.

    sr_orig : int > 0
        The sampling rate of x

    sr_new : int > 0
        The target sampling rate of the output signal(s)

    axis : int
        The target axis along which to resample `x`

    filter : optional, str or callable
        The resampling filter to use.

        By default, uses the `kaiser_best` (pre-computed filter).

    kwargs
        additional keyword arguments provided to the specified filter

    Returns
    -------
    y : np.ndarray
        `x` resampled to `sr_new`

    Raises
    ------
    ValueError
        if `sr_orig` or `sr_new` is not positive

    TypeError
        if the input signal `x` has an unsupported data type.

    Examples
    --------
    >>> # Generate a sine wave at 440 Hz for 5 seconds
    >>> sr_orig = 44100.0
    >>> x = np.sin(2 * np.pi * 440.0 / sr_orig * np.arange(5 * sr_orig))
    >>> x
    array([ 0.   ,  0.063, ..., -0.125, -0.063])
    >>> # Resample to 22050 with default parameters
    >>> resampy.resample(x, sr_orig, 22050)
    array([ 0.011,  0.123, ..., -0.193, -0.103])
    >>> # Resample using the fast (low-quality) filter
    >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_fast')
    array([ 0.013,  0.121, ..., -0.189, -0.102])
    >>> # Resample using a high-quality filter
    >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_best')
    array([ 0.011,  0.123, ..., -0.193, -0.103])
    >>> # Resample using a Hann-windowed sinc filter
    >>> resampy.resample(x, sr_orig, 22050, filter='sinc_window',
    ...                  window=scipy.signal.hann)
    array([ 0.011,  0.123, ..., -0.193, -0.103])
    >>> # Generate stereo data
    >>> x_right = np.sin(2 * np.pi * 880.0 / sr_orig * np.arange(len(x)))
    >>> x_stereo = np.stack([x, x_right])
    >>> x_stereo.shape
    (2, 220500)
    >>> # Resample along the time axis (1)
    >>> y_stereo = resampy.resample(x_stereo, sr_orig, 22050, axis=1)
    >>> y_stereo.shape
    (2, 110250)
    '''

    if sr_orig <= 0:
        raise ValueError('Invalid sample rate: sr_orig={}'.format(sr_orig))

    if sr_new <= 0:
        raise ValueError('Invalid sample rate: sr_new={}'.format(sr_new))

    sample_ratio = float(sr_new) / sr_orig

    # Set up the output shape
    shape = list(x.shape)
    shape[axis] = int(shape[axis] * sample_ratio)

    if shape[axis] < 1:
        raise ValueError('Input signal length={} is too small to '
                         'resample from {}->{}'.format(x.shape[axis],
                                                       sr_orig, sr_new))

    y = np.zeros(shape, dtype=x.dtype)

    interp_win, precision, _ = get_filter(filter, **kwargs)

    # For downsampling, scale the filter to preserve unit gain.
    if sample_ratio < 1:
        interp_win *= sample_ratio

    # Per-tap differences used for linear interpolation between filter taps.
    interp_delta = np.zeros_like(interp_win)
    interp_delta[:-1] = np.diff(interp_win)

    # Construct 2d views of the data with the resampling axis on the first dimension
    x_2d = x.swapaxes(0, axis).reshape((x.shape[axis], -1))
    y_2d = y.swapaxes(0, axis).reshape((y.shape[axis], -1))
    resample_f(x_2d, y_2d, sample_ratio, interp_win, interp_delta, precision)

    return y
[ "def", "resample", "(", "x", ",", "sr_orig", ",", "sr_new", ",", "axis", "=", "-", "1", ",", "filter", "=", "'kaiser_best'", ",", "*", "*", "kwargs", ")", ":", "if", "sr_orig", "<=", "0", ":", "raise", "ValueError", "(", "'Invalid sample rate: sr_orig={}...
Resample a signal x from sr_orig to sr_new along a given axis. Parameters ---------- x : np.ndarray, dtype=np.float* The input signal(s) to resample. sr_orig : int > 0 The sampling rate of x sr_new : int > 0 The target sampling rate of the output signal(s) axis : int The target axis along which to resample `x` filter : optional, str or callable The resampling filter to use. By default, uses the `kaiser_best` (pre-computed filter). kwargs additional keyword arguments provided to the specified filter Returns ------- y : np.ndarray `x` resampled to `sr_new` Raises ------ ValueError if `sr_orig` or `sr_new` is not positive TypeError if the input signal `x` has an unsupported data type. Examples -------- >>> # Generate a sine wave at 440 Hz for 5 seconds >>> sr_orig = 44100.0 >>> x = np.sin(2 * np.pi * 440.0 / sr_orig * np.arange(5 * sr_orig)) >>> x array([ 0. , 0.063, ..., -0.125, -0.063]) >>> # Resample to 22050 with default parameters >>> resampy.resample(x, sr_orig, 22050) array([ 0.011, 0.123, ..., -0.193, -0.103]) >>> # Resample using the fast (low-quality) filter >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_fast') array([ 0.013, 0.121, ..., -0.189, -0.102]) >>> # Resample using a high-quality filter >>> resampy.resample(x, sr_orig, 22050, filter='kaiser_best') array([ 0.011, 0.123, ..., -0.193, -0.103]) >>> # Resample using a Hann-windowed sinc filter >>> resampy.resample(x, sr_orig, 22050, filter='sinc_window', ... window=scipy.signal.hann) array([ 0.011, 0.123, ..., -0.193, -0.103]) >>> # Generate stereo data >>> x_right = np.sin(2 * np.pi * 880.0 / sr_orig * np.arange(len(x)))]) >>> x_stereo = np.stack([x, x_right]) >>> x_stereo.shape (2, 220500) >>> # Resample along the time axis (1) >>> y_stereo = resampy.resample(x, sr_orig, 22050, axis=1) >>> y_stereo.shape (2, 110250)
[ "Resample", "a", "signal", "x", "from", "sr_orig", "to", "sr_new", "along", "a", "given", "axis", "." ]
python
train
awslabs/serverless-application-model
samtranslator/model/function_policies.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/model/function_policies.py#L96-L105
def _contains_policies(self, resource_properties): """ Is there policies data in this resource? :param dict resource_properties: Properties of the resource :return: True if we can process this resource. False, otherwise """ return resource_properties is not None \ and isinstance(resource_properties, dict) \ and self.POLICIES_PROPERTY_NAME in resource_properties
[ "def", "_contains_policies", "(", "self", ",", "resource_properties", ")", ":", "return", "resource_properties", "is", "not", "None", "and", "isinstance", "(", "resource_properties", ",", "dict", ")", "and", "self", ".", "POLICIES_PROPERTY_NAME", "in", "resource_pro...
Is there policies data in this resource? :param dict resource_properties: Properties of the resource :return: True if we can process this resource. False, otherwise
[ "Is", "there", "policies", "data", "in", "this", "resource?" ]
python
train
workforce-data-initiative/skills-utils
skills_utils/s3.py
https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/s3.py#L44-L63
def upload_dict(s3_conn, s3_prefix, data_to_sync):
    """Syncs a dictionary to an S3 bucket, serializing each value in the
    dictionary as a JSON file with the key as its name.

    Args:
        s3_conn: (boto.s3.connection) an s3 connection
        s3_prefix: (str) the destination prefix
        data_to_sync: (dict)
    """
    bucket_name, prefix = split_s3_path(s3_prefix)
    bucket = s3_conn.get_bucket(bucket_name)
    for entry_key, entry_value in data_to_sync.items():
        # One JSON object per dictionary entry, named <prefix>/<key>.json
        full_name = '{}/{}.json'.format(prefix, entry_key)
        destination = boto.s3.key.Key(bucket=bucket, name=full_name)
        logging.info('uploading key %s', full_name)
        destination.set_contents_from_string(json.dumps(entry_value))
[ "def", "upload_dict", "(", "s3_conn", ",", "s3_prefix", ",", "data_to_sync", ")", ":", "bucket_name", ",", "prefix", "=", "split_s3_path", "(", "s3_prefix", ")", "bucket", "=", "s3_conn", ".", "get_bucket", "(", "bucket_name", ")", "for", "key", ",", "value"...
Syncs a dictionary to an S3 bucket, serializing each value in the dictionary as a JSON file with the key as its name. Args: s3_conn: (boto.s3.connection) an s3 connection s3_prefix: (str) the destination prefix data_to_sync: (dict)
[ "Syncs", "a", "dictionary", "to", "an", "S3", "bucket", "serializing", "each", "value", "in", "the", "dictionary", "as", "a", "JSON", "file", "with", "the", "key", "as", "its", "name", "." ]
python
train
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9046-L9101
def tshift(self, periods=1, freq=None, axis=0):
    """
    Shift the time index, using the index's frequency if available.

    Parameters
    ----------
    periods : int
        Number of periods to move, can be positive or negative
    freq : DateOffset, timedelta, or time rule string, default None
        Increment to use from the tseries module or time rule (e.g. 'EOM')
    axis : int or basestring
        Corresponds to the axis that contains the Index

    Returns
    -------
    shifted : NDFrame

    Notes
    -----
    If freq is not specified then tries to use the freq or inferred_freq
    attributes of the index. If neither of those attributes exist, a
    ValueError is thrown
    """
    index = self._get_axis(axis)
    # Resolve the shift frequency: explicit arg, then the index's own
    # freq, then its inferred freq.
    if freq is None:
        freq = getattr(index, 'freq', None)

    if freq is None:
        freq = getattr(index, 'inferred_freq', None)

    if freq is None:
        msg = 'Freq was not given and was not set in the index'
        raise ValueError(msg)

    # Shifting by zero periods is a no-op; return self unchanged.
    if periods == 0:
        return self

    if isinstance(freq, str):
        freq = to_offset(freq)

    block_axis = self._get_block_manager_axis(axis)
    if isinstance(index, PeriodIndex):
        # A PeriodIndex can only be shifted by its own frequency;
        # anything else is ambiguous and rejected.
        orig_freq = to_offset(index.freq)
        if freq == orig_freq:
            new_data = self._data.copy()
            new_data.axes[block_axis] = index.shift(periods)
        else:
            msg = ('Given freq %s does not match PeriodIndex freq %s' %
                   (freq.rule_code, orig_freq.rule_code))
            raise ValueError(msg)
    else:
        new_data = self._data.copy()
        new_data.axes[block_axis] = index.shift(periods, freq)

    # Propagate metadata from self onto the shifted result.
    return self._constructor(new_data).__finalize__(self)
[ "def", "tshift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "0", ")", ":", "index", "=", "self", ".", "_get_axis", "(", "axis", ")", "if", "freq", "is", "None", ":", "freq", "=", "getattr", "(", "index", ...
Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown
[ "Shift", "the", "time", "index", "using", "the", "index", "s", "frequency", "if", "available", "." ]
python
train
Phyks/libbmc
libbmc/citations/pdf.py
https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/pdf.py#L219-L240
def pdfextract_dois(pdf_file):
    """
    Extract DOIs of references using \
            `pdfextract <https://github.com/CrossRef/pdfextract>`_.

    .. note::

        See ``libbmc.citations.pdf.pdfextract`` function as this one is just \
                a wrapper around it.

        See ``libbmc.citations.plaintext.get_cited_dois`` as well for the \
                returned value, as it is ultimately called by this function.

    :param pdf_file: Path to the PDF file to handle.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    # Run pdf-extract on the PDF and parse the XML it produces.
    xml_output = pdfextract(pdf_file)
    tree_root = ET.fromstring(xml_output)
    # Pull the text of every <reference> element.
    citations = [node.text for node in tree_root.iter("reference")]
    # Resolve the DOIs from the plaintext citations.
    return plaintext.get_cited_dois(citations)
[ "def", "pdfextract_dois", "(", "pdf_file", ")", ":", "# Call pdf-extract on the PDF file", "references", "=", "pdfextract", "(", "pdf_file", ")", "# Parse the resulting XML", "root", "=", "ET", ".", "fromstring", "(", "references", ")", "plaintext_references", "=", "[...
Extract DOIs of references using \ `pdfextract <https://github.com/CrossRef/pdfextract>`_. .. note:: See ``libbmc.citations.pdf.pdfextract`` function as this one is just \ a wrapper around it. See ``libbmc.citations.plaintext.get_cited_dois`` as well for the \ returned value, as it is ultimately called by this function. :param pdf_file: Path to the PDF file to handle. :returns: A dict of cleaned plaintext citations and their associated DOI.
[ "Extract", "DOIs", "of", "references", "using", "\\", "pdfextract", "<https", ":", "//", "github", ".", "com", "/", "CrossRef", "/", "pdfextract", ">", "_", "." ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L1272-L1277
def p_continue_statement_2(self, p):
    """continue_statement : CONTINUE identifier SEMI
                          | CONTINUE identifier AUTOSEMI
    """
    # NOTE: the docstring above is a PLY grammar rule, not documentation —
    # its text drives the parser and must not be reworded.
    # Build a Continue AST node carrying the labeled identifier (p[2]).
    p[0] = self.asttypes.Continue(p[2])
    # Record source position info on the new node from the parser state.
    p[0].setpos(p)
[ "def", "p_continue_statement_2", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "self", ".", "asttypes", ".", "Continue", "(", "p", "[", "2", "]", ")", "p", "[", "0", "]", ".", "setpos", "(", "p", ")" ]
continue_statement : CONTINUE identifier SEMI | CONTINUE identifier AUTOSEMI
[ "continue_statement", ":", "CONTINUE", "identifier", "SEMI", "|", "CONTINUE", "identifier", "AUTOSEMI" ]
python
train
rigetti/quantumflow
quantumflow/utils.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/utils.py#L108-L116
def spanning_tree_count(graph: nx.Graph) -> int:
    """Return the number of unique spanning trees of a graph, using
    Kirchhoff's matrix tree theorem.
    """
    # By the matrix tree theorem, any cofactor of the graph Laplacian
    # equals the spanning-tree count; drop the last row and column.
    lap = nx.laplacian_matrix(graph).toarray()
    minor = lap[:-1, :-1]
    # The determinant is an integer in exact arithmetic; round away
    # floating-point noise before converting.
    return int(round(np.linalg.det(minor)))
[ "def", "spanning_tree_count", "(", "graph", ":", "nx", ".", "Graph", ")", "->", "int", ":", "laplacian", "=", "nx", ".", "laplacian_matrix", "(", "graph", ")", ".", "toarray", "(", ")", "comatrix", "=", "laplacian", "[", ":", "-", "1", ",", ":", "-",...
Return the number of unique spanning trees of a graph, using Kirchhoff's matrix tree theorem.
[ "Return", "the", "number", "of", "unique", "spanning", "trees", "of", "a", "graph", "using", "Kirchhoff", "s", "matrix", "tree", "theorem", "." ]
python
train
NLeSC/noodles
noodles/prov/sqlite.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/prov/sqlite.py#L178-L186
def register(self, job): """Takes a job (unencoded) and adorns it with a unique key; this makes an entry in the database without any further specification.""" with self.lock: self.cur.execute( 'insert into "jobs" ("name", "session", "status") ' 'values (?, ?, ?)', (job.name, self.session, Status.INACTIVE)) self.jobs[self.cur.lastrowid] = job return JobMessage(self.cur.lastrowid, job.node)
[ "def", "register", "(", "self", ",", "job", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "cur", ".", "execute", "(", "'insert into \"jobs\" (\"name\", \"session\", \"status\") '", "'values (?, ?, ?)'", ",", "(", "job", ".", "name", ",", "self", "."...
Takes a job (unencoded) and adorns it with a unique key; this makes an entry in the database without any further specification.
[ "Takes", "a", "job", "(", "unencoded", ")", "and", "adorns", "it", "with", "a", "unique", "key", ";", "this", "makes", "an", "entry", "in", "the", "database", "without", "any", "further", "specification", "." ]
python
train
ssato/python-anyconfig
src/anyconfig/api.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/api.py#L480-L523
def loads(content, ac_parser=None, ac_dict=None, ac_template=False, ac_context=None, **options): """ :param content: Configuration file's content (a string) :param ac_parser: Forced parser type or ID or parser object :param ac_dict: callable (function or class) to make mapping object will be returned as a result or None. If not given or ac_dict is None, default mapping object used to store resutls is dict or :class:`collections.OrderedDict` if ac_ordered is True and selected backend can keep the order of items in mapping objects. :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Context dict to instantiate template :param options: Optional keyword arguments. See also the description of 'options' in :func:`single_load` function. :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError """ if ac_parser is None: LOGGER.warning("ac_parser was not given but it's must to find correct " "parser to load configurations from string.") return None psr = find(None, forced_type=ac_parser) schema = None ac_schema = options.get("ac_schema", None) if ac_schema is not None: options["ac_schema"] = None schema = loads(ac_schema, ac_parser=psr, ac_dict=ac_dict, ac_template=ac_template, ac_context=ac_context, **options) if ac_template: compiled = anyconfig.template.try_render(content=content, ctx=ac_context, **options) if compiled is not None: content = compiled cnf = psr.loads(content, ac_dict=ac_dict, **options) cnf = _try_validate(cnf, schema, **options) return anyconfig.query.query(cnf, **options)
[ "def", "loads", "(", "content", ",", "ac_parser", "=", "None", ",", "ac_dict", "=", "None", ",", "ac_template", "=", "False", ",", "ac_context", "=", "None", ",", "*", "*", "options", ")", ":", "if", "ac_parser", "is", "None", ":", "LOGGER", ".", "wa...
:param content: Configuration file's content (a string) :param ac_parser: Forced parser type or ID or parser object :param ac_dict: callable (function or class) to make mapping object will be returned as a result or None. If not given or ac_dict is None, default mapping object used to store resutls is dict or :class:`collections.OrderedDict` if ac_ordered is True and selected backend can keep the order of items in mapping objects. :param ac_template: Assume configuration file may be a template file and try to compile it AAR if True :param ac_context: Context dict to instantiate template :param options: Optional keyword arguments. See also the description of 'options' in :func:`single_load` function. :return: Mapping object or any query result might be primitive objects :raises: ValueError, UnknownProcessorTypeError
[ ":", "param", "content", ":", "Configuration", "file", "s", "content", "(", "a", "string", ")", ":", "param", "ac_parser", ":", "Forced", "parser", "type", "or", "ID", "or", "parser", "object", ":", "param", "ac_dict", ":", "callable", "(", "function", "...
python
train
StanfordVL/robosuite
robosuite/wrappers/ik_wrapper.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/wrappers/ik_wrapper.py#L73-L114
def step(self, action): """ Move the end effector(s) according to the input control. Args: action (numpy array): The array should have the corresponding elements. 0-2: The desired change in end effector position in x, y, and z. 3-6: The desired change in orientation, expressed as a (x, y, z, w) quaternion. Note that this quaternion encodes a relative rotation with respect to the current gripper orientation. If the current rotation is r, this corresponds to a quaternion d such that r * d will be the new rotation. *: Controls for gripper actuation. Note: When wrapping around a Baxter environment, the indices 0-6 inidicate the right hand. Indices 7-13 indicate the left hand, and the rest (*) are the gripper inputs (first right, then left). """ input_1 = self._make_input(action[:7], self.env._right_hand_quat) if self.env.mujoco_robot.name == "sawyer": velocities = self.controller.get_control(**input_1) low_action = np.concatenate([velocities, action[7:]]) elif self.env.mujoco_robot.name == "baxter": input_2 = self._make_input(action[7:14], self.env._left_hand_quat) velocities = self.controller.get_control(input_1, input_2) low_action = np.concatenate([velocities, action[14:]]) else: raise Exception( "Only Sawyer and Baxter robot environments are supported for IK " "control currently." ) # keep trying to reach the target in a closed-loop for i in range(self.action_repeat): ret = self.env.step(low_action) velocities = self.controller.get_control() if self.env.mujoco_robot.name == "sawyer": low_action = np.concatenate([velocities, action[7:]]) else: low_action = np.concatenate([velocities, action[14:]]) return ret
[ "def", "step", "(", "self", ",", "action", ")", ":", "input_1", "=", "self", ".", "_make_input", "(", "action", "[", ":", "7", "]", ",", "self", ".", "env", ".", "_right_hand_quat", ")", "if", "self", ".", "env", ".", "mujoco_robot", ".", "name", "...
Move the end effector(s) according to the input control. Args: action (numpy array): The array should have the corresponding elements. 0-2: The desired change in end effector position in x, y, and z. 3-6: The desired change in orientation, expressed as a (x, y, z, w) quaternion. Note that this quaternion encodes a relative rotation with respect to the current gripper orientation. If the current rotation is r, this corresponds to a quaternion d such that r * d will be the new rotation. *: Controls for gripper actuation. Note: When wrapping around a Baxter environment, the indices 0-6 inidicate the right hand. Indices 7-13 indicate the left hand, and the rest (*) are the gripper inputs (first right, then left).
[ "Move", "the", "end", "effector", "(", "s", ")", "according", "to", "the", "input", "control", "." ]
python
train
galaxyproject/pulsar
pulsar/managers/stateful.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/stateful.py#L177-L188
def __status(self, job_directory, proxy_status): """ Use proxied manager's status to compute the real (stateful) status of job. """ if proxy_status == status.COMPLETE: if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED): job_status = status.POSTPROCESSING else: job_status = status.COMPLETE else: job_status = proxy_status return job_status
[ "def", "__status", "(", "self", ",", "job_directory", ",", "proxy_status", ")", ":", "if", "proxy_status", "==", "status", ".", "COMPLETE", ":", "if", "not", "job_directory", ".", "has_metadata", "(", "JOB_FILE_POSTPROCESSED", ")", ":", "job_status", "=", "sta...
Use proxied manager's status to compute the real (stateful) status of job.
[ "Use", "proxied", "manager", "s", "status", "to", "compute", "the", "real", "(", "stateful", ")", "status", "of", "job", "." ]
python
train
DistrictDataLabs/yellowbrick
yellowbrick/download.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/download.py#L35-L48
def download_all(data_home=None, replace=False): """ Downloads all the example datasets to the data directory specified by ``get_data_home``. This function ensures that all datasets are available for use with the examples. """ for _, meta in DATASETS.items(): download_data( meta['url'], meta['signature'], data_home=data_home, replace=replace ) print( "Downloaded {} datasets to {}".format(len(DATASETS), get_data_home(data_home)) )
[ "def", "download_all", "(", "data_home", "=", "None", ",", "replace", "=", "False", ")", ":", "for", "_", ",", "meta", "in", "DATASETS", ".", "items", "(", ")", ":", "download_data", "(", "meta", "[", "'url'", "]", ",", "meta", "[", "'signature'", "]...
Downloads all the example datasets to the data directory specified by ``get_data_home``. This function ensures that all datasets are available for use with the examples.
[ "Downloads", "all", "the", "example", "datasets", "to", "the", "data", "directory", "specified", "by", "get_data_home", ".", "This", "function", "ensures", "that", "all", "datasets", "are", "available", "for", "use", "with", "the", "examples", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/sts/forecast.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/forecast.py#L35-L169
def one_step_predictive(model, observed_time_series, parameter_samples): """Compute one-step-ahead predictive distributions for all timesteps. Given samples from the posterior over parameters, return the predictive distribution over observations at each time `T`, given observations up through time `T-1`. Args: model: An instance of `StructuralTimeSeries` representing a time-series model. This represents a joint distribution over time-series and their parameters with batch shape `[b1, ..., bN]`. observed_time_series: `float` `Tensor` of shape `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]` dimension may (optionally) be omitted if `num_timesteps > 1`. May optionally be an instance of `tfp.sts.MaskedTimeSeries` including a mask `Tensor` to encode the locations of missing observations. parameter_samples: Python `list` of `Tensors` representing posterior samples of model parameters, with shapes `[concat([[num_posterior_draws], param.prior.batch_shape, param.prior.event_shape]) for param in model.parameters]`. This may optionally also be a map (Python `dict`) of parameter names to `Tensor` values. Returns: forecast_dist: a `tfd.MixtureSameFamily` instance with event shape [num_timesteps] and batch shape `concat([sample_shape, model.batch_shape])`, with `num_posterior_draws` mixture components. The `t`th step represents the forecast distribution `p(observed_time_series[t] | observed_time_series[0:t-1], parameter_samples)`. 
#### Examples Suppose we've built a model and fit it to data using HMC: ```python day_of_week = tfp.sts.Seasonal( num_seasons=7, observed_time_series=observed_time_series, name='day_of_week') local_linear_trend = tfp.sts.LocalLinearTrend( observed_time_series=observed_time_series, name='local_linear_trend') model = tfp.sts.Sum(components=[day_of_week, local_linear_trend], observed_time_series=observed_time_series) samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series) ``` Passing the posterior samples into `one_step_predictive`, we construct a one-step-ahead predictive distribution: ```python one_step_predictive_dist = tfp.sts.one_step_predictive( model, observed_time_series, parameter_samples=samples) predictive_means = one_step_predictive_dist.mean() predictive_scales = one_step_predictive_dist.stddev() ``` If using variational inference instead of HMC, we'd construct a forecast using samples from the variational posterior: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series) # OMITTED: take steps to optimize variational loss samples = {k: q.sample(30) for (k, q) in variational_distributions.items()} one_step_predictive_dist = tfp.sts.one_step_predictive( model, observed_time_series, parameter_samples=samples) ``` We can visualize the forecast by plotting: ```python from matplotlib import pylab as plt def plot_one_step_predictive(observed_time_series, forecast_mean, forecast_scale): plt.figure(figsize=(12, 6)) num_timesteps = forecast_mean.shape[-1] c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05) plt.plot(observed_time_series, label="observed time series", color=c1) plt.plot(forecast_mean, label="one-step prediction", color=c2) plt.fill_between(np.arange(num_timesteps), forecast_mean - 2 * forecast_scale, forecast_mean + 2 * forecast_scale, alpha=0.1, color=c2) plt.legend() plot_one_step_predictive(observed_time_series, 
forecast_mean=predictive_means, forecast_scale=predictive_scales) ``` To detect anomalous timesteps, we check whether the observed value at each step is within a 95% predictive interval, i.e., two standard deviations from the mean: ```python z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1]) / predictive_scales[..., :-1]) anomalous_timesteps = tf.boolean_mask( tf.range(1, num_timesteps), tf.abs(z_scores) > 2.0) ``` """ with tf.compat.v1.name_scope( 'one_step_predictive', values=[observed_time_series, parameter_samples]): [ observed_time_series, is_missing ] = sts_util.canonicalize_observed_time_series_with_mask( observed_time_series) # Run filtering over the training timesteps to extract the # predictive means and variances. num_timesteps = dist_util.prefer_static_value( tf.shape(input=observed_time_series))[-2] lgssm = model.make_state_space_model( num_timesteps=num_timesteps, param_vals=parameter_samples) (_, _, _, _, _, observation_means, observation_covs ) = lgssm.forward_filter(observed_time_series, mask=is_missing) # Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]` # to a scalar time series. return sts_util.mix_over_posterior_draws( means=observation_means[..., 0], variances=observation_covs[..., 0, 0])
[ "def", "one_step_predictive", "(", "model", ",", "observed_time_series", ",", "parameter_samples", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "'one_step_predictive'", ",", "values", "=", "[", "observed_time_series", ",", "parameter_s...
Compute one-step-ahead predictive distributions for all timesteps. Given samples from the posterior over parameters, return the predictive distribution over observations at each time `T`, given observations up through time `T-1`. Args: model: An instance of `StructuralTimeSeries` representing a time-series model. This represents a joint distribution over time-series and their parameters with batch shape `[b1, ..., bN]`. observed_time_series: `float` `Tensor` of shape `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]` dimension may (optionally) be omitted if `num_timesteps > 1`. May optionally be an instance of `tfp.sts.MaskedTimeSeries` including a mask `Tensor` to encode the locations of missing observations. parameter_samples: Python `list` of `Tensors` representing posterior samples of model parameters, with shapes `[concat([[num_posterior_draws], param.prior.batch_shape, param.prior.event_shape]) for param in model.parameters]`. This may optionally also be a map (Python `dict`) of parameter names to `Tensor` values. Returns: forecast_dist: a `tfd.MixtureSameFamily` instance with event shape [num_timesteps] and batch shape `concat([sample_shape, model.batch_shape])`, with `num_posterior_draws` mixture components. The `t`th step represents the forecast distribution `p(observed_time_series[t] | observed_time_series[0:t-1], parameter_samples)`. 
#### Examples Suppose we've built a model and fit it to data using HMC: ```python day_of_week = tfp.sts.Seasonal( num_seasons=7, observed_time_series=observed_time_series, name='day_of_week') local_linear_trend = tfp.sts.LocalLinearTrend( observed_time_series=observed_time_series, name='local_linear_trend') model = tfp.sts.Sum(components=[day_of_week, local_linear_trend], observed_time_series=observed_time_series) samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series) ``` Passing the posterior samples into `one_step_predictive`, we construct a one-step-ahead predictive distribution: ```python one_step_predictive_dist = tfp.sts.one_step_predictive( model, observed_time_series, parameter_samples=samples) predictive_means = one_step_predictive_dist.mean() predictive_scales = one_step_predictive_dist.stddev() ``` If using variational inference instead of HMC, we'd construct a forecast using samples from the variational posterior: ```python (variational_loss, variational_distributions) = tfp.sts.build_factored_variational_loss( model=model, observed_time_series=observed_time_series) # OMITTED: take steps to optimize variational loss samples = {k: q.sample(30) for (k, q) in variational_distributions.items()} one_step_predictive_dist = tfp.sts.one_step_predictive( model, observed_time_series, parameter_samples=samples) ``` We can visualize the forecast by plotting: ```python from matplotlib import pylab as plt def plot_one_step_predictive(observed_time_series, forecast_mean, forecast_scale): plt.figure(figsize=(12, 6)) num_timesteps = forecast_mean.shape[-1] c1, c2 = (0.12, 0.47, 0.71), (1.0, 0.5, 0.05) plt.plot(observed_time_series, label="observed time series", color=c1) plt.plot(forecast_mean, label="one-step prediction", color=c2) plt.fill_between(np.arange(num_timesteps), forecast_mean - 2 * forecast_scale, forecast_mean + 2 * forecast_scale, alpha=0.1, color=c2) plt.legend() plot_one_step_predictive(observed_time_series, 
forecast_mean=predictive_means, forecast_scale=predictive_scales) ``` To detect anomalous timesteps, we check whether the observed value at each step is within a 95% predictive interval, i.e., two standard deviations from the mean: ```python z_scores = ((observed_time_series[..., 1:] - predictive_means[..., :-1]) / predictive_scales[..., :-1]) anomalous_timesteps = tf.boolean_mask( tf.range(1, num_timesteps), tf.abs(z_scores) > 2.0) ```
[ "Compute", "one", "-", "step", "-", "ahead", "predictive", "distributions", "for", "all", "timesteps", "." ]
python
test
opereto/pyopereto
pyopereto/client.py
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L1231-L1250
def get_process_properties(self, pid=None, name=None): ''' get_process_properties(self, pid=None, name=None) Get process properties (both input and output properties) :Parameters: * *pid* (`string`) -- Identifier of an existing process * *name* (`string`) -- optional - Property name ''' pid = self._get_pid(pid) res = self._call_rest_api('get', '/processes/'+pid+'/properties', error='Failed to fetch process properties') if name: try: return res[name] except KeyError as e: raise OperetoClientError(message='Invalid property [%s]'%name, code=404) else: return res
[ "def", "get_process_properties", "(", "self", ",", "pid", "=", "None", ",", "name", "=", "None", ")", ":", "pid", "=", "self", ".", "_get_pid", "(", "pid", ")", "res", "=", "self", ".", "_call_rest_api", "(", "'get'", ",", "'/processes/'", "+", "pid", ...
get_process_properties(self, pid=None, name=None) Get process properties (both input and output properties) :Parameters: * *pid* (`string`) -- Identifier of an existing process * *name* (`string`) -- optional - Property name
[ "get_process_properties", "(", "self", "pid", "=", "None", "name", "=", "None", ")" ]
python
train
hsolbrig/PyShEx
pyshex/prefixlib.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/prefixlib.py#L100-L115
def nsname(self, uri: Union[str, URIRef]) -> str: """ Return the 'ns:name' format of URI :param uri: URI to transform :return: nsname format of URI or straight URI if no mapping """ uri = str(uri) nsuri = "" prefix = None for pfx, ns in self: nss = str(ns) if uri.startswith(nss) and len(nss) > len(nsuri): nsuri = nss prefix = pfx return (prefix.lower() + ':' + uri[len(nsuri):]) if prefix is not None else uri
[ "def", "nsname", "(", "self", ",", "uri", ":", "Union", "[", "str", ",", "URIRef", "]", ")", "->", "str", ":", "uri", "=", "str", "(", "uri", ")", "nsuri", "=", "\"\"", "prefix", "=", "None", "for", "pfx", ",", "ns", "in", "self", ":", "nss", ...
Return the 'ns:name' format of URI :param uri: URI to transform :return: nsname format of URI or straight URI if no mapping
[ "Return", "the", "ns", ":", "name", "format", "of", "URI" ]
python
train
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L614-L618
def update_request_setting(self, service_id, version_number, name_key, **kwargs): """Updates the specified Request Settings object.""" body = self._formdata(kwargs, FastlyHealthCheck.FIELDS) content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, name_key), method="PUT", body=body) return FastlyRequestSetting(self, content)
[ "def", "update_request_setting", "(", "self", ",", "service_id", ",", "version_number", ",", "name_key", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_formdata", "(", "kwargs", ",", "FastlyHealthCheck", ".", "FIELDS", ")", "content", "=", ...
Updates the specified Request Settings object.
[ "Updates", "the", "specified", "Request", "Settings", "object", "." ]
python
train
twosigma/marbles
marbles/mixins/marbles/mixins/mixins.py
https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/mixins/marbles/mixins/mixins.py#L788-L813
def assertFileEncodingEqual(self, filename, encoding, msg=None): '''Fail if ``filename`` is not encoded with the given ``encoding`` as determined by the '==' operator. Parameters ---------- filename : str, bytes, file-like encoding : str, bytes msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like. ''' fencoding = self._get_file_encoding(filename) fname = self._get_file_name(filename) standardMsg = '%s is not %s encoded' % (fname, encoding) self.assertEqual(fencoding.lower(), encoding.lower(), self._formatMessage(msg, standardMsg))
[ "def", "assertFileEncodingEqual", "(", "self", ",", "filename", ",", "encoding", ",", "msg", "=", "None", ")", ":", "fencoding", "=", "self", ".", "_get_file_encoding", "(", "filename", ")", "fname", "=", "self", ".", "_get_file_name", "(", "filename", ")", ...
Fail if ``filename`` is not encoded with the given ``encoding`` as determined by the '==' operator. Parameters ---------- filename : str, bytes, file-like encoding : str, bytes msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like.
[ "Fail", "if", "filename", "is", "not", "encoded", "with", "the", "given", "encoding", "as", "determined", "by", "the", "==", "operator", "." ]
python
train
bhmm/bhmm
bhmm/estimators/_tmatrix_disconnected.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/_tmatrix_disconnected.py#L46-L56
def closed_sets(C, mincount_connectivity=0): """ Computes the strongly connected closed sets of C """ n = np.shape(C)[0] S = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=True) closed = [] for s in S: mask = np.zeros(n, dtype=bool) mask[s] = True if C[np.ix_(mask, ~mask)].sum() == 0: # closed set, take it closed.append(s) return closed
[ "def", "closed_sets", "(", "C", ",", "mincount_connectivity", "=", "0", ")", ":", "n", "=", "np", ".", "shape", "(", "C", ")", "[", "0", "]", "S", "=", "connected_sets", "(", "C", ",", "mincount_connectivity", "=", "mincount_connectivity", ",", "strong",...
Computes the strongly connected closed sets of C
[ "Computes", "the", "strongly", "connected", "closed", "sets", "of", "C" ]
python
train
saltstack/salt
salt/modules/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L2032-L2099
def single(fun, name, test=None, queue=False, **kwargs): ''' Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify lists values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict comps = fun.split('.') if len(comps) < 2: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return 'Invalid function passed' kwargs.update({'state': comps[0], 'fun': comps[1], '__id__': name, 'name': name}) orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) err = st_.verify_data(kwargs) if err: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return err st_._mod_init(kwargs) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy')) ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): st_.call(kwargs)} _set_retcode(ret) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. 
_snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre) __opts__['test'] = orig_test return ret
[ "def", "single", "(", "fun", ",", "name", ",", "test", "=", "None", ",", "queue", "=", "False", ",", "*", "*", "kwargs", ")", ":", "conflict", "=", "_check_queue", "(", "queue", ",", "kwargs", ")", "if", "conflict", "is", "not", "None", ":", "retur...
Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify lists values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim
[ "Execute", "a", "single", "state", "function", "with", "the", "named", "kwargs", "returns", "False", "if", "insufficient", "data", "is", "sent", "to", "the", "command" ]
python
train
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L1636-L1643
def getAddPerson(self): """ Return an L{AddPersonFragment} which is a child of this fragment and which will add a person to C{self.organizer}. """ fragment = AddPersonFragment(self.organizer) fragment.setFragmentParent(self) return fragment
[ "def", "getAddPerson", "(", "self", ")", ":", "fragment", "=", "AddPersonFragment", "(", "self", ".", "organizer", ")", "fragment", ".", "setFragmentParent", "(", "self", ")", "return", "fragment" ]
Return an L{AddPersonFragment} which is a child of this fragment and which will add a person to C{self.organizer}.
[ "Return", "an", "L", "{", "AddPersonFragment", "}", "which", "is", "a", "child", "of", "this", "fragment", "and", "which", "will", "add", "a", "person", "to", "C", "{", "self", ".", "organizer", "}", "." ]
python
train
bovee/Aston
aston/trace/new_integrator.py
https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/new_integrator.py#L6-L20
def _get_windows(peak_list): """ Given a list of peaks, bin them into windows. """ win_list = [] for t0, t1, hints in peak_list: p_w = (t0, t1) for w in win_list: if p_w[0] <= w[0][1] and p_w[1] >= w[0][0]: w[0] = (min(p_w[0], w[0][0]), max(p_w[1], w[0][1])) w[1].append((t0, t1, hints)) break else: win_list.append([p_w, [(t0, t1, hints)]]) return win_list
[ "def", "_get_windows", "(", "peak_list", ")", ":", "win_list", "=", "[", "]", "for", "t0", ",", "t1", ",", "hints", "in", "peak_list", ":", "p_w", "=", "(", "t0", ",", "t1", ")", "for", "w", "in", "win_list", ":", "if", "p_w", "[", "0", "]", "<...
Given a list of peaks, bin them into windows.
[ "Given", "a", "list", "of", "peaks", "bin", "them", "into", "windows", "." ]
python
train
bububa/pyTOP
pyTOP/packages/requests/packages/urllib3/poolmanager.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/packages/urllib3/poolmanager.py#L120-L123
def urlopen(self, method, url, **kw): "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." kw['assert_same_host'] = False return self.proxy_pool.urlopen(method, url, **kw)
[ "def", "urlopen", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kw", ")", ":", "kw", "[", "'assert_same_host'", "]", "=", "False", "return", "self", ".", "proxy_pool", ".", "urlopen", "(", "method", ",", "url", ",", "*", "*", "kw", ")" ]
Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.
[ "Same", "as", "HTTP", "(", "S", ")", "ConnectionPool", ".", "urlopen", "url", "must", "be", "absolute", "." ]
python
train
Microsoft/nni
examples/trials/weight_sharing/ga_squad/train_model.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/weight_sharing/ga_squad/train_model.py#L234-L263
def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths): """Build char embedding network for the QA model.""" max_char_length = self.cfg.max_char_length inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids), self.cfg.dropout, is_training) inputs = tf.reshape( inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim]) char_lengths = tf.reshape(char_lengths, shape=[-1]) with tf.variable_scope('char_encoding', reuse=reuse): cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim) cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim) _, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn( cell_fw=cell_fw, cell_bw=cell_bw, sequence_length=char_lengths, inputs=inputs, time_major=True, dtype=tf.float32 ) left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim]) right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim]) states = tf.concat([left_right, right_left], axis=1) out_shape = tf.shape(char_ids)[1:3] out_shape = tf.concat([out_shape, tf.constant( value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0) return tf.reshape(states, shape=out_shape)
[ "def", "build_char_states", "(", "self", ",", "char_embed", ",", "is_training", ",", "reuse", ",", "char_ids", ",", "char_lengths", ")", ":", "max_char_length", "=", "self", ".", "cfg", ".", "max_char_length", "inputs", "=", "dropout", "(", "tf", ".", "nn", ...
Build char embedding network for the QA model.
[ "Build", "char", "embedding", "network", "for", "the", "QA", "model", "." ]
python
train
angr/claripy
claripy/ast/base.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/ast/base.py#L395-L402
def remove_annotation(self, a): """ Removes an annotation from this AST. :param a: the annotation to remove :returns: a new AST, with the annotation removed """ return self._apply_to_annotations(lambda alist: tuple(oa for oa in alist if oa != a))
[ "def", "remove_annotation", "(", "self", ",", "a", ")", ":", "return", "self", ".", "_apply_to_annotations", "(", "lambda", "alist", ":", "tuple", "(", "oa", "for", "oa", "in", "alist", "if", "oa", "!=", "a", ")", ")" ]
Removes an annotation from this AST. :param a: the annotation to remove :returns: a new AST, with the annotation removed
[ "Removes", "an", "annotation", "from", "this", "AST", "." ]
python
train
tensorflow/cleverhans
cleverhans/confidence_report.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/confidence_report.py#L238-L270
def print_stats(correctness, confidence, name): """ Prints out accuracy, coverage, etc. statistics :param correctness: ndarray One bool per example specifying whether it was correctly classified :param confidence: ndarray The probability associated with each prediction :param name: str The name of this type of data (e.g. "clean", "MaxConfidence") """ accuracy = correctness.mean() wrongness = 1 - correctness denom1 = np.maximum(1, wrongness.sum()) ave_prob_on_mistake = (wrongness * confidence).sum() / denom1 assert ave_prob_on_mistake <= 1., ave_prob_on_mistake denom2 = np.maximum(1, correctness.sum()) ave_prob_on_correct = (correctness * confidence).sum() / denom2 covered = confidence > 0.5 cov_half = covered.mean() acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum()) print('Accuracy on %s examples: %0.4f' % (name, accuracy)) print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake) print("Average prob on correct: %0.4f" % ave_prob_on_correct) print("Accuracy when prob thresholded at .5: %0.4f" % acc_half) print("Coverage when prob thresholded at .5: %0.4f" % cov_half) success_rate = acc_half * cov_half # Success is correctly classifying a covered example print("Success rate at .5: %0.4f" % success_rate) # Failure is misclassifying a covered example failure_rate = (1. - acc_half) * cov_half print("Failure rate at .5: %0.4f" % failure_rate) print()
[ "def", "print_stats", "(", "correctness", ",", "confidence", ",", "name", ")", ":", "accuracy", "=", "correctness", ".", "mean", "(", ")", "wrongness", "=", "1", "-", "correctness", "denom1", "=", "np", ".", "maximum", "(", "1", ",", "wrongness", ".", ...
Prints out accuracy, coverage, etc. statistics :param correctness: ndarray One bool per example specifying whether it was correctly classified :param confidence: ndarray The probability associated with each prediction :param name: str The name of this type of data (e.g. "clean", "MaxConfidence")
[ "Prints", "out", "accuracy", "coverage", "etc", ".", "statistics", ":", "param", "correctness", ":", "ndarray", "One", "bool", "per", "example", "specifying", "whether", "it", "was", "correctly", "classified", ":", "param", "confidence", ":", "ndarray", "The", ...
python
train
MSchnei/pyprf_feature
pyprf_feature/simulation/pRF_functions.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/simulation/pRF_functions.py#L112-L131
def funcConvPar(aryDm, vecHrf, varNumVol): """ Function for convolution of pixel-wise 'design matrix' with HRF model. """ # In order to avoid an artefact at the end of the time series, we have to # concatenate an empty array to both the design matrix and the HRF model # before convolution. aryDm = np.concatenate((aryDm, np.zeros((aryDm.shape[0], 100))), axis=1) vecHrf = np.concatenate((vecHrf, np.zeros((100,)))) aryDmConv = np.empty((aryDm.shape[0], varNumVol)) for idx in range(0, aryDm.shape[0]): vecDm = aryDm[idx, :] # Convolve design matrix with HRF model: aryDmConv[idx, :] = np.convolve(vecDm, vecHrf, mode='full')[:varNumVol] return aryDmConv
[ "def", "funcConvPar", "(", "aryDm", ",", "vecHrf", ",", "varNumVol", ")", ":", "# In order to avoid an artefact at the end of the time series, we have to", "# concatenate an empty array to both the design matrix and the HRF model", "# before convolution.", "aryDm", "=", "np", ".", ...
Function for convolution of pixel-wise 'design matrix' with HRF model.
[ "Function", "for", "convolution", "of", "pixel", "-", "wise", "design", "matrix", "with", "HRF", "model", "." ]
python
train
bukun/TorCMS
torcms/handlers/post_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_handler.py#L190-L198
def _gen_uid(self): ''' Generate the ID for post. :return: the new ID. ''' cur_uid = self.kind + tools.get_uu4d() while MPost.get_by_uid(cur_uid): cur_uid = self.kind + tools.get_uu4d() return cur_uid
[ "def", "_gen_uid", "(", "self", ")", ":", "cur_uid", "=", "self", ".", "kind", "+", "tools", ".", "get_uu4d", "(", ")", "while", "MPost", ".", "get_by_uid", "(", "cur_uid", ")", ":", "cur_uid", "=", "self", ".", "kind", "+", "tools", ".", "get_uu4d",...
Generate the ID for post. :return: the new ID.
[ "Generate", "the", "ID", "for", "post", ".", ":", "return", ":", "the", "new", "ID", "." ]
python
train
praekeltfoundation/seed-auth-api
authapi/views.py
https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L41-L54
def get_queryset(self): '''We want to still be able to modify archived organizations, but they shouldn't show up on list views. We have an archived query param, where 'true' shows archived, 'false' omits them, and 'both' shows both.''' if self.action == 'list': archived = get_true_false_both( self.request.query_params, 'archived', 'false') if archived == 'true': return self.queryset.filter(archived=True) if archived == 'false': return self.queryset.filter(archived=False) return self.queryset
[ "def", "get_queryset", "(", "self", ")", ":", "if", "self", ".", "action", "==", "'list'", ":", "archived", "=", "get_true_false_both", "(", "self", ".", "request", ".", "query_params", ",", "'archived'", ",", "'false'", ")", "if", "archived", "==", "'true...
We want to still be able to modify archived organizations, but they shouldn't show up on list views. We have an archived query param, where 'true' shows archived, 'false' omits them, and 'both' shows both.
[ "We", "want", "to", "still", "be", "able", "to", "modify", "archived", "organizations", "but", "they", "shouldn", "t", "show", "up", "on", "list", "views", "." ]
python
train
Telefonica/toolium
toolium/utils.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L393-L403
def wait_until_element_not_contain_text(self, element, text, timeout=None): """Search element and wait until it does not contain the expected text :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :param text: text expected to be contained into the element :param timeout: max time to wait :returns: the web element if it does not contain the given text :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement :raises TimeoutException: If the element contains the expected text after the timeout """ return self._wait_until(self._expected_condition_find_element_not_containing_text, (element, text), timeout)
[ "def", "wait_until_element_not_contain_text", "(", "self", ",", "element", ",", "text", ",", "timeout", "=", "None", ")", ":", "return", "self", ".", "_wait_until", "(", "self", ".", "_expected_condition_find_element_not_containing_text", ",", "(", "element", ",", ...
Search element and wait until it does not contain the expected text :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :param text: text expected to be contained into the element :param timeout: max time to wait :returns: the web element if it does not contain the given text :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement :raises TimeoutException: If the element contains the expected text after the timeout
[ "Search", "element", "and", "wait", "until", "it", "does", "not", "contain", "the", "expected", "text" ]
python
train
CGATOxford/UMI-tools
umi_tools/dedup.py
https://github.com/CGATOxford/UMI-tools/blob/c4b5d84aac391d59916d294f8f4f8f5378abcfbe/umi_tools/dedup.py#L127-L416
def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if argv is None: argv = sys.argv # setup command line parser parser = U.OptionParser(version="%prog version: $Id$", usage=usage, description=globals()["__doc__"]) group = U.OptionGroup(parser, "dedup-specific options") group.add_option("--output-stats", dest="stats", type="string", default=False, help="Specify location to output stats") parser.add_option_group(group) # add common options (-h/--help, ...) and parse command line (options, args) = U.Start(parser, argv=argv) U.validateSamOptions(options, group=False) if options.random_seed: np.random.seed(options.random_seed) if options.stdin != sys.stdin: in_name = options.stdin.name options.stdin.close() else: raise ValueError("Input on standard in not currently supported") if options.stdout != sys.stdout: if options.no_sort_output: out_name = options.stdout.name else: out_name = U.getTempFilename(dir=options.tmpdir) sorted_out_name = options.stdout.name options.stdout.close() else: if options.no_sort_output: out_name = "-" else: out_name = U.getTempFilename(dir=options.tmpdir) sorted_out_name = "-" if not options.no_sort_output: # need to determine the output format for sort if options.out_sam: sort_format = "sam" else: sort_format = "bam" if options.in_sam: in_mode = "r" else: in_mode = "rb" if options.out_sam: out_mode = "wh" else: out_mode = "wb" if options.stats and options.ignore_umi: raise ValueError("'--output-stats' and '--ignore-umi' options" " cannot be used together") infile = pysam.Samfile(in_name, in_mode) outfile = pysam.Samfile(out_name, out_mode, template=infile) if options.paired: outfile = sam_methods.TwoPassPairWriter(infile, outfile) nInput, nOutput, input_reads, output_reads = 0, 0, 0, 0 if options.detection_method: bam_features = detect_bam_features(infile.filename) if not bam_features[options.detection_method]: if sum(bam_features.values()) == 0: raise ValueError( "There are no bam tags 
available to detect multimapping. " "Do not set --multimapping-detection-method") else: raise ValueError( "The chosen method of detection for multimapping (%s) " "will not work with this bam. Multimapping can be detected" " for this bam using any of the following: %s" % ( options.detection_method, ",".join( [x for x in bam_features if bam_features[x]]))) gene_tag = options.gene_tag metacontig2contig = None if options.chrom: inreads = infile.fetch(reference=options.chrom) else: if options.per_contig and options.gene_transcript_map: metacontig2contig = sam_methods.getMetaContig2contig( infile, options.gene_transcript_map) metatag = "MC" inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag) gene_tag = metatag else: inreads = infile.fetch() # set up ReadCluster functor with methods specific to # specified options.method processor = network.ReadDeduplicator(options.method) bundle_iterator = sam_methods.get_bundles( options, metacontig_contig=metacontig2contig) if options.stats: # set up arrays to hold stats data stats_pre_df_dict = {"UMI": [], "counts": []} stats_post_df_dict = {"UMI": [], "counts": []} pre_cluster_stats = [] post_cluster_stats = [] pre_cluster_stats_null = [] post_cluster_stats_null = [] topology_counts = collections.Counter() node_counts = collections.Counter() read_gn = umi_methods.random_read_generator( infile.filename, chrom=options.chrom, barcode_getter=bundle_iterator.barcode_getter) for bundle, key, status in bundle_iterator(inreads): nInput += sum([bundle[umi]["count"] for umi in bundle]) while nOutput >= output_reads + 100000: output_reads += 100000 U.info("Written out %i reads" % output_reads) while nInput >= input_reads + 1000000: input_reads += 1000000 U.info("Parsed %i input reads" % input_reads) if options.stats: # generate pre-dudep stats average_distance = umi_methods.get_average_umi_distance(bundle.keys()) pre_cluster_stats.append(average_distance) cluster_size = len(bundle) random_umis = read_gn.getUmis(cluster_size) 
average_distance_null = umi_methods.get_average_umi_distance(random_umis) pre_cluster_stats_null.append(average_distance_null) if options.ignore_umi: for umi in bundle: nOutput += 1 outfile.write(bundle[umi]["read"]) else: # dedup using umis and write out deduped bam reads, umis, umi_counts = processor( bundle=bundle, threshold=options.threshold) for read in reads: outfile.write(read) nOutput += 1 if options.stats: # collect pre-dudupe stats stats_pre_df_dict['UMI'].extend(bundle) stats_pre_df_dict['counts'].extend( [bundle[UMI]['count'] for UMI in bundle]) # collect post-dudupe stats post_cluster_umis = [bundle_iterator.barcode_getter(x)[0] for x in reads] stats_post_df_dict['UMI'].extend(umis) stats_post_df_dict['counts'].extend(umi_counts) average_distance = umi_methods.get_average_umi_distance(post_cluster_umis) post_cluster_stats.append(average_distance) cluster_size = len(post_cluster_umis) random_umis = read_gn.getUmis(cluster_size) average_distance_null = umi_methods.get_average_umi_distance(random_umis) post_cluster_stats_null.append(average_distance_null) outfile.close() if not options.no_sort_output: # sort the output pysam.sort("-o", sorted_out_name, "-O", sort_format, out_name) os.unlink(out_name) # delete the tempfile if options.stats: # generate the stats dataframe stats_pre_df = pd.DataFrame(stats_pre_df_dict) stats_post_df = pd.DataFrame(stats_post_df_dict) # tally the counts per umi per position pre_counts = collections.Counter(stats_pre_df["counts"]) post_counts = collections.Counter(stats_post_df["counts"]) counts_index = list(set(pre_counts.keys()).union(set(post_counts.keys()))) counts_index.sort() with U.openFile(options.stats + "_per_umi_per_position.tsv", "w") as outf: outf.write("counts\tinstances_pre\tinstances_post\n") for count in counts_index: values = (count, pre_counts[count], post_counts[count]) outf.write("\t".join(map(str, values)) + "\n") # aggregate stats pre/post per UMI agg_pre_df = aggregateStatsDF(stats_pre_df) agg_post_df = 
aggregateStatsDF(stats_post_df) agg_df = pd.merge(agg_pre_df, agg_post_df, how='left', left_index=True, right_index=True, sort=True, suffixes=["_pre", "_post"]) # TS - if count value not observed either pre/post-dedup, # merge will leave an empty cell and the column will be cast as a float # see http://pandas.pydata.org/pandas-docs/dev/missing_data.html # --> Missing data casting rules and indexing # so, back fill with zeros and convert back to int agg_df = agg_df.fillna(0).astype(int) agg_df.index = [x.decode() for x in agg_df.index] agg_df.index.name = 'UMI' agg_df.to_csv(options.stats + "_per_umi.tsv", sep="\t") # bin distances into integer bins max_ed = int(max(map(max, [pre_cluster_stats, post_cluster_stats, pre_cluster_stats_null, post_cluster_stats_null]))) cluster_bins = range(-1, int(max_ed) + 2) def bin_clusters(cluster_list, bins=cluster_bins): ''' take list of floats and return bins''' return np.digitize(cluster_list, bins, right=True) def tallyCounts(binned_cluster, max_edit_distance): ''' tally counts per bin ''' return np.bincount(binned_cluster, minlength=max_edit_distance + 3) pre_cluster_binned = bin_clusters(pre_cluster_stats) post_cluster_binned = bin_clusters(post_cluster_stats) pre_cluster_null_binned = bin_clusters(pre_cluster_stats_null) post_cluster_null_binned = bin_clusters(post_cluster_stats_null) edit_distance_df = pd.DataFrame( {"unique": tallyCounts(pre_cluster_binned, max_ed), "unique_null": tallyCounts(pre_cluster_null_binned, max_ed), options.method: tallyCounts(post_cluster_binned, max_ed), "%s_null" % options.method: tallyCounts(post_cluster_null_binned, max_ed), "edit_distance": cluster_bins}, columns=["unique", "unique_null", options.method, "%s_null" % options.method, "edit_distance"]) # TS - set lowest bin (-1) to "Single_UMI" edit_distance_df['edit_distance'][0] = "Single_UMI" edit_distance_df.to_csv(options.stats + "_edit_distance.tsv", index=False, sep="\t") # write footer and output benchmark information. 
U.info( "Reads: %s" % ", ".join(["%s: %s" % (x[0], x[1]) for x in bundle_iterator.read_events.most_common()])) U.info("Number of reads out: %i" % nOutput) if not options.ignore_umi: # otherwise processor has not been used U.info("Total number of positions deduplicated: %i" % processor.UMIClusterer.positions) if processor.UMIClusterer.positions > 0: U.info("Mean number of unique UMIs per position: %.2f" % (float(processor.UMIClusterer.total_umis_per_position) / processor.UMIClusterer.positions)) U.info("Max. number of unique UMIs per position: %i" % processor.UMIClusterer.max_umis_per_position) else: U.warn("The BAM did not contain any valid " "reads/read pairs for deduplication") U.Stop()
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "# setup command line parser", "parser", "=", "U", ".", "OptionParser", "(", "version", "=", "\"%prog version: $Id$\"", ",", "usage", "=", ...
script main. parses command line options in sys.argv, unless *argv* is given.
[ "script", "main", "." ]
python
train
PyCQA/pylint
pylint/message/message_definition.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/message/message_definition.py#L51-L77
def format_help(self, checkerref=False): """return the help string for the given message id""" desc = self.descr if checkerref: desc += " This message belongs to the %s checker." % self.checker.name title = self.msg if self.symbol: msgid = "%s (%s)" % (self.symbol, self.msgid) else: msgid = self.msgid if self.minversion or self.maxversion: restr = [] if self.minversion: restr.append("< %s" % ".".join([str(n) for n in self.minversion])) if self.maxversion: restr.append(">= %s" % ".".join([str(n) for n in self.maxversion])) restr = " or ".join(restr) if checkerref: desc += " It can't be emitted when using Python %s." % restr else: desc += " This message can't be emitted when using Python %s." % restr desc = normalize_text(" ".join(desc.split()), indent=" ") if title != "%s": title = title.splitlines()[0] return ":%s: *%s*\n%s" % (msgid, title.rstrip(" "), desc) return ":%s:\n%s" % (msgid, desc)
[ "def", "format_help", "(", "self", ",", "checkerref", "=", "False", ")", ":", "desc", "=", "self", ".", "descr", "if", "checkerref", ":", "desc", "+=", "\" This message belongs to the %s checker.\"", "%", "self", ".", "checker", ".", "name", "title", "=", "s...
return the help string for the given message id
[ "return", "the", "help", "string", "for", "the", "given", "message", "id" ]
python
test
hydpy-dev/hydpy
hydpy/models/dam/dam_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L382-L436
def calc_naturalremotedischarge_v1(self): """Try to estimate the natural discharge of a cross section far downstream based on the last few simulation steps. Required control parameter: |NmbLogEntries| Required log sequences: |LoggedTotalRemoteDischarge| |LoggedOutflow| Calculated flux sequence: |NaturalRemoteDischarge| Basic equation: :math:`RemoteDemand = max(\\frac{\\Sigma(LoggedTotalRemoteDischarge - LoggedOutflow)} {NmbLogEntries}), 0)` Examples: Usually, the mean total remote flow should be larger than the mean dam outflows. Then the estimated natural remote discharge is simply the difference of both mean values: >>> from hydpy.models.dam import * >>> parameterstep() >>> nmblogentries(3) >>> logs.loggedtotalremotedischarge(2.5, 2.0, 1.5) >>> logs.loggedoutflow(2.0, 1.0, 0.0) >>> model.calc_naturalremotedischarge_v1() >>> fluxes.naturalremotedischarge naturalremotedischarge(1.0) Due to the wave travel times, the difference between remote discharge and dam outflow mights sometimes be negative. To avoid negative estimates of natural discharge, it its value is set to zero in such cases: >>> logs.loggedoutflow(4.0, 3.0, 5.0) >>> model.calc_naturalremotedischarge_v1() >>> fluxes.naturalremotedischarge naturalremotedischarge(0.0) """ con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess log = self.sequences.logs.fastaccess flu.naturalremotedischarge = 0. for idx in range(con.nmblogentries): flu.naturalremotedischarge += ( log.loggedtotalremotedischarge[idx] - log.loggedoutflow[idx]) if flu.naturalremotedischarge > 0.: flu.naturalremotedischarge /= con.nmblogentries else: flu.naturalremotedischarge = 0.
[ "def", "calc_naturalremotedischarge_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "log", "=", "self", ".", "sequences", ".", "lo...
Try to estimate the natural discharge of a cross section far downstream based on the last few simulation steps. Required control parameter: |NmbLogEntries| Required log sequences: |LoggedTotalRemoteDischarge| |LoggedOutflow| Calculated flux sequence: |NaturalRemoteDischarge| Basic equation: :math:`RemoteDemand = max(\\frac{\\Sigma(LoggedTotalRemoteDischarge - LoggedOutflow)} {NmbLogEntries}), 0)` Examples: Usually, the mean total remote flow should be larger than the mean dam outflows. Then the estimated natural remote discharge is simply the difference of both mean values: >>> from hydpy.models.dam import * >>> parameterstep() >>> nmblogentries(3) >>> logs.loggedtotalremotedischarge(2.5, 2.0, 1.5) >>> logs.loggedoutflow(2.0, 1.0, 0.0) >>> model.calc_naturalremotedischarge_v1() >>> fluxes.naturalremotedischarge naturalremotedischarge(1.0) Due to the wave travel times, the difference between remote discharge and dam outflow mights sometimes be negative. To avoid negative estimates of natural discharge, it its value is set to zero in such cases: >>> logs.loggedoutflow(4.0, 3.0, 5.0) >>> model.calc_naturalremotedischarge_v1() >>> fluxes.naturalremotedischarge naturalremotedischarge(0.0)
[ "Try", "to", "estimate", "the", "natural", "discharge", "of", "a", "cross", "section", "far", "downstream", "based", "on", "the", "last", "few", "simulation", "steps", "." ]
python
train
google/grumpy
third_party/pypy/_md5.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_md5.py#L350-L367
def copy(self): """Return a clone object. Return a copy ('clone') of the md5 object. This can be used to efficiently compute the digests of strings that share a common initial substring. """ if 0: # set this to 1 to make the flow space crash return copy.deepcopy(self) clone = self.__class__() clone.length = self.length clone.count = [] + self.count[:] clone.input = [] + self.input clone.A = self.A clone.B = self.B clone.C = self.C clone.D = self.D return clone
[ "def", "copy", "(", "self", ")", ":", "if", "0", ":", "# set this to 1 to make the flow space crash", "return", "copy", ".", "deepcopy", "(", "self", ")", "clone", "=", "self", ".", "__class__", "(", ")", "clone", ".", "length", "=", "self", ".", "length",...
Return a clone object. Return a copy ('clone') of the md5 object. This can be used to efficiently compute the digests of strings that share a common initial substring.
[ "Return", "a", "clone", "object", "." ]
python
valid
tensorflow/tensor2tensor
tensor2tensor/trax/rlax/ppo.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/rlax/ppo.py#L159-L275
def collect_trajectories(env, policy_fun, num_trajectories=1, policy="greedy", max_timestep=None, epsilon=0.1): """Collect trajectories with the given policy net and behaviour. Args: env: A gym env interface, for now this is not-batched. policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable. num_trajectories: int, number of trajectories. policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e. how to use the policy_fun to return an action. max_timestep: int or None, the index of the maximum time-step at which we return the trajectory, None for ending a trajectory only when env returns done. epsilon: float, the epsilon for `epsilon-greedy` policy. Returns: trajectory: list of (observation, action, reward) tuples, where each element `i` is a tuple of numpy arrays with shapes as follows: observation[i] = (B, T_i + 1) action[i] = (B, T_i) reward[i] = (B, T_i) """ trajectories = [] for t in range(num_trajectories): t_start = time.time() rewards = [] actions = [] done = False observation = env.reset() # This is currently shaped (1, 1) + OBS, but new observations will keep # getting added to it, making it eventually (1, T+1) + OBS observation_history = observation[np.newaxis, np.newaxis, :] # Run either till we're done OR if max_timestep is defined only till that # timestep. ts = 0 while ((not done) and (not max_timestep or observation_history.shape[1] < max_timestep)): ts_start = time.time() # Run the policy, to pick an action, shape is (1, t, A) because # observation_history is shaped (1, t) + OBS predictions = policy_fun(observation_history) # We need the predictions for the last time-step, so squeeze the batch # dimension and take the last time-step. 
predictions = np.squeeze(predictions, axis=0)[-1] # Policy can be run in one of the following ways: # - Greedy # - Epsilon-Greedy # - Categorical-Sampling action = None if policy == "greedy": action = np.argmax(predictions) elif policy == "epsilon-greedy": # A schedule for epsilon is 1/k where k is the episode number sampled. if onp.random.random() < epsilon: # Choose an action at random. action = onp.random.randint(0, high=len(predictions)) else: # Return the best action. action = np.argmax(predictions) elif policy == "categorical-sampling": # NOTE: The predictions aren't probabilities but log-probabilities # instead, since they were computed with LogSoftmax. # So just np.exp them to make them probabilities. predictions = np.exp(predictions) action = onp.argwhere(onp.random.multinomial(1, predictions) == 1) else: raise ValueError("Unknown policy: %s" % policy) # NOTE: Assumption, single batch. try: action = int(action) except TypeError as err: # Let's dump some information before we die off. logging.error("Cannot convert action into an integer: [%s]", err) logging.error("action.shape: [%s]", action.shape) logging.error("action: [%s]", action) logging.error("predictions.shape: [%s]", predictions.shape) logging.error("predictions: [%s]", predictions) logging.error("observation_history: [%s]", observation_history) raise err observation, reward, done, _ = env.step(action) # observation is of shape OBS, so add extra dims and concatenate on the # time dimension. observation_history = np.concatenate( [observation_history, observation[np.newaxis, np.newaxis, :]], axis=1) rewards.append(reward) actions.append(action) ts += 1 logging.vlog( 2, " Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.", ts, t, get_time(ts_start)) logging.vlog( 2, " Collected trajectory[ %5d] in [%0.2f] msec.", t, get_time(t_start)) # This means we are done we're been terminated early. 
assert done or ( max_timestep and max_timestep >= observation_history.shape[1]) # observation_history is (1, T+1) + OBS, lets squeeze out the batch dim. observation_history = np.squeeze(observation_history, axis=0) trajectories.append( (observation_history, np.stack(actions), np.stack(rewards))) return trajectories
[ "def", "collect_trajectories", "(", "env", ",", "policy_fun", ",", "num_trajectories", "=", "1", ",", "policy", "=", "\"greedy\"", ",", "max_timestep", "=", "None", ",", "epsilon", "=", "0.1", ")", ":", "trajectories", "=", "[", "]", "for", "t", "in", "r...
Collect trajectories with the given policy net and behaviour. Args: env: A gym env interface, for now this is not-batched. policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable. num_trajectories: int, number of trajectories. policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e. how to use the policy_fun to return an action. max_timestep: int or None, the index of the maximum time-step at which we return the trajectory, None for ending a trajectory only when env returns done. epsilon: float, the epsilon for `epsilon-greedy` policy. Returns: trajectory: list of (observation, action, reward) tuples, where each element `i` is a tuple of numpy arrays with shapes as follows: observation[i] = (B, T_i + 1) action[i] = (B, T_i) reward[i] = (B, T_i)
[ "Collect", "trajectories", "with", "the", "given", "policy", "net", "and", "behaviour", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/gff.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/gff.py#L491-L509
def is_valid_codon(codon, type='start'): """ Given a codon sequence, check if it is a valid start/stop codon """ if len(codon) != 3: return False if type == 'start': if codon != 'ATG': return False elif type == 'stop': if not any(_codon == codon for _codon in ('TGA', 'TAG', 'TAA')): return False else: logging.error("`{0}` is not a valid codon type. ".format(type) + \ "Should be one of (`start` or `stop`)") sys.exit() return True
[ "def", "is_valid_codon", "(", "codon", ",", "type", "=", "'start'", ")", ":", "if", "len", "(", "codon", ")", "!=", "3", ":", "return", "False", "if", "type", "==", "'start'", ":", "if", "codon", "!=", "'ATG'", ":", "return", "False", "elif", "type",...
Given a codon sequence, check if it is a valid start/stop codon
[ "Given", "a", "codon", "sequence", "check", "if", "it", "is", "a", "valid", "start", "/", "stop", "codon" ]
python
train
Yelp/detect-secrets
detect_secrets/core/secrets_collection.py
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L249-L272
def format_for_baseline_output(self): """ :rtype: dict """ results = self.json() for key in results: results[key] = sorted(results[key], key=lambda x: x['line_number']) plugins_used = list(map( lambda x: x.__dict__, self.plugins, )) plugins_used = sorted(plugins_used, key=lambda x: x['name']) return { 'generated_at': strftime("%Y-%m-%dT%H:%M:%SZ", gmtime()), 'exclude': { 'files': self.exclude_files, 'lines': self.exclude_lines, }, 'plugins_used': plugins_used, 'results': results, 'version': self.version, }
[ "def", "format_for_baseline_output", "(", "self", ")", ":", "results", "=", "self", ".", "json", "(", ")", "for", "key", "in", "results", ":", "results", "[", "key", "]", "=", "sorted", "(", "results", "[", "key", "]", ",", "key", "=", "lambda", "x",...
:rtype: dict
[ ":", "rtype", ":", "dict" ]
python
train
annoviko/pyclustering
pyclustering/cluster/bang.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/bang.py#L156-L169
def __draw_blocks(ax, blocks, pair): """! @brief Display BANG-blocks on specified figure. @param[in] ax (Axis): Axis where bang-blocks should be displayed. @param[in] blocks (list): List of blocks that should be displyed. @param[in] pair (tuple): Pair of coordinate index that should be displayed. """ ax.grid(False) density_scale = blocks[-1].get_density() for block in blocks: bang_visualizer.__draw_block(ax, pair, block, density_scale)
[ "def", "__draw_blocks", "(", "ax", ",", "blocks", ",", "pair", ")", ":", "ax", ".", "grid", "(", "False", ")", "density_scale", "=", "blocks", "[", "-", "1", "]", ".", "get_density", "(", ")", "for", "block", "in", "blocks", ":", "bang_visualizer", "...
! @brief Display BANG-blocks on specified figure. @param[in] ax (Axis): Axis where bang-blocks should be displayed. @param[in] blocks (list): List of blocks that should be displyed. @param[in] pair (tuple): Pair of coordinate index that should be displayed.
[ "!" ]
python
valid
tensorflow/lucid
lucid/misc/io/loading.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/loading.py#L92-L104
def _load_graphdef_protobuf(handle, **kwargs): """Load GraphDef from a binary proto file.""" # as_graph_def graph_def = tf.GraphDef.FromString(handle.read()) # check if this is a lucid-saved model # metadata = modelzoo.util.extract_metadata(graph_def) # if metadata is not None: # url = handle.name # return modelzoo.vision_base.Model.load_from_metadata(url, metadata) # else return a normal graph_def return graph_def
[ "def", "_load_graphdef_protobuf", "(", "handle", ",", "*", "*", "kwargs", ")", ":", "# as_graph_def", "graph_def", "=", "tf", ".", "GraphDef", ".", "FromString", "(", "handle", ".", "read", "(", ")", ")", "# check if this is a lucid-saved model", "# metadata = mod...
Load GraphDef from a binary proto file.
[ "Load", "GraphDef", "from", "a", "binary", "proto", "file", "." ]
python
train
davenquinn/Attitude
attitude/geom/__init__.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/geom/__init__.py#L7-L15
def aligned_covariance(fit, type='noise'): """ Covariance rescaled so that eigenvectors sum to 1 and rotated into data coordinates from PCA space """ cov = fit._covariance_matrix(type) # Rescale eigenvectors to sum to 1 cov /= N.linalg.norm(cov) return dot(fit.axes,cov)
[ "def", "aligned_covariance", "(", "fit", ",", "type", "=", "'noise'", ")", ":", "cov", "=", "fit", ".", "_covariance_matrix", "(", "type", ")", "# Rescale eigenvectors to sum to 1", "cov", "/=", "N", ".", "linalg", ".", "norm", "(", "cov", ")", "return", "...
Covariance rescaled so that eigenvectors sum to 1 and rotated into data coordinates from PCA space
[ "Covariance", "rescaled", "so", "that", "eigenvectors", "sum", "to", "1", "and", "rotated", "into", "data", "coordinates", "from", "PCA", "space" ]
python
train
marcotcr/lime
lime/lime_image.py
https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_image.py#L216-L261
def data_labels(self, image, fudged_image, segments, classifier_fn, num_samples, batch_size=10): """Generates images and predictions in the neighborhood of this image. Args: image: 3d numpy array, the image fudged_image: 3d numpy array, image to replace original image when superpixel is turned off segments: segmentation of the image classifier_fn: function that takes a list of images and returns a matrix of prediction probabilities num_samples: size of the neighborhood to learn the linear model batch_size: classifier_fn will be called on batches of this size. Returns: A tuple (data, labels), where: data: dense num_samples * num_superpixels labels: prediction probabilities matrix """ n_features = np.unique(segments).shape[0] data = self.random_state.randint(0, 2, num_samples * n_features)\ .reshape((num_samples, n_features)) labels = [] data[0, :] = 1 imgs = [] for row in data: temp = copy.deepcopy(image) zeros = np.where(row == 0)[0] mask = np.zeros(segments.shape).astype(bool) for z in zeros: mask[segments == z] = True temp[mask] = fudged_image[mask] imgs.append(temp) if len(imgs) == batch_size: preds = classifier_fn(np.array(imgs)) labels.extend(preds) imgs = [] if len(imgs) > 0: preds = classifier_fn(np.array(imgs)) labels.extend(preds) return data, np.array(labels)
[ "def", "data_labels", "(", "self", ",", "image", ",", "fudged_image", ",", "segments", ",", "classifier_fn", ",", "num_samples", ",", "batch_size", "=", "10", ")", ":", "n_features", "=", "np", ".", "unique", "(", "segments", ")", ".", "shape", "[", "0",...
Generates images and predictions in the neighborhood of this image. Args: image: 3d numpy array, the image fudged_image: 3d numpy array, image to replace original image when superpixel is turned off segments: segmentation of the image classifier_fn: function that takes a list of images and returns a matrix of prediction probabilities num_samples: size of the neighborhood to learn the linear model batch_size: classifier_fn will be called on batches of this size. Returns: A tuple (data, labels), where: data: dense num_samples * num_superpixels labels: prediction probabilities matrix
[ "Generates", "images", "and", "predictions", "in", "the", "neighborhood", "of", "this", "image", "." ]
python
train
habnabit/panglery
panglery/pangler.py
https://github.com/habnabit/panglery/blob/4d62e408c4bfaae126c93a6151ded1e8dc75bcc8/panglery/pangler.py#L128-L139
def bind(self, instance): """Bind an instance to this Pangler. Returns a clone of this Pangler, with the only difference being that the new Pangler is bound to the provided instance. Both will have the same `id`, but new hooks will not be shared. """ p = self.clone() p.instance = weakref.ref(instance) return p
[ "def", "bind", "(", "self", ",", "instance", ")", ":", "p", "=", "self", ".", "clone", "(", ")", "p", ".", "instance", "=", "weakref", ".", "ref", "(", "instance", ")", "return", "p" ]
Bind an instance to this Pangler. Returns a clone of this Pangler, with the only difference being that the new Pangler is bound to the provided instance. Both will have the same `id`, but new hooks will not be shared.
[ "Bind", "an", "instance", "to", "this", "Pangler", "." ]
python
train
JasonKessler/scattertext
scattertext/__init__.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/__init__.py#L1239-L1323
def produce_pca_explorer(corpus, category, word2vec_model=None, projection_model=None, embeddings=None, projection=None, term_acceptance_re=re.compile('[a-z]{3,}'), x_dim=0, y_dim=1, scaler=scale, show_axes=False, show_dimensions_on_tooltip=True, **kwargs): """ Parameters ---------- corpus : ParsedCorpus It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()` category : str word2vec_model : Word2Vec A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default model. projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is presents By default: umap.UMAP(min_dist=0.5, metric='cosine') unless projection is present. If so, You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23) embeddings : array[len(corpus.get_terms()), X] Word embeddings. If None (default), and no value is passed into projection, use word2vec_model projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())]) If None (default), produced using projection_model term_acceptance_re : SRE_Pattern, Regular expression to identify valid terms x_dim : int, default 0 Dimension of transformation matrix for x-axis y_dim : int, default 1 Dimension of transformation matrix for y-axis scalers : function , default scattertext.Scalers.scale Function used to scale projection show_axes : bool, default False Show the ticked axes on the plot. If false, show inner axes as a crosshair. show_dimensions_on_tooltip : bool, False by default If true, shows dimension positions on tooltip, along with term name. Otherwise, default to the get_tooltip_content parameter. 
kwargs : dict Remaining produce_scattertext_explorer keywords get_tooltip_content Returns ------- str HTML of visualization """ if projection is None: embeddings_resolover = EmbeddingsResolver(corpus) if embeddings is not None: embeddings_resolover.set_embeddings(embeddings) else: embeddings_resolover.set_embeddings_model(word2vec_model, term_acceptance_re) corpus, projection = embeddings_resolover.project_embeddings(projection_model, x_dim=x_dim, y_dim=y_dim) else: assert type(projection) == pd.DataFrame assert 'x' in projection and 'y' in projection if kwargs.get('use_non_text_features', False): assert set(projection.index) == set(corpus.get_metadata()) else: assert set(projection.index) == set(corpus.get_terms()) if show_dimensions_on_tooltip: kwargs['get_tooltip_content'] = '''(function(d) { return d.term + "<br/>Dim %s: " + Math.round(d.ox*1000)/1000 + "<br/>Dim %s: " + Math.round(d.oy*1000)/1000 })''' % (x_dim, y_dim) html = produce_scattertext_explorer( corpus=corpus, category=category, minimum_term_frequency=0, sort_by_dist=False, original_x=projection['x'], original_y=projection['y'], x_coords=scaler(projection['x']), y_coords=scaler(projection['y']), y_label='', x_label='', show_axes=show_axes, horizontal_line_y_position=0, vertical_line_x_position=0, **kwargs ) return html
[ "def", "produce_pca_explorer", "(", "corpus", ",", "category", ",", "word2vec_model", "=", "None", ",", "projection_model", "=", "None", ",", "embeddings", "=", "None", ",", "projection", "=", "None", ",", "term_acceptance_re", "=", "re", ".", "compile", "(", ...
Parameters ---------- corpus : ParsedCorpus It is highly recommended to use a stoplisted, unigram corpus-- `corpus.get_stoplisted_unigram_corpus()` category : str word2vec_model : Word2Vec A gensim word2vec model. A default model will be used instead. See Word2VecFromParsedCorpus for the default model. projection_model : sklearn-style dimensionality reduction model. Ignored if 'projection' is presents By default: umap.UMAP(min_dist=0.5, metric='cosine') unless projection is present. If so, You could also use, e.g., sklearn.manifold.TSNE(perplexity=10, n_components=2, init='pca', n_iter=2500, random_state=23) embeddings : array[len(corpus.get_terms()), X] Word embeddings. If None (default), and no value is passed into projection, use word2vec_model projection : DataFrame('x': array[len(corpus.get_terms())], 'y': array[len(corpus.get_terms())]) If None (default), produced using projection_model term_acceptance_re : SRE_Pattern, Regular expression to identify valid terms x_dim : int, default 0 Dimension of transformation matrix for x-axis y_dim : int, default 1 Dimension of transformation matrix for y-axis scalers : function , default scattertext.Scalers.scale Function used to scale projection show_axes : bool, default False Show the ticked axes on the plot. If false, show inner axes as a crosshair. show_dimensions_on_tooltip : bool, False by default If true, shows dimension positions on tooltip, along with term name. Otherwise, default to the get_tooltip_content parameter. kwargs : dict Remaining produce_scattertext_explorer keywords get_tooltip_content Returns ------- str HTML of visualization
[ "Parameters", "----------", "corpus", ":", "ParsedCorpus", "It", "is", "highly", "recommended", "to", "use", "a", "stoplisted", "unigram", "corpus", "--", "corpus", ".", "get_stoplisted_unigram_corpus", "()", "category", ":", "str", "word2vec_model", ":", "Word2Vec"...
python
train
geertj/gruvi
lib/gruvi/poll.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/poll.py#L151-L160
def close(self): """Close the poll instance.""" if self._poll is None: return self._poll.close() self._poll = None self._readers = 0 self._writers = 0 self._events = 0 clear_callbacks(self)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_poll", "is", "None", ":", "return", "self", ".", "_poll", ".", "close", "(", ")", "self", ".", "_poll", "=", "None", "self", ".", "_readers", "=", "0", "self", ".", "_writers", "=", "0",...
Close the poll instance.
[ "Close", "the", "poll", "instance", "." ]
python
train
MonashBI/arcana
arcana/data/collection.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/collection.py#L167-L205
def bind(self, study, **kwargs): # @UnusedVariable """ Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study. """ if self.frequency == 'per_subject': tree_subject_ids = list(study.tree.subject_ids) subject_ids = list(self._collection.keys()) if tree_subject_ids != subject_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(subject_ids), "', '".join(tree_subject_ids))) elif self.frequency == 'per_visit': tree_visit_ids = list(study.tree.visit_ids) visit_ids = list(self._collection.keys()) if tree_visit_ids != visit_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(visit_ids), "', '".join(tree_visit_ids))) elif self.frequency == 'per_session': for subject in study.tree.subjects: if subject.id not in self._collection: raise ArcanaUsageError( "Study subject ID '{}' was not found in colleciton " "provided to '{}' (found '{}')".format( subject.id, self.name, "', '".join(self._collection.keys()))) for session in subject.sessions: if session.visit_id not in self._collection[subject.id]: raise ArcanaUsageError( "Study visit ID '{}' for subject '{}' was not " "found in colleciton provided to '{}' (found '{}')" .format(subject.id, self.name, "', '".join( self._collection[subject.id].keys())))
[ "def", "bind", "(", "self", ",", "study", ",", "*", "*", "kwargs", ")", ":", "# @UnusedVariable", "if", "self", ".", "frequency", "==", "'per_subject'", ":", "tree_subject_ids", "=", "list", "(", "study", ".", "tree", ".", "subject_ids", ")", "subject_ids"...
Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study.
[ "Used", "for", "duck", "typing", "Collection", "objects", "with", "Spec", "and", "Match", "in", "source", "and", "sink", "initiation", ".", "Checks", "IDs", "match", "sessions", "in", "study", "." ]
python
train
LuminosoInsight/wordfreq
wordfreq/__init__.py
https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L35-L84
def read_cBpack(filename): """ Read a file from an idiosyncratic format that we use for storing approximate word frequencies, called "cBpack". The cBpack format is as follows: - The file on disk is a gzipped file in msgpack format, which decodes to a list whose first element is a header, and whose remaining elements are lists of words. - The header is a dictionary with 'format' and 'version' keys that make sure that we're reading the right thing. - Each inner list of words corresponds to a particular word frequency, rounded to the nearest centibel -- that is, one tenth of a decibel, or a factor of 10 ** .01. 0 cB represents a word that occurs with probability 1, so it is the only word in the data (this of course doesn't happen). -200 cB represents a word that occurs once per 100 tokens, -300 cB represents a word that occurs once per 1000 tokens, and so on. - The index of each list within the overall list (without the header) is the negative of its frequency in centibels. - Each inner list is sorted in alphabetical order. As an example, consider a corpus consisting only of the words "red fish blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red" and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word frequencies would decode to this: [ {'format': 'cB', 'version': 1}, [], [], [], ... # 30 empty lists ['fish'], [], [], [], ... # 29 more empty lists ['blue', 'red'] ] """ with gzip.open(filename, 'rb') as infile: data = msgpack.load(infile, raw=False) header = data[0] if ( not isinstance(header, dict) or header.get('format') != 'cB' or header.get('version') != 1 ): raise ValueError("Unexpected header: %r" % header) return data[1:]
[ "def", "read_cBpack", "(", "filename", ")", ":", "with", "gzip", ".", "open", "(", "filename", ",", "'rb'", ")", "as", "infile", ":", "data", "=", "msgpack", ".", "load", "(", "infile", ",", "raw", "=", "False", ")", "header", "=", "data", "[", "0"...
Read a file from an idiosyncratic format that we use for storing approximate word frequencies, called "cBpack". The cBpack format is as follows: - The file on disk is a gzipped file in msgpack format, which decodes to a list whose first element is a header, and whose remaining elements are lists of words. - The header is a dictionary with 'format' and 'version' keys that make sure that we're reading the right thing. - Each inner list of words corresponds to a particular word frequency, rounded to the nearest centibel -- that is, one tenth of a decibel, or a factor of 10 ** .01. 0 cB represents a word that occurs with probability 1, so it is the only word in the data (this of course doesn't happen). -200 cB represents a word that occurs once per 100 tokens, -300 cB represents a word that occurs once per 1000 tokens, and so on. - The index of each list within the overall list (without the header) is the negative of its frequency in centibels. - Each inner list is sorted in alphabetical order. As an example, consider a corpus consisting only of the words "red fish blue fish". The word "fish" occurs as 50% of tokens (-30 cB), while "red" and "blue" occur as 25% of tokens (-60 cB). The cBpack file of their word frequencies would decode to this: [ {'format': 'cB', 'version': 1}, [], [], [], ... # 30 empty lists ['fish'], [], [], [], ... # 29 more empty lists ['blue', 'red'] ]
[ "Read", "a", "file", "from", "an", "idiosyncratic", "format", "that", "we", "use", "for", "storing", "approximate", "word", "frequencies", "called", "cBpack", "." ]
python
train
minhhoit/yacms
yacms/accounts/__init__.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/accounts/__init__.py#L43-L61
def get_profile_for_user(user): """ Returns site-specific profile for this user. Raises ``ProfileNotConfigured`` if ``settings.ACCOUNTS_PROFILE_MODEL`` is not set, and ``ImproperlyConfigured`` if the corresponding model can't be found. """ if not hasattr(user, '_yacms_profile'): # Raises ProfileNotConfigured if not bool(ACCOUNTS_PROFILE_MODEL) profile_model = get_profile_model() profile_manager = profile_model._default_manager.using(user._state.db) user_field = get_profile_user_fieldname(profile_model, user.__class__) profile, created = profile_manager.get_or_create(**{user_field: user}) profile.user = user user._yacms_profile = profile return user._yacms_profile
[ "def", "get_profile_for_user", "(", "user", ")", ":", "if", "not", "hasattr", "(", "user", ",", "'_yacms_profile'", ")", ":", "# Raises ProfileNotConfigured if not bool(ACCOUNTS_PROFILE_MODEL)", "profile_model", "=", "get_profile_model", "(", ")", "profile_manager", "=", ...
Returns site-specific profile for this user. Raises ``ProfileNotConfigured`` if ``settings.ACCOUNTS_PROFILE_MODEL`` is not set, and ``ImproperlyConfigured`` if the corresponding model can't be found.
[ "Returns", "site", "-", "specific", "profile", "for", "this", "user", ".", "Raises", "ProfileNotConfigured", "if", "settings", ".", "ACCOUNTS_PROFILE_MODEL", "is", "not", "set", "and", "ImproperlyConfigured", "if", "the", "corresponding", "model", "can", "t", "be"...
python
train
twidi/py-dataql
dataql/parsers/base.py
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/base.py#L343-L369
def visit_oper(self, node, _): """Return an operator as a string. Currently only "=" and ":" (both synonyms) Arguments --------- node : parsimonious.nodes.Node. _ (children) : list, unused Result ------ str The operator as a string. Example ------- >>> BaseParser('=', default_rule='OPER').data '=' >>> BaseParser(':', default_rule='OPER').data '=' """ oper = node.text if oper == ':': oper = '=' return oper
[ "def", "visit_oper", "(", "self", ",", "node", ",", "_", ")", ":", "oper", "=", "node", ".", "text", "if", "oper", "==", "':'", ":", "oper", "=", "'='", "return", "oper" ]
Return an operator as a string. Currently only "=" and ":" (both synonyms) Arguments --------- node : parsimonious.nodes.Node. _ (children) : list, unused Result ------ str The operator as a string. Example ------- >>> BaseParser('=', default_rule='OPER').data '=' >>> BaseParser(':', default_rule='OPER').data '='
[ "Return", "an", "operator", "as", "a", "string", ".", "Currently", "only", "=", "and", ":", "(", "both", "synonyms", ")" ]
python
train
saltstack/salt
salt/ext/ipaddress.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L1377-L1398
def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry. """ return (self in IPv4Network('0.0.0.0/8') or self in IPv4Network('10.0.0.0/8') or self in IPv4Network('127.0.0.0/8') or self in IPv4Network('169.254.0.0/16') or self in IPv4Network('172.16.0.0/12') or self in IPv4Network('192.0.0.0/29') or self in IPv4Network('192.0.0.170/31') or self in IPv4Network('192.0.2.0/24') or self in IPv4Network('192.168.0.0/16') or self in IPv4Network('198.18.0.0/15') or self in IPv4Network('198.51.100.0/24') or self in IPv4Network('203.0.113.0/24') or self in IPv4Network('240.0.0.0/4') or self in IPv4Network('255.255.255.255/32'))
[ "def", "is_private", "(", "self", ")", ":", "return", "(", "self", "in", "IPv4Network", "(", "'0.0.0.0/8'", ")", "or", "self", "in", "IPv4Network", "(", "'10.0.0.0/8'", ")", "or", "self", "in", "IPv4Network", "(", "'127.0.0.0/8'", ")", "or", "self", "in", ...
Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv4-special-registry.
[ "Test", "if", "this", "address", "is", "allocated", "for", "private", "networks", "." ]
python
train
cdeboever3/cdpybio
cdpybio/analysis.py
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L211-L299
def ld_prune(df, ld_beds, snvs=None): """ Prune set of GWAS based on LD and significance. A graph of all SNVs is constructed with edges for LD >= 0.8 and the most significant SNV per connected component is kept. Parameters ---------- df : pandas.DataFrame Pandas dataframe with unique SNVs. The index is of the form chrom:pos where pos is the one-based position of the SNV. The columns must include chrom, start, end, and pvalue. chrom, start, end make a zero-based bed file with the SNV coordinates. ld_beds : dict Dict whose keys are chromosomes and whose values are filenames of tabixed LD bed files. An LD bed file looks like "chr1 11007 11008 11008:11012:1" where the first three columns are the zero-based half-open coordinate of the SNV and the fourth column has the one-based coordinate followed of the SNV followed by the one-based coordinate of a different SNV and the LD between them. In this example, the variants are in perfect LD. The bed file should also contain the reciprocal line for this LD relationship: "chr1 11011 11012 11012:11008:1". snvs : list List of SNVs to filter against. If a SNV is not in this list, it will not be included. If you are working with GWAS SNPs, this is useful for filtering out SNVs that aren't in the SNPsnap database for instance. Returns ------- out : pandas.DataFrame Pandas dataframe in the same format as the input dataframe but with only independent SNVs. """ import networkx as nx import tabix if snvs: df = df.ix[set(df.index) & set(snvs)] keep = set() for chrom in ld_beds.keys(): tdf = df[df['chrom'].astype(str) == chrom] if tdf.shape[0] > 0: f = tabix.open(ld_beds[chrom]) # Make a dict where each key is a SNP and the values are all of the # other SNPs in LD with the key. 
ld_d = {} for j in tdf.index: p = tdf.ix[j, 'end'] ld_d[p] = [] try: r = f.query(chrom, p - 1, p) while True: try: n = r.next() p1, p2, r2 = n[-1].split(':') if float(r2) >= 0.8: ld_d[p].append(int(p2)) except StopIteration: break except TabixError: continue # Make adjacency matrix for LD. cols = sorted(list(set( [item for sublist in ld_d.values() for item in sublist]))) t = pd.DataFrame(0, index=ld_d.keys(), columns=cols) for k in ld_d.keys(): t.ix[k, ld_d[k]] = 1 t.index = ['{}:{}'.format(chrom, x) for x in t.index] t.columns = ['{}:{}'.format(chrom, x) for x in t.columns] # Keep all SNPs not in LD with any others. These will be in the index # but not in the columns. keep |= set(t.index) - set(t.columns) # Filter so we only have SNPs that are in LD with at least one other # SNP. ind = list(set(t.columns) & set(t.index)) # Keep one most sig. SNP per connected subgraph. t = t.ix[ind, ind] g = nx.Graph(t.values) c = nx.connected_components(g) while True: try: sg = c.next() s = tdf.ix[t.index[list(sg)]] keep.add(s[s.pvalue == s.pvalue.min()].index[0]) except StopIteration: break out = df.ix[keep] return out
[ "def", "ld_prune", "(", "df", ",", "ld_beds", ",", "snvs", "=", "None", ")", ":", "import", "networkx", "as", "nx", "import", "tabix", "if", "snvs", ":", "df", "=", "df", ".", "ix", "[", "set", "(", "df", ".", "index", ")", "&", "set", "(", "sn...
Prune set of GWAS based on LD and significance. A graph of all SNVs is constructed with edges for LD >= 0.8 and the most significant SNV per connected component is kept. Parameters ---------- df : pandas.DataFrame Pandas dataframe with unique SNVs. The index is of the form chrom:pos where pos is the one-based position of the SNV. The columns must include chrom, start, end, and pvalue. chrom, start, end make a zero-based bed file with the SNV coordinates. ld_beds : dict Dict whose keys are chromosomes and whose values are filenames of tabixed LD bed files. An LD bed file looks like "chr1 11007 11008 11008:11012:1" where the first three columns are the zero-based half-open coordinate of the SNV and the fourth column has the one-based coordinate followed of the SNV followed by the one-based coordinate of a different SNV and the LD between them. In this example, the variants are in perfect LD. The bed file should also contain the reciprocal line for this LD relationship: "chr1 11011 11012 11012:11008:1". snvs : list List of SNVs to filter against. If a SNV is not in this list, it will not be included. If you are working with GWAS SNPs, this is useful for filtering out SNVs that aren't in the SNPsnap database for instance. Returns ------- out : pandas.DataFrame Pandas dataframe in the same format as the input dataframe but with only independent SNVs.
[ "Prune", "set", "of", "GWAS", "based", "on", "LD", "and", "significance", ".", "A", "graph", "of", "all", "SNVs", "is", "constructed", "with", "edges", "for", "LD", ">", "=", "0", ".", "8", "and", "the", "most", "significant", "SNV", "per", "connected"...
python
train
nickpandolfi/Cyther
cyther/pathway.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/pathway.py#L233-L247
def has_suffix(path_name, suffix): """ Determines if path_name has a suffix of at least 'suffix' """ if isinstance(suffix, str): suffix = disintegrate(suffix) components = disintegrate(path_name) for i in range(-1, -(len(suffix) + 1), -1): if components[i] != suffix[i]: break else: return True return False
[ "def", "has_suffix", "(", "path_name", ",", "suffix", ")", ":", "if", "isinstance", "(", "suffix", ",", "str", ")", ":", "suffix", "=", "disintegrate", "(", "suffix", ")", "components", "=", "disintegrate", "(", "path_name", ")", "for", "i", "in", "range...
Determines if path_name has a suffix of at least 'suffix'
[ "Determines", "if", "path_name", "has", "a", "suffix", "of", "at", "least", "suffix" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/generator.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L1265-L1314
def _if_to_py_ast(ctx: GeneratorContext, node: If) -> GeneratedPyAST: """Generate an intermediate if statement which assigns to a temporary variable, which is returned as the expression value at the end of evaluation. Every expression in Basilisp is true if it is not the literal values nil or false. This function compiles direct checks for the test value against the Python values None and False to accommodate this behavior. Note that the if and else bodies are switched in compilation so that we can perform a short-circuit or comparison, rather than exhaustively checking for both false and nil each time.""" assert node.op == NodeOp.IF test_ast = gen_py_ast(ctx, node.test) result_name = genname(_IF_RESULT_PREFIX) then_ast = __if_body_to_py_ast(ctx, node.then, result_name) else_ast = __if_body_to_py_ast(ctx, node.else_, result_name) test_name = genname(_IF_TEST_PREFIX) test_assign = ast.Assign( targets=[ast.Name(id=test_name, ctx=ast.Store())], value=test_ast.node ) ifstmt = ast.If( test=ast.BoolOp( op=ast.Or(), values=[ ast.Compare( left=ast.NameConstant(None), ops=[ast.Is()], comparators=[ast.Name(id=test_name, ctx=ast.Load())], ), ast.Compare( left=ast.NameConstant(False), ops=[ast.Is()], comparators=[ast.Name(id=test_name, ctx=ast.Load())], ), ], ), values=[], body=list(map(statementize, chain(else_ast.dependencies, [else_ast.node]))), orelse=list(map(statementize, chain(then_ast.dependencies, [then_ast.node]))), ) return GeneratedPyAST( node=ast.Name(id=result_name, ctx=ast.Load()), dependencies=list(chain(test_ast.dependencies, [test_assign, ifstmt])), )
[ "def", "_if_to_py_ast", "(", "ctx", ":", "GeneratorContext", ",", "node", ":", "If", ")", "->", "GeneratedPyAST", ":", "assert", "node", ".", "op", "==", "NodeOp", ".", "IF", "test_ast", "=", "gen_py_ast", "(", "ctx", ",", "node", ".", "test", ")", "re...
Generate an intermediate if statement which assigns to a temporary variable, which is returned as the expression value at the end of evaluation. Every expression in Basilisp is true if it is not the literal values nil or false. This function compiles direct checks for the test value against the Python values None and False to accommodate this behavior. Note that the if and else bodies are switched in compilation so that we can perform a short-circuit or comparison, rather than exhaustively checking for both false and nil each time.
[ "Generate", "an", "intermediate", "if", "statement", "which", "assigns", "to", "a", "temporary", "variable", "which", "is", "returned", "as", "the", "expression", "value", "at", "the", "end", "of", "evaluation", "." ]
python
test
spotify/luigi
luigi/contrib/redshift.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/redshift.py#L293-L357
def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented " "for %r and columns types not " "specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format( name=name, type=type) for name, type in self.columns ) table_constraints = '' if self.table_constraints != '': table_constraints = ', ' + self.table_constraints query = ("CREATE {type} TABLE " "{table} ({coldefs} {table_constraints}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_constraints=table_constraints, table_attributes=self.table_attributes) connection.cursor().execute(query) elif len(self.columns[0]) == 3: # if columns is specified as (name, type, encoding) tuples # possible column encodings: https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html coldefs = ','.join( '{name} {type} ENCODE {encoding}'.format( name=name, type=type, encoding=encoding) for name, type, encoding in self.columns ) table_constraints = '' if self.table_constraints != '': table_constraints = ',' + self.table_constraints query = ("CREATE {type} TABLE " "{table} ({coldefs} {table_constraints}) " "{table_attributes}").format( type=self.table_type, table=self.table, coldefs=coldefs, table_constraints=table_constraints, table_attributes=self.table_attributes) connection.cursor().execute(query) else: raise ValueError("create_table() found no columns for %r" % self.table)
[ "def", "create_table", "(", "self", ",", "connection", ")", ":", "if", "len", "(", "self", ".", "columns", "[", "0", "]", ")", "==", "1", ":", "# only names of columns specified, no types", "raise", "NotImplementedError", "(", "\"create_table() not implemented \"", ...
Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction.
[ "Override", "to", "provide", "code", "for", "creating", "the", "target", "table", "." ]
python
train
msfrank/cifparser
cifparser/parser.py
https://github.com/msfrank/cifparser/blob/ecd899ba2e7b990e2cec62b115742d830e7e4384/cifparser/parser.py#L414-L421
def print_ast(f): """ :param f: :type f: file :return: """ for linenum,indent,value in iter_lines(f): print("{0}{1}|{2}".format(str(linenum).rjust(3), ' ' * indent, value))
[ "def", "print_ast", "(", "f", ")", ":", "for", "linenum", ",", "indent", ",", "value", "in", "iter_lines", "(", "f", ")", ":", "print", "(", "\"{0}{1}|{2}\"", ".", "format", "(", "str", "(", "linenum", ")", ".", "rjust", "(", "3", ")", ",", "' '", ...
:param f: :type f: file :return:
[ ":", "param", "f", ":", ":", "type", "f", ":", "file", ":", "return", ":" ]
python
train
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/dictandlist.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/dictandlist.py#L268-L280
def integer_key_convert(dictin, dropfailedkeys=False): # type: (DictUpperBound, bool) -> Dict """Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers """ return key_value_convert(dictin, keyfn=int, dropfailedkeys=dropfailedkeys)
[ "def", "integer_key_convert", "(", "dictin", ",", "dropfailedkeys", "=", "False", ")", ":", "# type: (DictUpperBound, bool) -> Dict", "return", "key_value_convert", "(", "dictin", ",", "keyfn", "=", "int", ",", "dropfailedkeys", "=", "dropfailedkeys", ")" ]
Convert keys of dictionary to integers Args: dictin (DictUpperBound): Input dictionary dropfailedkeys (bool): Whether to drop dictionary entries where key conversion fails. Defaults to False. Returns: Dict: Dictionary with keys converted to integers
[ "Convert", "keys", "of", "dictionary", "to", "integers" ]
python
train
sporsh/carnifex
carnifex/sshprocess.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/sshprocess.py#L64-L88
def execute(self, processProtocol, command, env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None): """Execute a process on the remote machine using SSH @param processProtocol: the ProcessProtocol instance to connect @param executable: the executable program to run @param args: the arguments to pass to the process @param env: environment variables to request the remote ssh server to set @param path: the remote path to start the remote process on @param uid: user id or username to connect to the ssh server with @param gid: this is not used for remote ssh processes @param usePTY: wither to request a pty for the process @param childFDs: file descriptors to use for stdin, stdout and stderr """ sshCommand = (command if isinstance(command, SSHCommand) else SSHCommand(command, self.precursor, path)) commandLine = sshCommand.getCommandLine() # Get connection to ssh server connectionDeferred = self.getConnection(uid) # spawn the remote process connectionDeferred.addCallback(connectProcess, processProtocol, commandLine, env, usePTY, childFDs) return connectionDeferred
[ "def", "execute", "(", "self", ",", "processProtocol", ",", "command", ",", "env", "=", "{", "}", ",", "path", "=", "None", ",", "uid", "=", "None", ",", "gid", "=", "None", ",", "usePTY", "=", "0", ",", "childFDs", "=", "None", ")", ":", "sshCom...
Execute a process on the remote machine using SSH @param processProtocol: the ProcessProtocol instance to connect @param executable: the executable program to run @param args: the arguments to pass to the process @param env: environment variables to request the remote ssh server to set @param path: the remote path to start the remote process on @param uid: user id or username to connect to the ssh server with @param gid: this is not used for remote ssh processes @param usePTY: wither to request a pty for the process @param childFDs: file descriptors to use for stdin, stdout and stderr
[ "Execute", "a", "process", "on", "the", "remote", "machine", "using", "SSH" ]
python
train
VasilyStepanov/pywidl
pywidl/grammar.py
https://github.com/VasilyStepanov/pywidl/blob/8d84b2e53157bfe276bf16301c19e8b6b32e861e/pywidl/grammar.py#L988-L992
def p_ExtendedAttributeIdent(p): """ExtendedAttributeIdent : IDENTIFIER "=" IDENTIFIER""" p[0] = model.ExtendedAttribute( name=p[1], value=model.ExtendedAttributeValue(name=p[3]))
[ "def", "p_ExtendedAttributeIdent", "(", "p", ")", ":", "p", "[", "0", "]", "=", "model", ".", "ExtendedAttribute", "(", "name", "=", "p", "[", "1", "]", ",", "value", "=", "model", ".", "ExtendedAttributeValue", "(", "name", "=", "p", "[", "3", "]", ...
ExtendedAttributeIdent : IDENTIFIER "=" IDENTIFIER
[ "ExtendedAttributeIdent", ":", "IDENTIFIER", "=", "IDENTIFIER" ]
python
train
jeffknupp/sandman
sandman/sandman.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandman.py#L203-L219
def get_resource_data(incoming_request): """Return the data from the incoming *request* based on the Content-type.""" content_type = incoming_request.headers['Content-type'].split(';')[0] if ('Content-type' not in incoming_request.headers or content_type in JSON_CONTENT_TYPES): return incoming_request.json elif content_type in HTML_CONTENT_TYPES: if not incoming_request.form: raise InvalidAPIUsage(400) return incoming_request.form else: # HTTP 415: Unsupported Media Type raise InvalidAPIUsage( 415, UNSUPPORTED_CONTENT_TYPE_MESSAGE.format( types=incoming_request.headers['Content-type']))
[ "def", "get_resource_data", "(", "incoming_request", ")", ":", "content_type", "=", "incoming_request", ".", "headers", "[", "'Content-type'", "]", ".", "split", "(", "';'", ")", "[", "0", "]", "if", "(", "'Content-type'", "not", "in", "incoming_request", ".",...
Return the data from the incoming *request* based on the Content-type.
[ "Return", "the", "data", "from", "the", "incoming", "*", "request", "*", "based", "on", "the", "Content", "-", "type", "." ]
python
train
saltstack/salt
salt/modules/firewalld.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L855-L872
def get_interfaces(zone, permanent=True): ''' List interfaces bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_interfaces zone ''' cmd = '--zone={0} --list-interfaces'.format(zone) if permanent: cmd += ' --permanent' return __firewall_cmd(cmd).split()
[ "def", "get_interfaces", "(", "zone", ",", "permanent", "=", "True", ")", ":", "cmd", "=", "'--zone={0} --list-interfaces'", ".", "format", "(", "zone", ")", "if", "permanent", ":", "cmd", "+=", "' --permanent'", "return", "__firewall_cmd", "(", "cmd", ")", ...
List interfaces bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_interfaces zone
[ "List", "interfaces", "bound", "to", "a", "zone" ]
python
train
epfl-lts2/pygsp
pygsp/filters/approximations.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/filters/approximations.py#L58-L114
def cheby_op(G, c, signal, **kwargs): r""" Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering """ # Handle if we do not have a list of filters but only a simple filter in cheby_coeff. if not isinstance(c, np.ndarray): c = np.array(c) c = np.atleast_2d(c) Nscales, M = c.shape if M < 2: raise TypeError("The coefficients have an invalid shape") # thanks to that, we can also have 1d signal. try: Nv = np.shape(signal)[1] r = np.zeros((G.N * Nscales, Nv)) except IndexError: r = np.zeros((G.N * Nscales)) a_arange = [0, G.lmax] a1 = float(a_arange[1] - a_arange[0]) / 2. a2 = float(a_arange[1] + a_arange[0]) / 2. twf_old = signal twf_cur = (G.L.dot(signal) - a2 * signal) / a1 tmpN = np.arange(G.N, dtype=int) for i in range(Nscales): r[tmpN + G.N*i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur factor = 2/a1 * (G.L - a2 * sparse.eye(G.N)) for k in range(2, M): twf_new = factor.dot(twf_cur) - twf_old for i in range(Nscales): r[tmpN + G.N*i] += c[i, k] * twf_new twf_old = twf_cur twf_cur = twf_new return r
[ "def", "cheby_op", "(", "G", ",", "c", ",", "signal", ",", "*", "*", "kwargs", ")", ":", "# Handle if we do not have a list of filters but only a simple filter in cheby_coeff.", "if", "not", "isinstance", "(", "c", ",", "np", ".", "ndarray", ")", ":", "c", "=", ...
r""" Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering
[ "r", "Chebyshev", "polynomial", "of", "graph", "Laplacian", "applied", "to", "vector", "." ]
python
train
brbsix/pip-utils
pip_utils/outdated.py
https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L218-L245
def run_outdated(cls, options): """Print outdated user packages.""" latest_versions = sorted( cls.find_packages_latest_versions(cls.options), key=lambda p: p[0].project_name.lower()) for dist, latest_version, typ in latest_versions: if latest_version > dist.parsed_version: if options.all: pass elif options.pinned: if cls.can_be_updated(dist, latest_version): continue elif not options.pinned: if not cls.can_be_updated(dist, latest_version): continue elif options.update: print(dist.project_name if options.brief else 'Updating %s to Latest: %s [%s]' % (cls.output_package(dist), latest_version, typ)) main(['install', '--upgrade'] + ([ '--user' ] if ENABLE_USER_SITE else []) + [dist.key]) continue print(dist.project_name if options.brief else '%s - Latest: %s [%s]' % (cls.output_package(dist), latest_version, typ))
[ "def", "run_outdated", "(", "cls", ",", "options", ")", ":", "latest_versions", "=", "sorted", "(", "cls", ".", "find_packages_latest_versions", "(", "cls", ".", "options", ")", ",", "key", "=", "lambda", "p", ":", "p", "[", "0", "]", ".", "project_name"...
Print outdated user packages.
[ "Print", "outdated", "user", "packages", "." ]
python
train
fronzbot/blinkpy
blinkpy/blinkpy.py
https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L197-L214
def get_cameras(self): """Retrieve a camera list for each onboarded network.""" response = api.request_homescreen(self) try: all_cameras = {} for camera in response['cameras']: camera_network = str(camera['network_id']) camera_name = camera['name'] camera_id = camera['id'] camera_info = {'name': camera_name, 'id': camera_id} if camera_network not in all_cameras: all_cameras[camera_network] = [] all_cameras[camera_network].append(camera_info) return all_cameras except KeyError: _LOGGER.error("Initialization failue. Could not retrieve cameras.") return {}
[ "def", "get_cameras", "(", "self", ")", ":", "response", "=", "api", ".", "request_homescreen", "(", "self", ")", "try", ":", "all_cameras", "=", "{", "}", "for", "camera", "in", "response", "[", "'cameras'", "]", ":", "camera_network", "=", "str", "(", ...
Retrieve a camera list for each onboarded network.
[ "Retrieve", "a", "camera", "list", "for", "each", "onboarded", "network", "." ]
python
train
pypa/pipenv
pipenv/vendor/requirementslib/models/dependencies.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/dependencies.py#L57-L77
def find_all_matches(finder, ireq, pre=False): # type: (PackageFinder, InstallRequirement, bool) -> List[InstallationCandidate] """Find all matching dependencies using the supplied finder and the given ireq. :param finder: A package finder for discovering matching candidates. :type finder: :class:`~pip._internal.index.PackageFinder` :param ireq: An install requirement. :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A list of matching candidates. :rtype: list[:class:`~pip._internal.index.InstallationCandidate`] """ candidates = clean_requires_python(finder.find_all_candidates(ireq.name)) versions = {candidate.version for candidate in candidates} allowed_versions = _get_filtered_versions(ireq, versions, pre) if not pre and not allowed_versions: allowed_versions = _get_filtered_versions(ireq, versions, True) candidates = {c for c in candidates if c.version in allowed_versions} return candidates
[ "def", "find_all_matches", "(", "finder", ",", "ireq", ",", "pre", "=", "False", ")", ":", "# type: (PackageFinder, InstallRequirement, bool) -> List[InstallationCandidate]", "candidates", "=", "clean_requires_python", "(", "finder", ".", "find_all_candidates", "(", "ireq",...
Find all matching dependencies using the supplied finder and the given ireq. :param finder: A package finder for discovering matching candidates. :type finder: :class:`~pip._internal.index.PackageFinder` :param ireq: An install requirement. :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A list of matching candidates. :rtype: list[:class:`~pip._internal.index.InstallationCandidate`]
[ "Find", "all", "matching", "dependencies", "using", "the", "supplied", "finder", "and", "the", "given", "ireq", "." ]
python
train
ojarva/python-sshpubkeys
sshpubkeys/keys.py
https://github.com/ojarva/python-sshpubkeys/blob/86dc1ab27ce82dcc091ce127416cc3ee219e9bec/sshpubkeys/keys.py#L324-L353
def _process_ssh_dss(self, data): """Parses ssh-dsa public keys.""" data_fields = {} current_position = 0 for item in ("p", "q", "g", "y"): current_position, value = self._unpack_by_int(data, current_position) data_fields[item] = self._parse_long(value) q_bits = self._bits_in_number(data_fields["q"]) p_bits = self._bits_in_number(data_fields["p"]) if q_bits != self.DSA_N_LENGTH: raise InvalidKeyError("Incorrect DSA key parameters: bits(p)=%s, q=%s" % (self.bits, q_bits)) if self.strict_mode: min_length = self.DSA_MIN_LENGTH_STRICT max_length = self.DSA_MAX_LENGTH_STRICT else: min_length = self.DSA_MIN_LENGTH_LOOSE max_length = self.DSA_MAX_LENGTH_LOOSE if p_bits < min_length: raise TooShortKeyError("%s key can not be shorter than %s bits (was %s)" % (self.key_type, min_length, p_bits)) if p_bits > max_length: raise TooLongKeyError( "%s key data can not be longer than %s bits (was %s)" % (self.key_type, max_length, p_bits) ) dsa_parameters = DSAParameterNumbers(data_fields["p"], data_fields["q"], data_fields["g"]) self.dsa = DSAPublicNumbers(data_fields["y"], dsa_parameters).public_key(default_backend()) self.bits = self.dsa.key_size return current_position
[ "def", "_process_ssh_dss", "(", "self", ",", "data", ")", ":", "data_fields", "=", "{", "}", "current_position", "=", "0", "for", "item", "in", "(", "\"p\"", ",", "\"q\"", ",", "\"g\"", ",", "\"y\"", ")", ":", "current_position", ",", "value", "=", "se...
Parses ssh-dsa public keys.
[ "Parses", "ssh", "-", "dsa", "public", "keys", "." ]
python
test
raiden-network/raiden
raiden/transfer/mediated_transfer/mediator.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/mediated_transfer/mediator.py#L1565-L1678
def state_transition( mediator_state: Optional[MediatorTransferState], state_change: StateChange, channelidentifiers_to_channels: ChannelMap, nodeaddresses_to_networkstates: NodeNetworkStateMap, pseudo_random_generator: random.Random, block_number: BlockNumber, block_hash: BlockHash, ) -> TransitionResult[MediatorTransferState]: """ State machine for a node mediating a transfer. """ # pylint: disable=too-many-branches # Notes: # - A user cannot cancel a mediated transfer after it was initiated, she # may only reject to mediate before hand. This is because the mediator # doesn't control the secret reveal and needs to wait for the lock # expiration before safely discarding the transfer. iteration = TransitionResult(mediator_state, list()) if type(state_change) == ActionInitMediator: assert isinstance(state_change, ActionInitMediator), MYPY_ANNOTATION if mediator_state is None: iteration = handle_init( state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, nodeaddresses_to_networkstates=nodeaddresses_to_networkstates, pseudo_random_generator=pseudo_random_generator, block_number=block_number, ) elif type(state_change) == Block: assert isinstance(state_change, Block), MYPY_ANNOTATION assert mediator_state, 'Block should be accompanied by a valid mediator state' iteration = handle_block( mediator_state=mediator_state, state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, ) elif type(state_change) == ReceiveTransferRefund: assert isinstance(state_change, ReceiveTransferRefund), MYPY_ANNOTATION msg = 'ReceiveTransferRefund should be accompanied by a valid mediator state' assert mediator_state, msg iteration = handle_refundtransfer( mediator_state=mediator_state, mediator_state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, nodeaddresses_to_networkstates=nodeaddresses_to_networkstates, 
pseudo_random_generator=pseudo_random_generator, block_number=block_number, ) elif type(state_change) == ReceiveSecretReveal: assert isinstance(state_change, ReceiveSecretReveal), MYPY_ANNOTATION msg = 'ReceiveSecretReveal should be accompanied by a valid mediator state' assert mediator_state, msg iteration = handle_offchain_secretreveal( mediator_state=mediator_state, mediator_state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash, ) elif type(state_change) == ContractReceiveSecretReveal: assert isinstance(state_change, ContractReceiveSecretReveal), MYPY_ANNOTATION msg = 'ContractReceiveSecretReveal should be accompanied by a valid mediator state' assert mediator_state, msg iteration = handle_onchain_secretreveal( mediator_state=mediator_state, onchain_secret_reveal=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, ) elif type(state_change) == ReceiveUnlock: assert isinstance(state_change, ReceiveUnlock), MYPY_ANNOTATION assert mediator_state, 'ReceiveUnlock should be accompanied by a valid mediator state' iteration = handle_unlock( mediator_state=mediator_state, state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, ) elif type(state_change) == ReceiveLockExpired: assert isinstance(state_change, ReceiveLockExpired), MYPY_ANNOTATION assert mediator_state, 'ReceiveLockExpired should be accompanied by a valid mediator state' iteration = handle_lock_expired( mediator_state=mediator_state, state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, block_number=block_number, ) elif type(state_change) == ActionChangeNodeNetworkState: assert isinstance(state_change, ActionChangeNodeNetworkState), MYPY_ANNOTATION msg = 'ActionChangeNodeNetworkState should be accompanied by a 
valid mediator state' assert mediator_state, msg iteration = handle_node_change_network_state( mediator_state=mediator_state, state_change=state_change, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, ) # this is the place for paranoia if iteration.new_state is not None: assert isinstance(iteration.new_state, MediatorTransferState) sanity_check(iteration.new_state, channelidentifiers_to_channels) return clear_if_finalized(iteration, channelidentifiers_to_channels)
[ "def", "state_transition", "(", "mediator_state", ":", "Optional", "[", "MediatorTransferState", "]", ",", "state_change", ":", "StateChange", ",", "channelidentifiers_to_channels", ":", "ChannelMap", ",", "nodeaddresses_to_networkstates", ":", "NodeNetworkStateMap", ",", ...
State machine for a node mediating a transfer.
[ "State", "machine", "for", "a", "node", "mediating", "a", "transfer", "." ]
python
train
IdentityPython/pysaml2
src/saml2/sigver.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/sigver.py#L990-L1068
def security_context(conf): """ Creates a security context based on the configuration :param conf: The configuration, this is a Config instance :return: A SecurityContext instance """ if not conf: return None try: metadata = conf.metadata except AttributeError: metadata = None try: id_attr = conf.id_attr_name except AttributeError: id_attr = None sec_backend = None if conf.crypto_backend == 'xmlsec1': xmlsec_binary = conf.xmlsec_binary if not xmlsec_binary: try: _path = conf.xmlsec_path except AttributeError: _path = [] xmlsec_binary = get_xmlsec_binary(_path) # verify that xmlsec is where it's supposed to be if not os.path.exists(xmlsec_binary): # if not os.access(, os.F_OK): err_msg = 'xmlsec binary not found: {binary}' err_msg = err_msg.format(binary=xmlsec_binary) raise SigverError(err_msg) crypto = _get_xmlsec_cryptobackend(xmlsec_binary) _file_name = conf.getattr('key_file', '') if _file_name: try: rsa_key = import_rsa_key_from_file(_file_name) except Exception as err: logger.error('Cannot import key from {file}: {err_msg}'.format( file=_file_name, err_msg=err)) raise else: sec_backend = RSACrypto(rsa_key) elif conf.crypto_backend == 'XMLSecurity': # new and somewhat untested pyXMLSecurity crypto backend. 
crypto = CryptoBackendXMLSecurity() else: err_msg = 'Unknown crypto_backend {backend}' err_msg = err_msg.format(backend=conf.crypto_backend) raise SigverError(err_msg) enc_key_files = [] if conf.encryption_keypairs is not None: for _encryption_keypair in conf.encryption_keypairs: if 'key_file' in _encryption_keypair: enc_key_files.append(_encryption_keypair['key_file']) return SecurityContext( crypto, conf.key_file, cert_file=conf.cert_file, metadata=metadata, only_use_keys_in_metadata=conf.only_use_keys_in_metadata, cert_handler_extra_class=conf.cert_handler_extra_class, generate_cert_info=conf.generate_cert_info, tmp_cert_file=conf.tmp_cert_file, tmp_key_file=conf.tmp_key_file, validate_certificate=conf.validate_certificate, enc_key_files=enc_key_files, encryption_keypairs=conf.encryption_keypairs, sec_backend=sec_backend, id_attr=id_attr)
[ "def", "security_context", "(", "conf", ")", ":", "if", "not", "conf", ":", "return", "None", "try", ":", "metadata", "=", "conf", ".", "metadata", "except", "AttributeError", ":", "metadata", "=", "None", "try", ":", "id_attr", "=", "conf", ".", "id_att...
Creates a security context based on the configuration :param conf: The configuration, this is a Config instance :return: A SecurityContext instance
[ "Creates", "a", "security", "context", "based", "on", "the", "configuration" ]
python
train
pywbem/pywbem
pywbem_mock/_wbemconnection_mock.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L3025-L3032
def _make_pull_imethod_resp(objs, eos, context_id): """ Create the correct imethod response for the open and pull methods """ eos_tup = (u'EndOfSequence', None, eos) enum_ctxt_tup = (u'EnumerationContext', None, context_id) return [("IRETURNVALUE", {}, objs), enum_ctxt_tup, eos_tup]
[ "def", "_make_pull_imethod_resp", "(", "objs", ",", "eos", ",", "context_id", ")", ":", "eos_tup", "=", "(", "u'EndOfSequence'", ",", "None", ",", "eos", ")", "enum_ctxt_tup", "=", "(", "u'EnumerationContext'", ",", "None", ",", "context_id", ")", "return", ...
Create the correct imethod response for the open and pull methods
[ "Create", "the", "correct", "imethod", "response", "for", "the", "open", "and", "pull", "methods" ]
python
train
datadesk/python-documentcloud
documentcloud/__init__.py
https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L219-L265
def search(self, query, page=None, per_page=1000, mentions=3, data=False): """ Retrieve all objects that make a search query. Will loop through all pages that match unless you provide the number of pages you'd like to restrict the search to. Example usage: >> documentcloud.documents.search('salazar') """ # If the user provides a page, search it and stop there if page: document_list = self._get_search_page( query, page=page, per_page=per_page, mentions=mentions, data=data, ) # If the user doesn't provide a page keep looping until you have # everything else: page = 1 document_list = [] # Loop through all the search pages and fetch everything while True: results = self._get_search_page( query, page=page, per_page=per_page, mentions=mentions, data=data, ) if results: document_list += results page += 1 else: break # Convert the JSON objects from the API into Python objects obj_list = [] for doc in document_list: doc['_connection'] = self._connection obj = Document(doc) obj_list.append(obj) # Pass it back out return obj_list
[ "def", "search", "(", "self", ",", "query", ",", "page", "=", "None", ",", "per_page", "=", "1000", ",", "mentions", "=", "3", ",", "data", "=", "False", ")", ":", "# If the user provides a page, search it and stop there", "if", "page", ":", "document_list", ...
Retrieve all objects that make a search query. Will loop through all pages that match unless you provide the number of pages you'd like to restrict the search to. Example usage: >> documentcloud.documents.search('salazar')
[ "Retrieve", "all", "objects", "that", "make", "a", "search", "query", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxworkflow.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L257-L275
def remove_stage(self, stage, edit_version=None, **kwargs): ''' :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID :type stage: int or string :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional) :type edit_version: int :returns: Stage ID that was removed :rtype: string Removes the specified stage from the workflow ''' stage_id = self._get_stage_id(stage) remove_stage_input = {"stage": stage_id} self._add_edit_version_to_request(remove_stage_input, edit_version) try: dxpy.api.workflow_remove_stage(self._dxid, remove_stage_input, **kwargs) finally: self.describe() # update cached describe return stage_id
[ "def", "remove_stage", "(", "self", ",", "stage", ",", "edit_version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "stage_id", "=", "self", ".", "_get_stage_id", "(", "stage", ")", "remove_stage_input", "=", "{", "\"stage\"", ":", "stage_id", "}", "s...
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID :type stage: int or string :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional) :type edit_version: int :returns: Stage ID that was removed :rtype: string Removes the specified stage from the workflow
[ ":", "param", "stage", ":", "A", "number", "for", "the", "stage", "index", "(", "for", "the", "nth", "stage", "starting", "from", "0", ")", "or", "a", "string", "of", "the", "stage", "index", "name", "or", "ID", ":", "type", "stage", ":", "int", "o...
python
train
iskandr/fancyimpute
fancyimpute/dictionary_helpers.py
https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/dictionary_helpers.py#L50-L57
def flattened_nested_key_indices(nested_dict): """ Combine the outer and inner keys of nested dictionaries into a single ordering. """ outer_keys, inner_keys = collect_nested_keys(nested_dict) combined_keys = list(sorted(set(outer_keys + inner_keys))) return {k: i for (i, k) in enumerate(combined_keys)}
[ "def", "flattened_nested_key_indices", "(", "nested_dict", ")", ":", "outer_keys", ",", "inner_keys", "=", "collect_nested_keys", "(", "nested_dict", ")", "combined_keys", "=", "list", "(", "sorted", "(", "set", "(", "outer_keys", "+", "inner_keys", ")", ")", ")...
Combine the outer and inner keys of nested dictionaries into a single ordering.
[ "Combine", "the", "outer", "and", "inner", "keys", "of", "nested", "dictionaries", "into", "a", "single", "ordering", "." ]
python
train
bitesofcode/projexui
projexui/xsettings.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L260-L314
def storeValue(self, xelem, value): """ Stores the value for the inptued instance to the given xml element. :param xelem | <xml.etree.Element> value | <variant> """ typ = type(value) if typ == QtGui.QColor: xelem.set('type', 'color') xelem.text = nativestring(value.name()) elif typ == QtCore.QPoint: xelem.set('type', 'point') xelem.text = '{0},{1}'.format(value.x(), value.y()) elif typ == QtCore.QPointF: xelem.set('type', 'pointf') xelem.text = '{0},{1}'.format(value.x(), value.y()) elif typ == QtCore.QRect: xelem.set('type', 'rect') xelem.text = '{0},{1},{2},{3}'.format(value.x(), value.y(), value.width(), value.height()) elif typ == QtCore.QRectF: xelem.set('type', 'rectf') xelem.text = '{0},{1},{2},{3}'.format(value.x(), value.y(), value.width(), value.height()) elif typ == QtCore.QByteArray: xelem.set('type', 'bytea') xelem.text = cPickle.dumps(nativestring(value)) elif typ == ElementTree.Element: xelem.set('type', 'xml') xelem.append(value) elif typ in (list, tuple, dict): xelem.set('type', 'pickle') xelem.text = cPickle.dumps(value) else: if not typ in (str, unicode): value_text = nativestring(value) else: value_text = value xelem.set('type', typ.__name__) xelem.text = value_text
[ "def", "storeValue", "(", "self", ",", "xelem", ",", "value", ")", ":", "typ", "=", "type", "(", "value", ")", "if", "typ", "==", "QtGui", ".", "QColor", ":", "xelem", ".", "set", "(", "'type'", ",", "'color'", ")", "xelem", ".", "text", "=", "na...
Stores the value for the inptued instance to the given xml element. :param xelem | <xml.etree.Element> value | <variant>
[ "Stores", "the", "value", "for", "the", "inptued", "instance", "to", "the", "given", "xml", "element", ".", ":", "param", "xelem", "|", "<xml", ".", "etree", ".", "Element", ">", "value", "|", "<variant", ">" ]
python
train
reiinakano/xcessiv
xcessiv/models.py
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L317-L327
def meta_features_path(self, path): """Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder """ return os.path.join( path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id) ) + '.npy'
[ "def", "meta_features_path", "(", "self", ",", "path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "path", ",", "app", ".", "config", "[", "'XCESSIV_META_FEATURES_FOLDER'", "]", ",", "str", "(", "self", ".", "id", ")", ")", "+", "'.npy'" ]
Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder
[ "Returns", "path", "for", "meta", "-", "features" ]
python
train
ronhanson/python-tbx
tbx/bytes.py
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/bytes.py#L81-L94
def batch(byte_array, funcs): """ Converts a batch to a list of values. :param byte_array: a byte array of length n*item_length + 8 :return: a list of uuid objects """ result = [] length = bytes_to_int(byte_array[0:4]) item_size = bytes_to_int(byte_array[4:8]) for i in range(0, length): chunk = byte_array[8+i*item_size:8+(i+1)*item_size] for f in funcs: f(chunk) return result
[ "def", "batch", "(", "byte_array", ",", "funcs", ")", ":", "result", "=", "[", "]", "length", "=", "bytes_to_int", "(", "byte_array", "[", "0", ":", "4", "]", ")", "item_size", "=", "bytes_to_int", "(", "byte_array", "[", "4", ":", "8", "]", ")", "...
Converts a batch to a list of values. :param byte_array: a byte array of length n*item_length + 8 :return: a list of uuid objects
[ "Converts", "a", "batch", "to", "a", "list", "of", "values", ".", ":", "param", "byte_array", ":", "a", "byte", "array", "of", "length", "n", "*", "item_length", "+", "8", ":", "return", ":", "a", "list", "of", "uuid", "objects" ]
python
train
push-things/wallabag_api
wallabag_api/wallabag.py
https://github.com/push-things/wallabag_api/blob/8d1e10a6ebc03d1ac9af2b38b57eb69f29b4216e/wallabag_api/wallabag.py#L534-L556
async def get_token(cls, host, **params): """ POST /oauth/v2/token Get a new token :param host: host of the service :param params: will contain : params = {"grant_type": "password", "client_id": "a string", "client_secret": "a string", "username": "a login", "password": "a password"} :return: access token """ params['grant_type'] = "password" path = "/oauth/v2/token" async with aiohttp.ClientSession() as sess: async with sess.post(host + path, data=params) as resp: data = await cls.handle_json_response(resp) return data.get("access_token")
[ "async", "def", "get_token", "(", "cls", ",", "host", ",", "*", "*", "params", ")", ":", "params", "[", "'grant_type'", "]", "=", "\"password\"", "path", "=", "\"/oauth/v2/token\"", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "sess", ...
POST /oauth/v2/token Get a new token :param host: host of the service :param params: will contain : params = {"grant_type": "password", "client_id": "a string", "client_secret": "a string", "username": "a login", "password": "a password"} :return: access token
[ "POST", "/", "oauth", "/", "v2", "/", "token" ]
python
train
ralphbean/bugwarrior
bugwarrior/db.py
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L129-L212
def find_local_uuid(tw, keys, issue, legacy_matching=False): """ For a given issue issue, find its local UUID. Assembles a list of task IDs existing in taskwarrior matching the supplied issue (`issue`) on the combination of any set of supplied unique identifiers (`keys`) or, optionally, the task's description field (should `legacy_matching` be `True`). :params: * `tw`: An instance of `taskw.TaskWarriorShellout` * `keys`: A list of lists of keys to use for uniquely identifying an issue. To clarify the "list of lists" behavior, assume that there are two services, one having a single primary key field -- 'serviceAid' -- and another having a pair of fields composing its primary key -- 'serviceBproject' and 'serviceBnumber' --, the incoming data for this field would be:: [ ['serviceAid'], ['serviceBproject', 'serviceBnumber'], ] * `issue`: An instance of a subclass of `bugwarrior.services.Issue`. * `legacy_matching`: By default, this is disabled, and it allows the matching algorithm to -- in addition to searching by stored issue keys -- search using the task's description for a match. It is prone to error and should avoided if possible. :returns: * A single string UUID. :raises: * `bugwarrior.db.MultipleMatches`: if multiple matches were found. * `bugwarrior.db.NotFound`: if an issue was not found. """ if not issue['description']: raise ValueError('Issue %s has no description.' % issue) possibilities = set([]) if legacy_matching: legacy_description = issue.get_default_description().rsplit('..', 1)[0] # Furthermore, we have to kill off any single quotes which break in # task-2.4.x, as much as it saddens me. 
legacy_description = legacy_description.split("'")[0] results = tw.filter_tasks({ 'description.startswith': legacy_description, 'or': [ ('status', 'pending'), ('status', 'waiting'), ], }) possibilities = possibilities | set([ task['uuid'] for task in results ]) for service, key_list in six.iteritems(keys): if any([key in issue for key in key_list]): results = tw.filter_tasks({ 'and': [("%s.is" % key, issue[key]) for key in key_list], 'or': [ ('status', 'pending'), ('status', 'waiting'), ], }) possibilities = possibilities | set([ task['uuid'] for task in results ]) if len(possibilities) == 1: return possibilities.pop() if len(possibilities) > 1: raise MultipleMatches( "Issue %s matched multiple IDs: %s" % ( issue['description'], possibilities ) ) raise NotFound( "No issue was found matching %s" % issue )
[ "def", "find_local_uuid", "(", "tw", ",", "keys", ",", "issue", ",", "legacy_matching", "=", "False", ")", ":", "if", "not", "issue", "[", "'description'", "]", ":", "raise", "ValueError", "(", "'Issue %s has no description.'", "%", "issue", ")", "possibilitie...
For a given issue issue, find its local UUID. Assembles a list of task IDs existing in taskwarrior matching the supplied issue (`issue`) on the combination of any set of supplied unique identifiers (`keys`) or, optionally, the task's description field (should `legacy_matching` be `True`). :params: * `tw`: An instance of `taskw.TaskWarriorShellout` * `keys`: A list of lists of keys to use for uniquely identifying an issue. To clarify the "list of lists" behavior, assume that there are two services, one having a single primary key field -- 'serviceAid' -- and another having a pair of fields composing its primary key -- 'serviceBproject' and 'serviceBnumber' --, the incoming data for this field would be:: [ ['serviceAid'], ['serviceBproject', 'serviceBnumber'], ] * `issue`: An instance of a subclass of `bugwarrior.services.Issue`. * `legacy_matching`: By default, this is disabled, and it allows the matching algorithm to -- in addition to searching by stored issue keys -- search using the task's description for a match. It is prone to error and should avoided if possible. :returns: * A single string UUID. :raises: * `bugwarrior.db.MultipleMatches`: if multiple matches were found. * `bugwarrior.db.NotFound`: if an issue was not found.
[ "For", "a", "given", "issue", "issue", "find", "its", "local", "UUID", "." ]
python
test
christophertbrown/bioscripts
ctbBio/cluster_ani.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L17-L45
def make_mashes(fastas, mash_file, threads, kmer = 21, force = False): """ Create mash files for multiple fasta files Input: fastas <list[str]> -- paths to fasta files mash_file <str> -- path to output mash file threads <int> -- # threads for parallelization kmer <int> -- kmer size for mash sketching force <boolean> -- force overwrite of all mash files """ mash_processes = set() sketches = [fasta + '.msh' for fasta in fastas] devnull = open(os.devnull, 'w') # Perform the sketching for fasta, sketch in zip(fastas, sketches): if os.path.isfile(sketch): continue mash_cmd = ['/opt/bin/bio/mash', 'sketch', '-o', fasta, '-k', str(kmer), fasta] mash_processes.add(subprocess.Popen(mash_cmd, stderr=devnull)) if len(mash_processes) >= threads: os.wait() mash_processes.difference_update([mp for mp in mash_processes if mp.poll() is not None]) # Collect stragglers for mp in mash_processes: if mp.poll() is None: mp.wait() # Paste sketches into single mash paste_mashes(sketches, mash_file, force = force) return
[ "def", "make_mashes", "(", "fastas", ",", "mash_file", ",", "threads", ",", "kmer", "=", "21", ",", "force", "=", "False", ")", ":", "mash_processes", "=", "set", "(", ")", "sketches", "=", "[", "fasta", "+", "'.msh'", "for", "fasta", "in", "fastas", ...
Create mash files for multiple fasta files Input: fastas <list[str]> -- paths to fasta files mash_file <str> -- path to output mash file threads <int> -- # threads for parallelization kmer <int> -- kmer size for mash sketching force <boolean> -- force overwrite of all mash files
[ "Create", "mash", "files", "for", "multiple", "fasta", "files", "Input", ":", "fastas", "<list", "[", "str", "]", ">", "--", "paths", "to", "fasta", "files", "mash_file", "<str", ">", "--", "path", "to", "output", "mash", "file", "threads", "<int", ">", ...
python
train
cloud-custodian/cloud-custodian
tools/c7n_gcp/c7n_gcp/mu.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/mu.py#L171-L194
def _upload(self, archive, region): """Upload function source and return source url """ # Generate source upload url url = self.client.execute_command( 'generateUploadUrl', {'parent': 'projects/{}/locations/{}'.format( self.session.get_default_project(), region)}).get('uploadUrl') log.debug("uploading function code %s", url) http = self._get_http_client(self.client) headers, response = http.request( url, method='PUT', headers={ 'content-type': 'application/zip', 'Content-Length': '%d' % archive.size, 'x-goog-content-length-range': '0,104857600' }, body=open(archive.path, 'rb') ) log.info("function code uploaded") if headers['status'] != '200': raise RuntimeError("%s\n%s" % (headers, response)) return url
[ "def", "_upload", "(", "self", ",", "archive", ",", "region", ")", ":", "# Generate source upload url", "url", "=", "self", ".", "client", ".", "execute_command", "(", "'generateUploadUrl'", ",", "{", "'parent'", ":", "'projects/{}/locations/{}'", ".", "format", ...
Upload function source and return source url
[ "Upload", "function", "source", "and", "return", "source", "url" ]
python
train
salesking/salesking_python_sdk
salesking/utils/schema.py
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/schema.py#L43-L52
def _value_is_type_text(val): """ val is a dictionary :param val: :return: True/False """ if ((u'type' in val.keys()) and (val['type'].lower() == u"text")): return True return False
[ "def", "_value_is_type_text", "(", "val", ")", ":", "if", "(", "(", "u'type'", "in", "val", ".", "keys", "(", ")", ")", "and", "(", "val", "[", "'type'", "]", ".", "lower", "(", ")", "==", "u\"text\"", ")", ")", ":", "return", "True", "return", "...
val is a dictionary :param val: :return: True/False
[ "val", "is", "a", "dictionary", ":", "param", "val", ":", ":", "return", ":", "True", "/", "False" ]
python
train
hvac/hvac
hvac/v1/__init__.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/v1/__init__.py#L1631-L1642
def transit_read_key(self, name, mount_point='transit'): """GET /<mount_point>/keys/<name> :param name: :type name: :param mount_point: :type mount_point: :return: :rtype: """ url = '/v1/{0}/keys/{1}'.format(mount_point, name) return self._adapter.get(url).json()
[ "def", "transit_read_key", "(", "self", ",", "name", ",", "mount_point", "=", "'transit'", ")", ":", "url", "=", "'/v1/{0}/keys/{1}'", ".", "format", "(", "mount_point", ",", "name", ")", "return", "self", ".", "_adapter", ".", "get", "(", "url", ")", "....
GET /<mount_point>/keys/<name> :param name: :type name: :param mount_point: :type mount_point: :return: :rtype:
[ "GET", "/", "<mount_point", ">", "/", "keys", "/", "<name", ">" ]
python
train
globus/globus-cli
globus_cli/commands/endpoint/permission/update.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/endpoint/permission/update.py#L21-L29
def update_command(permissions, rule_id, endpoint_id): """ Executor for `globus endpoint permission update` """ client = get_client() rule_data = assemble_generic_doc("access", permissions=permissions) res = client.update_endpoint_acl_rule(endpoint_id, rule_id, rule_data) formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
[ "def", "update_command", "(", "permissions", ",", "rule_id", ",", "endpoint_id", ")", ":", "client", "=", "get_client", "(", ")", "rule_data", "=", "assemble_generic_doc", "(", "\"access\"", ",", "permissions", "=", "permissions", ")", "res", "=", "client", "....
Executor for `globus endpoint permission update`
[ "Executor", "for", "globus", "endpoint", "permission", "update" ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L2632-L2645
def _tree_create_leaf(self, name, trajectory, hdf5_group): """ Creates a new pypet leaf instance. Returns the leaf and if it is an explored parameter the length of the range. """ class_name = self._all_get_from_attrs(hdf5_group, HDF5StorageService.CLASS_NAME) # Create the instance with the appropriate constructor class_constructor = trajectory._create_class(class_name) instance = trajectory._construct_instance(class_constructor, name) return instance
[ "def", "_tree_create_leaf", "(", "self", ",", "name", ",", "trajectory", ",", "hdf5_group", ")", ":", "class_name", "=", "self", ".", "_all_get_from_attrs", "(", "hdf5_group", ",", "HDF5StorageService", ".", "CLASS_NAME", ")", "# Create the instance with the appropria...
Creates a new pypet leaf instance. Returns the leaf and if it is an explored parameter the length of the range.
[ "Creates", "a", "new", "pypet", "leaf", "instance", "." ]
python
test
StackStorm/pybind
pybind/slxos/v17s_1_02/isis_state/router_isis_config/is_address_family_v6/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/router_isis_config/is_address_family_v6/__init__.py#L558-L579
def _set_redist_connected(self, v, load=False): """ Setter method for redist_connected, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_connected (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_connected is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_connected() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=redist_connected.redist_connected, is_container='container', presence=False, yang_name="redist-connected", rest_name="redist-connected", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-connected-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """redist_connected must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=redist_connected.redist_connected, is_container='container', presence=False, yang_name="redist-connected", rest_name="redist-connected", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-redistribution-redist-connected-1'}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__redist_connected = t if hasattr(self, '_set'): self._set()
[ "def", "_set_redist_connected", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for redist_connected, mapped from YANG variable /isis_state/router_isis_config/is_address_family_v6/redist_connected (container) If this variable is read-only (config: false) in the source YANG file, then _set_redist_connected is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redist_connected() directly.
[ "Setter", "method", "for", "redist_connected", "mapped", "from", "YANG", "variable", "/", "isis_state", "/", "router_isis_config", "/", "is_address_family_v6", "/", "redist_connected", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "("...
python
train
SuperCowPowers/workbench
workbench_apps/workbench_cli/help_content.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench_apps/workbench_cli/help_content.py#L26-L37
def help_cli_basic(self): """ Help for Workbench CLI Basics """ help = '%sWorkbench: Getting started...' % (color.Yellow) help += '\n%sLoad in a sample:' % (color.Green) help += '\n\t%s> load_sample /path/to/file' % (color.LightBlue) help += '\n\n%sNotice the prompt now shows the md5 of the sample...'% (color.Yellow) help += '\n%sRun workers on the sample:' % (color.Green) help += '\n\t%s> view' % (color.LightBlue) help += '\n%sType the \'help workers\' or the first part of the worker <tab>...' % (color.Green) help += '\n\t%s> help workers (lists all possible workers)' % (color.LightBlue) help += '\n\t%s> pe_<tab> (will give you pe_classifier, pe_deep_sim, pe_features, pe_indicators, pe_peid)%s' % (color.LightBlue, color.Normal) return help
[ "def", "help_cli_basic", "(", "self", ")", ":", "help", "=", "'%sWorkbench: Getting started...'", "%", "(", "color", ".", "Yellow", ")", "help", "+=", "'\\n%sLoad in a sample:'", "%", "(", "color", ".", "Green", ")", "help", "+=", "'\\n\\t%s> load_sample /path/to/...
Help for Workbench CLI Basics
[ "Help", "for", "Workbench", "CLI", "Basics" ]
python
train
SergeySatskiy/cdm-pythonparser
legacy/src/cdmbriefparser.py
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/legacy/src/cdmbriefparser.py#L397-L423
def niceStringify( self ): " Returns a string representation with new lines and shifts " out = "" if self.docstring is not None: out += str( self.docstring ) if not self.encoding is None: if out != "": out += '\n' out += str( self.encoding ) for item in self.imports: if out != "": out += '\n' out += str( item ) for item in self.globals: if out != "": out += '\n' out += str( item ) for item in self.functions: if out != "": out += '\n' out += item.niceStringify( 0 ) for item in self.classes: if out != "": out += '\n' out += item.niceStringify( 0 ) return out
[ "def", "niceStringify", "(", "self", ")", ":", "out", "=", "\"\"", "if", "self", ".", "docstring", "is", "not", "None", ":", "out", "+=", "str", "(", "self", ".", "docstring", ")", "if", "not", "self", ".", "encoding", "is", "None", ":", "if", "out...
Returns a string representation with new lines and shifts
[ "Returns", "a", "string", "representation", "with", "new", "lines", "and", "shifts" ]
python
train
btr1975/persistentdatatools
persistentdatatools/persistentdatatools.py
https://github.com/btr1975/persistentdatatools/blob/39e1294ce34a0a34363c65d94cdd592be5ad791b/persistentdatatools/persistentdatatools.py#L115-L131
def verify_directory(directory_name, directory_location, directory_create=False): """ Function to verify if a directory exists Args: directory_name: The name of directory to check directory_location: The location of the directory, derive from the os module directory_create: If you want to create the directory Returns: returns boolean True or False, but if you set directory_create to True it will create the directory """ if not directory_create: return __os.path.exists(__os.path.join(directory_location, directory_name)) elif directory_create: good = __os.path.exists(__os.path.join(directory_location, directory_name)) if not good: __os.mkdir(__os.path.join(directory_location, directory_name))
[ "def", "verify_directory", "(", "directory_name", ",", "directory_location", ",", "directory_create", "=", "False", ")", ":", "if", "not", "directory_create", ":", "return", "__os", ".", "path", ".", "exists", "(", "__os", ".", "path", ".", "join", "(", "dir...
Function to verify if a directory exists Args: directory_name: The name of directory to check directory_location: The location of the directory, derive from the os module directory_create: If you want to create the directory Returns: returns boolean True or False, but if you set directory_create to True it will create the directory
[ "Function", "to", "verify", "if", "a", "directory", "exists", "Args", ":", "directory_name", ":", "The", "name", "of", "directory", "to", "check", "directory_location", ":", "The", "location", "of", "the", "directory", "derive", "from", "the", "os", "module", ...
python
train
saltstack/salt
salt/runners/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/state.py#L43-L52
def soft_kill(jid, state_id=None): ''' Set up a state run to die before executing the given state id, this instructs a running state to safely exit at a given state id. This needs to pass in the jid of the running state. If a state_id is not passed then the jid referenced will be safely exited at the beginning of the next state run. ''' minion = salt.minion.MasterMinion(__opts__) minion.functions['state.soft_kill'](jid, state_id)
[ "def", "soft_kill", "(", "jid", ",", "state_id", "=", "None", ")", ":", "minion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "__opts__", ")", "minion", ".", "functions", "[", "'state.soft_kill'", "]", "(", "jid", ",", "state_id", ")" ]
Set up a state run to die before executing the given state id, this instructs a running state to safely exit at a given state id. This needs to pass in the jid of the running state. If a state_id is not passed then the jid referenced will be safely exited at the beginning of the next state run.
[ "Set", "up", "a", "state", "run", "to", "die", "before", "executing", "the", "given", "state", "id", "this", "instructs", "a", "running", "state", "to", "safely", "exit", "at", "a", "given", "state", "id", ".", "This", "needs", "to", "pass", "in", "the...
python
train
spyder-ide/spyder-notebook
spyder_notebook/notebookplugin.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/notebookplugin.py#L439-L455
def open_console(self, client=None): """Open an IPython console for the given client or the current one.""" if not client: client = self.get_current_client() if self.ipyconsole is not None: kernel_id = client.get_kernel_id() if not kernel_id: QMessageBox.critical( self, _('Error opening console'), _('There is no kernel associated to this notebook.')) return self.ipyconsole._create_client_for_kernel(kernel_id, None, None, None) ipyclient = self.ipyconsole.get_current_client() ipyclient.allow_rename = False self.ipyconsole.rename_client_tab(ipyclient, client.get_short_name())
[ "def", "open_console", "(", "self", ",", "client", "=", "None", ")", ":", "if", "not", "client", ":", "client", "=", "self", ".", "get_current_client", "(", ")", "if", "self", ".", "ipyconsole", "is", "not", "None", ":", "kernel_id", "=", "client", "."...
Open an IPython console for the given client or the current one.
[ "Open", "an", "IPython", "console", "for", "the", "given", "client", "or", "the", "current", "one", "." ]
python
train
annoviko/pyclustering
pyclustering/cluster/silhouette.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/silhouette.py#L287-L301
def get_type(self): """! @brief Returns algorithm type that corresponds to specified enumeration value. @return (type) Algorithm type for cluster analysis. """ if self == silhouette_ksearch_type.KMEANS: return kmeans elif self == silhouette_ksearch_type.KMEDIANS: return kmedians elif self == silhouette_ksearch_type.KMEDOIDS: return kmedoids else: return None
[ "def", "get_type", "(", "self", ")", ":", "if", "self", "==", "silhouette_ksearch_type", ".", "KMEANS", ":", "return", "kmeans", "elif", "self", "==", "silhouette_ksearch_type", ".", "KMEDIANS", ":", "return", "kmedians", "elif", "self", "==", "silhouette_ksearc...
! @brief Returns algorithm type that corresponds to specified enumeration value. @return (type) Algorithm type for cluster analysis.
[ "!" ]
python
valid
iotile/coretools
iotilecore/iotile/core/hw/transport/server/standard.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/server/standard.py#L369-L387
async def send_script(self, client_id, conn_string, script): """Send a script to a device on behalf of a client. See :meth:`AbstractDeviceAdapter.send_script`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter. script (bytes): The script that we wish to send. Raises: DeviceServerError: There is an issue with your client_id such as not being connected to the device. DeviceAdapterError: The adapter had a protocol issue sending the script. """ conn_id = self._client_connection(client_id, conn_string) await self.adapter.send_script(conn_id, script)
[ "async", "def", "send_script", "(", "self", ",", "client_id", ",", "conn_string", ",", "script", ")", ":", "conn_id", "=", "self", ".", "_client_connection", "(", "client_id", ",", "conn_string", ")", "await", "self", ".", "adapter", ".", "send_script", "(",...
Send a script to a device on behalf of a client. See :meth:`AbstractDeviceAdapter.send_script`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter. script (bytes): The script that we wish to send. Raises: DeviceServerError: There is an issue with your client_id such as not being connected to the device. DeviceAdapterError: The adapter had a protocol issue sending the script.
[ "Send", "a", "script", "to", "a", "device", "on", "behalf", "of", "a", "client", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtoolbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtoolbutton.py#L94-L110
def blink(self, state=True): """ Starts or stops the blinking state for this button. This only works for when the toolbutton is in Shadowed or Colored mode. :param state | <bool> :return <bool> | success """ if self._blinking == state: return True elif not self.graphicsEffect(): return False else: self._blinking = state if state: self.startTimer(self.blinkInterval())
[ "def", "blink", "(", "self", ",", "state", "=", "True", ")", ":", "if", "self", ".", "_blinking", "==", "state", ":", "return", "True", "elif", "not", "self", ".", "graphicsEffect", "(", ")", ":", "return", "False", "else", ":", "self", ".", "_blinki...
Starts or stops the blinking state for this button. This only works for when the toolbutton is in Shadowed or Colored mode. :param state | <bool> :return <bool> | success
[ "Starts", "or", "stops", "the", "blinking", "state", "for", "this", "button", ".", "This", "only", "works", "for", "when", "the", "toolbutton", "is", "in", "Shadowed", "or", "Colored", "mode", ".", ":", "param", "state", "|", "<bool", ">", ":", "return",...
python
train