text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def create_file_new_actions(self, fnames): """Return actions for submenu 'New...'""" if not fnames: return [] new_file_act = create_action(self, _("File..."), icon=ima.icon('filenew'), triggered=lambda: self.new_file(fnames[-1])) new_module_act = create_action(self, _("Module..."), icon=ima.icon('spyder'), triggered=lambda: self.new_module(fnames[-1])) new_folder_act = create_action(self, _("Folder..."), icon=ima.icon('folder_new'), triggered=lambda: self.new_folder(fnames[-1])) new_package_act = create_action(self, _("Package..."), icon=ima.icon('package_new'), triggered=lambda: self.new_package(fnames[-1])) return [new_file_act, new_folder_act, None, new_module_act, new_package_act]
[ "def", "create_file_new_actions", "(", "self", ",", "fnames", ")", ":", "if", "not", "fnames", ":", "return", "[", "]", "new_file_act", "=", "create_action", "(", "self", ",", "_", "(", "\"File...\"", ")", ",", "icon", "=", "ima", ".", "icon", "(", "'f...
56.681818
19.409091
def evaluate(self, instance, step, extra): """Evaluate the current definition and fill its attributes. Uses attributes definition in the following order: - values defined when defining the ParameteredAttribute - additional values defined when instantiating the containing factory Args: instance (builder.Resolver): The object holding currently computed attributes step: a factory.builder.BuildStep extra (dict): additional, call-time added kwargs for the step. """ defaults = dict(self.defaults) if extra: defaults.update(extra) return self.generate(step, defaults)
[ "def", "evaluate", "(", "self", ",", "instance", ",", "step", ",", "extra", ")", ":", "defaults", "=", "dict", "(", "self", ".", "defaults", ")", "if", "extra", ":", "defaults", ".", "update", "(", "extra", ")", "return", "self", ".", "generate", "("...
36.947368
18.421053
def _get_req_fp(self, op): '''Decisions on what verb to use and content headers happen here Args: op a string specifying a http verb''' if(op): op = op.lower() if op == 'get': return requests.get, None if op == 'put': return requests.put, {'Content-Type': 'application/x-www-form-urlencoded'} if op == 'post': return requests.post, {'Content-Type': 'application/json'} if op == 'delete': return requests.delete, None else: raise NotImplementedError('Operation {} is not supported!'.format(op))
[ "def", "_get_req_fp", "(", "self", ",", "op", ")", ":", "if", "(", "op", ")", ":", "op", "=", "op", ".", "lower", "(", ")", "if", "op", "==", "'get'", ":", "return", "requests", ".", "get", ",", "None", "if", "op", "==", "'put'", ":", "return",...
32.875
22.375
def address(self) -> str: '''generate an address from pubkey''' return str(self._public_key.to_address( net_query(self.network)) )
[ "def", "address", "(", "self", ")", "->", "str", ":", "return", "str", "(", "self", ".", "_public_key", ".", "to_address", "(", "net_query", "(", "self", ".", "network", ")", ")", ")" ]
30
15
def rgb2gray(image_rgb_array): """! @brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel. @details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum: \f[Y = 0.2989R + 0.587G + 0.114B\f] @param[in] image_rgb_array (list): Image represented by RGB list. @return (list) Image as gray colored matrix, where one element of list describes pixel. @code colored_image = read_image(file_name); gray_image = rgb2gray(colored_image); @endcode @see read_image() """ image_gray_array = [0.0] * len(image_rgb_array); for index in range(0, len(image_rgb_array), 1): image_gray_array[index] = float(image_rgb_array[index][0]) * 0.2989 + float(image_rgb_array[index][1]) * 0.5870 + float(image_rgb_array[index][2]) * 0.1140; return image_gray_array;
[ "def", "rgb2gray", "(", "image_rgb_array", ")", ":", "image_gray_array", "=", "[", "0.0", "]", "*", "len", "(", "image_rgb_array", ")", "for", "index", "in", "range", "(", "0", ",", "len", "(", "image_rgb_array", ")", ",", "1", ")", ":", "image_gray_arra...
39.08
31.68
def require_http_methods(request_methods): """ Decorator to make a function view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def function_view(request): # HTTP methods != GET or POST results in 405 error code response """ if not isinstance(request_methods, (list, tuple)): raise ImproperlyConfigured( "require_http_methods decorator must be called " "with a list or tuple of strings. For example:\n\n" " @require_http_methods(['GET', 'POST'])\n" " def function_view(request):\n" " ...\n") request_methods = list(map(str.upper, request_methods)) for method in request_methods: if method not in HTTP_METHOD_NAMES: raise ImproperlyConfigured( "require_http_method called with '%s', " "which is not a valid HTTP method.\n" % (method,)) if 'GET' in request_methods and 'HEAD' not in request_methods: request_methods.append('HEAD') if 'OPTIONS' not in request_methods: request_methods.append('OPTIONS') request_methods.sort() def decorator(func): @wraps(func, assigned=available_attrs(func)) def inner(request, *args, **kwargs): if request.method == 'OPTIONS': response = HttpResponse() response['Allow'] = ', '.join( [m.upper() for m in request_methods]) response['Content-Length'] = '0' return response if request.method not in request_methods: logger.warning( 'Method Not Allowed (%s): %s', request.method, request.path, extra={ 'status_code': 405, 'request': request } ) return HttpResponseNotAllowed(request_methods) return func(request, *args, **kwargs) return inner return decorator
[ "def", "require_http_methods", "(", "request_methods", ")", ":", "if", "not", "isinstance", "(", "request_methods", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ImproperlyConfigured", "(", "\"require_http_methods decorator must be called \"", "\"with a list ...
37.418182
14.145455
def appendAssayToStudy(assay, studyNum, pathToISATABFile): """ This function appends an Assay object to a study in an ISA file Typically, you should use the exploreISA function to check the contents of the ISA file and retrieve the assay and study number you are interested in! :param assay: The Assay :type assay: ISA Assay object :param studyNum: The Study number (notice it's not zero-based index). :type studyNum: int :param pathToISATABFile: The path to the ISATAB file :type pathToISATABFile: string :raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'. """ from isatools import isatab try: isa = isatab.load(pathToISATABFile, skip_load_tables=True) std = isa.studies[studyNum - 1] lngth = len(std.assays) base = os.path.basename(assay.filename) fname = os.path.splitext(base)[0] fname = fname + str(lngth) ext = os.path.splitext(base)[1] fname = fname + ext assay.filename = fname isa.studies[studyNum - 1].assays.append(assay) isatab.dump(isa_obj=isa, output_path=pathToISATABFile) except FileNotFoundError as err: raise err
[ "def", "appendAssayToStudy", "(", "assay", ",", "studyNum", ",", "pathToISATABFile", ")", ":", "from", "isatools", "import", "isatab", "try", ":", "isa", "=", "isatab", ".", "load", "(", "pathToISATABFile", ",", "skip_load_tables", "=", "True", ")", "std", "...
42.892857
15.964286
def pvcreate(devices, override=True, **kwargs): ''' Set a physical device to be used as an LVM physical volume override Skip devices, if they are already LVM physical volumes CLI Examples: .. code-block:: bash salt mymachine lvm.pvcreate /dev/sdb1,/dev/sdb2 salt mymachine lvm.pvcreate /dev/sdb1 dataalignmentoffset=7s ''' if not devices: return 'Error: at least one device is required' if isinstance(devices, six.string_types): devices = devices.split(',') cmd = ['pvcreate', '-y'] for device in devices: if not os.path.exists(device): raise CommandExecutionError('{0} does not exist'.format(device)) if not pvdisplay(device, quiet=True): cmd.append(device) elif not override: raise CommandExecutionError('Device "{0}" is already an LVM physical volume.'.format(device)) if not cmd[2:]: # All specified devices are already LVM volumes return True valid = ('metadatasize', 'dataalignment', 'dataalignmentoffset', 'pvmetadatacopies', 'metadatacopies', 'metadataignore', 'restorefile', 'norestorefile', 'labelsector', 'setphysicalvolumesize') no_parameter = ('force', 'norestorefile') for var in kwargs: if kwargs[var] and var in valid: cmd.extend(['--{0}'.format(var), kwargs[var]]) elif kwargs[var] and var in no_parameter: cmd.append('--{0}'.format(var)) out = __salt__['cmd.run_all'](cmd, python_shell=False) if out.get('retcode'): raise CommandExecutionError(out.get('stderr')) # Verify pvcreate was successful for device in devices: if not pvdisplay(device): raise CommandExecutionError('Device "{0}" was not affected.'.format(device)) return True
[ "def", "pvcreate", "(", "devices", ",", "override", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "not", "devices", ":", "return", "'Error: at least one device is required'", "if", "isinstance", "(", "devices", ",", "six", ".", "string_types", ")", "...
34.113208
21.924528
def evaluate_hourly_forecasts(self): """ Calculates ROC curves and Reliability scores for each forecast hour. Returns: A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects. """ score_columns = ["Run_Date", "Forecast_Hour", "Ensemble Name", "Model_Name", "Forecast_Variable", "Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"] all_scores = pd.DataFrame(columns=score_columns) for h, hour in enumerate(range(self.start_hour, self.end_hour + 1)): for neighbor_radius in self.neighbor_radii: n_filter = disk(neighbor_radius) for s, size_threshold in enumerate(self.size_thresholds): print("Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}".format(hour, self.model_name, self.forecast_variable, self.run_date, neighbor_radius, size_threshold)) hour_obs = fftconvolve(self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s], n_filter, mode="same") hour_obs[hour_obs > 1] = 1 hour_obs[hour_obs < 1] = 0 if self.obs_mask: hour_obs = hour_obs[self.raw_obs[self.mask_variable][h] > 0] for smoothing_radius in self.smoothing_radii: hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius, smoothing_radius, self.forecast_variable, size_threshold) if self.obs_mask: hour_forecast = self.hourly_forecasts[hour_var][h][self.raw_obs[self.mask_variable][h] > 0] else: hour_forecast = self.hourly_forecasts[hour_var][h] roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5) roc.update(hour_forecast, hour_obs) rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5) rel.update(hour_forecast, hour_obs) row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable, neighbor_radius, smoothing_radius, size_threshold, roc, rel] all_scores.loc[hour_var + "_{0:d}".format(hour)] = row return all_scores
[ "def", "evaluate_hourly_forecasts", "(", "self", ")", ":", "score_columns", "=", "[", "\"Run_Date\"", ",", "\"Forecast_Hour\"", ",", "\"Ensemble Name\"", ",", "\"Model_Name\"", ",", "\"Forecast_Variable\"", ",", "\"Neighbor_Radius\"", ",", "\"Smoothing_Radius\"", ",", "...
71.761905
37.761905
def fermion_avg(efermi, norm_hopping, func): """calcules for every slave it's average over the desired observable""" if func == 'ekin': func = bethe_ekin_zeroT elif func == 'ocupation': func = bethe_filling_zeroT return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)])
[ "def", "fermion_avg", "(", "efermi", ",", "norm_hopping", ",", "func", ")", ":", "if", "func", "==", "'ekin'", ":", "func", "=", "bethe_ekin_zeroT", "elif", "func", "==", "'ocupation'", ":", "func", "=", "bethe_filling_zeroT", "return", "np", ".", "asarray",...
39
15.625
def get_more(collection_name, num_to_return, cursor_id, ctx=None): """Get a **getMore** message.""" if ctx: return _get_more_compressed( collection_name, num_to_return, cursor_id, ctx) return _get_more_uncompressed(collection_name, num_to_return, cursor_id)
[ "def", "get_more", "(", "collection_name", ",", "num_to_return", ",", "cursor_id", ",", "ctx", "=", "None", ")", ":", "if", "ctx", ":", "return", "_get_more_compressed", "(", "collection_name", ",", "num_to_return", ",", "cursor_id", ",", "ctx", ")", "return",...
47.333333
19
def create_user(app, appbuilder, role, username, firstname, lastname, email, password): """ Create a user """ _appbuilder = import_application(app, appbuilder) role_object = _appbuilder.sm.find_role(role) user = _appbuilder.sm.add_user( username, firstname, lastname, email, role_object, password ) if user: click.echo(click.style("User {0} created.".format(username), fg="green")) else: click.echo(click.style("Error! No user created", fg="red"))
[ "def", "create_user", "(", "app", ",", "appbuilder", ",", "role", ",", "username", ",", "firstname", ",", "lastname", ",", "email", ",", "password", ")", ":", "_appbuilder", "=", "import_application", "(", "app", ",", "appbuilder", ")", "role_object", "=", ...
38.384615
21.615385
def saveJSON(g, data, backup=False): """ Saves the current setup to disk. g : hcam_drivers.globals.Container Container with globals data : dict The current setup in JSON compatible dictionary format. backup : bool If we are saving a backup on close, don't prompt for filename """ if not backup: fname = filedialog.asksaveasfilename( defaultextension='.json', filetypes=[('json files', '.json'), ], initialdir=g.cpars['app_directory'] ) else: fname = os.path.join(os.path.expanduser('~/.hdriver'), 'app.json') if not fname: g.clog.warn('Aborted save to disk') return False with open(fname, 'w') as of: of.write( json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')) ) g.clog.info('Saved setup to' + fname) return True
[ "def", "saveJSON", "(", "g", ",", "data", ",", "backup", "=", "False", ")", ":", "if", "not", "backup", ":", "fname", "=", "filedialog", ".", "asksaveasfilename", "(", "defaultextension", "=", "'.json'", ",", "filetypes", "=", "[", "(", "'json files'", "...
27.060606
18.454545
def _generate_default_grp_constraints(roles, network_constraints): """Generate default symetric grp constraints. """ default_delay = network_constraints.get('default_delay') default_rate = network_constraints.get('default_rate') default_loss = network_constraints.get('default_loss', 0) except_groups = network_constraints.get('except', []) grps = network_constraints.get('groups', roles.keys()) # expand each groups grps = [expand_groups(g) for g in grps] # flatten grps = [x for expanded_group in grps for x in expanded_group] # building the default group constraints return [{'src': grp1, 'dst': grp2, 'delay': default_delay, 'rate': default_rate, 'loss': default_loss} for grp1 in grps for grp2 in grps if ((grp1 != grp2 or _src_equals_dst_in_constraints(network_constraints, grp1)) and grp1 not in except_groups and grp2 not in except_groups)]
[ "def", "_generate_default_grp_constraints", "(", "roles", ",", "network_constraints", ")", ":", "default_delay", "=", "network_constraints", ".", "get", "(", "'default_delay'", ")", "default_rate", "=", "network_constraints", ".", "get", "(", "'default_rate'", ")", "d...
44.909091
15.090909
def u2ver(self): """ Get the major/minor version of the urllib2 lib. @return: The urllib2 version. @rtype: float """ try: part = u2.__version__.split('.', 1) n = float('.'.join(part)) return n except Exception as e: log.exception(e) return 0
[ "def", "u2ver", "(", "self", ")", ":", "try", ":", "part", "=", "u2", ".", "__version__", ".", "split", "(", "'.'", ",", "1", ")", "n", "=", "float", "(", "'.'", ".", "join", "(", "part", ")", ")", "return", "n", "except", "Exception", "as", "e...
26.538462
12.384615
def prepare_data(problem, hparams, params, config): """Construct input pipeline.""" input_fn = problem.make_estimator_input_fn( tf.estimator.ModeKeys.EVAL, hparams, force_repeat=True) dataset = input_fn(params, config) features, _ = dataset.make_one_shot_iterator().get_next() inputs, labels = features["targets"], features["inputs"] inputs = tf.to_float(inputs) input_shape = inputs.shape.as_list() inputs = tf.reshape(inputs, [hparams.batch_size] + input_shape[1:]) labels = tf.reshape(labels, [hparams.batch_size]) return inputs, labels, features
[ "def", "prepare_data", "(", "problem", ",", "hparams", ",", "params", ",", "config", ")", ":", "input_fn", "=", "problem", ".", "make_estimator_input_fn", "(", "tf", ".", "estimator", ".", "ModeKeys", ".", "EVAL", ",", "hparams", ",", "force_repeat", "=", ...
47
11.416667
def identify(self,geometry,geometryType="esriGeometryPoint",mosaicRule=None, renderingRule=None,renderingRules=None,pixelSize=None,time=None, returnGeometry="false",returnCatalogItems="false"): """ The identify operation is performed on an image service resource. It identifies the content of an image service for a given location and a given mosaic rule. The location can be a point or a polygon. The identify operation is supported by both mosaic dataset and raster dataset image services. The result of this operation includes the pixel value of the mosaic for a given mosaic rule, a resolution (pixel size), and a set of catalog items that overlap the given geometry. The single pixel value is that of the mosaic at the centroid of the specified location. If there are multiple rasters overlapping the location, the visibility of a raster is determined by the order of the rasters defined in the mosaic rule. It also contains a set of catalog items that overlap the given geometry. The catalog items are ordered based on the mosaic rule. A list of catalog item visibilities gives the percentage contribution of the item to overall mosaic. Inputs: geometry - A geometry that defines the location to be identified. The location can be a point or polygon. The structure of the geometry is the same as the structure of the JSON geometry objects returned by the ArcGIS REST API. In addition to the JSON structures, for points, you can specify the geometry with a simple comma-separated syntax. This is a required parameter. The default geometry type is a point. By default, the geometry is assumed to be in the spatial reference of the image service. You can specify a different spatial reference by using the JSON structure syntax for geometries. geometryType - The type of geometry specified by the geometry parameter. The geometry type can be a point or polygon. 
Values: esriGeometryPoint | esriGeometryPolygon mosaicRule - Specifies the mosaic rule when defining how individual images should be mosaicked. When a mosaic rule is not specified, the default mosaic rule of the image service will be used (as advertised in the root resource: defaultMosaicMethod, mosaicOperator, sortField, sortValue). renderingRule - Specifies the rendering rule for how the requested image should be rendered. renderingRules - Specifies an array of rendering rules. Use this parameter to get multiple processed values from different raster functions in one single request. pixelSize - The pixel level being identified (or the resolution being looked at). If pixel size is not specified, then pixelSize will default to the base resolution of the dataset. The raster at the specified pixel size in the mosaic dataset will be used for identify. The structure of the pixelSize parameter is the same as the structure of the point object returned by the ArcGIS REST API. In addition to the JSON structure, you can specify the pixel size with a simple comma-separated syntax. time - The time instant or time extent of the raster to be identified. This parameter is only valid if the image service supports time. returnGeometry - Indicates whether or not to return the raster catalog item's footprint. Set it to false when the catalog item's footprint is not needed to improve the identify operation's response time. returnCatalogItems - Indicates whether or not to return raster catalog items. Set it to false when catalog items are not needed to improve the identify operation's performance significantly. When set to false, neither the geometry nor attributes of catalog items will be returned. 
""" url = self._url + "/identify" params = { "f" : "json", "geometry" : geometry, "geometryType": geometryType } if not mosaicRule is None: params["mosaicRule"] = mosaicRule if not renderingRule is None: params["renderingRule"] = renderingRule if not renderingRules is None: params["renderingRules"] = renderingRules if not pixelSize is None: params["pixelSize"] = pixelSize if not time is None: params["time"] = time if not returnGeometry is None: params["returnGeometry"] = returnGeometry if not returnCatalogItems is None: params["returnCatalogItems"] = returnCatalogItems return self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "identify", "(", "self", ",", "geometry", ",", "geometryType", "=", "\"esriGeometryPoint\"", ",", "mosaicRule", "=", "None", ",", "renderingRule", "=", "None", ",", "renderingRules", "=", "None", ",", "pixelSize", "=", "None", ",", "time", "=", "None"...
51.804124
27.474227
def ParseOptions(cls, options, analysis_plugin): """Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (VirusTotalAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when a configuration parameter fails validation or when unable to connect to VirusTotal. """ if not isinstance(analysis_plugin, virustotal.VirusTotalAnalysisPlugin): raise errors.BadConfigObject( 'Analysis plugin is not an instance of VirusTotalAnalysisPlugin') api_key = cls._ParseStringOption(options, 'virustotal_api_key') if not api_key: raise errors.BadConfigOption( 'VirusTotal API key not specified. Try again with ' '--virustotal-api-key.') analysis_plugin.SetAPIKey(api_key) enable_rate_limit = getattr( options, 'virustotal_free_rate_limit', cls._DEFAULT_RATE_LIMIT) if enable_rate_limit: analysis_plugin.EnableFreeAPIKeyRateLimit() lookup_hash = cls._ParseStringOption( options, 'virustotal_hash', default_value=cls._DEFAULT_HASH) analysis_plugin.SetLookupHash(lookup_hash) if not analysis_plugin.TestConnection(): raise errors.BadConfigOption('Unable to connect to VirusTotal')
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "analysis_plugin", ")", ":", "if", "not", "isinstance", "(", "analysis_plugin", ",", "virustotal", ".", "VirusTotalAnalysisPlugin", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "'Analysis plugin ...
37.714286
21.742857
def schema(self, dataset_id, table_id): """Retrieve the schema of the table Obtain from BigQuery the field names and field types for the table defined by the parameters Parameters ---------- dataset_id : str Name of the BigQuery dataset for the table table_id : str Name of the BigQuery table Returns ------- list of dicts Fields representing the schema """ table_ref = self.client.dataset(dataset_id).table(table_id) try: table = self.client.get_table(table_ref) remote_schema = table.schema remote_fields = [ field_remote.to_api_repr() for field_remote in remote_schema ] for field in remote_fields: field["type"] = field["type"].upper() field["mode"] = field["mode"].upper() return remote_fields except self.http_error as ex: self.process_http_error(ex)
[ "def", "schema", "(", "self", ",", "dataset_id", ",", "table_id", ")", ":", "table_ref", "=", "self", ".", "client", ".", "dataset", "(", "dataset_id", ")", ".", "table", "(", "table_id", ")", "try", ":", "table", "=", "self", ".", "client", ".", "ge...
29.676471
18.029412
def newProp(self, name, value): """Create a new property carried by a node. """ ret = libxml2mod.xmlNewProp(self._o, name, value) if ret is None:raise treeError('xmlNewProp() failed') __tmp = xmlAttr(_obj=ret) return __tmp
[ "def", "newProp", "(", "self", ",", "name", ",", "value", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewProp", "(", "self", ".", "_o", ",", "name", ",", "value", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewProp() failed'",...
42.833333
12.333333
def __read_block(self, size): """Read a block of 'size' bytes from the server. An internal buffer is used to read data from the server. If enough data is available from it, we return that data. Eventually, we try to grab the missing part from the server for Client.read_timeout seconds. If no data can be retrieved, it is considered as a fatal error and an 'Error' exception is raised. :param size: number of bytes to read :rtype: string :returns: the read block (can be empty) """ buf = b"" if len(self.__read_buffer): limit = ( size if size <= len(self.__read_buffer) else len(self.__read_buffer) ) buf = self.__read_buffer[:limit] self.__read_buffer = self.__read_buffer[limit:] size -= limit if not size: return buf try: buf += self.sock.recv(size) except (socket.timeout, ssl.SSLError): raise Error("Failed to read %d bytes from the server" % size) self.__dprint(buf) return buf
[ "def", "__read_block", "(", "self", ",", "size", ")", ":", "buf", "=", "b\"\"", "if", "len", "(", "self", ".", "__read_buffer", ")", ":", "limit", "=", "(", "size", "if", "size", "<=", "len", "(", "self", ".", "__read_buffer", ")", "else", "len", "...
35.21875
17.75
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None: """ Check request response for Slack API error Args: headers: Response headers data: Response data Raises: :class:`slack.exceptions.SlackAPIError` """ if not data["ok"]: raise exceptions.SlackAPIError(data.get("error", "unknow_error"), headers, data) if "warning" in data: LOG.warning("Slack API WARNING: %s", data["warning"])
[ "def", "raise_for_api_error", "(", "headers", ":", "MutableMapping", ",", "data", ":", "MutableMapping", ")", "->", "None", ":", "if", "not", "data", "[", "\"ok\"", "]", ":", "raise", "exceptions", ".", "SlackAPIError", "(", "data", ".", "get", "(", "\"err...
27.176471
23.176471
def qasm(self, prec=15): """Return the corresponding OPENQASM string.""" string = "gate " + self.name if self.arguments is not None: string += "(" + self.arguments.qasm(prec) + ")" string += " " + self.bitlist.qasm(prec) + "\n" string += "{\n" + self.body.qasm(prec) + "}" return string
[ "def", "qasm", "(", "self", ",", "prec", "=", "15", ")", ":", "string", "=", "\"gate \"", "+", "self", ".", "name", "if", "self", ".", "arguments", "is", "not", "None", ":", "string", "+=", "\"(\"", "+", "self", ".", "arguments", ".", "qasm", "(", ...
42.375
10.75
def get_catalogs_by_ids(self, *args, **kwargs): """Pass through to provider CatalogLookupSession.get_catalogs_by_ids""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_by_ids catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_ids(*args, **kwargs) cat_list = [] for cat in catalogs: cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy)) return CatalogList(cat_list)
[ "def", "get_catalogs_by_ids", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinLookupSession.get_bins_by_ids", "catalogs", "=", "self", ".", "_get_provider_session", "(", "'catalog_lookup_sess...
57.444444
20.888889
def extract(self, doc): """From the defined JSONPath(s), pull out the values and insert them into a document with renamed field(s) then apply the Extractor and return the doc with the extracted values """ if isinstance(self.jsonpaths, JSONPath): input_field = self.extractor.get_renamed_input_fields() if isinstance(self.extractor.get_renamed_input_fields(), list): input_field = input_field[0] jsonpath = self.jsonpaths renamed_inputs = dict() if self.flat_map_inputs: flat_mapped = itertools.chain.from_iterable( [iter(match.value) if hasattr(match.value, '__iter__') and not isinstance(match.value, dict) and not isinstance(match.value, basestring) else iter([match.value]) for match in jsonpath.find(doc)]) renamed_inputs[input_field] = flat_mapped if input_field in renamed_inputs: self.extract_from_renamed_inputs(doc, renamed_inputs) else: for value in [match.value for match in jsonpath.find(doc)]: renamed_inputs[input_field] = value self.extract_from_renamed_inputs(doc, renamed_inputs) elif isinstance(self.jsonpaths, types.ListType): renamed_inputs_lists = dict() for jsonpath, renamed_input in \ itertools.izip( iter(self.jsonpaths), iter(self.extractor.get_renamed_input_fields())): renamed_inputs_lists[renamed_input] = [ match.value for match in jsonpath.find(doc)] if self.flat_map_inputs: renamed_inputs_tuple_lists = [ (x, itertools.chain.from_iterable( [iter(z) if hasattr(z, '__iter__') and not isinstance(z, dict) and not isinstance(z, basestring) else iter([z])for z in y])) for x, y in renamed_inputs_lists.iteritems()] renamed_inputs = reduce( ExtractorProcessor.add_tuple_to_doc, renamed_inputs_tuple_lists, dict()) self.extract_from_renamed_inputs(doc, renamed_inputs) else: renamed_inputs_lists_lists = [[(x, z) for z in y] for x, y in renamed_inputs_lists.iteritems()] for i in itertools.product(*renamed_inputs_lists_lists): renamed_inputs = reduce( ExtractorProcessor.add_tuple_to_doc, i, dict()) self.extract_from_renamed_inputs(doc, renamed_inputs) else: raise 
ValueError("input_fields must be a string or a list") return doc
[ "def", "extract", "(", "self", ",", "doc", ")", ":", "if", "isinstance", "(", "self", ".", "jsonpaths", ",", "JSONPath", ")", ":", "input_field", "=", "self", ".", "extractor", ".", "get_renamed_input_fields", "(", ")", "if", "isinstance", "(", "self", "...
47.870968
18.983871
def insert(exif, image, new_file=None): """ py:function:: piexif.insert(exif_bytes, filename) Insert exif into JPEG. :param bytes exif_bytes: Exif as bytes :param str filename: JPEG """ if exif[0:6] != b"\x45\x78\x69\x66\x00\x00": raise ValueError("Given data is not exif data") output_file = False # Prevents "UnicodeWarning: Unicode equal comparison failed" warnings on Python 2 maybe_image = sys.version_info >= (3,0,0) or isinstance(image, str) if maybe_image and image[0:2] == b"\xff\xd8": image_data = image file_type = "jpeg" elif maybe_image and image[0:4] == b"RIFF" and image[8:12] == b"WEBP": image_data = image file_type = "webp" else: with open(image, 'rb') as f: image_data = f.read() if image_data[0:2] == b"\xff\xd8": file_type = "jpeg" elif image_data[0:4] == b"RIFF" and image_data[8:12] == b"WEBP": file_type = "webp" else: raise InvalidImageDataError output_file = True if file_type == "jpeg": exif = b"\xff\xe1" + struct.pack(">H", len(exif) + 2) + exif segments = split_into_segments(image_data) new_data = merge_segments(segments, exif) elif file_type == "webp": exif = exif[6:] new_data = _webp.insert(image_data, exif) if isinstance(new_file, io.BytesIO): new_file.write(new_data) new_file.seek(0) elif new_file: with open(new_file, "wb+") as f: f.write(new_data) elif output_file: with open(image, "wb+") as f: f.write(new_data) else: raise ValueError("Give a 3rd argument to 'insert' to output file")
[ "def", "insert", "(", "exif", ",", "image", ",", "new_file", "=", "None", ")", ":", "if", "exif", "[", "0", ":", "6", "]", "!=", "b\"\\x45\\x78\\x69\\x66\\x00\\x00\"", ":", "raise", "ValueError", "(", "\"Given data is not exif data\"", ")", "output_file", "=",...
32.5
17.038462
def get_inbox_documents_per_page(self, per_page=1000, page=1): """ Get inbox documents per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=INBOX_DOCUMENTS, per_page=per_page, page=page, )
[ "def", "get_inbox_documents_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "INBOX_DOCUMENTS", ",", "per_page", "=", "per_page", ",", "page", "=", "...
30
13.692308
def update_user(self, ID, data): """Update a User.""" # http://teampasswordmanager.com/docs/api-users/#update_user log.info('Update user %s with %s' % (ID, data)) self.put('users/%s.json' % ID, data)
[ "def", "update_user", "(", "self", ",", "ID", ",", "data", ")", ":", "# http://teampasswordmanager.com/docs/api-users/#update_user", "log", ".", "info", "(", "'Update user %s with %s'", "%", "(", "ID", ",", "data", ")", ")", "self", ".", "put", "(", "'users/%s.j...
45.4
11
def print_solution(solution): """Prints a solution Arguments --------- solution : BaseSolution Example ------- :: [8, 9, 10, 7]: 160 [5, 6]: 131 [3, 4, 2]: 154 Total cost: 445 """ total_cost = 0 for solution in solution.routes(): cost = solution.length() total_cost = total_cost + cost print('{}: {}'.format(solution, cost)) #print('xxx') print('Total cost: {}'.format(total_cost))
[ "def", "print_solution", "(", "solution", ")", ":", "total_cost", "=", "0", "for", "solution", "in", "solution", ".", "routes", "(", ")", ":", "cost", "=", "solution", ".", "length", "(", ")", "total_cost", "=", "total_cost", "+", "cost", "print", "(", ...
20.25
18.708333
def _try_parse_gene_association(self, reaction_id, s): """Try to parse the given gene association rule. Logs a warning if the association rule could not be parsed and returns the original string. Otherwise, returns the boolean.Expression object. """ s = s.strip() if s == '': return None try: return boolean.Expression(s) except boolean.ParseError as e: msg = 'Failed to parse gene association for {}: {}'.format( reaction_id, text_type(e)) if e.indicator is not None: msg += '\n{}\n{}'.format(s, e.indicator) logger.warning(msg) return s
[ "def", "_try_parse_gene_association", "(", "self", ",", "reaction_id", ",", "s", ")", ":", "s", "=", "s", ".", "strip", "(", ")", "if", "s", "==", "''", ":", "return", "None", "try", ":", "return", "boolean", ".", "Expression", "(", "s", ")", "except...
34.3
18.95
def init_raspbian_disk(self, yes=0): """ Downloads the latest Raspbian image and writes it to a microSD card. Based on the instructions from: https://www.raspberrypi.org/documentation/installation/installing-images/linux.md """ self.assume_localhost() yes = int(yes) device_question = 'SD card present at %s? ' % self.env.sd_device if not yes and not raw_input(device_question).lower().startswith('y'): return r = self.local_renderer r.local_if_missing( fn='{raspbian_image_zip}', cmd='wget {raspbian_download_url} -O raspbian_lite_latest.zip') r.lenv.img_fn = \ r.local("unzip -l {raspbian_image_zip} | sed -n 4p | awk '{{print $4}}'", capture=True) or '$IMG_FN' r.local('echo {img_fn}') r.local('[ ! -f {img_fn} ] && unzip {raspbian_image_zip} {img_fn} || true') r.lenv.img_fn = r.local('readlink -f {img_fn}', capture=True) r.local('echo {img_fn}') with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir}" ] && umount {sd_media_mount_dir} || true') with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir2}" ] && umount {sd_media_mount_dir2} || true') r.pc('Writing the image onto the card.') r.sudo('time dd bs=4M if={img_fn} of={sd_device}') # Flush all writes to disk. r.run('sync')
[ "def", "init_raspbian_disk", "(", "self", ",", "yes", "=", "0", ")", ":", "self", ".", "assume_localhost", "(", ")", "yes", "=", "int", "(", "yes", ")", "device_question", "=", "'SD card present at %s? '", "%", "self", ".", "env", ".", "sd_device", "if", ...
38.837838
24.891892
def list(self, filterfn=lambda x: True): """Return all direct descendands of directory `self` for which `filterfn` returns True. """ return [self / p for p in self.listdir() if filterfn(self / p)]
[ "def", "list", "(", "self", ",", "filterfn", "=", "lambda", "x", ":", "True", ")", ":", "return", "[", "self", "/", "p", "for", "p", "in", "self", ".", "listdir", "(", ")", "if", "filterfn", "(", "self", "/", "p", ")", "]" ]
45.4
7.2
def path_to_dir(*path_args): """Convert a UNIX-style path into platform specific directory spec.""" return os.path.join( *list(path_args[:-1]) + path_args[-1].split(posixpath.sep) )
[ "def", "path_to_dir", "(", "*", "path_args", ")", ":", "return", "os", ".", "path", ".", "join", "(", "*", "list", "(", "path_args", "[", ":", "-", "1", "]", ")", "+", "path_args", "[", "-", "1", "]", ".", "split", "(", "posixpath", ".", "sep", ...
42.6
17
def _get_template_list(self): " Get the hierarchy of templates belonging to the object/box_type given. " t_list = [] if hasattr(self.obj, 'category_id') and self.obj.category_id: cat = self.obj.category base_path = 'box/category/%s/content_type/%s/' % (cat.path, self.name) if hasattr(self.obj, 'slug'): t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,)) t_list.append(base_path + '%s.html' % (self.box_type,)) t_list.append(base_path + 'box.html') base_path = 'box/content_type/%s/' % self.name if hasattr(self.obj, 'slug'): t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,)) t_list.append(base_path + '%s.html' % (self.box_type,)) t_list.append(base_path + 'box.html') t_list.append('box/%s.html' % self.box_type) t_list.append('box/box.html') return t_list
[ "def", "_get_template_list", "(", "self", ")", ":", "t_list", "=", "[", "]", "if", "hasattr", "(", "self", ".", "obj", ",", "'category_id'", ")", "and", "self", ".", "obj", ".", "category_id", ":", "cat", "=", "self", ".", "obj", ".", "category", "ba...
45.52381
22.857143
def get_default_config(self): """ Returns the default collector settings """ config = super(MonitCollector, self).get_default_config() config.update({ 'host': '127.0.0.1', 'port': 2812, 'user': 'monit', 'passwd': 'monit', 'path': 'monit', 'byte_unit': ['byte'], 'send_totals': False, }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "MonitCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'host'", ":", "'127.0.0.1'", ",", "'port'", ":", "2812", ",", "'...
31.2
9.066667
def plot_transaction_rate_heterogeneity( model, suptitle="Heterogeneity in Transaction Rate", xlabel="Transaction Rate", ylabel="Density", suptitle_fontsize=14, **kwargs ): """ Plot the estimated gamma distribution of lambda (customers' propensities to purchase). Parameters ---------- model: lifetimes model A fitted lifetimes model, for now only for BG/NBD suptitle: str, optional Figure suptitle xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot """ from matplotlib import pyplot as plt r, alpha = model._unload_params("r", "alpha") rate_mean = r / alpha rate_var = r / alpha ** 2 rv = stats.gamma(r, scale=1 / alpha) lim = rv.ppf(0.99) x = np.linspace(0, lim, 100) fig, ax = plt.subplots(1) fig.suptitle("Heterogeneity in Transaction Rate", fontsize=suptitle_fontsize, fontweight="bold") ax.set_title("mean: {:.3f}, var: {:.3f}".format(rate_mean, rate_var)) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) fig.tight_layout(rect=[0, 0.03, 1, 0.95]) plt.plot(x, rv.pdf(x), **kwargs) return ax
[ "def", "plot_transaction_rate_heterogeneity", "(", "model", ",", "suptitle", "=", "\"Heterogeneity in Transaction Rate\"", ",", "xlabel", "=", "\"Transaction Rate\"", ",", "ylabel", "=", "\"Density\"", ",", "suptitle_fontsize", "=", "14", ",", "*", "*", "kwargs", ")",...
25.469388
21.265306
def getFileInfos(self): """Return a list of FileInfo objects""" data = self.searchIndex(False) self.data = data self.printd(" ") fileInfos = [] for datum in data: try: fileInfo = self.getFileInfo(datum[0], datum[1]) fileInfos.append(fileInfo) except NotImplementedError: self.printd("Error: the info command is not supported for " + self.name + ".") return [] return fileInfos
[ "def", "getFileInfos", "(", "self", ")", ":", "data", "=", "self", ".", "searchIndex", "(", "False", ")", "self", ".", "data", "=", "data", "self", ".", "printd", "(", "\" \"", ")", "fileInfos", "=", "[", "]", "for", "datum", "in", "data", ":", "tr...
28.5
19.142857
def _resolve_class(self, new_class, namespace, qualifier_repo, verbose=None): """ Resolve the class defined by new_class by: 1. Validating that the new class provided is a valid class. 2. Validating the class against the repository to confirm that components required in the repository are in the repository, This includes the superclass if specified and the dependencies for EmbeddedInstance and reference properties. 2. propagating any properties, methods, parameters, and qualifiers from the superclass if it is defined. """ is_association_class = 'Association' in new_class.qualifiers if new_class.superclass: try: superclass = self._get_class(new_class.superclass, namespace=namespace, local_only=False, include_qualifiers=True, include_classorigin=True) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise CIMError( CIM_ERR_INVALID_SUPERCLASS, _format("Superclass {0!A} for class {1!A} not found " "in namespace {2!A}.", new_class.superclass, new_class.classname, namespace)) else: raise else: superclass = None # Validate association qualifier matches superclass if is_association_class and superclass: if 'Association' not in superclass.qualifiers: raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("New class {0|A} derived from superclass {1!A} " "in namespace {3!A} which is not Association " "Class .", new_class.classname, new_class.superclass, namespace)) # validate no reference properties in non-association for new_prop in six.itervalues(new_class.properties): if not is_association_class and new_prop.type == 'reference': raise CIMError( CIM_ERR_INVALID_PARAMETER, _format("Reference property {0!A} not allowed on " "non-association class {1!A}", new_prop.name, new_class.classname)) objects = list(new_class.properties.values()) for meth in new_class.methods.values(): objects += list(meth.parameters.values()) # Validate the attributes of all qualifiers in the new class if qualifier_repo: self._validate_qualifiers(new_class.qualifiers, qualifier_repo, 
new_class, 'CLASS') for pvalue in new_class.properties.values(): self._validate_qualifiers(pvalue.qualifiers, qualifier_repo, new_class, 'PROPERTY') for mvalue in new_class.methods.values(): self._validate_qualifiers(mvalue.qualifiers, qualifier_repo, new_class, 'METHOD') for pvalue in mvalue.parameters.values(): self._validate_qualifiers(pvalue.qualifiers, qualifier_repo, new_class, 'PARAMETER') # resolve class level qualifiers and attributes qualdict = superclass.qualifiers if superclass else {} new_class.classorigin = superclass.classname if superclass \ else new_class.classname new_class.propagated = True if superclass else False self._resolve_qualifiers(new_class.qualifiers, qualdict, new_class, superclass, new_class.classname, 'class', qualifier_repo, propagate=False, verbose=verbose) classrepo = self._get_class_repo(namespace) # resolve properties in new class self._resolve_objects(new_class.properties, superclass.properties if superclass else None, new_class, superclass, classrepo, qualifier_repo, "Property", verbose=verbose) # resolve methods and parameters in new class self._resolve_objects(new_class.methods, superclass.methods if superclass else None, new_class, superclass, classrepo, qualifier_repo, "Method", verbose=verbose) return new_class
[ "def", "_resolve_class", "(", "self", ",", "new_class", ",", "namespace", ",", "qualifier_repo", ",", "verbose", "=", "None", ")", ":", "is_association_class", "=", "'Association'", "in", "new_class", ".", "qualifiers", "if", "new_class", ".", "superclass", ":",...
47.009434
20.632075
def calibrate_cameras(self): """Calibrate cameras based on found chessboard corners.""" criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5) flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH) calib = StereoCalibration() (calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat) = cv2.stereoCalibrate(self.object_points, self.image_points["left"], self.image_points["right"], self.image_size, calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], calib.rot_mat, calib.trans_vec, calib.e_mat, calib.f_mat, criteria=criteria, flags=flags)[1:] (calib.rect_trans["left"], calib.rect_trans["right"], calib.proj_mats["left"], calib.proj_mats["right"], calib.disp_to_depth_mat, calib.valid_boxes["left"], calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"], calib.dist_coefs["left"], calib.cam_mats["right"], calib.dist_coefs["right"], self.image_size, calib.rot_mat, calib.trans_vec, flags=0) for side in ("left", "right"): (calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap( calib.cam_mats[side], calib.dist_coefs[side], calib.rect_trans[side], calib.proj_mats[side], self.image_size, cv2.CV_32FC1) # This is replaced because my results were always bad. Estimates are # taken from the OpenCV samples. width, height = self.image_size focal_length = 0.8 * width calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]]) return calib
[ "def", "calibrate_cameras", "(", "self", ")", ":", "criteria", "=", "(", "cv2", ".", "TERM_CRITERIA_MAX_ITER", "+", "cv2", ".", "TERM_CRITERIA_EPS", ",", "100", ",", "1e-5", ")", "flags", "=", "(", "cv2", ".", "CALIB_FIX_ASPECT_RATIO", "+", "cv2", ".", "CA...
61.641509
23.264151
def hexblock_cb(cls, callback, data, address = None, bits = None, width = 16, cb_args = (), cb_kwargs = {}): """ Dump a block of binary data using a callback function to convert each line of text. @type callback: function @param callback: Callback function to convert each line of data. @type data: str @param data: Binary data. @type address: str @param address: (Optional) Memory address where the data was read from. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.address_size} @type cb_args: str @param cb_args: (Optional) Arguments to pass to the callback function. @type cb_kwargs: str @param cb_kwargs: (Optional) Keyword arguments to pass to the callback function. @type width: int @param width: (Optional) Maximum number of bytes to convert per text line. @rtype: str @return: Multiline output text. """ result = '' if address is None: for i in compat.xrange(0, len(data), width): result = '%s%s\n' % ( result, \ callback(data[i:i+width], *cb_args, **cb_kwargs) ) else: for i in compat.xrange(0, len(data), width): result = '%s%s: %s\n' % ( result, cls.address(address, bits), callback(data[i:i+width], *cb_args, **cb_kwargs) ) address += width return result
[ "def", "hexblock_cb", "(", "cls", ",", "callback", ",", "data", ",", "address", "=", "None", ",", "bits", "=", "None", ",", "width", "=", "16", ",", "cb_args", "=", "(", ")", ",", "cb_kwargs", "=", "{", "}", ")", ":", "result", "=", "''", "if", ...
37.358491
22.90566
def _refresh_db_conditional(saltenv, **kwargs): ''' Internal use only in this module, has a different set of defaults and returns True or False. And supports checking the age of the existing generated metadata db, as well as ensure metadata db exists to begin with Args: saltenv (str): Salt environment Kwargs: force (bool): Force a refresh if the minimum age has been reached. Default is False. failhard (bool): If ``True``, an error will be raised if any repo SLS files failed to process. Returns: bool: True Fetched or Cache uptodate, False to indicate an issue :codeauthor: Damon Atkins <https://github.com/damon-atkins> ''' force = salt.utils.data.is_true(kwargs.pop('force', False)) failhard = salt.utils.data.is_true(kwargs.pop('failhard', False)) expired_max = __opts__['winrepo_cache_expire_max'] expired_min = __opts__['winrepo_cache_expire_min'] repo_details = _get_repo_details(saltenv) # Skip force if age less than minimum age if force and expired_min > 0 and repo_details.winrepo_age < expired_min: log.info( 'Refresh skipped, age of winrepo metadata in seconds (%s) is less ' 'than winrepo_cache_expire_min (%s)', repo_details.winrepo_age, expired_min ) force = False # winrepo_age is -1 if repo db does not exist refresh = True if force \ or repo_details.winrepo_age == -1 \ or repo_details.winrepo_age > expired_max \ else False if not refresh: log.debug( 'Using existing pkg metadata db for saltenv \'%s\' (age is %s)', saltenv, datetime.timedelta(seconds=repo_details.winrepo_age) ) return True if repo_details.winrepo_age == -1: # no repo meta db log.debug( 'No winrepo.p cache file for saltenv \'%s\', creating one now', saltenv ) results = refresh_db(saltenv=saltenv, verbose=False, failhard=failhard) try: # Return True if there were no failed winrepo SLS files, and False if # failures were reported. return not bool(results.get('failed', 0)) except AttributeError: return False
[ "def", "_refresh_db_conditional", "(", "saltenv", ",", "*", "*", "kwargs", ")", ":", "force", "=", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "pop", "(", "'force'", ",", "False", ")", ")", "failhard", "=", "salt", ".", "ut...
33.328358
25.477612
def make_statement(self, action, mention): """Makes an INDRA statement from a Geneways action and action mention. Parameters ---------- action : GenewaysAction The mechanism that the Geneways mention maps to. Note that several text mentions can correspond to the same action if they are referring to the same relationship - there may be multiple Geneways action mentions corresponding to each action. mention : GenewaysActionMention The Geneways action mention object corresponding to a single mention of a mechanism in a specific text. We make a new INDRA statement corresponding to each action mention. Returns ------- statement : indra.statements.Statement An INDRA statement corresponding to the provided Geneways action mention, or None if the action mention's type does not map onto any INDRA statement type in geneways_action_type_mapper. """ (statement_generator, is_direct) = \ geneways_action_to_indra_statement_type(mention.actiontype, action.plo) if statement_generator is None: # Geneways statement does not map onto an indra statement return None # Try to find the full-text sentence # Unfortunately, the sentence numbers in the Geneways dataset # don't correspond to an obvious sentence segmentation. 
# This code looks for sentences with the subject, object, and verb # listed by the Geneways action mention table and only includes # it in the evidence if there is exactly one such sentence text = None if self.get_ft_mention: try: content, content_type = get_full_text(mention.pmid, 'pmid') if content is not None: ftm = FullTextMention(mention, content) sentences = ftm.find_matching_sentences() if len(sentences) == 1: text = sentences[0] except Exception: logger.warning('Could not fetch full text for PMID ' + mention.pmid) # Make an evidence object epistemics = dict() epistemics['direct'] = is_direct annotations = mention.make_annotation() annotations['plo'] = action.plo # plo only in action table evidence = Evidence(source_api='geneways', source_id=mention.actionmentionid, pmid=mention.pmid, text=text, epistemics=epistemics, annotations=annotations) # Construct the grounded and name standardized agents # Note that this involves grounding the agent by # converting the Entrez ID listed in the Geneways data with # HGNC and UniProt upstream_agent = get_agent(mention.upstream, action.up) downstream_agent = get_agent(mention.downstream, action.dn) # Make the statement return statement_generator(upstream_agent, downstream_agent, evidence)
[ "def", "make_statement", "(", "self", ",", "action", ",", "mention", ")", ":", "(", "statement_generator", ",", "is_direct", ")", "=", "geneways_action_to_indra_statement_type", "(", "mention", ".", "actiontype", ",", "action", ".", "plo", ")", "if", "statement_...
46.057971
20.826087
def parse_response(response, encoding='utf-8'): """Parse a multipart Requests.Response into a tuple of BodyPart objects. Args: response: Requests.Response encoding: The parser will assume that any text in the HTML body is encoded with this encoding when decoding it for use in the ``text`` attribute. Returns: tuple of BodyPart Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode), encoding (str). """ return requests_toolbelt.multipart.decoder.MultipartDecoder.from_response( response, encoding ).parts
[ "def", "parse_response", "(", "response", ",", "encoding", "=", "'utf-8'", ")", ":", "return", "requests_toolbelt", ".", "multipart", ".", "decoder", ".", "MultipartDecoder", ".", "from_response", "(", "response", ",", "encoding", ")", ".", "parts" ]
31.052632
25.421053
def setmem(vm_, memory, config=False, **kwargs): ''' Changes the amount of memory allocated to VM. The VM must be shutdown for this to work. :param vm_: name of the domain :param memory: memory amount to set in MB :param config: if True then libvirt will be asked to modify the config as well :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.setmem <domain> <size> salt '*' virt.setmem my_domain 768 ''' conn = __get_conn(**kwargs) dom = _get_domain(conn, vm_) if VIRT_STATE_NAME_MAP.get(dom.info()[0], 'unknown') != 'shutdown': return False # libvirt has a funny bitwise system for the flags in that the flag # to affect the "current" setting is 0, which means that to set the # current setting we have to call it a second time with just 0 set flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM if config: flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG ret1 = dom.setMemoryFlags(memory * 1024, flags) ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT) conn.close() # return True if both calls succeeded return ret1 == ret2 == 0
[ "def", "setmem", "(", "vm_", ",", "memory", ",", "config", "=", "False", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "dom", "=", "_get_domain", "(", "conn", ",", "vm_", ")", "if", "VIRT_STATE_NAME_MAP",...
31.911111
24.266667
def get_orderbook(self): """Get orderbook for the instrument :Retruns: orderbook : dict orderbook dict for the instrument """ if self in self.parent.books.keys(): return self.parent.books[self] return { "bid": [0], "bidsize": [0], "ask": [0], "asksize": [0] }
[ "def", "get_orderbook", "(", "self", ")", ":", "if", "self", "in", "self", ".", "parent", ".", "books", ".", "keys", "(", ")", ":", "return", "self", ".", "parent", ".", "books", "[", "self", "]", "return", "{", "\"bid\"", ":", "[", "0", "]", ","...
25.714286
14.571429
def create_switch(apps, schema_editor): """Create the `role_based_access_control` switch if it does not already exist.""" Switch = apps.get_model('waffle', 'Switch') Switch.objects.update_or_create(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH, defaults={'active': False})
[ "def", "create_switch", "(", "apps", ",", "schema_editor", ")", ":", "Switch", "=", "apps", ".", "get_model", "(", "'waffle'", ",", "'Switch'", ")", "Switch", ".", "objects", ".", "update_or_create", "(", "name", "=", "ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH",...
71
20.25
def mode(data): """Return the most common data point from discrete or nominal data. ``mode`` assumes discrete data, and returns a single value. This is the standard treatment of the mode as commonly taught in schools: >>> mode([1, 1, 2, 3, 3, 3, 3, 4]) 3 This also works with nominal (non-numeric) data: >>> mode(["red", "blue", "blue", "red", "green", "red", "red"]) 'red' """ # Generate a table of sorted (value, frequency) pairs. hist = collections.Counter(data) top = hist.most_common(2) if len(top) == 1: return top[0][0] elif not top: raise StatisticsError('no mode for empty data') elif top[0][1] == top[1][1]: raise StatisticsError( 'no unique mode; found %d equally common values' % len(hist) ) else: return top[0][0]
[ "def", "mode", "(", "data", ")", ":", "# Generate a table of sorted (value, frequency) pairs.", "hist", "=", "collections", ".", "Counter", "(", "data", ")", "top", "=", "hist", ".", "most_common", "(", "2", ")", "if", "len", "(", "top", ")", "==", "1", ":...
28.413793
22.827586
def info(self, user_id): """Gets user information by user id Args: user_id(int): the id of user Returns: User Throws: RTMServiceError when request failed """ resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id)) if resp.is_fail(): raise RTMServiceError('Failed to get user information', resp) return resp.data['result']
[ "def", "info", "(", "self", ",", "user_id", ")", ":", "resp", "=", "self", ".", "_rtm_client", ".", "get", "(", "'v1/user.info?user_id={}'", ".", "format", "(", "user_id", ")", ")", "if", "resp", ".", "is_fail", "(", ")", ":", "raise", "RTMServiceError",...
25.647059
22
def writeLogToFile(self): """ writes the log to a """ if not os.path.exists(self.logFolder): os.mkdir(self.logFolder) with open(self.logFile, mode='a') as f: f.write('\n\n' + self.log)
[ "def", "writeLogToFile", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "logFolder", ")", ":", "os", ".", "mkdir", "(", "self", ".", "logFolder", ")", "with", "open", "(", "self", ".", "logFile", ",", "mode"...
26.777778
9.666667
def config_schema(self): """Returns the merged configuration data schema for this plugin type.""" from rez.config import _plugin_config_dict d = _plugin_config_dict.get(self.type_name, {}) for name, plugin_class in self.plugin_classes.iteritems(): if hasattr(plugin_class, "schema_dict") \ and plugin_class.schema_dict: d_ = {name: plugin_class.schema_dict} deep_update(d, d_) return dict_to_schema(d, required=True, modifier=expand_system_vars)
[ "def", "config_schema", "(", "self", ")", ":", "from", "rez", ".", "config", "import", "_plugin_config_dict", "d", "=", "_plugin_config_dict", ".", "get", "(", "self", ".", "type_name", ",", "{", "}", ")", "for", "name", ",", "plugin_class", "in", "self", ...
45.583333
15.333333
def _add_element(self, cls, **kwargs): """Add an element.""" # Convert stylename strings to actual style elements. kwargs = self._replace_stylename(kwargs) el = cls(**kwargs) self._doc.text.addElement(el)
[ "def", "_add_element", "(", "self", ",", "cls", ",", "*", "*", "kwargs", ")", ":", "# Convert stylename strings to actual style elements.", "kwargs", "=", "self", ".", "_replace_stylename", "(", "kwargs", ")", "el", "=", "cls", "(", "*", "*", "kwargs", ")", ...
39.833333
8
async def read(cls, *, hostnames: typing.Sequence[str] = None): """List nodes. :param hostnames: Sequence of hostnames to only return. :type hostnames: sequence of `str` """ params = {} if hostnames: params["hostname"] = [ normalize_hostname(hostname) for hostname in hostnames ] data = await cls._handler.read(**params) return cls(map(cls._object, data))
[ "async", "def", "read", "(", "cls", ",", "*", ",", "hostnames", ":", "typing", ".", "Sequence", "[", "str", "]", "=", "None", ")", ":", "params", "=", "{", "}", "if", "hostnames", ":", "params", "[", "\"hostname\"", "]", "=", "[", "normalize_hostname...
33.071429
12.571429
def emitFragment(fw, fragID, libID, shredded_seq, clr=None, qvchar='l', fasta=False): """ Print out the shredded sequence. """ if fasta: s = SeqRecord(shredded_seq, id=fragID, description="") SeqIO.write([s], fw, "fasta") return seq = str(shredded_seq) slen = len(seq) qvs = qvchar * slen # shredded reads have default low qv if clr is None: clr_beg, clr_end = 0, slen else: clr_beg, clr_end = clr print(frgTemplate.format(fragID=fragID, libID=libID, seq=seq, qvs=qvs, clr_beg=clr_beg, clr_end=clr_end), file=fw)
[ "def", "emitFragment", "(", "fw", ",", "fragID", ",", "libID", ",", "shredded_seq", ",", "clr", "=", "None", ",", "qvchar", "=", "'l'", ",", "fasta", "=", "False", ")", ":", "if", "fasta", ":", "s", "=", "SeqRecord", "(", "shredded_seq", ",", "id", ...
29.25
20.75
def update_ride(api_client, ride_status, ride_id): """Use an UberRidesClient to update ride status and print the results. Parameters api_client (UberRidesClient) An authorized UberRidesClient with 'request' scope. ride_status (str) New ride status to update to. ride_id (str) Unique identifier for ride to update. """ try: update_product = api_client.update_sandbox_ride(ride_id, ride_status) except (ClientError, ServerError) as error: fail_print(error) else: message = '{} New status: {}' message = message.format(update_product.status_code, ride_status) success_print(message)
[ "def", "update_ride", "(", "api_client", ",", "ride_status", ",", "ride_id", ")", ":", "try", ":", "update_product", "=", "api_client", ".", "update_sandbox_ride", "(", "ride_id", ",", "ride_status", ")", "except", "(", "ClientError", ",", "ServerError", ")", ...
32.666667
18.809524
def post(self, request, *args, **kwargs): """ Method for handling POST requests. Expects the 'vid' of the version to act on to be passed as in the POST variable 'version'. If a POST variable 'revert' is present this will call the revert method and then return a 'render redirect' to the result of the `get_done_url` method. If a POST variable 'delete' is present this will call the delete method and return a 'render redirect' to the result of the `get_done_url` method. If this method receives unexpected input, it will silently redirect to the result of the `get_done_url` method. """ versions = self._get_versions() url = self.get_done_url() msg = None try: vid = int(request.POST.get('version', '')) version = versions.get(vid=vid) if request.POST.get('revert'): object_url = self.get_object_url() msg = self.revert(version, object_url) elif request.POST.get('delete'): msg = self.delete(version) # Delete should redirect back to itself url = self.request.build_absolute_uri() # If the give version isn't valid we'll just silently redirect except (ValueError, versions.model.DoesNotExist): pass return self.render(request, redirect_url=url, message=msg, obj=self.object, collect_render_data=False)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "versions", "=", "self", ".", "_get_versions", "(", ")", "url", "=", "self", ".", "get_done_url", "(", ")", "msg", "=", "None", "try", ":", "vid", "=...
36.47619
16.952381
def get_scenario_data(scenario_id,**kwargs): """ Get all the datasets from the group with the specified name @returns a list of dictionaries """ user_id = kwargs.get('user_id') scenario_data = db.DBSession.query(Dataset).filter(Dataset.id==ResourceScenario.dataset_id, ResourceScenario.scenario_id==scenario_id).options(joinedload_all('metadata')).distinct().all() for sd in scenario_data: if sd.hidden == 'Y': try: sd.check_read_permission(user_id) except: sd.value = None sd.metadata = [] db.DBSession.expunge_all() log.info("Retrieved %s datasets", len(scenario_data)) return scenario_data
[ "def", "get_scenario_data", "(", "scenario_id", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "scenario_data", "=", "db", ".", "DBSession", ".", "query", "(", "Dataset", ")", ".", "filter", "(", "Dataset"...
33.52381
23.142857
def fcoe_get_interface_output_fcoe_intf_list_interface_name(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for the fcoe-get-interface RPC output
    (interface-name leaf keyed by fcoe-intf-fcoe-port-id) and dispatches it
    through the configured callback.

    Removed a dead assignment: the original created an unused
    ``ET.Element("config")`` that was immediately overwritten.
    """
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    interface_name = ET.SubElement(fcoe_intf_list, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "fcoe_get_interface_output_fcoe_intf_list_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_interface", "=", "ET", ".", "Element", "(", "\"fcoe_get_interface\"", ")", "con...
50.8
21
def get(self, acl):
    """Get the ACL specified by ID belonging to this instance.

    See :py:meth:`Acls.get` for call signature.
    """
    client = self._instance._client
    return client.acls.get(self._instance.name, acl)
[ "def", "get", "(", "self", ",", "acl", ")", ":", "return", "self", ".", "_instance", ".", "_client", ".", "acls", ".", "get", "(", "self", ".", "_instance", ".", "name", ",", "acl", ")" ]
36.5
17.333333
def reversed(self):
    '''
    Return a new FSM such that for every string that self accepts (e.g.
    "beer", the new FSM accepts the reversed string ("reeb").
    '''
    alphabet = self.alphabet

    # Start from a composite "state-set" consisting of all final states.
    # If there are no final states, this set is empty and we'll find that
    # no other states get generated.
    initial = frozenset(self.finals)

    # Find every possible way to reach the current state-set
    # using this symbol.
    def follow(current, symbol):
        # Collect every predecessor state whose transition on `symbol`
        # lands inside the current state-set (i.e. follow edges backwards).
        next = frozenset([
            prev
            for prev in self.map
            for state in current
            if symbol in self.map[prev] and self.map[prev][symbol] == state
        ])
        if len(next) == 0:
            # No way backwards: this reversed path is a dead end.
            raise OblivionError
        return next

    # A state-set is final if the initial state is in it.
    def final(state):
        return self.initial in state

    # Man, crawl() is the best!
    return crawl(alphabet, initial, final, follow)
[ "def", "reversed", "(", "self", ")", ":", "alphabet", "=", "self", ".", "alphabet", "# Start from a composite \"state-set\" consisting of all final states.", "# If there are no final states, this set is empty and we'll find that", "# no other states get generated.", "initial", "=", "...
28.806452
22.741935
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False):
    """
    Serialize get request and deserialize response
    """
    # Look up the codec registered for the fetch-request message code.
    msg_code = riak.pb.messages.MSG_CODE_GET_REQ
    codec = self._get_codec(msg_code)
    # Encode the request together with the per-request quorum/timeout
    # options; head_only skips fetching the object body.
    msg = codec.encode_get(robj, r, pr, timeout,
                           basic_quorum, notfound_ok,
                           head_only)
    resp_code, resp = self._request(msg, codec)
    # Decode the response into the passed-in object and return it.
    return codec.decode_get(robj, resp)
[ "def", "get", "(", "self", ",", "robj", ",", "r", "=", "None", ",", "pr", "=", "None", ",", "timeout", "=", "None", ",", "basic_quorum", "=", "None", ",", "notfound_ok", "=", "None", ",", "head_only", "=", "False", ")", ":", "msg_code", "=", "riak"...
44.083333
8.916667
def _strip(string, pattern): """Return complement of pattern in string""" m = re.compile(pattern).search(string) if m: return string[0:m.start()] + string[m.end():len(string)] else: return string
[ "def", "_strip", "(", "string", ",", "pattern", ")", ":", "m", "=", "re", ".", "compile", "(", "pattern", ")", ".", "search", "(", "string", ")", "if", "m", ":", "return", "string", "[", "0", ":", "m", ".", "start", "(", ")", "]", "+", "string"...
27.625
19.875
def get_java_binpath(cmd=None):
    """Retrieve path for java to use, handling custom BCBIO_JAVA_HOME

    Defaults to the dirname of cmd, or local anaconda directory
    """
    override = os.environ.get("BCBIO_JAVA_HOME")
    if override:
        candidate = os.path.join(override, "bin", "java")
        if os.path.exists(candidate):
            cmd = candidate
    if not cmd:
        # Fall back to the java bundled next to the local Rscript install.
        cmd = Rscript_cmd()
    return os.path.dirname(cmd)
[ "def", "get_java_binpath", "(", "cmd", "=", "None", ")", ":", "if", "os", ".", "environ", ".", "get", "(", "\"BCBIO_JAVA_HOME\"", ")", ":", "test_cmd", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "\"BCBIO_JAVA_HOME\"", "]", ","...
35.25
14.583333
def get_git_changeset():
    """Get git identifier; taken from Django project."""
    proc = Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=PIPE, stderr=PIPE, shell=True, universal_newlines=True)
    raw_timestamp = proc.communicate()[0]
    try:
        changeset = datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # No git repo / unparsable output: signal "unknown" with None.
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
[ "def", "get_git_changeset", "(", ")", ":", "git_log", "=", "Popen", "(", "'git log --pretty=format:%ct --quiet -1 HEAD'", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "True", ",", "universal_newlines", "=", "True", ")", "timestamp",...
38.090909
16.090909
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
    """Parse leaf-list value or list keys."""
    # The keys/value text runs up to the next "/" (or to the end of input).
    try:
        keys = self.up_to("/")
    except EndOfInput:
        keys = self.remaining()
    if not keys:
        raise UnexpectedInput(self, "entry value or keys")
    if isinstance(sn, LeafListNode):
        # A leaf-list entry is selected by its (percent-decoded) value.
        return EntryValue(unquote(keys))
    ks = keys.split(",")
    try:
        # A list entry must supply exactly one value per key leaf.
        if len(ks) != len(sn.keys):
            raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
    except AttributeError:
        # `sn` has no `keys` attribute, i.e. it is not actually a list node.
        raise BadSchemaNodeType(sn.qual_name, "list")
    sel = {}
    for j in range(len(ks)):
        knod = sn.get_data_child(*sn.keys[j])
        val = unquote(ks[j])
        # Namespace is recorded as None when the key leaf lives in the
        # list's own namespace.
        sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
    return EntryKeys(sel)
[ "def", "_key_values", "(", "self", ",", "sn", ":", "\"SequenceNode\"", ")", "->", "Union", "[", "EntryKeys", ",", "EntryValue", "]", ":", "try", ":", "keys", "=", "self", ".", "up_to", "(", "\"/\"", ")", "except", "EndOfInput", ":", "keys", "=", "self"...
39.454545
14.954545
def codigo_ibge_uf(sigla):
    """Return the IBGE code for the given federation-unit abbreviation."""
    siglas = [uf[0] for uf in UNIDADES_FEDERACAO]
    return UNIDADES_FEDERACAO[siglas.index(sigla)][_UF_CODIGO_IBGE]
[ "def", "codigo_ibge_uf", "(", "sigla", ")", ":", "idx", "=", "[", "s", "for", "s", ",", "i", ",", "n", ",", "r", "in", "UNIDADES_FEDERACAO", "]", ".", "index", "(", "sigla", ")", "return", "UNIDADES_FEDERACAO", "[", "idx", "]", "[", "_UF_CODIGO_IBGE", ...
48.75
12
def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]:
    # noinspection PyUnresolvedReferences
    """Prepare an IO example configuration.

    Sets up a five-day initialisation period ('2000-01-01' to '2000-01-05',
    daily step), a plain IO directory layout ('inputpath', 'outputpath',
    'nodepath'), three elements running application models |lland_v1| and
    |lland_v2|, and two nodes handling variables `Q` and `T`.  The input,
    flux, state and simulation series of all models and nodes are filled
    with distinct consecutive values, and every sequence additionally
    carries a ``testarray`` attribute holding a (deep) copy of its series
    data for testing purposes.

    Returns the |Nodes| and |Elements| objects.
    """
    from hydpy import TestIO
    TestIO.clear()
    from hydpy.core.filetools import SequenceManager
    hydpy.pub.sequencemanager = SequenceManager()
    with TestIO():
        hydpy.pub.sequencemanager.inputdirpath = 'inputpath'
        hydpy.pub.sequencemanager.fluxdirpath = 'outputpath'
        hydpy.pub.sequencemanager.statedirpath = 'outputpath'
        hydpy.pub.sequencemanager.nodedirpath = 'nodepath'
    hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d'
    from hydpy import Node, Nodes, Element, Elements, prepare_model
    node1 = Node('node1')
    node2 = Node('node2', variable='T')
    nodes = Nodes(node1, node2)
    element1 = Element('element1', outlets=node1)
    element2 = Element('element2', outlets=node1)
    element3 = Element('element3', outlets=node1)
    elements = Elements(element1, element2, element3)
    from hydpy.models import lland_v1, lland_v2
    element1.model = prepare_model(lland_v1)
    element2.model = prepare_model(lland_v1)
    element3.model = prepare_model(lland_v2)
    from hydpy.models.lland import ACKER
    for idx, element in enumerate(elements):
        parameters = element.model.parameters
        parameters.control.nhru(idx+1)
        parameters.control.lnk(ACKER)
        parameters.derived.absfhru(10.0)
    with hydpy.pub.options.printprogress(False):
        nodes.prepare_simseries()
        elements.prepare_inputseries()
        elements.prepare_fluxseries()
        elements.prepare_stateseries()

    import numpy

    def init_values(seq, value1_):
        # Fill the series with consecutive floats and stash an independent
        # copy on the sequence for later comparison in tests.
        value2_ = value1_ + len(seq.series.flatten())
        values_ = numpy.arange(value1_, value2_, dtype=float)
        seq.testarray = values_.reshape(seq.seriesshape)
        seq.series = seq.testarray.copy()
        return value2_

    value1 = 0
    for subname, seqname in zip(['inputs', 'fluxes', 'states'],
                                ['nied', 'nkor', 'bowa']):
        for element in elements:
            subseqs = getattr(element.model.sequences, subname)
            value1 = init_values(getattr(subseqs, seqname), value1)
    for node in nodes:
        value1 = init_values(node.sequences.sim, value1)
    return nodes, elements
[ "def", "prepare_io_example_1", "(", ")", "->", "Tuple", "[", "devicetools", ".", "Nodes", ",", "devicetools", ".", "Elements", "]", ":", "# noinspection PyUnresolvedReferences", "from", "hydpy", "import", "TestIO", "TestIO", ".", "clear", "(", ")", "from", "hydp...
34.885714
17.228571
def run(self, stdscr):
    """
    Initialize curses and refresh in a loop
    """
    self.win = stdscr
    # Hide the cursor and make getch() non-blocking.
    curses.curs_set(0)
    stdscr.timeout(0)
    # Color pairs used when rendering: cyan/green/yellow/red on black.
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    curses.init_pair(4, curses.COLOR_RED, curses.COLOR_BLACK)
    # Runs forever: one full refresh per cycle, then light refreshes every
    # 0.1s until the configured refresh interval has elapsed.
    while True:
        self.refresh(True)
        now = time.time()
        while time.time() - now < self._refresh_rate:
            time.sleep(0.1)
            self.refresh(False)
[ "def", "run", "(", "self", ",", "stdscr", ")", ":", "self", ".", "win", "=", "stdscr", "curses", ".", "curs_set", "(", "0", ")", "stdscr", ".", "timeout", "(", "0", ")", "curses", ".", "init_pair", "(", "1", ",", "curses", ".", "COLOR_CYAN", ",", ...
41.333333
16.066667
def get_load(jid):
    '''
    Return the load data that marks a specified jid
    '''
    jid = _escape_jid(jid)
    conn = _get_conn()
    if conn is None:
        return None
    # The original called _close_conn() only after the early `return`
    # for a found row, leaking the connection on the common path.  A
    # try/finally guarantees the connection is always closed.
    try:
        cur = conn.cursor()
        sql = '''SELECT jid, tgt_type, cmd, tgt, kwargs, ret, username, arg,''' \
            ''' fun FROM jids WHERE jid = %s'''
        cur.execute(sql, (jid,))
        data = cur.fetchone()
        if data:
            return _build_dict(data)
        return {}
    finally:
        _close_conn(conn)
[ "def", "get_load", "(", "jid", ")", ":", "jid", "=", "_escape_jid", "(", "jid", ")", "conn", "=", "_get_conn", "(", ")", "if", "conn", "is", "None", ":", "return", "None", "cur", "=", "conn", ".", "cursor", "(", ")", "sql", "=", "'''SELECT jid, tgt_t...
26.235294
20
def _get_level(tag): """ Match the header level in the given tag name, or None if it's not a header tag. """ m = re.match(r'^h([123456])$', tag, flags=re.IGNORECASE) if not m: return None return int(m.group(1))
[ "def", "_get_level", "(", "tag", ")", ":", "m", "=", "re", ".", "match", "(", "r'^h([123456])$'", ",", "tag", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "if", "not", "m", ":", "return", "None", "return", "int", "(", "m", ".", "group", "(", ...
30
12
def make_jagged_equity_info(num_assets,
                            start_date,
                            first_end,
                            frequency,
                            periods_between_ends,
                            auto_close_delta):
    """
    Create a DataFrame representing assets that all begin at the same start
    date, but have cascading end dates.

    Parameters
    ----------
    num_assets : int
        How many assets to create.
    start_date : pd.Timestamp
        The start date for all the assets.
    first_end : pd.Timestamp
        The date at which the first equity will end.
    frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
        Frequency used to interpret the next argument.
    periods_between_ends : int
        Starting after the first end date, end each asset every
        `frequency` * `periods_between_ends`.

    Returns
    -------
    info : pd.DataFrame
        DataFrame representing newly-created assets.
    """
    symbols = [chr(ord('A') + i) for i in range(num_assets)]
    end_dates = pd.date_range(
        first_end,
        freq=(periods_between_ends * frequency),
        periods=num_assets,
    )
    frame = pd.DataFrame(
        {
            'symbol': symbols,
            'start_date': start_date,
            'end_date': end_dates,
            'exchange': 'TEST',
        },
        index=range(num_assets),
    )

    # Explicitly passing None disables the auto_close_date column.
    if auto_close_delta is not None:
        frame['auto_close_date'] = frame['end_date'] + auto_close_delta

    return frame
[ "def", "make_jagged_equity_info", "(", "num_assets", ",", "start_date", ",", "first_end", ",", "frequency", ",", "periods_between_ends", ",", "auto_close_delta", ")", ":", "frame", "=", "pd", ".", "DataFrame", "(", "{", "'symbol'", ":", "[", "chr", "(", "ord",...
32.270833
16.9375
def _get_csrf_token(self):
    """Return the CSRF Token of easyname login form."""
    from bs4 import BeautifulSoup
    home_response = self.session.get(self.URLS['login'])
    self._log('Home', home_response)
    assert home_response.status_code == 200, \
        'Could not load Easyname login page.'

    html = BeautifulSoup(home_response.content, 'html.parser')
    self._log('Home', html)
    # The token is kept in a hidden input field with id "loginxtoken".
    csrf_token_field = html.find('input', {'id': 'loginxtoken'})
    assert csrf_token_field is not None, 'Could not find login token.'
    return csrf_token_field['value']
[ "def", "_get_csrf_token", "(", "self", ")", ":", "from", "bs4", "import", "BeautifulSoup", "home_response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "URLS", "[", "'login'", "]", ")", "self", ".", "_log", "(", "'Home'", ",", "home_respon...
46.153846
14.846154
def fig2x(figure, format):
    """Serialize a matplotlib figure into the given format and return the data."""
    # Save the figure into a file-like buffer.
    buffer = StringIO()
    figure.savefig(buffer, format=format)

    # Rewind and read the whole rendered payload back out.
    buffer.seek(0)
    data = buffer.getvalue()
    buffer.close()

    return data
[ "def", "fig2x", "(", "figure", ",", "format", ")", ":", "# Save svg to file like object svg_io", "io", "=", "StringIO", "(", ")", "figure", ".", "savefig", "(", "io", ",", "format", "=", "format", ")", "# Rewind the file like object", "io", ".", "seek", "(", ...
19
21.357143
def get_normalized_url(url):
    """
    Return ``url`` normalized: default port numbers stripped and params,
    query and fragment removed.  Raises ValueError for non-http(s) schemes.
    """
    scheme, netloc, path, _params, _query, _fragment = urlparse(url)

    # Exclude default port numbers.
    if scheme == 'http' and netloc.endswith(':80'):
        netloc = netloc[:-3]
    elif scheme == 'https' and netloc.endswith(':443'):
        netloc = netloc[:-4]

    if scheme not in ('http', 'https'):
        raise ValueError("Unsupported URL %s (%s)." % (url, scheme))

    # Normalized URL excludes params, query, and fragment.
    return urlunparse((scheme, netloc, path, None, None, None))
[ "def", "get_normalized_url", "(", "url", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "=", "urlparse", "(", "url", ")", "# Exclude default port numbers.", "if", "scheme", "==", "'http'", "and", "netloc", "[", ...
39
16.25
def read_register(self, registeraddress, numberOfDecimals=0, functioncode=3, signed=False):
    """Read an integer from one 16-bit register in the slave, possibly scaling it.

    The slave register holds values in the range 0 to 65535
    ("Unsigned INT16").

    Args:
        * registeraddress (int): The slave register address (decimal, not hex).
        * numberOfDecimals (int): Divide the raw register content by
          ``10**numberOfDecimals`` before returning it (e.g. a stored 770
          with ``numberOfDecimals=1`` is returned as 77.0).
        * functioncode (int): Modbus function code. Can be 3 or 4.
        * signed (bool): Interpret the upper half of the raw range
          (32768 to 65535) as negative values (-32768 to -1, two's
          complement), for registers that may hold negative numbers.

    Returns:
        The register data in numerical value (int or float).

    Raises:
        ValueError, TypeError, IOError
    """
    # Validate the arguments before touching the bus.
    _checkFunctioncode(functioncode, [3, 4])
    _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
    _checkBool(signed, description='signed')
    return self._genericCommand(functioncode, registeraddress,
                                numberOfDecimals=numberOfDecimals,
                                signed=signed)
[ "def", "read_register", "(", "self", ",", "registeraddress", ",", "numberOfDecimals", "=", "0", ",", "functioncode", "=", "3", ",", "signed", "=", "False", ")", ":", "_checkFunctioncode", "(", "functioncode", ",", "[", "3", ",", "4", "]", ")", "_checkInt",...
54.069767
35.813953
def bz2_opener(path, pattern='', verbose=False):
    """Opener that opens single bz2 compressed file.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    """
    source = path if is_url(path) else os.path.abspath(path)
    filename = os.path.basename(path)
    if pattern and not re.match(pattern, filename):
        logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(path), pattern))
        return
    try:
        # Remote files are fetched into memory first; local files are
        # opened directly.
        if is_url(path):
            filehandle = bz2.open(io.BytesIO(urlopen(path).read()))
        else:
            filehandle = bz2.open(path)
        # Probe one byte to verify this really is bz2 data, then rewind.
        filehandle.read(1)
        filehandle.seek(0)
        logger.verbose('Processing file: {}'.format(source))
        yield filehandle
    except (OSError, IOError):
        raise BZ2ValidationError
[ "def", "bz2_opener", "(", "path", ",", "pattern", "=", "''", ",", "verbose", "=", "False", ")", ":", "source", "=", "path", "if", "is_url", "(", "path", ")", "else", "os", ".", "path", ".", "abspath", "(", "path", ")", "filename", "=", "os", ".", ...
36.136364
21.545455
def delete_reference_image(
    self,
    location,
    product_id,
    reference_image_id,
    project_id=None,
    retry=None,
    timeout=None,
    metadata=None,
):
    """
    For the documentation see:
    :py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
    """
    client = self.get_conn()
    self.log.info('Deleting ReferenceImage')
    # Build the fully-qualified resource name expected by the Vision API.
    name = ProductSearchClient.reference_image_path(
        project=project_id, location=location, product=product_id, reference_image=reference_image_id
    )
    response = client.delete_reference_image(name=name, retry=retry, timeout=timeout, metadata=metadata)
    self.log.info('ReferenceImage with the name [%s] deleted.', name)
    # NOTE(review): the delete call presumably returns an (empty) protobuf
    # message; MessageToDict converts it to a plain dict - confirm against
    # the google-cloud-vision API.
    return MessageToDict(response)
[ "def", "delete_reference_image", "(", "self", ",", "location", ",", "product_id", ",", "reference_image_id", ",", "project_id", "=", "None", ",", "retry", "=", "None", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ",", ")", ":", "client", "=",...
35.913043
23.826087
def ReplaceHomoglyphs(s):
    """Returns s with unicode homoglyphs replaced by ascii equivalents."""
    homoglyphs = {
        '\xa0': ' ',  # &nbsp; ?
        '\u00e3': '',  # TODO(gsfowler) drop after .proto spurious char elided
        '\u00a0': ' ',  # &nbsp; ?
        '\u00a9': '(C)',  # COPYRIGHT SIGN (would you believe "asciiglyph"?)
        '\u00ae': '(R)',  # REGISTERED SIGN (would you believe "asciiglyph"?)
        '\u2014': '-',  # EM DASH
        '\u2018': "'",  # LEFT SINGLE QUOTATION MARK
        '\u2019': "'",  # RIGHT SINGLE QUOTATION MARK
        '\u201c': '"',  # LEFT DOUBLE QUOTATION MARK
        '\u201d': '"',  # RIGHT DOUBLE QUOTATION MARK
        '\u2026': '...',  # HORIZONTAL ELLIPSIS
        '\u2e3a': '-',  # TWO-EM DASH
    }

    def _ascii_for(ch):
        """Map one char to its homoglyph, itself if ascii, or an escape."""
        replacement = homoglyphs.get(ch)
        if replacement is not None:
            return replacement
        try:
            ch.encode('ascii')
            return ch
        except UnicodeError:
            pass
        try:
            return ch.encode('unicode-escape').decode('ascii')
        except UnicodeError:
            return '?'

    return ''.join(_ascii_for(ch) for ch in s)
[ "def", "ReplaceHomoglyphs", "(", "s", ")", ":", "homoglyphs", "=", "{", "'\\xa0'", ":", "' '", ",", "# &nbsp; ?", "'\\u00e3'", ":", "''", ",", "# TODO(gsfowler) drop after .proto spurious char elided", "'\\u00a0'", ":", "' '", ",", "# &nbsp; ?", "'\\u00a9'", ":", ...
36.212121
17.454545
def save(self):
    # type: () -> None
    """Persist every parser modified since loading to its backing file."""
    self._ensure_have_load_only()

    for path, parser in self._modified_parsers:
        logger.info("Writing to %s", path)

        # Make sure the containing directory exists before writing.
        ensure_dir(os.path.dirname(path))

        with open(path, "w") as stream:
            parser.write(stream)
[ "def", "save", "(", "self", ")", ":", "# type: () -> None", "self", ".", "_ensure_have_load_only", "(", ")", "for", "fname", ",", "parser", "in", "self", ".", "_modified_parsers", ":", "logger", ".", "info", "(", "\"Writing to %s\"", ",", "fname", ")", "# En...
27.5
14.214286
def grab_xml(host, token=None):
    """Grab XML data from Gateway, returned as a dict."""
    # NOTE(review): certificate verification is disabled below
    # (verify=False); the warning suppression here hides that fact.
    urllib3.disable_warnings()
    # With a token we talk HTTPS; without one we fall back to HTTP with a
    # dummy token.
    if token:
        scheme = "https"
    if not token:
        scheme = "http"
        token = "1234567890"
    url = (
        scheme + '://' + host + '/gwr/gop.php?cmd=GWRBatch&data=<gwrcmds><gwrcmd><gcmd>RoomGetCarousel</gcmd><gdata><gip><version>1</version><token>' + token + '</token><fields>name,status</fields></gip></gdata></gwrcmd></gwrcmds>&fmt=xml')
    response = requests.get(url, verify=False)
    # force_list guarantees 'room'/'device' entries are always lists, even
    # when the gateway returns a single element.
    parsed = xmltodict.parse(response.content, force_list={'room', 'device'})
    parsed = parsed['gwrcmds']['gwrcmd']['gdata']['gip']['room']
    return parsed
[ "def", "grab_xml", "(", "host", ",", "token", "=", "None", ")", ":", "urllib3", ".", "disable_warnings", "(", ")", "if", "token", ":", "scheme", "=", "\"https\"", "if", "not", "token", ":", "scheme", "=", "\"http\"", "token", "=", "\"1234567890\"", "url"...
48.714286
31.214286
def save(self, *args, **kwargs):
    """
    **uid**: :code:`electiontype:{name}`
    """
    # Derive the stable unique id from the slug before persisting.
    self.uid = 'electiontype:{}'.format(self.slug)
    super(ElectionType, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "uid", "=", "'electiontype:{}'", ".", "format", "(", "self", ".", "slug", ")", "super", "(", "ElectionType", ",", "self", ")", ".", "save", "(", "*", "a...
34.5
6.833333
def convert(dbus_obj):
    """Converts dbus_obj from dbus type to python type.

    :param dbus_obj: dbus object.
    :returns: dbus_obj in python type.
    """
    _isinstance = partial(isinstance, dbus_obj)
    # Each ConvertType pairs a python constructor with the dbus types it
    # converts.
    ConvertType = namedtuple('ConvertType', 'pytype dbustypes')

    pyint = ConvertType(int, (dbus.Byte, dbus.Int16, dbus.Int32, dbus.Int64,
                              dbus.UInt16, dbus.UInt32, dbus.UInt64))
    pybool = ConvertType(bool, (dbus.Boolean, ))
    pyfloat = ConvertType(float, (dbus.Double, ))
    # Container types are converted element-wise via recursion.
    pylist = ConvertType(lambda _obj: list(map(convert, dbus_obj)),
                         (dbus.Array, ))
    pytuple = ConvertType(lambda _obj: tuple(map(convert, dbus_obj)),
                          (dbus.Struct, ))
    types_str = (dbus.ObjectPath, dbus.Signature, dbus.String)
    if not PY3:
        # dbus.UTF8String only exists on python 2.
        types_str += (dbus.UTF8String,)
    pystr = ConvertType(str if PY3 else unicode, types_str)

    pydict = ConvertType(
        lambda _obj: dict(zip(map(convert, dbus_obj.keys()),
                              map(convert, dbus_obj.values())
                              )
                          ),
        (dbus.Dictionary, )
    )

    # Apply the first converter whose dbus types match; fall through to
    # returning the object unchanged.
    for conv in (pyint, pybool, pyfloat, pylist, pytuple, pystr, pydict):
        if any(map(_isinstance, conv.dbustypes)):
            return conv.pytype(dbus_obj)
    else:
        return dbus_obj
[ "def", "convert", "(", "dbus_obj", ")", ":", "_isinstance", "=", "partial", "(", "isinstance", ",", "dbus_obj", ")", "ConvertType", "=", "namedtuple", "(", "'ConvertType'", ",", "'pytype dbustypes'", ")", "pyint", "=", "ConvertType", "(", "int", ",", "(", "d...
38.028571
18.257143
def system_find_analyses(input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /system/findAnalyses API method.

    ``input_params`` defaults to an empty query; the previous mutable
    default argument (``{}``) was replaced with a ``None`` sentinel to
    avoid the shared-mutable-default pitfall.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindAnalyses
    """
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/system/findAnalyses', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_find_analyses", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/findAnalyses'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", ...
50.285714
31.142857
def debug(cls, message):
    """
    Echo *message* as a debug line when the verbosity level allows it.
    """
    if cls.verbose > 1:
        cls.echo('[DEBUG] %s' % message)
[ "def", "debug", "(", "cls", ",", "message", ")", ":", "if", "cls", ".", "verbose", ">", "1", ":", "msg", "=", "'[DEBUG] %s'", "%", "message", "cls", ".", "echo", "(", "msg", ")" ]
36.2
8.8
def as_block_string(txt):
    """Return a string formatted as a python block comment string, like the
    one you're currently reading. Special characters are escaped if
    necessary.
    """
    import json
    escaped = []
    for raw_line in txt.split('\n'):
        # json.dumps escapes quotes/backslashes; strip its surrounding
        # double quotes and any trailing whitespace.
        escaped.append(json.dumps(raw_line)[1:-1].rstrip())
    return '"""\n%s\n"""' % '\n'.join(escaped)
[ "def", "as_block_string", "(", "txt", ")", ":", "import", "json", "lines", "=", "[", "]", "for", "line", "in", "txt", ".", "split", "(", "'\\n'", ")", ":", "line_", "=", "json", ".", "dumps", "(", "line", ")", "line_", "=", "line_", "[", "1", ":"...
31.307692
17.461538
def get_property_func(key):
    """
    Get the accessor function for an instance to look for `key`.

    Look for it as an attribute, and if that does not work, look to see if
    it is a tag.
    """
    def lookup(obj):
        try:
            return getattr(obj, key)
        except AttributeError:
            return obj.tags.get(key)
    return lookup
[ "def", "get_property_func", "(", "key", ")", ":", "def", "get_it", "(", "obj", ")", ":", "try", ":", "return", "getattr", "(", "obj", ",", "key", ")", "except", "AttributeError", ":", "return", "obj", ".", "tags", ".", "get", "(", "key", ")", "return...
26.615385
17.692308
def _is_field_serializable(self, field_name): """Return True if the field can be serialized into a JSON doc.""" return ( self._meta.get_field(field_name).get_internal_type() in self.SIMPLE_UPDATE_FIELD_TYPES )
[ "def", "_is_field_serializable", "(", "self", ",", "field_name", ")", ":", "return", "(", "self", ".", "_meta", ".", "get_field", "(", "field_name", ")", ".", "get_internal_type", "(", ")", "in", "self", ".", "SIMPLE_UPDATE_FIELD_TYPES", ")" ]
42
14.833333
def convert_scalar_multiply(net, node, model, builder):
    """Convert a scalar multiply layer from mxnet to coreml.

    Parameters
    ----------
    net: network
        A mxnet network object.

    node: layer
        Node to convert.

    model: model
        An model for MXNet

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    import numpy as _np
    input_name, output_name = _get_input_output_name(net, node)
    layer_name = node['name']
    attrs = _get_attr(node)
    # The mxnet scalar becomes a single-element scale weight in coreml.
    scale = _np.array([float(attrs['scalar'])])
    builder.add_scale(name=layer_name, input_name=input_name,
                      output_name=output_name, W=scale,
                      has_bias=False, b=None)
[ "def", "convert_scalar_multiply", "(", "net", ",", "node", ",", "model", ",", "builder", ")", ":", "import", "numpy", "as", "_np", "input_name", ",", "output_name", "=", "_get_input_output_name", "(", "net", ",", "node", ")", "name", "=", "node", "[", "'na...
27.75
19.791667
def _min_conflicts_value(problem, assignment, variable):
    '''
    Return the value that generates the fewest conflicts.  In case of tie,
    a random value is selected among this values subset.
    '''
    def conflicts_for(value):
        return _count_conflicts(problem, assignment, variable, value)
    return argmin(problem.domains[variable], conflicts_for)
[ "def", "_min_conflicts_value", "(", "problem", ",", "assignment", ",", "variable", ")", ":", "return", "argmin", "(", "problem", ".", "domains", "[", "variable", "]", ",", "lambda", "x", ":", "_count_conflicts", "(", "problem", ",", "assignment", ",", "varia...
51.166667
33.166667
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None, number_of_tapers=None, weights="adaptive", demean=True, fmax=0.0): """ Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra. 
""" npts = len(data_a) if len(data_b) != npts: raise ValueError("Input arrays must have the same length!") if nfft is None: nfft = npts elif nfft < npts: raise ValueError("nfft must be larger then the number of samples in " "the array.") # Deconvolution utilizes the 32bit version. mt = _MtspecType("float32") # Use the optimal number of tapers in case no number is specified. if number_of_tapers is None: number_of_tapers = int(2 * time_bandwidth) - 1 # Transform the data to work with the library. data_a = np.require(data_a, mt.float, requirements=[mt.order]) data_b = np.require(data_b, mt.float, requirements=[mt.order]) nf = nfft // 2 + 1 # Internally uses integers if demean: demean = 1 else: demean = 0 # iad = 0 are adaptive, iad = 1 are constant weight - this is # counter intuitive. if weights == "constant": adaptive = 1 elif weights == "adaptive": adaptive = 0 else: raise ValueError('Weights must be either "adaptive" or "constant".') tfun = mt.empty(nfft) freq = mt.empty(nf) spec_ratio = mt.empty(nf) speci = mt.empty(nf) specj = mt.empty(nf) mtspeclib.mt_deconv_( C.byref(C.c_int(int(npts))), C.byref(C.c_int(int(nfft))), C.byref(C.c_float(float(delta))), mt.p(data_a), mt.p(data_b), C.byref(C.c_float(float(time_bandwidth))), C.byref(C.c_int(int(number_of_tapers))), C.byref(C.c_int(int(nf))), C.byref(C.c_int(adaptive)), mt.p(freq), mt.p(tfun), mt.p(spec_ratio), mt.p(speci), mt.p(specj), C.byref(C.c_int(demean)), C.byref(C.c_float(fmax))) return { "frequencies": freq, "deconvolved": tfun, "spectral_ratio": spec_ratio, "spectrum_a": speci, "spectrum_b": specj }
[ "def", "mt_deconvolve", "(", "data_a", ",", "data_b", ",", "delta", ",", "nfft", "=", "None", ",", "time_bandwidth", "=", "None", ",", "number_of_tapers", "=", "None", ",", "weights", "=", "\"adaptive\"", ",", "demean", "=", "True", ",", "fmax", "=", "0....
32.472441
21.023622
def run_chunk(environ, lowstate): ''' Expects a list of lowstate dictionaries that are executed and returned in order ''' client = environ['SALT_APIClient'] for chunk in lowstate: yield client.run(chunk)
[ "def", "run_chunk", "(", "environ", ",", "lowstate", ")", ":", "client", "=", "environ", "[", "'SALT_APIClient'", "]", "for", "chunk", "in", "lowstate", ":", "yield", "client", ".", "run", "(", "chunk", ")" ]
25.333333
22.888889
def heritability(args): """ %prog pg.tsv MZ-twins.csv DZ-twins.csv Plot composite figures ABCD on absolute difference of 4 traits, EFGH on heritability of 4 traits. The 4 traits are: telomere length, ccn.chrX, ccn.chrY, TRA.PPM """ p = OptionParser(heritability.__doc__) opts, args, iopts = p.set_image_options(args, figsize="12x18") if len(args) != 3: sys.exit(not p.print_help()) combined, mz, dz = args # Prepare twins data def get_pairs(filename): with open(filename) as fp: for row in fp: yield row.strip().split(",") MZ = list(get_pairs(mz)) DZ = list(get_pairs(dz)) print(len(MZ), "monozygotic twins") print(len(DZ), "dizygotic twins") df = pd.read_csv(combined, sep="\t", index_col=0) df["Sample name"] = np.array(df["Sample name"], dtype=np.str) gender = extract_trait(df, "Sample name", "hli_calc_gender") sameGenderMZ = list(filter_same_gender(MZ, gender)) sameGenderDZ = list(filter_same_gender(DZ, gender)) composite(df, sameGenderMZ, sameGenderDZ, size=(iopts.w, iopts.h)) logging.getLogger().setLevel(logging.CRITICAL) savefig("heritability.pdf")
[ "def", "heritability", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "heritability", ".", "__doc__", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"12x18\"", ")", "if", "len", "(...
31.702703
18.189189
def kill_eaters(self): """ Returns a list of tuples containing the proper localized kill eater type strings and their values according to set/type/value "order" """ eaters = {} ranktypes = self._kill_types for attr in self: aname = attr.name.strip() aid = attr.id if aname.startswith("kill eater"): try: # Get the name prefix (matches up type and score and # determines the primary type for ranking) eateri = list(filter(None, aname.split(' ')))[-1] if eateri.isdigit(): eateri = int(eateri) else: # Probably the primary type/score which has no number eateri = 0 except IndexError: # Fallback to attr ID (will completely fail to make # anything legible but better than nothing) eateri = aid if aname.find("user") != -1: # User score types have lower sorting priority eateri += 100 eaters.setdefault(eateri, [None, None]) if aname.find("score type") != -1 or aname.find("kill type") != -1: # Score type attribute if eaters[eateri][0] is None: eaters[eateri][0] = attr.value else: # Value attribute eaters[eateri][1] = attr.value eaterlist = [] defaultleveldata = "KillEaterRank" for key, eater in sorted(eaters.items()): etype, count = eater # Eater type can be null (it still is in some older items), null # count means we're looking at either an uninitialized item or # schema item if count is not None: rank = ranktypes.get(etype or 0, {"level_data": defaultleveldata, "type_name": "Count"}) eaterlist.append((rank.get("level_data", defaultleveldata), rank["type_name"], count)) return eaterlist
[ "def", "kill_eaters", "(", "self", ")", ":", "eaters", "=", "{", "}", "ranktypes", "=", "self", ".", "_kill_types", "for", "attr", "in", "self", ":", "aname", "=", "attr", ".", "name", ".", "strip", "(", ")", "aid", "=", "attr", ".", "id", "if", ...
39.403509
19.508772
async def send_mail( self, sender, recipients, message, mail_options=None, rcpt_options=None ): """ Alias for :meth:`SMTP.sendmail`. """ return await self.sendmail( sender, recipients, message, mail_options, rcpt_options )
[ "async", "def", "send_mail", "(", "self", ",", "sender", ",", "recipients", ",", "message", ",", "mail_options", "=", "None", ",", "rcpt_options", "=", "None", ")", ":", "return", "await", "self", ".", "sendmail", "(", "sender", ",", "recipients", ",", "...
30.888889
17.333333
def ssad(patch, cols, splits): """ Calculates an empirical intra-specific spatial abundance distribution Parameters ---------- {0} Returns ------- {1} Result has one column giving the individuals of species in each subplot. Notes ----- {2} {3} Examples -------- {4} >>> # Get the spatial abundance distribution for all species for each of >>> # the cells in the ANBO plot >>> all_spp_ssads = meco.empirical.ssad(pat, cols='spp_col:spp; count_col:count', splits='row:4; column:4') >>> # Convert to dict for easy searching >>> all_ssads_dict = dict(all_spp_ssads) >>> # Look up the spatial abundance distribution for 'grass' >>> all_ssads_dict['grass'] y 0 42 1 20 2 60 3 60 4 88 5 86 6 20 7 0 8 110 9 12 10 115 11 180 12 160 13 120 14 26 15 11 >>> # Each value in 'y' gives the abundance of grass in one of the 16 cells See http://www.macroeco.org/tutorial_macroeco.html for additional examples and explanation """ # Get and check SAD sad_results = sad(patch, cols, splits, clean=False) # Create dataframe with col for spp name and numbered col for each split for i, sad_result in enumerate(sad_results): if i == 0: # For first result, create dataframe fulldf = sad_result[1] fulldf.columns = ['spp', '0'] # Renames y col to 0 else: # For other results, append col to dataframe, named by num fulldf[str(i)] = sad_result[1]['y'] # Get each spp SSAD (row of fulldf) and append as tuple in result_list result_list = [] for _, row in fulldf.iterrows(): row_values_array = np.array(row[1:], dtype=float) result_list.append((row[0], pd.DataFrame({'y': row_values_array}))) # Return all results return result_list
[ "def", "ssad", "(", "patch", ",", "cols", ",", "splits", ")", ":", "# Get and check SAD", "sad_results", "=", "sad", "(", "patch", ",", "cols", ",", "splits", ",", "clean", "=", "False", ")", "# Create dataframe with col for spp name and numbered col for each split"...
24.298701
27.701299
def delete_cloud_integration(self, id, **kwargs): # noqa: E501 """Delete a specific cloud integration # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_cloud_integration(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :return: ResponseContainerCloudIntegration If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.delete_cloud_integration_with_http_info(id, **kwargs) # noqa: E501 return data
[ "def", "delete_cloud_integration", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "del...
43.238095
20.047619
def expect_exitstatus(self, exit_status): """Wait for the running program to finish and expect some exit status. Args: exit_status (int): The expected exit status. Raises: WrongExitStatusException: The produced exit status is not the expected one. """ self.expect_end() logger.debug("Checking exit status of '{0}', output so far: {1}".format( self.name, self.get_output())) if self._spawn.exitstatus is None: raise WrongExitStatusException( instance=self, expected=exit_status, output=self.get_output()) if self._spawn.exitstatus is not exit_status: raise WrongExitStatusException( instance=self, expected=exit_status, got=self._spawn.exitstatus, output=self.get_output())
[ "def", "expect_exitstatus", "(", "self", ",", "exit_status", ")", ":", "self", ".", "expect_end", "(", ")", "logger", ".", "debug", "(", "\"Checking exit status of '{0}', output so far: {1}\"", ".", "format", "(", "self", ".", "name", ",", "self", ".", "get_outp...
39.045455
16.818182
def separate_particles_into_groups(s, region_size=40, bounds=None): """ Given a state, returns a list of groups of particles. Each group of particles are located near each other in the image. Every particle located in the desired region is contained in exactly 1 group. Parameters: ----------- s : state The PERI state to find particles in. region_size: int or list of ints The size of the box. Groups particles into boxes of shape region_size. If region_size is a scalar, the box is a cube of length region_size. Default is 40. bounds: 2-element list-like of 3-element lists. The sub-region of the image over which to look for particles. bounds[0]: The lower-left corner of the image region. bounds[1]: The upper-right corner of the image region. Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire image size, i.e. the default places every particle in the image somewhere in the groups. Returns: ----------- particle_groups: list Each element of particle_groups is an int numpy.ndarray of the group of nearby particles. Only contains groups with a nonzero number of particles, so the elements don't necessarily correspond to a given image region. """ imtile = ( s.oshape.translate(-s.pad) if bounds is None else util.Tile(bounds[0], bounds[1]) ) # does all particle including out of image, is that correct? region = util.Tile(region_size, dim=s.dim) trange = np.ceil(imtile.shape.astype('float') / region.shape) translations = util.Tile(trange).coords(form='vector') translations = translations.reshape(-1, translations.shape[-1]) groups = [] positions = s.obj_get_positions() for v in translations: tmptile = region.copy().translate(region.shape * v - s.pad) groups.append(find_particles_in_tile(positions, tmptile)) return [g for g in groups if len(g) > 0]
[ "def", "separate_particles_into_groups", "(", "s", ",", "region_size", "=", "40", ",", "bounds", "=", "None", ")", ":", "imtile", "=", "(", "s", ".", "oshape", ".", "translate", "(", "-", "s", ".", "pad", ")", "if", "bounds", "is", "None", "else", "u...
38.862745
23.960784
def export_settings(settings, config_path): """ Export the given settings instance to the given file system path. type settings: IDASettingsInterface type config_path: str """ other = QtCore.QSettings(config_path, QtCore.QSettings.IniFormat) for k, v in settings.iteritems(): other.setValue(k, v)
[ "def", "export_settings", "(", "settings", ",", "config_path", ")", ":", "other", "=", "QtCore", ".", "QSettings", "(", "config_path", ",", "QtCore", ".", "QSettings", ".", "IniFormat", ")", "for", "k", ",", "v", "in", "settings", ".", "iteritems", "(", ...
32.4
13.2
async def get_artifacts(self, agent=None): '''Return artifacts published to the environment. :param agent: If not ``None``, then returns only artifacts created by the agent. :returns: All artifacts published (by the agent). :rtype: list If environment has a :attr:`manager` agent, e.g. it is a slave environment in a :class:`~creamas.mp.MultiEnvironment`, then the manager's :meth:`~creamas.mp.EnvManager.get_artifacts` is called. ''' # TODO: Figure better way for this if hasattr(self, 'manager') and self.manager is not None: artifacts = await self.manager.get_artifacts() else: artifacts = self.artifacts if agent is not None: artifacts = [a for a in artifacts if agent.name == a.creator] return artifacts
[ "async", "def", "get_artifacts", "(", "self", ",", "agent", "=", "None", ")", ":", "# TODO: Figure better way for this", "if", "hasattr", "(", "self", ",", "'manager'", ")", "and", "self", ".", "manager", "is", "not", "None", ":", "artifacts", "=", "await", ...
40.142857
23.380952
def ready_print(worker, output, error): # pragma : no cover """Local test helper.""" global COUNTER COUNTER += 1 print(COUNTER, output, error)
[ "def", "ready_print", "(", "worker", ",", "output", ",", "error", ")", ":", "# pragma : no cover", "global", "COUNTER", "COUNTER", "+=", "1", "print", "(", "COUNTER", ",", "output", ",", "error", ")" ]
31
14.6