Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,800
def partial_imap_1to1(func, si_func): @functools.wraps(si_func) def wrapper(input_): if not util_iter.isiterable(input_): return func(si_func(input_)) else: return list(map(func, si_func(input_))) set_funcname(wrapper, util_str.get_callable_name(func) + + get_funcname(si_func)) return wrapper
a bit messy DEPRICATE
371,801
def parameterized_expectations(model, verbose=False, initial_dr=None, pert_order=1, with_complementarities=True, grid={}, distribution={}, maxit=100, tol=1e-8, inner_maxit=100, direct=False): def vprint(t): if verbose: print(t) g = model.functions[] h = model.functions[] f = model.functions[] parms = model.calibration[] if direct is True: d = model.functions[] approx = model.get_grid(**grid) grid = approx.grid interp_type = approx.interpolation dr = create_interpolator(approx, interp_type) expect = create_interpolator(approx, interp_type) distrib = model.get_distribution(**distribution) nodes, weights = distrib.discretize() N = grid.shape[0] if initial_dr is None: if pert_order == 1: initial_dr = approximate_controls(model) if pert_order > 1: raise Exception("Perturbation order > 1 not supported (yet).") x_0 = initial_dr(grid) x_0 = x_0.real z_0 = np.zeros((N, len(model.symbols[]))) z_new = np.zeros((N, len(model.symbols[]))) xxnext = np.zeros((x_0.shape[0], x_0.shape[1], weights.shape[0])) for i in range(weights.shape[0]): e = nodes[i, :] ssnext = g(grid, x_0, e, parms) xxnext[:, :, i] = initial_dr(ssnext) z_0 += weights[i]*h(ssnext, xxnext[:, :, i], parms) t1 = time.time() it = 0 err = 10 err_0 = 10 verbit = True if verbose == else False if with_complementarities is True: lbfun = model.functions[] ubfun = model.functions[] lb = lbfun(grid, parms) ub = ubfun(grid, parms) else: lb = None ub = None if verbose: headline = headline = headline.format(, , , ) stars = *len(headline) print(stars) print(headline) print(stars) fmt_str = while err > tol and it <= maxit: it += 1 t_start = time.time() expect.set_values(z_0) xxnext[...] 
= 0 if direct is True: xx = d(grid, expect(grid), parms) for i in range(weights.shape[0]): e = nodes[i, :] ssnext = g(grid, xx, e, parms) xxnext[:, :, i] = d(ssnext, expect(ssnext), parms) if with_complementarities is True: xx = np.minimum(xx, ub) xx = np.maximum(xx, lb) for i in range(weights.shape[0]): xxnext[:, :, i] = np.minimum(xxnext[:, :, i], ub) xxnext[:, :, i] = np.maximum(xxnext[:, :, i], lb) else: def fun(x): return f(grid, x, expect(grid), parms) sdfun = SerialDifferentiableFunction(fun) if with_complementarities is True: [xx, nit] = ncpsolve(sdfun, lb, ub, x_0, verbose=verbit, maxit=inner_maxit) dr.set_values(xx) for i in range(weights.shape[0]): e = nodes[i, :] ssnext = g(grid, xx, e, parms) xxnext[:, :, i] = dr(ssnext) xxnext[:, :, i] = np.minimum(xxnext[:, :, i], ub) xxnext[:, :, i] = np.maximum(xxnext[:, :, i], lb) else: [xx, nit] = serial_newton(sdfun, x_0, verbose=verbit) dr.set_values(xx) for i in range(weights.shape[0]): e = nodes[i, :] ssnext = g(grid, xx, e, parms) xxnext[:, :, i] = dr(ssnext) z_new[...] = 0 for i in range(weights.shape[0]): e = nodes[i, :] ssnext = g(grid, xx, e, parms) z_new += weights[i]*h(ssnext, xxnext[:, :, i], parms) err = (abs(z_new - z_0).max()) z_0 = z_new.copy() x_0 = xx err_SA = err/err_0 err_0 = err t_finish = time.time() elapsed = t_finish - t_start if verbose: print(fmt_str.format(it, err, err_SA, elapsed)) if it == maxit: import warnings warnings.warn(UserWarning("Maximum number of iterations reached")) t2 = time.time() if verbose: print(stars) print(.format(t2 - t1)) print(stars) dr.set_values(x_0) return dr
Find global solution for ``model`` via parameterized expectations. Controls must be expressed as a direct function of equilibrium objects. Algorithm iterates over the expectations function in the arbitrage equation. Parameters: ---------- model : NumericModel ``dtcscc`` model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid : grid options distribution : distribution options maxit : maximum number of iterations tol : tolerance criterium for successive approximations inner_maxit : maximum number of iteration for inner solver direct : if True, solve with direct method. If false, solve indirectly Returns ------- decision rule : approximated solution
371,802
def hash(self, value): result = 0 for i in range(len(value)): result += self.seed * result + ord(value[i]) return (self.capacity - 1) % result
function hash() implement to acquire hash value that use simply method that weighted sum. Parameters: ----------- value: string the value is param of need acquire hash Returns: -------- result hash code for value
371,803
def run(self, args, options): self.pre_run(args, options) args_and_options = args.copy() args_and_options.update(options) uploadchannel(self, **args_and_options)
This function calls uploadchannel which performs all the run steps: - Create ChannelNode - Pupulate Tree with TopicNodes, ContentNodes, and associated File objects - . - .. - ... Args: args (dict): ricecooker command line arguments options (dict): additional compatibility mode options given on command line
371,804
def generate_output_network(self, json_data=None, hr=True, show_name=False, colorize=True): if json_data is None: json_data = {} output = generate_output( line=, short=HR_RDAP[][] if hr else , name=HR_RDAP[][] if (hr and show_name) else None, is_parent=True, colorize=colorize ) for key, val in json_data[].items(): if key in [, ]: output += self.generate_output_list( source=, key=key, val=val, line=, hr=hr, show_name=show_name, colorize=colorize ) elif key in [, ]: output += self.generate_output_notices( source=, key=key, val=val, line=, hr=hr, show_name=show_name, colorize=colorize ) elif key == : output += self.generate_output_events( source=, key=key, val=val, line=, hr=hr, show_name=show_name, colorize=colorize ) elif key not in []: output += generate_output( line=, short=HR_RDAP[][key][] if hr else key, name=HR_RDAP[][key][] if ( hr and show_name) else None, value=val, colorize=colorize ) return output
The function for generating CLI output RDAP network results. Args: json_data (:obj:`dict`): The data to process. Defaults to None. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
371,805
def set_text(self, text=None): if text is None: raise NullArgument() if self.get_text_metadata().is_read_only(): raise NoAccess() if not self.my_osid_object_form._is_valid_string( text, self.get_text_metadata()): raise InvalidArgument() self.my_osid_object_form._my_map[][] = text
stub
371,806
def get_course_current_grades(self, course_id): resp = self.requester.get( urljoin( self.base_url, .format(course_key=course_id) ) ) resp.raise_for_status() resp_json = resp.json() if in resp_json: grade_entries = [CurrentGrade(entry) for entry in resp_json["results"]] while resp_json[] is not None: resp = self.requester.get(resp_json[]) resp.raise_for_status() resp_json = resp.json() grade_entries.extend((CurrentGrade(entry) for entry in resp_json["results"])) else: grade_entries = [CurrentGrade(entry) for entry in resp_json] return CurrentGradesByCourse(grade_entries)
Returns a CurrentGradesByCourse object for all users in the specified course. Args: course_id (str): an edX course ids. Returns: CurrentGradesByCourse: object representing the student current grades Authorization: The authenticated user must have staff permissions to see grades for all users in a course.
371,807
def save_formset(self, request, form, formset, change): instances = formset.save(commit=False) for instance in instances: if isinstance(instance, Photo): instance.author = request.user instance.save()
For each photo set it's author to currently authenticated user.
371,808
def setdefault(self, key, value): try: super(FlaskConfigStorage, self).setdefault(key, value) except RuntimeError: self._defaults.__setitem__(key, value)
We may not always be connected to an app, but we still need to provide a way to the base environment to set it's defaults.
371,809
def synchronize(self, verbose=False): if self.__path is None: return for dirPath in sorted(list(self.walk_directories_relative_path())): realPath = os.path.join(self.__path, dirPath) if os.path.isdir(realPath): continue if verbose: warnings.warn("%s directory is missing"%realPath) keys = dirPath.split(os.sep) dirInfoDict = self for idx in range(len(keys)-1): dirs = dict.get(dirInfoDict, , None) if dirs is None: break dirInfoDict = dict.get(dirs, keys[idx], None) if dirInfoDict is None: break if dirInfoDict is not None: dirs = dict.get(dirInfoDict, , None) if dirs is not None: dict.pop( dirs, keys[-1], None ) for filePath in sorted(list(self.walk_files_relative_path())): realPath = os.path.join(self.__path, filePath) if os.path.isfile( realPath ): continue if verbose: warnings.warn("%s file is missing"%realPath) keys = filePath.split(os.sep) dirInfoDict = self for idx in range(len(keys)-1): dirs = dict.get(dirInfoDict, , None) if dirs is None: break dirInfoDict = dict.get(dirs, keys[idx], None) if dirInfoDict is None: break if dirInfoDict is not None: files = dict.get(dirInfoDict, , None) if files is not None: dict.pop( files, keys[-1], None )
Synchronizes the Repository information with the directory. All registered but missing files and directories in the directory, will be automatically removed from the Repository. :parameters: #. verbose (boolean): Whether to be warn and inform about any abnormalities.
371,810
def _ctab(stream): yield CtabBlockStart() counts_line = stream.popleft() counts_line_values = [counts_line[i:i + 3].strip() for i in range(0, len(counts_line) - 6, 3)] + \ [counts_line[-6:len(counts_line)].strip()] ctab_counts_line = CtabCountsLine(*counts_line_values) yield ctab_counts_line number_of_atoms = ctab_counts_line.number_of_atoms number_of_bonds = ctab_counts_line.number_of_bonds for token in _ctab_atom_bond_block(number_of_lines=number_of_atoms, block_type=CtabAtomBlockLine, stream=stream): yield token for token in _ctab_atom_bond_block(number_of_lines=number_of_bonds, block_type=CtabBondBlockLine, stream=stream): yield token for token in _ctab_property_block(stream=stream): yield token yield CtabBlockEnd()
Process ``Ctab``. :param stream: Queue containing lines of text. :type stream: :py:class:`collections.deque` :return: Tuples of data.
371,811
def image_show(id=None, name=None, profile=None): * g_client = _auth(profile) ret = {} if name: for image in g_client.images.list(): if image.name == name: id = image.id continue if not id: return { : False, : {0}\.format(name) } try: image = g_client.images.get(id) except exc.HTTPNotFound: return { : False, : .format(id) } log.debug( , image.name, pprint.PrettyPrinter(indent=4).pformat(image) ) schema = image_schema(profile=profile) if len(schema.keys()) == 1: schema = schema[] for key in schema: if key in image: ret[key] = image[key] return ret
Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show
371,812
def getChannel(self, channel_id, **kwargs): channelinfo = yield self.call(, channel_id, **kwargs) channel = Channel.fromDict(channelinfo) if channel: channel.connection = self defer.returnValue(channel)
Load all information about a channel and return a custom Channel class. Calls "getChannel" XML-RPC. :param channel_id: ``int``, for example 12345, or ``str`` for name. :returns: deferred that when fired returns a Channel (Munch, dict-like) object representing this Koji channel, or None if no channel was found.
371,813
def get_xy(self, xy, addr=True): x = xy[0] y = xy[1] if x < self.origin[0] or x >= self.origin[0] + self.gs[0]: raise ValueError("x-coordinate inappropriate ({})".format(x)) if y < self.origin[1] or y >= self.origin[1] + self.gs[1]: raise ValueError("y-coordinate inappropriate ({})".format(y)) i = x - self.origin[0] j = y - self.origin[1] if addr: return self.grid[i][j].addr return self.grid[i][j]
Get the agent with xy-coordinate in the grid. If *addr* is True, returns only the agent's address. If no such agent in the grid, returns None. :raises: :exc:`ValueError` if xy-coordinate is outside the environment's grid.
371,814
def _serialize_datetime(value): if not isinstance(value, (datetime, arrow.Arrow)): raise ValueError(u u.format(type(value), value)) return value.isoformat()
Serialize a DateTime object to its proper ISO-8601 representation.
371,815
def hmget(key, *fields, **options): * host = options.get(, None) port = options.get(, None) database = options.get(, None) password = options.get(, None) server = _connect(host, port, database, password) return server.hmget(key, *fields)
Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2
371,816
def _lookup_abs(self, p, klass, create=1): k = _my_normcase(p) try: result = self._lookupDict[k] except KeyError: if not create: msg = "No such file or directory: in (and create is False)" % (p, str(self)) raise SCons.Errors.UserError(msg) dir_node = self._lookup_abs(dir_name, Dir) result = klass(file_name, dir_node, self.fs) result.diskcheck_match() self._lookupDict[k] = result dir_node.entries[_my_normcase(file_name)] = result dir_node.implicit = None else: result.must_be_same(klass) return result
Fast (?) lookup of a *normalized* absolute path. This method is intended for use by internal lookups with already-normalized path data. For general-purpose lookups, use the FS.Entry(), FS.Dir() or FS.File() methods. The caller is responsible for making sure we're passed a normalized absolute path; we merely let Python's dictionary look up and return the One True Node.FS object for the path. If a Node for the specified "p" doesn't already exist, and "create" is specified, the Node may be created after recursive invocation to find or create the parent directory or directories.
371,817
def dskx02(handle, dladsc, vertex, raydir): handle = ctypes.c_int(handle) vertex = stypes.toDoubleVector(vertex) raydir = stypes.toDoubleVector(raydir) plid = ctypes.c_int() xpt = stypes.emptyDoubleVector(3) found = ctypes.c_int() libspice.dskx02_c(handle, ctypes.byref(dladsc), vertex, raydir, ctypes.byref(plid), xpt, ctypes.byref(found)) return plid.value, stypes.cVectorToPython(xpt), bool(found.value)
Determine the plate ID and body-fixed coordinates of the intersection of a specified ray with the surface defined by a type 2 DSK plate model. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskx02_c.html :param handle: Handle of DSK kernel containing plate model. :type handle: int :param dladsc: DLA descriptor of plate model segment. :type dladsc: spiceypy.utils.support_types.SpiceDLADescr :param vertex: Ray's vertex in the body fixed frame. :type vertex: 3-Element Array of floats :param raydir: Ray direction in the body fixed frame. :type raydir: 3-Element Array of floats :return: ID code of the plate intersected by the ray, Intercept, and Flag indicating whether intercept exists. :rtype: tuple
371,818
def get_layer_params(self, layer_name): assert layer_name in self.layer_names out = [] layer = self.layers[layer_name] layer_variables = layer.variables for param in layer_variables: if param not in out: out.append(param) return out
Provides access to the parameters of the given layer. Works arounds the non-availability of graph collections in eager mode. :layer_name: name of the layer for which parameters are required, must be one of the string in the list layer_names :return: list of parameters corresponding to the given layer.
371,819
def Collect( self, knowledge_base, artifact_definition, searcher, file_system): for source in artifact_definition.sources: if source.type_indicator not in ( artifact_definitions.TYPE_INDICATOR_FILE, artifact_definitions.TYPE_INDICATOR_PATH): continue for path in source.paths: path_segments = path.split(source.separator) find_spec = file_system_searcher.FindSpec( location_glob=path_segments[1:], case_sensitive=False) for path_specification in searcher.Find(find_specs=[find_spec]): self._ParsePathSpecification( knowledge_base, searcher, file_system, path_specification, source.separator)
Collects values using a file artifact definition. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. artifact_definition (artifacts.ArtifactDefinition): artifact definition. searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess the file system. file_system (dfvfs.FileSystem): file system to be preprocessed. Raises: PreProcessFail: if the preprocessing fails.
371,820
def collect(self, order_ref): response = self.client.post( self._collect_endpoint, json={"orderRef": order_ref} ) if response.status_code == 200: return response.json() else: raise get_json_error_class(response)
Collects the result of a sign or auth order using the ``orderRef`` as reference. RP should keep on calling collect every two seconds as long as status indicates pending. RP must abort if status indicates failed. The user identity is returned when complete. Example collect results returned while authentication or signing is still pending: .. code-block:: json { "orderRef":"131daac9-16c6-4618-beb0-365768f37288", "status":"pending", "hintCode":"userSign" } Example collect result when authentication or signing has failed: .. code-block:: json { "orderRef":"131daac9-16c6-4618-beb0-365768f37288", "status":"failed", "hintCode":"userCancel" } Example collect result when authentication or signing is successful and completed: .. code-block:: json { "orderRef":"131daac9-16c6-4618-beb0-365768f37288", "status":"complete", "completionData": { "user": { "personalNumber":"190000000000", "name":"Karl Karlsson", "givenName":"Karl", "surname":"Karlsson" }, "device": { "ipAddress":"192.168.0.1" }, "cert": { "notBefore":"1502983274000", "notAfter":"1563549674000" }, "signature":"<base64-encoded data>", "ocspResponse":"<base64-encoded data>" } } See `BankID Relying Party Guidelines Version: 3.0 <https://www.bankid.com/assets/bankid/rp/bankid-relying-party-guidelines-v3.0.pdf>`_ for more details about how to inform end user of the current status, whether it is pending, failed or completed. :param order_ref: The ``orderRef`` UUID returned from auth or sign. :type order_ref: str :return: The CollectResponse parsed to a dictionary. :rtype: dict :raises BankIDError: raises a subclass of this error when error has been returned from server.
371,821
def winsorize(self, min_percentile, max_percentile, mask=NotSpecified, groupby=NotSpecified): if not 0.0 <= min_percentile < max_percentile <= 1.0: raise BadPercentileBounds( min_percentile=min_percentile, max_percentile=max_percentile, upper_bound=1.0, ) return GroupedRowTransform( transform=winsorize, transform_args=(min_percentile, max_percentile), factor=self, groupby=groupby, dtype=self.dtype, missing_value=self.missing_value, mask=mask, window_safe=self.window_safe, )
Construct a new factor that winsorizes the result of this factor. Winsorizing changes values ranked less than the minimum percentile to the value at the minimum percentile. Similarly, values ranking above the maximum percentile are changed to the value at the maximum percentile. Winsorizing is useful for limiting the impact of extreme data points without completely removing those points. If ``mask`` is supplied, ignore values where ``mask`` returns False when computing percentile cutoffs, and output NaN anywhere the mask is False. If ``groupby`` is supplied, winsorization is applied separately separately to each group defined by ``groupby``. Parameters ---------- min_percentile: float, int Entries with values at or below this percentile will be replaced with the (len(input) * min_percentile)th lowest value. If low values should not be clipped, use 0. max_percentile: float, int Entries with values at or above this percentile will be replaced with the (len(input) * max_percentile)th lowest value. If high values should not be clipped, use 1. mask : zipline.pipeline.Filter, optional A Filter defining values to ignore when winsorizing. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to winsorize. Returns ------- winsorized : zipline.pipeline.Factor A Factor producing a winsorized version of self. Examples -------- .. code-block:: python price = USEquityPricing.close.latest columns={ 'PRICE': price, 'WINSOR_1: price.winsorize( min_percentile=0.25, max_percentile=0.75 ), 'WINSOR_2': price.winsorize( min_percentile=0.50, max_percentile=1.0 ), 'WINSOR_3': price.winsorize( min_percentile=0.0, max_percentile=0.5 ), } Given a pipeline with columns, defined above, the result for a given day could look like: :: 'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3' Asset_1 1 2 4 3 Asset_2 2 2 4 3 Asset_3 3 3 4 3 Asset_4 4 4 4 4 Asset_5 5 5 5 4 Asset_6 6 5 5 4 See Also -------- :func:`scipy.stats.mstats.winsorize` :meth:`pandas.DataFrame.groupby`
371,822
def fit(self, X, y, **fit_params): return super().fit(X, y, **fit_params)
See ``NeuralNet.fit``. In contrast to ``NeuralNet.fit``, ``y`` is non-optional to avoid mistakenly forgetting about ``y``. However, ``y`` can be set to ``None`` in case it is derived dynamically from ``X``.
371,823
def add_step(self, setting, duration): if len(self._prog_steps) < 10: self._prog_steps.append(ProgramStep(self, setting, duration)) else: raise IndexError("Maximum of 10 steps are allowed")
Adds steps to a program. :param setting: Current, Wattage or Resistance, depending on program mode. :param duration: Length of step in seconds. :return: None
371,824
def download_file_job(entry, directory, checksums, filetype=, symlink_path=None): pattern = NgdConfig.get_fileending(filetype) filename, expected_checksum = get_name_and_checksum(checksums, pattern) base_url = convert_ftp_url(entry[]) full_url = .format(base_url, filename) local_file = os.path.join(directory, filename) full_symlink = None if symlink_path is not None: full_symlink = os.path.join(symlink_path, filename) mtable = metadata.get() mtable.add(entry, local_file) return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
Generate a DownloadJob that actually triggers a file download.
371,825
def asint(vari): if isinstance(vari, Poly): core = vari.A.copy() for key in vari.keys: core[key] = numpy.asarray(core[key], dtype=int) return Poly(core, vari.dim, vari.shape, int) return numpy.asarray(vari, dtype=int)
Convert dtype of polynomial coefficients to float. Example: >>> poly = 1.5*cp.variable()+2.25 >>> print(poly) 1.5q0+2.25 >>> print(cp.asint(poly)) q0+2
371,826
def workspaces_provider(context): catalog = api.portal.get_tool(name="portal_catalog") workspaces = catalog(portal_type="ploneintranet.workspace.workspacefolder") current = api.content.get_uuid(context) terms = [] for ws in workspaces: if current != ws["UID"]: terms.append(SimpleVocabulary.createTerm( ws["UID"], ws["UID"], ws["Title"])) return SimpleVocabulary(terms)
create a vocab of all workspaces in this site
371,827
def get_fields(model_class, field_name=, path=): fields = get_direct_fields_from_model(model_class) app_label = model_class._meta.app_label if field_name != : field, model, direct, m2m = _get_field_by_name(model_class, field_name) path += field_name path += if direct: try: new_model = _get_remote_field(field).parent_model except AttributeError: new_model = _get_remote_field(field).model else: new_model = field.related_model fields = get_direct_fields_from_model(new_model) app_label = new_model._meta.app_label return { : fields, : path, : app_label, }
Get fields and meta data from a model :param model_class: A django model class :param field_name: The field name to get sub fields from :param path: path of our field in format field_name__second_field_name__ect__ :returns: Returns fields and meta data about such fields fields: Django model fields properties: Any properties the model has path: Our new path :rtype: dict
371,828
def write_results(self, data, name=None): if name: filepath = os.path.abspath(name) else: filepath = os.path.join(os.path.getcwd(), "results.json") with open(filepath, "w", encoding="utf8") as f: try: f.write(unicode(json.dumps(data, indent=4))) except NameError: f.write(json.dumps(data, indent=4))
Write JSON to file with the specified name. :param name: Path to the file to be written to. If no path is passed a new JSON file "results.json" will be created in the current working directory. :param output: JSON object.
371,829
def get_modules(modulename=None): modulename = compat.ensure_not_unicode(modulename) if not modulename: try: return ([modname for (importer, modname, ispkg) in iter_modules() if not modname.startswith("_")] + list(sys.builtin_module_names)) except OSError: return list(sys.builtin_module_names) try: module = safeimport(modulename) except ErrorDuringImport: return [] if module is None: return [] if hasattr(module, "__path__"): return [modname for (importer, modname, ispkg) in iter_modules(module.__path__) if not modname.startswith("_")] return []
Return a list of modules and packages under modulename. If modulename is not given, return a list of all top level modules and packages.
371,830
def _bapp(self, sample, target_label, target_image): if target_label is None: original_label = np.argmax( self.sess.run(self.logits, feed_dict={self.input_ph: sample[None]}) ) else: target_label = np.argmax(target_label) def decision_function(images): images = clip_image(images, self.clip_min, self.clip_max) prob = [] for i in range(0, len(images), self.batch_size): batch = images[i:i+self.batch_size] prob_i = self.sess.run(self.logits, feed_dict={self.input_ph: batch}) prob.append(prob_i) prob = np.concatenate(prob, axis=0) if target_label is None: return np.argmax(prob, axis=1) != original_label else: return np.argmax(prob, axis=1) == target_label if target_image is None: perturbed = initialize(decision_function, sample, self.shape, self.clip_min, self.clip_max) else: perturbed = target_image perturbed, dist_post_update = binary_search_batch(sample, np.expand_dims(perturbed, 0), decision_function, self.shape, self.constraint, self.theta) dist = compute_distance(perturbed, sample, self.constraint) for j in np.arange(self.num_iterations): current_iteration = j + 1 delta = select_delta(dist_post_update, current_iteration, self.clip_max, self.clip_min, self.d, self.theta, self.constraint) num_evals = int(min([self.initial_num_evals * np.sqrt(j+1), self.max_num_evals])) gradf = approximate_gradient(decision_function, perturbed, num_evals, delta, self.constraint, self.shape, self.clip_min, self.clip_max) if self.constraint == : update = np.sign(gradf) else: update = gradf if self.stepsize_search == : epsilon = geometric_progression_for_stepsize(perturbed, update, dist, decision_function, current_iteration) perturbed = clip_image(perturbed + epsilon * update, self.clip_min, self.clip_max) perturbed, dist_post_update = binary_search_batch(sample, perturbed[None], decision_function, self.shape, self.constraint, self.theta) elif self.stepsize_search == : epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist epsilons_shape = [20] + len(self.shape) * [1] perturbeds = 
perturbed + epsilons.reshape(epsilons_shape) * update perturbeds = clip_image(perturbeds, self.clip_min, self.clip_max) idx_perturbed = decision_function(perturbeds) if np.sum(idx_perturbed) > 0: perturbed, dist_post_update = binary_search_batch(sample, perturbeds[idx_perturbed], decision_function, self.shape, self.constraint, self.theta) dist = compute_distance(perturbed, sample, self.constraint) if self.verbose: print(.format( j+1, self.constraint, dist)) perturbed = np.expand_dims(perturbed, 0) return perturbed
Main algorithm for Boundary Attack ++. Return a tensor that constructs adversarial examples for the given input. Generate uses tf.py_func in order to operate over tensors. :param sample: input image. Without the batchsize dimension. :param target_label: integer for targeted attack, None for nontargeted attack. Without the batchsize dimension. :param target_image: an array with the same size as sample, or None. Without the batchsize dimension. Output: perturbed image.
371,831
def export(local_root, commit, target): log = logging.getLogger(__name__) target = os.path.realpath(target) mtimes = list() def extract(stdout): queued_links = list() try: with tarfile.open(fileobj=stdout, mode=) as tar: for info in tar: log.debug(, info.name, info.mode, info.size, info.type) path = os.path.realpath(os.path.join(target, info.name)) if not path.startswith(target): log.warning(, info.name) elif info.isdir(): if not os.path.exists(path): os.makedirs(path, mode=info.mode) elif info.issym() or info.islnk(): queued_links.append(info) else: tar.extract(member=info, path=target) if os.path.splitext(info.name)[1].lower() == : mtimes.append(info.name) for info in (i for i in queued_links if os.path.exists(os.path.join(target, i.linkname))): tar.extract(member=info, path=target) except tarfile.TarError as exc: log.debug(, str(exc)) run_command(local_root, [, , , commit], pipeto=extract) for file_path in mtimes: last_committed = int(run_command(local_root, [, , , , commit, , file_path])) os.utime(os.path.join(target, file_path), (last_committed, last_committed))
Export git commit to directory. "Extracts" all files at the commit to the target directory. Set mtime of RST files to last commit date. :raise CalledProcessError: Unhandled git command failure. :param str local_root: Local path to git root directory. :param str commit: Git commit SHA to export. :param str target: Directory to export to.
371,832
def as_task(self, logger=None, **fields): return self._startTask( logger, self.action_type, self._serializers, **fields)
Start a new L{eliot.Action} of this type as a task (i.e. top-level action) with the given start fields. See L{ActionType.__call__} for example of usage. @param logger: A L{eliot.ILogger} provider to which the action's messages will be written, or C{None} to use the default one. @param fields: Extra fields to add to the message. @rtype: L{eliot.Action}
371,833
def _process_config(self): self._user_agent_adding_config = botocore.config.Config(user_agent_extra=USER_AGENT_SUFFIX) if self.config.region_names: self.add_regional_clients_from_list(self.config.region_names) self.default_region = self.config.region_names[0] else: self.default_region = self.config.botocore_session.get_config_variable("region") if self.default_region is not None: self.add_regional_client(self.default_region) if self.config.key_ids: self.add_master_keys_from_list(self.config.key_ids)
Traverses the config and adds master keys and regional clients as needed.
371,834
def generate_data(nitem, nfeat=2, dim=10, labeldim=1, base=): import numpy as np items = [base + + str(i) for i in range(nitem)] features = [np.random.randn(nfeat, dim) for _ in range(nitem)] if labeldim == 1: labels = [np.linspace(0, 1, nfeat)] * nitem else: t = np.linspace(0, 1, nfeat) labels = [np.array([t+i for i in range(labeldim)])] * nitem return h5f.Data(items, labels, features, check=True)
Returns a randomly generated h5f.Data instance. - nitem is the number of items to generate. - nfeat is the number of features to generate for each item. - dim is the dimension of the features vectors. - base is the items basename - labeldim is the dimension of the labels vectors.
371,835
def emit(self, span_datas): try: responses = self.client.Export( self.generate_span_requests(span_datas)) for _ in responses: pass except grpc.RpcError: pass
:type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit
371,836
def apply(self, X, ntree_limit=0): test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs) return self.get_booster().predict(test_dmatrix, pred_leaf=True, ntree_limit=ntree_limit)
Return the predicted leaf every tree for each sample. Parameters ---------- X : array_like, shape=[n_samples, n_features] Input features matrix. ntree_limit : int Limit number of trees in the prediction; defaults to 0 (use all trees). Returns ------- X_leaves : array_like, shape=[n_samples, n_trees] For each datapoint x in X and for each tree, return the index of the leaf x ends up in. Leaves are numbered within ``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
371,837
def get_public_ip(addresses, version=4): for addr in addresses: if addr[] and addr[] == version: return addr.get() return None
Return either the devices public IPv4 or IPv6 address.
371,838
def _lmder1_freudenstein_roth(): def func(params, vec): vec[0] = -13 + params[0] + ((5 - params[1]) * params[1] - 2) * params[1] vec[1] = -29 + params[0] + ((1 + params[1]) * params[1] - 14) * params[1] def jac(params, jac): jac[0] = 1 jac[1,0] = params[1] * (10 - 3 * params[1]) - 2 jac[1,1] = params[1] * (2 + 3 * params[1]) - 14 guess = np.asfarray([0.5, -2]) _lmder1_driver(2, func, jac, guess, 0.200124960962e+02, 0.699887517585e+01, [0.114124844655e+02, -0.896827913732e+00]) _lmder1_driver(2, func, jac, guess * 10, 0.124328339489e+05, 0.699887517449e+01, [0.114130046615e+02, -0.896796038686e+00]) _lmder1_driver(2, func, jac, guess * 100, 0.11426454595762e+08, 0.699887517243e+01, [0.114127817858e+02, -0.896805107492e+00])
Freudenstein and Roth function (lmder1 test #7)
371,839
def save_visible_toolbars(self): toolbars = [] for toolbar in self.visible_toolbars: toolbars.append(toolbar.objectName()) CONF.set(, , toolbars)
Saves the name of the visible toolbars in the .ini file.
371,840
def tick(self, index, length): assert int(length) <= 25, self.data[].append(%(index,length)) return self.parent
Add tick marks in order of axes by width APIPARAM: chxtc <axis index>,<length of tick mark>
371,841
def dcc(self, *args, **kwargs): dcc = self.reactor.dcc(*args, **kwargs) self.dcc_connections.append(dcc) return dcc
Create and associate a new DCCConnection object. Use the returned object to listen for or connect to a DCC peer.
371,842
def reset(self): if self.ffmpeg_process is not None: try: self.ffmpeg_process.send_signal(signal.SIGINT) except OSError: pass command = [] command.extend([ self.ffmpeg_binary, , , , , , , , , % self.fps, , , , % (self.width, self.height), , , , , , , ]) if self.audio_enabled: command.extend([ , % AUDIORATE, , , , , , , , ]) else: command.extend([ , , , , , , , , ]) command.extend([ , , , % self.fps, , , , % (self.width, self.height), , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , % self.twitch_stream_key ]) devnullpipe = open("/dev/null", "w") if self.verbose: devnullpipe = None self.ffmpeg_process = subprocess.Popen( command, stdin=subprocess.PIPE, stderr=devnullpipe, stdout=devnullpipe)
Reset the videostream by restarting ffmpeg
371,843
def com_google_fonts_check_has_ttfautohint_params(ttFont): from fontbakery.utils import get_name_entry_strings def ttfautohint_version(value): if results: return results.group(1), results.group(2) version_strings = get_name_entry_strings(ttFont, NameID.VERSION_STRING) failed = True for vstring in version_strings: values = ttfautohint_version(vstring) if values: ttfa_version, params = values if params: yield PASS, f"Font has ttfautohint params ({params})" failed = False else: yield SKIP, "Font appears to our heuristic as not hinted using ttfautohint." failed = False if failed: yield FAIL, "Font is lacking ttfautohint params on its version strings on the name table."
Font has ttfautohint params?
371,844
def lock(self): provider = self.get_provider() reporter = self.get_reporter() resolver = resolvelib.Resolver(provider, reporter) with vistir.cd(self.project.root): state = resolver.resolve(self.requirements) traces = trace_graph(state.graph) hash_cache = HashCache() for r in state.mapping.values(): if not r.hashes: r.hashes = get_hashes(hash_cache, r) set_metadata( state.mapping, traces, provider.fetched_dependencies, provider.collected_requires_pythons, ) lockfile = plette.Lockfile.with_meta_from(self.project.pipfile) lockfile["default"] = _collect_derived_entries( state, traces, self.default_requirements, ) lockfile["develop"] = _collect_derived_entries( state, traces, self.develop_requirements, ) self.project.lockfile = lockfile
Lock specified (abstract) requirements into (concrete) candidates. The locking procedure consists of four stages: * Resolve versions and dependency graph (powered by ResolveLib). * Walk the graph to determine "why" each candidate came to be, i.e. what top-level requirements result in a given candidate. * Populate hashes for resolved candidates. * Populate markers based on dependency specifications of each candidate, and the dependency graph.
371,845
def from_kwargs(cls, **kwargs): config = cls() for slot in cls.__slots__: if slot.startswith(): slot = slot[1:] setattr(config, slot, kwargs.pop(slot, cls.get_default(slot))) if kwargs: raise ValueError("Unrecognized option(s): {}".format(kwargs.keys())) return config
Initialise configuration from kwargs.
371,846
def fullselection(self) -> selectiontools.Selection: fullselection = selectiontools.Selection() for selection in self.selections: fullselection += selection fullselection += self.devices return fullselection
A |Selection| object containing all |Element| and |Node| objects defined by |XMLInterface.selections| and |XMLInterface.devices|. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> interface.find('selections').text = 'nonheadwaters' >>> interface.fullselection Selection("fullselection", nodes=("dill", "lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3"))
371,847
def install_timers(config, context): timers = [] if config.get(): timeout_threshold = config.get() time_remaining = context.get_remaining_time_in_millis() / 1000 timers.append(Timer(time_remaining * timeout_threshold, timeout_warning, (config, context))) timers.append(Timer(max(time_remaining - .5, 0), timeout_error, [config])) if config.get(): timers.append(Timer(.5, memory_warning, (config, context))) for t in timers: t.start() return timers
Create the timers as specified by the plugin configuration.
371,848
def get_checksum(file): with open(file, ) as FH: contents = FH.read() return hashlib.sha256(contents).hexdigest()
Get SHA256 hash from the contents of a given file
371,849
def synonym(name): return hybrid_property(lambda inst: getattr(inst, name), lambda inst, value: setattr(inst, name, value), expr=lambda cls: getattr(cls, name))
Utility function mimicking the behavior of the old SA synonym function with the new hybrid property semantics.
371,850
def fontsize(self, fontsize=None): if fontsize is not None: self._canvas.fontsize = fontsize else: return self._canvas.fontsize
Set or return size of current font. :param fontsize: Size of font. :return: Size of font (if fontsize was not specified)
371,851
def source(self): if len(self.sources) == 0: raise ValueError("No source associated with %s" % self.__class__.__name__) elif len(self.sources) > 1: raise ValueError("Multiple sources for %s" % self.__class__.__name__) return list(self.sources)[0]
Returns the single source name for a variant collection if it is unique, otherwise raises an error.
371,852
def _set_line_speed(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=line_speed.line_speed, is_container=, presence=False, yang_name="line-speed", rest_name="line-speed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__line_speed = t if hasattr(self, ): self._set()
Setter method for line_speed, mapped from YANG variable /interface/management/line_speed (container) If this variable is read-only (config: false) in the source YANG file, then _set_line_speed is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_line_speed() directly. YANG Description: The line-speed characteristics for this management interface.
371,853
def _init_metadata(self): super(MultiLanguageMultipleChoiceQuestionFormRecord, self)._init_metadata() self._choices_metadata = { : Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, ), : , : , : True, : False, : False, : True, : [[]], : , : [] }
stub
371,854
def user_agent(self): components = ["/".join(x) for x in self.user_agent_components.items()] return " ".join(components)
Return the formatted user agent string.
371,855
def createLinkToSelf(self, new_zone, callback=None, errback=None, **kwargs): zone = Zone(self.config, new_zone) kwargs[] = self.data[] return zone.create(callback=callback, errback=errback, **kwargs)
Create a new linked zone, linking to ourselves. All records in this zone will then be available as "linked records" in the new zone. :param str new_zone: the new zone name to link to this one :return: new Zone
371,856
def feature_subset(self, indices): if isinstance(indices, np.ndarray): indices = indices.tolist() if not isinstance(indices, list): raise ValueError() return [self.features_[i] for i in indices]
Returns some subset of the features. Parameters ---------- indices : :obj:`list` of :obj:`int` indices of the features in the list Returns ------- :obj:`list` of :obj:`Feature`
371,857
def write_surf_params_to_file(self): inp_file = self.water_surface_file + lg.info( + inp_file) if self.surf_state == : lg.info() f = open(inp_file, ) f.write( + str(self.verbose) + ) f.write( + str(self.num_bands) + ) f.write() f.write(",".join([str(wave) for wave in self.wavelengths]) + ) f.write( + self.partition + ) f.write( + str(self.vn) + ) f.write( + str(self.hn) + ) f.write() f.write(",".join([str(theta) for theta in self.theta_points]) + ) f.write( + self.iface_type + ) f.write( + str(self.iface_0_ri) + ) f.write( + str(self.iface_1_ri) + ) f.write( + str(self.wind_speed) + ) f.write( + str(self.wind_direc) + ) f.write( + str(self.crosswind_vertices) + ) f.write( + str(self.upwind_vertices) + ) f.write( + str(self.surface_size) + ) f.write( + str(self.surface_radius) + ) f.write( + str(self.target_size) + ) f.write( + str(self.rays_per_quad) + ) f.write( + str(self.surface_count) + ) f.write( + str(self.azimuthally_average) + ) f.write( + inp_file.strip() + ) f.flush() f.close()
Write the params to file that surftool_Free needs to generate the surface facets
371,858
def restore(self): sys = set(self._sys_modules.keys()) for mod_name in sys.difference(self._saved_modules): del self._sys_modules[mod_name]
Unloads all modules that weren't loaded when save_modules was called.
371,859
def get_info_consistent(self, ndim): if ndim > len(self.spacing): spacing = self.spacing + (1.0, ) * (ndim - len(self.spacing)) else: spacing = self.spacing[:ndim] if ndim > len(self.offset): offset = self.offset + (0.0, ) * (ndim - len(self.offset)) else: offset = self.offset[:ndim] if ndim > self.direction.shape[0]: direction = np.identity(ndim) direction[:self.direction.shape[0], :self.direction.shape[0]] = self.direction else: direction = self.direction[:ndim, :ndim] return spacing, offset, direction
Returns the main meta-data information adapted to the supplied image dimensionality. It will try to resolve inconsistencies and other conflicts, altering the information avilable int he most plausible way. Parameters ---------- ndim : int image's dimensionality Returns ------- spacing : tuple of floats offset : tuple of floats direction : ndarray
371,860
def col_to_dt(df,col_name,set_format = None,infer_format = True, dest = False): new_col = _pd.to_datetime(df[col_name],errors = , format = set_format,infer_datetime_format = infer_format) if dest: set_col(df,col_name,new_col) else: return new_col
Coerces a column in a DataFrame to datetime Parameters: df - DataFrame DataFrame to operate on col_name - string Name of column to coerce dest - bool, default False Whether to apply the result to the DataFrame or return it. True is apply, False is return.
371,861
def _store(self, uid, content, data=None): doc = dict(uid=uid) if data: gfs = gridfs.GridFS(self.db) id = gfs.put(data, encoding=) doc.update(data_id=id) doc.update(content) self.db.pastes.insert_one(doc)
Store the given dict of content at uid. Nothing returned.
371,862
def assignOrderNames(self): try: schema = self.tableType().schema() except AttributeError: return for colname in self.columns(): column = schema.column(colname) if column: self.setColumnOrderName(colname, column.name())
Assigns the order names for this tree based on the name of the columns.
371,863
def _get_instance_key(self, host, namespace, wmi_class, other=None): if other: return "{host}:{namespace}:{wmi_class}-{other}".format( host=host, namespace=namespace, wmi_class=wmi_class, other=other ) return "{host}:{namespace}:{wmi_class}".format(host=host, namespace=namespace, wmi_class=wmi_class)
Return an index key for a given instance. Useful for caching.
371,864
def kernel_id(self): if self.connection_file is not None: json_file = osp.basename(self.connection_file) return json_file.split()[0]
Get kernel id
371,865
def process_raw_data(cls, raw_data): properties = raw_data["properties"] address_pools = [] for content in properties.get("loadBalancerBackendAddressPools", []): resource = Resource.from_raw_data(content) address_pools.append(resource) properties["loadBalancerBackendAddressPools"] = address_pools nat_rules = [] for content in properties.get("loadBalancerInboundNatRules", None): resource = Resource.from_raw_data(content) nat_rules.append(resource) properties["loadBalancerInboundNatRules"] = nat_rules raw_content = properties.get("publicIPAddress", None) if raw_content is not None: resource = Resource.from_raw_data(raw_content) properties["publicIPAddress"] = resource raw_content = properties.get("serviceInsertion", None) if raw_content is not None: resource = Resource.from_raw_data(raw_content) properties["serviceInsertion"] = resource raw_content = properties.get("subnet", None) if raw_content is not None: resource = Resource.from_raw_data(raw_content) properties["subnet"] = resource return super(IPConfiguration, cls).process_raw_data(raw_data)
Create a new model using raw API response.
371,866
def parser_set(self, args): for key, value in vars(args).items(): self._parser_update(key, value)
Set config from an :py:class:`argparse.Namespace` object. Call this method with the return value from :py:meth:`~argparse.ArgumentParser.parse_args`. :param argparse.Namespace args: The populated :py:class:`argparse.Namespace` object.
371,867
def update_tcs_table(self): g = get_root(self).globals if not g.cpars[] or not g.cpars[].lower() == : self.after(60000, self.update_tcs_table) return try: tel_server = tcs.get_telescope_server() telpars = tel_server.getTelescopeParams() add_gtc_header_table_row(self.tcs_table, telpars) except Exception as err: g.clog.warn() self.after(60000, self.update_tcs_table)
Periodically update a table of info from the TCS. Only works at GTC
371,868
def current_timestamp(self) -> datetime: timestamp = DB.get_hash_value(self._key, ) return datetime_from_isoformat(timestamp)
Get the current state timestamp.
371,869
def RV_2(self): return -self.orbpop_long.RV * (self.orbpop_long.M1 / (self.orbpop_long.M1 + self.orbpop_long.M2)) +\ self.orbpop_short.RV_com1
Instantaneous RV of star 2 with respect to system center-of-mass
371,870
def check_obfuscated_ip (self): if iputil.is_obfuscated_ip(self.host): ips = iputil.resolve_host(self.host) if ips: self.host = ips[0] self.add_warning( _("URL %(url)s has obfuscated IP address %(ip)s") % \ {"url": self.base_url, "ip": ips[0]}, tag=WARN_URL_OBFUSCATED_IP)
Warn if host of this URL is obfuscated IP address.
371,871
def save_setting(self, setting_name, value): setting = self.get_setting(setting_name) if setting is None: setting = models.DashboardWidgetSettings.objects.create( widget_name=self.get_name(), setting_name=setting_name, value=value) setting.value = value setting.save() return setting
Saves the setting value into the database.
371,872
def visibility(cls, orb, **kwargs): from ..orbits.listeners import stations_listeners, Listener listeners = kwargs.setdefault(, []) events = kwargs.pop(, None) event_classes = tuple() if events: if isinstance(events, Listener): listeners.append(events) elif isinstance(events, (list, tuple)): listeners.extend(events) sta_list = stations_listeners(cls) listeners.extend(sta_list) event_classes = tuple(listener.event for listener in sta_list) for point in orb.iter(**kwargs): point.frame = cls point.form = if point.phi < 0 and not isinstance(point.event, event_classes): continue yield point
Visibility from a topocentric frame see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>` for description of arguments handling. Args: orb (Orbit): Orbit to compute visibility from the station with Keyword Args: start (Date): starting date of the visibility search stop (Date or datetime.timedelta) end of the visibility search step (datetime.timedelta): step of the computation events (bool, Listener or list): If evaluate to True, compute AOS, LOS and MAX elevation for each pass on this station. If 'events' is a Listener or an iterable of Listeners, they will be added to the computation Any other keyword arguments are passed to the propagator. Yield: Orbit: In-visibility point of the orbit. This Orbit is already in the frame of the station and in spherical form.
371,873
def tmpdir(prefix=, delete=True): path = tempfile.mkdtemp(prefix=prefix) if not os.path.isdir(path): raise ValueError() if delete: atexit.register(shutil.rmtree, path) return path
tmpdir() creates a temporary directory and yields its path. At python exit, the directory and all of its contents are recursively deleted (so long as the the normal python exit process is allowed to call the atexit handlers). tmpdir(prefix) uses the given prefix in the tempfile.mkdtemp() call. The option delete may be set to False to specify that the tempdir should not be deleted on exit.
371,874
def List(validator): @wraps(List) def built(value): if not hasattr(value, ): raise Error("Must be a list") invalid = Invalid() for i, item in enumerate(value): try: value[i] = validator(item) except Invalid as e: for error in e: error.path.insert(0, i) invalid.append(error) except Error as e: e.path.insert(0, i) invalid.append(e) if len(invalid): raise invalid return value return built
Creates a validator that runs the given validator on every item in a list or other collection. The validator can mutate the values. Any raised errors will be collected into a single ``Invalid`` error. Their paths will be replaced with the index of the item. Will raise an error if the input value is not iterable.
371,875
def register_actions(self, shortcut_manager): shortcut_manager.add_callback_for_action("add", partial(self._add_new_state, state_type=StateType.EXECUTION)) shortcut_manager.add_callback_for_action("add_execution_state", partial(self._add_new_state, state_type=StateType.EXECUTION)) shortcut_manager.add_callback_for_action("add_hierarchy_state", partial(self._add_new_state, state_type=StateType.HIERARCHY)) shortcut_manager.add_callback_for_action("add_barrier_state", partial(self._add_new_state, state_type=StateType.BARRIER_CONCURRENCY)) shortcut_manager.add_callback_for_action("add_preemptive_state", partial(self._add_new_state, state_type=StateType.PREEMPTION_CONCURRENCY)) shortcut_manager.add_callback_for_action("add_output", partial(self._add_data_port_to_selected_state, data_port_type=)) shortcut_manager.add_callback_for_action("add_input", partial(self._add_data_port_to_selected_state, data_port_type=)) shortcut_manager.add_callback_for_action("add_scoped_variable", self._add_scoped_variable_to_selected_state) shortcut_manager.add_callback_for_action("add_outcome", self._add_outcome_to_selected_state) shortcut_manager.add_callback_for_action("delete", self._remove_selected_elements) shortcut_manager.add_callback_for_action("copy", self._copy_selection) shortcut_manager.add_callback_for_action("paste", self._paste_clipboard) shortcut_manager.add_callback_for_action("cut", self._cut_selection) shortcut_manager.add_callback_for_action(, self.update_view) shortcut_manager.add_callback_for_action(, self.update_view) shortcut_manager.add_callback_for_action(, self.data_flow_mode) shortcut_manager.add_callback_for_action(, self.update_view)
Register callback methods for triggered actions :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings between shortcuts and actions.
371,876
def read(self, size): raw_read = super(USBRawDevice, self).read received = bytearray() while not len(received) >= size: resp = raw_read(self.RECV_CHUNK) received.extend(resp) return bytes(received)
Read raw bytes from the instrument. :param size: amount of bytes to be sent to the instrument :type size: integer :return: received bytes :return type: bytes
371,877
def get_parameters_at_instant(self, instant): if isinstance(instant, periods.Period): instant = instant.start elif isinstance(instant, (str, int)): instant = periods.instant(instant) else: assert isinstance(instant, periods.Instant), "Expected an Instant (e.g. Instant((2017, 1, 1)) ). Got: {}.".format(instant) parameters_at_instant = self._parameters_at_instant_cache.get(instant) if parameters_at_instant is None and self.parameters is not None: parameters_at_instant = self.parameters.get_at_instant(str(instant)) self._parameters_at_instant_cache[instant] = parameters_at_instant return parameters_at_instant
Get the parameters of the legislation at a given instant :param instant: string of the format 'YYYY-MM-DD' or `openfisca_core.periods.Instant` instance. :returns: The parameters of the legislation at a given instant. :rtype: :any:`ParameterNodeAtInstant`
371,878
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir): if in driveid: driveid = driveid.split()[-1] if in driveid: driveid = driveid.split()[-1] URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={: driveid}, stream=True) token = get_response_confirmation_token(response) if token: params = {: driveid, : token} response = session.get(URL, params=params, stream=True) filename = filename or get_url_filename(driveid=driveid) full_destination_path = save_response_content(response, filename=fileanme, destination=destination) return os.path.abspath(destination)
Download script for google drive shared links Thank you @turdus-merula and Andrew Hundt! https://stackoverflow.com/a/39225039/623735
371,879
def _dims_in_order(self, dimension_order): regx = regex.compile(r) dimension_string = .join(dimension_order) return regx.match(dimension_string) is not None
:param list dimension_order: A list of axes :rtype: bool :return: Returns True if the dimensions are in order U*, T, Z, Y, X, False otherwise
371,880
def from_str(cls, string): if string[0] == : signature = -1 variable = string[1:] else: signature = 1 variable = string return cls(variable, signature)
Creates a literal from a string Parameters ---------- string : str If the string starts with '!', it's interpreted as a negated variable Returns ------- caspo.core.literal.Literal Created object instance
371,881
def pci_lookup_name1( access: (IN, ctypes.POINTER(pci_access)), buf: (IN, ctypes.c_char_p), size: (IN, ctypes.c_int), flags: (IN, ctypes.c_int), arg1: (IN, ctypes.c_int), ) -> ctypes.c_char_p: pass
Conversion of PCI ID's to names (according to the pci.ids file). char *pci_lookup_name( struct pci_access *a, char *buf, int size, int flags, ... ) PCI_ABI; This is a variant of pci_lookup_name() that gets called with one argument. It is required because ctypes doesn't support varadic functions.
371,882
def _handle_stream(self, msg): self.log.debug("stream: %s", msg.get(, )) if not self._hidden and self._is_from_this_session(msg): self._append_plain_text(text, before_prompt=True) self._control.moveCursor(QtGui.QTextCursor.End)
Handle stdout, stderr, and stdin.
371,883
def get(self, *args, **kwargs): if args or kwargs: return self.filter(*args, **kwargs).get() self._execute_query() try: self[1] raise self.model.MultipleObjectsReturned() except IndexError: pass try: obj = self[0] except IndexError: raise self.model.DoesNotExist return obj
Returns a single instance matching this query, optionally with additional filter kwargs. See :ref:`retrieving-objects-with-filters` Returns a single object matching the QuerySet. .. code-block:: python user = User.get(id=1) If no objects are matched, a :class:`~.DoesNotExist` exception is raised. If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.
371,884
def CreateTypes(self, allTypes): enumTypes, dataTypes, managedTypes = self._ConvertAllTypes(allTypes) self._CreateAllTypes(enumTypes, dataTypes, managedTypes)
Create pyVmomi types from vmodl.reflect.DynamicTypeManager.AllTypeInfo
371,885
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudfront import CloudFrontConnection return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.fps.connection.FPSConnection` :return: A connection to FPS
371,886
def early_stopping(stopping_rounds, first_metric_only=False, verbose=True): best_score = [] best_iter = [] best_score_list = [] cmp_op = [] enabled = [True] def _init(env): enabled[0] = not any((boost_alias in env.params and env.params[boost_alias] == ) for boost_alias in (, , )) if not enabled[0]: warnings.warn() return if not env.evaluation_result_list: raise ValueError( ) if verbose: msg = "Training until validation scores don-infinfEarly stopping, best iteration is:\n[%d]\t%s\tDid not meet early stopping. Best iteration is:\n[%d]\t%s\t'.join([_format_eval_result(x) for x in best_score_list[i]]))) raise EarlyStopException(best_iter[i], best_score_list[i]) if first_metric_only: break _callback.order = 30 return _callback
Create a callback that activates early stopping. Note ---- Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation data and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric set ``first_metric_only`` to True. Parameters ---------- stopping_rounds : int The possible number of rounds without the trend occurrence. first_metric_only : bool, optional (default=False) Whether to use only the first metric for early stopping. verbose : bool, optional (default=True) Whether to print message with early stopping information. Returns ------- callback : function The callback that activates early stopping.
371,887
def choose_colour(self, title="Select Colour", **kwargs): return_data = self._run_zenity(title, ["--color-selection"], kwargs) if return_data.successful: converted_colour = ColourData.from_zenity_tuple_str(return_data.data) return DialogData(return_data.return_code, converted_colour) else: return DialogData(return_data.return_code, None)
Show a Colour Chooser dialog Usage: C{dialog.choose_colour(title="Select Colour")} @param title: window title for the dialog @return: @rtype: C{DialogData(int, Optional[ColourData])}
371,888
def _getitem(self, index): row = self._records[index] if row is not None: pass elif self.is_attached(): if index < 0: index = len(self._records) + index row = next((decode_row(line) for i, line in self._enum_lines() if i == index), None) if row is None: raise ItsdbError() else: raise ItsdbError(.format(index)) return Record._make(self.fields, row, self, index)
Get a single non-slice index.
371,889
def find_disulfide_bridges(self, representative_only=True): if representative_only: if self.representative_structure: try: self.representative_structure.find_disulfide_bridges() except KeyError: log.error(.format(self.id, self.representative_structure)) else: log.warning(.format(self.id)) else: for s in self.structures: try: s.find_disulfide_bridges() except KeyError: log.error(.format(self.id, s.id))
Run Biopython's disulfide bridge finder and store found bridges. Annotations are stored in the protein structure's chain sequence at: ``<chain_prop>.seq_record.annotations['SSBOND-biopython']`` Args: representative_only (bool): If analysis should only be run on the representative structure
371,890
def Rect_to_wxRect(self, fr): r = (fr * self.zoom).irect return wx.Rect(r.x0, r.y0, r.width, r.height)
Return a zoomed wx.Rect for given fitz.Rect.
371,891
def get_following(self, auth_secret): result = {pytwis_constants.ERROR_KEY: None} loggedin, userid = self._is_loggedin(auth_secret) if not loggedin: result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN return (False, result) with self._rc.pipeline() as pipe: pipe.multi() for following_userid in following_userids: following_userid_profile_key = \ pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid) pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY) result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute() return (True, result)
Get the following list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the following list is successfully obtained, False otherwise. result A dict containing the following list with the key FOLLOWING_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN
371,892
def hide_routemap_holder_route_map_content_set_ip_interface_null0(self, **kwargs): config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop() action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop() instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop() content = ET.SubElement(route_map, "content") set = ET.SubElement(content, "set") ip = ET.SubElement(set, "ip") interface = ET.SubElement(ip, "interface") null0 = ET.SubElement(interface, "null0") callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
371,893
def _SmallestColSize(self, text): if not text: return 0 stripped = terminal.StripAnsiText(text) return max(len(word) for word in stripped.split())
Finds the largest indivisible word of a string. ...and thus the smallest possible column width that can contain that word unsplit over rows. Args: text: A string of text potentially consisting of words. Returns: Integer size of the largest single word in the text.
371,894
def get_row_by_fsid(self, fs_id): for row in self.liststore: if row[FSID_COL] == fs_id: return row return None
确认在Liststore中是否存在这条任务. 如果存在, 返回TreeModelRow, 否则就返回None
371,895
def nside2pixarea(nside, degrees=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`.

    Returns the pixel area for the given nside as a plain float, in square
    degrees when ``degrees`` is True, otherwise in steradians.
    """
    pixel_area = nside_to_pixel_area(nside)
    target_unit = u.deg ** 2 if degrees else u.sr
    return pixel_area.to(target_unit).value
Drop-in replacement for healpy `~healpy.pixelfunc.nside2pixarea`.
371,896
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None,
                ignored_terms=None, **parameters):
    """Scale the given binary polynomial, sample it via the child sampler,
    and restore the energies to the original (unscaled) problem's scale.

    Args:
        poly: A binary polynomial (mapping of terms to biases).
        scalar (number, optional): Explicit scaling factor; if omitted the
            polynomial is normalized from bias_range/poly_range instead.
        bias_range (number/pair, optional, default=1): Target range for the
            biases when normalizing.
        poly_range (number/pair, optional): Target range for the
            higher-order biases when normalizing.
        ignored_terms (iterable, optional): Terms whose biases are left
            unscaled.
        **parameters: Passed through to the child sampler.

    Returns:
        The child's sample set with energies expressed in the original
        polynomial's scale.
    """
    if ignored_terms is None:
        ignored_terms = set()
    else:
        # Terms may arrive as any iterable of variables; canonicalize to
        # frozensets so they can be compared against the polynomial's keys.
        ignored_terms = {frozenset(term) for term in ignored_terms}

    # Keep the unscaled polynomial around; work on a copy so the caller's
    # object is never mutated.
    original, poly = poly, poly.copy()
    if scalar is not None:
        poly.scale(scalar, ignored_terms=ignored_terms)
    else:
        poly.normalize(bias_range=bias_range, poly_range=poly_range,
                       ignored_terms=ignored_terms)

    # Recover the effective scaling factor by comparing one scaled,
    # non-ignored, nonzero bias against its original value. (normalize()
    # chooses the factor internally, so it must be inferred here.)
    try:
        v = next(v for v, bias in original.items()
                 if bias and v not in ignored_terms)
    except StopIteration:
        # Every term is ignored or zero: nothing was scaled.
        scalar = 1
    else:
        scalar = poly[v] / original[v]

    sampleset = self.child.sample_poly(poly, **parameters)
    if ignored_terms:
        # Ignored terms were not scaled, so dividing by `scalar` would be
        # wrong; recompute energies directly from the original polynomial.
        sampleset.record.energy = original.energies((sampleset.record.sample,
                                                     sampleset.variables))
    else:
        # Uniform scaling: undo it by dividing the energies.
        sampleset.record.energy /= scalar
    return sampleset
Scale and sample from the given binary polynomial. If scalar is not given, problem is scaled based on bias and polynomial ranges. See :meth:`.BinaryPolynomial.scale` and :meth:`.BinaryPolynomial.normalize` Args: poly (obj:`.BinaryPolynomial`): A binary polynomial. scalar (number, optional): Value by which to scale the energy range of the binary polynomial. bias_range (number/pair, optional, default=1): Value/range by which to normalize all the biases, or if `poly_range` is provided, just the linear biases. poly_range (number/pair, optional): Value/range by which to normalize the higher order biases. ignored_terms (iterable, optional): Biases associated with these terms are not scaled. **parameters: Other parameters for the sampling method, specified by the child sampler.
371,897
def timeout(timeout):
    """A decorator to timeout a function.

    Decorated calls run in a separate thread joined with the given timeout;
    if the thread is still alive afterwards, TimeoutException is raised.
    An existing thread for the same (func, args, kwargs) key is reused
    instead of spawning a new one, which keeps this compatible with
    Windows (thread based).
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)

            # Reuse a still-running worker for the identical call, if any.
            worker = _thread_by_func.get(key)
            if worker is None:
                worker = ThreadMethod(func, args, kwargs)
                _thread_by_func[key] = worker

            worker.join(timeout)
            if worker.is_alive():
                # Leave the worker registered so a retry can rejoin it.
                raise TimeoutException()

            del _thread_by_func[key]
            if worker.exception:
                raise worker.exception
            return worker.result
        return wrapper
    return decorator
A decorator to timeout a function. Decorated method calls are executed in a separate new thread with a specified timeout. Also check if a thread for the same function already exists before creating a new one. Note: Compatible with Windows (thread based).
371,898
def save(self):
    """Deletes the selected files from storage.

    Removes each selected storage name from the media storage, recording
    the absolute path of each successful deletion in self.success_files
    and of each failure in self.error_files.
    """
    storage = get_media_storage()
    # BUGFIX: `self.cleaned_data[]` was a syntax error -- the field-name
    # literal was lost. NOTE(review): 'files' is a best-guess key; confirm
    # against the form's field definition.
    for storage_name in self.cleaned_data['files']:
        # Resolve the absolute path before deleting, for reporting.
        full_path = storage.path(storage_name)
        try:
            storage.delete(storage_name)
            self.success_files.append(full_path)
        except OSError:
            self.error_files.append(full_path)
Deletes the selected files from storage
371,899
def get_var(self, name, user=None):
    """Retrieve a global or user variable.

    :param name: The name of the variable to retrieve
    :type name: str
    :param user: If retrieving a user variable, the user identifier
    :type user: str or None
    :rtype: str
    :raises UserNotDefinedError: The specified user does not exist
    :raises VarNotDefinedError: The requested variable has not been defined
    """
    if user is None:
        # Global lookup path.
        if name not in self._global_vars:
            raise VarNotDefinedError
        return self._global_vars[name]

    # Per-user lookup path: delegate to the user's own variable store.
    if user not in self._users:
        raise UserNotDefinedError
    return self._users[user].get_var(name)
Retrieve a global or user variable :param name: The name of the variable to retrieve :type name: str :param user: If retrieving a user variable, the user identifier :type user: str or None :rtype: str :raises UserNotDefinedError: The specified user does not exist :raises VarNotDefinedError: The requested variable has not been defined