code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def member_of(self, group):
    """Return True if the user belongs to the given group.

    Args:
        group: A group name (string) or a ``Group`` instance.

    Returns:
        bool: Whether the user is a member of the group.
    """
    name = group.name if isinstance(group, Group) else group
    return self.groups.filter(name=name).exists()
Returns whether a user is a member of a certain group. Args: group The name of a group (string) or a group object Returns: Boolean
def install(self):
    """Installation procedure.

    Writes a basic smb.conf and, in 'ad' mode, uses samba-tool to
    provision the domain.  In 'member' mode nothing is done yet.
    """
    domain_settings = DomainSettings.get()
    with root():
        # Start from a clean configuration file.
        if os.path.exists(self.SMBCONF_FILE):
            os.remove(self.SMBCONF_FILE)
        if domain_settings.mode == 'ad':
            # Generate and persist a fresh admin password before provisioning.
            domain_settings.adminpass = make_password(15)
            domain_settings.save()
            run("samba-tool domain provision "
                "--domain='%s' "
                "--workgroup='%s' "
                "--realm='%s' "
                "--use-xattrs=yes "
                "--use-rfc2307 "
                "--server-role='domain controller' "
                "--use-ntvfs "
                "--adminpass='%s'" % (domain_settings.domain,
                                      domain_settings.workgroup,
                                      domain_settings.realm,
                                      domain_settings.adminpass))
            self.smbconf.write()
            # Samba's provisioned krb5.conf becomes the system one.
            shutil.copy2(self.SMB_KRB5CONF_FILE, self.KRB5CONF_FILE)
            run("echo 'nameserver 127.0.0.1' > /etc/resolv.conf")
            run("touch /etc/samba/shares.conf")
        elif domain_settings.mode == 'member':
            # Member-server mode: not implemented yet.
            pass
Installation procedure, it writes basic smb.conf and uses samba-tool to provision the domain
def fingerprint(self):
    """Return a permutation-invariant fingerprint of the whole graph.

    The result does not depend on the ordering of vertex indexes.  The
    chance that two different (molecular) graphs yield the same
    fingerprint is small but not zero.
    """
    if self.num_vertices > 0:
        return sum(self.vertex_fingerprints)
    return np.zeros(20, np.ubyte)
A total graph fingerprint The result is invariant under permutation of the vertex indexes. The chance that two different (molecular) graphs yield the same fingerprint is small but not zero. (See unit tests.)
def get_package_name_in_pipfile(self, package_name, dev=False):
    """Return the name used for *package_name* in the Pipfile, or None.

    Names are compared after PEP 423 normalization, so the lookup is
    insensitive to case/underscore/dash differences.
    """
    section_key = "dev-packages" if dev else "packages"
    section = self.parsed_pipfile.get(section_key, {})
    normalized = pep423_name(package_name)
    for candidate in section.keys():
        if pep423_name(candidate) == normalized:
            return candidate
    return None
Get the equivalent package name in pipfile
def _walk_modules(modules, class_name, path, ignored_formats, args):
    """Yield (module, section, lecture, resource) tuples as a flat stream."""
    module_iter = _iter_modules(modules=modules, class_name=class_name,
                                path=path, ignored_formats=ignored_formats,
                                args=args)
    for module in module_iter:
        for section in module.sections:
            for lecture in section.lectures:
                for resource in lecture.resources:
                    yield module, section, lecture, resource
Helper generator that traverses modules and returns a flattened iterator.
def whitespace_around_keywords(logical_line):
    r"""Avoid extraneous whitespace around keywords.

    Okay: True and False
    E271: True and  False
    E272: True  and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for match in KEYWORD_REGEX.finditer(logical_line):
        before, after = match.groups()
        checks = (
            (1, before,
             "E274 tab before keyword",
             "E272 multiple spaces before keyword"),
            (2, after,
             "E273 tab after keyword",
             "E271 multiple spaces after keyword"),
        )
        for group, gap, tab_msg, multi_msg in checks:
            if '\t' in gap:
                yield match.start(group), tab_msg
            elif len(gap) > 1:
                yield match.start(group), multi_msg
r"""Avoid extraneous whitespace around keywords. Okay: True and False E271: True and  False E272: True  and False E273: True and\tFalse E274: True\tand False
def _fetch_options(self, merge):
    """Build fetch command-line options from the given merge dict.

    Values missing from *merge* fall back to ``self.defaults``; falsy
    values produce no option.
    """
    opts = []
    for option in FETCH_DEFAULTS:
        value = merge.get(option, self.defaults.get(option))
        if value:
            opts.extend(("--%s" % option, str(value)))
    return tuple(opts)
Get the fetch options from the given merge dict.
def get_urls(self):
    """Add the node-move URL (and jsi18n catalog) to this admin's URLs."""
    urls = super(TreeAdmin, self).get_urls()
    if django.VERSION < (1, 10):
        # Older Django: function-based javascript_catalog view.
        from django.views.i18n import javascript_catalog
        jsi18n_url = url(r'^jsi18n/$', javascript_catalog,
                         {'packages': ('treebeard',)})
    else:
        # Django >= 1.10: class-based JavaScriptCatalog view.
        from django.views.i18n import JavaScriptCatalog
        jsi18n_url = url(r'^jsi18n/$',
                         JavaScriptCatalog.as_view(packages=['treebeard']),
                         name='javascript-catalog'
                         )
    new_urls = [
        url('^move/$',
            self.admin_site.admin_view(self.move_node), ),
        jsi18n_url,
    ]
    # Custom URLs first so they take precedence over the inherited ones.
    return new_urls + urls
Adds a url to move nodes to this admin
def get_marathon_task(task_name, inactive=False, completed=False):
    """Return a dictionary describing the named marathon task."""
    return get_service_task('marathon', task_name, inactive, completed)
Get a dictionary describing a named marathon task
def get_type(self, idx):
    """Resolve a type index to its string name.

    :param int idx: index into the type table
    :return: the type name, or an error marker for an invalid index
    :rtype: str
    """
    type_ref = self.get_type_ref(idx)
    if type_ref == -1:
        return "AG:ITI: invalid type"
    return self.get_string(type_ref)
Return the resolved type name based on the index This returns the string associated with the type. :param int idx: :return: the type name :rtype: str
def _aggregate_metrics(self, session_group):
    """Set the group's metrics according to the requested aggregation type."""
    agg_type = self._request.aggregation_type
    if agg_type in (api_pb2.AGGREGATION_AVG, api_pb2.AGGREGATION_UNSET):
        _set_avg_session_metrics(session_group)
    elif agg_type == api_pb2.AGGREGATION_MEDIAN:
        _set_median_session_metrics(session_group,
                                    self._request.aggregation_metric)
    elif agg_type == api_pb2.AGGREGATION_MIN:
        _set_extremum_session_metrics(session_group,
                                      self._request.aggregation_metric, min)
    elif agg_type == api_pb2.AGGREGATION_MAX:
        _set_extremum_session_metrics(session_group,
                                      self._request.aggregation_metric, max)
    else:
        raise error.HParamsError('Unknown aggregation_type in request: %s' %
                                 self._request.aggregation_type)
Sets the metrics of the group based on aggregation_type.
def _request(self, method, body=None, raise_exc=True, headers=None, files=None):
    """Fetch an HTTP response and wrap it in a navigator.

    Raises HALNavigatorError when *raise_exc* is set and the response
    status indicates an error (4xx/5xx).
    """
    headers = headers or {}
    if body and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/json'
    body_is_dict = isinstance(body, dict)
    response = self._core.session.request(
        method,
        self.uri,
        # Dicts go through the json= channel, everything else as raw data.
        data=None if body_is_dict else body,
        json=body if body_is_dict else None,
        files=files,
        headers=headers,
        allow_redirects=False,
    )
    nav = self._create_navigator(response, raise_exc=raise_exc)
    if not raise_exc or response:
        return nav
    raise exc.HALNavigatorError(
        message=response.text,
        status=response.status_code,
        nav=nav,
        response=response,
    )
Fetches HTTP response using the passed http method. Raises HALNavigatorError if response is in the 400-500 range.
def copy(self, new_grab=None):
    """Clone the Response object."""
    clone = self.__class__()
    clone.process_grab(new_grab if new_grab else self.grab)
    # Shallow-copy plain attributes; headers/cookies get their own copies.
    for key in ('status', 'code', 'head', 'body', 'total_time',
                'connect_time', 'name_lookup_time', 'url', 'charset',
                '_unicode_body', '_grab_config'):
        setattr(clone, key, getattr(self, key))
    clone.headers = copy(self.headers)
    clone.cookies = copy(self.cookies)
    return clone
Clone the Response object.
def _get_broadcasts(tables):
    """Get the broadcasts registered between members of *tables*.

    Parameters
    ----------
    tables : sequence of str

    Returns
    -------
    dict of `Broadcast` keyed by (cast_name, onto_name) tuples.
    """
    tables = set(tables)
    casts = tz.keyfilter(
        lambda pair: pair[0] in tables and pair[1] in tables, _BROADCASTS)
    covered = set(tz.concat(casts.keys()))
    if tables - covered:
        raise ValueError('Not enough links to merge all tables.')
    return casts
Get the broadcasts associated with a set of tables. Parameters ---------- tables : sequence of str Table names for which broadcasts have been registered. Returns ------- casts : dict of `Broadcast` Keys are tuples of strings like (cast_name, onto_name).
def get(self, request):
    """Handle GET: render the browse template, paginating if configured."""
    sections = self.generate_sections()
    pageUpper = pageLower = None
    if self.paginated:
        paginator = Paginator(sections, 25)
        page = request.GET.get('page')
        try:
            sections = paginator.page(page)
        except PageNotAnInteger:
            sections = paginator.page(1)
        except EmptyPage:
            sections = paginator.page(paginator.num_pages)
        pageUpper = int(paginator.num_pages) / 2
        try:
            pageLower = int(page) / 2
        except TypeError:
            # No page parameter supplied.
            pageLower = -999
    context = {
        'sections': sections,
        'page_title': self.generate_page_title(),
        'browse_type': self.browse_type,
        'pageUpper': pageUpper,
        'pageLower': pageLower
    }
    return render(request, self.template_path, context)
View for HTTP GET method. Returns template and context from generate_page_title and generate_sections to populate template.
def mouse_press_event(self, x, y, button):
    """Report left/right mouse button presses together with the position."""
    labels = {1: "Left", 2: "Right"}
    if button in labels:
        print("%s mouse button pressed @" % labels[button], x, y)
Reports left and right mouse button presses + position
def add_metric_group_definition(self, definition):
    """Add a faked metric group definition.

    Parameters:
      definition (FakedMetricGroupDefinition): the definition to register.

    Raises:
      ValueError: A metric group definition with this name already exists.
    """
    assert isinstance(definition, FakedMetricGroupDefinition)
    name = definition.name
    if name in self._metric_group_defs:
        raise ValueError("A metric group definition with this name "
                         "already exists: {}".format(name))
    self._metric_group_defs[name] = definition
    self._metric_group_def_names.append(name)
Add a faked metric group definition. The definition will be used: * For later addition of faked metrics responses. * For returning the metric-group-info objects in the response of the Create Metrics Context operations. For defined metric groups, see chapter "Metric groups" in the :term:`HMC API` book. Parameters: definition (:class:`~zhmcclient.FakedMetricGroupDefinition`): Definition of the metric group. Raises: ValueError: A metric group definition with this name already exists.
def as_dict(self):
    """Return a JSON-serializable dict representation of this object."""
    result = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "structure": self.structure.as_dict(),
        "frequencies": list(self.frequencies),
        "densities": list(self.densities),
        "pdos": [],
    }
    if len(self.pdos) > 0:
        # One projected-DOS list per site in the structure.
        result["pdos"] = [list(self.pdos[site]) for site in self.structure]
    return result
Json-serializable dict representation of CompletePhononDos.
def param_redirect(request, viewname, *args):
    """Redirect to *viewname*, carrying over URL query parameters if any."""
    url = reverse(viewname, PARAMS_URL_CONF, args)
    params = request.GET.urlencode().split('&')
    if hasattr(request, 'cparam'):
        params.extend('{0}={1}'.format(k, v)
                      for k, v in request.cparam.items())
    query = '&'.join(p for p in params if p != '')
    if query:
        return HttpResponseRedirect('{0}?{1}'.format(url, query))
    return HttpResponseRedirect(url)
Redirect and keep URL parameters if any.
def _close_pidfile(self): if self._pid_fd is not None: os.close(self._pid_fd) try: os.remove(self.pidfile) except OSError as ex: if ex.errno != errno.ENOENT: raise
Closes and removes the PID file.
def ToOBMag(self, wave, flux, area=None):
    """Convert ``photlam`` flux to ``obmag``.

    obmag = -2.5 * log10(bin_width * area * photlam), computed here via
    the natural log since 2.5 / ln(10) ~= 1.085736.  Bin widths are
    derived from the input wavelengths used as bin centers.
    """
    if not area:
        area = refs.PRIMARY_AREA
    bin_edges = binning.calculate_bin_edges(wave)
    bin_widths = binning.calculate_bin_widths(bin_edges)
    arg = flux * bin_widths * area
    return -1.085736 * N.log(arg)
Convert to ``obmag``. .. math:: \\textnormal{obmag} = -2.5 \\; \\log(\\delta \\lambda \\; \\times \\; \\textnormal{area} \\; \\times \\; \\textnormal{photlam}) where :math:`\\delta \\lambda` represent bin widths derived from :func:`~pysynphot.binning.calculate_bin_edges` and :func:`~pysynphot.binning.calculate_bin_widths`, using the input wavelength values as bin centers. Parameters ---------- wave, flux : number or array_like Wavelength and flux values to be used for conversion. area : number or `None` Telescope collecting area. If not given, default value from :ref:`pysynphot-refdata` is used. Returns ------- result : number or array_like Converted values.
def serialize_to_file(root_processor, value, xml_file_path, encoding='utf-8',
                      indent=None):
    """Serialize *value* to an XML file using the root processor.

    :param root_processor: Root processor of the XML document.
    :param value: Value to serialize.
    :param xml_file_path: Destination file path.
    :param encoding: Encoding of the file.
    :param indent: Optional indentation string for pretty printing.
    """
    with open(xml_file_path, 'w', encoding=encoding) as xml_file:
        xml_file.write(serialize_to_string(root_processor, value, indent))
Serialize the value to an XML file using the root processor. :param root_processor: Root processor of the XML document. :param value: Value to serialize. :param xml_file_path: Path to the XML file to which the serialized value will be written. :param encoding: Encoding of the file. :param indent: If specified, then the XML will be formatted with the specified indentation.
def build_pipelines_lsst_io_configs(*, project_name, copyright=None):
    """Build a dict of Sphinx configurations for the pipelines_lsst_io
    project's ``conf.py``.

    Parameters
    ----------
    project_name : str
        Name of the project.
    copyright : str, optional
        Copyright statement.
        # NOTE(review): this parameter is currently unused; a default
        # statement is always generated below -- confirm intent.

    Returns
    -------
    dict
        Configurations to merge into the ``conf.py`` global namespace.
    """
    # Sphinx can hit the default recursion limit on large doc trees.
    sys.setrecursionlimit(2000)
    c = {}
    c = _insert_common_sphinx_configs(
        c,
        project_name=project_name)
    c = _insert_html_configs(
        c, project_name=project_name, short_project_name=project_name)
    c = _insert_extensions(c)
    c = _insert_intersphinx_mapping(c)
    c = _insert_automodapi_configs(c)
    c = _insert_matplotlib_configs(c)
    c = _insert_graphviz_configs(c)
    c = _insert_eups_version(c)
    date = datetime.datetime.now()
    c['today'] = date.strftime('%Y-%m-%d')
    c['copyright'] = '2015-{year} LSST contributors'.format(
        year=date.year)
    c['todo_include_todos'] = False
    # Files that should never be picked up as documentation sources.
    c['exclude_patterns'] = [
        'README.rst',
        '_build',
        'releases/note-source/*.rst',
        'releases/tickets-source/*.rst',
        'ups',
        '.pyvenv',
        '.github',
        'home',
    ]
    c = _insert_rst_epilog(c)
    c = _insert_jinja_configuration(c)
    return c
Build a `dict` of Sphinx configurations that populate the ``conf.py`` of the main pipelines_lsst_io Sphinx project for LSST Science Pipelines documentation. The ``conf.py`` file can ingest these configurations via:: from documenteer.sphinxconfig.stackconf import \ build_pipelines_lsst_io_configs _g = globals() _g.update(build_pipelines_lsst_io_configs( project_name='LSST Science Pipelines') You can subsequently customize the Sphinx configuration by directly assigning global variables, as usual in a Sphinx ``conf.py``, e.g.:: copyright = '2016 Association of Universities for ' 'Research in Astronomy, Inc.' Parameters ---------- project_name : `str` Name of the project copyright : `str`, optional Copyright statement. Do not include the 'Copyright (c)' string; it'll be added automatically. Returns ------- c : dict Dictionary of configurations that should be added to the ``conf.py`` global namespace via:: _g = globals() _g.update(c)
def wait(self, *, block=True, timeout=None):
    """Signal that a party has reached the barrier.

    Parameters:
      block(bool): Whether to block waiting for the other parties.
      timeout(int): Max milliseconds to wait for the barrier to clear.

    Returns:
      bool: Whether the barrier has been reached by all parties.
    """
    if not self.backend.decr(self.key, 1, 1, self.ttl):
        # Last party in: notify everyone waiting on the barrier.
        self.backend.wait_notify(self.key_events, self.ttl)
        return True
    if block:
        return self.backend.wait(self.key_events, timeout)
    return False
Signal that a party has reached the barrier. Warning: Barrier blocking is currently only supported by the stub and Redis backends. Warning: Re-using keys between blocking calls may lead to undefined behaviour. Make sure your barrier keys are always unique (use a UUID). Parameters: block(bool): Whether or not to block while waiting for the other parties. timeout(int): The maximum number of milliseconds to wait for the barrier to be cleared. Returns: bool: Whether or not the barrier has been reached by all parties.
def get_graphql_schema_from_orientdb_schema_data(schema_data,
                                                 class_to_field_type_overrides=None,
                                                 hidden_classes=None):
    """Construct a GraphQL schema from OrientDB schema data.

    Returns a (GraphQLSchema, type-equivalence-hints dict) tuple.
    """
    if class_to_field_type_overrides is None:
        class_to_field_type_overrides = {}
    if hidden_classes is None:
        hidden_classes = set()
    graph = SchemaGraph(schema_data)
    return get_graphql_schema_from_schema_graph(graph,
                                                class_to_field_type_overrides,
                                                hidden_classes)
Construct a GraphQL schema from an OrientDB schema. Args: schema_data: list of dicts describing the classes in the OrientDB schema. The following format is the way the data is structured in OrientDB 2. See the README.md file for an example of how to query this data. Each dict has the following string fields: - name: string, the name of the class. - superClasses (optional): list of strings, the name of the class's superclasses. - superClass (optional): string, the name of the class's superclass. May be used instead of superClasses if there is only one superClass. Used for backwards compatibility with OrientDB. - customFields (optional): dict, string -> string, data defined on the class instead of instances of the class. - abstract: bool, true if the class is abstract. - properties: list of dicts, describing the class's properties. Each property dictionary has the following string fields: - name: string, the name of the property. - type: int, builtin OrientDB type ID of the property. See schema_properties.py for the mapping. - linkedType (optional): int, if the property is a collection of builtin OrientDB objects, then it indicates their type ID. - linkedClass (optional): string, if the property is a collection of class instances, then it indicates the name of the class. If class is an edge class, and the field name is either 'in' or 'out', then it describes the name of an endpoint of the edge. - defaultValue: string, the textual representation of the default value for the property, as returned by OrientDB's schema introspection code, e.g., '{}' for the embedded set type. Note that if the property is a collection type, it must have a default value. class_to_field_type_overrides: optional dict, class name -> {field name -> field type}, (string -> {string -> GraphQLType}). Used to override the type of a field in the class where it's first defined and all the class's subclasses. hidden_classes: optional set of strings, classes to not include in the GraphQL schema. 
Returns: tuple of (GraphQL schema object, GraphQL type equivalence hints dict). The tuple is of type (GraphQLSchema, {GraphQLObjectType -> GraphQLUnionType}).
def has_publish_permission(self, request, obj=None):
    """Determine whether the requesting user may publish *obj*.

    :param request: Django request object.
    :param obj: Object being published, or None.
    :return: Boolean.
    """
    # Automatic publishing leaves no manual publish action to permit.
    if is_automatic_publishing_enabled(self.model):
        return False
    user = request.user
    if not user.is_active:
        return False
    if user.is_superuser:
        return True
    app_label = self.opts.app_label
    if user.has_perm('%s.can_publish' % app_label):
        return True
    # Republish permission only applies to objects already published.
    if user.has_perm('%s.can_republish' % app_label) \
            and obj and getattr(obj, 'has_been_published', False):
        return True
    return False
Determines if the user has permissions to publish. :param request: Django request object. :param obj: The object to determine if the user has permissions to publish. :return: Boolean.
def azureContainerSAS(self, *args, **kwargs):
    """Get a Shared-Access-Signature string for an Azure Blob container.

    The ``level`` parameter can be ``read-write`` or ``read-only``; with
    ``read-write`` the container is created if it does not exist.
    This method is ``stable``.
    """
    func_info = self.funcinfo["azureContainerSAS"]
    return self._makeApiCall(func_info, *args, **kwargs)
Get Shared-Access-Signature for Azure Container Get a shared access signature (SAS) string for use with a specific Azure Blob Storage container. The `level` parameter can be `read-write` or `read-only` and determines which type of credentials are returned. If level is read-write, it will create the container if it doesn't already exist. This method gives output: ``v1/azure-container-response.json#`` This method is ``stable``
def css_number(self, path, default=NULL, ignore_spaces=False, smart=False,
               make_int=True):
    """Find a number in the normalized text of the node matching *path*.

    Returns *default* (when given) if the node or number is not found.
    """
    try:
        text = self.css_text(path, smart=smart)
        return find_number(text, ignore_spaces=ignore_spaces,
                           make_int=make_int)
    except IndexError:
        # NULL sentinel means "no default supplied": propagate the error.
        if default is NULL:
            raise
        return default
Find number in normalized text of node which matches the given css path.
def predict(self, date, obs_code=568):
    """Use the BK predict method to compute the location of the source on
    the given date.

    Side effects: sets ``self.coordinate``, ``self.dra``, ``self.ddec``,
    ``self.pa`` and ``self.date``.
    """
    time = Time(date, scale='utc', precision=6)
    jd = ctypes.c_double(time.jd)
    # The C predict() returns a pointer to 5 doubles; from the usage
    # below these are (ra, dec, dra, ddec, pa) -- TODO confirm ordering
    # against the orbfit documentation.
    self.orbfit.predict.restype = ctypes.POINTER(ctypes.c_double * 5)
    self.orbfit.predict.argtypes = [ctypes.c_char_p, ctypes.c_double,
                                    ctypes.c_int]
    predict = self.orbfit.predict(ctypes.c_char_p(self.abg.name), jd,
                                  ctypes.c_int(obs_code))
    self.coordinate = coordinates.SkyCoord(predict.contents[0],
                                           predict.contents[1],
                                           unit=(units.degree,
                                                 units.degree))
    self.dra = predict.contents[2]
    self.ddec = predict.contents[3]
    self.pa = predict.contents[4]
    self.date = str(time)
Use the BK predict method to compute the location of the source on the given date.
def average_datetimes(dt_list):
    """Average a series of datetime objects.

    .. note:: Assumes all datetimes are naive and in the same time zone.

    Args:
        dt_list (iterable): Datetime objects to average.

    Returns:
        Average datetime as a datetime object.
    """
    if sys.version_info < (3, 3):
        # Python < 3.3 has no datetime.timestamp; fall back to mktime.
        import time

        def timestamp_func(dt):
            return time.mktime(dt.timetuple())
    else:
        timestamp_func = datetime.timestamp
    stamps = [timestamp_func(dt) for dt in dt_list]
    return datetime.fromtimestamp(sum(stamps) / len(stamps))
Average a series of datetime objects. .. note:: This function assumes all datetime objects are naive and in the same time zone (UTC). Args: dt_list (iterable): Datetime objects to average Returns: Average datetime as a datetime object
def marginalize_out(node_indices, tpm):
    """Marginalize the given nodes out of a TPM.

    Args:
        node_indices (list[int]): Indices of nodes to marginalize out.
        tpm (np.ndarray): The TPM.

    Returns:
        np.ndarray: TPM with the same number of dimensions, with the
        nodes marginalized out (their axes reduced to size 1).
    """
    # Divide by the number of states summed over so probabilities stay
    # normalized.
    num_states = np.array(tpm.shape)[list(node_indices)].prod()
    return tpm.sum(tuple(node_indices), keepdims=True) / num_states
Marginalize out nodes from a TPM. Args: node_indices (list[int]): The indices of nodes to be marginalized out. tpm (np.ndarray): The TPM to marginalize the node out of. Returns: np.ndarray: A TPM with the same number of dimensions, with the nodes marginalized out.
def _get_per_location_glob(tasks, outputs, regexes):
    """Build a glob listing existing output paths.

    Deduces, from the datehour groups the regexes match in each output
    path, which character positions hold date components and replaces
    them with ``[0-9]`` wildcards; returns the parent directory of the
    resulting glob.
    """
    paths = [o.path for o in outputs]
    # One regex match per output path; every path must contain the
    # datehour pattern.
    matches = [r.search(p) for r, p in zip(regexes, paths)]
    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
    n_groups = len(matches[0].groups())
    # For each regex group, take the most common (start, end) span across
    # all matched paths.
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0]
                 for i in range(1, n_groups + 1)]
    glob = list(paths[0])
    for start, end in positions:
        # Replace the date characters with single-digit wildcards.
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # Chop off the filename part; only the directory glob is needed.
    return ''.join(glob).rsplit('/', 1)[0]
Builds a glob listing existing output paths. Esoteric reverse engineering, but worth it given that (compared to an equivalent contiguousness guarantee by naive complete() checks) requests to the filesystem are cut by orders of magnitude, and users don't even have to retrofit existing tasks anyhow.
def reader(stream):
    """Yield Items parsed from a stream containing a JSON array."""
    string = stream.read()
    decode = json.JSONDecoder().raw_decode
    # Skip past the opening bracket.
    index = START.match(string, 0).end()
    while index < len(string):
        primitive, end = decode(string, index)
        item = Item()
        item.primitive = primitive
        yield item
        # Skip the separator (or closing bracket) after each element.
        index = END.match(string, end).end()
Read Items from a stream containing a JSON array.
def has_files(the_path):
    """Return True if *the_path* contains any files, searching recursively."""
    p = Path(the_path)
    try:
        for _ in p.walkfiles():
            return True
    except OSError as ex:
        # A nonexistent path simply has no files.
        if ex.errno == errno.ENOENT:
            return False
        raise
    return False
Given a path, returns whether the path has any files in it or any subfolders. Works recursively.
def search_nn(self, point, dist=None):
    """Search the node nearest to the given point.

    *point* must be an actual point, not a node; *dist* is an optional
    distance function taking two points.  Returns a (node, distance)
    tuple, or None for an empty tree.
    """
    nearest = self.search_knn(point, 1, dist)
    return next(iter(nearest), None)
Search the nearest node of the given point point must be an actual point, not a node. The nearest node to the point is returned. If a location of an actual node is used, the Node with this location will be returned (not its neighbor). dist is a distance function, expecting two points and returning a distance value. Distance values can be any comparable type. The result is a (node, distance) tuple.
def polarity_as_string(self, add_colour=True):
    """Get the polarity of this interface as a string.

    @param add_colour If True, ANSI colour codes are added to the string.
    @return A string describing the polarity of this interface.
    """
    with self._mutex:
        if self.polarity == self.PROVIDED:
            result = 'Provided', ['reset']
        elif self.polarity == self.REQUIRED:
            result = 'Required', ['reset']
        if not add_colour:
            return result[0]
        return (utils.build_attr_string(result[1], supported=add_colour) +
                result[0] +
                utils.build_attr_string('reset', supported=add_colour))
Get the polarity of this interface as a string. @param add_colour If True, ANSI colour codes will be added to the string. @return A string describing the polarity of this interface.
def fit_row(connection, row, unique_keys):
    """Ensure the current table has a column for every key in *row*.

    Missing columns are appended to the table definition; if the table
    is still pending it is created, otherwise the new columns are added
    to the live table.
    """
    added = []
    for column_name, column_value in list(row.items()):
        column = sqlalchemy.Column(column_name,
                                   get_column_type(column_value))
        if column_name not in list(_State.table.columns.keys()):
            added.append(column)
            _State.table.append_column(column)
    if _State.table_pending:
        # Table not created yet: creating it covers the new columns.
        create_table(unique_keys)
        return
    for column in added:
        add_column(connection, column)
Takes a row and checks to make sure it fits in the columns of the current table. If it does not fit, adds the required columns.
def fetch_coords(self, query):
    """Pull down coordinate data from the endpoint."""
    coord_query = query.add_query_parameter(req='coord')
    response = self.get_query(coord_query)
    return self._parse_messages(response.content)
Pull down coordinate data from the endpoint.
def element_should_be_disabled(self, locator, loglevel='INFO'):
    """Verify that the element identified by *locator* is disabled.

    Key attributes for arbitrary elements are `id` and `name`.  See
    `introduction` for details about locating elements.
    """
    element = self._element_find(locator, True, True)
    if element.is_enabled():
        self.log_source(loglevel)
        raise AssertionError("Element '%s' should be disabled "
                             "but did not" % locator)
    self._info("Element '%s' is disabled ." % locator)
Verifies that element identified with locator is disabled. Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
def del_functions(self, names):
    """Remove the named functions from the function library.

    Because the library is shared, all copies and slices of this array
    also lose the functions.

    Parameters
    ----------
    names : (list of) string(s)
        Name or list of names of the functions to remove.
    """
    name_list = [names] if isinstance(names, string_types) else names
    for name in name_list:
        self._functionlib.pop(name)
Removes the specified function names from the function library. Functions are removed from this instance of the array; all copies and slices of this array will also have the functions removed. Parameters ---------- names : (list of) string(s) Name or list of names of the functions to remove.
def _check_pool_attr(self, attr, req_attr=None):
    """Check and normalize pool attributes.

    Coerces the default prefix length attributes to int and validates
    their ranges (IPv4: 1-32, IPv6: 1-128).

    Raises:
        NipapValueError: if a default prefix length is out of range or
            not an integer.
    """
    if req_attr is None:
        req_attr = []
    self._check_attr(attr, req_attr, _pool_attrs)
    # NOTE(review): the IPv4 check uses `is not None` while the IPv6
    # check uses plain truthiness -- confirm whether that asymmetry is
    # intentional.
    if attr.get('ipv4_default_prefix_length') is not None:
        try:
            attr['ipv4_default_prefix_length'] = \
                int(attr['ipv4_default_prefix_length'])
            # Out-of-range lengths reuse the ValueError path below.
            if (attr['ipv4_default_prefix_length'] > 32 or
                    attr['ipv4_default_prefix_length'] < 1):
                raise ValueError()
        except ValueError:
            raise NipapValueError('Default IPv4 prefix length must be an integer between 1 and 32.')
    if attr.get('ipv6_default_prefix_length'):
        try:
            attr['ipv6_default_prefix_length'] = \
                int(attr['ipv6_default_prefix_length'])
            if (attr['ipv6_default_prefix_length'] > 128 or
                    attr['ipv6_default_prefix_length'] < 1):
                raise ValueError()
        except ValueError:
            raise NipapValueError('Default IPv6 prefix length must be an integer between 1 and 128.')
Check pool attributes.
def do_GET(self):
    """Serve GET requests.

    Returns a generated HTTP manifest when the URL ends with
    "http_manifest.json"; otherwise delegates to the inherited handler.
    """
    if not self.path.endswith("http_manifest.json"):
        super(DtoolHTTPRequestHandler, self).do_GET()
        return
    try:
        manifest = self.generate_http_manifest()
        self.send_response(200)
        self.end_headers()
        self.wfile.write(manifest)
    except dtoolcore.DtoolCoreTypeError:
        self.send_response(400)
        self.end_headers()
Override inherited do_GET method. Include logic for returning a http manifest when the URL ends with "http_manifest.json".
def installedApp(self):
    """Identify the proper application to launch, given the configuration.

    The result is resolved once from the run configuration and cached on
    the instance.
    """
    try:
        return self._installedApp
    except AttributeError:
        # First access: the cache attribute does not exist yet.  The
        # original bare ``except:`` would also have hidden unrelated
        # errors such as KeyboardInterrupt.
        self._installedApp = runConfigs.get()
        return self._installedApp
identify the propery application to launch, given the configuration
def iter_ensure_instance(iterable, types):
    """Iterate over *iterable* and check each item's type.

    >>> iter_ensure_instance([1, 2, 3], int)

    Raises TypeError (via ensure_instance) when *iterable* is not
    iterable or an item does not match *types*.
    """
    ensure_instance(iterable, Iterable)
    # A plain loop: the list the original comprehension built was
    # immediately discarded, so avoid allocating it.
    for item in iterable:
        ensure_instance(item, types)
Iterate over object and check each item type >>> iter_ensure_instance([1,2,3], [str]) Traceback (most recent call last): TypeError: >>> iter_ensure_instance([1,2,3], int) >>> iter_ensure_instance(1, int) Traceback (most recent call last): TypeError:
def is_equal(self, other):
    """Whether two strings are equal.

    ``None`` and the empty string are treated as equal to each other.
    """
    other = StringCell.coerce(other)
    mine, theirs = self.value, other.value
    if mine in (None, '') and theirs in (None, ''):
        return True
    return mine == theirs
Whether two strings are equal
def _handle_request_noblock(self): try: request, client_address = self.get_request() except socket.error: return if self.verify_request(request, client_address): try: self.process_request(request, client_address) except: self.handle_error(request, client_address) self.shutdown_request(request)
Handle one request, without blocking.
def __pathToTuple(self, path):
    """Convert a directory or file path to its tuple identifier.

    Parameters
    ----------
    path : str
        Path like ``/``, ``/directory``, ``/directory/`` or
        ``/directory/filename``.

    Returns
    -------
    tuple
        ``(directory, filename)``; ``filename`` is ``None`` for a
        directory path and both fields are ``None`` for the root path.

    Raises
    ------
    YTFS.PathConvertError
        When an invalid path is given.
    """
    # More than two slashes means a path deeper than /dir/file.
    if not path or path.count('/') > 2:
        raise YTFS.PathConvertError("Bad path given")
    try:
        split = path.split('/')
    except (AttributeError, TypeError):
        raise TypeError("Path has to be string")
    if split[0]:
        # A valid absolute path yields an empty first element.
        raise YTFS.PathConvertError("Path needs to start with '/'")
    del split[0]
    try:
        # Drop the empty element left by a trailing slash.
        if not split[-1]:
            split.pop()
    except IndexError:
        raise YTFS.PathConvertError("Bad path given")
    if len(split) > 2:
        raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2")
    try:
        d = split[0]
    except IndexError:
        d = None
    try:
        f = split[1]
    except IndexError:
        f = None
    if not d and f:
        # A filename without a directory component is impossible.
        raise YTFS.PathConvertError("Bad path given")
    return (d, f)
Convert directory or file path to its tuple identifier. Parameters ---------- path : str Path to convert. It can look like /, /directory, /directory/ or /directory/filename. Returns ------- tup_id : tuple Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename` will be ``None``. Raises ------ YTFS.PathConvertError When invalid path is given.
def _get_template(querystring_key, mapping): default = None try: template_and_keys = mapping.items() except AttributeError: template_and_keys = mapping for template, key in template_and_keys: if key is None: key = PAGE_LABEL default = template if key == querystring_key: return template return default
Return the template corresponding to the given ``querystring_key``.
def wait(self, timeout=None):
    """Wait (up to *timeout*) for the client to stop its loop.

    Returns True if the client has stopped.
    """
    stopped = self.__stopped
    stopped.wait(timeout)
    return stopped.is_set()
Waits for the client to stop its loop
def ExecuteCommandFromClient(command):
    """Executes one of the predefined commands.

    Args:
        command: An `ExecuteRequest` object.

    Yields:
        `rdf_client_action.ExecuteResponse` objects.
    """
    stdout, stderr, status, time_used = client_utils_common.Execute(
        command.cmd, command.args, command.time_limit)
    # Cap each output stream at 10 MiB.
    limit = 10 * 1024 * 1024
    yield rdf_client_action.ExecuteResponse(
        request=command,
        stdout=stdout[:limit],
        stderr=stderr[:limit],
        exit_status=status,
        time_used=int(1e6 * time_used))
Executes one of the predefined commands. Args: command: An `ExecuteRequest` object. Yields: `rdf_client_action.ExecuteResponse` objects.
def can_process_matrix(entry, matrix_tags):
    """Check whether a matrix entry matches the given matrix tags.

    Args:
        entry (dict): matrix item (in yaml).
        matrix_tags (list): --matrix-tags given by the user on the
            command line.

    Returns:
        bool: True when no tags were requested, or when the entry
        carries at least one of the requested tags.
    """
    if not matrix_tags:
        return True
    # any() short-circuits on the first match instead of counting all
    # matches like the original loop did.
    entry_tags = entry.get('tags', [])
    return any(tag in entry_tags for tag in matrix_tags)
Check given matrix tags to be in the given list of matric tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user in command line. Returns: bool: True when matrix entry can be processed.
def main(self) -> None:
    """Generate the config file interactively and write it to disk."""
    path = ask_path("where should the config be stored?", ".snekrc")
    conf = configobj.ConfigObj()
    tools = self.get_tools()
    for tool in tools:
        # Each tool name doubles as a method producing its config section.
        conf[tool] = getattr(self, tool)()
    conf.filename = path
    conf.write()
    print("Written config file!")
    if "pylint" in tools:
        # pylint needs its own rcfile generated separately.
        print(
            "Please also run `pylint --generate-rcfile` to complete setup")
The main function for generating the config file
def sort_timeplaceentries(self, timeentry, placeentry) -> Tuple[Any, Any]:
    """Return the given entries ordered per the selected `timeaxis`.

    With ``timeaxis`` truthy the place entry comes first; otherwise the
    time entry does.
    """
    ordered = ((placeentry, timeentry) if self._timeaxis
               else (timeentry, placeentry))
    return ordered
Return a |tuple| containing the given `timeentry` and `placeentry` sorted in agreement with the currently selected `timeaxis`. >>> from hydpy.core.netcdftools import NetCDFVariableBase >>> from hydpy import make_abc_testable >>> NCVar = make_abc_testable(NetCDFVariableBase) >>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1) >>> ncvar.sort_timeplaceentries('time', 'place') ('place', 'time') >>> ncvar = NetCDFVariableDeep('test', isolate=False, timeaxis=0) >>> ncvar.sort_timeplaceentries('time', 'place') ('time', 'place')
def get_seconds(self):
    """Get total seconds from the parsed raw time.

    :return: Seconds in time (float).
    """
    parsed = self.parse_hh_mm_ss()
    return parsed.second + parsed.minute * 60.0 + parsed.hour * 3600.0
Gets seconds from raw time :return: Seconds in time
def request_password_reset(self, user, base_url):
    """Regenerate the user's password link and send the change message."""
    user.generate_password_link()
    # Persist the new link before notifying anyone.
    db.session.add(user)
    db.session.commit()
    events.password_change_requested_event.send(user)
    self.send_password_change_message(user, base_url)
Regenerate password link and send message
def tokens(self, instance):
    """Display the currently acceptable TOTP tokens for *instance*.

    One token is produced for every drift offset within the instance's
    tolerance window.
    """
    if not instance.pk:
        return "-"
    totp = TOTP(instance.bin_key, instance.step, instance.t0,
                instance.digits)
    acceptable = []
    for offset in range(-instance.tolerance, instance.tolerance + 1):
        totp.drift = instance.drift + offset
        acceptable.append(totp.token())
    return " ".join("%s" % token for token in acceptable)
Just display current acceptable TOTP tokens
def seq_view_shot(self, ):
    """View the shot that is selected in the table view of the sequence
    page.

    :returns: None
    :rtype: None
    :raises: None
    """
    # Nothing to do if no sequence is active.
    if not self.cur_seq:
        return
    i = self.seq_shot_tablev.currentIndex()
    item = i.internalPointer()
    if item:
        shot = item.internal_data()
        self.view_shot(shot)
View the shot that is selected in the table view of the sequence page :returns: None :rtype: None :raises: None
def transformer_wikitext103_l4k_memory_v0():
    """HParams for training languagemodel_wikitext103_l4k with memory."""
    hparams = transformer_wikitext103_l4k_v0()
    hparams.split_targets_chunk_length = 64
    hparams.split_targets_max_chunks = 64
    hparams.split_targets_strided_training = True
    hparams.add_hparam("memory_type", "transformer_xl")
    # Scale the batch size so that ~4096 target tokens are processed per
    # batch after the targets are split into chunks.
    target_tokens_per_batch = 4096
    hparams.batch_size = int(target_tokens_per_batch * (
        hparams.max_length / hparams.split_targets_chunk_length))
    # Relative attention replaces absolute position embeddings.
    hparams.pos = None
    hparams.self_attention_type = "dot_product_relative"
    hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
    hparams.add_hparam("unconditional", True)
    hparams.add_hparam("recurrent_memory_batch_size", 0)
    hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length)
    return hparams
HParams for training languagemodel_wikitext103_l4k with memory.
def get_parsed_context(pipeline, context_in_string):
    """Execute the pipeline's context parser, if one is specified.

    Dynamically loads the module named by the pipeline's
    ``context_parser`` key and runs its ``get_parsed_context`` function
    on ``context_in_string``. Without a parser, an empty context is
    returned.

    Args:
        pipeline: dict. Pipeline object.
        context_in_string: string. Argument string used to initialize context.

    Returns:
        pypyr.context.Context() instance.

    Raises:
        AttributeError: parser specified on pipeline missing
            get_parsed_context function.
    """
    logger.debug("starting")
    if 'context_parser' not in pipeline:
        logger.debug("pipeline does not have custom context parser. Using "
                     "empty context.")
        logger.debug("done")
        return pypyr.context.Context()

    parser_name = pipeline['context_parser']
    logger.debug(f"context parser found: {parser_name}")
    parser = pypyr.moduleloader.get_module(parser_name)

    try:
        logger.debug(f"running parser {parser_name}")
        parsed = parser.get_parsed_context(context_in_string)
        logger.debug(f"step {parser_name} done")
        if parsed is None:
            logger.debug(f"{parser_name} returned None. Using "
                         "empty context instead")
            return pypyr.context.Context()
        return pypyr.context.Context(parsed)
    except AttributeError:
        logger.error(f"The parser {parser_name} doesn't have a "
                     "get_parsed_context(context) function.")
        raise
Execute get_parsed_context handler if specified. Dynamically load the module specified by the context_parser key in pipeline dict and execute the get_parsed_context function on that module. Args: pipeline: dict. Pipeline object. context_in_string: string. Argument string used to initialize context. Returns: pypyr.context.Context() instance. Raises: AttributeError: parser specified on pipeline missing get_parsed_context function.
def get_version(version):
    """Build a human-readable version string from a VERSION tuple.

    ``(1, 2)`` -> ``"1.2"``; ``(1, 2, 3)`` -> ``"1.2.3"``;
    ``(1, 2, "beta")`` -> ``"1.2_beta"``. A third element of ``None``
    is treated as absent.
    """
    major, minor = version[0], version[1]
    micro = version[2] if len(version) > 2 else None
    if micro is None:
        return "%s.%s" % (major, minor)
    # int micro releases use a dot, pre-release tags use an underscore
    separator = "." if isinstance(micro, int) else "_"
    return "%s.%s%s%s" % (major, minor, separator, micro)
Dynamically calculate the version based on VERSION tuple.
def max_brightness(self):
    """Returns the maximum allowable brightness value.

    The attribute handle is cached on the instance so repeated reads
    avoid re-opening the ``max_brightness`` attribute.
    """
    handle, value = self.get_cached_attr_int(
        self._max_brightness, 'max_brightness')
    self._max_brightness = handle
    return value
Returns the maximum allowable brightness value.
def remove_root_log(self, log_id):
    """Removes a root log.

    arg:    log_id (osid.id.Id): the ``Id`` of a log
    raise:  NotFound - ``log_id`` is not a root
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # prefer the catalog session when one is available
    if self._catalog_session is None:
        return self._hierarchy_session.remove_root(id_=log_id)
    return self._catalog_session.remove_root_catalog(catalog_id=log_id)
Removes a root log. arg: log_id (osid.id.Id): the ``Id`` of a log raise: NotFound - ``log_id`` is not a root raise: NullArgument - ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def generate_id(self):
    """Generate a fresh id.

    With ``use_repeatable_ids`` enabled, ids come from a monotonically
    increasing counter (deterministic output); otherwise a random UUID
    string is returned.
    """
    if not self.use_repeatable_ids:
        return str(uuid4())
    self.repeatable_id_counter += 1
    return 'autobaked-{}'.format(self.repeatable_id_counter)
Generate a fresh id
def validate_on_submit(self):
    """Extend WTForms validation to also run the attached schema.

    After base validation, field data is passed through the schema's
    ``process``; resulting errors are recorded on the form and the
    (possibly transformed) values are written back into the fields.
    """
    valid = FlaskWtf.validate_on_submit(self)
    if not self._schema or not self.is_submitted():
        return valid
    data = {name: self._fields[name].data for name in self._fields}
    result = self.schema.process(data, context=self._force_context)
    self.set_errors(result)
    # write processed values back so the form reflects any coercion
    for name in data:
        self._fields[name].data = data[name]
    return valid and not bool(self.errors)
Extend validate on submit to allow validation with schema
def get_selected_tab(self):
    """Return the tab specified by the GET request parameter.

    Returns None when the parameter is absent, malformed, or addressed
    to a different tab group.
    """
    selected = self.request.GET.get(self.param_name, None)
    if not selected:
        return None
    try:
        tab_group, tab_name = selected.split(SEPARATOR)
    except ValueError:
        # parameter did not contain exactly one separator
        return None
    if tab_group != self.get_id():
        return None
    self._selected = self.get_tab(tab_name)
    return self._selected
Returns the tab specific by the GET request parameter. In the event that there is no GET request parameter, the value of the query parameter is invalid, or the tab is not allowed/enabled, the return value of this function is None.
def complement(self):
    """
    Return the complement of the interval.
    Refer section 3.1 augmented for managing strides.

    :return: the complemented StridedInterval
    """
    # math.gcd replaces fractions.gcd, which was deprecated in Python 3.5
    # and removed in Python 3.9. Both operands are non-negative here, so
    # the results are identical.
    from math import gcd

    if self.is_empty:
        return StridedInterval.top(self.bits)
    if self.is_top:
        return StridedInterval.empty(self.bits)
    # The complement wraps from upper_bound + 1 around to lower_bound - 1.
    y_plus_1 = StridedInterval._modular_add(self.upper_bound, 1, self.bits)
    x_minus_1 = StridedInterval._modular_sub(self.lower_bound, 1, self.bits)
    dist = StridedInterval._wrapped_cardinality(y_plus_1, x_minus_1, self.bits) - 1
    if dist < 0:
        # the complement holds a single value
        new_stride = 0
    elif self._stride == 0:
        new_stride = 1
    else:
        # the new stride must divide both the old stride and the span
        new_stride = gcd(self._stride, dist)
    return StridedInterval(lower_bound=y_plus_1,
                           upper_bound=x_minus_1,
                           bits=self.bits,
                           stride=new_stride,
                           uninitialized=self.uninitialized)
Return the complement of the interval Refer section 3.1 augmented for managing strides :return:
def _check_current_value(gnome_kwargs, value):
    """Return True when the currently stored gnome value equals *value*.

    Both sides are compared as text so that non-string settings and
    their string representations still match.
    """
    current = __salt__['gnome.get'](**gnome_kwargs)
    return six.text_type(value) == six.text_type(current)
Check the current value with the passed value
def get_class_doc(klass, config=default_config):
    """Return doc for a class, or None when the class is excluded.

    Nested classes (when ``config.nested_class`` is set) and functions
    defined directly on the class are documented recursively.
    """
    if config.exclude_class:
        if any(pattern.match(klass.__name__) for pattern in config.exclude_class):
            return None
    members_doc = []
    class_dict = klass.__dict__
    for name in dir(klass):
        # only document members defined on this class itself
        if name not in class_dict:
            continue
        member = class_dict[name]
        doc = None
        if isinstance(member, type) and config.nested_class:
            doc = get_class_doc(member, config)
        elif isinstance(member, types.FunctionType):
            doc = get_function_doc(member, config)
        if doc is not None:
            members_doc.append(doc)
    return _doc_object(klass, 'class', members_doc, config)
Return doc for a class.
def main(self):
    """Run the crash mover until all work is done.

    Sets up the task manager and the source and destination crash
    storage systems, then blocks while the task manager's threads
    shepherd crashes from the source to the destination.
    """
    self._setup_task_manager()
    self._setup_source_and_destination()
    # blocking_start only returns once the queued work is finished (or
    # waiting_func signals an abort).
    self.task_manager.blocking_start(waiting_func=self.waiting_func)
    self.close()
    self.config.logger.info('done.')
this main routine sets up the signal handlers, the source and destination crashstorage systems, and the threaded task manager. That starts a flock of threads that are ready to shepherd crashes from the source to the destination.
def num_samples(self, sr=None):
    """Return the number of samples.

    Args:
        sr (int): Calculate the number of samples with the given
            sampling-rate. If None use the native sampling-rate.

    Returns:
        int: Number of samples
    """
    native_sr = self.sampling_rate
    count = units.seconds_to_sample(self.duration, native_sr)
    if sr is None:
        return count
    # rescale to the requested rate, rounding up to whole samples
    return int(np.ceil(count * (float(sr) / native_sr)))
Return the number of samples. Args: sr (int): Calculate the number of samples with the given sampling-rate. If None use the native sampling-rate. Returns: int: Number of samples
def ratio(self, col: str, ratio_col: str="Ratio"):
    """Add a column with the percentage ratio from a column.

    :param col: column to calculate ratio from
    :type col: str
    :param ratio_col: new ratio column name, defaults to "Ratio"
    :type ratio_col: str, optional

    :example: ``ds2 = ds.ratio("Col 1")``
    """
    try:
        frame = self.df.copy()
        # each value as a percentage of the column total
        frame[ratio_col] = frame[[col]].apply(
            lambda x: 100 * x / float(x.sum()))
        self.df = frame
    except Exception as e:
        self.err(e, self.ratio, "Can not calculate ratio")
Add a column with the percentage ratio from a column :param col: column to calculate ratio from :type col: str :param ratio_col: new ratio column name, defaults to "Ratio" :type ratio_col: str, optional :example: ``ds2 = ds.ratio("Col 1")``
def _get_attrs(self, names): assert isinstance(names, str) names = names.replace(",", " ").split(" ") res = [] for n in names: if n == "": continue if n not in self.__dict__: raise KeyError("Unknown name for Container attribute: '{}'".format(n)) res.append(getattr(self, n)) return res
Convenience function to extract multiple attributes at once :param names: string of names separated by comma or space :return:
def dbg_repr(self, max_display=10):
    """
    Debugging output of this slice.

    :param max_display: The maximum number of SimRun slices to show, or
                        None to show all of them.
    :return: A string representation.
    """
    s = repr(self) + "\n"
    # Check max_display for None *before* comparing it against len():
    # the previous ordering raised a TypeError for the documented
    # max_display=None input.
    if max_display is None or len(self.chosen_statements) <= max_display:
        s += "%d SimRuns in program slice.\n" % len(self.chosen_statements)
        run_addrs = sorted(self.chosen_statements.keys())
    else:
        s += "%d SimRuns in program slice, displaying %d.\n" % (
            len(self.chosen_statements), max_display)
        run_addrs = sorted(self.chosen_statements.keys())[:max_display]
    for run_addr in run_addrs:
        s += self.dbg_repr_run(run_addr) + "\n"
    return s
Debugging output of this slice. :param max_display: The maximum number of SimRun slices to show. :return: A string representation.
def exec_before_request_actions(actions, **kwargs):
    """Execute actions in the "before" and "before_METHOD" groups."""
    method_group = "before_" + flask.request.method.lower()
    return execute_actions(actions, limit_groups=("before", method_group), **kwargs)
Execute actions in the "before" and "before_METHOD" groups
def get_static_properties(self):
    """Returns a dictionary of STATICPROPERTIES.

    Examples
    --------
    >>> reader = XBNReader('xbn_test.xml')
    >>> reader.get_static_properties()
    {'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
    """
    properties = {}
    for tag in self.bnmodel.find('STATICPROPERTIES'):
        properties[tag.tag] = tag.get('VALUE')
    return properties
Returns a dictionary of STATICPROPERTIES Examples -------- >>> reader = XBNReader('xbn_test.xml') >>> reader.get_static_properties() {'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
def enterEvent(self, event):
    """
    Mark the hovered state as being true.

    :param      event | <QtCore.QEnterEvent>
    """
    super(XViewPanelItem, self).enterEvent(event)
    self._hovered = True
    # repaint so the hover highlight becomes visible immediately
    self.update()
Mark the hovered state as being true. :param event | <QtCore.QEnterEvent>
def _get_data_volumes(vm_):
    """Construct a list of optional data volumes from the cloud profile.

    Each volume requires a ``disk_size``; ``disk_type`` defaults to
    ``HDD`` and ``disk_availability_zone`` is honoured when present.

    :raises SaltCloudConfigError: when a volume lacks ``disk_size``.
    """
    data_volumes = []
    for name, spec in six.iteritems(vm_['volumes']):
        if 'disk_size' not in spec:
            raise SaltCloudConfigError(
                'The volume \'{0}\' is missing \'disk_size\''.format(name)
            )
        # default disk type, written back into the profile dict as before
        spec.setdefault('disk_type', 'HDD')
        volume = Volume(
            name=name,
            size=spec['disk_size'],
            disk_type=spec['disk_type'],
            licence_type='OTHER'
        )
        if 'disk_availability_zone' in spec:
            volume.availability_zone = spec['disk_availability_zone']
        data_volumes.append(volume)
    return data_volumes
Construct a list of optional data volumes from the cloud profile
def pop(self, key, default=None):
    """Remove *key* (case-insensitively) and return the associated value.

    Args:
        key (str): The key to remove; lowered before lookup.
        default (obj): The value to return if key is not present.
    """
    lowered = key.lower()
    return self._dictionary.pop(lowered, default)
Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present
def addPrivateCertificate(self, subjectName, existingCertificate=None):
    """
    Add a PrivateCertificate object to this store for this subjectName.

    If existingCertificate is None, generate a key pair and a new
    self-signed certificate for subjectName instead.
    """
    if existingCertificate is None:
        # user-style names (containing '@') must not be self-signed;
        # NOTE(review): assert is stripped under -O — consider raising
        assert '@' not in subjectName, "Don't self-sign user certs!"
        mainDN = DistinguishedName(commonName=subjectName)
        mainKey = KeyPair.generate()
        mainCertReq = mainKey.certificateRequest(mainDN)
        # lambda dn: True accepts the request unconditionally (we just
        # built it ourselves)
        mainCertData = mainKey.signCertificateRequest(
            mainDN, mainCertReq, lambda dn: True, self.genSerial(subjectName)
        )
        mainCert = mainKey.newCertificate(mainCertData)
    else:
        mainCert = existingCertificate
    self.localStore[subjectName] = mainCert
Add a PrivateCertificate object to this store for this subjectName. If existingCertificate is None, add a new self-signed certificate.
def find(self, soup):
    """Yield tags matching the tag criterion from a soup.

    Args:
        soup: A BeautifulSoup to search through.

    Yields:
        Tags for which ``self.match_criterion(tag)`` is truthy.
    """
    for candidate in soup.recursiveChildGenerator():
        if not self.match_criterion(candidate):
            continue
        yield candidate
Yield tags matching the tag criterion from a soup. There is no need to override this if you are satisfied with finding tags that match match_criterion. Args: soup: A BeautifulSoup to search through. Yields: BeautifulSoup Tags that match the criterion.
def _print(self, force_flush=False):
    """Prints formatted percentage and tracked time to the screen.

    Output is throttled: with an ``update_interval`` set, a redraw only
    happens after that many seconds since the last one; otherwise only
    when the percentage indicator advanced (or ``force_flush`` is set).
    """
    self._stream_flush()
    next_perc = self._calc_percent()
    if self.update_interval:
        do_update = time.time() - self.last_time >= self.update_interval
    elif force_flush:
        do_update = True
    else:
        do_update = next_perc > self.last_progress
    if do_update and self.active:
        self.last_progress = next_perc
        self._cache_percent_indicator(self.last_progress)
        if self.track:
            # append elapsed time and ETA to the cached output line
            self._cached_output += ' Time elapsed: ' + \
                self._get_time(self._elapsed())
            self._cache_eta()
        if self.item_id:
            self._cache_item_id()
        # \r rewrites the current terminal line in place
        self._stream_out('\r%s' % self._cached_output)
        self._stream_flush()
        self._cached_output = ''
Prints formatted percentage and tracked time to the screen.
def where(self, **kwargs):
    """Return a new Dataset refined using the given conditions.

    :param kwargs: maps each `dimension` to a `condition`. A condition
        is either a callable predicate (which will receive sanitized
        values) or a plain value, which is stringified, sanitized and
        matched exactly.
    :raises Exception: when a dimension is unknown or already constrained.
    """
    refined = copy(self.clauses)
    for dimension, condition in kwargs.items():
        if dimension in self.clauses:
            raise Exception('There should be only one clause for {}'.format(dimension))
        if dimension not in self.schema:
            raise Exception('The dimension {} doesn\'t exist'.format(dimension))
        if isfunction(condition) or isinstance(condition, functools.partial):
            refined[dimension] = condition
        else:
            # exact match against the sanitized string form of the value
            expected = self._sanitize_dimension(str(condition))
            refined[dimension] = functools.partial((lambda x, y: x == y), expected)
    return self._copy(clauses=refined)
Return a new Dataset refined using the given condition :param kwargs: a map of `dimension` => `condition` to filter the elements of the dataset. `condition` can either be an exact value or a callable returning a boolean value. If `condition` is a value, it is converted to a string, then sanitized. If `condition` is a callable, note that it will be passed sanitized values -- i.e., characters outside [a-zA-Z0-9_.] are converted to `_`.
def send_raw_packet(self, packet: str):
    """Encode *packet* and put it onto the transport's write buffer."""
    payload = '{}\r\n'.format(packet)
    log.debug('writing data: %s', repr(payload))
    self.transport.write(payload.encode())
Encode and put packet string onto write buffer.
def add_user_rating(self, item_type, item_id, item_rating):
    """Adds the rating for the indicated item for the current user.

    :param item_type: One of: series, episode, banner.
    :param item_id: The TheTVDB id of the item.
    :param item_rating: The rating from 0 to 10.
    :return: the parsed API response
    """
    url = self.API_BASE_URL + '/user/ratings/%s/%d/%d' % (
        item_type, item_id, item_rating)
    raw_response = requests_util.run_request(
        'put', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Adds the rating for the item indicated for the current user. :param item_type: One of: series, episode, banner. :param item_id: The TheTVDB id of the item. :param item_rating: The rating from 0 to 10. :return:
def remove_empty_cols(records):
    """Remove all-gap columns from aligned SeqRecords."""
    records = list(records)
    rows = [str(rec.seq) for rec in records]
    # transpose to columns, drop those consisting only of gaps
    kept_columns = [col for col in zip(*rows)
                    if any(ch != '-' for ch in col)]
    # transpose back to per-record sequences
    cleaned = [''.join(chars) for chars in zip(*kept_columns)]
    for rec, new_seq in zip(records, cleaned):
        yield SeqRecord(Seq(new_seq, rec.seq.alphabet), id=rec.id,
                        name=rec.name, description=rec.description,
                        dbxrefs=rec.dbxrefs, features=rec.features,
                        annotations=rec.annotations,
                        letter_annotations=rec.letter_annotations)
Remove all-gap columns from aligned SeqRecords.
def collect_directories(self, directories):
    """Collect directories into a set, minus any blacklisted entries.

    With ``self.recursive`` enabled, every subdirectory (following
    symlinks) of each given directory is included as well. Paths are
    converted to absolute form first.
    """
    directories = util.to_absolute_paths(directories)
    if not self.recursive:
        return self._remove_blacklisted(directories)
    collected = set()
    for directory in directories:
        subdirs = [entry[0] for entry in os.walk(directory, followlinks=True)]
        subdirs = util.to_absolute_paths(subdirs)
        collected.update(self._remove_blacklisted(subdirs))
    return collected
Collects all the directories into a `set` object. If `self.recursive` is set to `True` this method will iterate through and return all of the directories and the subdirectories found from `directories` that are not blacklisted. If `self.recursive` is set to `False` this will return all the directories that are not blacklisted. `directories` may be either a single object or an iterable. Recommend passing in absolute paths instead of relative. `collect_directories` will attempt to convert `directories` to absolute paths if they are not already.
def area2lonlat(dataarray):
    """Convert an area to longitudes and latitudes."""
    lons, lats = dataarray.attrs['area'].get_lonlats_dask()
    lon_attrs = {'name': "longitude",
                 'standard_name': "longitude",
                 'units': 'degrees_east'}
    lat_attrs = {'name': "latitude",
                 'standard_name': "latitude",
                 'units': 'degrees_north'}
    lons = xr.DataArray(lons, dims=['y', 'x'], attrs=lon_attrs,
                        name='longitude')
    lats = xr.DataArray(lats, dims=['y', 'x'], attrs=lat_attrs,
                        name='latitude')
    # record the CF-style coordinates reference on the original array
    dataarray.attrs['coordinates'] = 'longitude latitude'
    return [dataarray, lons, lats]
Convert an area to longitudes and latitudes.
def open_url(self, url, sleep_after_open=2):
    """Access a URL through ZAP, then pause to let the page settle.

    :param url: URL to open via the ZAP proxy.
    :param sleep_after_open: seconds to wait after opening the URL.
    """
    self.zap.urlopen(url)
    time.sleep(sleep_after_open)
Access a URL through ZAP.
def map_position(pos):
    """Map a natural position (1-16) to its machine-code position.

    Natural positions map onto the odd numbers 31, 33, ..., 61.

    :raises KeyError: if *pos* is outside 1-16.
    """
    odd_codes = [code for code in range(30, 62) if code % 2]
    return dict(zip(range(1, 17), odd_codes))[pos]
Map a natural position (1-16) to its machine code position
def process_bind_param(self, obj, dialect):
    """Serialize a flask_cloudy.Object into a plain dict before storage.

    :param obj: a flask_cloudy.Object (or a falsy placeholder) to persist.
    :param dialect: the SQLAlchemy dialect in use.
    """
    value = obj or {}
    if isinstance(obj, flask_cloudy.Object):
        value = {key: getattr(obj, key) for key in self.DEFAULT_KEYS}
    # Zero-argument super() instead of super(self.__class__, self): the
    # latter resolves against the *runtime* class and recurses infinitely
    # as soon as this class is subclassed.
    return super().process_bind_param(value, dialect)
Get a flask_cloudy.Object and save it as a dict
def get_log_id(cls, id):
    """Fetches the log for the command represented by this id.

    Args:
        `id`: command id

    Returns:
        the raw log text
    """
    conn = Qubole.agent()
    log_url = cls.element_path(id) + "/logs"
    return conn.get_raw(log_url).text
Fetches log for the command represented by this id Args: `id`: command id
def clean_line(str, delimiter):
    """Split string on the given delimiter, stripping whitespace from
    each field.

    Fields that are empty immediately after the split are dropped;
    fields that become empty only after stripping are kept.
    """
    fields = str.strip().split(delimiter)
    return [field.strip() for field in fields if field != '']
Split string on given delimiter, remove whitespace from each field.
def height(self):
    """height in pixels"""
    allocated = self.alloc_h
    # a Scene parent dictates the allocation directly
    if self.parent and isinstance(self.parent, graphics.Scene):
        allocated = self.parent.height
    minimum = (self.min_height or 0) + self.margin_top + self.margin_bottom
    if allocated is not None and self.fill:
        h = allocated
    else:
        h = minimum
    h = max(h or 0, self.get_min_size()[1])
    return h - self.margin_top - self.margin_bottom
height in pixels
def get_entries(self, start=0, end=0, data_request=None, steam_ids=None):
    """Get leaderboard entries.

    :param start: start entry, not index (e.g. rank 1 is ``start=1``)
    :type start: :class:`int`
    :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``)
    :type end: :class:`int`
    :param data_request: data being requested; defaults to this
        leaderboard's own ``data_request``
    :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest`
    :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users`
    :type steamids: :class:`list`
    :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse``
    :rtype: :class:`list`
    :raises: :class:`LookupError` on message timeout or error
    """
    msg = MsgProto(EMsg.ClientLBSGetLBEntries)
    msg.body.app_id = self.app_id
    msg.body.leaderboard_id = self.id
    msg.body.range_start = start
    msg.body.range_end = end
    if data_request is None:
        msg.body.leaderboard_data_request = self.data_request
    else:
        msg.body.leaderboard_data_request = data_request
    if steam_ids:
        msg.body.steamids.extend(steam_ids)

    response = self._steam.send_job_and_wait(msg, timeout=15)
    if not response:
        raise LookupError("Didn't receive response within 15seconds :(")
    if response.eresult != EResult.OK:
        raise LookupError(EResult(response.eresult))
    # cache the total entry count when the server reports it
    if response.HasField('leaderboard_entry_count'):
        self.entry_count = response.leaderboard_entry_count
    return response.entries
Get leaderboard entries. :param start: start entry, not index (e.g. rank 1 is ``start=1``) :type start: :class:`int` :param end: end entry, not index (e.g. only one entry then ``start=1,end=1``) :type end: :class:`int` :param data_request: data being requested :type data_request: :class:`steam.enums.common.ELeaderboardDataRequest` :param steam_ids: list of steam ids when using :prop:`.ELeaderboardDataRequest.Users` :type steamids: :class:`list` :return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse`` :rtype: :class:`list` :raises: :class:`LookupError` on message timeout or error
def policy(self, observations):
    """Calculate only the action head for the given observations."""
    features = self.input_block(observations)
    backbone_out = self.policy_backbone(features)
    return self.action_head(backbone_out)
Calculate only action head for given state
def deleteByOrigIndex(self, index):
    """Remove the single entry whose *original* list index equals *index*.

    The index refers to the position the entry held in the list as it
    was first inserted, regardless of any sorting applied since.

    :param index: integer position of the entry in the original list.
    :return: self
    """
    kept_rows = []
    kept_indices = []
    for orig_index, row in zip(self.index_track, self.table):
        if orig_index == index:
            continue
        kept_rows.append(row)
        kept_indices.append(orig_index)
    self.table = kept_rows
    self.index_track = kept_indices
    return self
Removes a single entry from the list given the index reference. The index, in this instance, is a reference to the *original* list indexing as seen when the list was first inserted into PLOD. An example: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> myPLOD = PLOD(test) >>> print myPLOD.sort("name").returnString() [ {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]} ] >>> print myPLOD.deleteByOrigIndex(0).returnString() [ {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 18, income: None , name: 'Larry', wigs: [3, 2, 9]} ] As you can see in the example, the list was sorted by 'name', which placed 'Bill' as the first entry. Yet, when the deleteByOrigIndex was passed a zero (for the first entry), it removed 'Jim' instead since it was the original first entry. :param index: An integer representing the place of entry in the original list of dictionaries. :return: self
def LDA_discriminants(x, labels):
    """
    Linear Discriminant Analysis helper for determination how many columns
    of data should be reduced.

    **Args:**

    * `x` : input matrix (2d array), every row represents new sample

    * `labels` : list of labels (iterable), every item should be label for \
      sample with corresponding index

    **Returns:**

    * `discriminants` : array of eigenvalues sorted in descending order
    """
    # Catch only real conversion failures: a bare `except:` would also
    # swallow KeyboardInterrupt/SystemExit and discard the original error.
    try:
        x = np.array(x)
    except Exception as e:
        raise ValueError('Impossible to convert x to a numpy array.') from e
    eigen_values, eigen_vectors = LDA_base(x, labels)
    # sort eigenvalues in descending order
    return eigen_values[(-eigen_values).argsort()]
Linear Discriminant Analysis helper for determination how many columns of data should be reduced. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `discriminants` : array of eigenvalues sorted in descending order
def check_signature(signature, key, data):
    """Compute the HMAC-SHA1 signature of *data* and compare it, in
    constant time, against the given signature.

    :param signature: the ``sha1=<hex>`` signature to verify (str or bytes).
    :param key: the shared HMAC secret (str or bytes).
    :param data: the signed payload (bytes).
    :return: True when the signature matches.
    """
    if isinstance(key, type(u'')):
        key = key.encode()
    digest = 'sha1=' + hmac.new(key, data, hashlib.sha1).hexdigest()
    if isinstance(digest, type(u'')):
        digest = digest.encode()
    if isinstance(signature, type(u'')):
        signature = signature.encode()
    # hmac.compare_digest replaces werkzeug.security.safe_str_cmp, which
    # was removed in Werkzeug 2.1; both perform a constant-time compare.
    return hmac.compare_digest(digest, signature)
Compute the HMAC signature and test against a given hash.
def purge(root=os.path.join(base.data_dir(), 'models')):
    r"""Purge all pretrained model files in local file store.

    Parameters
    ----------
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
    """
    root = os.path.expanduser(root)
    for entry in os.listdir(root):
        if entry.endswith(".params"):
            os.remove(os.path.join(root, entry))
r"""Purge all pretrained model files in local file store. Parameters ---------- root : str, default '$MXNET_HOME/models' Location for keeping the model parameters.
def add(self, resource):
    """Add a resource or an iterable collection of resources to this
    container.

    Note: strings are themselves iterable and are therefore added
    character by character, matching the historical behaviour.
    """
    # collections.abc.Iterable: the bare collections.Iterable alias was
    # deprecated in Python 3.3 and removed in Python 3.10.
    from collections.abc import Iterable

    if isinstance(resource, Iterable):
        for item in resource:
            self.resources.append(item)
    else:
        self.resources.append(resource)
Add a resource or an iterable collection of resources to this container. Must be implemented in derived class.