def _exclude_on_fail(self, df):
    """Assign a selection of scenarios as `exclude: True` in meta"""
    idx = df if isinstance(df, pd.MultiIndex) else _meta_idx(df)
    self.meta.loc[idx, 'exclude'] = True
    logger().info('{} non-valid scenario{} will be excluded'
                  .format(len(idx), '' if len(idx) == 1 else 's'))
def register_function(self, patterns, instances=None, **reg_kwargs):
    """Decorator for register."""
    def wrapper(function):
        self.register(patterns, function, instances=instances, **reg_kwargs)
        return function
    return wrapper
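A minimal, self-contained sketch of how such a decorator-backed registry can be used; the Registry class below is hypothetical and stands in for whatever object actually exposes register:

class Registry:
    """Hypothetical registry illustrating the decorator pattern above."""

    def __init__(self):
        self.handlers = {}

    def register(self, patterns, function, instances=None, **reg_kwargs):
        # Map each pattern to the handler function.
        for pattern in patterns:
            self.handlers[pattern] = function

    def register_function(self, patterns, instances=None, **reg_kwargs):
        def wrapper(function):
            self.register(patterns, function, instances=instances, **reg_kwargs)
            return function
        return wrapper


registry = Registry()

@registry.register_function(["*.txt", "*.md"])
def handle_text(path):
    return "text: " + path

# The decorated function is registered and returned unchanged.
assert registry.handlers["*.txt"] is handle_text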
def add_link(self, link):
    """Add a Link.

    :type link: :class: `~opencensus.trace.link.Link`
    :param link: A Link object.
    """
    if isinstance(link, link_module.Link):
        self.links.append(link)
    else:
        raise TypeError("Type Error: received {}, but requires Link.".
                        format(type(link).__name__))
def _set_evpn_instance(self, v, load=False):
    """
    Setter method for evpn_instance, mapped from YANG variable
    /routing_system/evpn_config/evpn/evpn_instance (list)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_evpn_instance is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_evpn_instance() directly.

    YANG Description: EVPN instance config
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("instance_name", evpn_instance.evpn_instance, yang_name="evpn-instance", rest_name="evpn-instance", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-name', extensions={u'tailf-common': {u'info': u'Evpn instance config', u'callpoint': u'EvpnInstances', u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'cli-full-no': None}}), is_container='list', yang_name="evpn-instance", rest_name="evpn-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Evpn instance config', u'callpoint': u'EvpnInstances', u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """evpn_instance must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("instance_name",evpn_instance.evpn_instance, yang_name="evpn-instance", rest_name="evpn-instance", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='instance-name', extensions={u'tailf-common': {u'info': u'Evpn instance config', u'callpoint': u'EvpnInstances', u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'cli-full-no': None}}), is_container='list', yang_name="evpn-instance", rest_name="evpn-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Evpn instance config', u'callpoint': u'EvpnInstances', u'cli-suppress-list-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
        })
    self.__evpn_instance = t
    if hasattr(self, '_set'):
        self._set()
def _db_urls(opts: Namespace) -> Tuple[str, str]:
    """ Return the crc and ontology db urls

    :param opts: options
    :return: Tuple w/ crc and ontology url
    """
    return opts.crcdb.replace("//", "//{crcuser}:{crcpassword}@".format(**opts.__dict__)), \
           opts.ontodb.replace("//", "//{ontouser}:{ontopassword}@".format(**opts.__dict__))
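A small self-contained illustration of the credential-splicing idiom used above (all names and values here are made up for the demo):

from argparse import Namespace

opts = Namespace(crcdb="postgresql://localhost/crc",
                 crcuser="alice", crcpassword="s3cret")
url = opts.crcdb.replace("//", "//{crcuser}:{crcpassword}@".format(**opts.__dict__))
print(url)  # postgresql://alice:s3cret@localhost/crc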
def report_data(self, entity_data):
    """ Used to report entity data (metrics & snapshot) to the host agent. """
    try:
        response = None
        response = self.client.post(self.__data_url(),
                                    data=self.to_json(entity_data),
                                    headers={"Content-Type": "application/json"},
                                    timeout=0.8)
        # logger.warn("report_data: response.status_code is %s" % response.status_code)
        if response.status_code == 200:  # `is 200` relied on CPython int interning; use equality
            self.last_seen = datetime.now()
    except (requests.ConnectTimeout, requests.ConnectionError):
        logger.debug("report_data: host agent connection error")
    finally:
        return response
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'):
    """Function which runs on the pyboard. Matches up with send_file_to_remote."""
    import sys
    import ubinascii
    if HAS_BUFFER:
        try:
            import pyb
            usb = pyb.USB_VCP()
        except:
            try:
                import machine
                usb = machine.USB_VCP()
            except:
                usb = None
        if usb and usb.isconnected():
            # We don't want 0x03 bytes in the data to be interpreted as a Control-C
            # This gets reset each time the REPL runs a line, so we don't need to
            # worry about resetting it ourselves
            usb.setinterrupt(-1)
    try:
        with open(dst_filename, dst_mode) as dst_file:
            bytes_remaining = filesize
            if not HAS_BUFFER:
                bytes_remaining *= 2  # hexlify makes each byte into 2
            buf_size = BUFFER_SIZE
            write_buf = bytearray(buf_size)
            read_buf = bytearray(buf_size)
            while bytes_remaining > 0:
                # Send back an ack as a form of flow control
                sys.stdout.write('\x06')
                read_size = min(bytes_remaining, buf_size)
                buf_remaining = read_size
                buf_index = 0
                while buf_remaining > 0:
                    if HAS_BUFFER:
                        bytes_read = sys.stdin.buffer.readinto(read_buf, read_size)
                    else:
                        bytes_read = sys.stdin.readinto(read_buf, read_size)
                    if bytes_read > 0:
                        # offset both slice bounds by buf_index so a later chunk
                        # doesn't overwrite (or resize away) an earlier one
                        write_buf[buf_index:buf_index + bytes_read] = read_buf[0:bytes_read]
                        buf_index += bytes_read
                        buf_remaining -= bytes_read
                if HAS_BUFFER:
                    dst_file.write(write_buf[0:read_size])
                else:
                    dst_file.write(ubinascii.unhexlify(write_buf[0:read_size]))
                bytes_remaining -= read_size
        return True
    except:
        return False
def post_attachment(self, bugid, attachment):
    '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#create-attachment'''
    assert type(attachment) is DotDict
    assert 'data' in attachment
    assert 'file_name' in attachment
    assert 'summary' in attachment
    if 'content_type' not in attachment:
        attachment.content_type = 'text/plain'
    attachment.ids = bugid
    attachment.data = base64.standard_b64encode(
        bytearray(attachment.data, 'ascii')).decode('ascii')
    return self._post('bug/{bugid}/attachment'.format(bugid=bugid),
                      json.dumps(attachment))
def get_value(self, node):
    """Convert value from an AST node."""
    if not isinstance(node, ast.Dict):
        raise TypeError("must be a dictionary")
    evaluator = SafeEvaluator()
    try:
        value = evaluator.run(node)
    except Exception as ex:
        # TODO: Handle errors.
        raise ex
    try:
        # Ensure value is a serializable dictionary.
        value = json.loads(json.dumps(value))
        if not isinstance(value, dict):
            raise TypeError
    except (TypeError, ValueError):
        raise TypeError("must be serializable")
    return value
def accumulate(a_generator, cooperator=None):
    """
    Start a Deferred whose callback arg is a deque of the accumulation
    of the values yielded from a_generator.

    :param a_generator: An iterator which yields some not None values.
    :return: A Deferred to which the next callback will be called with
        the yielded contents of the generator function.
    """
    if cooperator:
        own_cooperate = cooperator.cooperate
    else:
        own_cooperate = cooperate
    spigot = ValueBucket()
    items = stream_tap((spigot,), a_generator)
    d = own_cooperate(items).whenDone()
    d.addCallback(accumulation_handler, spigot)
    return d
def time2internaldate(date_time):
    """Convert date_time to IMAP4 INTERNALDATE representation.

    Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'.  The
    date_time argument can be a number (int or float) representing
    seconds since epoch (as returned by time.time()), a 9-tuple
    representing local time, an instance of time.struct_time (as
    returned by time.localtime()), an aware datetime instance or a
    double-quoted string.  In the last case, it is assumed to already
    be in the correct format.
    """
    if isinstance(date_time, (int, float)):
        dt = datetime.fromtimestamp(date_time, timezone.utc).astimezone()
    elif isinstance(date_time, tuple):
        try:
            gmtoff = date_time.tm_gmtoff
        except AttributeError:
            if time.daylight:
                dst = date_time[8]
                if dst == -1:
                    dst = time.localtime(time.mktime(date_time))[8]
                gmtoff = -(time.timezone, time.altzone)[dst]
            else:
                gmtoff = -time.timezone
        delta = timedelta(seconds=gmtoff)
        dt = datetime(*date_time[:6], tzinfo=timezone(delta))
    elif isinstance(date_time, datetime):
        if date_time.tzinfo is None:
            raise ValueError("date_time must be aware")
        dt = date_time
    elif isinstance(date_time, str) and (date_time[0], date_time[-1]) == ('"', '"'):
        return date_time        # Assume in correct format
    else:
        raise ValueError("date_time not of a known type")
    fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month])
    return dt.strftime(fmt)
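For reference, the standard library exposes this helper as imaplib.Time2Internaldate; a quick usage sketch:

import imaplib
import time
from datetime import datetime, timezone

# From seconds since the epoch (formatted in the local timezone):
print(imaplib.Time2Internaldate(time.time()))

# From an aware datetime:
print(imaplib.Time2Internaldate(
    datetime(2021, 3, 14, 15, 9, 26, tzinfo=timezone.utc)))
# '"14-Mar-2021 15:09:26 +0000"'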
def search(**criteria):
    """
    Search registered *component* classes matching the given criteria.

    :param criteria: search criteria of the form: ``a='1', b='x'``
    :return: parts registered with the given criteria
    :rtype: :class:`set`

    Will return an empty :class:`set` if nothing is found.

    ::

        from cqparts.search import search
        import cqparts_motors  # example of a 3rd party lib

        # Get all DC motor classes
        dc_motors = search(type='motor', current_class='dc')

        # For more complex queries:
        air_cooled = search(cooling='air')
        non_aircooled_dcmotors = dc_motors - air_cooled
        # will be all DC motors that aren't air-cooled
    """
    # Find all parts that match the given criteria
    results = copy(class_list)  # start with full list
    for (category, value) in criteria.items():
        results &= index[category][value]
    return results
def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):
    """Return the Baystat distance.

    This is a wrapper for :py:meth:`Baystat.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    min_ss_len : int
        Minimum substring length to be considered
    left_ext : int
        Left-side extension length
    right_ext : int
        Right-side extension length

    Returns
    -------
    float
        The Baystat distance

    Examples
    --------
    >>> round(dist_baystat('cat', 'hat'), 12)
    0.333333333333
    >>> dist_baystat('Niall', 'Neil')
    0.6
    >>> round(dist_baystat('Colin', 'Cuilen'), 12)
    0.833333333333
    >>> dist_baystat('ATCG', 'TAGC')
    1.0

    """
    return Baystat().dist(src, tar, min_ss_len, left_ext, right_ext)
def _getEventFromUid(self, request, uid):
    """Try and find an event with the given UID in this site."""
    event = getEventFromUid(request, uid)  # might raise ObjectDoesNotExist
    home = request.site.root_page
    if event.get_ancestors().filter(id=home.id).exists():
        # only return event if it is in the same site
        return event
def urlopen(self, method, url, redirect=True, **kw):
    """
    Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
    with custom cross-host redirect logic and only sends the request-uri
    portion of the ``url``.

    The given ``url`` parameter must be absolute, such that an appropriate
    :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
    """
    u = parse_url(url)
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    kw['assert_same_host'] = False
    kw['redirect'] = False

    if 'headers' not in kw:
        kw['headers'] = self.headers.copy()

    if self.proxy is not None and u.scheme == "http":
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)

    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response

    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)

    # RFC 7231, Section 6.4.4
    if response.status == 303:
        method = 'GET'

    retries = kw.get('retries')
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)

    # Strip headers marked as unsafe to forward to the redirected location.
    # Check remove_headers_on_redirect to avoid a potential network call within
    # conn.is_same_host() which may use socket.gethostbyname() in the future.
    if (retries.remove_headers_on_redirect
            and not conn.is_same_host(redirect_location)):
        for header in retries.remove_headers_on_redirect:
            kw['headers'].pop(header, None)

    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            raise
        return response

    kw['retries'] = retries
    kw['redirect'] = redirect

    log.info("Redirecting %s -> %s", url, redirect_location)
    return self.urlopen(method, redirect_location, **kw)
def iterable(obj):
    """Returns ``True`` if *obj* can be iterated over and is *not* a string."""
    if isinstance(obj, string_types):
        return False  # avoid iterating over characters of a string
    if hasattr(obj, 'next'):
        return True  # any iterator will do
    try:
        len(obj)  # anything else that might work
    except TypeError:
        return False
    return True
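The hasattr(obj, 'next') check targets Python 2 iterators; on Python 3 a behaviorally similar test can be written with collections.abc. A sketch, not the library's implementation:

from collections.abc import Iterable

def iterable3(obj):
    """True if obj is iterable and not a string (Python 3 idiom)."""
    return isinstance(obj, Iterable) and not isinstance(obj, str)

assert iterable3([1, 2, 3])
assert iterable3(iter(()))     # iterators count as iterable
assert not iterable3("abc")
assert not iterable3(42)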
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    lock_fxn = _lock_fxn("unlock", lock_mode, False)
    return dax.get_scalar(
        dax.callproc(lock_fxn,
                     key if isinstance(key, (list, tuple)) else [key])[0])
def is_import_interface(instrument_interface):
    """Returns whether the instrument interface passed in is for results import
    """
    if IInstrumentImportInterface.providedBy(instrument_interface):
        return True

    # TODO Remove this once classic instrument interface migrated
    if hasattr(instrument_interface, '__name__'):
        obj_name = instrument_interface.__name__.replace(__name__, "")
        if obj_name[1:] in __all__ and hasattr(instrument_interface, "Import"):
            return True

    return False
def input(self):
    """
    Input items used for process stored in a dictionary.

    Keys are the hashes of the input parameters, values the respective
    InputData classes.
    """
    # the delimiters are used by some input drivers
    delimiters = dict(
        zoom=self.init_zoom_levels,
        bounds=self.init_bounds,
        process_bounds=self.bounds,
        effective_bounds=self.effective_bounds
    )

    # get input items only of initialized zoom levels
    raw_inputs = {
        # convert input definition to hash
        get_hash(v): v
        for zoom in self.init_zoom_levels
        if "input" in self._params_at_zoom[zoom]
        # to preserve file groups, "flatten" the input tree and use
        # the tree paths as keys
        for key, v in _flatten_tree(self._params_at_zoom[zoom]["input"])
        if v is not None
    }

    initialized_inputs = {}
    for k, v in raw_inputs.items():

        # for files and tile directories
        if isinstance(v, str):
            logger.debug("load input reader for simple input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        path=absolute_path(path=v, base_dir=self.config_dir),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for simple input %s is %s", v, reader)

        # for abstract inputs
        elif isinstance(v, dict):
            logger.debug("load input reader for abstract input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        abstract=deepcopy(v),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters,
                        conf_dir=self.config_dir
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for abstract input %s is %s", v, reader)
        else:
            raise MapcheteConfigError("invalid input type %s" % type(v))

        # trigger bbox creation
        reader.bbox(out_crs=self.process_pyramid.crs)

        initialized_inputs[k] = reader

    return initialized_inputs
def __validate(data, classes, labels):
    "Validator of inputs."
    if not isinstance(data, dict):
        raise TypeError(
            'data must be a dict! keys: sample ID or any unique identifier')
    if not isinstance(labels, dict):
        raise TypeError(
            'labels must be a dict! keys: sample ID or any unique identifier')
    if classes is not None:
        if not isinstance(classes, dict):
            raise TypeError(
                'classes must be a dict! keys: sample ID or any unique identifier')
        if not len(data) == len(labels) == len(classes):
            raise ValueError('Lengths of data, labels and classes do not match!')
        if not set(list(data)) == set(list(labels)) == set(list(classes)):
            raise ValueError(
                'data, classes and labels dictionaries must have the same keys!')

    num_features_in_elements = np.unique([sample.size for sample in data.values()])
    if len(num_features_in_elements) > 1:
        raise ValueError(
            'different samples have different number of features - invalid!')

    return True
def contains_plural_field(model, fields):
    """
    Returns a boolean indicating if ``fields`` contains a relationship
    to multiple items.
    """
    source_model = model
    for orm_path in fields:
        model = source_model
        bits = orm_path.lstrip('+-').split('__')
        for bit in bits[:-1]:
            field = model._meta.get_field(bit)
            if field.many_to_many or field.one_to_many:
                return True
            model = get_model_at_related_field(model, bit)
    return False
def full_data(self):
    """
    Returns all the info available for the chat in the following format:
    title [username] (type) <id>
    If any data is not available, it is not added.
    """
    data = [
        self.chat.title,
        self._username(),
        self._type(),
        self._id()
    ]
    return " ".join(filter(None, data))
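The filter(None, ...) join used above drops empty or missing parts; a tiny standalone demo (values made up):

parts = ["My Chat", None, "(group)", "<12345>"]
print(" ".join(filter(None, parts)))  # My Chat (group) <12345>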
def load(self):
    """
    Load the file.
    """
    fd = None
    try:
        obj = parse_dot_file(self.dot_file.absolute_path)
    finally:
        if fd is not None:
            fd.close()
    return obj
def get_indices(self, labels):
    """
    Find the indices of the input ``labels``.

    Parameters
    ----------
    labels : int, array-like (1D, int)
        The label numbers(s) to find.

    Returns
    -------
    indices : int `~numpy.ndarray`
        An integer array of indices with the same shape as ``labels``.
        If ``labels`` is a scalar, then the returned index will also be
        a scalar.

    Raises
    ------
    ValueError
        If any input ``labels`` are invalid.
    """
    self.check_labels(labels)
    return np.searchsorted(self.labels, labels)
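np.searchsorted only yields correct indices here assuming self.labels is sorted (searchsorted requires it) and, per the docstring, check_labels has already rejected labels that are not present. A standalone demo:

import numpy as np

labels = np.array([1, 3, 4, 7])           # sorted label values
print(np.searchsorted(labels, 4))         # 2   (scalar in, scalar out)
print(np.searchsorted(labels, [1, 7]))    # [0 3]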
def get_connection_details(session, vcenter_resource_model, resource_context):
    """
    Retrieves the connection details from the vcenter resource model attributes.

    :param CloudShellAPISession session:
    :param VMwarevCenterResourceModel vcenter_resource_model: Instance of VMwarevCenterResourceModel
    :param ResourceContextDetails resource_context: the context of the command
    """
    # get vCenter connection details from vCenter resource
    user = vcenter_resource_model.user
    vcenter_url = resource_context.address
    password = session.DecryptPassword(vcenter_resource_model.password).Value

    return VCenterConnectionDetails(vcenter_url, user, password)
def calc_trees(self, indices=None, task_interface=None,
               jobhandler=default_jobhandler, batchsize=1,
               show_progress=True, **kwargs):
    """
    Infer phylogenetic trees for the loaded Alignments

    :param indices: Only run inference on the alignments at these given indices
    :param task_interface: Inference tool specified via TaskInterface (default RaxmlTaskInterface)
    :param jobhandler: Launch jobs via this JobHandler (default SequentialJobHandler; also available
        are ThreadpoolJobHandler and ProcesspoolJobHandler for running inference in parallel)
    :param batchsize: Batch size for Thread- or ProcesspoolJobHandlers
    :param kwargs: Remaining arguments to pass to the TaskInterface
    :return: None
    """
    if indices is None:
        indices = list(range(len(self)))
    if task_interface is None:
        task_interface = tasks.RaxmlTaskInterface()

    records = [self[i] for i in indices]

    # Scrape args from records
    args, to_delete = task_interface.scrape_args(records, **kwargs)

    # Dispatch work
    msg = '{} Tree estimation'.format(task_interface.name) if show_progress else ''
    map_result = jobhandler(task_interface.get_task(), args, msg, batchsize)

    # Process results
    with fileIO.TempFileList(to_delete):
        for rec, result in zip(records, map_result):
            # logger.debug('Result - {}'.format(result))
            rec.parameters.construct_from_dict(result)
def add_tag(self, name, value):
    """
    :param name: Name of the tag
    :type name: string
    :param value: Value of the tag
    :type value: string
    """
    self.tags.append(Tag(name, value))
def compute_balance_median_ts(self, ts):
    """ Compute the balance at each time 't' of the time series."""
    balance = [self.compute_balance_median(ts, t)
               for t in np.arange(0, len(ts) - 1)]
    return balance
def get_name_as_short_text(cls, name_field: cryptography.x509.Name) -> str:
    """Convert a name field returned by the cryptography module to a string
    suitable for displaying it to the user.
    """
    # Name_field is supposed to be a Subject or an Issuer; print the CN if there is one
    common_names = cls.get_common_names(name_field)
    if common_names:
        # We don't support certs with multiple CNs
        return common_names[0]
    else:
        # Otherwise show the whole field
        return cls.get_name_as_text(name_field)
def split(self):
    """
    Split a Path2D into multiple Path2D objects where each
    one has exactly one root curve.

    Parameters
    --------------
    self : trimesh.path.Path2D
      Input geometry

    Returns
    -------------
    split : list of trimesh.path.Path2D
      Original geometry as separate paths
    """
    # avoid a circular import by referencing class of self
    Path2D = type(self)

    # save the results of the split to an array
    split = []

    # get objects from cache to avoid a bajillion
    # cache checks inside the tight loop
    paths = self.paths
    discrete = self.discrete
    polygons_closed = self.polygons_closed
    enclosure_directed = self.enclosure_directed

    for root_index, root in enumerate(self.root):
        # get a list of the root curve's children
        connected = list(enclosure_directed[root].keys())
        # add the root node to the list
        connected.append(root)

        # store new paths and entities
        new_paths = []
        new_entities = []

        for index in connected:
            path = paths[index]
            # add a path which is just sequential indexes
            new_paths.append(np.arange(len(path)) + len(new_entities))
            # save the entity indexes
            new_entities.extend(path)

        # store the root index from the original drawing
        metadata = copy.deepcopy(self.metadata)
        metadata['split_2D'] = root_index
        # we made the root path the last index of connected
        new_root = np.array([len(new_paths) - 1])

        # prevents the copying from nuking our cache
        with self._cache:
            # create the Path2D
            split.append(Path2D(
                entities=copy.deepcopy(self.entities[new_entities]),
                vertices=copy.deepcopy(self.vertices),
                metadata=metadata))
            # add back expensive things to the cache
            split[-1]._cache.update(
                {'paths': new_paths,
                 'polygons_closed': polygons_closed[connected],
                 'discrete': discrete[connected],
                 'root': new_root})
            # set the cache ID
            split[-1]._cache.id_set()

    return np.array(split)
def name(self, src=None):
    """Return string representing the name of this type."""
    return " & ".join(_get_type_name(tt, src) for tt in self._types)
def desc_from_uri(uri):
    """Create the content of DIDL desc element from a uri.

    Args:
        uri (str): A uri, eg:
            ``'x-sonos-http:track%3a3402413.mp3?sid=2&amp;flags=32&amp;sn=4'``

    Returns:
        str: The content of a desc element for that uri, eg
            ``'SA_RINCON519_email@example.com'``
    """
    # If there is an sn parameter (which is the serial number of an account),
    # we can obtain all the information we need from that, because we can find
    # the relevant service_id in the account database (it is the same as the
    # service_type). Consequently, the sid parameter is unneeded. But if sn is
    # missing, we need the sid (service_type) parameter to find a relevant
    # account

    # urlparse does not work consistently with custom URI schemes such as
    # those used by Sonos. This is especially broken in Python 2.6 and
    # early versions of 2.7: http://bugs.python.org/issue9374
    # As a workaround, we split off the scheme manually, and then parse
    # the uri as if it were http
    if ":" in uri:
        _, uri = uri.split(":", 1)
    query_string = parse_qs(urlparse(uri, 'http').query)
    # Is there an account serial number?
    if query_string.get('sn'):
        account_serial_number = query_string['sn'][0]
        try:
            account = Account.get_accounts()[account_serial_number]
            desc = "SA_RINCON{}_{}".format(
                account.service_type, account.username)
            return desc
        except KeyError:
            # There is no account matching this serial number. Fall back to
            # using the service id to find an account
            pass
    if query_string.get('sid'):
        service_id = query_string['sid'][0]
        for service in MusicService._get_music_services_data().values():
            if service_id == service["ServiceID"]:
                service_type = service["ServiceType"]
                account = Account.get_accounts_for_service(service_type)
                if not account:
                    break
                # Use the first account we find
                account = account[0]
                desc = "SA_RINCON{}_{}".format(
                    account.service_type, account.username)
                return desc
    # Nothing found. Default to the standard desc value. Is this the right
    # thing to do?
    desc = 'RINCON_AssociatedZPUDN'
    return desc
def get_facets_ranges(self):
    '''
    Returns query facet ranges ::

        >>> res = solr.query('SolrClient_unittest', {
                'q': '*:*',
                'facet': True,
                'facet.range': 'price',
                'facet.range.start': 0,
                'facet.range.end': 100,
                'facet.range.gap': 10
        })
        >>> res.get_facets_ranges()
        {'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}}

    '''
    if not hasattr(self, 'facet_ranges'):
        self.facet_ranges = {}
        data = self.data
        if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict:
            if 'facet_ranges' in data['facet_counts'].keys() and type(data['facet_counts']['facet_ranges']) == dict:
                for facetfield in data['facet_counts']['facet_ranges']:
                    if type(data['facet_counts']['facet_ranges'][facetfield]['counts']) == list:
                        l = data['facet_counts']['facet_ranges'][facetfield]['counts']
                        self.facet_ranges[facetfield] = OrderedDict(zip(l[::2], l[1::2]))
                return self.facet_ranges
            else:
                raise SolrResponseError("No Facet Ranges in the Response")
    else:
        return self.facet_ranges
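Solr returns range-facet counts as a flat [key, count, key, count, ...] list; the zip(l[::2], l[1::2]) step above pairs them back up. A standalone demo (values made up):

from collections import OrderedDict

counts = ['0', 3, '10', 5, '20', 7]   # Solr's flat [key, count, ...] layout
print(OrderedDict(zip(counts[::2], counts[1::2])))
# OrderedDict([('0', 3), ('10', 5), ('20', 7)])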
def get_vads_trans_id(vads_site_id, vads_trans_date):
    """ Returns a default value for vads_trans_id field.

    vads_trans_id field is mandatory. It is composed of 6 numeric
    characters that identify the transaction. There is a uniqueness
    constraint between vads_site_id and vads_trans_date (the first 8
    characters representing the transaction date). We consider the
    probability of having 2 identical generated vads_trans_id in the
    same day as null."""
    vads_trans_id = ""
    for i in range(0, 6):
        vads_trans_id += str(random.randint(0, 9))
    return vads_trans_id
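An equivalent, slightly terser way to draw six random digits with the zero-padding built in (a sketch, not the library's code):

import random

vads_trans_id = "{:06d}".format(random.randint(0, 999999))
print(vads_trans_id)  # e.g. 042917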
def modLocationPort(self, location):
    """
    Ensures that the location port is the given port value
    Used in `handleHeader`
    """
    components = urlparse.urlparse(location)
    reverse_proxy_port = self.father.getHost().port
    reverse_proxy_host = self.father.getHost().host
    # returns an ordered dict of urlparse.ParseResult components
    _components = components._asdict()
    _components['netloc'] = '%s:%d' % (
        reverse_proxy_host, reverse_proxy_port
    )
    return urlparse.urlunparse(_components.values())
def mktns(self, root):
    """Get/create the target namespace."""
    tns = root.get("targetNamespace")
    prefix = root.findPrefix(tns)
    if prefix is None:
        log.debug("warning: tns (%s), not mapped to prefix", tns)
        prefix = "tns"
    return (prefix, tns)
def addItem(self, itemParameters, filePath=None, overwrite=False,
            folder=None, dataURL=None, url=None, text=None,
            relationshipType=None, originItemId=None,
            destinationItemId=None, serviceProxyParams=None,
            metadata=None, multipart=False):
    """
    Adds an item to ArcGIS Online or Portal. The Add Item operation
    (POST only) is used to upload an item file, submit text content,
    or submit the item URL to the specified user folder depending on
    documented items and item types. This operation is available only
    to the specified user.

    Inputs:
       itemParameters - required except for when multipart = True or
                        SD file. This contains all the information
                        regarding type, tags, etc...
       filePath - if updating the item's content
       overwrite - if the item exists, it overwrites it
       folder - id of the folder to place the item
       dataURL - The URL where the item can be downloaded. The resource
                 will be downloaded and stored as a file type. Similar
                 to uploading a file to be added, but instead of
                 transferring the contents of the file, the URL of the
                 data file is referenced and creates a file item.
       url - The URL of the item to be submitted. The URL can be a URL
             to a service, a web mapping application, or any other
             content available at that URL.
       text - The text content for the item to be submitted.
       relationshipType - The type of relationship between the two
                          items. See Relationship types for a complete
                          listing of types.
       originItemId - The item ID of the origin item of the relationship
       destinationItemId - item ID of the destination item of the
                           relationship.
       serviceProxyParams - JSON object that provides rate limiting and
                            referrer checks when accessing secured
                            services.
       metadata - XML meta data file.
       multipart - If true, the file is uploaded in multiple parts.
                   Used for files over 100 MBs in size.
    """
    params = {
        "f": "json"
    }
    res = ""
    if itemParameters is not None:
        for k, v in itemParameters.value.items():
            if isinstance(v, bool):
                params[k] = json.dumps(v)
            else:
                params[k] = v
        if itemParameters.overwrite is not None:
            params['overwrite'] = json.dumps(overwrite)
        if itemParameters.overwrite != overwrite:
            params['overwrite'] = json.dumps(overwrite)
    if dataURL is not None:
        params['dataURL'] = dataURL
    if url is not None:
        params['url'] = url
    if text is not None:
        params['text'] = text
    if relationshipType is not None:
        params['relationshipType'] = relationshipType
    if originItemId is not None:
        params['originItemId'] = originItemId
    if destinationItemId is not None:
        params['destinationItemId'] = destinationItemId
    if serviceProxyParams is not None:
        params['serviceProxyParams'] = serviceProxyParams
    url = "%s/addItem" % self.location
    files = {}
    if multipart:
        res = self._addItemMultiPart(
            itemParameters=itemParameters,
            filePath=filePath)
    else:
        if filePath is not None and os.path.isfile(filePath):
            files['file'] = filePath
            params["filename"] = os.path.basename(filePath)
        elif filePath is not None and multipart:
            params["filename"] = os.path.basename(filePath)
        elif filePath is not None and not os.path.isfile(filePath):
            print("{0} not found".format(filePath))
        if 'thumbnail' in params:
            v = params['thumbnail']
            del params['thumbnail']
            files['thumbnail'] = v
        if metadata is not None and os.path.isfile(metadata):
            files['metadata'] = metadata
        if len(files) < 1:
            res = self._post(url,
                             param_dict=params,
                             securityHandler=self._securityHandler,
                             proxy_url=self._proxy_url,
                             proxy_port=self._proxy_port)
        else:
            params['itemType'] = 'file'
            params['async'] = False
            res = self._post(url=url,
                             param_dict=params,
                             files=files,
                             securityHandler=self._securityHandler,
                             proxy_url=self._proxy_url,
                             proxy_port=self._proxy_port)
    if isinstance(res, dict) and "id" not in res:
        raise Exception("Cannot add the item: %s" % res)
    elif isinstance(res, (UserItem, Item)) and res.id is None:
        raise Exception("Cannot add the item: %s" % str(res))
    elif isinstance(res, (UserItem, Item)):
        return res
    else:
        itemId = res['id']
        return UserItem(url="%s/items/%s" % (self.location, itemId),
                        securityHandler=self._securityHandler,
                        proxy_url=self._proxy_url,
                        proxy_port=self._proxy_port)
def add(self, client_id, email_address, name, access_level, password):
    """Adds a person to a client. Password is optional and if not
    supplied, an invitation will be emailed to the person"""
    body = {
        "EmailAddress": email_address,
        "Name": name,
        "AccessLevel": access_level,
        "Password": password}
    response = self._post("/clients/%s/people.json" % client_id,
                          json.dumps(body))
    return json_to_py(response)
def launchApp(self, **kwargs):
    """Launch Starcraft2 process in the background using this configuration.

    WARNING: if the same IP address and port are specified between multiple
    SC2 process instances, all subsequent processes after the first will
    fail to initialize and crash.
    """
    app = self.installedApp
    # TODO -- launch host in window minimized/headless mode
    vers = self.getVersion()
    return app.start(version=vers,  # game_version=vers.baseVersion,
                     data_version=vers.dataHash,
                     full_screen=self.fullscreen,
                     verbose=self.debug,
                     **kwargs)
def update_intervals(self):
    '''
    Returns a dictionary mapping remote IDs to their intervals, designed to
    be used for variable update intervals in salt.master.FileserverUpdate.

    A remote's ID is defined here as a tuple of the GitPython/Pygit2
    object's "id" and "name" attributes, with None being assumed as the
    "name" value if the attribute is not present.
    '''
    return {(repo.id, getattr(repo, 'name', None)): repo.update_interval
            for repo in self.remotes}
def autodecode(b):
    """
    Try to decode ``bytes`` to text - try default encoding first,
    otherwise try to autodetect

    Args:
        b (bytes): byte string

    Returns:
        str: decoded text string
    """
    import warnings
    import chardet
    try:
        return b.decode()
    except UnicodeError:
        result = chardet.detect(b)
        if result['confidence'] < 0.95:
            warnings.warn('autodecode failed with utf-8; guessing %s'
                          % result['encoding'])
        # decode the original bytes, not the detect() result dict
        return b.decode(result['encoding'])
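chardet.detect returns a dict with the guessed encoding and a confidence score, which is what the fallback branch above relies on. A quick demo (exact output may vary by chardet version):

import chardet

sample = "café".encode("latin-1")
print(chardet.detect(sample))
# e.g. {'encoding': 'ISO-8859-1', 'confidence': 0.73, 'language': ''}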
def detect_encoding(sample, encoding=None):
    """Detect encoding of a byte string sample."""
    # To reduce tabulator import time
    from cchardet import detect
    if encoding is not None:
        return normalize_encoding(sample, encoding)
    result = detect(sample)
    confidence = result['confidence'] or 0
    encoding = result['encoding'] or 'ascii'
    encoding = normalize_encoding(sample, encoding)
    if confidence < config.ENCODING_CONFIDENCE:
        encoding = config.DEFAULT_ENCODING
    if encoding == 'ascii':
        encoding = config.DEFAULT_ENCODING
    return encoding
def create_category(cls, category, **kwargs):
    """Create Category

    Create a new Category
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_category(category, async=True)
    >>> result = thread.get()

    :param async bool
    :param Category category: Attributes of category to create (required)
    :return: Category
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_category_with_http_info(category, **kwargs)
    else:
        (data) = cls._create_category_with_http_info(category, **kwargs)
        return data
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
    """
    Add a folder source to scan recursively, with a regex filter on directories.

    :param regex: regex string to filter folders by.
    :param depth: if provided will be depth limit. 0 = first level only.
    :param source_type: what to return; files only, folders only, or both.
    """
    self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
    return self
def _reset(self, **kwargs):
    """
    Reset after repopulating from API.
    """
    # there are some inconsistencies in the API regarding these
    # note: this could be written in fancier ways, but this way is simpler
    if 'uuid' in kwargs:
        self.uuid = kwargs['uuid']
    elif 'storage' in kwargs:
        # let's never use storage.storage internally
        self.uuid = kwargs['storage']

    if 'title' in kwargs:
        self.title = kwargs['title']
    elif 'storage_title' in kwargs:
        self.title = kwargs['storage_title']

    if 'size' in kwargs:
        self.size = kwargs['size']
    elif 'storage_size' in kwargs:
        self.size = kwargs['storage_size']

    # send the rest to super._reset
    filtered_kwargs = dict(
        (key, val) for key, val in kwargs.items()
        if key not in ['uuid', 'storage', 'title', 'storage_title',
                       'size', 'storage_size']
    )
    super(Storage, self)._reset(**filtered_kwargs)
def __evaluate_result(self, result, condition):
    """
    Evaluates a result of a heuristic with the condition given in the config.

    :param mixed result: The result of the heuristic
    :param mixed condition: The condition string to evaluate on the result
    :return bool: Whether the heuristic result matches the condition
    """
    # If result is bool this means, that the heuristic
    # is bool as well or has a special situation
    # (for example some condition [e.g. in config] is [not] met, thus
    # just pass it)
    if isinstance(result, bool):
        return result

    # Check if the condition is a String condition,
    # allowing <=, >=, <, >, = conditions or string
    # when they start with " or '
    if isinstance(condition, basestring):
        # Check if result should match a string
        if (condition.startswith("'") and condition.endswith("'")) or \
                (condition.startswith('"') and condition.endswith('"')):
            if isinstance(result, basestring):
                self.log.debug("Condition %s recognized as string.", condition)
                return result == condition[1:-1]
            return self.__evaluation_error(
                result, condition, "Result not string")

        # Only number comparisons following
        if not isinstance(result, (float, int)):
            return self.__evaluation_error(
                result, condition, "Result not number on comparison")

        # Check if result should match a number
        if condition.startswith("="):
            number = self.__try_parse_number(condition[1:])
            if isinstance(number, bool):
                return self.__evaluation_error(
                    result, condition, "Number not parsable (=)")
            return result == number

        # Check if result should be >= a number
        if condition.startswith(">="):
            number = self.__try_parse_number(condition[2:])
            if isinstance(number, bool):
                return self.__evaluation_error(
                    result, condition, "Number not parsable (>=)")
            return result >= number

        # Check if result should be <= a number
        if condition.startswith("<="):
            number = self.__try_parse_number(condition[2:])
            if isinstance(number, bool):
                return self.__evaluation_error(
                    result, condition, "Number not parsable (<=)")
            return result <= number

        # Check if result should be > a number
        if condition.startswith(">"):
            number = self.__try_parse_number(condition[1:])
            if isinstance(number, bool):
                return self.__evaluation_error(
                    result, condition, "Number not parsable (>)")
            return result > number

        # Check if result should be < a number
        if condition.startswith("<"):
            number = self.__try_parse_number(condition[1:])
            if isinstance(number, bool):
                return self.__evaluation_error(
                    result, condition, "Number not parsable (<)")
            return result < number

        # Check if result should equal a number
        number = self.__try_parse_number(condition)
        if isinstance(number, bool):
            return self.__evaluation_error(
                result, condition, "Number not parsable")
        return result == number

    # Check if the condition is a number and matches the result
    if isinstance(condition, (float, int)) and isinstance(result, (float, int)):
        return condition == result

    return self.__evaluation_error(result, condition, "Unknown")
def seek(self, pos=0):
    """Set the stream's file pointer to pos. Negative seeking
    is forbidden.
    """
    if pos - self.pos >= 0:
        blocks, remainder = divmod(pos - self.pos, self.bufsize)
        for i in range(blocks):
            self.read(self.bufsize)
        self.read(remainder)
    else:
        raise StreamError("seeking backwards is not allowed")
    return self.pos
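The divmod in the forward branch just splits the distance into whole buffers plus a remainder; a standalone check of the arithmetic (values made up):

bufsize = 512
pos, target = 100, 1737

blocks, remainder = divmod(target - pos, bufsize)
print(blocks, remainder)  # 3 101  -> read 3 full buffers, then 101 bytes
assert pos + blocks * bufsize + remainder == target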
def makeGlyphsBoundingBoxes(self):
    """
    Make bounding boxes for all the glyphs, and return a dictionary of
    BoundingBox(xMin, xMax, yMin, yMax) namedtuples keyed by glyph names.
    The bounding box of empty glyphs (without contours or components) is
    set to None.

    Check that the float values are within the range of the specified
    self.roundTolerance, and if so use the rounded value; else take the
    floor or ceiling to ensure that the bounding box encloses the
    original values.
    """

    def getControlPointBounds(glyph):
        pen.init()
        glyph.draw(pen)
        return pen.bounds

    def toInt(value, else_callback):
        rounded = otRound(value)
        if tolerance >= 0.5 or abs(rounded - value) <= tolerance:
            return rounded
        else:
            return int(else_callback(value))

    tolerance = self.roundTolerance
    glyphBoxes = {}
    pen = ControlBoundsPen(self.allGlyphs)
    for glyphName, glyph in self.allGlyphs.items():
        bounds = None
        if glyph or glyph.components:
            bounds = getControlPointBounds(glyph)
            if bounds:
                rounded = []
                # floor the minima, ceil the maxima, so the box encloses the glyph
                for value in bounds[:2]:
                    rounded.append(toInt(value, math.floor))
                for value in bounds[2:]:
                    rounded.append(toInt(value, math.ceil))
                bounds = BoundingBox(*rounded)
        glyphBoxes[glyphName] = bounds
    return glyphBoxes
def rename_genome(genome_in, genome_out=None):
    """Rename genome and slugify headers

    Rename genomes according to a simple naming scheme; this is mainly
    done to avoid special character weirdness.

    Parameters
    ----------
    genome_in : file, str or pathlib.Path
        The input genome to be renamed and slugified.
    genome_out : file, str or pathlib.Path
        The output genome to be written into. Default is
        <base>_renamed.fa, where <base> is genome_in without its
        extension.
    """
    if genome_out is None:
        genome_out = "{}_renamed.fa".format(genome_in.split(".")[0])

    with open(genome_out, "w") as output_handle:
        for record in SeqIO.parse(genome_in, "fasta"):
            # Replace hyphens, tabs and whitespace with underscores
            new_record_id = record.id.replace(" ", "_")
            new_record_id = new_record_id.replace("-", "_")
            new_record_id = new_record_id.replace("\t", "_")
            # Remove anything that's weird, i.e. not alphanumeric
            # or an underscore
            new_record_id = re.sub("[^_A-Za-z0-9]+", "", new_record_id)
            header = ">{}\n".format(new_record_id)
            output_handle.write(header)
            output_handle.write("{}\n".format(str(record.seq)))
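The header-slugifying steps above, run on a single string (demo values only):

import re

header = "chr 1-test\tscaffold"
slug = header.replace(" ", "_").replace("-", "_").replace("\t", "_")
slug = re.sub("[^_A-Za-z0-9]+", "", slug)
print(slug)  # chr_1_test_scaffold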
def add_issue_comment(self, issue_id_or_key, content, extra_request_params=None):
    """
    client = BacklogClient("your_space_name", "your_api_key")
    client.add_issue_comment("YOUR_PROJECT-999", u"or ... else e.")
    """
    # copy instead of mutating the caller's dict (the original signature's
    # `extra_request_params={}` shared one mutable default across calls)
    request_params = dict(extra_request_params or {})
    request_params["content"] = content
    return self.do("POST", "issues/{issue_id_or_key}/comments",
                   url_params={"issue_id_or_key": issue_id_or_key},
                   request_params=request_params,
                   )
def register_iq_request_coro(self, type_, payload_cls, coro):
    """
    Alias of :meth:`register_iq_request_handler`.

    .. deprecated:: 0.10

        This alias will be removed in version 1.0.
    """
    warnings.warn(
        "register_iq_request_coro is a deprecated alias to "
        "register_iq_request_handler and will be removed in aioxmpp 1.0",
        DeprecationWarning,
        stacklevel=2)
    return self.register_iq_request_handler(type_, payload_cls, coro)
def get_axis_bin_index(self, value, axis):
    """Returns index along axis of bin in histogram which contains value

    Inclusive on both endpoints
    """
    axis = self.get_axis_number(axis)
    bin_edges = self.bin_edges[axis]
    # The right bin edge of np.histogram is inclusive:
    if value == bin_edges[-1]:
        # Minus two: one for bin edges rather than centers, one for 0-based indexing
        return len(bin_edges) - 2
    # For all other bins, it is exclusive.
    result = np.searchsorted(bin_edges, [value], side='right')[0] - 1
    if not 0 <= result <= len(bin_edges) - 1:
        raise CoordinateOutOfRangeException(
            "Value %s is not in range (%s-%s) of axis %s" % (
                value, bin_edges[0], bin_edges[-1], axis))
    return result
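The special-casing of the last edge mirrors np.histogram, whose final bin is closed on the right; a standalone check:

import numpy as np

edges = np.array([0.0, 1.0, 2.0, 3.0])

# np.histogram treats the last right edge as inclusive:
counts, _ = np.histogram([3.0], bins=edges)
print(counts)  # [0 0 1] -> 3.0 lands in the last bin

# For interior values, searchsorted(..., side='right') - 1 picks the bin:
print(np.searchsorted(edges, [1.5], side='right')[0] - 1)  # 1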
def get_cfg_value(config, section, option):
    """Get configuration value."""
    try:
        value = config[section][option]
    except KeyError:
        if (section, option) in MULTI_OPTIONS:
            return []
        else:
            return ''
    if (section, option) in MULTI_OPTIONS:
        value = split_multiline(value)
    if (section, option) in ENVIRON_OPTIONS:
        value = eval_environ(value)
    return value
def find_includes(basedirs, source, log=None):
    """Finds all thrift files included by the given thrift source.

    :basedirs: A set of thrift source file base directories to look for includes in.
    :source: The thrift source file to scan for includes.
    :log: An optional logger
    """
    all_basedirs = [os.path.dirname(source)]
    all_basedirs.extend(basedirs)

    includes = set()
    with open(source, 'r') as thrift:
        for line in thrift.readlines():
            match = INCLUDE_PARSER.match(line)
            if match:
                capture = match.group(1)
                added = False
                for basedir in all_basedirs:
                    include = os.path.join(basedir, capture)
                    if os.path.exists(include):
                        if log:
                            log.debug('{} has include {}'.format(source, include))
                        includes.add(include)
                        added = True
                if not added:
                    # report the included path as written, not the last join attempt
                    raise ValueError("{} included in {} not found in bases {}"
                                     .format(capture, source, all_basedirs))
    return includes
def packvalue(value, *properties):
    '''
    Store a specified value to specified property path. Often used in
    nstruct "init" parameter.

    :param value: a fixed value

    :param properties: specified field name, same as sizefromlen.

    :returns: a function which takes a NamedStruct as parameter, and
              store the value to property path.
    '''
    def func(namedstruct):
        v = namedstruct._target
        for p in properties[:-1]:
            v = getattr(v, p)
        setattr(v, properties[-1], value)
    return func
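A standalone demo of the returned setter, using types.SimpleNamespace to stand in for a NamedStruct and its _target (assuming the packvalue above is in scope):

from types import SimpleNamespace

target = SimpleNamespace(header=SimpleNamespace(flags=0))
ns = SimpleNamespace(_target=target)

setter = packvalue(0x8000, 'header', 'flags')
setter(ns)
print(target.header.flags)  # 32768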
def _structure_recipients_data(cls, recipients):
    """Converts recipients data into a list of Recipient objects.

    :param list recipients: list of objects
    :return: list of Recipient
    :rtype: list
    """
    try:
        # That's all due to Django 1.7 apps loading.
        from django.contrib.auth import get_user_model
        USER_MODEL = get_user_model()
    except ImportError:
        # Django 1.4 fallback.
        from django.contrib.auth.models import User as USER_MODEL

    if not is_iterable(recipients):
        recipients = (recipients,)

    objects = []
    for r in recipients:
        user = None
        if isinstance(r, USER_MODEL):
            user = r
        address = cls.get_address(r)  # todo: maybe raise an exception if not a string?
        objects.append(Recipient(cls.get_alias(), user, address))

    return objects
def diff_one(model1, model2, **kwargs):
    """Find difference between given peewee models."""
    changes = []
    fields1 = model1._meta.fields
    fields2 = model2._meta.fields

    # Add fields
    names1 = set(fields1) - set(fields2)
    if names1:
        fields = [fields1[name] for name in names1]
        changes.append(create_fields(model1, *fields, **kwargs))

    # Drop fields
    names2 = set(fields2) - set(fields1)
    if names2:
        changes.append(drop_fields(model1, *names2))

    # Change fields
    fields_ = []
    nulls_ = []
    indexes_ = []
    for name in set(fields1) - names1 - names2:
        field1, field2 = fields1[name], fields2[name]
        diff = compare_fields(field1, field2)
        null = diff.pop('null', None)
        index = diff.pop('index', None)
        if diff:
            fields_.append(field1)
        if null is not None:
            nulls_.append((name, null))
        if index is not None:
            indexes_.append((name, index[0], index[1]))

    if fields_:
        changes.append(change_fields(model1, *fields_, **kwargs))

    for name, null in nulls_:
        changes.append(change_not_null(model1, name, null))

    for name, index, unique in indexes_:
        if index is True or unique is True:
            if fields2[name].unique or fields2[name].index:
                changes.append(drop_index(model1, name))
            changes.append(add_index(model1, name, unique))
        else:
            changes.append(drop_index(model1, name))

    return changes
def scatter_table(self, x, y, c, s, mark='*'): """Add a data series to the plot. :param x: array containing x-values. :param y: array containing y-values. :param c: array containing values for the color of the mark. :param s: array containing values for the size of the mark. :param mark: the symbol used to mark the data point. May be None, or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square, triangle``). The dimensions of x, y, c and s should be equal. The c values will be mapped to a colormap. """ # clear the background of the marks # self._clear_plot_mark_background(x, y, mark, markstyle) # draw the plot series over the background options = self._parse_plot_options(mark) s = [sqrt(si) for si in s] plot_series = self._create_plot_tables_object(x, y, c, s, options) self.plot_table_list.append(plot_series)
Add a data series to the plot. :param x: array containing x-values. :param y: array containing y-values. :param c: array containing values for the color of the mark. :param s: array containing values for the size of the mark. :param mark: the symbol used to mark the data point. May be None, or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square, triangle``). The dimensions of x, y, c and s should be equal. The c values will be mapped to a colormap.
def _on_update_rt_filter(self, peer, new_rts, old_rts):
    """Handles update of peer RT filter.

    Parameters:
        - `peer`: (Peer) whose RT filter has changed.
        - `new_rts`: (set) of new RTs that peer is interested in.
        - `old_rts`: (set) of RTs that peer is no longer interested in.
    """
    for table in self._table_manager._global_tables.values():
        if table.route_family == RF_RTC_UC:
            continue
        self._spawn('rt_filter_chg_%s' % peer,
                    self._rt_mgr.on_rt_filter_chg_sync_peer,
                    peer, new_rts, old_rts, table)
        LOG.debug('RT Filter change handler launched for route_family %s',
                  table.route_family)
Handles update of peer RT filter.

Parameters:
    - `peer`: (Peer) whose RT filter has changed.
    - `new_rts`: (set) of new RTs that peer is interested in.
    - `old_rts`: (set) of RTs that peer is no longer interested in.
def cache_makedirs(self, subdir=None): ''' Make necessary directories to hold cache value ''' if subdir is not None: dirname = self.cache_path if subdir: dirname = os.path.join(dirname, subdir) else: dirname = os.path.dirname(self.cache_path) os.makedirs(dirname, exist_ok=True)
Make necessary directories to hold cache value
def get_human_size(size, use_giga=True):
    '''Convert a file size in bytes to a human-readable string.

    size - integer, the file size in bytes
    use_giga - if False, the largest unit used is MegaBytes rather than
               GigaBytes; this is useful when displaying download progress,
               because it lets the displayed status keep changing dynamically.
    '''
    size_kb = '{0:,}'.format(size)
    if size < SIZE_K:
        return ('{0} B'.format(size), size_kb)
    if size < SIZE_M:
        return ('{0:.1f} kB'.format(size / SIZE_K), size_kb)
    if size < SIZE_G or not use_giga:
        return ('{0:.1f} MB'.format(size / SIZE_M), size_kb)
    if size < SIZE_T:
        return ('{0:.1f} GB'.format(size / SIZE_G), size_kb)
    return ('{0:.1f} TB'.format(size / SIZE_T), size_kb)
Convert a file size in bytes to a human-readable string.

size - integer, the file size in bytes
use_giga - if False, the largest unit used is MegaBytes rather than GigaBytes; this is useful when displaying download progress, because it lets the displayed status keep changing dynamically.
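Assuming SIZE_K, SIZE_M, SIZE_G and SIZE_T are the usual 1024-based module constants (not shown in the source), the function behaves like this:

SIZE_K = 1024
SIZE_M = SIZE_K ** 2
SIZE_G = SIZE_K ** 3
SIZE_T = SIZE_K ** 4

print(get_human_size(1536))                           # ('1.5 kB', '1,536')
print(get_human_size(5 * SIZE_G)[0])                  # '5.0 GB'
print(get_human_size(5 * SIZE_G, use_giga=False)[0])  # '5120.0 MB'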
def get_pipe(self):
    """
    Returns a list that maps the input of the pipe to an elasticsearch
    object, by passing each line through line_to_object.
    line_to_object will call id_to_object if it cannot serialize the
    data from json.
    """
    lines = []
    for line in sys.stdin:
        try:
            lines.append(self.line_to_object(line.strip()))
        except ValueError:
            pass
        except KeyError:
            pass
    return lines
Returns a list that maps the input of the pipe to an elasticsearch object, by passing each line through line_to_object. line_to_object will call id_to_object if it cannot serialize the data from json.
def convertSequenceMachineSequence(generatedSequences):
    """
    Convert a sequence from the SequenceMachine into a list of sequences, such
    that each sequence is a list of sets of SDRs.
    """
    sequenceList = []
    currentSequence = []
    for s in generatedSequences:
        if s is None:
            sequenceList.append(currentSequence)
            currentSequence = []
        else:
            currentSequence.append(s)
    return sequenceList
Convert a sequence from the SequenceMachine into a list of sequences, such that each sequence is a list of sets of SDRs.
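For example, with None acting as the end-of-sequence marker (note that a trailing run without a final None would be dropped):

generated = [{1, 2}, {3}, None, {4}, None]
convertSequenceMachineSequence(generated)
# -> [[{1, 2}, {3}], [{4}]]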
def plot_polar(
        log,
        title,
        dataDictionary,
        pathToOutputPlotsFolder="~/Desktop",
        dataRange=False,
        ylabel=False,
        radius=False,
        circumference=True,
        circleTicksRange=(0, 360, 60),
        circleTicksLabels=".",
        prependNum=False):
    """
    *Plot a dictionary of numpy lightcurve polynomials*

    **Key Arguments:**
        - ``log`` -- logger
        - ``title`` -- title for the plot
        - ``dataDictionary`` -- dictionary of data to plot { label01 : dataArray01, label02 : dataArray02 }
        - ``pathToOutputPlotsFolder`` -- path to the output folder to save plot to
        - ``dataRange`` -- the range for the data [min, max]
        - ``ylabel`` -- ylabel
        - ``radius`` -- the max radius of the plot
        - ``circumference`` -- draw the circumference of the plot?
        - ``circleTicksRange``
        - ``circleTicksLabels``
        - ``prependNum`` -- prepend this number to the output filename

    **Return:**
        - None
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    import sys
    ## THIRD PARTY ##
    import matplotlib.pyplot as plt
    import numpy as np
    ## LOCAL APPLICATION ##

    ################ >ACTION(S) ################
    colors = [
        {'green': '#859900'},
        {'blue': '#268bd2'},
        {'red': '#dc322f'},
        {'gray': '#D2D1D1'},
        {'orange': '#cb4b16'},
        {'violet': '#6c71c4'},
        {'cyan': '#2aa198'},
        {'magenta': '#d33682'},
        {'yellow': '#b58900'}
    ]
    # FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR
    fig = plt.figure(
        num=None,
        figsize=(8, 8),
        dpi=None,
        facecolor=None,
        edgecolor=None,
        frameon=True)
    ax = fig.add_axes(
        [0.1, 0.1, 0.8, 0.8],
        polar=True,
        frameon=circumference)
    ax.set_ylim(0, radius)
    # ax.get_xaxis().set_visible(circumference)
    if circleTicksRange:
        circleTicks = np.arange(circleTicksRange[0], circleTicksRange[
                                1], circleTicksRange[2])
        tickLabels = []
        for tick in circleTicks:
            tickLabels.append(circleTicksLabels)
        plt.xticks(2 * np.pi * circleTicks / 360., tickLabels)

    count = 0
    for k, v in dataDictionary.items():
        # cycle through the palette without running off the end of the list
        colorDict = colors[count % len(colors)]
        count += 1
        thetaList = []
        twoPi = 2. * np.pi
        for i in range(len(v)):
            thetaList.append(twoPi * np.random.rand())
        thetaArray = np.array(thetaList)
        x = thetaArray
        y = v
        # 'verts' and 'hold' were removed from newer matplotlib, so they are
        # no longer passed; a dict view is also not a valid color spec, so the
        # single hex value is extracted from colorDict
        plt.scatter(
            x,
            y,
            label=k,
            s=50,
            c=list(colorDict.values())[0],
            marker='o',
            cmap=None,
            norm=None,
            vmin=None,
            vmax=None,
            alpha=0.5,
            linewidths=None,
            edgecolor='#657b83')

    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

    # Put a legend to the right of the current axis
    ax.legend(loc='center left', bbox_to_anchor=(0.7, -0.1), prop={'size': 8})
    plt.grid(True)
    plt.title(title)

    if prependNum:
        title = "%02d_%s" % (prependNum, title)
    thisTitle = title.replace(" ", "_")
    thisTitle = thisTitle.replace("-", "_")
    fileName = pathToOutputPlotsFolder + thisTitle + ".png"
    imageLink = """
![%s_plot](%s)
""" % (thisTitle, fileName)
    plt.savefig(fileName)
    plt.clf()  # clear figure
    return imageLink
*Plot a dictionary of numpy lightcurve polynomials*

**Key Arguments:**
    - ``log`` -- logger
    - ``title`` -- title for the plot
    - ``dataDictionary`` -- dictionary of data to plot { label01 : dataArray01, label02 : dataArray02 }
    - ``pathToOutputPlotsFolder`` -- path to the output folder to save plot to
    - ``dataRange`` -- the range for the data [min, max]
    - ``ylabel`` -- ylabel
    - ``radius`` -- the max radius of the plot
    - ``circumference`` -- draw the circumference of the plot?
    - ``circleTicksRange``
    - ``circleTicksLabels``
    - ``prependNum`` -- prepend this number to the output filename

**Return:**
    - None
def getVersion(init_file): """ Return BUILDBOT_VERSION environment variable, content of VERSION file, git tag or 'latest' """ try: return os.environ['BUILDBOT_VERSION'] except KeyError: pass try: cwd = os.path.dirname(os.path.abspath(init_file)) fn = os.path.join(cwd, 'VERSION') with open(fn) as f: return f.read().strip() except IOError: pass version = getVersionFromArchiveId() if version is not None: return version try: p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd) out = p.communicate()[0] if (not p.returncode) and out: v = gitDescribeToPep440(str(out)) if v: return v except OSError: pass try: # if we really can't find the version, we use the date of modification of the most recent file # docker hub builds cannot use git describe return mTimeVersion(init_file) except Exception: # bummer. lets report something return "latest"
Return BUILDBOT_VERSION environment variable, content of VERSION file, git tag or 'latest'
def tokenize (self, value):
    """Take a string and break it into tokens. Return the tokens as a list
    of strings.
    """
    # This code uses a state machine:
    class STATE:
        NORMAL = 0
        GROUP_PUNCTUATION = 1
        PROCESS_HTML_TAG = 2
        PROCESS_HTML_ENTITY = 3
        GROUP_LINEBREAKS = 4

    state_names = {
        STATE.NORMAL: "normal",
        STATE.GROUP_PUNCTUATION: "punctuation",
        STATE.PROCESS_HTML_TAG: "html",
        STATE.PROCESS_HTML_ENTITY: "html_entity",
        STATE.GROUP_LINEBREAKS: "break"
    }

    # "state" and "token" have array values to allow their
    # contents to be modified within finishToken().
    state = [STATE.NORMAL]
    token = [""]  # The current token being assembled.
    tokens = []  # The tokens extracted from the input.
    index = -1

    def clearToken():
        """Clear the current token and return to normal state."""
        token[0] = ""
        state[0] = STATE.NORMAL

    def emitToken():
        """Emit the current token, if any, and return to normal state."""
        if len(token[0]) > 0:
            # add character end and start
            char_start, char_end = index, index + len(token[0])
            if self.create_structured_tokens:
                new_token = {'value': token[0], 'type': state_names[state[0]],
                             'char_start': char_start, 'char_end': char_end}
                tokens.append(new_token)
            else:
                tokens.append(token[0])
        clearToken()

    def fixBrokenHtmlEntity():
        # This is not a valid HTML entity.
        # TODO: embedded "#" characters should be treated better
        # here.
        if not self.recognizePunctuation:
            # If we aren't treating punctuation specially, then just treat
            # the broken HTML entity as an ordinary token.
            #
            # TODO: This is not quite correct. "x& " should
            # be treated as a single token, although "s & "
            # should result in two tokens.
            state[0] = STATE.NORMAL
            return
        if self.groupPunctuation:
            # If all the saved tokens are punctuation characters, then
            # enter STATE.GROUP_PUNCTUATION instead of STATE.NORMAL.
            sawOnlyPunctuation = True
            for c in token[0]:
                if c not in CrfTokenizer.punctuationSet:
                    sawOnlyPunctuation = False
                    break
            if sawOnlyPunctuation:
                state[0] = STATE.GROUP_PUNCTUATION
                return
        # Emit the ampersand that began the prospective entity and use the
        # rest as a new current token.
        saveToken = token[0]
        token[0] = saveToken[0:1]
        emitToken()
        if len(saveToken) > 1:
            token[0] = saveToken[1:]
        # The caller should continue processing with the current
        # character.

    # Process each character in the input string:
    for c in value:
        index += 1
        if state[0] == STATE.PROCESS_HTML_TAG:
            if c in CrfTokenizer.whitespaceSet:
                continue  # Suppress for safety. CRF++ doesn't like spaces in tokens, for example.
            token[0] += c
            if c == CrfTokenizer.END_HTML_TAG_CHAR:
                if self.skipHtmlTags:
                    clearToken()
                else:
                    emitToken()
            continue

        if state[0] == STATE.PROCESS_HTML_ENTITY:
            # Parse an HTML entity name. TODO: embedded "#"
            # characters imply more extensive parsing rules should
            # be performed here.
            if c == CrfTokenizer.END_HTML_ENTITY_CHAR:
                if len(token[0]) == 1:
                    # This is the special case of "&;", which is not a
                    # valid HTML entity. If self.groupPunctuation is
                    # True, return to normal parsing state in case more
                    # punctuation follows. Otherwise, emit "&" and ";" as
                    # separate tokens.
                    if not self.recognizePunctuation:
                        # TODO: This is not quite correct. "x&;" should
                        # be treated as a single token, although "s &;"
                        # should result in two tokens.
                        token[0] = token[0] + c
                        state[0] = STATE.NORMAL
                    elif self.groupPunctuation:
                        token[0] = token[0] + c
                        state[0] = STATE.GROUP_PUNCTUATION
                    else:
                        emitToken()  # Emit the "&" as a separate token.
                        token[0] = token[0] + c
                        emitToken()  # Emit the ";" as a separate token.
                    continue
                token[0] = token[0] + c
                if self.skipHtmlEntities:
                    clearToken()
                else:
                    emitToken()
                continue
            elif c in CrfTokenizer.htmlEntityNameCharacterSet:
                token[0] = token[0] + c
                continue
            else:
                # This is not a valid HTML entity.
                fixBrokenHtmlEntity()
                # intentional fall-through

        if state[0] == STATE.GROUP_LINEBREAKS:
            # we will look for \n\r and ignore spaces
            if c in CrfTokenizer.linebreaking_character_set:
                token[0] += c
                continue
            elif c in CrfTokenizer.whitespaceSet:
                continue
            else:
                emitToken()
                state[0] = STATE.NORMAL

        if c in CrfTokenizer.whitespaceSet:
            # White space terminates the current token, then is dropped.
            emitToken()
            # Check to see whether we should look for line breaks
            if c in CrfTokenizer.linebreaking_start_character_set and self.recognize_linebreaks:
                state[0] = STATE.GROUP_LINEBREAKS
                token[0] = c
        elif c == CrfTokenizer.START_HTML_TAG_CHAR and self.recognizeHtmlTags:
            emitToken()
            state[0] = STATE.PROCESS_HTML_TAG
            token[0] = c
        elif c == CrfTokenizer.START_HTML_ENTITY_CHAR and self.recognizeHtmlEntities:
            emitToken()
            state[0] = STATE.PROCESS_HTML_ENTITY
            token[0] = c
        elif c in CrfTokenizer.punctuationSet and self.recognizePunctuation:
            if self.groupPunctuation:
                # Finish any current token. Concatenate
                # contiguous punctuation into a single token:
                if state[0] != STATE.GROUP_PUNCTUATION:
                    emitToken()
                    state[0] = STATE.GROUP_PUNCTUATION
                token[0] = token[0] + c
            else:
                # Finish any current token and form a token from
                # the punctuation character:
                emitToken()
                token[0] = c
                emitToken()
        else:
            # Everything else goes here. Presumably, that includes
            # Unicode characters that aren't ASCII
            # strings. Further work is needed.
            if state[0] != STATE.NORMAL:
                emitToken()
            token[0] = token[0] + c

    # Finish any final token and return the array of tokens:
    if state[0] == STATE.PROCESS_HTML_ENTITY:
        fixBrokenHtmlEntity()
    emitToken()

    # Was a token prefix requested? If so, we'll apply it now. If the
    # normal case is not to apply a token prefix, this might be a little
    # more efficient than applying the prefix in emitToken(). A list
    # comprehension (rather than map) keeps the documented list return
    # type under Python 3.
    if self.tokenPrefix is not None and len(self.tokenPrefix) > 0:
        tokens = [self.tokenPrefix + x for x in tokens]
    return tokens
Take a string and break it into tokens. Return the tokens as a list of strings.
def Open(pathfileext=None, shot=None, t=None, Dt=None, Mesh=None, Deg=None,
         Deriv=None, Sep=True, Pos=True, OutPath=None, ReplacePath=None,
         Ves=None, out='full', Verb=False, Print=True):
    """ Open a ToFu object saved file

    This generic open function identifies the required loading routine by detecting how the object was saved from the file name extension.
    Also, it uses :meth:`~tofu.pathfile.FindSolFile()` to identify the relevant file in case key criteria such as shot, Deg... are provided instead of the file name itself.
    Finally, once all the relevant data is loaded from the file, a ToFu object is re-created, if necessary by implicitly loading all other objects it may depend on (i.e.: vessel, apertures...)

    If pathfileext is not provided (None), then the following keyword arguments are fed to :meth:`~tofu.pathfile.FindSolFile()`: shot, t, Dt, Mesh, Deg, Deriv, Sep, Pos

    Parameters
    ----------
    pathfileext :   None / str
        If provided, the name of the file to load
    OutPath :       None / str
        If provided, the absolute path where the file is to be found
    ReplacePath :   str
        If provided, ? (to finish)
    Ves :           None /
        If provided, the :class:`tofu.geom.Ves` object that shall be used to reconstruct the object (if not provided, the appropriate vessel will be loaded).
    out :           str
        Flag indicating whether the object should be loaded completely ('full'), in a light version dismissing the heaviest attributes ('light'), or whether only the Id or a list of Id should be returned ('Id'), valid only for '.npz'
    Verb :          bool
        Flag indicating whether to print intermediate comments on the loading procedure

    Returns
    -------
    obj             ToFu object
        The loaded and re-created ToFu object

    """
    assert None in [pathfileext, shot] and not (pathfileext is None and shot is None), "Arg pathfileext or shot must be None, but not both !"
    if pathfileext is None:
        File = FindSolFile(shot=shot, t=t, Dt=Dt, Mesh=Mesh, Deg=Deg,
                           Deriv=Deriv, Sep=Sep, Pos=Pos, OutPath=OutPath)
        if File is None:
            return File
        pathfileext = os.path.join(OutPath, File)
    C = any([ss in pathfileext for ss in ['.npz']])
    assert C, "Arg pathfileext must contain '.npz' !"

    if '.npz' in pathfileext:
        obj = _open_np(pathfileext, Ves=Ves, ReplacePath=ReplacePath,
                       out=out, Verb=Verb, Print=Print)
    if Print:
        print("Loaded : " + pathfileext)
    return obj
Open a ToFu object saved file

This generic open function identifies the required loading routine by detecting how the object was saved from the file name extension.
Also, it uses :meth:`~tofu.pathfile.FindSolFile()` to identify the relevant file in case key criteria such as shot, Deg... are provided instead of the file name itself.
Finally, once all the relevant data is loaded from the file, a ToFu object is re-created, if necessary by implicitly loading all other objects it may depend on (i.e.: vessel, apertures...)

If pathfileext is not provided (None), then the following keyword arguments are fed to :meth:`~tofu.pathfile.FindSolFile()`: shot, t, Dt, Mesh, Deg, Deriv, Sep, Pos

Parameters
----------
pathfileext :   None / str
    If provided, the name of the file to load
OutPath :       None / str
    If provided, the absolute path where the file is to be found
ReplacePath :   str
    If provided, ? (to finish)
Ves :           None /
    If provided, the :class:`tofu.geom.Ves` object that shall be used to reconstruct the object (if not provided, the appropriate vessel will be loaded).
out :           str
    Flag indicating whether the object should be loaded completely ('full'), in a light version dismissing the heaviest attributes ('light'), or whether only the Id or a list of Id should be returned ('Id'), valid only for '.npz'
Verb :          bool
    Flag indicating whether to print intermediate comments on the loading procedure

Returns
-------
obj             ToFu object
    The loaded and re-created ToFu object
def set_edges(self, name: str, a: np.ndarray, b: np.ndarray, w: np.ndarray, *, axis: int) -> None: """ **DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead """ deprecated("'set_edges' is deprecated. Use 'ds.row_graphs[name] = g' or 'ds.col_graphs[name] = g' instead") try: g = scipy.sparse.coo_matrix((w, (a, b)), (self.shape[axis], self.shape[axis])) except Exception: raise ValueError("Input arrays could not be converted to a sparse matrix") if axis == 0: self.row_graphs[name] = g elif axis == 1: self.col_graphs[name] = g else: raise ValueError("axis must be 0 (rows) or 1 (columns)")
**DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
def _GetIdValue(self, registry_key): """Retrieves the Id value from Task Cache Tree key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Yields: tuple: containing: dfwinreg.WinRegistryKey: Windows Registry key. dfwinreg.WinRegistryValue: Windows Registry value. """ id_value = registry_key.GetValueByName('Id') if id_value: yield registry_key, id_value for sub_key in registry_key.GetSubkeys(): for value_key, id_value in self._GetIdValue(sub_key): yield value_key, id_value
Retrieves the Id value from Task Cache Tree key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Yields: tuple: containing: dfwinreg.WinRegistryKey: Windows Registry key. dfwinreg.WinRegistryValue: Windows Registry value.
def executor(self, max_workers=1): """single global executor""" cls = self.__class__ if cls._executor is None: cls._executor = ThreadPoolExecutor(max_workers) return cls._executor
single global executor
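Because the pool is cached on the class, max_workers only takes effect on the first call; later calls return the existing pool unchanged. A hedged usage sketch, where obj is any instance of the class:

from concurrent.futures import ThreadPoolExecutor  # the class the method instantiates

pool = obj.executor(max_workers=4)
same_pool = obj.executor(max_workers=16)  # still the original 4-worker pool
assert pool is same_pool
future = pool.submit(len, "work")
assert future.result() == 4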
def error(self, s): """ Prints out an error message to stderr. :param s: The error string to print :return: None """ print(" ERROR: '%s', %s" % (self.src_id, s), file=sys.stderr)
Prints out an error message to stderr. :param s: The error string to print :return: None
def _remove_dummies(self, to_remove=None, inplace=False):
    """Remove removable dummy atoms; works in place if ``inplace`` is True,
    otherwise returns a modified copy."""
    zmat = self if inplace else self.copy()
    if to_remove is None:
        to_remove = zmat._has_removable_dummies()
    if not to_remove:
        if inplace:
            return None
        else:
            return zmat
    has_dummies = zmat._metadata['has_dummies']

    c_table = zmat.loc[to_remove, ['b', 'a', 'd']]
    c_table['d'] = [has_dummies[k]['actual_d'] for k in to_remove]
    zmat.unsafe_loc[to_remove, 'd'] = c_table['d'].astype('i8')

    zmat_values = zmat.get_cartesian()._calculate_zmat_values(c_table)
    zmat.unsafe_loc[to_remove, ['bond', 'angle', 'dihedral']] = zmat_values
    zmat._frame.drop([has_dummies[k]['dummy_d'] for k in to_remove],
                     inplace=True)
    warnings.warn('The dummy atoms {} were removed'.format(to_remove),
                  UserWarning)
    for k in to_remove:
        zmat._metadata['has_dummies'].pop(k)
    if not inplace:
        return zmat
Remove removable dummy atoms; works in place if ``inplace`` is True, otherwise returns a modified copy.
def copy(self): """Return a deep copy""" result = Vector3(self.size, self.deriv) result.x.v = self.x.v result.y.v = self.y.v result.z.v = self.z.v if self.deriv > 0: result.x.d[:] = self.x.d result.y.d[:] = self.y.d result.z.d[:] = self.z.d if self.deriv > 1: result.x.dd[:] = self.x.dd result.y.dd[:] = self.y.dd result.z.dd[:] = self.z.dd return result
Return a deep copy
def check_multi_dimensional_coords(self, ds): ''' Checks that no multidimensional coordinate shares a name with its dimensions. Chapter 5 paragraph 4 We recommend that the name of a [multidimensional coordinate] should not match the name of any of its dimensions. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' ret_val = [] # This can only apply to auxiliary coordinate variables for coord in self._find_aux_coord_vars(ds): variable = ds.variables[coord] if variable.ndim < 2: continue not_matching = TestCtx(BaseCheck.MEDIUM, self.section_titles['5']) not_matching.assert_true(coord not in variable.dimensions, '{} shares the same name as one of its dimensions' ''.format(coord)) ret_val.append(not_matching.to_result()) return ret_val
Checks that no multidimensional coordinate shares a name with its dimensions. Chapter 5 paragraph 4 We recommend that the name of a [multidimensional coordinate] should not match the name of any of its dimensions. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
def run(self, coords=None, debug=False): """ Run the likelihood grid search """ #self.grid.precompute() self.grid.search(coords=coords) return self.grid
Run the likelihood grid search
def peek(self): """ Returns PeekableIterator.Nothing when the iterator is exhausted. """ try: v = next(self._iter) self._iter = itertools.chain((v,), self._iter) return v except StopIteration: return PeekableIterator.Nothing
Returns PeekableIterator.Nothing when the iterator is exhausted.
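A sketch of the semantics, assuming a constructor that stores an iterator on self._iter (not shown in the source): peeking re-chains the value in front, so it is not consumed and repeated peeks see the same item.

it = PeekableIterator(iter([1, 2]))  # hypothetical constructor
assert it.peek() == 1
assert it.peek() == 1        # still not consumed
assert next(it._iter) == 1   # the peeked value was re-chained in front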
def getUsrCfgFilesForPyPkg(pkgName):
    """ See if the user has one of their own local .cfg files for this
    task, such as might be created automatically during the save of a
    read-only package, and return their names. """
    # Get the python package and its .cfg file
    thePkg, theFile = findCfgFileForPkg(pkgName, '.cfg')
    # See if the user has any of their own local .cfg files for this task
    tname = getEmbeddedKeyVal(theFile, TASK_NAME_KEY)
    flist = getCfgFilesInDirForTask(getAppDir(), tname)
    return flist
See if the user has one of their own local .cfg files for this task, such as might be created automatically during the save of a read-only package, and return their names.
def get_value_in_base_currency(self) -> Decimal: """ Calculates the value of security holdings in base currency """ # check if the currency is the base currency. amt_orig = self.get_value() # Security currency sec_cur = self.get_currency() #base_cur = self.book.default_currency cur_svc = CurrenciesAggregate(self.book) base_cur = cur_svc.get_default_currency() if sec_cur == base_cur: return amt_orig # otherwise recalculate single_svc = cur_svc.get_currency_aggregate(sec_cur) rate = single_svc.get_latest_rate(base_cur) result = amt_orig * rate.value return result
Calculates the value of security holdings in base currency
def dcc_connect(self, address, port, dcctype="chat"): """Connect to a DCC peer. Arguments: address -- IP address of the peer. port -- Port to connect to. Returns a DCCConnection instance. """ warnings.warn("Use self.dcc(type).connect()", DeprecationWarning) return self.dcc(dcctype).connect(address, port)
Connect to a DCC peer. Arguments: address -- IP address of the peer. port -- Port to connect to. Returns a DCCConnection instance.
def _any_would_run(func, filenames, *args): """True if a linter function would be called on any of filenames.""" if os.environ.get("_POLYSQUARE_GENERIC_FILE_LINTER_NO_STAMPING", None): return True for filename in filenames: # suppress(E204) stamp_args, stamp_kwargs = _run_lint_on_file_stamped_args(filename, *args, **{}) dependency = jobstamp.out_of_date(func, *stamp_args, **stamp_kwargs) if dependency: return True return False
True if a linter function would be called on any of filenames.
def quadratic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 6 nodes. jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 6), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN) return bernstein
r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
def emit(self, signal, message='__nomessagetoken__'): """Emit a signal to the frontend. :param str signal: name of the signal :param message: message to send :returns: return value from frontend emit function :rtype: tornado.concurrent.Future """ # call pre-emit hooks if signal == 'log': self.log_backend.info(message) elif signal == 'warn': self.log_backend.warn(message) elif signal == 'error': self.log_backend.error(message) return self.emit_to_frontend(signal, message)
Emit a signal to the frontend. :param str signal: name of the signal :param message: message to send :returns: return value from frontend emit function :rtype: tornado.concurrent.Future
def _pywrap_tensorflow(): """Provide pywrap_tensorflow access in TensorBoard. pywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow and needs to be imported using `from tensorflow.python import pywrap_tensorflow`. Therefore, we provide a separate accessor function for it here. NOTE: pywrap_tensorflow is not part of TensorFlow API and this dependency will go away soon. Returns: pywrap_tensorflow import, if available. Raises: ImportError: if we couldn't import pywrap_tensorflow. """ try: from tensorboard.compat import notf # pylint: disable=g-import-not-at-top except ImportError: try: from tensorflow.python import pywrap_tensorflow # pylint: disable=g-import-not-at-top return pywrap_tensorflow except ImportError: pass from tensorboard.compat.tensorflow_stub import pywrap_tensorflow # pylint: disable=g-import-not-at-top return pywrap_tensorflow
Provide pywrap_tensorflow access in TensorBoard. pywrap_tensorflow cannot be accessed from tf.python.pywrap_tensorflow and needs to be imported using `from tensorflow.python import pywrap_tensorflow`. Therefore, we provide a separate accessor function for it here. NOTE: pywrap_tensorflow is not part of TensorFlow API and this dependency will go away soon. Returns: pywrap_tensorflow import, if available. Raises: ImportError: if we couldn't import pywrap_tensorflow.
def getchallenge(self): "Return server challenge" self.sock.send(CHALLENGE_PACKET) # wait challenge response for packet in self.read_iterator(self.CHALLENGE_TIMEOUT): if packet.startswith(CHALLENGE_RESPONSE_HEADER): return parse_challenge_response(packet)
Return server challenge
def _parse_dict(element, definition): """Parse xml element by a definition given in dict format. :param element: ElementTree element :param definition: definition schema :type definition: dict :return: parsed xml :rtype: dict """ sub_dict = {} for name, subdef in viewitems(definition): (name, required) = _parse_name(name) sub_dict[name] = xml_to_json(element, subdef, required) return sub_dict
Parse xml element by a definition given in dict format. :param element: ElementTree element :param definition: definition schema :type definition: dict :return: parsed xml :rtype: dict
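An illustrative sketch of the assumed contract. The real `_parse_name` and `xml_to_json` are not shown in the source, so the stand-ins below only model plausible behavior (an optional-marker suffix and child-tag text lookup):

import xml.etree.ElementTree as ET

def _parse_name(name):
    # Stand-in: assume a trailing '?' marks an optional field
    return (name.rstrip('?'), not name.endswith('?'))

def xml_to_json(element, subdef, required):
    # Stand-in: resolve the sub-definition as a child tag's text
    return element.findtext(subdef)

elem = ET.fromstring('<person><name>Ada</name></person>')
_parse_dict(elem, {'name': 'name', 'nickname?': 'nickname'})
# -> {'name': 'Ada', 'nickname': None}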
def error_response(self, request, error, **kwargs): """ Return an error to be displayed to the resource owner if anything goes awry. Errors can include invalid clients, authorization denials and other edge cases such as a wrong ``redirect_uri`` in the authorization request. :param request: :attr:`django.http.HttpRequest` :param error: ``dict`` The different types of errors are outlined in :rfc:`4.2.2.1` """ ctx = {} ctx.update(error) # If we got a malicious redirect_uri or client_id, remove all the # cached data and tell the resource owner. We will *not* redirect back # to the URL. if error['error'] in ['redirect_uri', 'unauthorized_client']: ctx.update(next='/') return self.render_to_response(ctx, **kwargs) ctx.update(next=self.get_redirect_url(request)) return self.render_to_response(ctx, **kwargs)
Return an error to be displayed to the resource owner if anything goes awry. Errors can include invalid clients, authorization denials and other edge cases such as a wrong ``redirect_uri`` in the authorization request. :param request: :attr:`django.http.HttpRequest` :param error: ``dict`` The different types of errors are outlined in :rfc:`4.2.2.1`
def generate_ha_relation_data(service, extra_settings=None):
    """ Generate relation data for ha relation

    Based on configuration options and unit interfaces, generate a json
    encoded dict of relation data items for the hacluster relation,
    providing configuration for DNS HA or VIPs + haproxy clone sets.

    Example of supplying additional settings::

        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
        AGENT_CA_PARAMS = 'op monitor interval="5s"'

        ha_console_settings = {
            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
        generate_ha_relation_data('nova', extra_settings=ha_console_settings)

    @param service: Name of the service being configured
    @param extra_settings: Dict of additional resource data
    @returns dict: json encoded data for use with relation_set
    """
    _haproxy_res = 'res_{}_haproxy'.format(service)
    _relation_data = {
        'resources': {
            _haproxy_res: 'lsb:haproxy',
        },
        'resource_params': {
            _haproxy_res: 'op monitor interval="5s"'
        },
        'init_services': {
            _haproxy_res: 'haproxy'
        },
        'clones': {
            'cl_{}_haproxy'.format(service): _haproxy_res
        },
    }

    if extra_settings:
        for k, v in extra_settings.items():
            if _relation_data.get(k):
                _relation_data[k].update(v)
            else:
                _relation_data[k] = v

    if config('dns-ha'):
        update_hacluster_dns_ha(service, _relation_data)
    else:
        update_hacluster_vip(service, _relation_data)

    return {
        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
        for k, v in _relation_data.items() if v
    }
Generate relation data for ha relation

Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIPs + haproxy clone sets.

Example of supplying additional settings::

    COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
    AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
    AGENT_CA_PARAMS = 'op monitor interval="5s"'

    ha_console_settings = {
        'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
        'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
        'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
        'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
    generate_ha_relation_data('nova', extra_settings=ha_console_settings)

@param service: Name of the service being configured
@param extra_settings: Dict of additional resource data
@returns dict: json encoded data for use with relation_set
def Reverse(self, copy=False): """ Reverse the order of the points """ numPoints = self.GetN() if copy: revGraph = self.Clone() else: revGraph = self X = self.GetX() EXlow = self.GetEXlow() EXhigh = self.GetEXhigh() Y = self.GetY() EYlow = self.GetEYlow() EYhigh = self.GetEYhigh() for i in range(numPoints): index = numPoints - 1 - i revGraph.SetPoint(i, X[index], Y[index]) revGraph.SetPointError( i, EXlow[index], EXhigh[index], EYlow[index], EYhigh[index]) return revGraph
Reverse the order of the points
def fit(self, X=None, u=None, s=None):
    """Fit X into an embedded space.

    Inputs
    ----------
    X : array, shape (n_samples, n_features)
    u,s,v : svd decomposition of X (optional)

    Assigns
    ----------
    embedding : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    u,sv,v : singular value decomposition of data S, potentially with smoothing
    isort1 : sorting along first dimension of matrix
    isort2 : sorting along second dimension of matrix (if n_Y > 0)
    cmap: correlation of each item with all locations in the embedding map (before upsampling)
    A: PC coefficients of each Fourier mode
    """
    X = X.copy()
    # string identity ('is') is unreliable; compare with '==' instead
    if self.mode == 'parallel':
        Xall = X.copy()
        X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
    #X -= X.mean(axis=-1)[:,np.newaxis]
    if u is None:
        # compute svd and keep iPC's of data
        nmin = min([X.shape[0], X.shape[1]])
        nmin = np.minimum(nmin-1, self.nPC)
        u, sv, v = svdecon(np.float64(X), k=nmin)
        u = u * sv
    NN, self.nPC = u.shape

    # first smooth in Y (if n_Y > 0)
    self.u = u
    if self.mode == 'parallel':
        NN = Xall.shape[1]
        X = np.zeros((2, NN, u.shape[1]), 'float64')
        for j in range(2):
            Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
            X[j] = Xall[j] @ self.v

    utu = np.sum(u**2, axis=1)
    ikeep = np.argmax(utu)
    #ikeep = int(NN/2)
    #ikeep = np.random.randint(0, NN)
    ccu = u @ u[ikeep,:]
    cmax = np.maximum(0, ccu)**2/utu
    ikeep = np.argsort(cmax)[::-1]
    ikeep = ikeep[:int(NN/10)]
    ikeep = np.sort(ikeep)

    if self.init == 'pca':
        U = svdecon(u[ikeep,:], k=2)[0]
        #U = u[ikeep, :2]
        usort = U * np.sign(skew(U, axis=0))
        init_sort = np.argsort(usort[:, :self.n_components], axis=0)
    elif self.init == 'random':
        init_sort = np.random.permutation(len(ikeep))[:,np.newaxis]
        for j in range(1,self.n_components):
            init_sort = np.concatenate((init_sort,
                                        np.random.permutation(len(ikeep))[:,np.newaxis]),
                                       axis=-1)
    else:
        init_sort = self.init

    if self.n_components==1 and init_sort.ndim==1:
        init_sort = init_sort[:,np.newaxis]

    # now sort in X
    isort1, iclustup = self._map(u.copy(), self.n_components, self.n_X,
                                 init_sort, ikeep, s)
    self.isort = isort1
    self.embedding = iclustup
    return self
Fit X into an embedded space. Inputs ---------- X : array, shape (n_samples, n_features) u,s,v : svd decomposition of X (optional) Assigns ---------- embedding : array-like, shape (n_samples, n_components) Stores the embedding vectors. u,sv,v : singular value decomposition of data S, potentially with smoothing isort1 : sorting along first dimension of matrix isort2 : sorting along second dimension of matrix (if n_Y > 0) cmap: correlation of each item with all locations in the embedding map (before upsampling) A: PC coefficients of each Fourier mode
async def declareWorkerType(self, *args, **kwargs): """ Update a worker-type Declare a workerType, supplying some details about it. `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1` provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope `queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`. This method takes input: ``v1/update-workertype-request.json#`` This method gives output: ``v1/workertype-response.json#`` This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
Update a worker-type Declare a workerType, supplying some details about it. `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1` provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope `queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`. This method takes input: ``v1/update-workertype-request.json#`` This method gives output: ``v1/workertype-response.json#`` This method is ``experimental``
def wait(self, timeout=None): """Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired. """ if not self._is_owned(): raise RuntimeError("cannot wait on un-acquired lock") waiter = _allocate_lock() waiter.acquire() self.__waiters.append(waiter) saved_state = self._release_save() try: # restore state no matter what (e.g., KeyboardInterrupt) if timeout is None: waiter.acquire() if __debug__: self._note("%s.wait(): got it", self) else: # Balancing act: We can't afford a pure busy loop, so we # have to sleep; but if we sleep the whole timeout time, # we'll be unresponsive. The scheme here sleeps very # little at first, longer as time goes on, but never longer # than 20 times per second (or the timeout time remaining). endtime = _time() + timeout delay = 0.0005 # 500 us -> initial delay of 1 ms while True: gotit = waiter.acquire(0) if gotit: break remaining = endtime - _time() if remaining <= 0: break delay = min(delay * 2, remaining, .05) _sleep(delay) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) try: self.__waiters.remove(waiter) except ValueError: pass else: if __debug__: self._note("%s.wait(%s): got it", self, timeout) finally: self._acquire_restore(saved_state)
Wait until notified or until a timeout occurs. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notifyAll() call for the same condition variable in another thread, or until the optional timeout occurs. Once awakened or timed out, it re-acquires the lock and returns. When the timeout argument is present and not None, it should be a floating point number specifying a timeout for the operation in seconds (or fractions thereof). When the underlying lock is an RLock, it is not released using its release() method, since this may not actually unlock the lock when it was acquired multiple times recursively. Instead, an internal interface of the RLock class is used, which really unlocks it even when it has been recursively acquired several times. Another internal interface is then used to restore the recursion level when the lock is reacquired.
def get(self, path):
    """
    Get the content of a file, identified by its path relative to the folder configured
    in PyGreen. If the file extension is one of the extensions that should be processed
    through Mako, it will be processed.
    """
    data = self.app.test_client().get("/%s" % path).data
    return data
Get the content of a file, identified by its path relative to the folder configured in PyGreen. If the file extension is one of the extensions that should be processed through Mako, it will be processed.
def cli(ctx, name, dname, license_key, ips_version, force, enable, ssl, spdy, gzip, cache, install, dev): """ Downloads and installs a new instance of the latest Invision Power Suite release. """ assert isinstance(ctx, Context) login_session = ctx.get_login() log = logging.getLogger('ipsv.new') ctx.cache = cache # Prompt for our desired license def get_license(): """ Prompt the user for a license selection @rtype: ips_vagrant.scraper.licenses.LicenseMeta """ licenses = Licenses(login_session).get() user_license = license_key or ctx.config.get('User', 'LicenseKey') # If we already have a license key saved, skip the prompt and use it instead if user_license: licenses = {license.license_key: license for license in licenses} if user_license in licenses: return licenses[user_license] # Ask the user to select a license key opt = choice([ (key, '{u} ({k})'.format(u=license.community_url, k=license.license_key)) for key, license in enumerate(licenses) ], 1, 'Which license key would you like to use?') license = licenses[opt] # Should we save this license? if click.confirm('Would you like to save and use this license for future requests?', True): ctx.log.debug('Saving license key {k}'.format(k=license.license_key)) ctx.config.set('User', 'LicenseKey', license.license_key) with open(ctx.config_path, 'wb') as configfile: ctx.config.write(configfile) return license # Get the latest IPS release lmeta = get_license() p = Echo('Fetching IPS version information...') ips = IpsManager(ctx, lmeta) p.done() if ips_version: if ips_version == 'latest_dev': v = ips.dev_version if not v: click.secho('There is no IPS development release available for download', err=True, fg='red', bold=True) raise Exception('There is no IPS development release available for download') p = Echo('Downloading IPS development release {vs}...'.format(vs=v.version.vstring)) else: ips_version = Version(ips_version) v = ips.versions[ips_version.vtuple] p = Echo('Fetching IPS version {iv}'.format(iv=ips_version.vstring)) else: v = ips.latest p = Echo('Downloading IPS release {vs}...'.format(vs=v.version.vstring)) filename = ips.get(v, cache) p.done() # Parse the specific domain and make sure it's valid log.debug('Parsing domain name: %s', dname) dname = domain_parse(dname) if ssl is None: ssl = dname.scheme == 'https' log.debug('Domain name parsed: %s', dname) domain = Domain.get_or_create(dname) # Make sure this site does not already exist p = Echo('Constructing site data...') site = ctx.db.query(Site).filter(Site.domain == domain).filter(collate(Site.name, 'NOCASE') == name).count() if site: p.done(p.FAIL) log.error('Site already exists') click.secho('An installation named "{s}" has already been created for the domain {d}' .format(s=name, d=dname.hostname), err=True, fg='red', bold=True) raise click.Abort # Create the site database entry site = Site(name=name, domain=domain, license_key=lmeta.license_key, version=v.version.vstring, ssl=ssl, spdy=spdy, gzip=gzip, enabled=enable, in_dev=dev) status = p.OK if os.path.exists(site.root): if not force: p.done(p.FAIL) click.secho("Installation path already exists and --force was not passed:\n{p}".format(p=site.root), err=True, fg='red', bold=True) log.info('Aborting installation, path already exists: {p}'.format(p=site.root)) raise click.Abort log.warn('Overwriting existing installation path: {p}'.format(p=site.root)) status = p.WARN ctx.db.add(site) ctx.db.commit() p.done(status) # Construct the HTTP path p = Echo('Constructing paths and configuration files...') site.write_nginx_config() 
p.done() # Generate SSL certificates if enabled if ssl: p = Echo('Generating SSL certificate...') ssl_path = os.path.join(ctx.config.get('Paths', 'NginxSSL'), domain.name) if not os.path.exists(ssl_path): log.debug('Creating new SSL path: %s', ssl_path) os.makedirs(ssl_path, 0o755) sc = CertificateFactory(site).get() site.ssl_key = sc.key site.ssl_certificate = sc.certificate with open(os.path.join(ssl_path, '{s}.key'.format(s=site.slug)), 'w') as f: f.write(sc.key) with open(os.path.join(ssl_path, '{s}.pem').format(s=site.slug), 'w') as f: f.write(sc.certificate) p.done() # Create a symlink if this site is being enabled if site.enabled: site.enable(force) # Restart Nginx p = Echo('Restarting web server...') FNULL = open(os.devnull, 'w') subprocess.check_call(['service', 'nginx', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT) p.done() # Extract IPS setup files p = Echo('Extracting setup files to tmp...') tmpdir = tempfile.mkdtemp('ips') setup_zip = os.path.join(tmpdir, 'setup.zip') setup_dir = os.path.join(tmpdir, 'setup') os.mkdir(setup_dir) log.info('Extracting setup files') shutil.copyfile(filename, setup_zip) with zipfile.ZipFile(setup_zip) as z: namelist = z.namelist() if re.match(r'^ips_\w{5}\/?$', namelist[0]): log.debug('Setup directory matched: %s', namelist[0]) else: log.error('No setup directory matched, unable to continue') raise Exception('Unrecognized setup file format, aborting') z.extractall(setup_dir) log.debug('Setup files extracted to: %s', setup_dir) p.done() p = MarkerProgressBar('Copying setup files...') setup_tmpdir = os.path.join(setup_dir, namelist[0]) for dirname, dirnames, filenames in p(os.walk(setup_tmpdir)): for filepath in dirnames: site_path = os.path.join(site.root, dirname.replace(setup_tmpdir, ''), filepath) if not os.path.exists(site_path): log.debug('Creating directory: %s', site_path) os.mkdir(site_path, 0o755) for filepath in filenames: tmp_path = os.path.join(dirname, filepath) site_path = os.path.join(site.root, dirname.replace(setup_tmpdir, ''), filepath) shutil.copy(tmp_path, site_path) log.info('Setup files copied to: %s', site.root) shutil.rmtree(tmpdir) # Apply proper permissions # p = MarkerProgressBar('Setting file permissions...') writeable_dirs = ['uploads', 'plugins', 'applications', 'datastore'] for wdir in writeable_dirs: log.debug('Setting file permissions in %s', wdir) os.chmod(os.path.join(site.root, wdir), 0o777) p = MarkerProgressBar('Setting file permissions...', nl=False) for dirname, dirnames, filenames in p(os.walk(os.path.join(site.root, wdir))): for filename in filenames: os.chmod(os.path.join(dirname, filename), 0o666) for filename in dirnames: os.chmod(os.path.join(dirname, filename), 0o777) Echo('Setting file permissions...').done() shutil.move(os.path.join(site.root, 'conf_global.dist.php'), os.path.join(site.root, 'conf_global.php')) os.chmod(os.path.join(site.root, 'conf_global.php'), 0o777) # Run the installation if install: p = Echo('Initializing installer...') i = installer(v.version, ctx, site, force) p.done() i.start() else: db_info = None if click.confirm('Would you like to create the database for this installation now?', default=True): db_info = create_database(site) click.echo('------') if db_info: db_name, db_user, db_pass = db_info log.debug('MySQL Database Name: %s', db_name) log.debug('MySQL Database User: %s', db_user) log.debug('MySQL Database Password: %s', db_pass) click.secho('MySQL Database Name: {dbn}'.format(dbn=db_name), bold=True) click.secho('MySQL Database User: {dbu}'.format(dbu=db_user), 
bold=True) click.secho('MySQL Database Password: {dbp}'.format(dbp=db_pass), bold=True) click.secho('IPS is now ready to be installed. To proceed with the installation, follow the link below', fg='yellow', bold=True) click.echo('{schema}://{host}'.format(schema='https' if site.ssl else 'http', host=site.domain.name))
Downloads and installs a new instance of the latest Invision Power Suite release.
def make_unicode(string): """ Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned unchanged :param string: input string :return: Unicode string """ if sys.version < '3' and isinstance(string, str): return unicode(string.decode('utf-8')) return string
Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned unchanged :param string: input string :return: Unicode string
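Behavior on the two interpreters:

# Python 2: byte strings are decoded from UTF-8 to unicode:
#   make_unicode('caf\xc3\xa9') -> u'caf\xe9'
# Python 3: str is already Unicode and is returned unchanged:
assert make_unicode('café') == 'café'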
def _RetryRequest(self, timeout=None, **request_args):
    """Retry the request a few times before we determine it failed.

    Sometimes the frontend becomes loaded and issues a 500 error to throttle the
    clients. We wait Client.error_poll_min seconds between each attempt to back
    off the frontend. Note that this does not affect any timing algorithm in the
    client itself which is controlled by the Timer() class.

    Args:
      timeout: Timeout for retry.
      **request_args: Args to the requests.request call.

    Returns:
      a tuple of (duration, requests.Response).
    """
    while True:
        try:
            now = time.time()
            if not timeout:
                timeout = config.CONFIG["Client.http_timeout"]
            result = requests.request(**request_args)
            # By default requests doesn't raise on HTTP error codes.
            result.raise_for_status()

            # Requests does not always raise an exception when an incorrect response
            # is received. This fixes that behaviour.
            if not result.ok:
                raise requests.RequestException(response=result)

            return time.time() - now, result

        # Catch any exceptions that don't have a code (e.g. socket.error).
        except IOError as e:
            self.consecutive_connection_errors += 1
            # Request failed. If we connected successfully before we attempt a few
            # connections before we determine that it really failed. This might
            # happen if the front end is loaded and returns a few throttling 500
            # messages.
            if self.active_base_url is not None:
                # Propagate 406 immediately without retrying, as 406 is a valid
                # response that indicates a need for enrollment.
                response = getattr(e, "response", None)
                if getattr(response, "status_code", None) == 406:
                    raise

                if self.consecutive_connection_errors >= self.retry_error_limit:
                    # We tried several times but this really did not work, just fail it.
                    logging.info(
                        "Too many connection errors to %s, retrying another URL",
                        self.active_base_url)
                    self.active_base_url = None
                    raise e

                # Back off hard to allow the front end to recover.
                logging.debug(
                    "Unable to connect to frontend. Backing off %s seconds.",
                    self.error_poll_min)
                self.Wait(self.error_poll_min)

            # We never previously connected, maybe the URL/proxy is wrong? Just fail
            # right away to allow callers to try a different URL.
            else:
                raise e
Retry the request a few times before we determine it failed.

Sometimes the frontend becomes loaded and issues a 500 error to throttle the clients. We wait Client.error_poll_min seconds between each attempt to back off the frontend. Note that this does not affect any timing algorithm in the client itself which is controlled by the Timer() class.

Args:
  timeout: Timeout for retry.
  **request_args: Args to the requests.request call.

Returns:
  a tuple of (duration, requests.Response).
def get_blog_context(context): """ Get context data useful on all blog related pages """ context['authors'] = get_user_model().objects.filter( owned_pages__live=True, owned_pages__content_type__model='blogpage' ).annotate(Count('owned_pages')).order_by('-owned_pages__count') context['all_categories'] = BlogCategory.objects.all() context['root_categories'] = BlogCategory.objects.filter( parent=None, ).prefetch_related( 'children', ).annotate( blog_count=Count('blogpage'), ) return context
Get context data useful on all blog related pages
def standardise_quotes(self, val): """ Change the quotes used to wrap a value to the pprint default E.g. "val" to 'val' or 'val' to "val" """ if self._in_quotes(val, self.altquote): middle = self.remove_quotes(val) val = self.add_quotes(middle) return self.escape_quotes(val)
Change the quotes used to wrap a value to the pprint default E.g. "val" to 'val' or 'val' to "val"
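A hedged sketch of the effect, assuming the instance's altquote is a double quote while the pprint default is a single quote:

# pp is an instance of the pretty-printer class above (hypothetical)
pp.standardise_quotes('"val"')   # -> "'val'" (rewrapped, then escape_quotes applied)
pp.standardise_quotes("'val'")   # already in default quotes; only escaping applies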
def normalized_per_object(image, labels): """Normalize the intensities of each object to the [0, 1] range.""" nobjects = labels.max() objects = np.arange(nobjects + 1) lmin, lmax = scind.extrema(image, labels, objects)[:2] # Divisor is the object's max - min, or 1 if they are the same. divisor = np.ones((nobjects + 1,)) divisor[lmax > lmin] = (lmax - lmin)[lmax > lmin] return (image - lmin[labels]) / divisor[labels]
Normalize the intensities of each object to the [0, 1] range.
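A small worked example; `scind` is assumed to be the module's alias for scipy.ndimage:

import numpy as np
import scipy.ndimage as scind  # assumed alias from the module imports

labels = np.array([[0, 1], [1, 2]])
image = np.array([[0.0, 2.0], [8.0, 5.0]])
normalized_per_object(image, labels)
# object 1 spans (2.0, 8.0) -> (0.0, 1.0); object 2 is constant, so its divisor stays 1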
def qn_df(df, axis='row', keep_orig=False): ''' do quantile normalization of a dataframe dictionary, does not write to net ''' df_qn = {} for mat_type in df: inst_df = df[mat_type] # using transpose to do row qn if axis == 'row': inst_df = inst_df.transpose() missing_values = inst_df.isnull().values.any() # make mask of missing values if missing_values: # get nan mask missing_mask = pd.isnull(inst_df) # tmp fill in na with zero, will not affect qn inst_df = inst_df.fillna(value=0) # calc common distribution common_dist = calc_common_dist(inst_df) # swap in common distribution inst_df = swap_in_common_dist(inst_df, common_dist) # swap back in missing values if missing_values: inst_df = inst_df.mask(missing_mask, other=np.nan) # using transpose to do row qn if axis == 'row': inst_df = inst_df.transpose() df_qn[mat_type] = inst_df return df_qn
do quantile normalization of a dataframe dictionary, does not write to net
def form_invalid(self, post_form, attachment_formset, poll_option_formset, **kwargs): """ Processes invalid forms. """ poll_errors = [k for k in post_form.errors.keys() if k.startswith('poll_')] if ( poll_errors or ( poll_option_formset and not poll_option_formset.is_valid() and len(post_form.cleaned_data['poll_question']) ) ): messages.error(self.request, self.poll_option_formset_general_error_message) return super().form_invalid( post_form, attachment_formset, poll_option_formset=poll_option_formset, **kwargs)
Processes invalid forms.