def do_quality(self, quality):
    """Apply value of quality parameter.

    For PIL docs see
    <http://pillow.readthedocs.org/en/latest/reference/Image.html#PIL.Image.Image.convert>
    """
    if quality == 'grey' or quality == 'gray':
        # Checking for 1.1 gray or 2.0 grey elsewhere
        self.logger.debug("quality: converting to gray")
        self.image = self.image.convert('L')
    elif quality == 'bitonal':
        self.logger.debug("quality: converting to bitonal")
        self.image = self.image.convert('1')
    else:
        # color or default/native (which we take as color)
        # Deal first with conversions from I;16* formats which Pillow
        # appears not to handle properly, resulting in mostly white images
        # if we convert directly. See:
        # <http://stackoverflow.com/questions/7247371/python-and-16-bit-tiff>
        if self.image.mode.startswith('I;16'):
            self.logger.debug("quality: fudged conversion from mode %s to I"
                              % (self.image.mode))
            self.image = self.image.convert('I')
            self.image = self.image.point(lambda i: i * (1.0 / 256.0))
        if self.image.mode not in ('1', 'L', 'RGB', 'RGBA'):
            # Need to convert from palette etc. in order to write out
            self.logger.debug("quality: converting from mode %s to RGB"
                              % (self.image.mode))
            self.image = self.image.convert('RGB')
        else:
            self.logger.debug("quality: quality (nop)")
def save_models(self, model_path):
    """Save machine learning models to pickle files."""
    for group, condition_model_set in self.condition_models.items():
        for model_name, model_obj in condition_model_set.items():
            out_filename = model_path + \
                "{0}_{1}_condition.pkl".format(group,
                                               model_name.replace(" ", "-"))
            with open(out_filename, "wb") as pickle_file:
                pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
    for group, size_model_set in self.size_models.items():
        for model_name, model_obj in size_model_set.items():
            out_filename = model_path + \
                "{0}_{1}_size.pkl".format(group,
                                          model_name.replace(" ", "-"))
            with open(out_filename, "wb") as pickle_file:
                pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
    for group, dist_model_set in self.size_distribution_models.items():
        for model_type, model_objs in dist_model_set.items():
            for model_name, model_obj in model_objs.items():
                out_filename = model_path + \
                    "{0}_{1}_{2}_sizedist.pkl".format(group,
                                                      model_name.replace(" ", "-"),
                                                      model_type)
                with open(out_filename, "wb") as pickle_file:
                    pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
    for model_type, track_type_models in self.track_models.items():
        for group, track_model_set in track_type_models.items():
            for model_name, model_obj in track_model_set.items():
                out_filename = model_path + \
                    "{0}_{1}_{2}_track.pkl".format(group,
                                                   model_name.replace(" ", "-"),
                                                   model_type)
                with open(out_filename, "wb") as pickle_file:
                    pickle.dump(model_obj, pickle_file, pickle.HIGHEST_PROTOCOL)
    return
def find(self, pair, default=None):
    """
    Returns the value for the kerning pair.
    **pair** is a ``tuple`` of two :ref:`type-string`\s, and the returned
    values will either be :ref:`type-int-float` or ``None``
    if no pair was found. ::

        >>> font.kerning[("A", "V")]
        -25
    """
    pair = normalizers.normalizeKerningKey(pair)
    value = self._find(pair, default)
    if value != default:
        value = normalizers.normalizeKerningValue(value)
    return value
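A quick usage sketch building on the docstring's example (assuming a fontParts-style `font` object; the pairs shown are illustrative):

value = font.kerning.find(("A", "V"))       # -25 when the pair exists
missing = font.kerning.find(("X", "Q"), 0)  # falls back to 0 when no pair was found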
def get_bucket(self, hash_name, bucket_key):
    """Returns bucket content as list of tuples (vector, data)."""
    if hash_name in self.buckets:
        if bucket_key in self.buckets[hash_name]:
            return self.buckets[hash_name][bucket_key]
    return []
def convert_mapper(self, tomap):
    """
    Converts our object from using one coordinate map to another.

    NOTE: In some cases this only approximately preserves the equivalent
    point values when transforming between coordinate spaces.
    """
    frommap = self.crdmap
    if frommap == tomap:
        return

    # mild hack to convert radii on objects that have them
    if hasattr(self, 'radius'):
        # get coordinates of a point radius away from center
        # under current coordmap
        x0, y0 = frommap.offset_pt((self.x, self.y), (self.radius, 0))
        pts = frommap.to_data(((self.x, self.y), (x0, y0)))
        pts = tomap.data_to(pts)
        self.radius = np.fabs(pts[1][0] - pts[0][0])

    elif hasattr(self, 'xradius'):
        # similar to above case, but there are 2 radii
        x0, y0 = frommap.offset_pt((self.x, self.y),
                                   (self.xradius, self.yradius))
        pts = frommap.to_data(((self.x, self.y), (x0, y0)))
        pts = tomap.data_to(pts)
        self.xradius = np.fabs(pts[1][0] - pts[0][0])
        self.yradius = np.fabs(pts[1][1] - pts[0][1])

    data_pts = self.get_data_points()

    # set our map to the new map
    self.crdmap = tomap

    self.set_data_points(data_pts)
def minimize_t0s(means, weights, combs):
    """Varies t0s to minimize the deviation of the gaussian means from zero.

    Parameters
    ----------
    means: numpy array of means of all PMT combinations
    weights: numpy array of weights for the squared sum
    combs: pmt combinations to use for minimization

    Returns
    -------
    opt_t0s: optimal t0 values for all PMTs
    """
    def make_quality_function(means, weights, combs):
        def quality_function(t0s):
            sq_sum = 0
            for mean, comb, weight in zip(means, combs, weights):
                sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight) ** 2
            return sq_sum
        return quality_function

    qfunc = make_quality_function(means, weights, combs)
    # t0s = np.zeros(31)
    t0s = np.random.rand(31)
    bounds = [(0, 0)] + [(-10., 10.)] * 30
    opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
    return opt_t0s
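Note that `scipy.optimize.minimize` returns an `OptimizeResult`, so callers read the fitted offsets off its `.x` attribute. A minimal sketch with synthetic inputs; the shapes follow the hard-coded 31 PMTs above, everything else is made up for illustration:

import numpy as np
from scipy import optimize
from itertools import combinations

combs = list(combinations(range(31), 2))     # all PMT pairs
means = np.random.normal(0, 1, len(combs))   # fake gaussian means
weights = np.ones(len(combs))                # equal weighting

result = minimize_t0s(means, weights, combs)
print(result.success, result.x[:5])          # fitted t0s live in result.x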
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = []
        all_losses = []
        for k in sorted(loss_dict.keys()):
            loss_names.append(k)
            all_losses.append(loss_dict[k])
        all_losses = torch.stack(all_losses, dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses
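A hedged sketch of where this typically sits in a distributed training loop (the loss names and the logging are illustrative, not from the source):

# On every rank, inside the training loop:
loss_dict = {"loss_cls": loss_cls, "loss_reg": loss_reg}  # hypothetical losses
sum(loss_dict.values()).backward()

# Only rank 0 receives the cross-process average; other ranks should
# not treat the returned values as averaged.
loss_dict_reduced = reduce_loss_dict(loss_dict)
if get_world_size() < 2 or dist.get_rank() == 0:
    print({k: v.item() for k, v in loss_dict_reduced.items()})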
def dispatch_command(function, *args, **kwargs):
    """
    A wrapper for :func:`dispatch` that creates a one-command parser.
    Uses :attr:`PARSER_FORMATTER`.

    This::

        dispatch_command(foo)

    ...is a shortcut for::

        parser = ArgumentParser()
        set_default_command(parser, foo)
        dispatch(parser)

    This function can be also used as a decorator.
    """
    parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
    set_default_command(parser, function)
    dispatch(parser, *args, **kwargs)
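A minimal usage sketch (this matches the argh-style pattern the docstring describes; the `greet` command is illustrative):

def greet(name, greeting='Hello'):
    """Print a greeting."""
    return '{0}, {1}!'.format(greeting, name)

if __name__ == '__main__':
    # Exposes `greet` as the script's single command, e.g.:
    #   python script.py world --greeting Hi   ->   "Hi, world!"
    dispatch_command(greet)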
def updateHeader(self, wcsname=None, reusename=False):
    """ Update header of image with shifts computed by *perform_fit()*. """
    # Insure filehandle is open and available...
    self.openFile()

    verbose_level = 1
    if not self.perform_update:
        verbose_level = 0
    # Create WCSCORR table to keep track of WCS revisions anyway
    if self.perform_update:
        wcscorr.init_wcscorr(self._im.hdu)

    extlist = []
    wcscorr_extname = self.ext_name
    if self.ext_name == "PRIMARY":
        extlist = [0]
    else:
        for ext in range(1, self.nvers + 1):
            extlist.append((self.ext_name, ext))
            # add WCSNAME to SCI headers, if not provided (such as for
            # drizzled images directly obtained from the archive pre-AD)
            if ('wcsname' not in self._im.hdu[self.ext_name, ext].header and
                    self._im.hdu.fileinfo(0)['filemode'] == 'update'):
                self._im.hdu[self.ext_name, ext].header['wcsname'] = 'Default'

    # Note: use np.isnan() here; comparing with `!= np.nan` is always True.
    if not self.identityfit and self.goodmatch and \
            not np.isnan(self.fit['offset'][0]):
        updatehdr.updatewcs_with_shift(self._im.hdu, self.refWCS,
            wcsname=wcsname, reusename=reusename,
            fitgeom=self.fit_pars['fitgeometry'],
            xsh=self.fit['offset'][0], ysh=self.fit['offset'][1],
            rot=self.fit['rot'], scale=self.fit['scale'][0],
            fit=self.fit['fit_matrix'], verbose=verbose_level,
            xrms=self.fit['rms_keys']['RMS_RA'],
            yrms=self.fit['rms_keys']['RMS_DEC'])

        wnames = altwcs.wcsnames(self._im.hdu, ext=extlist[0])

        altkeys = []
        for k in wnames:
            if wnames[k] == wcsname:
                altkeys.append(k)
        if len(altkeys) > 1 and ' ' in altkeys:
            altkeys.remove(' ')
        if len(altkeys) == 0:
            next_key = ' '
        else:
            next_key = altkeys[-1]
        if self.perform_update:
            log.info('Writing out new WCS to alternate WCS: "%s"' % next_key)

        self.next_key = next_key
    else:  # if self.identityfit or not self.goodmatch:
        if reusename:
            # Look for key of WCS with this name
            next_key = altwcs.getKeyFromName(
                self._im.hdu[extlist[0]].header, wcsname)
            # This wcsname is new, so start fresh
            if next_key is None:
                next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)
        else:
            # Find key for next WCS and save again to replicate an
            # updated solution
            next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)

        if self.perform_update:
            # archive current WCS as alternate WCS with specified WCSNAME
            # Start by archiving original PRIMARY WCS
            wnames = altwcs.wcsnames(self._im.hdu, ext=extlist[0])
            # Define a default WCSNAME in the case that the file to be
            # updated did not have the WCSNAME keyword defined already
            # (as will happen when updating images that have not been
            # updated using updatewcs).
            if len(wnames) == 0:
                pri_wcsname = None
            else:
                # Safeguard against headers not having WCSNAME defined
                # This would occur if they were written out by something
                # other than stwcs.updatewcs
                if ' ' not in wnames:
                    self._im.hdu[extlist[0]].header['wcsname'] = ''
                    wnames[' '] = ''
                pri_wcsname = wnames[' ']
            next_pkey = altwcs.getKeyFromName(
                fits.getheader(self.name, extlist[0], memmap=False),
                pri_wcsname)
            log.info('Saving Primary WCS to alternate WCS: "%s"' % next_pkey)
            altwcs.archiveWCS(self._im.hdu, extlist, wcskey=next_pkey,
                              wcsname=pri_wcsname, reusekey=True)

            if reusename:
                # Look for key of WCS with this name
                next_key = altwcs.getKeyFromName(
                    self._im.hdu[extlist[0]].header, wcsname)
                # This wcsname is new, so start fresh
                if next_key is None:
                    next_key = altwcs.next_wcskey(
                        self._im.hdu[extlist[0]].header)
            else:
                # Find key for next WCS and save again to replicate an
                # updated solution
                next_key = altwcs.next_wcskey(self._im.hdu[extlist[0]].header)

            # update WCSNAME to be the new name
            for ext in extlist:
                self._im.hdu[ext].header['WCSNAME'] = wcsname
            # save again using new WCSNAME
            altwcs.archiveWCS(self._im.hdu, extlist, wcskey=next_key,
                              wcsname=wcsname, reusekey=reusename)
        self.next_key = ' '

    # add FIT values to image's PRIMARY header
    fimg = self._im.hdu

    if wcsname in ['', ' ', None, "INDEF"]:
        wcsname = 'TWEAK'
    # Record values for the fit with both the PRIMARY WCS being updated
    # and the alternate WCS which will be created.
    assert not self._im.closed
    for ext in extlist:
        self._im.hdu[ext].header['FITNAME' + next_key] = wcsname
        for kw in self.fit['rms_keys']:
            self._im.hdu[ext].header.set(
                kw + next_key,
                self.fit['rms_keys'][kw],
                after='FITNAME' + next_key)

    if self.perform_update:
        log.info('Updating WCSCORR table with new WCS solution "%s"' % wcsname)
        wcscorr.update_wcscorr(self._im.hdu, wcs_id=wcsname,
                               extname=self.ext_name)
def rootChild_resetPassword(self, req, webViewer):
    """
    Redirect authenticated users to their settings page (hopefully they
    have one) when they try to reset their password.

    This is the wrong way for this functionality to be implemented. See
    #2524.
    """
    from xmantissa.ixmantissa import IWebTranslator, IPreferenceAggregator
    return URL.fromString(
        IWebTranslator(self.store).linkTo(
            IPreferenceAggregator(self.store).storeID))
def _get_table_info(self):
    """Database-specific method to get field names"""
    self.rowid = None
    self.fields = []
    self.field_info = {}
    self.cursor.execute('DESCRIBE %s' % self.name)
    for row in self.cursor.fetchall():
        field, typ, null, key, default, extra = row
        self.fields.append(field)
        self.field_info[field] = {'type': typ, 'NOT NULL': null,
                                  'key': key, 'DEFAULT': default,
                                  'extra': extra}
        if extra == 'auto_increment':
            self.rowid = field
def get_cameras_schedule(self):
    """Return the schedule set for cameras."""
    resource = "schedule"
    schedule_event = self.publish_and_get_event(resource)
    if schedule_event:
        return schedule_event.get('properties')
    return None
def add(self, *nodes):
    """
    Adds nodes as siblings

    :param nodes: GraphNode(s)
    """
    for node in nodes:
        node.set_parent(self)
        self.add_sibling(node)
def check_webhook_secret(app_configs=None, **kwargs):
    """
    Check that DJSTRIPE_WEBHOOK_SECRET looks correct
    """
    from . import settings as djstripe_settings

    messages = []
    secret = djstripe_settings.WEBHOOK_SECRET
    if secret and not secret.startswith("whsec_"):
        messages.append(
            checks.Warning(
                "DJSTRIPE_WEBHOOK_SECRET does not look valid",
                hint="It should start with whsec_...",
                id="djstripe.W003",
            )
        )

    return messages
def iter_actions(self):
    """Yield the service's actions with their arguments.

    Yields:
        `Action`: the next action.

    Each action is an Action namedtuple, consisting of action_name
    (a string), in_args (a list of Argument namedtuples consisting of name
    and argtype), and out_args (ditto), eg::

        Action(
            name='SetFormat',
            in_args=[
                Argument(name='DesiredTimeFormat', vartype=<Vartype>),
                Argument(name='DesiredDateFormat', vartype=<Vartype>)],
            out_args=[]
        )
    """
    # pylint: disable=too-many-locals
    # pylint: disable=invalid-name
    ns = '{urn:schemas-upnp-org:service-1-0}'

    # get the scpd body as bytes, and feed directly to elementtree
    # which likes to receive bytes
    scpd_body = requests.get(self.base_url + self.scpd_url).content
    tree = XML.fromstring(scpd_body)

    # parse the state variables to get the relevant variable types
    vartypes = {}
    srvStateTables = tree.findall('{}serviceStateTable'.format(ns))
    for srvStateTable in srvStateTables:
        statevars = srvStateTable.findall('{}stateVariable'.format(ns))
        for state in statevars:
            name = state.findtext('{}name'.format(ns))
            datatype = state.findtext('{}dataType'.format(ns))
            default = state.findtext('{}defaultValue'.format(ns))
            value_list_elt = state.find('{}allowedValueList'.format(ns))
            if value_list_elt is None:
                value_list_elt = ()
            value_list = [item.text for item in value_list_elt] or None
            value_range_elt = state.find('{}allowedValueRange'.format(ns))
            if value_range_elt is None:
                value_range_elt = ()
            value_range = [item.text for item in value_range_elt] or None
            vartypes[name] = Vartype(datatype, default, value_list,
                                     value_range)

    # find all the actions
    actionLists = tree.findall('{}actionList'.format(ns))
    for actionList in actionLists:
        actions = actionList.findall('{}action'.format(ns))
        for i in actions:
            action_name = i.findtext('{}name'.format(ns))
            argLists = i.findall('{}argumentList'.format(ns))
            for argList in argLists:
                args_iter = argList.findall('{}argument'.format(ns))
                in_args = []
                out_args = []
                for arg in args_iter:
                    arg_name = arg.findtext('{}name'.format(ns))
                    direction = arg.findtext('{}direction'.format(ns))
                    related_variable = arg.findtext(
                        '{}relatedStateVariable'.format(ns))
                    vartype = vartypes[related_variable]
                    if direction == "in":
                        in_args.append(Argument(arg_name, vartype))
                    else:
                        out_args.append(Argument(arg_name, vartype))
                yield Action(action_name, in_args, out_args)
def login(self, access_token=""):
    """
    Configure and save {prog} authentication credentials.

    This command may open a browser window to ask for your consent to use
    web service authentication credentials.
    """
    if access_token:
        credentials = argparse.Namespace(token=access_token,
                                         refresh_token=None,
                                         id_token=None)
    else:
        scopes = ["openid", "email", "offline_access"]
        from google_auth_oauthlib.flow import InstalledAppFlow
        flow = InstalledAppFlow.from_client_config(self.application_secrets,
                                                   scopes=scopes)
        msg = ("Authentication successful. Please close this tab and run "
               "HCA CLI commands in the terminal.")
        credentials = flow.run_local_server(success_message=msg,
                                            audience=self._audience)
    # TODO: (akislyuk) test token autorefresh on expiration
    self.config.oauth2_token = dict(access_token=credentials.token,
                                    refresh_token=credentials.refresh_token,
                                    id_token=credentials.id_token,
                                    expires_at="-1",
                                    token_type="Bearer")
    print("Storing access credentials")
def find_children(self, pattern=r".*", flags=0, candidates=None):
    """
    Finds the children matching the given pattern.

    Usage::

        >>> node_a = AbstractCompositeNode("MyNodeA")
        >>> node_b = AbstractCompositeNode("MyNodeB", node_a)
        >>> node_c = AbstractCompositeNode("MyNodeC", node_a)
        >>> node_a.find_children("c", re.IGNORECASE)
        [<AbstractCompositeNode object at 0x101078040>]

    :param pattern: Matching pattern.
    :type pattern: unicode
    :param flags: Matching regex flags.
    :type flags: int
    :param candidates: Matching candidates.
    :type candidates: list
    :return: Matching children.
    :rtype: list
    """
    if candidates is None:
        candidates = []

    for child in self.__children:
        if re.search(pattern, child.name, flags):
            if child not in candidates:
                candidates.append(child)
        child.find_children(pattern, flags, candidates)
    return candidates
def coverage(fn):
    """Mark `fn` for line coverage analysis.

    Results will be printed to sys.stdout on program termination.

    Usage::

        def fn(...):
            ...
        fn = coverage(fn)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @coverage
        def fn(...):
            ...
    """
    fp = TraceFuncCoverage(fn)  # or HotShotFuncCoverage
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
def _split_audio_by_duration(self, audio_abs_path, results_abs_path,
                             duration_seconds):
    """
    Calculates the length of each segment and passes it to
    self._audio_segment_extractor

    Parameters
    ----------
    audio_abs_path : str
    results_abs_path : str
        A place for adding digits needs to be added prior to the format
        declaration, i.e. name%03.wav. Here, we've added `*` at the
        staging step, which we'll replace.
    duration_seconds : int
    """
    total_seconds = self._get_audio_duration_seconds(audio_abs_path)
    current_segment = 0
    while current_segment <= total_seconds // duration_seconds + 1:
        if current_segment + duration_seconds > total_seconds:
            ending_second = total_seconds
        else:
            ending_second = current_segment + duration_seconds
        self._audio_segment_extractor(
            audio_abs_path,
            results_abs_path.replace("*", "{:03d}".format(current_segment)),
            starting_second=current_segment,
            duration=(ending_second - current_segment))
        current_segment += 1
def broken_faces(mesh, color=None):
    """
    Return the index of faces in the mesh which break the
    watertight status of the mesh.

    Parameters
    --------------
    mesh: Trimesh object
    color: (4,) uint8, will set broken faces to this color
           None,       will not alter mesh colors

    Returns
    ---------------
    broken: (n, ) int, indexes of mesh.faces
    """
    adjacency = nx.from_edgelist(mesh.face_adjacency)
    broken = [k for k, v in dict(adjacency.degree()).items()
              if v != 3]
    broken = np.array(broken)
    if color is not None:
        # if someone passed a broken color
        color = np.array(color)
        if not (color.shape == (4,) or color.shape == (3,)):
            color = [255, 0, 0, 255]
        mesh.visual.face_colors[broken] = color
    return broken
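A hedged usage sketch with trimesh (the box construction is illustrative; any mesh with a deleted face would do):

import trimesh

mesh = trimesh.creation.box()
mesh.faces = mesh.faces[:-1]    # remove one face so the mesh is no longer watertight

bad = broken_faces(mesh)
print(mesh.is_watertight, bad)  # False, plus the indexes of the boundary faces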
def write_long_at(self, n, pos, pack_into=Struct('>I').pack_into):
    '''
    Write an unsigned 32bit value at a specific position in the buffer.
    Used for writing tables and frames.
    '''
    if 0 <= n <= 0xFFFFFFFF:
        pack_into(self._output_buffer, pos, n)
    else:
        raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n)
    return self
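For reference, a standalone sketch of the same big-endian packing trick on a plain `bytearray` (no framing classes involved):

from struct import Struct

buf = bytearray(8)
pack_into = Struct('>I').pack_into  # '>I' = big-endian unsigned 32-bit

pack_into(buf, 0, 0xDEADBEEF)       # write at offset 0
pack_into(buf, 4, 42)               # write at offset 4
print(buf.hex())                    # 'deadbeef0000002a'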
def _multicall_callback(self, values, calls):
    """
    Fires when we get information back from the XML-RPC server.

    This processes the raw results of system.multicall into a usable
    iterator of values (and/or Faults).

    :param values: list of data from txkoji.Connection.call()
    :param calls: list of calls we sent in this multicall RPC
    :returns: KojiMultiCallIterator with the resulting values from all
              our calls.
    """
    result = KojiMultiCallIterator(values)
    result.connection = self.connection
    result.calls = calls
    return result
def get_social_login(self, *args, **kwargs):
    """
    Set the social login process state to connect rather than login.

    Refer to the implementation of get_social_login in base class and to
    the allauth.socialaccount.helpers module complete_social_login
    function.
    """
    social_login = super(SocialConnectMixin, self).get_social_login(
        *args, **kwargs)
    social_login.state['process'] = AuthProcess.CONNECT
    return social_login
def auth_required(realm, auth_func):
    '''Decorator that protects methods with HTTP authentication.'''
    def auth_decorator(func):
        def inner(self, *args, **kw):
            if self.get_authenticated_user(auth_func, realm):
                return func(self, *args, **kw)
        return inner
    return auth_decorator
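A usage sketch, assuming a Tornado-style handler whose mixin supplies `get_authenticated_user`; the handler, mixin, and credential check are all illustrative:

def check_credentials(username, password):
    # Hypothetical check; a real application would hash and look up.
    return username == 'admin' and password == 's3cret'

class SecretHandler(BasicAuthMixin, RequestHandler):
    @auth_required('Protected Area', check_credentials)
    def get(self):
        self.write('hello, authenticated user')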
def as_new_format(self, format="ATR"):
    """ Create a new disk image in the specified format """
    first_data = len(self.header)
    raw = self.rawdata[first_data:]
    data = add_atr_header(raw)
    newraw = SegmentData(data)
    image = self.__class__(newraw)
    return image
def isInRoom(self, _id):
    """ Check a given user is in given room """
    if self._gcls() + _id in SockJSRoomHandler._room:
        if self in SockJSRoomHandler._room[self._gcls() + _id]:
            return True
    return False
def create_snapshot(self, systemId, snapshotSpecificationObject):
    """
    Create snapshot for list of volumes

    :param systemId: Cluster ID
    :param snapshotSpecificationObject: Of class SnapshotSpecification
    :rtype: SnapshotGroupId
    """
    self.conn.connection._check_login()
    # try:
    response = self.conn.connection._do_post(
        "{}/{}{}/{}".format(self.conn.connection._api_url,
                            "instances/System::",
                            systemId,
                            'action/snapshotVolumes'),
        json=snapshotSpecificationObject.__to_dict__())
    # except:
    #     raise RuntimeError("create_snapshot_by_system_id() - Error communicating with ScaleIO gateway")
    return response
def update(self, read, write, manage):
    """
    Update the SyncMapPermissionInstance

    :param bool read: Read access.
    :param bool write: Write access.
    :param bool manage: Manage access.

    :returns: Updated SyncMapPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionInstance
    """
    data = values.of({'Read': read, 'Write': write, 'Manage': manage, })

    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )

    return SyncMapPermissionInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        map_sid=self._solution['map_sid'],
        identity=self._solution['identity'],
    )
def listTheExtras(self, deleteAlso):
    """ Use ConfigObj's get_extra_values() call to find any extra/unknown
    parameters we may have loaded.  Return a string similar to findTheLost.
    If deleteAlso is True, this will also delete any extra/unknown items.
    """
    # get list of extras
    extras = configobj.get_extra_values(self)
    # extras is in format: [(sections, key), (sections, key), ]
    # but we need: [(sections, key, result), ...] - set all results to
    # a bool just to make it the right shape.  BUT, since we are in
    # here anyway, make that bool mean something - hide info in it about
    # whether that extra item is a section (1) or just a single par (0)
    #
    # simplified, this is:  expanded = [ (x+(abool,)) for x in extras]
    expanded = [(x + (bool(len(x[0]) < 1 and hasattr(self[x[1]], 'keys')),))
                for x in extras]
    retval = ''
    if expanded:
        retval = flattened2str(expanded, extra=1)
    # but before we return, delete them (from ourself!) if requested to
    if deleteAlso:
        for tup_to_del in extras:
            target = self
            # descend the tree to the dict where this item is located.
            # (this works because target is not a copy (because the dict
            #  type is mutable))
            location = tup_to_del[0]
            for subdict in location:
                target = target[subdict]
            # delete it
            target.pop(tup_to_del[1])
    return retval
def get_is_group_member(self, grp_name, user):
    """
    Check if the given user is a member of the named group.

    Note that a group maintainer is not considered a member unless the
    user is also explicitly added as a member.

    Args:
        grp_name (string): Name of group.
        user (string): User of interest.

    Returns:
        (bool): False if user not a member.
    """
    self.project_service.set_auth(self._token_project)
    return self.project_service.get_is_group_member(grp_name, user)
def RemoveClass(self, class_name):
    """Removes an entry from the list of known classes.

    Args:
        class_name: A string with the class name that is to be removed.

    Raises:
        NonexistentMapping if there is no class with the specified
        class_name.
    """
    if class_name not in self._class_mapping:
        raise problems.NonexistentMapping(class_name)
    del self._class_mapping[class_name]
def MeetsConditions(knowledge_base, source):
    """Check conditions on the source."""
    source_conditions_met = True
    os_conditions = ConvertSupportedOSToConditions(source)
    if os_conditions:
        source.conditions.append(os_conditions)
    for condition in source.conditions:
        source_conditions_met &= artifact_utils.CheckCondition(
            condition, knowledge_base)
    return source_conditions_met
def safe_listget(list_, index, default='?'):
    """ deprecate """
    if index >= len(list_):
        return default
    ret = list_[index]
    if ret is None:
        return default
    return ret
def multi_to_dict(multi):
    '''Transform a Werkzeug multidictionary into a flat dictionary'''
    return dict(
        (key, value[0] if len(value) == 1 else value)
        for key, value in multi.to_dict(False).items()
    )
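A quick illustration with Werkzeug's `MultiDict`: single-valued keys flatten to scalars, repeated keys stay as lists:

from werkzeug.datastructures import MultiDict

multi = MultiDict([('page', '1'), ('tag', 'a'), ('tag', 'b')])
print(multi_to_dict(multi))
# {'page': '1', 'tag': ['a', 'b']}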
def make_pipeline(context):
    """
    Create our pipeline.
    """
    # Filter for primary share equities. IsPrimaryShare is a built-in filter.
    primary_share = IsPrimaryShare()

    # Not when-issued equities.
    not_wi = ~IEXCompany.symbol.latest.endswith('.WI')

    # Equities without LP in their name, .matches does a match using a
    # regular expression
    not_lp_name = ~IEXCompany.companyName.latest.matches('.* L[. ]?P.?$')

    # Equities whose most recent Morningstar market cap is not null have
    # fundamental data and therefore are not ETFs.
    have_market_cap = IEXKeyStats.marketcap.latest >= 1

    # At least a certain price
    price = USEquityPricing.close.latest
    AtLeastPrice = (price >= context.MyLeastPrice)
    AtMostPrice = (price <= context.MyMostPrice)

    # Filter for stocks that pass all of our previous filters.
    tradeable_stocks = (
        primary_share
        & not_wi
        & not_lp_name
        & have_market_cap
        & AtLeastPrice
        & AtMostPrice
    )

    LowVar = 6
    HighVar = 40

    log.info(
        '''
Algorithm initialized variables:
 context.MaxCandidates %s
 LowVar %s
 HighVar %s''' % (context.MaxCandidates, LowVar, HighVar))

    # High dollar volume filter.
    base_universe = AverageDollarVolume(
        window_length=20,
        mask=tradeable_stocks
    ).percentile_between(LowVar, HighVar)

    # Short close price average.
    ShortAvg = SimpleMovingAverage(
        inputs=[USEquityPricing.close],
        window_length=3,
        mask=base_universe
    )

    # Long close price average.
    LongAvg = SimpleMovingAverage(
        inputs=[USEquityPricing.close],
        window_length=45,
        mask=base_universe
    )

    percent_difference = (ShortAvg - LongAvg) / LongAvg

    # Filter to select securities to long.
    stocks_worst = percent_difference.bottom(context.MaxCandidates)
    securities_to_trade = (stocks_worst)

    return Pipeline(
        columns={
            'stocks_worst': stocks_worst
        },
        screen=(securities_to_trade),
    )
def send_request(user_session, method, request):
    """
    Send request to SMC

    :param Session user_session: session object
    :param str method: method for request
    :param SMCRequest request: request object
    :raises SMCOperationFailure: failure with reason
    :rtype: SMCResult
    """
    if user_session.session:
        session = user_session.session  # requests session

        try:
            method = method.upper() if method else ''

            if method == GET:
                if request.filename:  # File download request
                    return file_download(user_session, request)

                response = session.get(
                    request.href,
                    params=request.params,
                    headers=request.headers,
                    timeout=user_session.timeout)
                response.encoding = 'utf-8'
                counters.update(read=1)

                if logger.isEnabledFor(logging.DEBUG):
                    debug(response)

                if response.status_code not in (200, 204, 304):
                    raise SMCOperationFailure(response)

            elif method == POST:
                if request.files:  # File upload request
                    return file_upload(user_session, method, request)

                response = session.post(
                    request.href,
                    data=json.dumps(request.json, cls=CacheEncoder),
                    headers=request.headers,
                    params=request.params)
                response.encoding = 'utf-8'
                counters.update(create=1)

                if logger.isEnabledFor(logging.DEBUG):
                    debug(response)

                if response.status_code not in (200, 201, 202):
                    # 202 is asynchronous response with follower link
                    raise SMCOperationFailure(response)

            elif method == PUT:
                if request.files:  # File upload request
                    return file_upload(user_session, method, request)

                # Etag should be set in request object
                request.headers.update(Etag=request.etag)

                response = session.put(
                    request.href,
                    data=json.dumps(request.json, cls=CacheEncoder),
                    params=request.params,
                    headers=request.headers)
                counters.update(update=1)

                if logger.isEnabledFor(logging.DEBUG):
                    debug(response)

                if response.status_code != 200:
                    raise SMCOperationFailure(response)

            elif method == DELETE:
                response = session.delete(
                    request.href,
                    headers=request.headers)
                counters.update(delete=1)

                # Conflict (409) if ETag is not current
                if response.status_code in (409,):
                    req = session.get(request.href)
                    etag = req.headers.get('ETag')
                    response = session.delete(
                        request.href,
                        headers={'if-match': etag})
                response.encoding = 'utf-8'

                if logger.isEnabledFor(logging.DEBUG):
                    debug(response)

                if response.status_code not in (200, 204):
                    raise SMCOperationFailure(response)

            else:  # Unsupported method
                return SMCResult(msg='Unsupported method: %s' % method,
                                 user_session=user_session)

        except SMCOperationFailure as error:
            if error.code in (401,):
                user_session.refresh()
                return send_request(user_session, method, request)
            raise error

        except requests.exceptions.RequestException as e:
            raise SMCConnectionError(
                'Connection problem to SMC, ensure the API '
                'service is running and host is correct: %s, exiting.' % e)

        else:
            return SMCResult(response, user_session=user_session)

    else:
        raise SMCConnectionError('No session found. Please login to continue')
def jensen_shannon(logu, self_normalized=False, name=None):
    """The Jensen-Shannon Csiszar-function in log-space.

    A Csiszar-function is a member of,

    ```none
    F = { f:R_+ to R : f convex }.
    ```

    When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:

    ```none
    f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
    ```

    When `self_normalized = False` the `(u + 1) log(2)` term is omitted.

    Observe that as an f-Divergence, this Csiszar-function implies:

    ```none
    D_f[p, q] = KL[p, m] + KL[q, m]
    m(x) = 0.5 p(x) + 0.5 q(x)
    ```

    In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
    f-Divergence.

    This Csiszar-function induces a symmetric f-Divergence, i.e.,
    `D_f[p, q] = D_f[q, p]`.

    Warning: this function makes non-log-space calculations and may therefore
    be numerically unstable for `|logu| >> 0`.

    For more information, see:
      Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
      Inf. Th., 37, 145-151, 1991.

    Args:
      logu: `float`-like `Tensor` representing `log(u)` from above.
      self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
        `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative
        even when `p, q` are unnormalized measures.
      name: Python `str` name prefixed to Ops created by this function.

    Returns:
      jensen_shannon_of_u: `float`-like `Tensor` of the Csiszar-function
        evaluated at `u = exp(logu)`.
    """
    with tf.compat.v1.name_scope(name, "jensen_shannon", [logu]):
        logu = tf.convert_to_tensor(value=logu, name="logu")
        npdt = logu.dtype.as_numpy_dtype
        y = tf.nn.softplus(logu)
        if self_normalized:
            y -= np.log(2).astype(npdt)
        return tf.exp(logu) * logu - (1. + tf.exp(logu)) * y
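As a sanity check on the algebra: since `softplus(logu) = log(1 + u)`, the returned expression is `u log(u) - (1 + u) log(1 + u)`, plus `(1 + u) log(2)` in the self-normalized case. The same formula in plain numpy:

import numpy as np

def jensen_shannon_np(logu, self_normalized=False):
    u = np.exp(logu)
    y = np.log1p(u)          # softplus(logu) = log(1 + u)
    if self_normalized:
        y -= np.log(2)
    return u * logu - (1. + u) * y

# f(1) = 0 in the self-normalized case, as expected for an f-divergence:
print(jensen_shannon_np(0.0, self_normalized=True))  # 0.0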
def _prefix_from_uri(self, uriname):
    """Given a fully qualified XML name, find a prefix

    e.g. {http://ns.adobe.com/pdf/1.3/}Producer -> pdf:Producer
    """
    uripart, tag = uriname.split('}', maxsplit=1)
    uri = uripart.replace('{', '')
    return self.REVERSE_NS[uri] + ':' + tag
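This undoes ElementTree's "Clark notation" (`{uri}localname`). A standalone sketch with an assumed reverse-lookup table; the real class presumably builds `REVERSE_NS` from its registered namespaces:

REVERSE_NS = {'http://ns.adobe.com/pdf/1.3/': 'pdf'}  # hypothetical uri -> prefix map

def prefix_from_uri(uriname):
    uripart, tag = uriname.split('}', maxsplit=1)
    uri = uripart.replace('{', '')
    return REVERSE_NS[uri] + ':' + tag

print(prefix_from_uri('{http://ns.adobe.com/pdf/1.3/}Producer'))  # pdf:Producer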
def status(
        message: str = None,
        progress: float = None,
        section_message: str = None,
        section_progress: float = None,
):
    """
    Updates the status display, which is only visible while a step is
    running. This is useful for providing feedback and information during
    long-running steps.

    A section progress is also available for cases where long running tasks
    consist of multiple tasks and you want to display sub-progress messages
    within the context of the larger status.

    Note: this is only supported when running in the Cauldron desktop
    application.

    :param message:
        The status message you want to display. If left blank the previously
        set status message will be retained. Should you desire to remove an
        existing message, specify a blank string for this argument.
    :param progress:
        A number between zero and one that indicates the overall progress
        for the current status. If no value is specified, the previously
        assigned progress will be retained.
    :param section_message:
        The status message you want to display for a particular task within
        a long-running step. If left blank the previously set section
        message will be retained. Should you desire to remove an existing
        message, specify a blank string for this argument.
    :param section_progress:
        A number between zero and one that indicates the progress for the
        current section status. If no value is specified, the previously
        assigned section progress value will be retained.
    """
    environ.abort_thread()
    step = _cd.project.get_internal_project().current_step

    if message is not None:
        step.progress_message = message
    if progress is not None:
        step.progress = max(0.0, min(1.0, progress))
    if section_message is not None:
        step.sub_progress_message = section_message
    if section_progress is not None:
        step.sub_progress = section_progress
def replace_find_selection(self, focus_replace_text=False):
    """Replace and find in the current selection"""
    if self.editor is not None:
        replace_text = to_text_string(self.replace_text.currentText())
        search_text = to_text_string(self.search_text.currentText())
        case = self.case_button.isChecked()
        words = self.words_button.isChecked()
        re_flags = re.MULTILINE if case else re.IGNORECASE | re.MULTILINE

        re_pattern = None
        if self.re_button.isChecked():
            pattern = search_text
        else:
            pattern = re.escape(search_text)
            replace_text = re.escape(replace_text)
        if words:
            # match whole words only
            pattern = r'\b{pattern}\b'.format(pattern=pattern)

        # Check regexp before proceeding
        try:
            re_pattern = re.compile(pattern, flags=re_flags)
            # Check if replace_text can be substituted in re_pattern
            # Fixes issue #7177
            re_pattern.sub(replace_text, '')
        except re.error:
            # Do nothing with an invalid regexp
            return

        selected_text = to_text_string(self.editor.get_selected_text())
        replacement = re_pattern.sub(replace_text, selected_text)
        if replacement != selected_text:
            cursor = self.editor.textCursor()
            cursor.beginEditBlock()
            cursor.removeSelectedText()
            if not self.re_button.isChecked():
                replacement = re.sub(r'\\(?![nrtf])(.)', r'\1', replacement)
            cursor.insertText(replacement)
            cursor.endEditBlock()
        if focus_replace_text:
            self.replace_text.setFocus()
        else:
            self.editor.setFocus()
def get_permissions(self, token, resource_scopes_tuples=None,
                    submit_request=False, ticket=None):
    """
    Request permissions for user from keycloak server.

    https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi

    :param str token: client access token
    :param Iterable[Tuple[str, str]] resource_scopes_tuples:
        list of tuples (resource, scope)
    :param boolean submit_request: submit request if not allowed to access?
    :param str ticket: Permissions ticket
    :rtype: dict
    """
    headers = {
        "Authorization": "Bearer %s" % token,
        'Content-type': 'application/x-www-form-urlencoded',
    }

    data = [
        ('grant_type', 'urn:ietf:params:oauth:grant-type:uma-ticket'),
        ('audience', self._client_id),
        ('response_include_resource_name', True),
    ]
    if resource_scopes_tuples:
        for atuple in resource_scopes_tuples:
            data.append(('permission', '#'.join(atuple)))
        data.append(('submit_request', submit_request))
    elif ticket:
        data.append(('ticket', ticket))

    authz_info = {}

    try:
        response = self._realm.client.post(
            self.well_known['token_endpoint'],
            data=urlencode(data),
            headers=headers,
        )

        error = response.get('error')
        if error:
            self.logger.warning(
                '%s: %s',
                error, response.get('error_description')
            )
        else:
            token = response.get('refresh_token')
            decoded_token = self._decode_token(token.split('.')[1])
            authz_info = decoded_token.get('authorization', {})
    except KeycloakClientError as error:
        self.logger.warning(str(error))
    return authz_info
def add_child(self, child):
    """Add a child object to the current one.

    Checks the contained_children list to make sure that the object is
    allowable, and throws an exception if not.
    """
    # Make sure the child exists before adding it.
    if child:
        # Add the child if it is allowed to exist under the parent.
        if child.tag in self.contained_children:
            self.children.append(child)
        else:
            raise ETD_MS_StructureException(
                'Invalid child "%s" for parent "%s"' % (child.tag, self.tag)
            )
def select_previous(self):
    """Move to the previous status in the timeline."""
    self.footer.clear_message()

    if self.selected == 0:
        self.footer.draw_message("Cannot move beyond first toot.",
                                 Color.GREEN)
        return

    old_index = self.selected
    new_index = self.selected - 1

    self.selected = new_index
    self.redraw_after_selection_change(old_index, new_index)
def get_orgas(self):
    """Return the list of all orgas (each with its pk set)"""
    r = self._request('orgas/')
    if not r:
        return None

    retour = []
    for data in r.json()['data']:
        o = Orga()
        o.__dict__.update(data)
        o.pk = o.id
        retour.append(o)
    return retour
def enable(self):
    """
    Enables the settings for the first nwin windows and disables the rest.
    """
    nwin = self.nwin.value()
    for label, xs, ys, nx, ny in \
            zip(self.label[:nwin], self.xs[:nwin], self.ys[:nwin],
                self.nx[:nwin], self.ny[:nwin]):
        label.config(state='normal')
        xs.enable()
        ys.enable()
        nx.enable()
        ny.enable()

    for label, xs, ys, nx, ny in \
            zip(self.label[nwin:], self.xs[nwin:], self.ys[nwin:],
                self.nx[nwin:], self.ny[nwin:]):
        label.config(state='disable')
        xs.disable()
        ys.disable()
        nx.disable()
        ny.disable()

    self.nwin.enable()
    self.xbin.enable()
    self.ybin.enable()
    self.sbutt.enable()
def _create_session(self):
    """
    Creates a fresh session with the default header (random UA)
    """
    self.driver = requests.Session(**self.driver_args)

    # Set default headers
    self.update_headers(self.current_headers)
    self.update_cookies(self.current_cookies)
    self.set_proxy(self.current_proxy)
def yiq_to_rgb(y, i=None, q=None):
    """Convert the color from YIQ coordinates to RGB.

    Parameters:
      :y: The Y component value [0...1]
      :i: The I component value [0...1]
      :q: The Q component value [0...1]

    Returns:
      The color as an (r, g, b) tuple in the range:
      r[0...1], g[0...1], b[0...1]

    >>> '({}, {}, {})'.format(*[round(v, 6) for v in yiq_to_rgb(0.592263, 0.458874, -0.0499818)])
    '(1.0, 0.5, 1e-06)'
    """
    if type(y) in [list, tuple]:
        y, i, q = y

    r = y + (i * 0.9562) + (q * 0.6210)
    g = y - (i * 0.2717) - (q * 0.6485)
    b = y - (i * 1.1053) + (q * 1.7020)
    return (r, g, b)
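For intuition, YIQ is the NTSC color space (Y carries luma, I and Q carry chroma). A round-trip sketch against an assumed forward transform using the standard NTSC matrix:

def rgb_to_yiq(r, g, b):
    # Approximate NTSC forward matrix (assumed companion of the above).
    y = 0.299 * r + 0.587 * g + 0.114 * b
    i = 0.5957 * r - 0.2745 * g - 0.3213 * b
    q = 0.2115 * r - 0.5226 * g + 0.3111 * b
    return (y, i, q)

back = yiq_to_rgb(rgb_to_yiq(1.0, 0.5, 0.0))
print([round(v, 2) for v in back])  # close to [1.0, 0.5, 0.0]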
def write_and_return(
        command, ack, serial_connection, timeout=DEFAULT_WRITE_TIMEOUT):
    '''Write a command and return the response'''
    clear_buffer(serial_connection)
    with serial_with_temp_timeout(
            serial_connection, timeout) as device_connection:
        response = _write_to_device_and_return(command, ack,
                                               device_connection)
    return response
def _parse_args(self, args):
    """self.parser->self.parsed_data"""
    # decode sys.argv to support unicode command-line options
    enc = DEFAULT_ENCODING
    uargs = [py3compat.cast_unicode(a, enc) for a in args]
    self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def process_rule(edges: Edges, ast: Function, rule: Mapping[str, Any],
                 spec: BELSpec):
    """Process computed edge rule

    Recursively processes BELAst versus a single computed edge rule

    Args:
        edges (List[Tuple[Union[Function, str], str, Function]]): BEL Edge ASTs
        ast (Function): BEL Function AST
        rule (Mapping[str, Any]): computed edge rule
    """
    ast_type = ast.__class__.__name__
    trigger_functions = rule.get("trigger_function", [])
    trigger_types = rule.get("trigger_type", [])

    rule_subject = rule.get("subject")
    rule_relation = rule.get("relation")
    rule_object = rule.get("object")

    log.debug(f"Running {rule_relation}  Type: {ast_type}")

    if isinstance(ast, Function):
        function_name = ast.name
        args = ast.args
        parent_function = ast.parent_function

        if function_name in trigger_functions:
            if rule_subject == "trigger_value":
                subject = ast

                if rule_object == "args":
                    for arg in args:
                        log.debug(f"1: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"2: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation,
                                      parent_function, spec)
                    edges.append(edge_ast)

        elif ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = ast

                if rule_object == "args":
                    for arg in args:
                        log.debug(f"3: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"4: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation,
                                      parent_function, spec)
                    edges.append(edge_ast)

    if isinstance(ast, NSArg):
        term = "{}:{}".format(ast.namespace, ast.value)
        parent_function = ast.parent_function

        if ast_type in trigger_types:
            if rule_subject == "trigger_value":
                subject = term

                if rule_object == "args":
                    for arg in args:
                        log.debug(f"5: {subject} {arg}")
                        edge_ast = BELAst(subject, rule_relation, arg, spec)
                        edges.append(edge_ast)
                elif rule_object == "parent_function" and parent_function:
                    log.debug(f"6: {subject} {parent_function}")
                    edge_ast = BELAst(subject, rule_relation,
                                      parent_function, spec)
                    edges.append(edge_ast)

    # Recursively process every element by processing BELAst and Functions
    if hasattr(ast, "args"):
        for arg in ast.args:
            process_rule(edges, arg, rule, spec)
def get_polygon_pattern_rules(declarations, dirs):
    """ Given a list of declarations, return a list of output.Rule objects.

        Optionally provide an output directory for local copies of image
        files.
    """
    property_map = {'polygon-pattern-file': 'file',
                    'polygon-pattern-width': 'width',
                    'polygon-pattern-height': 'height',
                    'polygon-pattern-type': 'type',
                    'polygon-meta-output': 'meta-output',
                    'polygon-meta-writer': 'meta-writer'}

    property_names = property_map.keys()

    # a place to put rules
    rules = []

    for (filter, values) in filtered_property_declarations(declarations,
                                                           property_names):
        poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height \
            = 'polygon-pattern-file' in values \
            and post_process_symbolizer_image_file(str(values['polygon-pattern-file'].value), dirs) \
            or (None, None, None, None)

        poly_pattern_width = 'polygon-pattern-width' in values \
            and values['polygon-pattern-width'].value or poly_pattern_width
        poly_pattern_height = 'polygon-pattern-height' in values \
            and values['polygon-pattern-height'].value or poly_pattern_height

        symbolizer = poly_pattern_file and output.PolygonPatternSymbolizer(
            poly_pattern_file, poly_pattern_type,
            poly_pattern_width, poly_pattern_height)

        if symbolizer:
            rules.append(make_rule(filter, symbolizer))

    return rules
def _lines_only(shape):
    """
    Extract the lines (LineString, MultiLineString) from any geometry. We
    expect the input to be mostly lines, such as the result of an
    intersection between a line and a polygon. The main idea is to remove
    points, and any other geometry which might throw a wrench in the works.
    """
    lines = _explode_lines(shape)
    if len(lines) == 1:
        return lines[0]
    else:
        return MultiLineString(lines)
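A small shapely illustration of the situation described in the docstring. `_explode_lines` is the module's own helper, so this sketch substitutes a minimal stand-in that recursively collects LineStrings:

from shapely.geometry import GeometryCollection, LineString, MultiLineString, Point

def explode_lines(shape):
    # Minimal stand-in: keep LineStrings, recurse into collections,
    # drop everything else (points, polygons, ...).
    if isinstance(shape, LineString):
        return [shape]
    if hasattr(shape, 'geoms'):
        return [g for part in shape.geoms for g in explode_lines(part)]
    return []

mixed = GeometryCollection([LineString([(0, 0), (1, 1)]),
                            Point(2, 2),
                            LineString([(3, 3), (4, 4)])])
lines = explode_lines(mixed)
result = lines[0] if len(lines) == 1 else MultiLineString(lines)
print(result.geom_type)  # MultiLineString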
def activate(self):
    """
    Activates the logical volume.

    *Raises:*
    *       HandleError
    """
    self.open()
    a = lvm_lv_activate(self.handle)
    self.close()
    if a != 0:
        raise CommitError("Failed to activate LV.")
def get_emerg():
    """Get the cached FCPS emergency page, or check it again.

    Timeout defined in settings.CACHE_AGE["emerg"]
    """
    key = "emerg:{}".format(datetime.datetime.now().date())
    cached = cache.get(key)
    cached = None  # Remove this for production
    if cached:
        logger.debug("Returning emergency info from cache")
        return cached
    else:
        result = get_emerg_result()
        cache.set(key, result, timeout=settings.CACHE_AGE["emerg"])
        return result
def delete(self, membershipId):
    """Delete a team membership, by ID.

    Args:
        membershipId(basestring): The team membership ID.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    check_type(membershipId, basestring, may_be_none=False)

    # API request
    self._session.delete(API_ENDPOINT + '/' + membershipId)
def fetch(self):
    """Unfortunately, IEX's API can only retrieve data one day or one month
    at a time. Rather than specifying a date range, we will have to run
    the read function for each date provided.

    :return: DataFrame
    """
    tlen = self.end - self.start
    dfs = []

    # Build list of all dates within the given range
    lrange = [x for x in (self.start + timedelta(n)
                          for n in range(tlen.days))]
    mrange = []
    for dt in lrange:
        if datetime(dt.year, dt.month, 1) not in mrange:
            mrange.append(datetime(dt.year, dt.month, 1))
    lrange = mrange

    for date in lrange:
        self.curr_date = date
        tdf = super(MonthlySummaryReader, self).fetch()

        # We may not return data if this was a weekend/holiday:
        if self.output_format == 'pandas':
            if not tdf.empty:
                tdf['date'] = date.strftime(self.date_format)
        dfs.append(tdf)

    # We may not return any data if we failed to specify useful parameters:
    if self.output_format == 'pandas':
        result = pd.concat(dfs) if len(dfs) > 0 else pd.DataFrame()
        return result.set_index('date')
    else:
        return dfs
def set_physical_page_for_file(self, pageId, ocrd_file, order=None,
                               orderlabel=None):
    """
    Create a new physical page
    """
    # print(pageId, ocrd_file)
    # delete any page mapping for this file.ID
    for el_fptr in self._tree.getroot().findall(
            'mets:structMap[@TYPE="PHYSICAL"]/'
            'mets:div[@TYPE="physSequence"]/'
            'mets:div[@TYPE="page"]/'
            'mets:fptr[@FILEID="%s"]' % ocrd_file.ID,
            namespaces=NS):
        el_fptr.getparent().remove(el_fptr)

    # find/construct as necessary
    el_structmap = self._tree.getroot().find(
        'mets:structMap[@TYPE="PHYSICAL"]', NS)
    if el_structmap is None:
        el_structmap = ET.SubElement(self._tree.getroot(),
                                     TAG_METS_STRUCTMAP)
        el_structmap.set('TYPE', 'PHYSICAL')
    el_seqdiv = el_structmap.find('mets:div[@TYPE="physSequence"]', NS)
    if el_seqdiv is None:
        el_seqdiv = ET.SubElement(el_structmap, TAG_METS_DIV)
        el_seqdiv.set('TYPE', 'physSequence')
    el_pagediv = el_seqdiv.find('mets:div[@ID="%s"]' % pageId, NS)
    if el_pagediv is None:
        el_pagediv = ET.SubElement(el_seqdiv, TAG_METS_DIV)
        el_pagediv.set('TYPE', 'page')
        el_pagediv.set('ID', pageId)
        if order:
            el_pagediv.set('ORDER', order)
        if orderlabel:
            el_pagediv.set('ORDERLABEL', orderlabel)
    el_fptr = ET.SubElement(el_pagediv, TAG_METS_FPTR)
    el_fptr.set('FILEID', ocrd_file.ID)
Create a new physical page
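The find-or-construct idiom used for the structMap above can be factored into a helper; a sketch with lxml, using plain tag names rather than the METS namespace for brevity:

from lxml import etree as ET

def find_or_create(parent, tag, attrib):
    # Return the first child matching tag and attributes, creating it if absent.
    query = tag + "".join('[@{}="{}"]'.format(k, v) for k, v in attrib.items())
    el = parent.find(query)
    if el is None:
        el = ET.SubElement(parent, tag)
        for k, v in attrib.items():
            el.set(k, v)
    return el

root = ET.Element("structMap")
seq = find_or_create(root, "div", {"TYPE": "physSequence"})
assert find_or_create(root, "div", {"TYPE": "physSequence"}) is seq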
def play(self): """Starts an animation playing.""" if self.state == PygAnimation.PLAYING: pass # nothing to do elif self.state == PygAnimation.STOPPED: # restart from beginning of animation self.index = 0 # first image in list self.elapsed = 0 self.playingStartTime = time.time() self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time self.nextElapsedThreshold = self.endTimesList[0] self.nIterationsLeft = self.nTimes # typically 1 elif self.state == PygAnimation.PAUSED: # restart where we left off self.playingStartTime = time.time() - self.elapsedAtPause # recalc start time self.elapsed = self.elapsedAtPause self.elapsedStopTime = self.endTimesList[-1] # end of last animation image time self.nextElapsedThreshold = self.endTimesList[self.index] self.state = PygAnimation.PLAYING
Starts an animation playing.
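The resume arithmetic in the PAUSED branch is worth seeing in isolation: shifting the start time back by the paused-at offset makes `time.time() - playingStartTime` pick up where the animation left off. A tiny self-contained check:

import time

elapsed_at_pause = 2.5                               # paused 2.5s into the clip
playing_start_time = time.time() - elapsed_at_pause  # recalculated start time
elapsed = time.time() - playing_start_time           # current position
assert abs(elapsed - 2.5) < 0.05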
def get_dataset(self, dsid, info): """Load a dataset.""" dsid_name = dsid.name if dsid_name in self.cache: logger.debug('Get the data set from cache: %s.', dsid_name) return self.cache[dsid_name] if dsid_name in ['lon', 'lat'] and dsid_name not in self.nc: dsid_name = dsid_name + '_reduced' logger.debug('Reading %s.', dsid_name) variable = self.nc[dsid_name] variable = self.remove_timedim(variable) variable = self.scale_dataset(dsid, variable, info) if dsid_name.endswith('_reduced'): # Get full resolution lon,lat from the reduced (tie points) grid self.upsample_geolocation(dsid, info) return self.cache[dsid.name] return variable
Load a dataset.
def _get_indices(num_results, sequence_indices, dtype, name=None): """Generates starting points for the Halton sequence procedure. The k'th element of the sequence is generated starting from a positive integer which must be distinct for each `k`. It is conventional to choose the starting point as `k` itself (or `k+1` if k is zero based). This function generates the starting integers for the required elements and reshapes the result for later use. Args: num_results: Positive scalar `Tensor` of dtype int32. The number of samples to generate. If this parameter is supplied, then `sequence_indices` should be None. sequence_indices: `Tensor` of dtype int32 and rank 1. The entries index into the Halton sequence starting with 0 and hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will produce the first, sixth and seventh elements of the sequence. If this parameter is not None then `num_results` must be None. dtype: The dtype of the sample. One of `float32` or `float64`. Default is `float32`. name: Python `str` name which describes ops created by this function. Returns: indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`. """ with tf.compat.v1.name_scope(name, '_get_indices', [num_results, sequence_indices]): if sequence_indices is None: num_results = tf.cast(num_results, dtype=dtype) sequence_indices = tf.range(num_results, dtype=dtype) else: sequence_indices = tf.cast(sequence_indices, dtype) # Shift the indices so they are 1 based. indices = sequence_indices + 1 # Reshape to make space for the event dimension and the place value # coefficients. return tf.reshape(indices, [-1, 1, 1])
Generates starting points for the Halton sequence procedure. The k'th element of the sequence is generated starting from a positive integer which must be distinct for each `k`. It is conventional to choose the starting point as `k` itself (or `k+1` if k is zero based). This function generates the starting integers for the required elements and reshapes the result for later use. Args: num_results: Positive scalar `Tensor` of dtype int32. The number of samples to generate. If this parameter is supplied, then `sequence_indices` should be None. sequence_indices: `Tensor` of dtype int32 and rank 1. The entries index into the Halton sequence starting with 0 and hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will produce the first, sixth and seventh elements of the sequence. If this parameter is not None then `num_results` must be None. dtype: The dtype of the sample. One of `float32` or `float64`. Default is `float32`. name: Python `str` name which describes ops created by this function. Returns: indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`.
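The same construction without TensorFlow; a NumPy sketch of the shift-and-reshape:

import numpy as np

def get_indices(num_results=None, sequence_indices=None, dtype=np.float32):
    if sequence_indices is None:
        sequence_indices = np.arange(num_results, dtype=dtype)
    else:
        sequence_indices = np.asarray(sequence_indices, dtype=dtype)
    indices = sequence_indices + 1      # shift to 1-based starting points
    return indices.reshape(-1, 1, 1)    # room for event/place-value dims

# get_indices(sequence_indices=[0, 5, 6]).ravel() -> array([1., 6., 7.])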
def add_variable(self, variable, card=0): """ Add a variable to the model. Parameters: ----------- variable: any hashable python object card: int Representing the cardinality of the variable to be added. Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> model = MC() >>> model.add_variable('x', 4) """ if variable not in self.variables: self.variables.append(variable) else: warn('Variable {var} already exists.'.format(var=variable)) self.cardinalities[variable] = card self.transition_models[variable] = {}
Add a variable to the model. Parameters: ----------- variable: any hashable python object card: int Representing the cardinality of the variable to be added. Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> model = MC() >>> model.add_variable('x', 4)
def isdir(self): """Returns True if entry is a directory. """ if self.type == RAR_BLOCK_FILE: return (self.flags & RAR_FILE_DIRECTORY) == RAR_FILE_DIRECTORY return False
Returns True if entry is a directory.
def _split_classes_by_kind(self, class_name_to_definition): """Assign each class to the vertex, edge or non-graph type sets based on its kind.""" for class_name in class_name_to_definition: inheritance_set = self._inheritance_sets[class_name] is_vertex = ORIENTDB_BASE_VERTEX_CLASS_NAME in inheritance_set is_edge = ORIENTDB_BASE_EDGE_CLASS_NAME in inheritance_set if is_vertex and is_edge: raise AssertionError(u'Class {} appears to be both a vertex and an edge class: ' u'{}'.format(class_name, inheritance_set)) elif is_vertex: self._vertex_class_names.add(class_name) elif is_edge: self._edge_class_names.add(class_name) else: self._non_graph_class_names.add(class_name) # Freeze the classname sets so they cannot be modified again. self._vertex_class_names = frozenset(self._vertex_class_names) self._edge_class_names = frozenset(self._edge_class_names) self._non_graph_class_names = frozenset(self._non_graph_class_names)
Assign each class to the vertex, edge or non-graph type sets based on its kind.
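The partitioning logic reduced to a standalone sketch; the base-class names here are placeholders for the OrientDB constants used above:

def split_by_kind(inheritance_sets, vertex_base="V", edge_base="E"):
    vertices, edges, non_graph = set(), set(), set()
    for name, bases in inheritance_sets.items():
        is_vertex = vertex_base in bases
        is_edge = edge_base in bases
        if is_vertex and is_edge:
            raise AssertionError("{} is both a vertex and an edge".format(name))
        elif is_vertex:
            vertices.add(name)
        elif is_edge:
            edges.add(name)
        else:
            non_graph.add(name)
    # Freeze the sets so they cannot be modified again.
    return frozenset(vertices), frozenset(edges), frozenset(non_graph)

# split_by_kind({"Person": {"V"}, "Knows": {"E"}, "Meta": set()})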
def rebuildDay( self ): """ Rebuilds the current item in day mode. """ scene = self.scene() if ( not scene ): return # calculate the base information start_date = self.dateStart() end_date = self.dateEnd() min_date = scene.minimumDate() max_date = scene.maximumDate() # make sure our item is visible if ( not (min_date <= end_date and start_date <= max_date)): self.hide() self.setPath(QPainterPath()) return # make sure we have valid range information if ( start_date < min_date ): start_date = min_date start_inrange = False else: start_inrange = True if ( max_date < end_date ): end_date = max_date end_inrange = False else: end_inrange = True # rebuild the path path = QPainterPath() self.setPos(0, 0) pad = 2 offset = 18 height = 16 # rebuild a timed item if ( not self.isAllDay() ): start_dtime = QDateTime(self.dateStart(), self.timeStart()) end_dtime = QDateTime(self.dateStart(), self.timeEnd().addSecs(-30*60)) start_rect = scene.dateTimeRect(start_dtime) end_rect = scene.dateTimeRect(end_dtime) left = start_rect.left() + pad top = start_rect.top() + pad right = start_rect.right() - pad bottom = end_rect.bottom() - pad path.moveTo(left, top) path.lineTo(right, top) path.lineTo(right, bottom) path.lineTo(left, bottom) path.lineTo(left, top) data = (left + 6, top + 6, right - left - 12, bottom - top - 12, Qt.AlignTop | Qt.AlignLeft, '%s - %s\n(%s)' % (self.timeStart().toString('h:mmap')[:-1], self.timeEnd().toString('h:mmap'), self.title())) self._textData.append(data) self.setPath(path) self.show()
Rebuilds the current item in day mode.
def get_connection(self, command_name, *keys, **options): """Get a connection from the pool""" self._checkpid() try: connection = self._available_connections[self._pattern_idx].pop() except IndexError: connection = self.make_connection() self._in_use_connections[self._pattern_idx].add(connection) self._next_pattern() return connection
Get a connection from the pool
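A toy version of the pop-or-create pattern with round-robin rotation over several sub-pools; the connection factory is a hypothetical stand-in for `make_connection`:

class RoundRobinPool(object):
    def __init__(self, factory, n_pools=2):
        self.factory = factory
        self.available = [[] for _ in range(n_pools)]
        self.in_use = [set() for _ in range(n_pools)]
        self.idx = 0

    def get_connection(self):
        try:
            conn = self.available[self.idx].pop()
        except IndexError:
            conn = self.factory()           # pool empty: create a new one
        self.in_use[self.idx].add(conn)
        self.idx = (self.idx + 1) % len(self.available)  # advance pattern
        return conn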
def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name, ocean_index=None, ocean_index_enrich=None, db_projects_map=None, json_projects_map=None, db_sortinghat=None, no_incremental=False, only_identities=False, github_token=None, studies=False, only_studies=False, url_enrich=None, events_enrich=False, db_user=None, db_password=None, db_host=None, do_refresh_projects=False, do_refresh_identities=False, author_id=None, author_uuid=None, filter_raw=None, filters_raw_prefix=None, jenkins_rename_file=None, unaffiliated_group=None, pair_programming=False, node_regex=False, studies_args=None, es_enrich_aliases=None, last_enrich_date=None, projects_json_repo=None): """ Enrich Ocean index """ backend = None enrich_index = None if ocean_index or ocean_index_enrich: clean = False # don't remove index, it could be shared if do_refresh_projects or do_refresh_identities: clean = False # refresh works over the existing enriched items if not get_connector_from_name(backend_name): raise RuntimeError("Unknown backend %s" % backend_name) connector = get_connector_from_name(backend_name) klass = connector[3] # BackendCmd for the connector try: backend = None backend_cmd = None if klass: # Data is retrieved from Perceval backend_cmd = init_backend(klass(*backend_params)) backend = backend_cmd.backend if ocean_index_enrich: enrich_index = ocean_index_enrich else: if not ocean_index: ocean_index = backend_name + "_" + backend.origin enrich_index = ocean_index + "_enrich" if events_enrich: enrich_index += "_events" enrich_backend = connector[2](db_sortinghat, db_projects_map, json_projects_map, db_user, db_password, db_host) enrich_backend.set_params(backend_params) # store the cfg section name in the enrich backend to recover the corresponding project name in projects.json enrich_backend.set_cfg_section_name(cfg_section_name) enrich_backend.set_from_date(last_enrich_date) if url_enrich: elastic_enrich = get_elastic(url_enrich, enrich_index, clean, enrich_backend, es_enrich_aliases) else: elastic_enrich = get_elastic(url, enrich_index, clean, enrich_backend, es_enrich_aliases) enrich_backend.set_elastic(elastic_enrich) if github_token and backend_name == "git": enrich_backend.set_github_token(github_token) if jenkins_rename_file and backend_name == "jenkins": enrich_backend.set_jenkins_rename_file(jenkins_rename_file) if unaffiliated_group: enrich_backend.unaffiliated_group = unaffiliated_group if pair_programming: enrich_backend.pair_programming = pair_programming if node_regex: enrich_backend.node_regex = node_regex # The filter raw is needed to be able to assign the project value to an enriched item # see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw) if filter_raw: enrich_backend.set_filter_raw(filter_raw) elif filters_raw_prefix: enrich_backend.set_filter_raw_should(filters_raw_prefix) enrich_backend.set_projects_json_repo(projects_json_repo) ocean_backend = get_ocean_backend(backend_cmd, enrich_backend, no_incremental, filter_raw, filters_raw_prefix) if only_studies: logger.info("Running only studies (no SH and no enrichment)") do_studies(ocean_backend, enrich_backend, studies_args) elif do_refresh_projects: logger.info("Refreshing project field in %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) field_id = enrich_backend.get_field_unique_id() eitems = refresh_projects(enrich_backend) enrich_backend.elastic.bulk_upload(eitems, field_id) elif do_refresh_identities: author_attr = None author_values = 
None if author_id: author_attr = 'author_id' author_values = [author_id] elif author_uuid: author_attr = 'author_uuid' author_values = [author_uuid] logger.info("Refreshing identities fields in %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) field_id = enrich_backend.get_field_unique_id() eitems = refresh_identities(enrich_backend, author_attr, author_values) enrich_backend.elastic.bulk_upload(eitems, field_id) else: clean = False # Don't remove ocean index when enrich elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend) ocean_backend.set_elastic(elastic_ocean) logger.info("Adding enrichment data to %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) if db_sortinghat and enrich_backend.has_identities(): # FIXME: This step won't be done from enrich in the future total_ids = load_identities(ocean_backend, enrich_backend) logger.info("Total identities loaded %i ", total_ids) if only_identities: logger.info("Only SH identities added. Enrich not done!") else: # Enrichment for the new items once SH update is finished if not events_enrich: enrich_count = enrich_items(ocean_backend, enrich_backend) if enrich_count is not None: logger.info("Total items enriched %i ", enrich_count) else: enrich_count = enrich_items(ocean_backend, enrich_backend, events=True) if enrich_count is not None: logger.info("Total events enriched %i ", enrich_count) if studies: do_studies(ocean_backend, enrich_backend, studies_args) except Exception as ex: if backend: logger.error("Error enriching ocean from %s (%s): %s", backend_name, backend.origin, ex, exc_info=True) else: logger.error("Error enriching ocean %s", ex, exc_info=True) logger.info("Done %s ", backend_name)
Enrich Ocean index
def get_sources(self, skydir=None, distance=None, cuts=None, minmax_ts=None, minmax_npred=None, exclude=None, square=False, coordsys='CEL', names=None): """Retrieve list of source objects satisfying the following selections: * Angular separation from ``skydir`` or ROI center (if ``skydir`` is None) less than ``distance``. * Cuts on source properties defined in ``cuts`` list. * TS and Npred in range specified by ``minmax_ts`` and ``minmax_npred``. * Name matching a value in ``names`` Sources can be excluded from the selection by adding their name to the ``exclude`` list. Returns ------- srcs : list List of source objects. """ if skydir is None: skydir = self.skydir if exclude is None: exclude = [] rsrc, srcs = self.get_sources_by_position(skydir, distance, square=square, coordsys=coordsys) o = [] for s in srcs + self.diffuse_sources: if names and s.name not in names: continue if s.name in exclude: continue if not s.check_cuts(cuts): continue ts = s['ts'] npred = s['npred'] if not utils.apply_minmax_selection(ts, minmax_ts): continue if not utils.apply_minmax_selection(npred, minmax_npred): continue o.append(s) return o
Retrieve list of source objects satisfying the following selections: * Angular separation from ``skydir`` or ROI center (if ``skydir`` is None) less than ``distance``. * Cuts on source properties defined in ``cuts`` list. * TS and Npred in range specified by ``minmax_ts`` and ``minmax_npred``. * Name matching a value in ``names`` Sources can be excluded from the selection by adding their name to the ``exclude`` list. Returns ------- srcs : list List of source objects.
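`utils.apply_minmax_selection` is not shown here; a plausible sketch of its contract, assuming open-ended bounds pass automatically:

def apply_minmax_selection(value, minmax):
    # Keep `value` when no range is given or value lies within [lo, hi].
    if minmax is None:
        return True
    lo, hi = minmax
    if lo is not None and value < lo:
        return False
    if hi is not None and value > hi:
        return False
    return True

# apply_minmax_selection(25.0, (10, None)) -> True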
def initialize(self, params, qubits): """Apply initialize to circuit.""" if isinstance(qubits, QuantumRegister): qubits = qubits[:] else: qubits = _convert_to_bits([qubits], [qbit for qreg in self.qregs for qbit in qreg])[0] return self.append(Initialize(params), qubits)
Apply initialize to circuit.
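A short usage sketch, assuming a recent Qiskit; the amplitudes must form a normalized statevector:

from math import sqrt
from qiskit import QuantumCircuit

qc = QuantumCircuit(1)
qc.initialize([1 / sqrt(2), 1 / sqrt(2)], [0])   # prepare |+> on qubit 0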
def remove_root_bin(self, bin_id): """Removes a root bin. arg: bin_id (osid.id.Id): the ``Id`` of a bin raise: NotFound - ``bin_id`` not a root raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.remove_root_bin_template if self._catalog_session is not None: return self._catalog_session.remove_root_catalog(catalog_id=bin_id) return self._hierarchy_session.remove_root(id_=bin_id)
Removes a root bin. arg: bin_id (osid.id.Id): the ``Id`` of a bin raise: NotFound - ``bin_id`` not a root raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def _get_overlaps_tensor(self, L): """Transforms the input label matrix to a three-way overlaps tensor. Args: L: (np.array) An n x m array of LF output labels, in {0,...,k} if self.abstains, else in {1,...,k}, generated by m conditionally independent LFs on n data points Outputs: O: (torch.Tensor) A (m, m, m, k, k, k) tensor of the label-specific empirical overlap rates; that is, O[i,j,k,y1,y2,y3] = P(\lf_i = y1, \lf_j = y2, \lf_k = y3) where this quantity is computed empirically by this function, based on the label matrix L. """ n, m = L.shape # Convert from a (n,m) matrix of ints to a (k_lf, n, m) indicator tensor LY = np.array([np.where(L == y, 1, 0) for y in range(self.k_0, self.k + 1)]) # Form the three-way overlaps matrix O = np.einsum("abc,dbe,fbg->cegadf", LY, LY, LY) / n return torch.from_numpy(O).float()
Transforms the input label matrix to a three-way overlaps tensor. Args: L: (np.array) An n x m array of LF output labels, in {0,...,k} if self.abstains, else in {1,...,k}, generated by m conditionally independent LFs on n data points Outputs: O: (torch.Tensor) A (m, m, m, k, k, k) tensor of the label-specific empirical overlap rates; that is, O[i,j,k,y1,y2,y3] = P(\lf_i = y1, \lf_j = y2, \lf_k = y3) where this quantity is computed empirically by this function, based on the label matrix L.
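The einsum can be sanity-checked on a tiny label matrix (k=2, no abstains); NumPy only:

import numpy as np

L = np.array([[1, 1, 2],
              [2, 2, 2],
              [1, 2, 1]])   # n=3 data points, m=3 LFs, labels in {1, 2}
n = L.shape[0]
LY = np.array([np.where(L == y, 1, 0) for y in (1, 2)])   # (k, n, m)
O = np.einsum("abc,dbe,fbg->cegadf", LY, LY, LY) / n      # (m, m, m, k, k, k)
# P(lf_0=1, lf_1=1, lf_2=2): only the first row matches, so 1/3.
print(O[0, 1, 2, 0, 0, 1])   # 0.333...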
def _members(self): """ Return a dict of non-private members. """ return { key: value for key, value in self.__dict__.items() # NB: ignore internal SQLAlchemy state and nested relationships if not key.startswith("_") and not isinstance(value, Model) }
Return a dict of non-private members.
def execute_command(self, command): """Execute a command on the node Args: command (str) """ self.info_log("executing command: %s" % command) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) k = paramiko.RSAKey.from_private_key_file( self.browser_config.get('ssh_key_path') ) ssh.connect( self.private_ip, username=self.browser_config.get('username'), pkey=k ) sleep_time = 0.1 stdout = [] stderr = [] ssh_transport = ssh.get_transport() channel = ssh_transport.open_session() channel.setblocking(0) channel.exec_command(command) while True: while channel.recv_ready(): stdout.append(channel.recv(1000)) while channel.recv_stderr_ready(): stderr.append(channel.recv_stderr(1000)) if channel.exit_status_ready(): break sleep(sleep_time) # ret = channel.recv_exit_status() ssh_transport.close() ssh.close() return b''.join(stdout), b''.join(stderr) except Exception as e: msg = "Execute_command exception: %s" % str(e) self.error_log(msg) raise Exception(msg)
Execute a command on the node Args: command (str)
def spliced_offset(self, position): """ Convert from an absolute chromosomal position to the offset into this transcript's spliced mRNA. Position must be inside some exon (otherwise raise exception). """ # this code is performance sensitive, so switching from # typechecks.require_integer to a simpler assertion assert type(position) == int, \ "Position argument must be an integer, got %s : %s" % ( position, type(position)) if position < self.start or position > self.end: raise ValueError( "Invalid position: %d (must be between %d and %d)" % ( position, self.start, self.end)) # offset from beginning of unspliced transcript (including introns) unspliced_offset = self.offset(position) total_spliced_offset = 0 # traverse exons in order of their appearance on the strand # Since absolute positions may decrease if on the negative strand, # we instead use unspliced offsets to get always increasing indices. # # Example: # # Exon Name: exon 1 exon 2 # Spliced Offset: 123456 789... # Intron vs. Exon: ...iiiiiieeeeeeiiiiiiiiiiiiiiiieeeeeeiiiiiiiiiii... for exon in self.exons: exon_unspliced_start, exon_unspliced_end = self.offset_range( exon.start, exon.end) # If the relative position is not within this exon, keep a running # total of the total exonic length-so-far. # # Otherwise, if the relative position is within an exon, get its # offset into that exon by subtracting the exon's relative start # position from the relative position. Add that to the total exonic # length-so-far. if exon_unspliced_start <= unspliced_offset <= exon_unspliced_end: # all offsets are base 0, can be used as indices into # sequence string exon_offset = unspliced_offset - exon_unspliced_start return total_spliced_offset + exon_offset else: exon_length = len(exon) # exon_end_position - exon_start_position + 1 total_spliced_offset += exon_length raise ValueError( "Couldn't find position %d on any exon of %s" % ( position, self.id))
Convert from an absolute chromosomal position to the offset into this transcript's spliced mRNA. Position must be inside some exon (otherwise raise exception).
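A worked toy example of the exon walk, with made-up forward-strand coordinates:

# Transcript spans 100..200 with two exons (unspliced offsets in brackets):
#   exon 1: 100..119  ->  offsets [0, 19],  length 20
#   exon 2: 150..200  ->  offsets [50, 100]
# Query position 155 has unspliced offset 55, which falls inside exon 2:
#   exon_offset          = 55 - 50 = 5
#   total_spliced_offset = len(exon 1) = 20
#   spliced offset       = 20 + 5 = 25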
def _parse_ical_string(ical_string): """ SU,MO,TU,WE,TH,FR,SA DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=SA DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=DAILY DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA DTSTART;TZID=America/New_York:20180718T174500 """ start_time = ical_string.splitlines()[0].replace(DTSTART, '') if "RRULE" in ical_string: days = ical_string.splitlines()[1].replace(REPEAT, '') if days == "RRULE:FREQ=DAILY": days = ['DAILY'] else: days = days.split(',') else: days = None start_time = start_time.splitlines()[0].split(':')[1] datetime_object = datetime.strptime(start_time, '%Y%m%dT%H%M%S') return datetime_object, days
SU,MO,TU,WE,TH,FR,SA DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=SA DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=DAILY DTSTART;TZID=America/New_York:20180804T233251\nRRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA DTSTART;TZID=America/New_York:20180718T174500
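Expected behavior on one of the sample strings, assuming the module-level `DTSTART` and `REPEAT` constants strip the respective prefixes:

s = ("DTSTART;TZID=America/New_York:20180804T233251\n"
     "RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE")
# _parse_ical_string(s)
# -> (datetime(2018, 8, 4, 23, 32, 51), ['MO', 'TU', 'WE'])
# With FREQ=DAILY the second element is ['DAILY']; without an RRULE it is None.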
def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs): r"""Generic linearized ADMM method for convex problems. ADMM stands for "Alternating Direction Method of Multipliers" and is a popular convex optimization method. This variant solves problems of the form :: min_x [ f(x) + g(Lx) ] with convex ``f`` and ``g``, and a linear operator ``L``. See Section 4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_ and the Notes for more mathematical details. Parameters ---------- x : ``L.domain`` element Starting point of the iteration, updated in-place. f, g : `Functional` The functions ``f`` and ``g`` in the problem definition. They need to implement the ``proximal`` method. L : linear `Operator` The linear operator that is composed with ``g`` in the problem definition. It must fulfill ``L.domain == f.domain`` and ``L.range == g.domain``. tau, sigma : positive float Step size parameters for the update of the variables. niter : non-negative int Number of iterations. Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. Notes ----- Given :math:`x^{(0)}` (the provided ``x``) and :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following iteration: .. math:: x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[ x^{(k)} - \sigma^{-1}\tau L^*\big( L x^{(k)} - z^{(k)} + u^{(k)} \big) \right] z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left( L x^{(k+1)} + u^{(k)} \right) u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)} The step size parameters :math:`\tau` and :math:`\sigma` must satisfy .. math:: 0 < \tau < \frac{\sigma}{\|L\|^2} to guarantee convergence. The name "linearized ADMM" comes from the fact that in the minimization subproblem for the :math:`x` variable, this variant uses a linearization of a quadratic term in the augmented Lagrangian of the generic ADMM, in order to make the step expressible with the proximal operator of :math:`f`. Another name for this algorithm is *split inexact Uzawa method*. References ---------- [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1(3) (2014), pp 123-231. 
""" if not isinstance(L, Operator): raise TypeError('`op` {!r} is not an `Operator` instance' ''.format(L)) if x not in L.domain: raise OpDomainError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, L.domain)) tau, tau_in = float(tau), tau if tau <= 0: raise ValueError('`tau` must be positive, got {}'.format(tau_in)) sigma, sigma_in = float(sigma), sigma if sigma <= 0: raise ValueError('`sigma` must be positive, got {}'.format(sigma_in)) niter, niter_in = int(niter), niter if niter < 0 or niter != niter_in: raise ValueError('`niter` must be a non-negative integer, got {}' ''.format(niter_in)) # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable'.format(callback)) # Initialize range variables z = L.range.zero() u = L.range.zero() # Temporary for Lx + u [- z] tmp_ran = L(x) # Temporary for L^*(Lx + u - z) tmp_dom = L.domain.element() # Store proximals since their initialization may involve computation prox_tau_f = f.proximal(tau) prox_sigma_g = g.proximal(sigma) for _ in range(niter): # tmp_ran has value Lx^k here # tmp_dom <- L^*(Lx^k + u^k - z^k) tmp_ran += u tmp_ran -= z L.adjoint(tmp_ran, out=tmp_dom) # x <- x^k - (tau/sigma) L^*(Lx^k + u^k - z^k) x.lincomb(1, x, -tau / sigma, tmp_dom) # x^(k+1) <- prox[tau*f](x) prox_tau_f(x, out=x) # tmp_ran <- Lx^(k+1) L(x, out=tmp_ran) # z^(k+1) <- prox[sigma*g](Lx^(k+1) + u^k) prox_sigma_g(tmp_ran + u, out=z) # 1 copy here # u^(k+1) = u^k + Lx^(k+1) - z^(k+1) u += tmp_ran u -= z if callback is not None: callback(x)
r"""Generic linearized ADMM method for convex problems. ADMM stands for "Alternating Direction Method of Multipliers" and is a popular convex optimization method. This variant solves problems of the form :: min_x [ f(x) + g(Lx) ] with convex ``f`` and ``g``, and a linear operator ``L``. See Section 4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_ and the Notes for more mathematical details. Parameters ---------- x : ``L.domain`` element Starting point of the iteration, updated in-place. f, g : `Functional` The functions ``f`` and ``g`` in the problem definition. They need to implement the ``proximal`` method. L : linear `Operator` The linear operator that is composed with ``g`` in the problem definition. It must fulfill ``L.domain == f.domain`` and ``L.range == g.domain``. tau, sigma : positive float Step size parameters for the update of the variables. niter : non-negative int Number of iterations. Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. Notes ----- Given :math:`x^{(0)}` (the provided ``x``) and :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following iteration: .. math:: x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[ x^{(k)} - \sigma^{-1}\tau L^*\big( L x^{(k)} - z^{(k)} + u^{(k)} \big) \right] z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left( L x^{(k+1)} + u^{(k)} \right) u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)} The step size parameters :math:`\tau` and :math:`\sigma` must satisfy .. math:: 0 < \tau < \frac{\sigma}{\|L\|^2} to guarantee convergence. The name "linearized ADMM" comes from the fact that in the minimization subproblem for the :math:`x` variable, this variant uses a linearization of a quadratic term in the augmented Lagrangian of the generic ADMM, in order to make the step expressible with the proximal operator of :math:`f`. Another name for this algorithm is *split inexact Uzawa method*. References ---------- [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1(3) (2014), pp 123-231.
def com_google_fonts_check_production_glyphs_similarity(ttFont, api_gfonts_ttFont): """Glyphs are similar to Google Fonts version?""" def glyphs_surface_area(ttFont): """Calculate the surface area of a glyph's ink""" from fontTools.pens.areaPen import AreaPen glyphs = {} glyph_set = ttFont.getGlyphSet() area_pen = AreaPen(glyph_set) for glyph in glyph_set.keys(): glyph_set[glyph].draw(area_pen) area = area_pen.value area_pen.value = 0 glyphs[glyph] = area return glyphs bad_glyphs = [] these_glyphs = glyphs_surface_area(ttFont) gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont) shared_glyphs = set(these_glyphs) & set(gfonts_glyphs) this_upm = ttFont['head'].unitsPerEm gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm for glyph in shared_glyphs: # Normalize area difference against comparison's upm this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm if abs(this_glyph_area - gfont_glyph_area) > 7000: bad_glyphs.append(glyph) if bad_glyphs: yield WARN, ("Following glyphs differ greatly from" " Google Fonts version: [{}]").format(", ".join(bad_glyphs)) else: yield PASS, ("Glyphs are similar in" " comparison to the Google Fonts version.")
Glyphs are similar to Google Fonts version?
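The AreaPen pattern in isolation; the font path is a placeholder:

from fontTools.ttLib import TTFont
from fontTools.pens.areaPen import AreaPen

font = TTFont("MyFont.ttf")            # placeholder path
glyph_set = font.getGlyphSet()
pen = AreaPen(glyph_set)
glyph_set["A"].draw(pen)
print("ink area of 'A':", pen.value)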
def backpropagate_3d_tilted(uSin, angles, res, nm, lD=0, tilted_axis=[0, 1, 0], coords=None, weight_angles=True, onlyreal=False, padding=(True, True), padfac=1.75, padval=None, intp_order=2, dtype=None, num_cores=_ncores, save_memory=False, copy=True, count=None, max_count=None, verbose=0): r"""3D backpropagation with a tilted axis of rotation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm with a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}` w.r.t. the imaging plane :cite:`Mueller2015tilted`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}` and a different filter in Fourier space :math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared to :func:`backpropagate_3d`. .. versionadded:: 0.1.2 Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: ndarray of shape (A,3) or 1D array of length A If the shape is (A,3), then `angles` consists of vectors on the unit sphere that correspond to the direction of illumination and acquisition (s₀). If the shape is (A,), then `angles` is a one-dimensional array of angles in radians that determines the angular position :math:`\phi_j`. In both cases, `tilted_axis` must be set according to the tilt of the rotational axis. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. tilted_axis: list of floats The coordinates [x, y, z] on a unit sphere representing the tilted axis of rotation. The default is (0,1,0), which corresponds to a rotation about the y-axis and follows the behavior of :func:`odtbrain.backpropagate_3d`. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} This currently only works when `angles` has the shape (A,). onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. 
padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas it will lead to a padded size of 512 for an initial size of 150. Values greater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximate zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.affine_transform` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- This implementation can deal with projection angles that are not distributed along a circle about the rotational axis. If there are slight deviations from this circle, simply pass the 3D rotational positions instead of the 1D angles to the `angles` argument. In principle, this should improve the reconstruction. The general problem here is that the backpropagation algorithm requires a ramp filter in Fourier space that is oriented perpendicular to the rotational axis. If the sample does not rotate about a single axis, then a 1D parametric representation of this rotation must be found to correctly determine the filter in Fourier space. Such a parametric representation could e.g. be a spiral between the poles of the unit sphere (but this kind of rotation is probably difficult to implement experimentally). If you have input images with rectangular shape, e.g. Nx!=Ny and the rotational axis deviates by approximately PI/2 from the axis (0,1,0), then data might get cropped in the reconstruction volume. You can avoid that by rotating your input data and the rotational axis by PI/2. For instance, change `tilted_axis` from [1,0,0] to [0,1,0] and `np.rot90` the sinogram images. Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. 
:func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`). """ A = angles.shape[0] if angles.shape not in [(A,), (A, 1), (A, 3)]: raise ValueError("`angles` must have shape (A,) or (A,3)!") if len(uSin.shape) != 3: raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).") if len(uSin) != A: raise ValueError("`len(angles)` must be equal to `len(uSin)`.") if len(list(padding)) != 2: raise ValueError("`padding` must be boolean tuple of length 2!") if np.array(padding).dtype is not np.dtype(bool): raise ValueError("Parameter `padding` must be boolean tuple.") if coords is not None: raise NotImplementedError("Setting coordinates is not yet supported.") if num_cores > _ncores: raise ValueError("`num_cores` must not exceed number " + "of physical cores: {}".format(_ncores)) # setup dtype if dtype is None: dtype = np.float_ dtype = np.dtype(dtype) if dtype.name not in ["float32", "float64"]: raise ValueError("dtype must be float32 or float64!") dtype_complex = np.dtype("complex{}".format( 2 * int(dtype.name.strip("float")))) # progess monitoring if max_count is not None: max_count.value += A + 2 ne.set_num_threads(num_cores) uSin = np.array(uSin, copy=copy) angles = np.array(angles, copy=copy) angles = np.squeeze(angles) # support shape (A,1) # lengths of the input data lny, lnx = uSin.shape[1], uSin.shape[2] ln = lnx # We perform zero-padding before performing the Fourier transform. # This gets rid of artifacts due to false periodicity and also # speeds up Fourier transforms of the input image size is not # a power of 2. orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2)))) ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2)))) if padding[0]: padx = orderx - lnx else: padx = 0 if padding[1]: pady = ordery - lny else: pady = 0 padyl = np.int(np.ceil(pady / 2)) padyr = pady - padyl padxl = np.int(np.ceil(padx / 2)) padxr = padx - padxl # zero-padded length of sinogram. lNx, lNy = lnx + padx, lny + pady lNz = ln if verbose > 0: print("......Image size (x,y): {}x{}, padded: {}x{}".format( lnx, lny, lNx, lNy)) # `tilted_axis` is required for several things: # 1. the filter |kDx*v + kDy*u| with (u,v,w)==tilted_axis # 2. the alignment of the rotational axis with the y-axis # 3. the determination of the point coordinates if only # angles in radians are given. # For (1) we need the exact axis that corresponds to our input data. # For (2) and (3) we need `tilted_axis_yz` (see below) which is the # axis `tilted_axis` rotated in the detector plane such that its # projection onto the detector coincides with the y-axis. # Normalize input axis tilted_axis = norm_vec(tilted_axis) # `tilted_axis_yz` is computed by performing the inverse rotation in # the x-y plane with `angz`. We will again use `angz` in the transform # within the for-loop to rotate each projection according to its # acquisition angle. angz = np.arctan2(tilted_axis[0], tilted_axis[1]) rotmat = np.array([ [np.cos(angz), -np.sin(angz), 0], [np.sin(angz), np.cos(angz), 0], [0, 0, 1], ]) # rotate `tilted_axis` onto the y-z plane. 
tilted_axis_yz = norm_vec(np.dot(rotmat, tilted_axis)) if len(angles.shape) == 1: if weight_angles: weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1) # compute the 3D points from tilted axis angles = sphere_points_from_angles_and_tilt(angles, tilted_axis_yz) else: if weight_angles: warnings.warn("3D angular weighting not yet supported!") weights = 1 # normalize and rotate angles for ii in range(angles.shape[0]): # angles[ii] = norm_vec(angles[ii]) #-> not correct # instead rotate like `tilted_axis` onto the y-z plane. angles[ii] = norm_vec(np.dot(rotmat, angles[ii])) if weight_angles: uSin *= weights # Cut-Off frequency # km [1/px] km = (2 * np.pi * nm) / res # The notation in the our optical tomography script for # a wave propagating to the right is: # # u0(x) = exp(ikx) # # However, in physics usually we use the other sign convention: # # u0(x) = exp(-ikx) # # In order to be consistent with programs like Meep or our # scattering script for a dielectric cylinder, we want to use the # latter sign convention. # This is not a big problem. We only need to multiply the imaginary # part of the scattered wave by -1. # Ask for the filter. Do not include zero (first element). # # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ] # - double coverage factor 1/2 already included # - unitary angular frequency to unitary ordinary frequency # conversion performed in calculation of UB=FT(uB). # # f(r) = -i kₘ / ((2π)² a₀) (prefactor) # * iiint dϕ₀ dkx dky (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UBϕ₀(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r) # (r and s₀ are vectors. The last term contains a dot-product) # # kₘM = sqrt( kₘ² - kx² - ky² ) # t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) ) # # The filter can be split into two parts # # 1) part without dependence on the z-coordinate # # -i kₘ / ((2π)² a₀) # * iiint dϕ₀ dkx dky # * |kx| # * exp(-i kₘ M lD ) # # 2) part with dependence of the z-coordinate # # exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) # # The filter (1) can be performed using the classical filter process # as in the backprojection algorithm. # # # if lNx != lNy: # raise NotImplementedError("Input data must be square shaped!") # Corresponding sample frequencies fx = np.fft.fftfreq(lNx) # 1D array fy = np.fft.fftfreq(lNy) # 1D array # kx is a 1D array. kx = 2 * np.pi * fx ky = 2 * np.pi * fy # Differentials for integral dphi0 = 2 * np.pi / A # We will later multiply with phi0. # a, y, x kx = kx.reshape(1, -1) ky = ky.reshape(-1, 1) # Low-pass filter: # less-than-or-equal would give us zero division error. filter_klp = (kx**2 + ky**2 < km**2) # Filter M so there are no nans from the root M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp) prefactor = -1j * km / (2 * np.pi) prefactor *= dphi0 # Also filter the prefactor, so nothing outside the required # low-pass contributes to the sum. # The filter is now dependent on the rotational position of the # specimen. We have to include information from the angles. # We want to estimate the rotational axis for every frame. We # do that by computing the cross-product of the vectors in # angles from the current and previous image. u, v, _w = tilted_axis filterabs = np.abs(kx*v+ky*u) * filter_klp # new in version 0.1.4: # We multiply by the factor (M-1) instead of just (M) # to take into account that we have a scattered # wave that is normalized by u0. 
prefactor *= np.exp(-1j * km * (M-1) * lD) if count is not None: count.value += 1 # # # filter (2) must be applied before rotation as well # exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) # # kₘM = sqrt( kₘ² - kx² - ky² ) # t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) ) # # This filter is effectively an inverse Fourier transform # # exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD ) # # xD = x cos(ϕ₀) + z sin(ϕ₀) # zD = - x sin(ϕ₀) + z cos(ϕ₀) # Everything is in pixels center = lNz / 2.0 # x = np.linspace(-centerx, centerx, lNx, endpoint=False) # x = np.arange(lNx) - center + .5 # Meshgrid for output array # zv, yv, xv = np.meshgrid(x,x,x) # z, y, x # xv = x.reshape( 1, 1,-1) # yv = x.reshape( 1,-1, 1) # z = np.arange(ln) - center + .5 z = np.linspace(-center, center, lNz, endpoint=False) zv = z.reshape(-1, 1, 1) # y, x Mp = M.reshape(lNy, lNx) # filter2 = np.exp(1j * zv * km * (Mp - 1)) f2_exp_fac = 1j * km * (Mp - 1) if save_memory: # compute filter2 later pass else: # compute filter2 now # (this requires more RAM but is faster) filter2 = ne.evaluate("exp(factor * zv)", local_dict={"factor": f2_exp_fac, "zv": zv}) if count is not None: count.value += 1 # Prepare complex output image if onlyreal: outarr = np.zeros((ln, lny, lnx), dtype=dtype) else: outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex) # Create plan for FFTW: # Flag is "estimate": # specifies that, instead of actual measurements of different # algorithms, a simple heuristic is used to pick a (probably # sub-optimal) plan quickly. With this flag, the input/output # arrays are not overwritten during planning. # Byte-aligned arrays oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores, flags=["FFTW_ESTIMATE"], axes=(0, 1)) # Create plan for IFFTW: inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex) # plan is "patient": # FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range # of algorithms and often produces a “more optimal” plan # (especially for large transforms), but at the expense of # several times longer planning time (especially for large # transforms). # print(inarr.flags) myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores, axes=(0, 1), direction="FFTW_BACKWARD", flags=["FFTW_MEASURE"]) # filtered projections in loop filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex) # Rotate all points such that we are effectively rotating everything # about the y-axis. angles = rotate_points_to_axis(points=angles, axis=tilted_axis_yz) for aa in np.arange(A): if padval is None: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="edge") else: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="linear_ramp", end_values=(padval,)) myfftw_plan.execute() # normalize to (lNx * lNy) for FFTW and multiply with prefactor, filter oneslice *= filterabs * prefactor / (lNx * lNy) for p in range(len(zv)): if save_memory: # compute filter2 here; # this is comparatively slower than the other case ne.evaluate("exp(factor * zvp) * projectioni", local_dict={"zvp": zv[p], "projectioni": oneslice, "factor": f2_exp_fac}, out=inarr) else: # use universal functions np.multiply(filter2[p], oneslice, out=inarr) myifftw_plan.execute() filtered_proj[p, :, :] = inarr[padyl:padyl+lny, padxl:padxl+lnx] # The Cartesian axes in our array are ordered like this: [z,y,x] # However, the rotation matrix requires [x,y,z]. Therefore, we # need to np.transpose the first and last axis and also invert the # y-axis. 
fil_p_t = filtered_proj.transpose(2, 1, 0)[:, ::-1, :] # get rotation matrix for this point and also rotate in plane _drot, drotinv = rotation_matrix_from_point_planerot(angles[aa], plane_angle=angz, ret_inv=True) # apply offset required by affine_transform # The offset is only required for the rotation in # the x-z-plane. # This could be achieved like so: # The offset "-.5" assures that we are rotating about # the center of the image and not the value at the center # of the array (this is also what `scipy.ndimage.rotate` does. c = 0.5 * np.array(fil_p_t.shape) - .5 offset = c - np.dot(drotinv, c) # Perform rotation # We cannot split the inplace-rotation into multiple subrotations # as we did in _Back_3d_tilted.backpropagate_3d, because the rotation # axis is arbitrarily placed in the 3d array. Rotating single # slices does not yield the same result as rotating the entire # array. Instead of using affine_transform, map_coordinates might # be faster for multiple cores. # Also undo the axis transposition that we performed previously. outarr.real += scipy.ndimage.interpolation.affine_transform( fil_p_t.real, drotinv, offset=offset, mode="constant", cval=0, order=intp_order).transpose(2, 1, 0)[:, ::-1, :] if not onlyreal: outarr.imag += scipy.ndimage.interpolation.affine_transform( fil_p_t.imag, drotinv, offset=offset, mode="constant", cval=0, order=intp_order).transpose(2, 1, 0)[:, ::-1, :] if count is not None: count.value += 1 return outarr
r"""3D backpropagation with a tilted axis of rotation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm with a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}` w.r.t. the imaging plane :cite:`Mueller2015tilted`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}` and a different filter in Fourier space :math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared to :func:`backpropagate_3d`. .. versionadded:: 0.1.2 Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: ndarray of shape (A,3) or 1D array of length A If the shape is (A,3), then `angles` consists of vectors on the unit sphere that correspond to the direction of illumination and acquisition (s₀). If the shape is (A,), then `angles` is a one-dimensional array of angles in radians that determines the angular position :math:`\phi_j`. In both cases, `tilted_axis` must be set according to the tilt of the rotational axis. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. tilted_axis: list of floats The coordinates [x, y, z] on a unit sphere representing the tilted axis of rotation. The default is (0,1,0), which corresponds to a rotation about the y-axis and follows the behavior of :func:`odtbrain.backpropagate_3d`. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} This currently only works when `angles` has the shape (A,). onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas for it will lead to a padded size of 512 for an initial size of 150. 
Values greater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximate zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.affine_transform` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- This implementation can deal with projection angles that are not distributed along a circle about the rotational axis. If there are slight deviations from this circle, simply pass the 3D rotational positions instead of the 1D angles to the `angles` argument. In principle, this should improve the reconstruction. The general problem here is that the backpropagation algorithm requires a ramp filter in Fourier space that is oriented perpendicular to the rotational axis. If the sample does not rotate about a single axis, then a 1D parametric representation of this rotation must be found to correctly determine the filter in Fourier space. Such a parametric representation could e.g. be a spiral between the poles of the unit sphere (but this kind of rotation is probably difficult to implement experimentally). If you have input images with rectangular shape, e.g. Nx!=Ny and the rotational axis deviates by approximately PI/2 from the axis (0,1,0), then data might get cropped in the reconstruction volume. You can avoid that by rotating your input data and the rotational axis by PI/2. For instance, change `tilted_axis` from [1,0,0] to [0,1,0] and `np.rot90` the sinogram images. Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`).
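The padded sizes quoted for `padfac` follow from the power-of-two rule used in the implementation:

import numpy as np

def padded_size(n, padfac):
    return int(max(64.0, 2 ** np.ceil(np.log(n * padfac) / np.log(2))))

print(padded_size(144, 1.75))   # 256  (144 * 1.75 = 252.0)
print(padded_size(150, 1.75))   # 512  (150 * 1.75 = 262.5)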
def damping_kraus_map(p=0.10): """ Generate the Kraus operators corresponding to an amplitude damping noise channel. :param float p: The one-step damping probability. :return: A list [k1, k2] of the Kraus operators that parametrize the map. :rtype: list """ damping_op = np.sqrt(p) * np.array([[0, 1], [0, 0]]) residual_kraus = np.diag([1, np.sqrt(1 - p)]) return [residual_kraus, damping_op]
Generate the Kraus operators corresponding to an amplitude damping noise channel. :param float p: The one-step damping probability. :return: A list [k1, k2] of the Kraus operators that parametrize the map. :rtype: list
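A quick completeness check: the two Kraus operators should satisfy sum_i K_i^dagger K_i = I (trace preservation):

import numpy as np

p = 0.10
damping_op = np.sqrt(p) * np.array([[0, 1], [0, 0]])
residual_kraus = np.diag([1, np.sqrt(1 - p)])
total = sum(k.conj().T @ k for k in (residual_kraus, damping_op))
assert np.allclose(total, np.eye(2))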
def trimSegments(self, minPermanence=None, minNumSyns=None): """ This method deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining. :param minPermanence: (float) Any syn whose permanence is 0 or < ``minPermanence`` will be deleted. If None is passed in, then ``self.connectedPerm`` is used. :param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses remaining in it will be deleted. If None is passed in, then ``self.activationThreshold`` is used. :returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved`` """ # Fill in defaults if minPermanence is None: minPermanence = self.connectedPerm if minNumSyns is None: minNumSyns = self.activationThreshold # Loop through all cells totalSegsRemoved, totalSynsRemoved = 0, 0 for c, i in itertools.product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)): (segsRemoved, synsRemoved) = self._trimSegmentsInCell( colIdx=c, cellIdx=i, segList=self.cells[c][i], minPermanence=minPermanence, minNumSyns=minNumSyns) totalSegsRemoved += segsRemoved totalSynsRemoved += synsRemoved # Print all cells if verbosity says to if self.verbosity >= 5: print "Cells, all segments:" self.printCells(predictedOnly=False) return totalSegsRemoved, totalSynsRemoved
This method deletes all synapses whose permanence is less than minPermanence and deletes any segments that have less than minNumSyns synapses remaining. :param minPermanence: (float) Any syn whose permanence is 0 or < ``minPermanence`` will be deleted. If None is passed in, then ``self.connectedPerm`` is used. :param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses remaining in it will be deleted. If None is passed in, then ``self.activationThreshold`` is used. :returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``
def to_holvi_dict(self): """Convert our Python object to JSON acceptable to Holvi API""" self._jsondata["items"] = [] for item in self.items: self._jsondata["items"].append(item.to_holvi_dict()) self._jsondata["issue_date"] = self.issue_date.isoformat() self._jsondata["due_date"] = self.due_date.isoformat() self._jsondata["receiver"] = self.receiver.to_holvi_dict() return {k: v for (k, v) in self._jsondata.items() if k in self._valid_keys}
Convert our Python object to JSON acceptable to Holvi API
def build_ast(expression, debug=False):
    """build an AST from an Excel formula expression in reverse polish notation"""
    # use a directed graph to store the tree
    G = DiGraph()
    stack = []
    for n in expression:
        # Since the graph does not maintain the order of adding nodes/edges,
        # add an extra attribute 'pos' so we can always sort to the correct order
        if isinstance(n, OperatorNode):
            if n.ttype == "operator-infix":
                arg2 = stack.pop()
                arg1 = stack.pop()
                # Hack to write the sheet name into the second argument of a
                # two-argument range address
                if n.tvalue == ':':
                    if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
                        arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
                G.add_node(arg1, pos=1)
                G.add_node(arg2, pos=2)
                G.add_edge(arg1, n)
                G.add_edge(arg2, n)
            else:
                arg1 = stack.pop()
                G.add_node(arg1, pos=1)
                G.add_edge(arg1, n)
        elif isinstance(n, FunctionNode):
            args = []
            for _ in range(n.num_args):
                try:
                    args.append(stack.pop())
                except IndexError:
                    raise Exception("Stack exhausted while collecting %d "
                                    "arguments for %s" % (n.num_args, n.tvalue))
            args.reverse()
            for i, a in enumerate(args):
                G.add_node(a, pos=i)
                G.add_edge(a, n)
        else:
            G.add_node(n, pos=0)
        stack.append(n)
    return G, stack.pop()
build an AST from an Excel formula expression in reverse polish notation
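The same stack discipline in miniature, with plain tuples standing in for the OperatorNode/FunctionNode classes (which are assumptions here) and networkx's DiGraph:

import networkx as nx

def rpn_to_tree(tokens):
    """tokens: e.g. ['3', '4', '+', '2', '*'] encodes (3 + 4) * 2."""
    G, stack = nx.DiGraph(), []
    for i, tok in enumerate(tokens):
        node = (i, tok)  # unique node per token position
        if tok in ('+', '-', '*', '/'):  # binary infix operator
            right, left = stack.pop(), stack.pop()
            G.add_node(left, pos=1)
            G.add_node(right, pos=2)
            G.add_edge(left, node)
            G.add_edge(right, node)
        else:
            G.add_node(node, pos=0)  # operand leaf
        stack.append(node)
    return G, stack.pop()  # the last node left on the stack is the root

G, root = rpn_to_tree(['3', '4', '+', '2', '*'])
assert root == (4, '*') and G.number_of_edges() == 4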
def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False, start=None, stop=None, step=None): """ Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> import tables >>> # set up a new hdf5 table to demonstrate with ... h5file = tables.open_file('example.h5', mode='w', title='Test file') >>> h5file.create_group('/', 'testgroup', 'Test Group') /testgroup (Group) 'Test Group' children := [] >>> class FooBar(tables.IsDescription): ... foo = tables.Int32Col(pos=0) ... bar = tables.StringCol(6, pos=2) ... >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table') >>> # load some data into the table ... table1 = (('foo', 'bar'), ... (3, b'asdfgh'), ... (2, b'qwerty'), ... (1, b'zxcvbn')) >>> for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... >>> h5table.cols.foo.create_csindex() # CS index is required 0 >>> h5file.flush() >>> h5file.close() >>> # ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', ... sortby='foo') >>> table2 +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+ """ assert sortby is not None, 'no column specified to sort by' return HDF5SortedView(source, where=where, name=name, sortby=sortby, checkCSI=checkCSI, start=start, stop=stop, step=step)
Provides access to an HDF5 table, sorted by an indexed column, e.g.:: >>> import petl as etl >>> import tables >>> # set up a new hdf5 table to demonstrate with ... h5file = tables.open_file('example.h5', mode='w', title='Test file') >>> h5file.create_group('/', 'testgroup', 'Test Group') /testgroup (Group) 'Test Group' children := [] >>> class FooBar(tables.IsDescription): ... foo = tables.Int32Col(pos=0) ... bar = tables.StringCol(6, pos=2) ... >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table') >>> # load some data into the table ... table1 = (('foo', 'bar'), ... (3, b'asdfgh'), ... (2, b'qwerty'), ... (1, b'zxcvbn')) >>> for row in table1[1:]: ... for i, f in enumerate(table1[0]): ... h5table.row[f] = row[i] ... h5table.row.append() ... >>> h5table.cols.foo.create_csindex() # CS index is required 0 >>> h5file.flush() >>> h5file.close() >>> # ... # access the data, sorted by the indexed column ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable', ... sortby='foo') >>> table2 +-----+-----------+ | foo | bar | +=====+===========+ | 1 | b'zxcvbn' | +-----+-----------+ | 2 | b'qwerty' | +-----+-----------+ | 3 | b'asdfgh' | +-----+-----------+
def argparser(self):
    """ Argparser option with search functionality specific for ranges. """
    core_parser = self.core_parser
    core_parser.add_argument('-r', '--range', type=str,
                             help="The range to search for")
    return core_parser
Argparser option with search functionality specific for ranges.
def update_service(name, service_map): """Get an update from the specified service. Arguments: name (:py:class:`str`): The name of the service. service_map (:py:class:`dict`): A mapping of service names to :py:class:`flash.service.core.Service` instances. Returns: :py:class:`dict`: The updated data. """ if name in service_map: service = service_map[name] data = service.update() if not data: logger.warning('no data received for service: %s', name) else: data['service_name'] = service.service_name CACHE[name] = dict(data=data, updated=datetime.now()) else: logger.warning('service not found: %s', name) if name in CACHE: return add_time(CACHE[name]) return {}
Get an update from the specified service. Arguments: name (:py:class:`str`): The name of the service. service_map (:py:class:`dict`): A mapping of service names to :py:class:`flash.service.core.Service` instances. Returns: :py:class:`dict`: The updated data.
def _set_general_compilers(self):
    """Adds compiler channels to the :attr:`processes` attribute.

    This method will iterate over the pipeline's processes and check
    if any process is feeding channels to a compiler process. If so,
    that compiler process is added to the pipeline and those channels
    are linked to the compiler via some operator.
    """
    for c, c_info in self.compilers.items():
        # Instantiate compiler class object and set empty channel list
        compiler_cls = c_info["cls"](template=c_info["template"])
        c_info["channels"] = []
        for p in self.processes:
            if not any([isinstance(p, x) for x in self.skip_class]):
                # Check if process has channels to feed to a compiler
                if c in p.compiler:
                    # Correct channel names according to the pid of the
                    # process
                    channels = ["{}_{}".format(i, p.pid)
                                for i in p.compiler[c]]
                    c_info["channels"].extend(channels)
        # If one or more channels were detected, establish connections
        # and append compiler to the process list.
        if c_info["channels"]:
            compiler_cls.set_compiler_channels(c_info["channels"],
                                               operator="join")
            self.processes.append(compiler_cls)
Adds compiler channels to the :attr:`processes` attribute. This method will iterate over the pipeline's processes and check if any process is feeding channels to a compiler process. If so, that compiler process is added to the pipeline and those channels are linked to the compiler via some operator.
def github_repos(organization, github_url, github_token): """Return all github repositories in an organization.""" # Get github repos headers = {"Authorization": "token {}".format(github_token)} next_cursor = None while next_cursor is not False: params = {'query': query, 'variables': { 'organization': organization, 'cursor': next_cursor}} response = requests.post(github_url, headers=headers, json=params) result = response.json() if response.status_code != 200 or 'errors' in result: raise ValueError("Github api error %s" % ( response.content.decode('utf8'),)) repos = jmespath.search( 'data.organization.repositories.edges[].node', result) for r in repos: yield r page_info = jmespath.search( 'data.organization.repositories.pageInfo', result) if page_info: next_cursor = (page_info['hasNextPage'] and page_info['endCursor'] or False) else: next_cursor = False
Return all github repositories in an organization.
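Note that the function above refers to a module-level `query` string that is not shown. A plausible definition, reconstructed purely from the jmespath paths it searches (so this exact shape is an assumption, not the original source):

# Hypothetical GraphQL query matching the jmespath expressions above.
query = """
query($organization: String!, $cursor: String) {
  organization(login: $organization) {
    repositories(first: 100, after: $cursor) {
      pageInfo { hasNextPage endCursor }
      edges { node { name url } }
    }
  }
}
"""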
def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda, x, incx, beta, y, incy): """ Matrix-vector product for complex general banded matrix. """ status = _libcublas.cublasZgbmv_v2(handle, trans, m, n, kl, ku, ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag)), int(y), incy) cublasCheckStatus(status)
Matrix-vector product for complex general banded matrix.
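For readers unfamiliar with BLAS banded storage: element A[i, j] of the dense matrix lives at ab[ku + i - j, j] in the (kl + ku + 1) x n band array. A NumPy sketch that expands a band back to a dense matrix, handy for cross-checking gbmv results:

import numpy as np

def band_to_dense(ab, m, n, kl, ku):
    """Expand BLAS band storage ab, shape (kl + ku + 1, n), to dense (m, n)."""
    A = np.zeros((m, n), dtype=ab.dtype)
    for j in range(n):
        for i in range(max(0, j - ku), min(m, j + kl + 1)):
            A[i, j] = ab[ku + i - j, j]
    return A

# y = alpha * band_to_dense(ab, m, n, kl, ku) @ x + beta * y then reproduces
# the no-transpose gbmv result.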
def caesar(shift, data, shift_ranges=('az', 'AZ')):
    """
    Apply a caesar cipher to a string.

    The caesar cipher is a substitution cipher where each letter in the given
    alphabet is replaced by a letter some fixed number down the alphabet.

    If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc...

    You can define the alphabets that will be shifted by specifying one or
    more shift ranges. The characters will then be shifted within the given
    ranges.

    Args:
        shift(int): The shift to apply.
        data(str): The string to apply the cipher to.
        shift_ranges(list of str): Which alphabets to shift.

    Returns:
        str: The string with the caesar cipher applied.

    Examples:
        >>> caesar(16, 'Pwnypack')
        'Fmdofqsa'
        >>> caesar(-16, 'Fmdofqsa')
        'Pwnypack'
        >>> caesar(16, 'PWNYpack', shift_ranges=('AZ',))
        'FMDOpack'
        >>> caesar(16, 'PWNYpack', shift_ranges=('Az',))
        '`g^iFqsA'
    """
    alphabet = dict(
        (chr(c), chr((c - s + shift) % (e - s + 1) + s))
        for s, e in map(lambda r: (ord(r[0]), ord(r[-1])), shift_ranges)
        for c in range(s, e + 1)
    )
    return ''.join(alphabet.get(c, c) for c in data)
Apply a caesar cipher to a string. The caesar cipher is a substitution cipher where each letter in the given alphabet is replaced by a letter some fixed number down the alphabet. If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc... You can define the alphabets that will be shifted by specifying one or more shift ranges. The characters will then be shifted within the given ranges. Args: shift(int): The shift to apply. data(str): The string to apply the cipher to. shift_ranges(list of str): Which alphabets to shift. Returns: str: The string with the caesar cipher applied. Examples: >>> caesar(16, 'Pwnypack') 'Fmdofqsa' >>> caesar(-16, 'Fmdofqsa') 'Pwnypack' >>> caesar(16, 'PWNYpack', shift_ranges=('AZ',)) 'FMDOpack' >>> caesar(16, 'PWNYpack', shift_ranges=('Az',)) '`g^iFqsA'
def check_file_list_cache(opts, form, list_cache, w_lock):
    '''
    Checks the cache file to see if there is a new enough file list cache, and
    returns the match (if found, along with booleans used by the fileserver
    backend to determine if the cache needs to be refreshed/written).
    '''
    refresh_cache = False
    save_cache = True
    serial = salt.payload.Serial(opts)
    wait_lock(w_lock, list_cache, 5 * 60)
    if not os.path.isfile(list_cache) and _lock_cache(w_lock):
        refresh_cache = True
    else:
        attempt = 0
        while attempt < 11:
            try:
                if os.path.exists(w_lock):
                    # wait for a filelist lock for max 15min
                    wait_lock(w_lock, list_cache, 15 * 60)
                if os.path.exists(list_cache):
                    # calculate filelist age if possible
                    cache_stat = os.stat(list_cache)
                    # st_mtime can have a greater precision than time;
                    # removing the float precision makes sure age will never
                    # be a negative number.
                    current_time = int(time.time())
                    file_mtime = int(cache_stat.st_mtime)
                    if file_mtime > current_time:
                        log.debug(
                            'Cache file modified time is in the future, ignoring. '
                            'file=%s mtime=%s current_time=%s',
                            list_cache, file_mtime, current_time
                        )
                        age = 0
                    else:
                        age = current_time - file_mtime
                else:
                    # if the filelist does not exist yet, mark it as expired
                    age = opts.get('fileserver_list_cache_time', 20) + 1
                if age < 0:
                    # Cache is from the future! Warn and mark cache invalid.
                    log.warning('The file list_cache was created in the future!')
                if 0 <= age < opts.get('fileserver_list_cache_time', 20):
                    # Young enough! Load this sucker up!
                    with salt.utils.files.fopen(list_cache, 'rb') as fp_:
                        log.debug(
                            "Returning file list from cache: age=%s cache_time=%s %s",
                            age, opts.get('fileserver_list_cache_time', 20), list_cache
                        )
                        return salt.utils.data.decode(serial.load(fp_).get(form, [])), False, False
                elif _lock_cache(w_lock):
                    # Set the w_lock and go
                    refresh_cache = True
                    break
            except Exception:
                time.sleep(0.2)
                attempt += 1
                continue
        if attempt > 10:
            save_cache = False
            refresh_cache = True
    return None, refresh_cache, save_cache
Checks the cache file to see if there is a new enough file list cache, and returns the match (if found, along with booleans used by the fileserver backend to determine if the cache needs to be refreshed/written).
def get_random(self, size=10):
    """Returns (size, n_dim) array of random variates from the histogram.
    Inside the bins, a uniform distribution is assumed.
    Note this assumes the histogram is an 'events per bin', not a pdf.
    TODO: test more.
    """
    # Sample random bin centers
    bin_centers_ravel = np.array(np.meshgrid(*self.bin_centers(),
                                             indexing='ij')).reshape(self.dimensions, -1).T
    hist_ravel = self.histogram.ravel()
    # np.float is a deprecated alias; use the builtin float instead
    hist_ravel = hist_ravel.astype(float) / np.nansum(hist_ravel)
    result = bin_centers_ravel[np.random.choice(len(bin_centers_ravel),
                                                p=hist_ravel,
                                                size=size)]
    # Randomize the position inside the bin
    for dim_i in range(self.dimensions):
        bin_edges = self.bin_edges[dim_i]
        bin_widths = np.diff(bin_edges)
        # Note the - 1: for the first bin's bin center, searchsorted gives 1,
        # but we want 0 here:
        index_of_bin = np.searchsorted(bin_edges, result[:, dim_i]) - 1
        result[:, dim_i] += (np.random.rand(size) - 0.5) * bin_widths[index_of_bin]
    return result
Returns (size, n_dim) array of random variates from the histogram. Inside the bins, a uniform distribution is assumed Note this assumes the histogram is an 'events per bin', not a pdf. TODO: test more.
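A rough self-test of the sampling idea, assuming a multihist-style `Histdd` class (the class name and constructor are guesses about the surrounding library): resampled variates should approximately reproduce the input mean.

import numpy as np

data = np.random.normal(0, 1, size=100_000)
h = Histdd(data, bins=100)            # constructor shape is an assumption
samples = h.get_random(size=50_000)   # shape (50_000, 1)
assert abs(samples.mean() - data.mean()) < 0.05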
def put_attribute(self, id, key, value, **kwargs): """ Add attribute to the BuildRecord. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.put_attribute(id, key, value, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: BuildRecord id (required) :param str key: Attribute key (required) :param str value: Attribute value (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.put_attribute_with_http_info(id, key, value, **kwargs) else: (data) = self.put_attribute_with_http_info(id, key, value, **kwargs) return data
Add attribute to the BuildRecord. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.put_attribute(id, key, value, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: BuildRecord id (required) :param str key: Attribute key (required) :param str value: Attribute value (required) :return: None If the method is called asynchronously, returns the request thread.
def UNIFAC(T, xs, chemgroups, cached=None, subgroup_data=None, interaction_data=None, modified=False): r'''Calculates activity coefficients using the UNIFAC model (optionally modified), given a mixture's temperature, liquid mole fractions, and optionally the subgroup data and interaction parameter data of your choice. The default is to use the original UNIFAC model, with the latest parameters published by DDBST. The model supports modified forms (Dortmund, NIST) when the `modified` parameter is True. Parameters ---------- T : float Temperature of the system, [K] xs : list[float] Mole fractions of all species in the system in the liquid phase, [-] chemgroups : list[dict] List of dictionaries of subgroup IDs and their counts for all species in the mixture, [-] subgroup_data : dict[UNIFAC_subgroup] UNIFAC subgroup data; available dictionaries in this module are UFSG (original), DOUFSG (Dortmund), or NISTUFSG ([4]_). interaction_data : dict[dict[tuple(a_mn, b_mn, c_mn)]] UNIFAC interaction parameter data; available dictionaries in this module are UFIP (original), DOUFIP2006 (Dortmund parameters as published by 2006), DOUFIP2016 (Dortmund parameters as published by 2016), and NISTUFIP ([4]_). modified : bool True if using the modified form and temperature dependence, otherwise False. Returns ------- gammas : list[float] Activity coefficients of all species in the mixture, [-] Notes ----- The actual implementation of UNIFAC is formulated slightly different than the formulas above for computational efficiency. DDBST switched to using the more efficient forms in their publication, but the numerical results are identical. The model is as follows: .. math:: \ln \gamma_i = \ln \gamma_i^c + \ln \gamma_i^r **Combinatorial component** .. math:: \ln \gamma_i^c = \ln \frac{\phi_i}{x_i} + \frac{z}{2} q_i \ln\frac{\theta_i}{\phi_i} + L_i - \frac{\phi_i}{x_i} \sum_{j=1}^{n} x_j L_j \theta_i = \frac{x_i q_i}{\sum_{j=1}^{n} x_j q_j} \phi_i = \frac{x_i r_i}{\sum_{j=1}^{n} x_j r_j} L_i = 5(r_i - q_i)-(r_i-1) **Residual component** .. math:: \ln \gamma_i^r = \sum_{k}^n \nu_k^{(i)} \left[ \ln \Gamma_k - \ln \Gamma_k^{(i)} \right] \ln \Gamma_k = Q_k \left[1 - \ln \sum_m \Theta_m \Psi_{mk} - \sum_m \frac{\Theta_m \Psi_{km}}{\sum_n \Theta_n \Psi_{nm}}\right] \Theta_m = \frac{Q_m X_m}{\sum_{n} Q_n X_n} X_m = \frac{ \sum_j \nu^j_m x_j}{\sum_j \sum_n \nu_n^j x_j} **R and Q** .. math:: r_i = \sum_{k=1}^{n} \nu_k R_k q_i = \sum_{k=1}^{n}\nu_k Q_k The newer forms of UNIFAC (Dortmund, NIST) calculate the combinatorial part slightly differently: .. math:: \ln \gamma_i^c = 1 - {V'}_i + \ln({V'}_i) - 5q_i \left(1 - \frac{V_i}{F_i}+ \ln\left(\frac{V_i}{F_i}\right)\right) V'_i = \frac{r_i^{3/4}}{\sum_j r_j^{3/4}x_j} This is more clear when looking at the full rearranged form as in [3]_. Examples -------- >>> UNIFAC(T=333.15, xs=[0.5, 0.5], chemgroups=[{1:2, 2:4}, {1:1, 2:1, 18:1}]) [1.4276025835624173, 1.3646545010104225] >>> UNIFAC(373.15, [0.2, 0.3, 0.2, 0.2], ... [{9:6}, {78:6}, {1:1, 18:1}, {1:1, 2:1, 14:1}], ... subgroup_data=DOUFSG, interaction_data=DOUFIP2006, modified=True) [1.186431113706829, 1.440280133911197, 1.204479833499608, 1.9720706090299824] References ---------- .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation. Weinheim, Germany: Wiley-VCH, 2012. .. [2] Fredenslund, Aage, Russell L. Jones, and John M. Prausnitz. "Group Contribution Estimation of Activity Coefficients in Nonideal Liquid Mixtures." AIChE Journal 21, no. 6 (November 1, 1975): 1086-99. doi:10.1002/aic.690210607. .. 
[3] Jakob, Antje, Hans Grensemann, Jürgen Lohmann, and Jürgen Gmehling.
       "Further Development of Modified UNIFAC (Dortmund): Revision and
       Extension 5." Industrial & Engineering Chemistry Research 45, no. 23
       (November 1, 2006): 7924-33. doi:10.1021/ie060355c.
    .. [4] Kang, Jeong Won, Vladimir Diky, and Michael Frenkel. "New Modified
       UNIFAC Parameters Using Critically Evaluated Phase Equilibrium Data."
       Fluid Phase Equilibria 388 (February 25, 2015): 128-41.
       doi:10.1016/j.fluid.2014.12.042.
    '''
    cmps = range(len(xs))
    if subgroup_data is None:
        subgroups = UFSG
    else:
        subgroups = subgroup_data
    if interaction_data is None:
        interactions = UFIP
    else:
        interactions = interaction_data
    # Obtain r and q values using the subgroup values
    if not cached:
        rs = []
        qs = []
        for groups in chemgroups:
            ri = 0.
            qi = 0.
            for group, count in groups.items():
                ri += subgroups[group].R*count
                qi += subgroups[group].Q*count
            rs.append(ri)
            qs.append(qi)

        group_counts = {}
        for groups in chemgroups:
            for group, count in groups.items():
                if group in group_counts:
                    group_counts[group] += count
                else:
                    group_counts[group] = count
    else:
        rs, qs, group_counts = cached

    # Sum the denominator for calculating Xs
    group_sum = sum(count*xs[i] for i in cmps for count in chemgroups[i].values())

    # Calculate each numerator for calculating Xs
    group_count_xs = {}
    for group in group_counts:
        tot_numerator = sum(chemgroups[i][group]*xs[i] for i in cmps if group in chemgroups[i])
        group_count_xs[group] = tot_numerator/group_sum

    rsxs = sum([rs[i]*xs[i] for i in cmps])
    Vis = [rs[i]/rsxs for i in cmps]
    qsxs = sum([qs[i]*xs[i] for i in cmps])
    Fis = [qs[i]/qsxs for i in cmps]

    if modified:
        rsxs2 = sum([rs[i]**0.75*xs[i] for i in cmps])
        Vis2 = [rs[i]**0.75/rsxs2 for i in cmps]
        loggammacs = [1. - Vis2[i] + log(Vis2[i]) - 5.*qs[i]*(1. - Vis[i]/Fis[i]
                      + log(Vis[i]/Fis[i])) for i in cmps]
    else:
        loggammacs = [1. - Vis[i] + log(Vis[i]) - 5.*qs[i]*(1. - Vis[i]/Fis[i]
                      + log(Vis[i]/Fis[i])) for i in cmps]

    Q_sum_term = sum([subgroups[group].Q*group_count_xs[group] for group in group_counts])
    area_fractions = {group: subgroups[group].Q*group_count_xs[group]/Q_sum_term
                      for group in group_counts.keys()}

    UNIFAC_psis = {k: {m: (UNIFAC_psi(T, m, k, subgroups, interactions, modified=modified))
                       for m in group_counts} for k in group_counts}

    loggamma_groups = {}
    for k in group_counts:
        sum1, sum2 = 0., 0.
        for m in group_counts:
            sum1 += area_fractions[m]*UNIFAC_psis[k][m]
            sum3 = sum(area_fractions[n]*UNIFAC_psis[m][n] for n in group_counts)
            sum2 -= area_fractions[m]*UNIFAC_psis[m][k]/sum3
        loggamma_groups[k] = subgroups[k].Q*(1. - log(sum1) + sum2)

    loggammars = []
    for groups in chemgroups:
        chem_loggamma_groups = {}
        chem_group_sum = sum(groups.values())
        chem_group_count_xs = {group: count/chem_group_sum for group, count in groups.items()}

        Q_sum_term = sum([subgroups[group].Q*chem_group_count_xs[group] for group in groups])
        chem_area_fractions = {group: subgroups[group].Q*chem_group_count_xs[group]/Q_sum_term
                               for group in groups.keys()}
        for k in groups:
            sum1, sum2 = 0., 0.
            for m in groups:
                sum1 += chem_area_fractions[m]*UNIFAC_psis[k][m]
                sum3 = sum(chem_area_fractions[n]*UNIFAC_psis[m][n] for n in groups)
                sum2 -= chem_area_fractions[m]*UNIFAC_psis[m][k]/sum3
            chem_loggamma_groups[k] = subgroups[k].Q*(1. - log(sum1) + sum2)

        tot = sum([count*(loggamma_groups[group] - chem_loggamma_groups[group])
                   for group, count in groups.items()])
        loggammars.append(tot)

    return [exp(loggammacs[i]+loggammars[i]) for i in cmps]
r'''Calculates activity coefficients using the UNIFAC model (optionally modified), given a mixture's temperature, liquid mole fractions, and optionally the subgroup data and interaction parameter data of your choice. The default is to use the original UNIFAC model, with the latest parameters published by DDBST. The model supports modified forms (Dortmund, NIST) when the `modified` parameter is True. Parameters ---------- T : float Temperature of the system, [K] xs : list[float] Mole fractions of all species in the system in the liquid phase, [-] chemgroups : list[dict] List of dictionaries of subgroup IDs and their counts for all species in the mixture, [-] subgroup_data : dict[UNIFAC_subgroup] UNIFAC subgroup data; available dictionaries in this module are UFSG (original), DOUFSG (Dortmund), or NISTUFSG ([4]_). interaction_data : dict[dict[tuple(a_mn, b_mn, c_mn)]] UNIFAC interaction parameter data; available dictionaries in this module are UFIP (original), DOUFIP2006 (Dortmund parameters as published by 2006), DOUFIP2016 (Dortmund parameters as published by 2016), and NISTUFIP ([4]_). modified : bool True if using the modified form and temperature dependence, otherwise False. Returns ------- gammas : list[float] Activity coefficients of all species in the mixture, [-] Notes ----- The actual implementation of UNIFAC is formulated slightly different than the formulas above for computational efficiency. DDBST switched to using the more efficient forms in their publication, but the numerical results are identical. The model is as follows: .. math:: \ln \gamma_i = \ln \gamma_i^c + \ln \gamma_i^r **Combinatorial component** .. math:: \ln \gamma_i^c = \ln \frac{\phi_i}{x_i} + \frac{z}{2} q_i \ln\frac{\theta_i}{\phi_i} + L_i - \frac{\phi_i}{x_i} \sum_{j=1}^{n} x_j L_j \theta_i = \frac{x_i q_i}{\sum_{j=1}^{n} x_j q_j} \phi_i = \frac{x_i r_i}{\sum_{j=1}^{n} x_j r_j} L_i = 5(r_i - q_i)-(r_i-1) **Residual component** .. math:: \ln \gamma_i^r = \sum_{k}^n \nu_k^{(i)} \left[ \ln \Gamma_k - \ln \Gamma_k^{(i)} \right] \ln \Gamma_k = Q_k \left[1 - \ln \sum_m \Theta_m \Psi_{mk} - \sum_m \frac{\Theta_m \Psi_{km}}{\sum_n \Theta_n \Psi_{nm}}\right] \Theta_m = \frac{Q_m X_m}{\sum_{n} Q_n X_n} X_m = \frac{ \sum_j \nu^j_m x_j}{\sum_j \sum_n \nu_n^j x_j} **R and Q** .. math:: r_i = \sum_{k=1}^{n} \nu_k R_k q_i = \sum_{k=1}^{n}\nu_k Q_k The newer forms of UNIFAC (Dortmund, NIST) calculate the combinatorial part slightly differently: .. math:: \ln \gamma_i^c = 1 - {V'}_i + \ln({V'}_i) - 5q_i \left(1 - \frac{V_i}{F_i}+ \ln\left(\frac{V_i}{F_i}\right)\right) V'_i = \frac{r_i^{3/4}}{\sum_j r_j^{3/4}x_j} This is more clear when looking at the full rearranged form as in [3]_. Examples -------- >>> UNIFAC(T=333.15, xs=[0.5, 0.5], chemgroups=[{1:2, 2:4}, {1:1, 2:1, 18:1}]) [1.4276025835624173, 1.3646545010104225] >>> UNIFAC(373.15, [0.2, 0.3, 0.2, 0.2], ... [{9:6}, {78:6}, {1:1, 18:1}, {1:1, 2:1, 14:1}], ... subgroup_data=DOUFSG, interaction_data=DOUFIP2006, modified=True) [1.186431113706829, 1.440280133911197, 1.204479833499608, 1.9720706090299824] References ---------- .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation. Weinheim, Germany: Wiley-VCH, 2012. .. [2] Fredenslund, Aage, Russell L. Jones, and John M. Prausnitz. "Group Contribution Estimation of Activity Coefficients in Nonideal Liquid Mixtures." AIChE Journal 21, no. 6 (November 1, 1975): 1086-99. doi:10.1002/aic.690210607. .. [3] Jakob, Antje, Hans Grensemann, Jürgen Lohmann, and Jürgen Gmehling. 
"Further Development of Modified UNIFAC (Dortmund):  Revision and Extension 5." Industrial & Engineering Chemistry Research 45, no. 23 (November 1, 2006): 7924-33. doi:10.1021/ie060355c. .. [4] Kang, Jeong Won, Vladimir Diky, and Michael Frenkel. "New Modified UNIFAC Parameters Using Critically Evaluated Phase Equilibrium Data." Fluid Phase Equilibria 388 (February 25, 2015): 128-41. doi:10.1016/j.fluid.2014.12.042.
def idmap_get_new(connection, old, tbl): """ From the old ID string, obtain a replacement ID string by either grabbing it from the _idmap_ table if one has already been assigned to the old ID, or by using the current value of the Table instance's next_id class attribute. In the latter case, the new ID is recorded in the _idmap_ table, and the class attribute incremented by 1. This function is for internal use, it forms part of the code used to re-map row IDs when merging multiple documents. """ cursor = connection.cursor() cursor.execute("SELECT new FROM _idmap_ WHERE old == ?", (old,)) new = cursor.fetchone() if new is not None: # a new ID has already been created for this old ID return ilwd.ilwdchar(new[0]) # this ID was not found in _idmap_ table, assign a new ID and # record it new = tbl.get_next_id() cursor.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new)) return new
From the old ID string, obtain a replacement ID string by either grabbing it from the _idmap_ table if one has already been assigned to the old ID, or by using the current value of the Table instance's next_id class attribute. In the latter case, the new ID is recorded in the _idmap_ table, and the class attribute incremented by 1. This function is for internal use, it forms part of the code used to re-map row IDs when merging multiple documents.
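The same pattern in a self-contained form, with plain sqlite3 and integer counters standing in for the ligolw table machinery (the table name comes from the code above; everything else is simplified):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE _idmap_ (old TEXT PRIMARY KEY, new TEXT)")

next_id = [0]  # stand-in for the Table class's next_id attribute

def get_new(old):
    row = conn.execute("SELECT new FROM _idmap_ WHERE old == ?",
                       (old,)).fetchone()
    if row is not None:
        return row[0]          # already remapped
    new = "row:%d" % next_id[0]
    next_id[0] += 1
    conn.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new))
    return new

assert get_new("doc1:row:7") == get_new("doc1:row:7")  # mapping is stable
assert get_new("doc2:row:7") != get_new("doc1:row:7")  # distinct old IDs differ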
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA (link-local 169.254.0.0/16) addresses
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
Wait for IP to be available during create()
def get_value(self, key, default={}, nested=True, decrypt=True):
    """
    Retrieve a value from the configuration based on its key.
    The key may be nested.

    :param str key: A path to the value, with nested levels joined by '.'
    :param default: Value to return if the key does not exist (defaults to
        :code:`dict()`)
    :param bool decrypt: If :code:`True`, decrypt an encrypted value before
        returning (if encrypted). Defaults to :code:`True`.
    """
    key = key.lstrip()
    if key.endswith("."):
        key = key[:-1]
    if nested:
        path = key.split(".")
        curr = self.settings
        for p in path[:-1]:
            curr = curr.get(p, {})
        try:
            value = curr[path[-1]]
        except KeyError:
            return default
        if decrypt:
            # honor the `decrypt` flag instead of decrypting unconditionally
            value = self.decrypt(value, path)
        return value
    else:
        return self.settings.get(key, default)
Retrieve a value from the configuration based on its key. The key may be nested. :param str key: A path to the value, with nested levels joined by '.' :param default: Value to return if the key does not exist (defaults to :code:`dict()`) :param bool decrypt: If :code:`True`, decrypt an encrypted value before returning (if encrypted). Defaults to :code:`True`.
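Hypothetical usage, assuming `settings` is a plain nested dict on the object and `decrypt` is the identity for plaintext values:

cfg.settings = {"db": {"host": "localhost", "port": 5432}}
cfg.get_value("db.port")                 # -> 5432
cfg.get_value("db.password")             # missing key -> {} (the default)
cfg.get_value("db.port.", nested=True)   # trailing '.' is stripped -> 5432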
import sys
from contextlib import contextmanager

@contextmanager  # the generator only works in a `with` block when decorated
def redirect_stdout(new_stdout):
    """Redirect the stdout

    Args:
        new_stdout (io.StringIO): New stdout to use instead
    """
    old_stdout, sys.stdout = sys.stdout, new_stdout
    try:
        yield None
    finally:
        sys.stdout = old_stdout
Redirect the stdout Args: new_stdout (io.StringIO): New stdout to use instead
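Usage as a context manager (this relies on the contextlib.contextmanager decorator on the generator above):

import io
import sys

buf = io.StringIO()
with redirect_stdout(buf):
    print("captured")
assert buf.getvalue() == "captured\n"
assert sys.stdout is not buf  # original stdout restored on exit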
def get_datafeeds(self, datafeed_id=None, params=None): """ `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_ :arg datafeed_id: The ID of the datafeeds to fetch :arg allow_no_datafeeds: Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) """ return self.transport.perform_request( "GET", _make_path("_ml", "datafeeds", datafeed_id), params=params )
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html>`_ :arg datafeed_id: The ID of the datafeeds to fetch :arg allow_no_datafeeds: Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)
def tobinary(self):
    """Return self as a binary string."""
    entrylen = struct.calcsize(self.ENTRYSTRUCT)
    rslt = []
    for (dpos, dlen, ulen, flag, typcd, nm) in self.data:
        nmlen = len(nm) + 1  # add 1 for a '\0'
        # align each record to a 16 byte boundary so xplatform C can read it
        toclen = nmlen + entrylen
        if toclen % 16 == 0:
            pad = '\0'
        else:
            padlen = 16 - (toclen % 16)
            pad = '\0' * padlen
            nmlen = nmlen + padlen
        # str(nmlen) replaces the obsolete Python 2 backtick-repr syntax
        rslt.append(struct.pack(self.ENTRYSTRUCT + str(nmlen) + 's',
                                nmlen + entrylen, dpos, dlen, ulen, flag,
                                typcd, nm + pad))
    return ''.join(rslt)
Return self as a binary string.
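On Python 3 the same packing must produce bytes rather than str; a hedged standalone equivalent for a single TOC entry (the ENTRYSTRUCT layout below is an assumption, not the original constant):

import struct

ENTRYSTRUCT = '!iiiiBB'  # assumed: entry len, dpos, dlen, ulen, flag, type code

def pack_toc_entry(dpos, dlen, ulen, flag, typcd, nm):
    entrylen = struct.calcsize(ENTRYSTRUCT)
    nmlen = len(nm) + 1                      # room for the trailing NUL
    if (nmlen + entrylen) % 16:              # pad record to a 16-byte boundary
        nmlen += 16 - ((nmlen + entrylen) % 16)
    # struct's 's' format NUL-pads the encoded name up to nmlen bytes
    return struct.pack(ENTRYSTRUCT + '%ds' % nmlen,
                       nmlen + entrylen, dpos, dlen, ulen, flag,
                       ord(typcd), nm.encode('utf-8'))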
def rm_env(user, name): ''' Remove cron environment variable for a specified user. CLI Example: .. code-block:: bash salt '*' cron.rm_env root MAILTO ''' lst = list_tab(user) ret = 'absent' rm_ = None for ind in range(len(lst['env'])): if name == lst['env'][ind]['name']: rm_ = ind if rm_ is not None: lst['env'].pop(rm_) ret = 'removed' comdat = _write_cron_lines(user, _render_tab(lst)) if comdat['retcode']: # Failed to commit, return the error return comdat['stderr'] return ret
Remove cron environment variable for a specified user. CLI Example: .. code-block:: bash salt '*' cron.rm_env root MAILTO
def add_key(self, key): """Adds a new key to this metric""" if key not in self.value: self.value[key] = ReducedMetric(self.reducer)
Adds a new key to this metric