def on_state_changed(self, state):
    """
    Connects/Disconnects to the painted event of the editor

    :param state: Enable state
    """
    if state:
        self.editor.painted.connect(self._paint_margin)
        self.editor.repaint()
    else:
        self.editor.painted.disconnect(self._paint_margin)
        self.editor.repaint()
def write_index_and_rst_files(self, overwrite: bool = False,
                              mock: bool = False) -> None:
    """
    Writes both the individual RST files and the index.

    Args:
        overwrite: allow existing files to be overwritten?
        mock: pretend to write, but don't
    """
    for f in self.files_to_index:
        if isinstance(f, FileToAutodocument):
            f.write_rst(
                prefix=self.rst_prefix,
                suffix=self.rst_suffix,
                heading_underline_char=self.source_rst_heading_underline_char,  # noqa
                overwrite=overwrite,
                mock=mock,
            )
        elif isinstance(f, AutodocIndex):
            f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
        else:
            fail("Unknown thing in files_to_index: {!r}".format(f))
    self.write_index(overwrite=overwrite, mock=mock)
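The function above calls a fail() helper that is not part of the excerpt. A minimal sketch of what such a helper might look like, assuming it simply reports the problem and aborts (the real library may log first or exit differently):

def fail(msg: str) -> None:
    # Hypothetical stand-in for the library's own helper:
    # report the problem and stop processing.
    raise ValueError(msg)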
def murmur_hash3_x86_32(data, offset, size, seed=0x01000193):
    """
    murmur3 hash function to determine partition

    :param data: (byte array), input byte array
    :param offset: (long), offset.
    :param size: (long), byte length.
    :param seed: murmur hash seed hazelcast uses 0x01000193
    :return: (int32), calculated hash value.
    """
    key = bytearray(data[offset: offset + size])
    length = len(key)
    nblocks = int(length / 4)

    h1 = seed

    c1 = 0xcc9e2d51
    c2 = 0x1b873593

    # body
    for block_start in range(0, nblocks * 4, 4):
        # little-endian load of the 4-byte block
        k1 = key[block_start + 3] << 24 | \
             key[block_start + 2] << 16 | \
             key[block_start + 1] << 8 | \
             key[block_start + 0]

        k1 = c1 * k1 & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # inlined ROTL32
        k1 = (c2 * k1) & 0xFFFFFFFF

        h1 ^= k1
        h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF  # inlined _ROTL32
        h1 = (h1 * 5 + 0xe6546b64) & 0xFFFFFFFF

    # tail
    tail_index = nblocks * 4
    k1 = 0
    tail_size = length & 3

    if tail_size >= 3:
        k1 ^= key[tail_index + 2] << 16
    if tail_size >= 2:
        k1 ^= key[tail_index + 1] << 8
    if tail_size >= 1:
        k1 ^= key[tail_index + 0]

    if tail_size != 0:
        k1 = (k1 * c1) & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # _ROTL32
        k1 = (k1 * c2) & 0xFFFFFFFF
        h1 ^= k1

    result = _fmix(h1 ^ length)

    return -(result & 0x80000000) | (result & 0x7FFFFFFF)
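The function relies on a _fmix finalizer that the excerpt does not define. The murmur3 32-bit finalization mix is standardized (the constants below are the published fmix32 constants), though the helper's name and exact form in this codebase are assumptions:

def _fmix(h):
    # Standard murmur3 fmix32 avalanche step (assumed implementation
    # of the helper referenced above).
    h ^= h >> 16
    h = (h * 0x85ebca6b) & 0xFFFFFFFF
    h ^= h >> 13
    h = (h * 0xc2b2ae35) & 0xFFFFFFFF
    h ^= h >> 16
    return h

# With _fmix in place, the hash is directly callable:
print(murmur_hash3_x86_32(bytearray(b'foo'), 0, 3))  # signed 32-bit hash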
def subdir_findall(dir, subdir):
    """
    Find all files in a subdirectory and return paths relative to dir

    This is similar to (and uses) setuptools.findall
    However, the paths returned are in the form needed for package_data
    """
    strip_n = len(dir.split('/'))
    path = '/'.join((dir, subdir))
    return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]
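For context, a sketch of how a helper like this is typically wired into a setup.py; the package name and subdirectory here are made up for illustration:

import setuptools

setuptools.setup(
    name='mypkg',
    packages=['mypkg'],
    # ship everything under mypkg/templates/ as package data
    package_data={'mypkg': subdir_findall('mypkg', 'templates')},
)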
def update_event(self, event, uid):
    """Edit event

    Parameters
    ----------
    event : iCalendar file as a string
        (calendar containing one event to be updated)
    uid : uid of event to be updated
    """
    ev_for_change = self.calendar.get(uid)
    ev_for_change.content = event
    ev_for_change.save()
def get_meta_fields(self, fields, kwargs={}):
    ''' Return a dictionary of metadata fields '''

    fields = to_list(fields)
    meta = self.get_meta()
    return {field: meta.get(field) for field in fields}
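The method depends on a to_list helper the excerpt does not define. A plausible minimal version, assuming it just normalizes a scalar into a list (the real helper may handle more types):

def to_list(value):
    # Hypothetical normalizer assumed by get_meta_fields: accept either
    # a single field name or an iterable of names.
    if isinstance(value, (list, tuple, set)):
        return list(value)
    return [value]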
def brightness_multi(x, gamma=1, gain=1, is_random=False):
    """Change the brightness of multiple images, randomly or non-randomly.

    Usually used for image segmentation where x = [X, Y]; X and Y should be
    matched.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.brightness``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if is_random:
        gamma = np.random.uniform(1 - gamma, 1 + gamma)

    results = []
    for data in x:
        results.append(exposure.adjust_gamma(data, gamma, gain))
    return np.asarray(results)
def _apply_sources(self):
    r"""Update 'A' and 'b' applying source terms to specified pores

    Notes
    -----
    Applying source terms to 'A' and 'b' is performed after (optionally)
    under-relaxing the source term to improve numerical stability. Physics
    are also updated before applying source terms to ensure that source
    terms values are associated with the current value of 'quantity'.

    In the case of a transient simulation, the updates in 'A' and 'b' also
    depend on the time scheme.
    """
    if self.settings['t_scheme'] == 'cranknicolson':
        f1 = 0.5
    else:
        f1 = 1
    phase = self.project.phases()[self.settings['phase']]
    relax = self.settings['relaxation_source']
    for item in self.settings['sources']:
        Ps = self.pores(item)
        # Add S1 to diagonal of A
        # TODO: We need this to NOT overwrite the A and b, but create
        # copy, otherwise we have to regenerate A and b on each loop
        datadiag = self._A.diagonal().copy()
        # Source term relaxation
        S1_old = phase[item + '.' + 'S1'][Ps].copy()
        S2_old = phase[item + '.' + 'S2'][Ps].copy()
        self._update_physics()
        S1 = phase[item + '.' + 'S1'][Ps]
        S2 = phase[item + '.' + 'S2'][Ps]
        S1 = relax * S1 + (1 - relax) * S1_old
        S2 = relax * S2 + (1 - relax) * S2_old
        phase[item + '.' + 'S1'][Ps] = S1
        phase[item + '.' + 'S2'][Ps] = S2
        datadiag[Ps] = datadiag[Ps] - f1 * S1
        # Add S1 to A
        self._A.setdiag(datadiag)
        # Add S2 to b
        self._b[Ps] = self._b[Ps] + f1 * S2
def _reset(self):
    """Clear internal data structure."""
    self.records = list()
    self.featsbyid = dict()
    self.featsbyparent = dict()
    self.countsbytype = dict()
def _payload(fields, values):
    """Implement the ``*_payload`` methods.

    It's frequently useful to create a dict of values that can be encoded to
    JSON and sent to the server. Unfortunately, there are mismatches between
    the field names used by NailGun and the field names the server expects.
    This method provides a default translation that works in many cases. For
    example:

    >>> from nailgun.entities import Product
    >>> product = Product(name='foo', organization=1)
    >>> set(product.get_fields())
    {
        'description',
        'gpg_key',
        'id',
        'label',
        'name',
        'organization',
        'sync_plan',
    }
    >>> set(product.get_values())
    {'name', 'organization'}
    >>> product.create_payload()
    {'organization_id': 1, 'name': 'foo'}

    :param fields: A value like what is returned by
        :meth:`nailgun.entity_mixins.Entity.get_fields`.
    :param values: A value like what is returned by
        :meth:`nailgun.entity_mixins.Entity.get_values`.
    :returns: A dict mapping field names to field values.
    """
    for field_name, field in fields.items():
        if field_name in values:
            if isinstance(field, OneToOneField):
                values[field_name + '_id'] = (
                    getattr(values.pop(field_name), 'id', None)
                )
            elif isinstance(field, OneToManyField):
                values[field_name + '_ids'] = [
                    entity.id for entity in values.pop(field_name)
                ]
            elif isinstance(field, ListField):
                def parse(obj):
                    """Parse obj payload if it is an Entity."""
                    if isinstance(obj, Entity):
                        return _payload(obj.get_fields(), obj.get_values())
                    return obj
                values[field_name] = [
                    parse(obj) for obj in values[field_name]]
    return values
def namedtuple_with_defaults(typename, field_names, default_values=[]):
    """Create a namedtuple with default values

    >>> Node = namedtuple_with_defaults('Node', 'val left right')
    >>> Node()
    Node(val=None, left=None, right=None)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
    >>> Node()
    Node(val=1, left=2, right=3)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', {'right': 7})
    >>> Node()
    Node(val=None, left=None, right=7)
    >>> Node(4)
    Node(val=4, left=None, right=7)
    """
    the_tuple = collections.namedtuple(typename, field_names)
    the_tuple.__new__.__defaults__ = (None,) * len(the_tuple._fields)
    # collections.Mapping was removed in Python 3.10; the ABC now lives in
    # collections.abc (requires ``import collections.abc``).
    if isinstance(default_values, collections.abc.Mapping):
        prototype = the_tuple(**default_values)
    else:
        prototype = the_tuple(*default_values)
    the_tuple.__new__.__defaults__ = tuple(prototype)
    return the_tuple
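Worth noting: on Python 3.7+ the standard library covers the list-of-defaults case directly via the ``defaults`` keyword (defaults apply to the rightmost fields), so a helper like this is mainly useful for the mapping form:

from collections import namedtuple

Node = namedtuple('Node', 'val left right', defaults=[None, None, 7])
print(Node(4))  # Node(val=4, left=None, right=7)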
def connect(self, source, target, witnesses):
    """
    :type source: integer
    :type target: integer
    """
    # print("Adding Edge: "+source+":"+target)
    if self.graph.has_edge(source, target):
        self.graph[source][target]["label"] += ", " + str(witnesses)
    else:
        self.graph.add_edge(source, target, label=witnesses)
def has_layer(self, class_: Type[L], became: bool = True) -> bool:
    """
    Test the presence of a given layer type.

    :param class_: Layer class you're interested in.
    :param became: Allow transformed layers in results
    """
    return (class_ in self._index or
            (became and class_ in self._transformed))
def interval_intersection_width(a, b, c, d):
    """Returns the width of the intersection of intervals [a,b] and [c,d]
    (thinking of these as intervals on the real number line)."""
    return max(0, min(b, d) - max(a, c))
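A quick worked example: [0, 5] and [3, 9] overlap on [3, 5], width 2; disjoint intervals clamp to 0.

print(interval_intersection_width(0, 5, 3, 9))  # 2 (overlap is [3, 5])
print(interval_intersection_width(0, 2, 5, 9))  # 0 (disjoint: min(2,9) - max(0,5) < 0)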
def append_column(self, header, column):
    """Append a column to end of the table.

    Parameters
    ----------
    header : str
        Title of the column
    column : iterable
        Any iterable of appropriate length.
    """
    self.insert_column(self._column_count, header, column)
def extraction_to_conll(ex: Extraction) -> List[str]:
    """
    Return a conll representation of a given input Extraction.
    """
    ex = split_predicate(ex)
    toks = ex.sent.split(' ')
    ret = ['*'] * len(toks)
    args = [ex.arg1] + ex.args2
    rels_and_args = [("ARG{}".format(arg_ind), arg)
                     for arg_ind, arg in enumerate(args)] + \
                    [(rel_part.elem_type, rel_part) for rel_part in ex.rel]

    for rel, arg in rels_and_args:
        # Add brackets
        cur_start_ind = char_to_word_index(arg.span[0], ex.sent)
        cur_end_ind = char_to_word_index(arg.span[1], ex.sent)
        ret[cur_start_ind] = "({}{}".format(rel, ret[cur_start_ind])
        ret[cur_end_ind] += ')'
    return ret
def parse_list_objects_v2(data, bucket_name):
    """
    Parser for list objects version 2 response.

    :param data: Response data for list objects.
    :param bucket_name: Name of the bucket.
    :return: Returns three distinct components:
        - List of :class:`Object <Object>`
        - True if list is truncated, False otherwise.
        - Continuation Token for the next request.
    """
    root = S3Element.fromstring('ListObjectV2Result', data)

    is_truncated = root.get_child_text('IsTruncated').lower() == 'true'
    # NextContinuationToken may not be present.
    continuation_token = root.get_child_text('NextContinuationToken',
                                             strict=False)
    objects, object_dirs = _parse_objects_from_xml_elts(
        bucket_name,
        root.findall('Contents'),
        root.findall('CommonPrefixes')
    )
    return objects + object_dirs, is_truncated, continuation_token
def op(cls, text, *args, **kwargs):
    """ This method must be overridden in derived classes """
    return cls.fn(text, *args, **kwargs)
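To illustrate the dispatch pattern, a self-contained sketch with made-up class names: the base class's op forwards to whatever fn a subclass provides.

class TextOp:
    @classmethod
    def op(cls, text, *args, **kwargs):
        # Forward to the subclass-provided fn.
        return cls.fn(text, *args, **kwargs)


class Upper(TextOp):
    @staticmethod
    def fn(text, *args, **kwargs):
        return text.upper()


print(Upper.op('hello'))  # -> 'HELLO'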
def run_simulations(self, parameter_list, data_folder):
    """
    Run several simulations using a certain combination of parameters.

    Yields results as simulations are completed.

    Args:
        parameter_list (list): list of parameter combinations to simulate.
        data_folder (str): folder in which to save subfolders containing
            simulation output.
    """
    for idx, parameter in enumerate(parameter_list):
        current_result = {
            'params': {},
            'meta': {}
        }
        current_result['params'].update(parameter)

        command = [self.script_executable] + ['--%s=%s' % (param, value)
                                              for param, value in parameter.items()]

        # Run from dedicated temporary folder
        current_result['meta']['id'] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result['meta']['id'])
        os.makedirs(temp_dir)

        start = time.time()  # Time execution
        stdout_file_path = os.path.join(temp_dir, 'stdout')
        stderr_file_path = os.path.join(temp_dir, 'stderr')
        with open(stdout_file_path, 'w') as stdout_file, open(
                stderr_file_path, 'w') as stderr_file:
            return_code = subprocess.call(command, cwd=temp_dir,
                                          env=self.environment,
                                          stdout=stdout_file,
                                          stderr=stderr_file)
        end = time.time()  # Time execution

        if return_code > 0:
            complete_command = [self.script]
            complete_command.extend(command[1:])
            complete_command = "python waf --run \"%s\"" % (
                ' '.join(complete_command))
            with open(stdout_file_path, 'r') as stdout_file, open(
                    stderr_file_path, 'r') as stderr_file:
                raise Exception(('Simulation exited with an error.\n'
                                 'Params: %s\n'
                                 '\nStderr: %s\n'
                                 'Stdout: %s\n'
                                 'Use this command to reproduce:\n'
                                 '%s' % (parameter, stderr_file.read(),
                                         stdout_file.read(),
                                         complete_command)))

        current_result['meta']['elapsed_time'] = end - start

        yield current_result
def transform(self, transformer):
    """ Add transformer to flow and apply transformer to data in flow

    Parameters
    ----------
    transformer : Transformer
        a transformer to transform data
    """
    self.transformers.append(transformer)
    from languageflow.transformer.tagged import TaggedTransformer
    if isinstance(transformer, TaggedTransformer):
        self.X, self.y = transformer.transform(self.sentences)
    if isinstance(transformer, TfidfVectorizer):
        self.X = transformer.fit_transform(self.X)
    if isinstance(transformer, CountVectorizer):
        self.X = transformer.fit_transform(self.X)
    if isinstance(transformer, NumberRemover):
        self.X = transformer.transform(self.X)
    if isinstance(transformer, MultiLabelBinarizer):
        self.y = transformer.fit_transform(self.y)
def addDrizKeywords(self, hdr, versions):
    """ Add drizzle parameter keywords to header. """

    # Extract some global information for the keywords
    _geom = 'User parameters'

    _imgnum = 0
    for pl in self.parlist:
        # Start by building up the keyword prefix based
        # on the image number for the chip
        #_keyprefix = 'D%03d'%_imgnum
        _imgnum += 1

        drizdict = DRIZ_KEYWORDS.copy()
        # Update drizdict with current values
        drizdict['VER']['value'] = pl['driz_version'][:44]
        drizdict['DATA']['value'] = pl['data'][:64]
        drizdict['DEXP']['value'] = pl['exptime']
        drizdict['OUDA']['value'] = pl['outFinal'][:64]
        drizdict['OUWE']['value'] = pl['outWeight'][:64]
        if pl['outContext'] is None:
            outcontext = ""
        else:
            outcontext = pl['outContext'][:64]
        drizdict['OUCO']['value'] = outcontext
        if self.single:
            drizdict['MASK']['value'] = pl['singleDrizMask'][:64]
        else:
            drizdict['MASK']['value'] = pl['finalMask'][:64]

        # Process the values of WT_SCL to be consistent with
        # what IRAF Drizzle would output
        if 'wt_scl_val' in pl:
            _wtscl = pl['wt_scl_val']
        else:
            if pl['wt_scl'] == 'exptime':
                _wtscl = pl['exptime']
            elif pl['wt_scl'] == 'expsq':
                _wtscl = pl['exptime'] * pl['exptime']
            else:
                _wtscl = pl['wt_scl']

        drizdict['WTSC']['value'] = _wtscl
        drizdict['KERN']['value'] = pl['kernel']
        drizdict['PIXF']['value'] = pl['pixfrac']
        drizdict['OUUN']['value'] = self.units
        if pl['fillval'] is None:
            _fillval = 'INDEF'
        else:
            _fillval = pl['fillval']
        drizdict['FVAL']['value'] = _fillval
        drizdict['WKEY']['value'] = pl['driz_wcskey']

        drizdict['SCAL'] = {'value': pl['scale'],
                            'comment': 'Drizzle, pixel size (arcsec) of output image'}
        drizdict['ISCL'] = {'value': pl['idcscale'],
                            'comment': 'Drizzle, default IDCTAB pixel size(arcsec)'}

        # Now update header with values
        writeDrizKeywords(hdr, _imgnum, drizdict)
        del drizdict

    # Add version information as HISTORY cards to the header
    if versions is not None:
        ver_str = "AstroDrizzle processing performed using: "
        hdr.add_history(ver_str)
        for k in versions.keys():
            ver_str = ' ' + str(k) + ' Version ' + str(versions[k])
            hdr.add_history(ver_str)
def applyHotspot(self, lon, lat):
    """
    Exclude objects that are too close to hotspot

    True if passes hotspot cut
    """
    self.loadRealResults()
    cut_detect_real = (self.data_real['SIG'] >=
                       self.config[self.algorithm]['sig_threshold'])
    lon_real = self.data_real['RA'][cut_detect_real]
    lat_real = self.data_real['DEC'][cut_detect_real]

    cut_hotspot = np.tile(True, len(lon))
    for ii in range(0, len(lon)):
        cut_hotspot[ii] = ~np.any(
            angsep(lon[ii], lat[ii], lon_real, lat_real)
            < self.config[self.algorithm]['hotspot_angsep_threshold'])

    return cut_hotspot
def notify(self, level, value, target=None, ntype=None, rule=None):
    """Notify main reactor about event."""

    # Did we see the event before?
    if target in self.state and level == self.state[target]:
        return False

    # Do we see the event first time?
    if target not in self.state and level == 'normal' \
            and not self.reactor.options['send_initial']:
        return False

    self.state[target] = level
    return self.reactor.notify(level, self, value, target=target,
                               ntype=ntype, rule=rule)
def variables(template):
    '''Returns the set of keywords in a uri template'''
    vars = set()
    for varlist in TEMPLATE.findall(template):
        if varlist[0] in OPERATOR:
            varlist = varlist[1:]
        varspecs = varlist.split(',')
        for var in varspecs:
            # handle prefix values
            var = var.split(':')[0]
            # handle composite values
            if var.endswith('*'):
                var = var[:-1]
            vars.add(var)
    return vars
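TEMPLATE and OPERATOR come from the surrounding module and are not shown. A self-contained approximation in the spirit of RFC 6570; the exact regex and operator set in the real module may differ:

import re

TEMPLATE = re.compile(r'{([^}]+)}')  # assumed: capture each {...} expression
OPERATOR = '+#./;?&|!@'              # assumed: RFC 6570 operator characters

print(variables('http://example.com/{user}/search{?page,size}'))
# -> {'user', 'page', 'size'}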
def leaveEvent(self, event):
    """If cursor has not been restored yet, do it now"""
    if self.__cursor_changed:
        QApplication.restoreOverrideCursor()
        self.__cursor_changed = False
    self.QT_CLASS.leaveEvent(self, event)
def from_dict(data, ctx):
    """
    Instantiate a new UnitsAvailable from a dict (generally from loading a
    JSON response). The data used to instantiate the UnitsAvailable is a
    shallow copy of the dict passed in, with any complex child types
    instantiated appropriately.
    """
    data = data.copy()

    if data.get('default') is not None:
        data['default'] = \
            ctx.order.UnitsAvailableDetails.from_dict(
                data['default'], ctx
            )

    if data.get('reduceFirst') is not None:
        data['reduceFirst'] = \
            ctx.order.UnitsAvailableDetails.from_dict(
                data['reduceFirst'], ctx
            )

    if data.get('reduceOnly') is not None:
        data['reduceOnly'] = \
            ctx.order.UnitsAvailableDetails.from_dict(
                data['reduceOnly'], ctx
            )

    if data.get('openOnly') is not None:
        data['openOnly'] = \
            ctx.order.UnitsAvailableDetails.from_dict(
                data['openOnly'], ctx
            )

    return UnitsAvailable(**data)
def standard_to_absl(level):
    """Converts an integer level from the standard value to the absl value.

    Args:
      level: int, a Python standard logging level.

    Raises:
      TypeError: Raised when level is not an integer.

    Returns:
      The corresponding integer level for use in absl logging.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    if level < 0:
        level = 0
    if level < STANDARD_DEBUG:
        # Maps to vlog levels.
        return STANDARD_DEBUG - level + 1
    elif level < STANDARD_INFO:
        return ABSL_DEBUG
    elif level < STANDARD_WARNING:
        return ABSL_INFO
    elif level < STANDARD_ERROR:
        return ABSL_WARNING
    elif level < STANDARD_CRITICAL:
        return ABSL_ERROR
    else:
        return ABSL_FATAL
def get_docs(r_session, url, encoder=None, headers=None, **params):
    """
    Provides a helper for functions that require GET or POST requests
    with a JSON, text, or raw response containing documents.

    :param r_session: Authentication session from the client
    :param str url: URL containing the endpoint
    :param JSONEncoder encoder: Custom encoder from the client
    :param dict headers: Optional HTTP Headers to send with the request

    :returns: Raw response content from the specified endpoint
    """
    keys_list = params.pop('keys', None)
    keys = None
    if keys_list is not None:
        keys = json.dumps({'keys': keys_list}, cls=encoder)
    f_params = python_to_couch(params)
    resp = None
    if keys is not None:
        # If we're using POST we are sending JSON so add the header
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'application/json'
        resp = r_session.post(url, headers=headers, params=f_params, data=keys)
    else:
        resp = r_session.get(url, headers=headers, params=f_params)
    resp.raise_for_status()
    return resp
def validateAQLQuery(self, query, bindVars=None, options=None):
    "Returns the server answer if the query is valid. Raises an AQLQueryError if not."
    if bindVars is None:
        bindVars = {}
    if options is None:
        options = {}
    payload = {'query': query, 'bindVars': bindVars, 'options': options}
    r = self.connection.session.post(self.cursorsURL,
                                     data=json.dumps(payload, default=str))
    data = r.json()
    if r.status_code == 201 and not data["error"]:
        return data
    else:
        raise AQLQueryError(data["errorMessage"], query, data)
def compute_log_degrees(brands, exemplars):
    """
    For each follower, let Z be the total number of brands they follow.
    Return a dictionary of 1. / log(Z), for each follower.
    """
    counts = Counter()
    for followers in brands.values():  # + exemplars.values():
        # Include exemplars in these counts? No, don't want to penalize
        # people who follow many exemplars.
        counts.update(followers)
    counts.update(counts.keys())  # Add 1 to each count.
    for k in counts:
        counts[k] = 1. / math.log(counts[k])
    return counts
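A tiny worked example with toy data: after the +1 smoothing, a user following two brands gets weight 1/log(3), a user following one brand gets 1/log(2).

import math
from collections import Counter

brands = {'brandA': ['u1', 'u2'], 'brandB': ['u1']}
weights = compute_log_degrees(brands, exemplars={})
print(round(weights['u1'], 3))  # 1/log(3) ~= 0.910
print(round(weights['u2'], 3))  # 1/log(2) ~= 1.443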
def _load_history_from_file(path, size=-1):
    """Load a history list from a file and split it into lines.

    :param path: the path to the file that should be loaded
    :type path: str
    :param size: the number of lines to load (0 means no lines,
                 < 0 means all lines)
    :type size: int
    :returns: a list of history items (the lines of the file)
    :rtype: list(str)
    """
    if size == 0:
        return []
    if os.path.exists(path):
        with codecs.open(path, 'r', encoding='utf-8') as histfile:
            lines = [line.rstrip('\n') for line in histfile]
            if size > 0:
                lines = lines[-size:]
        return lines
    else:
        return []
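A quick round trip, using a temporary file for illustration (assumes the os and codecs imports the function itself relies on):

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.hist', delete=False) as f:
    f.write('first\nsecond\nthird\n')

print(_load_history_from_file(f.name))          # ['first', 'second', 'third']
print(_load_history_from_file(f.name, size=2))  # ['second', 'third']
os.unlink(f.name)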
def cli(env, identifier):
    """View details of a placement group.

    IDENTIFIER can be either the Name or Id of the placement group you want to view
    """
    manager = PlacementManager(env.client)
    group_id = helpers.resolve_id(manager.resolve_ids, identifier, 'placement_group')
    result = manager.get_object(group_id)

    table = formatting.Table(["Id", "Name", "Backend Router", "Rule", "Created"])
    table.add_row([
        result['id'],
        result['name'],
        result['backendRouter']['hostname'],
        result['rule']['name'],
        result['createDate']
    ])

    guest_table = formatting.Table([
        "Id", "FQDN", "Primary IP", "Backend IP", "CPU", "Memory",
        "Provisioned", "Transaction"
    ])
    for guest in result['guests']:
        guest_table.add_row([
            guest.get('id'),
            guest.get('fullyQualifiedDomainName'),
            guest.get('primaryIpAddress'),
            guest.get('primaryBackendIpAddress'),
            guest.get('maxCpu'),
            guest.get('maxMemory'),
            guest.get('provisionDate'),
            formatting.active_txn(guest)
        ])

    env.fout(table)
    env.fout(guest_table)
def build(self, builder):
    """Build XML by appending to builder"""
    params = {}

    if self.edit_point is not None:
        params["EditPoint"] = self.edit_point

    if self.used_imputation_method is not None:
        params['UsedImputationMethod'] = bool_to_yes_no(self.used_imputation_method)

    if self.audit_id is not None:
        params['ID'] = str(self.audit_id)

    if self.include_file_oid is not None:
        params['mdsol:IncludeFileOID'] = bool_to_yes_no(self.include_file_oid)

    builder.start("AuditRecord", params)

    if self.user_ref is None:
        raise ValueError("User Reference not set.")
    self.user_ref.build(builder)

    if self.location_ref is None:
        raise ValueError("Location Reference not set.")
    self.location_ref.build(builder)

    if self.date_time_stamp is None:
        raise ValueError("DateTime not set.")
    self.date_time_stamp.build(builder)

    # Optional
    if self.source_id:
        self.source_id.build(builder)

    # Optional
    if self.reason_for_change is not None:
        self.reason_for_change.build(builder)

    builder.end("AuditRecord")
def rotate(name, **kwargs):
    '''
    Add a log to the logadm configuration

    name : string
        alias for entryname
    kwargs : boolean|string|int
        optional additional flags and parameters
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # cleanup kwargs
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    # inject name as entryname
    if 'entryname' not in kwargs:
        kwargs['entryname'] = name

    # figure out log_file and entryname
    if 'log_file' not in kwargs or not kwargs['log_file']:
        if 'entryname' in kwargs and kwargs['entryname']:
            if kwargs['entryname'].startswith('/'):
                kwargs['log_file'] = kwargs['entryname']

    # check for log_file
    if 'log_file' not in kwargs or not kwargs['log_file']:
        ret['result'] = False
        ret['comment'] = 'Missing log_file attribute!'
    else:
        # lookup old configuration
        old_config = __salt__['logadm.list_conf']()

        # remove existing entry
        if kwargs['log_file'] in old_config:
            res = __salt__['logadm.remove'](kwargs['entryname'] if 'entryname' in kwargs else kwargs['log_file'])
            ret['result'] = 'Error' not in res
            if not ret['result']:
                ret['comment'] = res['Error']
                ret['changes'] = {}

        # add new entry
        res = __salt__['logadm.rotate'](name, **kwargs)
        ret['result'] = 'Error' not in res
        if ret['result']:
            new_config = __salt__['logadm.list_conf']()
            ret['comment'] = 'Log configuration {}'.format('updated' if kwargs['log_file'] in old_config else 'added')
            if kwargs['log_file'] in old_config:
                for key, val in salt.utils.data.compare_dicts(old_config[kwargs['log_file']],
                                                              new_config[kwargs['log_file']]).items():
                    ret['changes'][key] = val['new']
            else:
                ret['changes'] = new_config[kwargs['log_file']]
            log.debug(ret['changes'])
        else:
            ret['comment'] = res['Error']
            # NOTE: we need to remove the log file first
            #       potentially the log configuration can get lost :s
            if kwargs['log_file'] in old_config:
                ret['changes'] = {kwargs['log_file']: None}
            else:
                ret['changes'] = {}

    return ret
def _get_or_insert(*args, **kwds):
    """Transactionally retrieves an existing entity or creates a new one.

    Positional Args:
      name: Key name to retrieve or create.

    Keyword Args:
      namespace: Optional namespace.
      app: Optional app ID.
      parent: Parent entity key, if any.
      context_options: ContextOptions object (not keyword args!) or None.
      **kwds: Keyword arguments to pass to the constructor of the model class
        if an instance for the specified key name does not already exist. If
        an instance with the supplied key_name and parent already exists,
        these arguments will be discarded.

    Returns:
      Existing instance of Model class with the specified key name and parent
      or a new one that has just been created.
    """
    cls, args = args[0], args[1:]
    return cls._get_or_insert_async(*args, **kwds).get_result()
def initLogger():
    '''
    This code taken from Matt's Suspenders for initializing a logger
    '''
    global logger
    logger = logging.getLogger('root')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s",
                                  "%Y-%m-%d %H:%M:%S")
    ch.setFormatter(formatter)
    logger.addHandler(ch)
def add_job(cls, identifier, queue_name=None, priority=0, queue_model=None,
            prepend=False, delayed_for=None, delayed_until=None,
            **fields_if_new):
    """
    Add a job to a queue.

    If this job already exists, check its current priority. If it's higher
    than the new one, don't touch it, else move the job to the wanted queue.
    Before setting/moving the job to the queue, check for a `delayed_for`
    (int/float/timedelta) or `delayed_until` (datetime) argument to see if
    it must be delayed instead of queued.
    If the job is created, fields in fields_if_new will be set for the new
    job.
    Finally return the job.
    """
    # check for delayed_for/delayed_until arguments
    delayed_until = compute_delayed_until(delayed_for, delayed_until)

    # create the job or get an existing one
    job_kwargs = {'identifier': identifier, 'queued': '1'}

    retries = 0
    while retries < 10:
        retries += 1
        try:
            job, created = cls.get_or_connect(**job_kwargs)
        except IndexError:
            # Failure during the retrieval
            # https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one already in the queue!
            try:
                job = cls.collection(**job_kwargs).instances()[0]
            except IndexError:
                # but no more now?!
                # => retry
                continue
            else:
                created = False

        # ok we have our job, stop now
        break

    try:
        # check queue_name
        queue_name = cls._get_queue_name(queue_name)

        # if the job already exists, and we want a higher priority or to
        # move it, start by updating it
        if not created:
            current_priority = int(job.priority.hget() or 0)

            # if the job has a higher priority, or doesn't need to be moved,
            # don't move it
            if not prepend and current_priority >= priority:
                return job

            # cancel it temporarily, we'll set it as waiting later
            job.status.hset(STATUSES.CANCELED)

            # remove it from the current queue, we'll add it to the new one
            # later
            if queue_model is None:
                queue_model = cls.queue_model
            current_queue = queue_model.get_queue(queue_name, current_priority)
            current_queue.waiting.lrem(0, job.ident)

        else:
            job.set_fields(added=str(datetime.utcnow()),
                           **(fields_if_new or {}))

        # add the job to the queue
        job.enqueue_or_delay(queue_name, priority, delayed_until, prepend,
                             queue_model)

        return job

    except Exception:
        job.queued.delete()
        raise
def resolve_return_value_options(self, options):
    """Handle dynamic option value lookups in the format ^^task_name.attr"""
    for key, value in options.items():
        if isinstance(value, str) and value.startswith(RETURN_VALUE_OPTION_PREFIX):
            path, name = value[len(RETURN_VALUE_OPTION_PREFIX):].rsplit(".", 1)
            result = self._find_result_by_path(path)
            options[key] = result.return_values.get(name)
def gps_date_time_send(self, year, month, day, hour, min, sec, clockStat,
                       visSat, useSat, GppGl, sigUsedMask, percentUsed,
                       force_mavlink1=False):
    '''
    Pilot console PWM messages.

    year          : Year reported by Gps (uint8_t)
    month         : Month reported by Gps (uint8_t)
    day           : Day reported by Gps (uint8_t)
    hour          : Hour reported by Gps (uint8_t)
    min           : Min reported by Gps (uint8_t)
    sec           : Sec reported by Gps (uint8_t)
    clockStat     : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
    visSat        : Visible satellites reported by Gps (uint8_t)
    useSat        : Used satellites in Solution (uint8_t)
    GppGl         : GPS+GLONASS satellites in Solution (uint8_t)
    sigUsedMask   : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
    percentUsed   : Percent used GPS (uint8_t)
    '''
    return self.send(self.gps_date_time_encode(year, month, day, hour, min,
                                               sec, clockStat, visSat, useSat,
                                               GppGl, sigUsedMask, percentUsed),
                     force_mavlink1=force_mavlink1)
def metadata_path(self, m_path):
    """Provide pointers to the paths of the metadata file

    Args:
        m_path: Path to metadata file
    """
    if not m_path:
        self.metadata_dir = None
        self.metadata_file = None
    else:
        if not op.exists(m_path):
            raise OSError('{}: file does not exist!'.format(m_path))

        if not op.dirname(m_path):
            self.metadata_dir = '.'
        else:
            self.metadata_dir = op.dirname(m_path)
        self.metadata_file = op.basename(m_path)
def sys_info():
    """Return useful information about IPython and the system, as a string.

    Example
    -------
    In [2]: print sys_info()
    {'commit_hash': '144fdae',        # random
     'commit_source': 'repository',
     'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
     'ipython_version': '0.11.dev',
     'os_name': 'posix',
     'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
     'sys_executable': '/usr/bin/python',
     'sys_platform': 'linux2',
     'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
    """
    p = os.path
    path = p.dirname(p.abspath(p.join(__file__, '..')))
    return pprint.pformat(pkg_info(path))
def derivatives(self, x, y, grid_interp_x=None, grid_interp_y=None, f_=None,
                f_x=None, f_y=None, f_xx=None, f_yy=None, f_xy=None):
    """
    returns df/dx and df/dy of the function
    """
    #self._check_interp(grid_interp_x, grid_interp_y, f_, f_x, f_y, f_xx, f_yy, f_xy)
    n = len(np.atleast_1d(x))
    if n <= 1 and np.shape(x) == ():
        #if type(x) == float or type(x) == int or type(x) == type(np.float64(1)) or len(x) <= 1:
        f_x_out = self.f_x_interp(x, y, grid_interp_x, grid_interp_y, f_x)
        f_y_out = self.f_y_interp(x, y, grid_interp_x, grid_interp_y, f_y)
        return f_x_out[0][0], f_y_out[0][0]
    else:
        if self._grid and n >= self._min_grid_number:
            x_, y_ = util.get_axes(x, y)
            f_x_out = self.f_x_interp(x_, y_, grid_interp_x, grid_interp_y, f_x)
            f_y_out = self.f_y_interp(x_, y_, grid_interp_x, grid_interp_y, f_y)
            f_x_out = util.image2array(f_x_out)
            f_y_out = util.image2array(f_y_out)
        else:
            #n = len(x)
            f_x_out, f_y_out = np.zeros(n), np.zeros(n)
            for i in range(n):
                f_x_out[i] = self.f_x_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_x)
                f_y_out[i] = self.f_y_interp(x[i], y[i], grid_interp_x, grid_interp_y, f_y)
        return f_x_out, f_y_out
def vms(message, level=1):
    """Writes the specified message *only* if verbose output is enabled."""
    if verbose is not None and verbose != False:
        if isinstance(verbose, bool) or (isinstance(verbose, int) and level <= verbose):
            std(message)
def get_alerts_unarchived(self):
    """Return a list of Alerts unarchived."""
    js = json.dumps({'_sort': '-time', 'archived': False})
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'list/alarm', params)
def _implicit_solver(self):
    """Inverts and solves the matrix problem for diffusion matrix
    and temperature T.

    The method is called by the
    :func:`~climlab.process.implicit.ImplicitProcess._compute()` function
    of the :class:`~climlab.process.implicit.ImplicitProcess` class and
    solves the matrix problem

    .. math::

        A \\cdot T_{\\textrm{new}} = T_{\\textrm{old}}

    for diffusion matrix A and corresponding temperatures.
    :math:`T_{\\textrm{old}}` is in this case the current state variable
    which already has been adjusted by the explicit processes.
    :math:`T_{\\textrm{new}}` is the new state of the variable. To derive
    the temperature tendency of the diffusion process the adjustment has
    to be calculated and multiplied with the timestep, which is done by
    the :func:`~climlab.process.implicit.ImplicitProcess._compute()`
    function of the :class:`~climlab.process.implicit.ImplicitProcess`
    class.

    This method calculates the matrix inversion for every state variable,
    calling either :func:`solve_implicit_banded()` or
    :py:func:`numpy.linalg.solve()` depending on the flag
    ``self.use_banded_solver``.

    :ivar dict state:              method uses current state variables
                                   but does not modify them
    :ivar bool use_banded_solver:  input flag whether to use
                                   :func:`_solve_implicit_banded()` or
                                   :py:func:`numpy.linalg.solve()` to do
                                   the matrix inversion
    :ivar array _diffTriDiag:      the diffusion matrix which is given
                                   with the current state variable to
                                   the method solving the matrix problem
    """
    #if self.update_diffusivity:
    # Time-stepping the diffusion is just inverting this matrix problem:
    newstate = {}
    for varname, value in self.state.items():
        if self.use_banded_solver:
            newvar = _solve_implicit_banded(value, self._diffTriDiag)
        else:
            newvar = np.linalg.solve(self._diffTriDiag, value)
        newstate[varname] = newvar
    return newstate
def interpret(self, msg):
    """ Try and find the image file; some magic here would be good.

    FIXME: move elsewhere and make so everyone can use.
    An interpreter that finds things?
    """
    for gallery in msg.get('galleries', []):
        self.add_folder(gallery)

    image_file = msg.get('image')
    if not image_file:
        return

    return self.find_image(image_file)
def gatk_variant_filtration(job, vcf_id, filter_name, filter_expression,
                            ref_fasta, ref_fai, ref_dict):
    """
    Filters VCF file using GATK VariantFiltration. Fixes extra pair of
    quotation marks in VCF header that may interfere with other VCF tools.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str vcf_id: FileStoreID for input VCF file
    :param str filter_name: Name of filter for VCF header
    :param str filter_expression: JEXL filter expression
    :param str ref_fasta: FileStoreID for reference genome fasta
    :param str ref_fai: FileStoreID for reference genome index file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :return: FileStoreID for filtered VCF file
    :rtype: str
    """
    inputs = {'genome.fa': ref_fasta,
              'genome.fa.fai': ref_fai,
              'genome.dict': ref_dict,
              'input.vcf': vcf_id}
    work_dir = job.fileStore.getLocalTempDir()
    for name, file_store_id in inputs.iteritems():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))

    command = ['-T', 'VariantFiltration',
               '-R', 'genome.fa',
               '-V', 'input.vcf',
               '--filterName', filter_name,  # Documents filter name in header
               '--filterExpression', filter_expression,
               '-o', 'filtered_variants.vcf']

    job.fileStore.logToMaster('Running GATK VariantFiltration using {name}: '
                              '{expression}'.format(name=filter_name,
                                                    expression=filter_expression))
    # The original passed 'log-driver' without the leading dashes, which
    # docker would treat as a positional argument; the flag is '--log-driver'.
    docker_parameters = ['--rm', '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory)]
    dockerCall(job=job, workDir=work_dir, parameters=command,
               tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               dockerParameters=docker_parameters)

    # Remove extra quotation marks around filter expression.
    malformed_header = os.path.join(work_dir, 'filtered_variants.vcf')
    fixed_header = os.path.join(work_dir, 'fixed_header.vcf')
    filter_regex = re.escape('"%s"' % filter_expression)
    with open(malformed_header, 'r') as f, open(fixed_header, 'w') as g:
        for line in f:
            g.write(re.sub(filter_regex, filter_expression, line))
    return job.fileStore.writeGlobalFile(fixed_header)
def download_cf_standard_name_table(version, location=None):
    '''
    Downloads the specified CF standard name table version and saves it to file

    :param str version: CF standard name table version number (i.e 34)
    :param str location: Path/filename to write downloaded xml file to
    '''
    if location is None:
        # This case occurs when updating the packaged version from command line
        location = resource_filename('compliance_checker',
                                     'data/cf-standard-name-table.xml')

    url = "http://cfconventions.org/Data/cf-standard-names/{0}/src/cf-standard-name-table.xml".format(version)
    r = requests.get(url, allow_redirects=True)
    if r.status_code == 200:
        print("Downloading cf-standard-names table version {0} from: {1}".format(version, url),
              file=sys.stderr)
        with open(location, 'wb') as f:
            f.write(r.content)
    else:
        r.raise_for_status()
    return
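Typical invocations, assuming network access and the compliance_checker package layout the function expects (the output path below is illustrative):

# Refresh the copy bundled with compliance_checker:
download_cf_standard_name_table(34)

# Or write a specific version somewhere else:
download_cf_standard_name_table(34, '/tmp/cf-standard-name-table.xml')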
def get_val(self, x):
    """Converts to int."""
    try:
        if self.subtype == 'integer':
            return int(round(x[self.col_name]))
        else:
            if np.isnan(x[self.col_name]):
                return self.default_val
            return x[self.col_name]
    except (ValueError, TypeError):
        return self.default_val
def parse_charset(header_string):
    '''Parse a "Content-Type" string for the document encoding.

    Returns:
        str, None
    '''
    match = re.search(
        r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)''',
        header_string,
        re.IGNORECASE
    )

    if match:
        return match.group(1)
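For example (note the regex tolerates optional quotes and is case-insensitive, and the function falls through to None when no charset is present):

import re

print(parse_charset('text/html; charset=UTF-8'))         # 'UTF-8'
print(parse_charset('text/html; charset="iso-8859-1"'))  # 'iso-8859-1'
print(parse_charset('text/plain'))                       # None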
def get_ids_in_region(
        self, resource, resolution,
        x_range, y_range, z_range, time_range=[0, 1]):
    """Get all ids in the region defined by x_range, y_range, z_range.

    Args:
        resource (intern.resource.Resource): An annotation channel.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
        y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
        z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
        time_range (optional [list[int]]): time range such as [30, 40] which
            means t>=30 and t<40. Defaults to [0, 1].

    Returns:
        (list[int]): Example: [1, 2, 25].

    Raises:
        requests.HTTPError
        TypeError: if resource is not an annotation channel.
    """
    return self.service.get_ids_in_region(
        resource, resolution,
        x_range, y_range, z_range, time_range,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
def markets(self):
    '''
    Get the list of real-time markets

    :return: pd.DataFrame or None
    '''
    with self.client.connect(*self.bestip):
        data = self.client.get_markets()
        return self.client.to_df(data)

    return None
def danke(client, event, channel, nick, rest):
    'Danke schön!'
    if rest:
        rest = rest.strip()
        Karma.store.change(rest, 1)
        rcpt = rest
    else:
        rcpt = channel
    return f'Danke schön, {rcpt}! Danke schön!'
def childRecords(self):
    """
    Returns a record set of children for this item based on the record.
    If no record set is manually set for this instance, then it will use
    the hierarchyColumn value from the tree widget with this record. If
    no hierarchyColumn is specified, then a blank record set is returned.

    :return     <orb.RecordSet>
    """
    if self._childRecords is not None:
        return self._childRecords

    tree = self.treeWidget()
    try:
        table, column = tree.hierarchyLookup(self.record())
    except AttributeError:
        table = None
        column = ''

    # returns the children for this information
    if table and column:
        return table.select(where=Q(column) == self.record())

    # returns a blank record set if no other records can be found
    return RecordSet()
def flatten_dict(d, prefix='', sep='.'):
    """In place dict flattening.
    """
    def apply_and_resolve_conflicts(dest, item, prefix):
        for k, v in flatten_dict(item, prefix=prefix, sep=sep).items():
            new_key = k
            i = 2
            while new_key in d:
                new_key = '{key}{sep}{index}'.format(key=k, sep=sep, index=i)
                i += 1
            dest[new_key] = v

    for key in list(d.keys()):
        # The original tested ``any(unicode(prefix))``, which breaks on
        # Python 3; a plain truthiness check is equivalent for strings.
        if prefix:
            new_key = u'{p}{sep}{key}'.format(p=prefix, key=key, sep=sep)
        else:
            new_key = key

        if isinstance(d[key], (dict, collections.Mapping)):
            apply_and_resolve_conflicts(d, d.pop(key), new_key)
        elif isinstance(d[key], six.string_types):
            d[new_key] = d.pop(key)
        elif isinstance(d[key], (list, collections.Mapping)):
            array = d.pop(key)
            for i in range(len(array)):
                index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=i)
                # Regenerate the key until it is free; the original bumped
                # the counter without rebuilding index_key, which could
                # loop forever on a collision.
                j = i
                while index_key in d:
                    j += 1
                    index_key = '{key}{sep}{i}'.format(key=key, sep=sep, i=j)
                apply_and_resolve_conflicts(d, array[i], index_key)
        else:
            d[new_key] = d.pop(key)
    return d
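A small illustration of the flattening on nested dicts, which is the path the helper handles cleanly (assumes the module's collections and six imports are in place):

d = {'a': {'b': 1, 'c': {'d': 2}}, 'e': 'x'}
print(flatten_dict(d))
# {'a.b': 1, 'a.c.d': 2, 'e': 'x'}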
def determine_hbonds_for_drawing(self, analysis_cutoff):
    """
    Since plotting all hydrogen bonds could lead to a messy plot, a cutoff
    has to be implemented. In this function the frequency of each hydrogen
    bond is summed and the total compared against the analysis cutoff - a
    fraction multiplied by trajectory count. Those hydrogen bonds that are
    present for longer than the analysis cutoff will be plotted in the
    final plot.

    Takes:
        * analysis_cutoff * - (user-defined) fraction of time a hydrogen
          bond has to be present for to be plotted (default - 0.3). It is
          multiplied by number of trajectories

    Output:
        * frequency * - dictionary of hydrogen bond donor-acceptor indices
          and frequencies. These hydrogen bonds will be plotted in the
          final image.
    """
    self.frequency = defaultdict(int)
    for traj in self.hbonds_by_type:
        for bond in self.hbonds_by_type[traj]:
            # frequency[(residue_atom_idx, ligand_atom_name, residue_atom_name)] = frequency
            # residue atom name will be used to determine if hydrogen bond
            # is interacting with a sidechain or backbone
            if bond["donor_resnm"] != "LIG":
                self.frequency[(bond["donor_idx"], bond["acceptor_atom"],
                                bond["donor_atom"], bond["acceptor_idx"])] += bond["frequency"]
            # check whether ligand is donor or acceptor
            else:
                self.frequency[(bond["acceptor_idx"], bond["donor_atom"],
                                bond["acceptor_atom"], bond["donor_idx"])] += bond["frequency"]

    # Add the frequency counts
    self.frequency = {i: self.frequency[i] for i in self.frequency
                      if self.frequency[i] > (int(len(self.trajectory)) * analysis_cutoff)}

    # change the ligand atomname to a heavy atom - required for plot since
    # only heavy atoms are shown in the final image
    self.hbonds_for_drawing = {}
    for bond in self.frequency:
        atomname = bond[1]
        if atomname.startswith("O", 0) or atomname.startswith("N", 0):
            lig_atom = atomname
        else:
            atomindex = [index for index, atom in enumerate(self.topology_data.universe.ligand.atoms)
                         if atom.name == atomname][0]
            rdkit_atom = self.topology_data.mol.GetAtomWithIdx(atomindex)
            for neigh in rdkit_atom.GetNeighbors():
                neigh_atom_id = neigh.GetIdx()
                lig_atom = [atom.name for index, atom in enumerate(self.topology_data.universe.ligand.atoms)
                            if index == neigh_atom_id][0]
        self.hbonds_for_drawing[(bond[0], lig_atom, bond[2], bond[3])] = self.frequency[bond]
def _http_req_user_agent(self):
    """Return the User-Agent value to specify in HTTP requests, defaulting
    to ``service/version`` if configured in the application settings, or if
    used in a consumer, it will attempt to obtain a user-agent from the
    consumer's process. If it can not auto-set the User-Agent, it defaults
    to ``sprockets.mixins.http/[VERSION]``.

    :rtype: str
    """
    # Tornado Request Handler
    try:
        return '{}/{}'.format(
            self.settings['service'], self.settings['version'])
    except (AttributeError, KeyError):
        pass

    # Rejected Consumer
    if hasattr(self, '_process'):
        try:
            return '{}/{}'.format(
                self._process.consumer_name,
                self._process.consumer_version)
        except AttributeError:
            pass
    return DEFAULT_USER_AGENT
def update_path(self):
    """
    Tries to update the $PATH automatically.
    """
    if WINDOWS:
        return self.add_to_windows_path()

    # Updating any profile we can on UNIX systems
    export_string = self.get_export_string()

    addition = "\n{}\n".format(export_string)

    updated = []
    profiles = self.get_unix_profiles()

    for profile in profiles:
        if not os.path.exists(profile):
            continue

        with open(profile, "r") as f:
            content = f.read()

        if addition not in content:
            with open(profile, "a") as f:
                f.write(addition)

            updated.append(os.path.relpath(profile, HOME))
def delete_agent(self, agent_id):
    """Delete an agent.

    :param str agent_id: The id of the agent to delete. It must be an str
        containing only characters in "a-zA-Z0-9_-" and must be between 1 and
        36 characters.

    :return: agent deleted.
    :rtype: dict.
    """
    # Raises an error when agent_id is invalid
    self._check_agent_id(agent_id)

    req_url = "{}/agents/{}".format(self._base_url, agent_id)
    resp = self._requests_session.delete(req_url)

    decoded_resp = self._decode_response(resp)

    return decoded_resp
Delete an agent. :param str agent_id: The id of the agent to delete. It must be an str containing only characters in "a-zA-Z0-9_-" and must be between 1 and 36 characters. :return: agent deleted. :rtype: dict.
def append(self, other, inplace=False, **kwargs): """ Append any input which can be converted to MAGICCData to self. Parameters ---------- other : MAGICCData, pd.DataFrame, pd.Series, str Source of data to append. inplace : bool If True, append ``other`` inplace, otherwise return a new ``MAGICCData`` instance. **kwargs Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a ``MAGICCData`` instance). """ if not isinstance(other, MAGICCData): other = MAGICCData(other, **kwargs) if inplace: super().append(other, inplace=inplace) self.metadata.update(other.metadata) else: res = super().append(other, inplace=inplace) res.metadata = deepcopy(self.metadata) res.metadata.update(other.metadata) return res
Append any input which can be converted to MAGICCData to self. Parameters ---------- other : MAGICCData, pd.DataFrame, pd.Series, str Source of data to append. inplace : bool If True, append ``other`` inplace, otherwise return a new ``MAGICCData`` instance. **kwargs Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a ``MAGICCData`` instance).
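A hedged usage sketch with pymagicc (the package MAGICCData comes from); the file names are hypothetical and the import path may differ between pymagicc versions:

from pymagicc.io import MAGICCData  # import path assumed

base = MAGICCData("RCP26_EMISSIONS.DAT")   # hypothetical MAGICC input file
extra = MAGICCData("EXTRA_EMISSIONS.DAT")  # hypothetical

combined = base.append(extra)       # new instance with merged data and metadata
base.append(extra, inplace=True)    # or mutate base directly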
def injector_ui_tree_menu_entity_2_json(self, ignore_genealogy=False):
    """
    Transform this local object to JSON.
    :param ignore_genealogy: ignore the genealogy of this object if true
    (the format expected by the Ariane server)
    :return: the resulting JSON of the transformation
    """
    LOGGER.debug("InjectorUITreeEntity.injector_ui_tree_menu_entity_2_json")
    json_obj = {
        'id': self.id,
        'value': self.value,
        'type': self.type,
        'description': self.description if self.description is not None else "",
        'contextAddress': self.context_address if self.context_address is not None else "",
        'icon': self.icon if self.icon is not None else ""
    }
    if not ignore_genealogy:
        json_obj['parentTreeMenuEntityID'] = self.parent_id
        if self.child_ids is not None:
            json_obj['childsID'] = self.child_ids
    if self.display_permissions is not None:
        json_obj['displayPermissions'] = self.display_permissions
    if self.display_roles is not None:
        json_obj['displayRoles'] = self.display_roles
    if self.other_actions_perms is not None:
        json_obj['otherActionsPerms'] = self.other_actions_perms
    if self.other_actions_roles is not None:
        json_obj['otherActionsRoles'] = self.other_actions_roles
    if self.remote_injector_tree_entity_gears_cache_id is not None and \
            self.remote_injector_tree_entity_gears_cache_id:
        json_obj['remoteInjectorTreeEntityGearsCacheId'] = self.remote_injector_tree_entity_gears_cache_id
    if self.remote_injector_tree_entity_components_cache_id is not None and \
            self.remote_injector_tree_entity_components_cache_id:
        json_obj['remoteInjectorTreeEntityComponentsCacheId'] = \
            self.remote_injector_tree_entity_components_cache_id
    return json_obj
Transform this local object to JSON.
:param ignore_genealogy: ignore the genealogy of this object if true
(the format expected by the Ariane server)
:return: the resulting JSON of the transformation
def _handle_argument(self, token): """Handle a case where an argument is at the head of the tokens.""" name = None self._push() while self._tokens: token = self._tokens.pop() if isinstance(token, tokens.ArgumentSeparator): name = self._pop() self._push() elif isinstance(token, tokens.ArgumentClose): if name is not None: return Argument(name, self._pop()) return Argument(self._pop()) else: self._write(self._handle_token(token)) raise ParserError("_handle_argument() missed a close token")
Handle a case where an argument is at the head of the tokens.
def GetNodes(r, bulk=False):
    """
    Gets all nodes in the cluster.

    @type bulk: bool
    @param bulk: whether to return all information about all nodes

    @rtype: list of dict or str
    @return: if bulk is true, info about nodes in the cluster,
        else list of nodes in the cluster
    """
    if bulk:
        return r.request("get", "/2/nodes", query={"bulk": 1})
    else:
        nodes = r.request("get", "/2/nodes")
        return r.applier(itemgetters("id"), nodes)
Gets all nodes in the cluster.

@type bulk: bool
@param bulk: whether to return all information about all nodes

@rtype: list of dict or str
@return: if bulk is true, info about nodes in the cluster,
    else list of nodes in the cluster
def _apply_Create(self, change):
    '''A record from change must be created.

    :param change: a change object
    :type  change: octodns.record.Change

    :rtype: None
    '''
    ar = _AzureRecord(self._resource_group, change.new)
    create = self._dns_client.record_sets.create_or_update

    create(resource_group_name=ar.resource_group,
           zone_name=ar.zone_name,
           relative_record_set_name=ar.relative_record_set_name,
           record_type=ar.record_type,
           parameters=ar.params)

    self.log.debug('*  Success Create/Update: {}'.format(ar))
A record from change must be created.

:param change: a change object
:type  change: octodns.record.Change

:rtype: None
def _make_summary_tables(self):
    """
    prints the summary of the regression. It shows the waveform
    metadata, diagnostics of the fit, and results of the hypothesis
    tests for each comparison encoded in the design matrix
    """
    try:
        self._Bhat
    except AttributeError:
        raise Exception("Regression hasn't been fit yet. run .fit()")
    else:
        # check degrees of freedom
        num_pcs = self._basis_object.get_params()['num_components']
        total_dof = self._X.shape[0] - self._X.shape[1] - num_pcs
        if total_dof <= 0.0:
            raise ValueError("degrees of freedom <= 0, Hotelling's T2 not defined")

        # print catalog and basis info
        cat_table = list(self._catalog_object.get_params().items())
        bas_table = list(self._basis_object.get_params().items())
        print(tabulate(cat_table + bas_table, tablefmt='plain'))

        # then print the T^2 & p-value table
        headers = self._results[0]
        table = self._results[1:]
        print(tabulate(table, headers, tablefmt="rst"))
        print("Formula Used: %s" % self._designmatrix_object._formula)
        print("Degrees of Freedom (n - p - k): %s" % str(total_dof))
        print("Condition Number of X^T*X: %.2f" % np.linalg.cond(np.dot(self._X.T, self._X)))
prints the summary of the regression. It shows the waveform metadata, diagnostics of the fit, and results of the hypothesis tests for each comparison encoded in the design matrix
def filtered_rows_from_args(self, args):
    '''extracts filters from args, rows from manifests, returns filtered rows'''
    if len(self.manifests) == 0:
        print("fw: No manifests downloaded. Try 'manifest download'")
        return None
    (filters, remainder) = self.filters_from_args(args)

    all_firmwares = self.all_firmwares()
    rows = self.rows_for_firmwares(all_firmwares)
    filtered = self.filter_rows(filters, rows)
    return (filtered, remainder)
extracts filters from args, rows from manifests, returns filtered rows
def version(self): """Get the version of MongoDB that this Server runs as a tuple.""" if not self.__version: command = (self.name, '--version') logger.debug(command) stdout, _ = subprocess.Popen( command, stdout=subprocess.PIPE).communicate() version_output = str(stdout) match = re.search(self.version_patt, version_output) if match is None: raise ServersError( 'Could not determine version of %s from string: %s' % (self.name, version_output)) version_string = match.group('version') self.__version = tuple(map(int, version_string.split('.'))) return self.__version
Get the version of MongoDB that this Server runs as a tuple.
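The parse-and-cache pattern above is easy to reproduce standalone; a sketch assuming a --version output such as `db version v4.2.8` (the real class keeps its regex in self.version_patt and caches the result):

import re
import subprocess

VERSION_PATT = r'version\s+v?(?P<version>\d+\.\d+\.\d+)'  # assumed pattern

def binary_version(binary='mongod'):
    """Return the (major, minor, patch) tuple reported by `binary --version`."""
    out = subprocess.run([binary, '--version'],
                         capture_output=True, text=True).stdout
    match = re.search(VERSION_PATT, out)
    if match is None:
        raise RuntimeError('could not determine version from: %r' % out)
    return tuple(int(part) for part in match.group('version').split('.'))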
def set_state_process(self, context, process): """Method to append process for a context in the IF state. :param context: It can be a layer purpose or a section (impact function, post processor). :type context: str, unicode :param process: A text explain the process. :type process: str, unicode """ LOGGER.info('%s: %s' % (context, process)) self.state[context]["process"].append(process)
Method to append process for a context in the IF state. :param context: It can be a layer purpose or a section (impact function, post processor). :type context: str, unicode :param process: A text explain the process. :type process: str, unicode
def decimal_day_to_day_hour_min_sec(
        self,
        daysFloat):
    """*Convert a day from decimal format to hours mins and sec*

    Precision should be respected.

    **Key Arguments:**
        - ``daysFloat`` -- the day as a decimal.

    **Return:**
        - ``daysInt`` -- day as an integer
        - ``hoursInt`` -- hour as an integer (None if input precision too low)
        - ``minsInt`` -- mins as an integer (None if input precision too low)
        - ``secFloat`` -- secs as a float (None if input precision too low)

    **Usage:**

        .. todo::

            - replace `decimal_day_to_day_hour_min_sec` in all other code

        .. code-block:: python

            from astrocalc.times import conversions
            converter = conversions(
                log=log
            )
            daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
                daysFloat=24.2453
            )
            print daysInt, hoursInt, minsInt, secFloat

            # OUTPUT: 24, 5, 53, None

            daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
                daysFloat=24.1232435454
            )
            print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()

            # OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
    """
    self.log.info(
        'starting the ``decimal_day_to_day_hour_min_sec`` method')

    daysInt = int(daysFloat)
    hoursFloat = (daysFloat - daysInt) * 24.
    hoursInt = int(hoursFloat)
    minsFloat = (hoursFloat - hoursInt) * 60.
    minsInt = int(minsFloat)
    secFloat = (minsFloat - minsInt) * 60.

    # DETERMINE PRECISION
    strday = repr(daysFloat)
    if "." not in strday:
        precisionUnit = "day"
        precision = 0
        hoursInt = None
        minsInt = None
        secFloat = None
    else:
        lenDec = len(strday.split(".")[-1])
        if lenDec < 2:
            precisionUnit = "day"
            precision = 0
            hoursInt = None
            minsInt = None
            secFloat = None
        elif lenDec < 3:
            precisionUnit = "hour"
            precision = 0
            minsInt = None
            secFloat = None
        elif lenDec < 5:
            precisionUnit = "minute"
            precision = 0
            secFloat = None
        else:
            precisionUnit = "second"
            precision = lenDec - 5
            if precision > 3:
                precision = 3
            secFloat = "%02.*f" % (precision, secFloat)

    self.log.info(
        'completed the ``decimal_day_to_day_hour_min_sec`` method')
    return daysInt, hoursInt, minsInt, secFloat
*Convert a day from decimal format to hours mins and sec*

Precision should be respected.

**Key Arguments:**
    - ``daysFloat`` -- the day as a decimal.

**Return:**
    - ``daysInt`` -- day as an integer
    - ``hoursInt`` -- hour as an integer (None if input precision too low)
    - ``minsInt`` -- mins as an integer (None if input precision too low)
    - ``secFloat`` -- secs as a float (None if input precision too low)

**Usage:**

    .. todo::

        - replace `decimal_day_to_day_hour_min_sec` in all other code

    .. code-block:: python

        from astrocalc.times import conversions
        converter = conversions(
            log=log
        )
        daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
            daysFloat=24.2453
        )
        print daysInt, hoursInt, minsInt, secFloat

        # OUTPUT: 24, 5, 53, None

        daysInt, hoursInt, minsInt, secFloat = converter.decimal_day_to_day_hour_min_sec(
            daysFloat=24.1232435454
        )
        print "%(daysInt)s days, %(hoursInt)s hours, %(minsInt)s mins, %(secFloat)s sec" % locals()

        # OUTPUT: 24 days, 2 hours, 57 mins, 28.242 sec
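The core of the conversion is plain unit cascading; a worked example matching the second usage above, ignoring the precision bookkeeping:

days_float = 24.1232435454

days = int(days_float)                   # 24
hours_f = (days_float - days) * 24.0     # 2.9578...
hours = int(hours_f)                     # 2
mins_f = (hours_f - hours) * 60.0        # 57.4707...
mins = int(mins_f)                       # 57
secs = (mins_f - mins) * 60.0            # 28.2423...

print(days, hours, mins, round(secs, 3))  # 24 2 57 28.242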
def fast_boolean(operandA,
                 operandB,
                 operation,
                 precision=0.001,
                 max_points=199,
                 layer=0,
                 datatype=0):
    """
    Execute any boolean operation between 2 polygons or polygon sets.

    Parameters
    ----------
    operandA : polygon or array-like
        First operand. Must be a ``PolygonSet``, ``CellReference``,
        ``CellArray``, or an array. The array may contain any of the
        previous objects or an array-like[N][2] of vertices of a polygon.
    operandB : polygon, array-like or ``None``
        Second operand. Must be ``None``, a ``PolygonSet``,
        ``CellReference``, ``CellArray``, or an array. The array may
        contain any of the previous objects or an array-like[N][2] of
        vertices of a polygon.
    operation : {'or', 'and', 'xor', 'not'}
        Boolean operation to be executed. The 'not' operation returns
        the difference ``operandA - operandB``.
    precision : float
        Desired precision for rounding vertex coordinates.
    max_points : integer
        If greater than 4, fracture the resulting polygons to ensure
        they have at most ``max_points`` vertices. This is not a
        tessellating function, so this number should be as high as
        possible. For example, it should be set to 199 for polygons
        being drawn in GDSII files.
    layer : integer
        The GDSII layer number for the resulting element.
    datatype : integer
        The GDSII datatype for the resulting element (between 0 and 255).

    Returns
    -------
    out : PolygonSet or ``None``
        Result of the boolean operation.
    """
    polyA = []
    polyB = []
    for poly, obj in zip((polyA, polyB), (operandA, operandB)):
        if isinstance(obj, PolygonSet):
            poly.extend(obj.polygons)
        elif isinstance(obj, CellReference) or isinstance(obj, CellArray):
            poly.extend(obj.get_polygons())
        elif obj is not None:
            for inobj in obj:
                if isinstance(inobj, PolygonSet):
                    poly.extend(inobj.polygons)
                elif isinstance(inobj, CellReference) or isinstance(
                        inobj, CellArray):
                    poly.extend(inobj.get_polygons())
                else:
                    poly.append(inobj)
    if len(polyB) == 0:
        polyB.append(polyA.pop())
    result = clipper.clip(polyA, polyB, operation, 1 / precision)
    return None if len(result) == 0 else PolygonSet(
        result, layer, datatype, verbose=False).fracture(
            max_points, precision)
Execute any boolean operation between 2 polygons or polygon sets.

Parameters
----------
operandA : polygon or array-like
    First operand. Must be a ``PolygonSet``, ``CellReference``,
    ``CellArray``, or an array. The array may contain any of the
    previous objects or an array-like[N][2] of vertices of a polygon.
operandB : polygon, array-like or ``None``
    Second operand. Must be ``None``, a ``PolygonSet``,
    ``CellReference``, ``CellArray``, or an array. The array may
    contain any of the previous objects or an array-like[N][2] of
    vertices of a polygon.
operation : {'or', 'and', 'xor', 'not'}
    Boolean operation to be executed. The 'not' operation returns
    the difference ``operandA - operandB``.
precision : float
    Desired precision for rounding vertex coordinates.
max_points : integer
    If greater than 4, fracture the resulting polygons to ensure
    they have at most ``max_points`` vertices. This is not a
    tessellating function, so this number should be as high as
    possible. For example, it should be set to 199 for polygons
    being drawn in GDSII files.
layer : integer
    The GDSII layer number for the resulting element.
datatype : integer
    The GDSII datatype for the resulting element (between 0 and 255).

Returns
-------
out : PolygonSet or ``None``
    Result of the boolean operation.
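A hedged usage sketch with gdspy 1.x (where fast_boolean lives), subtracting one rectangle from another; cell handling may differ in newer gdspy releases:

import gdspy

outer = gdspy.Rectangle((0, 0), (4, 4))
hole = gdspy.Rectangle((1, 1), (3, 3))

# 'not' computes outer - hole; returns a PolygonSet, or None if empty
ring = gdspy.fast_boolean(outer, hole, 'not', layer=1)

cell = gdspy.Cell('RING')
cell.add(ring)
gdspy.write_gds('ring.gds')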
def _set_id_from_xml_frameid(self, xml, xmlpath, var):
    '''Set a single variable with the frameid of the matching entity'''
    e = xml.find(xmlpath)
    if e is not None:
        setattr(self, var, e.attrib['frameid'])
Set a single variable with the frameid of the matching entity
def sparse_surface(self): """ Filled cells on the surface of the mesh. Returns ---------------- voxels: (n, 3) int, filled cells on mesh surface """ if self._method == 'ray': func = voxelize_ray elif self._method == 'subdivide': func = voxelize_subdivide else: raise ValueError('voxelization method incorrect') voxels, origin = func( mesh=self._data['mesh'], pitch=self._data['pitch'], max_iter=self._data['max_iter'][0]) self._cache['origin'] = origin return voxels
Filled cells on the surface of the mesh. Returns ---------------- voxels: (n, 3) int, filled cells on mesh surface
def begin(self):
    ''' Start over and get a track. '''
    # Check for a start metasong
    if self.start:
        # We are in the beginning song
        self.at_beginning = True
        # And on the first track.
        self.pos = 0
    else:
        # We aren't in the beginning song
        self.at_beginning = False
        # So we need to get a new one.
        self._new_song()
    return self._get_song()
Start over and get a track.
def call(self, callname, data=None, **args):
    """
    Generic interface to the REST API

    :param callname: query name
    :param data: dictionary of inputs
    :param args: keyword arguments added to the payload
    :return:
    """
    url = f"{self.url_base}/{callname}"
    payload = self.payload.copy()
    payload.update(**args)

    if data is not None:
        payload.update(data)

    res = self.session.post(url, data=payload)

    if res.status_code > 299:
        self.log.error(f"URL: {url}")
        self.log.error(f"Payload: {payload}")
        self.log.error(f"STATUS: {res.status_code}")
        self.log.error(f"RESPONSE: {res.text}")
        return
    elif 'error' in res.json():
        self.log.error(res.json()['error'])
        return

    return res.json()
Generic interface to the REST API

:param callname: query name
:param data: dictionary of inputs
:param args: keyword arguments added to the payload
:return:
def _ann_store_annotations(self, item_with_annotations, node, overwrite=False): """Stores annotations into an hdf5 file.""" # If we overwrite delete all annotations first if overwrite is True or overwrite == 'v_annotations': annotated = self._all_get_from_attrs(node, HDF5StorageService.ANNOTATED) if annotated: current_attrs = node._v_attrs for attr_name in current_attrs._v_attrnames: if attr_name.startswith(HDF5StorageService.ANNOTATION_PREFIX): delattr(current_attrs, attr_name) delattr(current_attrs, HDF5StorageService.ANNOTATED) self._hdf5file.flush() # Only store annotations if the item has some if not item_with_annotations.v_annotations.f_is_empty(): anno_dict = item_with_annotations.v_annotations._dict current_attrs = node._v_attrs changed = False for field_name in anno_dict: val = anno_dict[field_name] field_name_with_prefix = HDF5StorageService.ANNOTATION_PREFIX + field_name if field_name_with_prefix not in current_attrs: # Only store *new* annotations, if they already exist on disk, skip storage setattr(current_attrs, field_name_with_prefix, val) changed = True if changed: setattr(current_attrs, HDF5StorageService.ANNOTATED, True) self._hdf5file.flush()
Stores annotations into an hdf5 file.
def update_url_params(url, replace_all=False, **url_params):
    """ :return: url with its query updated from url_params (non-matching params are retained) """

    # Ensure 'replace_all' can be sent as a url param
    if not (replace_all is True or replace_all is False):
        url_params['replace_all'] = replace_all

    if not url or not url_params:
        return url or None

    scheme, netloc, url_path, url_query, fragment = _urlsplit(url)

    if replace_all is True:
        url_query = url_params
    else:
        url_query = _parse_qs(url_query)
        url_query.update(url_params)

    return _urlunsplit((scheme, netloc, url_path, _unquote(_urlencode(url_query, doseq=True)), fragment))
:return: url with its query updated from url_params (non-matching params are retained)
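Stripped of the underscore-aliased imports and the replace_all handling, the merge step is just urlsplit + parse_qs + urlencode; a self-contained sketch:

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

def merge_query(url, **params):
    """Return url with params merged into (and overriding) its query string."""
    scheme, netloc, path, query, fragment = urlsplit(url)
    merged = parse_qs(query)
    merged.update(params)
    return urlunsplit((scheme, netloc, path,
                       urlencode(merged, doseq=True), fragment))

print(merge_query('http://example.com/?a=1&b=2', b=3, c=4))
# http://example.com/?a=1&b=3&c=4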
def list(self, request, *args, **kwargs): """ To get a list of price list items, run **GET** against */api/merged-price-list-items/* as authenticated user. If service is not specified default price list items are displayed. Otherwise service specific price list items are displayed. In this case rendered object contains {"is_manually_input": true} In order to specify service pass query parameters: - service_type (Azure, OpenStack etc.) - service_uuid Example URL: http://example.com/api/merged-price-list-items/?service_type=Azure&service_uuid=cb658b491f3644a092dd223e894319be """ return super(MergedPriceListItemViewSet, self).list(request, *args, **kwargs)
To get a list of price list items, run **GET** against */api/merged-price-list-items/* as authenticated user. If service is not specified default price list items are displayed. Otherwise service specific price list items are displayed. In this case rendered object contains {"is_manually_input": true} In order to specify service pass query parameters: - service_type (Azure, OpenStack etc.) - service_uuid Example URL: http://example.com/api/merged-price-list-items/?service_type=Azure&service_uuid=cb658b491f3644a092dd223e894319be
def process_text(text, output_fmt='json', outbuf=None, cleanup=True, key='', **kwargs): """Return processor with Statements extracted by reading text with Sparser. Parameters ---------- text : str The text to be processed output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen. """ nxml_str = make_nxml_from_text(text) return process_nxml_str(nxml_str, output_fmt, outbuf, cleanup, key, **kwargs)
Return processor with Statements extracted by reading text with Sparser. Parameters ---------- text : str The text to be processed output_fmt: Optional[str] The output format to obtain from Sparser, with the two options being 'json' and 'xml'. Default: 'json' outbuf : Optional[file] A file like object that the Sparser output is written to. cleanup : Optional[bool] If True, the temporary file created, which is used as an input file for Sparser, as well as the output file created by Sparser are removed. Default: True key : Optional[str] A key which is embedded into the name of the temporary file passed to Sparser for reading. Default is empty string. Returns ------- SparserXMLProcessor or SparserJSONProcessor depending on what output format was chosen.
def has_tensor(obj) -> bool: """ Given a possibly complex data structure, check if it has any torch.Tensors in it. """ if isinstance(obj, torch.Tensor): return True elif isinstance(obj, dict): return any(has_tensor(value) for value in obj.values()) elif isinstance(obj, (list, tuple)): return any(has_tensor(item) for item in obj) else: return False
Given a possibly complex data structure, check if it has any torch.Tensors in it.
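Usage is direct, assuming the function above is in scope and PyTorch is installed:

import torch

print(has_tensor({'inputs': [torch.zeros(2), 'label']}))  # True
print(has_tensor(({'x': 1}, ['y', (2, 3)])))              # False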
def await_message(self, *args, **kwargs) -> 'asyncio.Future[Message]': """ Block until a message matches. See `on_message` """ fut = asyncio.Future() @self.on_message(*args, **kwargs) async def handler(message): fut.set_result(message) # remove handler when done or cancelled fut.add_done_callback(lambda _: self.remove_message_handler(handler)) return fut
Block until a message matches. See `on_message`
def getFields(self):
    """
    Returns all the class attributes.

    @rtype: dict
    @return: A dictionary containing all the class attributes.
    """
    d = {}
    for attr in self._attrsList:
        d[attr] = getattr(self, attr)
    return d
Returns all the class attributes.

@rtype: dict
@return: A dictionary containing all the class attributes.
def get_definition(self, name):
    '''Returns xaddr and wsdl of specified service'''
    # Check if the service is supported
    if name not in SERVICES:
        raise ONVIFError('Unknown service %s' % name)
    wsdl_file = SERVICES[name]['wsdl']
    ns = SERVICES[name]['ns']

    wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
    if not os.path.isfile(wsdlpath):
        raise ONVIFError('No such file: %s' % wsdlpath)

    # XAddr for devicemgmt is fixed:
    if name == 'devicemgmt':
        xaddr = 'http://%s:%s/onvif/device_service' % (self.host, self.port)
        return xaddr, wsdlpath

    # Get other XAddr
    xaddr = self.xaddrs.get(ns)
    if not xaddr:
        raise ONVIFError("Device doesn't support service: %s" % name)

    return xaddr, wsdlpath
Returns xaddr and wsdl of specified service
def put(self): """Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ExecutableTaskType`: An executable task type model instance representing the task type just updated. """ return self.manager.put( id=self.id, name=self.name, description=self.description, command_to_run=self.command_to_run, environment_variables=self.environment_variables, required_arguments=self.required_arguments, required_arguments_default_values=( self.required_arguments_default_values ), json_file_option=self.json_file_option, )
Updates this task type on the saltant server. Returns: :class:`saltant.models.container_task_type.ExecutableTaskType`: An executable task type model instance representing the task type just updated.
def errorprint(): """Print out descriptions from ConfigurationError.""" try: yield except ConfigurationError as e: click.secho('%s' % e, err=True, fg='red') sys.exit(1)
Print out descriptions from ConfigurationError.
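The body above is a generator, so in the original module the function is presumably decorated with contextlib.contextmanager; a self-contained sketch of the same pattern, with a stand-in exception class:

import sys
from contextlib import contextmanager

import click

class ConfigurationError(Exception):
    """Stand-in; the real class lives elsewhere in the package."""

@contextmanager
def errorprint():
    """Print ConfigurationError messages in red and exit non-zero."""
    try:
        yield
    except ConfigurationError as e:
        click.secho('%s' % e, err=True, fg='red')
        sys.exit(1)

# usage:
# with errorprint():
#     load_config()  # hypothetical; may raise ConfigurationError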
def find_group(self, star, starlist): """ Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``. """ star_distance = np.hypot(star['x_0'] - starlist['x_0'], star['y_0'] - starlist['y_0']) distance_criteria = star_distance < self.crit_separation return np.asarray(starlist[distance_criteria]['id'])
Find the ids of those stars in ``starlist`` which are at a distance less than ``crit_separation`` from ``star``. Parameters ---------- star : `~astropy.table.Row` Star which will be either the head of a cluster or an isolated one. starlist : `~astropy.table.Table` List of star positions. Columns named as ``x_0`` and ``y_0``, which corresponds to the centroid coordinates of the sources, must be provided. Returns ------- Array containing the ids of those stars which are at a distance less than ``crit_separation`` from ``star``.
def load(cls, fpath): """Loads a module and returns its object. :param str|unicode fpath: :rtype: module """ module_name = os.path.splitext(os.path.basename(fpath))[0] sys.path.insert(0, os.path.dirname(fpath)) try: module = import_module(module_name) finally: sys.path = sys.path[1:] return module
Loads a module and returns its object. :param str|unicode fpath: :rtype: module
def recipe(package, repository=None, depends_on=None, release=False, output_path=None, auto=False, overwrite=False, name=None): """Create a new upgrade recipe, for developers.""" upgrader = InvenioUpgrader() logger = upgrader.get_logger() try: path, found_repository = _upgrade_recipe_find_path(package) if output_path: path = output_path if not repository: repository = found_repository if not os.path.exists(path): raise RuntimeError("Path does not exists: %s" % path) if not os.path.isdir(path): raise RuntimeError("Path is not a directory: %s" % path) # Generate upgrade filename if release: filename = "%s_release_x_y_z.py" % repository else: filename = "%s_%s_%s.py" % (repository, date.today().strftime("%Y_%m_%d"), name or 'rename_me') # Check if generated repository name can be parsed test_repository = upgrader._parse_plugin_id(filename[:-3]) if repository != test_repository: raise RuntimeError( "Generated repository name cannot be parsed. " "Please override it with --repository option." ) upgrade_file = os.path.join(path, filename) if os.path.exists(upgrade_file) and not overwrite: raise RuntimeError( "Could not generate upgrade - %s already exists." % upgrade_file ) # Determine latest installed upgrade if depends_on is None: depends_on = ["CHANGE_ME"] u = upgrader.latest_applied_upgrade(repository=repository) if u: depends_on = [u] # Write upgrade template file _write_template( upgrade_file, name or 'rename_me', depends_on, repository, auto=auto) logger.info("Created new upgrade %s" % upgrade_file) except RuntimeError as e: for msg in e.args: logger.error(unicode(msg)) raise
Create a new upgrade recipe, for developers.
def parse_string(progression): """Return a tuple (roman numeral, accidentals, chord suffix). Examples: >>> parse_string('I') ('I', 0, '') >>> parse_string('bIM7') ('I', -1, 'M7') """ acc = 0 roman_numeral = '' suffix = '' i = 0 for c in progression: if c == '#': acc += 1 elif c == 'b': acc -= 1 elif c.upper() == 'I' or c.upper() == 'V': roman_numeral += c.upper() else: break i += 1 suffix = progression[i:] return (roman_numeral, acc, suffix)
Return a tuple (roman numeral, accidentals, chord suffix). Examples: >>> parse_string('I') ('I', 0, '') >>> parse_string('bIM7') ('I', -1, 'M7')
def get_queryset(self): """ Fixes get_query_set vs get_queryset for Django <1.6 """ try: qs = super(UserManager, self).get_queryset() except AttributeError: # pragma: no cover qs = super(UserManager, self).get_query_set() return qs
Fixes get_query_set vs get_queryset for Django <1.6
def parse_unit(name, parse_strict='warn', format='gwpy'):
    """Attempt to intelligently parse a `str` as a `~astropy.units.Unit`

    Parameters
    ----------
    name : `str`
        unit name to parse

    parse_strict : `str`
        one of 'silent', 'warn', or 'raise' depending on how pedantic
        you want the parser to be

    format : `~astropy.units.format.Base`
        the formatter class to use when parsing the unit string

    Returns
    -------
    unit : `~astropy.units.UnitBase`
        the unit parsed by `~astropy.units.Unit`

    Raises
    ------
    ValueError
        if the unit cannot be parsed and `parse_strict='raise'`
    """
    if name is None or isinstance(name, units.UnitBase):
        return name

    try:  # have we already identified this unit as unrecognised?
        return UNRECOGNIZED_UNITS[name]
    except KeyError:  # no, this is new
        # pylint: disable=unexpected-keyword-arg
        try:
            return units.Unit(name, parse_strict='raise')
        except ValueError as exc:
            if (parse_strict == 'raise' or
                    'did not parse as unit' not in str(exc)):
                raise
            # try again using our own lenient parser
            GWpyFormat.warn = parse_strict != 'silent'
            return units.Unit(name, parse_strict='silent', format=format)
        finally:
            GWpyFormat.warn = True
Attempt to intelligently parse a `str` as a `~astropy.units.Unit` Parameters ---------- name : `str` unit name to parse parse_strict : `str` one of 'silent', 'warn', or 'raise' depending on how pedantic you want the parser to be format : `~astropy.units.format.Base` the formatter class to use when parsing the unit string Returns ------- unit : `~astropy.units.UnitBase` the unit parsed by `~astropy.units.Unit` Raises ------ ValueError if the unit cannot be parsed and `parse_strict='raise'`
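The fallback chain reduces to a strict parse followed by a lenient one; a hedged illustration with plain astropy (the gwpy helper adds caching and custom warning handling on top):

from astropy import units

def lenient_unit(name):
    """Parse strictly first, then fall back to astropy's silent mode."""
    try:
        return units.Unit(name, parse_strict='raise')
    except ValueError:
        return units.Unit(name, parse_strict='silent')

print(lenient_unit('m/s'))        # m / s
print(lenient_unit('NONSENSE'))   # an UnrecognizedUnit, instead of an exception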
def editAccountInfo(self, short_name=None, author_name=None, author_url=None): """Use this method to update information about a Telegraph account. :param short_name: Optional. New account name. :type short_name: str :param author_name: Optional. New default author name used when creating new articles. :type author_name: str :param author_url: Optional. New default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channel. :type author_url: str :returns: Account object with the default fields. """ return self.make_method("editAccountInfo", { "access_token": self.access_token, "short_name": short_name, "author_name": author_name, "author_url": author_url })
Use this method to update information about a Telegraph account. :param short_name: Optional. New account name. :type short_name: str :param author_name: Optional. New default author name used when creating new articles. :type author_name: str :param author_url: Optional. New default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channel. :type author_url: str :returns: Account object with the default fields.
def openFile(self, openDQ=False): """ Open file and set up filehandle for image file """ if self._im.closed: if not self._dq.closed: self._dq.release() assert(self._dq.closed) fi = FileExtMaskInfo(clobber=False, doNotOpenDQ=not openDQ, im_fmode=self.open_mode) fi.image = self.name self._im = fi.image fi.append_ext(spu.get_ext_list(self._im, extname='SCI')) fi.finalize() self._im = fi.image self._dq = fi.DQimage self._imext = fi.fext self._dqext = fi.dqext
Open file and set up filehandle for image file
def cache_as_field(cache_name):
    """Cache a function's return value as the field 'cache_name'."""
    def cache_wrapper(func):
        @functools.wraps(func)
        def inner_wrapper(self, *args, **kwargs):
            value = getattr(self, cache_name, UndefToken)
            if value != UndefToken:
                return value

            ret = func(self, *args, **kwargs)
            setattr(self, cache_name, ret)
            return ret
        return inner_wrapper
    return cache_wrapper
Cache a function's return value as the field 'cache_name'.
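A usage sketch, assuming the decorator and the module's UndefToken sentinel are importable:

class Report:
    @cache_as_field('_totals')
    def totals(self):
        print('computing...')  # runs only on the first call
        return sum(range(1000))

r = Report()
print(r.totals())  # computing... then 499500
print(r.totals())  # 499500, served from r._totals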
def bhattacharyya(Ks, dim, required, clamp=True, to_self=False):
    r'''
    Estimate the Bhattacharyya coefficient between distributions, based on kNN
    distances:  \int \sqrt{p q}

    If clamp (the default), enforces 0 <= BC <= 1.

    Returns an array of shape (num_Ks,).
    '''
    est = required
    if clamp:
        est = np.clip(est, 0, 1)  # enforce both bounds the docstring promises
    return est
r''' Estimate the Bhattacharyya coefficient between distributions, based on kNN distances: \int \sqrt{p q} If clamp (the default), enforces 0 <= BC <= 1. Returns an array of shape (num_Ks,).
def pull(self, path, use_sudo=False, user=None, force=False): """ Fetch changes from the default remote repository and merge them. :param path: Path of the working copy directory. This directory must exist and be a Git working copy with a default remote to pull from. :type path: str :param use_sudo: If ``True`` execute ``git`` with :func:`fabric.operations.sudo`, else with :func:`fabric.operations.run`. :type use_sudo: bool :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo` with the given user. If ``use_sudo is False`` this parameter has no effect. :type user: str :param force: If ``True``, append the ``--force`` option to the command. :type force: bool """ if path is None: raise ValueError("Path to the working copy is needed to pull from a remote repository.") options = [] if force: options.append('--force') options = ' '.join(options) cmd = 'git pull %s' % options with cd(path): if use_sudo and user is None: run_as_root(cmd) elif use_sudo: sudo(cmd, user=user) else: run(cmd)
Fetch changes from the default remote repository and merge them. :param path: Path of the working copy directory. This directory must exist and be a Git working copy with a default remote to pull from. :type path: str :param use_sudo: If ``True`` execute ``git`` with :func:`fabric.operations.sudo`, else with :func:`fabric.operations.run`. :type use_sudo: bool :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo` with the given user. If ``use_sudo is False`` this parameter has no effect. :type user: str :param force: If ``True``, append the ``--force`` option to the command. :type force: bool
def sequence_set(self) -> SequenceSet: """The sequence set to use when finding the messages to match against. This will default to all messages unless the search criteria set contains a sequence set. """ try: seqset_crit = next(crit for crit in self.all_criteria if isinstance(crit, SequenceSetSearchCriteria)) except StopIteration: return SequenceSet.all() else: return seqset_crit.seq_set
The sequence set to use when finding the messages to match against. This will default to all messages unless the search criteria set contains a sequence set.
def parse_refresh_header(self, refresh): """ >>> parse_refresh_header("1; url=http://example.com/") (1.0, 'http://example.com/') >>> parse_refresh_header("1; url='http://example.com/'") (1.0, 'http://example.com/') >>> parse_refresh_header("1") (1.0, None) >>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: invalid literal for float(): blah """ ii = refresh.find(";") if ii != -1: pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:] jj = newurl_spec.find("=") key = None if jj != -1: key, newurl = newurl_spec[:jj], newurl_spec[jj+1:] newurl = self.clean_refresh_url(newurl) if key is None or key.strip().lower() != "url": raise ValueError() else: pause, newurl = float(refresh), None return pause, newurl
>>> parse_refresh_header("1; url=http://example.com/") (1.0, 'http://example.com/') >>> parse_refresh_header("1; url='http://example.com/'") (1.0, 'http://example.com/') >>> parse_refresh_header("1") (1.0, None) >>> parse_refresh_header("blah") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ValueError: invalid literal for float(): blah
def StartingKey(self, evt):
    """
    If the editor is enabled by pressing keys on the grid, this will be
    called to let the editor do something about that first key if desired.
    """
    key = evt.GetKeyCode()
    ch = None
    if key in [ wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
                wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
                wx.WXK_NUMPAD8, wx.WXK_NUMPAD9]:
        ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
    elif key < 256 and key >= 0 and chr(key) in string.printable:
        ch = chr(key)

    if ch is not None and self._tc.IsEnabled():
        # For this example, replace the text. Normally we would append it.
        #self._tc.AppendText(ch)
        self._tc.SetValue(ch)
        self._tc.SetInsertionPointEnd()
    else:
        evt.Skip()
If the editor is enabled by pressing keys on the grid, this will be called to let the editor do something about that first key if desired.
def temperature(self, what): """Set temperature.""" self._temperature = units.validate_quantity(what, u.K)
Set temperature.
def estimate(self, significance_level=0.01): """ Estimates a DAG for the data set, using the PC constraint-based structure learning algorithm. Independencies are identified from the data set using a chi-squared statistic with the acceptance threshold of `significance_level`. PC identifies a partially directed acyclic graph (PDAG), given that the tested independencies admit a faithful Bayesian network representation. This method returns a DAG that is a completion of this PDAG. Parameters ---------- significance_level: float, default: 0.01 The significance level to use for conditional independence tests in the data set. `significance_level` is the desired Type 1 error probability of falsely rejecting the null hypothesis that variables are independent, given that they are. The lower `significance_level`, the less likely we are to accept dependencies, resulting in a sparser graph. Returns ------- model: DAG()-instance An estimate for the DAG for the data set (not yet parametrized). Reference --------- Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550) http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.estimators import ConstraintBasedEstimator >>> data = pd.DataFrame(np.random.randint(0, 5, size=(2500, 3)), columns=list('XYZ')) >>> data['sum'] = data.sum(axis=1) >>> print(data) X Y Z sum 0 3 0 1 4 1 1 4 3 8 2 0 0 3 3 3 0 2 3 5 4 2 1 1 4 ... .. .. .. ... 2495 2 3 0 5 2496 1 1 2 4 2497 0 4 2 6 2498 0 0 0 0 2499 2 4 0 6 [2500 rows x 4 columns] >>> c = ConstraintBasedEstimator(data) >>> model = c.estimate() >>> print(model.edges()) [('Z', 'sum'), ('X', 'sum'), ('Y', 'sum')] """ skel, separating_sets = self.estimate_skeleton(significance_level) pdag = self.skeleton_to_pdag(skel, separating_sets) model = self.pdag_to_dag(pdag) return model
Estimates a DAG for the data set, using the PC constraint-based structure learning algorithm. Independencies are identified from the data set using a chi-squared statistic with the acceptance threshold of `significance_level`. PC identifies a partially directed acyclic graph (PDAG), given that the tested independencies admit a faithful Bayesian network representation. This method returns a DAG that is a completion of this PDAG. Parameters ---------- significance_level: float, default: 0.01 The significance level to use for conditional independence tests in the data set. `significance_level` is the desired Type 1 error probability of falsely rejecting the null hypothesis that variables are independent, given that they are. The lower `significance_level`, the less likely we are to accept dependencies, resulting in a sparser graph. Returns ------- model: DAG()-instance An estimate for the DAG for the data set (not yet parametrized). Reference --------- Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550) http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.estimators import ConstraintBasedEstimator >>> data = pd.DataFrame(np.random.randint(0, 5, size=(2500, 3)), columns=list('XYZ')) >>> data['sum'] = data.sum(axis=1) >>> print(data) X Y Z sum 0 3 0 1 4 1 1 4 3 8 2 0 0 3 3 3 0 2 3 5 4 2 1 1 4 ... .. .. .. ... 2495 2 3 0 5 2496 1 1 2 4 2497 0 4 2 6 2498 0 0 0 0 2499 2 4 0 6 [2500 rows x 4 columns] >>> c = ConstraintBasedEstimator(data) >>> model = c.estimate() >>> print(model.edges()) [('Z', 'sum'), ('X', 'sum'), ('Y', 'sum')]