def pre_save(self, instance, add: bool):
    """Runs just before the model is saved, allowing us to build the slug.

    Arguments:
        instance:
            The model that is being saved.

        add:
            Indicates whether this is a new entry
            to the database or an update.

    Returns:
        The localized slug that was generated.
    """
    if not isinstance(instance, AtomicSlugRetryMixin):
        raise ImproperlyConfigured((
            'Model \'%s\' does not inherit from AtomicSlugRetryMixin. '
            'Without this, the LocalizedUniqueSlugField will not work.'
        ) % type(instance).__name__)

    slugs = LocalizedValue()

    for lang_code, value in self._get_populate_values(instance):
        if not value:
            continue

        slug = slugify(value, allow_unicode=True)

        # verify whether it's needed to re-generate a slug,
        # if not, re-use the same slug
        if instance.pk is not None:
            current_slug = getattr(instance, self.name).get(lang_code)
            if current_slug is not None:
                stripped_slug = current_slug[0:current_slug.rfind('-')]
                if slug == stripped_slug:
                    slugs.set(lang_code, current_slug)
                    continue

        if self.include_time:
            slug += '-%d' % datetime.now().microsecond

        retries = getattr(instance, 'retries', 0)
        if retries > 0:
            # do not add another '-' if we already added time
            if not self.include_time:
                slug += '-'
            slug += '%d' % retries

        slugs.set(lang_code, slug)

    setattr(instance, self.name, slugs)
    return slugs
def getFullFMAtIndex(self, index):
    '''
    This function creates a complete FM-index for a specific position in the BWT.
    Example using the above example:
        BWT    Full FM-index
               $ A C G T
        C      0 1 2 4 4
        $      0 1 3 4 4
        C      1 1 3 4 4
        A      1 1 4 4 4
               1 2 4 4 4
    @return - the above information in the form of an array that already
        incorporates the offset value into the counts
    '''
    if index == self.totalSize:
        return np.cumsum(self.totalCounts)

    # get the bin we start from
    binID = index >> self.bitPower
    bwtIndex = self.refFM[binID]

    # figure out how far in we really are
    ret = np.copy(self.partialFM[binID])
    trueIndex = np.sum(ret) - self.offsetSum
    dist = index - trueIndex
    if dist == 0:
        return ret

    # find the end of the region of interest
    if binID == self.refFM.shape[0] - 1:
        endRange = self.bwt.shape[0]
    else:
        endRange = self.refFM[binID + 1] + 1
        while endRange < self.bwt.shape[0] and (self.bwt[endRange] & self.mask) == (self.bwt[endRange - 1] & self.mask):
            endRange += 1

    # split the letters and numbers in the compressed bwt
    letters = np.bitwise_and(self.bwt[bwtIndex:endRange], self.mask)
    counts = np.right_shift(self.bwt[bwtIndex:endRange], self.letterBits, dtype='<u8')

    i = 1
    same = (letters[0:-1] == letters[1:])
    while np.count_nonzero(same) > 0:
        (counts[i:])[same] *= self.numPower
        i += 1
        same = np.bitwise_and(same[0:-1], same[1:])

    cs = np.subtract(np.cumsum(counts), counts)
    x = np.searchsorted(cs, dist, 'left')
    if x > 1:
        ret += np.bincount(letters[0:x - 1], counts[0:x - 1], minlength=self.vcLen)
    ret[letters[x - 1]] += dist - cs[x - 1]
    return ret
def list(self, page=1, per_page=50):
    """
    Lists Jobs.

    https://app.zencoder.com/docs/api/jobs/list
    """
    data = {"page": page, "per_page": per_page}
    return self.get(self.base_url, data=data)
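A minimal usage sketch, assuming a configured Zencoder API wrapper whose job resource exposes this method (the client variable and attribute name are assumptions, not confirmed by the source):

    # Hypothetical: fetch the second page of jobs, 50 per page.
    response = client.job.list(page=2, per_page=50)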
def _get_create_table_sql(self, table_name, columns, options=None):
    """
    Returns the SQL used to create a table.

    :param table_name: The name of the table to create
    :type table_name: str

    :param columns: The table columns
    :type columns: dict

    :param options: The options
    :type options: dict

    :rtype: list
    """
    options = options or {}

    column_list_sql = self.get_column_declaration_list_sql(columns)

    if options.get("unique_constraints"):
        for name, definition in options["unique_constraints"].items():
            column_list_sql += ", %s" % self.get_unique_constraint_declaration_sql(
                name, definition
            )

    if options.get("primary"):
        column_list_sql += ", PRIMARY KEY(%s)" % ", ".join(options["primary"])

    if options.get("indexes"):
        for index, definition in options["indexes"]:
            column_list_sql += ", %s" % self.get_index_declaration_sql(
                index, definition
            )

    query = "CREATE TABLE %s (%s" % (table_name, column_list_sql)

    check = self.get_check_declaration_sql(columns)
    if check:
        query += ", %s" % check

    query += ")"

    sql = [query]

    if options.get("foreign_keys"):
        for definition in options["foreign_keys"]:
            sql.append(self.get_create_foreign_key_sql(definition, table_name))

    return sql
def new_pic(cls, id_, name, desc, rId, left, top, width, height):
    """
    Return a new ``<p:pic>`` element tree configured with the supplied
    parameters.
    """
    xml = cls._pic_tmpl() % (
        id_, name, desc, rId, left, top, width, height
    )
    pic = parse_xml(xml)
    return pic
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3):
    """Attempts to improve the OCR result by scaling the image. If the new
    mrz is better, returns it, otherwise returns the old mrz."""
    if roi.shape[1] <= 700:
        scale_by = int(1050.0 / roi.shape[1] + 0.5)
        roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant',
                                   multichannel=False, anti_aliasing=True)
        new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params)
        new_mrz = MRZ.from_ocr(new_text)
        new_mrz.aux['method'] = 'rescaled(%d)' % filter_order
        if new_mrz.valid_score > cur_mrz.valid_score:
            cur_mrz = new_mrz
            cur_text = new_text
    return cur_text, cur_mrz
def _get_symbol_index(stroke_id_needle, segmentation):
    """
    Parameters
    ----------
    stroke_id_needle : int
        Identifier for the stroke of which the symbol should get found.
    segmentation : list of lists of integers
        An ordered segmentation of strokes to symbols.

    Returns
    -------
    The symbol index in which stroke_id_needle occurs

    Examples
    --------
    >>> _get_symbol_index(3, [[0, 1, 2], [3, 4, 5], [6, 7]])
    1
    >>> _get_symbol_index(6, [[0, 1, 2], [3, 4, 5], [6, 7]])
    2
    >>> _get_symbol_index(7, [[0, 1, 2], [3, 4, 5], [6, 7]])
    2
    """
    for symbol_index, symbol in enumerate(segmentation):
        if stroke_id_needle in symbol:
            return symbol_index
    return None
def connect_attenuator(self, connect=True):
    """Establish a connection to the TDT PA5 attenuator"""
    if connect:
        try:
            pa5 = win32com.client.Dispatch("PA5.x")
            success = pa5.ConnectPA5('GB', 1)
            if success == 1:
                print 'Connection to PA5 attenuator established'
            else:
                print 'Connection to PA5 attenuator failed'
                errmsg = pa5.GetError()
                print u"Error: ", errmsg
                raise Exception(u"Attenuator connection failed")
        except:
            print "Error connecting to attenuator"
            pa5 = None
        self.attenuator = pa5
    else:
        # if there is an attenuator, make sure it is set to 0 before disconnecting
        if self.attenuator:
            self.attenuator.setAtten(0)
        self.attenuator = None
    return self.attenuator
def select_python_parser(parser=None):
    """
    Select default parser for loading and refactoring steps.
    Passing `redbaron` as argument will select the old parsing engine from v0.3.3

    Replacing the redbaron parser was necessary to support Python 3 syntax.
    We have tried our best to make sure there is no impact on users.
    However, there may be regressions with the new parser backend.

    To revert to the old parser implementation, add
    `GETGAUGE_USE_0_3_3_PARSER=true` property to the `python.properties` file
    in the `<PROJECT_DIR>/env/default` directory.

    This property along with the redbaron parser will be removed in future releases.
    """
    if parser == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):
        PythonFile.Class = RedbaronPythonFile
    else:
        PythonFile.Class = ParsoPythonFile
def get_authorizations_by_ids(self, authorization_ids):
    """Gets an ``AuthorizationList`` corresponding to the given ``IdList``.

    In plenary mode, the returned list contains all of the authorizations
    specified in the ``Id`` list, in the order of the list, including
    duplicates, or an error results if an ``Id`` in the supplied list is
    not found or inaccessible. Otherwise, inaccessible ``Authorizations``
    may be omitted from the list and may present the elements in any
    order including returning a unique set.

    arg:    authorization_ids (osid.id.IdList): the list of ``Ids`` to
            retrieve
    return: (osid.authorization.AuthorizationList) - the returned
            ``Authorization list``
    raise:  NotFound - an ``Id`` was not found
    raise:  NullArgument - ``authorization_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    object_id_list = []
    for i in authorization_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'authorization').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AuthorizationList(sorted_result, runtime=self._runtime, proxy=self._proxy)
def json_encoder_default(obj):
    """Handle more data types than the default JSON encoder.

    Specifically, it treats a `set` and a `numpy.array` like a `list`.

    Example usage: ``json.dumps(obj, default=json_encoder_default)``
    """
    if np is not None and hasattr(obj, 'size') and hasattr(obj, 'dtype'):
        if obj.size == 1:
            if np.issubdtype(obj.dtype, np.integer):
                return int(obj)
            elif np.issubdtype(obj.dtype, np.floating):
                return float(obj)
    if isinstance(obj, set):
        return list(obj)
    elif hasattr(obj, 'to_native'):
        # DatastoreList, DatastoreDict
        return obj.to_native()
    elif hasattr(obj, 'tolist') and hasattr(obj, '__iter__'):
        # for np.array
        return obj.tolist()
    return obj
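A runnable sketch of the fallbacks described above, assuming this function is importable and its defining module was able to import numpy (both assumptions):

    import json
    import numpy as np

    # A set is serialized like a list (element order is unspecified).
    print(json.dumps({'ids': {1, 2, 3}}, default=json_encoder_default))
    # A numpy array falls through to .tolist().
    print(json.dumps({'x': np.arange(3)}, default=json_encoder_default))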
def _merge_results(self, results):
    """Combine results of test run with existing dict."""
    self.results['tests'] += results['tests']
    for key, value in results['summary'].items():
        self.results['summary'][key] += value
def hsl_to_rgb(h, s=None, l=None):
    """Convert the color from HSL coordinates to RGB.

    Parameters:
      :h: The Hue component value [0...360]
      :s: The Saturation component value [0...1]
      :l: The Lightness component value [0...1]

    Returns:
      The color as an (r, g, b) tuple in the range:
      r[0...1], g[0...1], b[0...1]

    >>> hsl_to_rgb(30.0, 1.0, 0.5)
    (1.0, 0.5, 0.0)
    """
    if type(h) in [list, tuple]:
        h, s, l = h

    if s == 0:
        return (l, l, l)  # achromatic (gray)

    if l < 0.5:
        n2 = l * (1.0 + s)
    else:
        n2 = l + s - (l * s)

    n1 = (2.0 * l) - n2
    h /= 60.0

    hueToRgb = _hue_to_rgb
    r = hueToRgb(n1, n2, h + 2)
    g = hueToRgb(n1, n2, h)
    b = hueToRgb(n1, n2, h - 2)
    return (r, g, b)
def weld_iloc_int(array, index):
    """Retrieves the value at index.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data. Assumed to be bool data.
    index : int
        The array index from which to retrieve value.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)

    weld_template = 'lookup({array}, {index}L)'

    weld_obj.weld_code = weld_template.format(array=obj_id, index=index)

    return weld_obj
def port(self, value):
    """
    Setter for **self.__port** attribute.

    :param value: Attribute value.
    :type value: int
    """
    if value is not None:
        assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format(
            "port", value)
        assert value >= 0 and value <= 65535, \
            "'{0}' attribute: '{1}' value must be in 0-65535 range!".format("port", value)
    self.__port = value
def approve(self):
    """Approve object.

    This reverts a removal, resets the report counter, marks it with a
    green check mark (only visible to other moderators) on the website
    view and sets the approved_by attribute to the logged in user.

    :returns: The json response from the server.
    """
    url = self.reddit_session.config['approve']
    data = {'id': self.fullname}
    response = self.reddit_session.request_json(url, data=data)
    urls = [self.reddit_session.config[x] for x in ['modqueue', 'spam']]
    if isinstance(self, Submission):
        urls += self.subreddit._listing_urls  # pylint: disable=W0212
    self.reddit_session.evict(urls)
    return response
def _add_request_data(data, request):
    """
    Attempts to build request data; if successful, sets the 'request' key on `data`.
    """
    try:
        request_data = _build_request_data(request)
    except Exception as e:
        log.exception("Exception while building request_data for Rollbar payload: %r", e)
    else:
        if request_data:
            _filter_ip(request_data, SETTINGS['capture_ip'])
            data['request'] = request_data
def createTileUrl(self, x, y, z):
    '''
    returns new tile url based on template
    '''
    return self.tileTemplate.replace('{{x}}', str(x)).replace(
        '{{y}}', str(y)).replace('{{z}}', str(z))
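A minimal usage sketch, assuming an object whose tileTemplate attribute holds a URL with {{x}}, {{y}} and {{z}} placeholders (the class and template URL below are hypothetical):

    class TileSource(object):
        tileTemplate = 'https://tiles.example.com/{{z}}/{{x}}/{{y}}.png'
        createTileUrl = createTileUrl  # reuse the function above as a method

    TileSource().createTileUrl(x=1, y=2, z=3)
    # -> 'https://tiles.example.com/3/1/2.png'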
def run_apidoc(_):
    """This method is required by the setup method below."""
    import os
    dirname = os.path.dirname(__file__)
    ignore_paths = [
        os.path.join(dirname, '../../aaf2/model'),
    ]
    # https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/apidoc.py
    argv = [
        '--force',
        '--no-toc',
        '--separate',
        '--module-first',
        '--output-dir', os.path.join(dirname, 'api'),
        os.path.join(dirname, '../../aaf2'),
    ] + ignore_paths

    from sphinx.ext import apidoc
    apidoc.main(argv)
def validate_args(self):
    """Input validation!"""
    def validate_name():
        allowed_re = '^[a-z](([a-z0-9_-]+)?([a-z0-9])?)?'
        assert isinstance(self.params['name'], basestring), (
            'Name must be a string, not %s' % repr(self.params['name']))
        assert re.match(allowed_re, self.params['name']), (
            'Invalid rule name: %s. Must match %s.' % (
                repr(self.params['name']), repr(allowed_re)))
    validate_name()

    def validate_deps():
        if 'deps' in self.params:
            assert type(self.params['deps']) in (type(None), list), (
                'Deps must be a list, not %s' % repr(self.params['deps']))
    validate_deps()
def get_change_values(change):
    """
    In the case of deletions, we pull the change values for the XML request
    from the ResourceRecordSet._initial_vals dict, since we want the original
    values. For creations, we pull from the attributes on ResourceRecordSet.

    Since we're dealing with attributes vs. dict key/vals, we'll abstract
    this part away here and just always pass a dict to write_change.

    :rtype: dict
    :returns: A dict of change data, used by :py:func:`write_change` to
        write the change request XML.
    """
    action, rrset = change

    if action == 'CREATE':
        # For creations, we want the current values, since they don't need to
        # match an existing record set.
        values = dict()
        for key, val in rrset._initial_vals.items():
            # Pull from the record set's attributes, which are the current
            # values.
            values[key] = getattr(rrset, key)
        return values
    else:
        # We can look at the initial values dict for deletions, since we
        # have to match against the values currently in Route53.
        return rrset._initial_vals
def count(args):
    """
    %prog count cdhit.consensus.fasta

    Scan the headers for the consensus clusters and count the number of reads.
    """
    from jcvi.graphics.histogram import stem_leaf_plot
    from jcvi.utils.cbook import SummaryStats

    p = OptionParser(count.__doc__)
    p.add_option("--csv", help="Write depth per contig to file")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    csv = open(opts.csv, "w") if opts.csv else None

    f = Fasta(fastafile, lazy=True)
    sizes = []
    for desc, rec in f.iterdescriptions_ordered():
        if desc.startswith("singleton"):
            sizes.append(1)
            continue

        # consensus_for_cluster_0 with 63 sequences
        if "with" in desc:
            name, w, size, seqs = desc.split()
            if csv:
                print("\t".join(str(x) for x in (name, size, len(rec))), file=csv)
            assert w == "with"
            sizes.append(int(size))
        # MRD85:00603:02472;size=167;
        else:
            name, size, tail = desc.split(";")
            sizes.append(int(size.replace("size=", "")))

    if csv:
        csv.close()
        logging.debug("File written to `{0}`".format(opts.csv))

    s = SummaryStats(sizes)
    print(s, file=sys.stderr)
    stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size")
def volume(self) -> float:
    """
    Volume of the unit cell.
    """
    m = self._matrix
    return float(abs(dot(np.cross(m[0], m[1]), m[2])))
def expand_file_names(path, files_root):
    """
    Expands paths (e.g. css/*.css in files_root /actual/path/to/css/files/)
    """
    # For non-wildcards just return the path. This allows us to detect when
    # explicitly listed files are missing.
    if not any(wildcard in path for wildcard in '*?['):
        return [path]
    else:
        dir_path, filename = os.path.split(path)
        return [os.path.join(dir_path, f)
                for f in fnmatch.filter(
                    os.listdir(os.path.join(files_root, dir_path)), filename)]
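A minimal usage sketch, assuming a files_root of /var/www/static containing css/main.css and css/print.css (all paths here are hypothetical):

    expand_file_names('css/*.css', '/var/www/static')
    # -> ['css/main.css', 'css/print.css'] (order follows os.listdir)

    expand_file_names('css/missing.css', '/var/www/static')
    # -> ['css/missing.css'] (non-wildcard paths pass through,
    #    so missing explicit files can be detected later)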
def get_annotations(self):
    """
    Fetch the annotations field, lazily loading it if it does not exist yet.
    """
    try:
        obj_list = self.__dict__['annotations']
        return [Annotation(i) for i in obj_list]
    except KeyError:
        self._lazy_load()
        obj_list = self.__dict__['annotations']
        return [Annotation(i) for i in obj_list]
def posted_data_dict(self):
    """
    All the data that PayPal posted to us, as a correctly parsed dictionary of values.
    """
    if not self.query:
        return None
    from django.http import QueryDict
    roughdecode = dict(item.split('=', 1) for item in self.query.split('&'))
    encoding = roughdecode.get('charset', None)
    if encoding is None:
        encoding = DEFAULT_ENCODING
    query = self.query.encode('ascii')
    data = QueryDict(query, encoding=encoding)
    return data.dict()
def delete_pool_member(hostname, username, password, name, member):
    '''
    Delete an existing pool member.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    name
        The name of the pool to be modified
    member
        The name of the member to delete from the pool
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if __opts__['test']:
        return _test_output(ret, 'delete', params={
            'hostname': hostname,
            'username': username,
            'password': password,
            'name': name,
            'members': member
        })

    #is this pool currently configured?
    existing = __salt__['bigip.list_pool'](hostname, username, password, name)

    # if it exists by name
    if existing['code'] == 200:

        #what are the current members?
        current_members = existing['content']['membersReference']['items']

        #loop through them
        exists = False
        for current_member in current_members:
            if current_member['name'] == member:
                exists = True
                existing_member = current_member
                break

        if exists:
            deleted = __salt__['bigip.delete_pool_member'](hostname, username, password, name, member)
            # did we get rid of it?
            if deleted['code'] == 200:
                ret['result'] = True
                ret['comment'] = 'Pool Member: {member} was successfully deleted.'.format(member=member)
                ret['changes']['old'] = existing_member
                ret['changes']['new'] = {}

        # something bad happened
        else:
            ret['result'] = True
            ret['comment'] = 'This pool member already does not exist. No changes made.'
            ret['changes']['old'] = {}
            ret['changes']['new'] = {}

    else:
        ret = _load_result(existing, ret)

    return ret
def render(template, namespace, app=None):
    '''
    Render the specified template using the Pecan rendering framework
    with the specified template namespace as a dictionary. Useful in a
    controller where you have no template specified in the ``@expose``.

    :param template: The path to your template, as you would specify in
                     ``@expose``.
    :param namespace: The namespace to use for rendering the template,
                      as a dictionary.
    :param app: The instance of :class:`pecan.Pecan` to use
    '''
    app = app or state.app
    return app.render(template, namespace)
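A minimal usage sketch inside a Pecan controller, assuming pecan's expose decorator and this render function are imported, and that a detail.html template exists (the template path and namespace keys are hypothetical):

    class RootController(object):
        @expose()  # no template given here, so we render manually
        def detail(self, item_id):
            return render('detail.html', dict(item_id=item_id))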
def follow(user, obj, send_action=True, actor_only=True, flag='', **kwargs):
    """
    Creates a relationship allowing the object's activities to appear in the
    user's stream.

    Returns the created ``Follow`` instance.

    If ``send_action`` is ``True`` (the default) then a
    ``<user> started following <object>`` action signal is sent.
    Extra keyword arguments are passed to the action.send call.

    If ``actor_only`` is ``True`` (the default) then only actions where the
    object is the actor will appear in the user's activity stream. Set to
    ``False`` to also include actions where this object is the action_object
    or the target.

    If ``flag`` is not an empty string then the relationship will be marked
    with this flag.

    Example::

        follow(request.user, group, actor_only=False)
        follow(request.user, group, actor_only=False, flag='liking')
    """
    check(obj)
    instance, created = apps.get_model('actstream', 'follow').objects.get_or_create(
        user=user, object_id=obj.pk, flag=flag,
        content_type=ContentType.objects.get_for_model(obj),
        actor_only=actor_only
    )
    if send_action and created:
        if not flag:
            action.send(user, verb=_('started following'), target=obj, **kwargs)
        else:
            action.send(user, verb=_('started %s' % flag), target=obj, **kwargs)
    return instance
def make_mixture_prior(latent_size, mixture_components):
    """Creates the mixture of Gaussians prior distribution.

    Args:
      latent_size: The dimensionality of the latent representation.
      mixture_components: Number of elements of the mixture.

    Returns:
      random_prior: A `tfd.Distribution` instance representing the
        distribution over encodings in the absence of any evidence.
    """
    if mixture_components == 1:
        # See the module docstring for why we don't learn the parameters here.
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros([latent_size]),
            scale_identity_multiplier=1.0)

    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])

    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
def unpack_thin(thin_path):
    '''
    Unpack the Salt thin archive.
    '''
    tfile = tarfile.TarFile.gzopen(thin_path)
    old_umask = os.umask(0o077)  # pylint: disable=blacklisted-function
    tfile.extractall(path=OPTIONS.saltdir)
    tfile.close()
    os.umask(old_umask)  # pylint: disable=blacklisted-function
    try:
        os.unlink(thin_path)
    except OSError:
        pass
    reset_time(OPTIONS.saltdir)
def _iter_module_files():
    """This iterates over all relevant Python files.

    It goes through all loaded files from modules, all files in folders of
    already loaded modules as well as all files reachable through a package.
    """
    # The list call is necessary on Python 3 in case the module
    # dictionary modifies during iteration.
    for module in list(sys.modules.values()):
        if module is None:
            continue
        filename = getattr(module, "__file__", None)
        if filename:
            if os.path.isdir(filename) and os.path.exists(
                os.path.join(filename, "__init__.py")
            ):
                filename = os.path.join(filename, "__init__.py")

            old = None
            while not os.path.isfile(filename):
                old = filename
                filename = os.path.dirname(filename)
                if filename == old:
                    break
            else:
                if filename[-4:] in (".pyc", ".pyo"):
                    filename = filename[:-1]
                yield filename
def smooth(x, window_len=7, window='hanning'):
    """
    Smooth the data in x using convolution with a window of requested
    size and type.

    Parameters
    ----------
    x : array_like(float)
        A flat NumPy array containing the data to smooth
    window_len : scalar(int), optional
        An odd integer giving the length of the window. Defaults to 7.
    window : string
        A string giving the window type. Possible values are 'flat',
        'hanning', 'hamming', 'bartlett' or 'blackman'

    Returns
    -------
    array_like(float)
        The smoothed values

    Notes
    -----
    Application of the smoothing window at the top and bottom of x is
    done by reflecting x around these points to extend it sufficiently
    in each direction.
    """
    if len(x) < window_len:
        raise ValueError("Input vector length must be >= window length.")

    if window_len < 3:
        raise ValueError("Window length must be at least 3.")

    if not window_len % 2:  # window_len is even
        window_len += 1
        print("Window length reset to {}".format(window_len))

    windows = {'hanning': np.hanning,
               'hamming': np.hamming,
               'bartlett': np.bartlett,
               'blackman': np.blackman,
               'flat': np.ones  # moving average
               }

    # === Reflect x around x[0] and x[-1] prior to convolution === #
    k = int(window_len / 2)
    xb = x[:k]    # First k elements
    xt = x[-k:]   # Last k elements
    s = np.concatenate((xb[::-1], x, xt[::-1]))

    # === Select window values === #
    if window in windows.keys():
        w = windows[window](window_len)
    else:
        msg = "Unrecognized window type '{}'".format(window)
        print(msg + " Defaulting to hanning")
        w = windows['hanning'](window_len)

    return np.convolve(w / w.sum(), s, mode='valid')
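A minimal usage sketch smoothing a noisy sine wave (the data is illustrative); note that the reflection padding keeps the output the same length as the input:

    import numpy as np

    x = np.sin(np.linspace(0, 4 * np.pi, 100)) + 0.2 * np.random.randn(100)
    y = smooth(x, window_len=9, window='hamming')
    assert y.shape == x.shape  # reflection padding preserves the length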
def search(name,
           jail=None,
           chroot=None,
           root=None,
           exact=False,
           glob=False,
           regex=False,
           pcre=False,
           comment=False,
           desc=False,
           full=False,
           depends=False,
           size=False,
           quiet=False,
           origin=False,
           prefix=False):
    '''
    Searches in remote package repositories

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.search pattern

    jail
        Perform the search using the ``pkg.conf(5)`` from the specified jail

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern jail=<jail name or id>

    chroot
        Perform the search using the ``pkg.conf(5)`` from the specified
        chroot (ignored if ``jail`` is specified)

    root
        Perform the search using the ``pkg.conf(5)`` from the specified
        root (ignored if ``jail`` is specified)

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern chroot=/path/to/chroot

    exact
        Treat pattern as exact pattern.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern exact=True

    glob
        Treat pattern as a shell glob pattern.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern glob=True

    regex
        Treat pattern as a regular expression.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern regex=True

    pcre
        Treat pattern as an extended regular expression.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern pcre=True

    comment
        Search for pattern in the package comment one-line description.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern comment=True

    desc
        Search for pattern in the package description.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern desc=True

    full
        Displays full information about the matching packages.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern full=True

    depends
        Displays the dependencies of pattern.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern depends=True

    size
        Displays the size of the package

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern size=True

    quiet
        Be quiet. Prints only the requested information without displaying
        many hints.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern quiet=True

    origin
        Displays pattern origin.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern origin=True

    prefix
        Displays the installation prefix for each package matching pattern.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.search pattern prefix=True
    '''
    opts = ''
    if exact:
        opts += 'e'
    if glob:
        opts += 'g'
    if regex:
        opts += 'x'
    if pcre:
        opts += 'X'
    if comment:
        opts += 'c'
    if desc:
        opts += 'D'
    if full:
        opts += 'f'
    if depends:
        opts += 'd'
    if size:
        opts += 's'
    if quiet:
        opts += 'q'
    if origin:
        opts += 'o'
    if prefix:
        opts += 'p'

    cmd = _pkg(jail, chroot, root)
    cmd.append('search')
    if opts:
        cmd.append('-' + opts)
    cmd.append(name)
    return __salt__['cmd.run'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )
def _cloglog_transform_deriv_v(systematic_utilities,
                               alt_IDs,
                               rows_to_alts,
                               shape_params,
                               output_array=None,
                               *args, **kwargs):
    """
    Parameters
    ----------
    systematic_utilities : 1D ndarray.
        All elements should be ints, floats, or longs. Should contain the
        systematic utilities of each observation per available alternative.
        Note that this vector is formed by the dot product of the design
        matrix with the vector of utility coefficients.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation
        per available alternative for the given observation. Elements denote
        the alternative corresponding to the given row of the design matrix.
    rows_to_alts : 2D scipy sparse matrix.
        There should be one row per observation per available alternative
        and one column per possible alternative. This matrix maps the rows
        of the design matrix to the possible alternatives for this dataset.
        All elements should be zeros or ones.
    shape_params : None or 1D ndarray.
        If an array, each element should be an int, float, or long. There
        should be one value per shape parameter of the model being used.
    output_array : 2D scipy sparse array.
        The array should be square and it should have
        `systematic_utilities.shape[0]` rows. Its data is to be replaced
        with the correct derivatives of the transformation vector with
        respect to the vector of systematic utilities. This argument is
        NOT optional.

    Returns
    -------
    output_array : 2D scipy sparse array.
        The shape of the returned array is `(systematic_utilities.shape[0],
        systematic_utilities.shape[0])`. The returned array specifies the
        derivative of the transformed utilities with respect to the
        systematic utilities. All elements are ints, floats, or longs.
    """
    exp_neg_v = np.exp(-1 * systematic_utilities)
    exp_v = np.exp(systematic_utilities)
    denom_part_1 = 1 - np.exp(-1 * exp_v)

    ##########
    # Guard against numeric overflow and underflow
    ##########
    exp_neg_v[np.isposinf(exp_neg_v)] = max_comp_value
    exp_neg_v[np.where(exp_neg_v == 0)] = min_comp_value

    # Note that we don't care about the limiting cases of exp_v.
    # This term can go to positive infinity or zero. If it goes to positive
    # infinity, then this is okay because denom_part_1 will just go to 1.
    # If exp_v goes to zero, then denom_part_1 will go to zero. We will simply
    # cater to that last outcome since we can't divide by zero. The next line
    # is retained to show what should NOT be done. We will use L'Hopital's
    # rule after calculating derivs, as should be done.
    # denom_part_1[np.where(denom_part_1 == 0)] = min_comp_value

    ##########
    # Calculate the required derivatives and guard against underflow
    ##########
    derivs = 1.0 / (denom_part_1 * exp_neg_v)

    # Note that the limiting value of the expression above, as the systematic
    # utility goes to negative infinity (i.e. as denom_part_1 goes to zero),
    # is one. This can be checked using L'Hopital's rule. We will define
    # infinity as being so negative that `denom_part_1 == 0`
    derivs[np.where(denom_part_1 == 0)] = 1
    derivs[np.isposinf(derivs)] = max_comp_value

    # Assign the calculated derivatives to the output array
    output_array.data = derivs

    # Return the matrix of dh_dv. Note the off-diagonal entries are zero
    # because each transformation only depends on its value of v and no others
    return output_array
def dashboard(request):
    "Counts, aggregations and more!"
    end_time = now()
    start_time = end_time - timedelta(days=7)
    defaults = {'start': start_time, 'end': end_time}

    form = DashboardForm(data=request.GET or defaults)
    if form.is_valid():
        start_time = form.cleaned_data['start']
        end_time = form.cleaned_data['end']

    # determine when tracking began
    try:
        obj = Visitor.objects.order_by('start_time')[0]
        track_start_time = obj.start_time
    except (IndexError, Visitor.DoesNotExist):
        track_start_time = now()

    # If the start_date is before tracking began, warn about incomplete data
    warn_incomplete = (start_time < track_start_time)

    # queries take `date` objects (for now)
    user_stats = Visitor.objects.user_stats(start_time, end_time)
    visitor_stats = Visitor.objects.stats(start_time, end_time)
    if TRACK_PAGEVIEWS:
        pageview_stats = Pageview.objects.stats(start_time, end_time)
    else:
        pageview_stats = None

    context = {
        'form': form,
        'track_start_time': track_start_time,
        'warn_incomplete': warn_incomplete,
        'user_stats': user_stats,
        'visitor_stats': visitor_stats,
        'pageview_stats': pageview_stats,
    }
    return render(request, 'tracking/dashboard.html', context)
def get(cls, resource_type):
    """Returns the ResourceType object for `resource_type`. If no existing
    object was found, a new type will be created in the database and returned

    Args:
        resource_type (str): Resource type name

    Returns:
        :obj:`ResourceType`
    """
    if isinstance(resource_type, str):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
    elif isinstance(resource_type, int):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
    elif isinstance(resource_type, cls):
        return resource_type
    else:
        obj = None

    if not obj:
        obj = cls()
        obj.resource_type = resource_type

        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
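A minimal usage sketch, assuming this classmethod is called on a concrete ResourceType model with a configured database session (the type name is hypothetical):

    # Returns the existing row, or creates and commits a new one.
    rt = ResourceType.get('aws_ec2_instance')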
def CreatePattern(patternId: int, pattern: ctypes.POINTER(comtypes.IUnknown)):
    """Create a concrete pattern by pattern id and pattern(POINTER(IUnknown))."""
    subPattern = pattern.QueryInterface(GetPatternIdInterface(patternId))
    if subPattern:
        return PatternConstructors[patternId](pattern=subPattern)
def transform_folder(args):
    """
    Transform all the files in the source dataset for the given command and
    save the results as a single pickle file in the destination dataset

    :param args: tuple with the following arguments:
        - the command name: 'zero', 'one', 'two', ...
        - transforms to apply to wav file
        - full path of the source dataset
        - full path of the destination dataset
    """
    command, (transform, src, dest) = args
    try:
        print(progress.value, "remaining")

        # Apply transformations to all files
        data = []
        data_dir = os.path.join(src, command)
        for filename in os.listdir(data_dir):
            path = os.path.join(data_dir, filename)
            data.append(transform({'path': path}))

        # Save results
        pickleFile = os.path.join(dest, "{}.pkl".format(command))
        gc.disable()
        with open(pickleFile, "wb") as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        gc.enable()

        # Update progress
        with progress.get_lock():
            progress.value -= 1
    except Exception as e:
        print(command, e, file=sys.stderr)
        traceback.print_exc()
def zadd(self, key, score, member, *pairs, exist=None):
    """Add one or more members to a sorted set or update its score.

    :raises TypeError: score not int or float
    :raises TypeError: length of pairs is not even number
    """
    if not isinstance(score, (int, float)):
        raise TypeError("score argument must be int or float")
    if len(pairs) % 2 != 0:
        raise TypeError("length of pairs must be even number")
    scores = (item for i, item in enumerate(pairs) if i % 2 == 0)
    if any(not isinstance(s, (int, float)) for s in scores):
        raise TypeError("all scores must be int or float")

    args = []
    if exist is self.ZSET_IF_EXIST:
        args.append(b'XX')
    elif exist is self.ZSET_IF_NOT_EXIST:
        args.append(b'NX')

    args.extend([score, member])
    if pairs:
        args.extend(pairs)
    return self.execute(b'ZADD', key, *args)
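A minimal usage sketch, assuming redis is a connected client instance exposing this coroutine (the variable, key, and member names are hypothetical):

    # Trailing pairs alternate score, member - just like the leading pair.
    await redis.zadd('leaderboard', 100, 'alice', 95, 'bob')

    # Only update members that already exist, never create new ones:
    await redis.zadd('leaderboard', 120, 'alice', exist=redis.ZSET_IF_EXIST)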
def _set_channels(self):
    """Sets the main channels for the pipeline

    This method will parse the :attr:`~Process.processes` attribute and
    perform the following tasks for each process:

        - Sets the input/output channels and main input forks and adds
          them to the process's
          :attr:`flowcraft.process.Process._context`
          attribute (See :func:`~NextflowGenerator.set_channels`).
        - Automatically updates the main input channel of the first
          process of each lane so that they fork from the user-provided
          parameters (See :func:`~NextflowGenerator._update_raw_input`).
        - Check for the presence of secondary channels and adds them to
          the :attr:`~NextflowGenerator.secondary_channels` attribute.

    Notes
    -----
    **On the secondary channel setup**: With this approach, there can
    only be one secondary link start for each type of secondary link.
    For instance, if there are two processes that start a secondary
    channel for the ``SIDE_max_len`` channel, only the last one will be
    recorded, and all receiving processes will get the channel from the
    latest process. Secondary channels can only link if the source
    process is downstream of the sink process in its "forking" path.
    """
    logger.debug("=====================")
    logger.debug("Setting main channels")
    logger.debug("=====================")

    for i, p in enumerate(self.processes):

        # Set main channels for the process
        logger.debug("[{}] Setting main channels with pid: {}".format(
            p.template, i))
        p.set_channels(pid=i)

        # If there is no parent lane, set the raw input channel from user
        logger.debug("{} {} {}".format(p.parent_lane, p.input_type, p.template))
        if not p.parent_lane and p.input_type:
            self._update_raw_input(p)

        self._update_extra_inputs(p)

        self._update_secondary_channels(p)

        logger.info(colored_print(
            "\tChannels set for {} \u2713".format(p.template)))
def last_first_initial(self):
    """Return a name in the format of:
    Lastname, F [(Nickname)]
    """
    return ("{}{} ".format(self.last_name,
                           ", " + self.first_name[:1] + "." if self.first_name else "") +
            ("({}) ".format(self.nickname) if self.nickname else ""))
def get_state_actions(self, state, **kwargs):
    """
    Creates all missing containers, networks, and volumes.

    :param state: Configuration state.
    :type state: dockermap.map.state.ConfigState
    :param kwargs: Additional keyword arguments.
    :return: Actions on the client, map, and configurations.
    :rtype: list[dockermap.map.action.ItemAction]
    """
    if state.base_state == State.ABSENT:
        if state.config_id.config_type == ItemType.IMAGE:
            return [ItemAction(state, ImageAction.PULL)]
        actions = [ItemAction(state, Action.CREATE, extra_data=kwargs)]
        if state.config_id.config_type == ItemType.CONTAINER:
            actions.append(ItemAction(state, ContainerUtilAction.CONNECT_ALL))
        return actions
def getfield(self, pkt, s):
    """
    We try to compute a length, usually from a msglen parsed earlier.
    If this length is 0, we consider 'selection_present' (from RFC 5246)
    to be False. This means that there should not be any length field.
    However, with TLS 1.3, zero lengths are always explicit.
    """
    ext = pkt.get_field(self.length_of)
    tmp_len = ext.length_from(pkt)
    if tmp_len is None or tmp_len <= 0:
        v = pkt.tls_session.tls_version
        if v is None or v < 0x0304:
            return s, None
    return super(_ExtensionsLenField, self).getfield(pkt, s)
def edit(i):  # pragma: no cover
    """
    Input:  {
              (repo_uoa)      - repo UOA
              module_uoa      - module UOA
              data_uoa        - data UOA

              (ignore_update) - (default==yes) if 'yes', do not add info about update
              (sort_keys)     - (default==yes) if 'yes', sort keys
              (edit_desc)     - if 'yes', edit description rather than meta
                                (useful for compiler descriptions)
            }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
            }
    """
    o=i.get('out','')

    ruoa=i.get('repo_uoa','')
    muoa=i.get('module_uoa','')
    duoa=i.get('data_uoa','')

    iu=i.get('ignore_update','')
    if iu=='': iu='yes'

    ed=i.get('edit_desc','')

    sk=i.get('sort_keys','')
    if sk=='': sk='yes'

    ii={'action':'load',
        'repo_uoa':ruoa,
        'module_uoa':muoa,
        'data_uoa':duoa,
        'common_func':'yes'}
    r=access(ii)
    if r['return']>0: return r

    desc=r.get('desc',{})
    meta=r['dict']

    # Record to tmp file
    import tempfile
    fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-')  # suffix is important - CK will delete such file!
    os.close(fd)
    os.remove(fn)

    if ed=='yes':
        dd=desc
    else:
        dd=meta

    r=save_json_to_file({'json_file':fn, 'dict':dd, 'sort_keys':sk})
    if r['return']>0: return r

    # Get OS
    r=get_os_ck({})
    if r['return']>0: return r

    plat=r['platform']

    x=cfg['external_editor'][plat].replace('$#filename#$', fn)

    os.system(x)

    # Load file
    r=load_json_file({'json_file':fn})
    if r['return']>0: return r

    if ed=='yes':
        desc=r['dict']
    else:
        meta=r['dict']

    # Update entry to finish sync/indexing
    ii={'action':'update',
        'repo_uoa':ruoa,
        'module_uoa':muoa,
        'data_uoa':duoa,
        'common_func':'yes',
        'ignore_update':iu,
        'dict':meta,
        'desc':desc,
        'substitute':'yes',
        'sort_keys':sk,
        'out':o}
    r=access(ii)

    # Delete tmp file
    if os.path.isfile(fn):
        os.remove(fn)

    return r
def get_subscription_by_channel_id_and_endpoint_id(
        self, channel_id, endpoint_id):
    """
    Search for subscription by a given channel and endpoint
    """
    subscriptions = self.search_subscriptions(
        channel_id=channel_id, endpoint_id=endpoint_id)
    try:
        return subscriptions[0]
    except IndexError:
        # 'url' was undefined in the original; a descriptive placeholder
        # is used here so the exception can be raised without a NameError.
        url = "subscription search (channel_id=%s, endpoint_id=%s)" % (
            channel_id, endpoint_id)
        raise DataFailureException(url, 404, "No subscription found")
def search_reports(self,
                   search_term=None,
                   enclave_ids=None,
                   from_time=None,
                   to_time=None,
                   tags=None,
                   excluded_tags=None):
    """
    Uses the |search_reports_page| method to create a generator that returns
    each successive report.

    :param str search_term: The term to search for. If empty, no search term
        will be applied. Otherwise, must be at least 3 characters.
    :param list(str) enclave_ids: list of enclave ids used to restrict reports
        to specific enclaves (optional - by default reports from all of user's
        enclaves are returned)
    :param int from_time: start of time window in milliseconds since epoch (optional)
    :param int to_time: end of time window in milliseconds since epoch (optional)
    :param list(str) tags: Name (or list of names) of tag(s) to filter reports
        by. Only reports containing ALL of these tags will be returned. (optional)
    :param list(str) excluded_tags: Reports containing ANY of these tags will
        be excluded from the results.
    :return: The generator of Report objects. Note that the body attributes
        of these reports will be ``None``.
    """
    return Page.get_generator(
        page_generator=self._search_reports_page_generator(search_term,
                                                           enclave_ids,
                                                           from_time,
                                                           to_time,
                                                           tags,
                                                           excluded_tags))
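A minimal usage sketch, assuming ts is an authenticated client exposing this method (the client variable, search term, and tag are hypothetical):

    # Iterate lazily over every matching report, page by page.
    for report in ts.search_reports(search_term='phishing', tags=['malware']):
        print(report.id, report.title)  # body attributes are None, per the docstring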
def main(device_type):
    """Run ssh-agent using given hardware client factory."""
    args = create_agent_parser(device_type=device_type).parse_args()
    util.setup_logging(verbosity=args.verbose, filename=args.log_file)

    public_keys = None
    filename = None
    if args.identity.startswith('/'):
        filename = args.identity
        contents = open(filename, 'rb').read().decode('utf-8')
        # Allow loading previously exported SSH public keys
        if filename.endswith('.pub'):
            public_keys = list(import_public_keys(contents))
        identities = list(parse_config(contents))
    else:
        identities = [device.interface.Identity(
            identity_str=args.identity, curve_name=args.ecdsa_curve_name)]
    for index, identity in enumerate(identities):
        identity.identity_dict['proto'] = u'ssh'
        log.info('identity #%d: %s', index, identity.to_string())

    # override default PIN/passphrase entry tools (relevant for TREZOR/Keepkey):
    device_type.ui = device.ui.UI(device_type=device_type, config=vars(args))
    device_type.ui.cached_passphrase_ack = util.ExpiringCache(
        args.cache_expiry_seconds)

    conn = JustInTimeConnection(
        conn_factory=lambda: client.Client(device_type()),
        identities=identities, public_keys=public_keys)

    sock_path = _get_sock_path(args)
    command = args.command
    context = _dummy_context()
    if args.connect:
        command = ['ssh'] + ssh_args(conn) + args.command
    elif args.mosh:
        command = ['mosh'] + mosh_args(conn) + args.command
    elif args.daemonize:
        out = 'SSH_AUTH_SOCK={0}; export SSH_AUTH_SOCK;\n'.format(sock_path)
        sys.stdout.write(out)
        sys.stdout.flush()
        context = daemon.DaemonContext()
        log.info('running the agent as a daemon on %s', sock_path)
    elif args.foreground:
        log.info('running the agent on %s', sock_path)

    use_shell = bool(args.shell)
    if use_shell:
        command = os.environ['SHELL']
        sys.stdin.close()

    if command or args.daemonize or args.foreground:
        with context:
            return run_server(conn=conn, command=command, sock_path=sock_path,
                              debug=args.debug, timeout=args.timeout)
    else:
        for pk in conn.public_keys():
            sys.stdout.write(pk)
        return 0
Run ssh-agent using given hardware client factory.
def send(self, stream, retry=16, timeout=60, quiet=0, callback=None):
        '''
        Send a stream via the XMODEM protocol.

            >>> stream = open('/etc/issue', 'rb')
            >>> print(modem.send(stream))
            True

        Returns ``True`` upon successful transmission or ``False`` in
        case of failure.

        :param stream: The stream object to send data from.
        :type stream: stream (file, etc.)
        :param retry: The maximum number of times to try to resend a failed
                      packet before failing.
        :type retry: int
        :param timeout: The number of seconds to wait for a response before
                        timing out.
        :type timeout: int
        :param quiet: If 0, it prints info to stderr. If 1, it does not print any info.
        :type quiet: int
        :param callback: Reference to a callback function that has the
                         following signature. This is useful for getting
                         status updates while an XMODEM transfer is underway.
                         Expected callback signature:
                         def callback(total_packets, success_count, error_count)
        :type callback: callable
        '''

        # initialize protocol
        try:
            packet_size = dict(
                xmodem    = 128,
                xmodem1k  = 1024,
            )[self.mode]
        except AttributeError:
            raise ValueError("An invalid mode was supplied")

        error_count = 0
        crc_mode = 0
        cancel = 0
        while True:
            char = self.getc(1)
            if char:
                if char == NAK:
                    crc_mode = 0
                    break
                elif char == CRC:
                    crc_mode = 1
                    break
                elif char == CAN:
                    if not quiet:
                        sys.stderr.write('received CAN\n')
                    if cancel:
                        return False
                    else:
                        cancel = 1
                else:
                    log.error('send ERROR expected NAK/CRC, got %s' % \
                        (ord(char),))

            error_count += 1
            if error_count >= retry:
                self.abort(timeout=timeout)
                return False

        # send data
        error_count = 0
        success_count = 0
        total_packets = 0
        sequence = 1
        while True:
            data = stream.read(packet_size)
            if not data:
                log.info('sending EOT')
                # end of stream
                break
            total_packets += 1
            data = data.ljust(packet_size, self.pad)
            if crc_mode:
                crc = self.calc_crc(data)
            else:
                crc = self.calc_checksum(data)

            # emit packet
            while True:
                if packet_size == 128:
                    self.putc(SOH)
                else:  # packet_size == 1024
                    self.putc(STX)
                self.putc(chr(sequence))
                self.putc(chr(0xff - sequence))
                self.putc(data)
                if crc_mode:
                    self.putc(chr(crc >> 8))
                    self.putc(chr(crc & 0xff))
                else:
                    self.putc(chr(crc))

                char = self.getc(1, timeout)
                if char == ACK:
                    success_count += 1
                    if callable(callback):
                        callback(total_packets, success_count, error_count)
                    break
                if char == NAK:
                    error_count += 1
                    if callable(callback):
                        callback(total_packets, success_count, error_count)
                    if error_count >= retry:
                        # excessive amounts of retransmissions requested,
                        # abort transfer
                        self.abort(timeout=timeout)
                        log.warning('excessive NAKs, transfer aborted')
                        return False

                    # return to loop and resend
                    continue

                # protocol error
                self.abort(timeout=timeout)
                log.error('protocol error')
                return False

            # keep track of sequence
            sequence = (sequence + 1) % 0x100

        while True:
            # end of transmission
            self.putc(EOT)

            # An ACK should be returned
            char = self.getc(1, timeout)
            if char == ACK:
                break
            else:
                error_count += 1
                if error_count >= retry:
                    self.abort(timeout=timeout)
                    log.warning('EOT was not ACKd, transfer aborted')
                    return False

        return True
Send a stream via the XMODEM protocol. >>> stream = open('/etc/issue', 'rb') >>> print(modem.send(stream)) True Returns ``True`` upon successful transmission or ``False`` in case of failure. :param stream: The stream object to send data from. :type stream: stream (file, etc.) :param retry: The maximum number of times to try to resend a failed packet before failing. :type retry: int :param timeout: The number of seconds to wait for a response before timing out. :type timeout: int :param quiet: If 0, it prints info to stderr. If 1, it does not print any info. :type quiet: int :param callback: Reference to a callback function that has the following signature. This is useful for getting status updates while an XMODEM transfer is underway. Expected callback signature: def callback(total_packets, success_count, error_count) :type callback: callable
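A usage sketch following the docstring, with Python 3 file handling; the pyserial wiring for getc/putc is an assumption about how the transport callables are usually supplied.
# Usage sketch: send a file over a serial port via XMODEM.
import serial
from xmodem import XMODEM

port = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)

def getc(size, timeout=1):
    data = port.read(size)
    return data or None  # XMODEM expects None on timeout

def putc(data, timeout=1):
    return port.write(data)

modem = XMODEM(getc, putc)
with open('/etc/issue', 'rb') as stream:
    print(modem.send(stream))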
def update_widget(self, idx=None):
        """Forces the widget at given index to be updated from the
        property value. If index is not given, all controlled
        widgets will be updated. This method should be called
        directly by the user when the property is not observable,
        or in very unusual conditions."""
        if idx is None:
            for w in self._widgets:
                idx = self._get_idx_from_widget(w)
                self._write_widget(self._read_property(idx), idx)
        else:
            self._write_widget(self._read_property(idx), idx)
Forces the widget at given index to be updated from the property value. If index is not given, all controlled widgets will be updated. This method should be called directly by the user when the property is not observable, or in very unusual conditions.
def delete(config, username, type): """Delete an LDAP user.""" client = Client() client.prepare_connection() user_api = API(client) user_api.delete(username, type)
Delete an LDAP user.
def parse_user_params(user_params):
    """
    Parse the user params (-p/--params) and return them as a dict.
    """
    if user_params:
        params = {}
        try:
            for param in user_params.split(','):
                param_key, param_value = param.split('=', 1)
                params[param_key] = param_value
        except ValueError:
            sys.stdout.write("Invalid params specified. Should be in format: <key=value>[,<key=value>..]\n")
            sys.exit(1)

        return params
    else:
        return {}
Parse the user params (-p/--params) and return them as a dict.
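A quick sketch of the expected input and output, derived directly from the parsing loop above:
params = parse_user_params("env=prod,retries=3")
# -> {'env': 'prod', 'retries': '3'}  (values keep any '=' after the first split)
params = parse_user_params(None)
# -> {}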
def detach(self): """Returns a new NDArray, detached from the current graph.""" from . import _ndarray_cls hdl = NDArrayHandle() check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl))) return _ndarray_cls(hdl)
Returns a new NDArray, detached from the current graph.
def add_relation(app_f, app_t, weight=1):
    '''
    Add a relation between two posts.
    '''
    recs = TabRel.select().where(
        (TabRel.post_f_id == app_f) &
        (TabRel.post_t_id == app_t)
    )
    if recs.count() > 1:
        for record in recs:
            MRelation.delete(record.uid)
    if recs.count() == 0:
        uid = tools.get_uuid()
        entry = TabRel.create(
            uid=uid,
            post_f_id=app_f,
            post_t_id=app_t,
            count=1,
        )
        return entry.uid
    elif recs.count() == 1:
        MRelation.update_relation(app_f, app_t, weight)
    else:
        return False
Add a relation between two posts.
def add_sources_from_roi(self, names, roi, free=False, **kwargs):
    """Add multiple sources to the current ROI model copied from another
    ROI model.

    Parameters
    ----------
    names : list
        List of str source names to add.

    roi : `~fermipy.roi_model.ROIModel` object
        The roi model from which to add sources.

    free : bool
        Initialize the source with a free normalization parameter.

    """

    for name in names:
        self.add_source(name, roi[name].data, free=free, **kwargs)
Add multiple sources to the current ROI model copied from another ROI model.

Parameters
----------
names : list
    List of str source names to add.

roi : `~fermipy.roi_model.ROIModel` object
    The roi model from which to add sources.

free : bool
    Initialize the source with a free normalization parameter.
def yaml2tree(cls, yamltree): """Class method that creates a tree from YAML. | # Example yamltree data: | - !Node &root | name: "root node" | parent: null | data: | testpara: 111 | - !Node &child1 | name: "child node" | parent: *root | - !Node &gc1 | name: "grand-child node" | parent: *child1 :param yamltree: a string of YAML describing the nodes in the tree, or the path to a file containing the data. :type yamltree: str :returns: the root node of the tree. :rtype: Node """ if not cls.YAML_setup: cls.setup_yaml() cls.YAML_setup = True if os.path.isfile(yamltree): with open(yamltree) as fh: yaml_data = fh.read() else: yaml_data = yamltree list_of_nodes = yaml.safe_load(yaml_data) yamltree_root = list_of_nodes[0] return yamltree_root
Class method that creates a tree from YAML. | # Example yamltree data: | - !Node &root | name: "root node" | parent: null | data: | testpara: 111 | - !Node &child1 | name: "child node" | parent: *root | - !Node &gc1 | name: "grand-child node" | parent: *child1 :param yamltree: a string of YAML describing the nodes in the tree, or the path to a file containing the data. :type yamltree: str :returns: the root node of the tree. :rtype: Node
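A usage sketch reusing the docstring's example document; the Node class name and its `name` attribute come from the docstring's YAML tags and return type, and the rest of the setup is an assumption.
# Usage sketch: build a tree from an inline YAML document.
yaml_doc = """
- !Node &root
  name: "root node"
  parent: null
- !Node &child1
  name: "child node"
  parent: *root
"""
root = Node.yaml2tree(yaml_doc)
print(root.name)  # "root node"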
def get_ser_val_alt(lat: float, lon: float,
                    da_alt_x: xr.DataArray,
                    da_alt: xr.DataArray, da_val: xr.DataArray) -> pd.Series:
    '''interpolate atmospheric variable to a specified altitude

    Parameters
    ----------
    lat : float
        latitude of specified site
    lon : float
        longitude of specified site
    da_alt_x : xr.DataArray
        desired altitude to interpolate variable at
    da_alt : xr.DataArray
        altitude associated with `da_val`: variable array to interpolate
    da_val : xr.DataArray
        atmospheric variable to interpolate

    Returns
    -------
    pd.Series
        interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
    '''

    alt_t_1d = da_alt.sel(
        latitude=lat, longitude=lon, method='nearest')
    val_t_1d = da_val.sel(
        latitude=lat, longitude=lon, method='nearest')
    alt_x = da_alt_x.sel(
        latitude=lat, longitude=lon, method='nearest')[0]
    val_alt = np.array(
        [interp1d(alt_1d, val_1d)(alt_x)
         for alt_1d, val_1d in zip(alt_t_1d, val_t_1d)])
    ser_alt = pd.Series(
        val_alt,
        index=da_val.time.values,
        name=da_val.name,
    )

    return ser_alt
interpolate atmospheric variable to a specified altitude

Parameters
----------
lat : float
    latitude of specified site
lon : float
    longitude of specified site
da_alt_x : xr.DataArray
    desired altitude to interpolate variable at
da_alt : xr.DataArray
    altitude associated with `da_val`: variable array to interpolate
da_val : xr.DataArray
    atmospheric variable to interpolate

Returns
-------
pd.Series
    interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
def transform(self, X): ''' Transform a list of bag features into its projection series representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. The data should all lie in [0, 1]; use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not. Returns ------- X_new : integer array, shape ``[len(X), dim_]`` X transformed into the new space. ''' self._check_fitted() M = self.smoothness dim = self.dim_ inds = self.inds_ do_check = self.do_bounds_check X = as_features(X) if X.dim != dim: msg = "model fit for dimension {} but got dim {}" raise ValueError(msg.format(dim, X.dim)) Xt = np.empty((len(X), self.inds_.shape[0])) Xt.fill(np.nan) if self.basis == 'cosine': # TODO: put this in a C extension? coefs = (np.pi * np.arange(M + 1))[..., :] for i, bag in enumerate(X): if do_check: if np.min(bag) < 0 or np.max(bag) > 1: raise ValueError("Bag {} not in [0, 1]".format(i)) # apply each phi func to each dataset point: n x dim x M phi = coefs * bag[..., np.newaxis] np.cos(phi, out=phi) phi[:, :, 1:] *= np.sqrt(2) # B is the evaluation of each tensor-prodded basis func # at each point: n x inds.shape[0] B = reduce(op.mul, (phi[:, i, inds[:, i]] for i in xrange(dim))) Xt[i, :] = np.mean(B, axis=0) else: raise ValueError("unknown basis '{}'".format(self.basis)) return Xt
Transform a list of bag features into its projection series representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. The data should all lie in [0, 1]; use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not. Returns ------- X_new : integer array, shape ``[len(X), dim_]`` X transformed into the new space.
def open(self):
    """ Setup serial port and set it as the escpos device """
    self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)

    if self.device is not None:
        print("Serial printer enabled")
    else:
        print("Unable to open serial printer on: %s" % self.devfile)
Setup serial port and set it as the escpos device
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
    """
    Analyze the void space in the input structure using voronoi decomposition.
    Calls Zeo++ for Voronoi decomposition.

    Args:
        structure: pymatgen.core.structure.Structure
        rad_dict (optional): Dictionary of radii of elements in structure.
            If not given, Zeo++ default values are used.
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii
        probe_rad (optional): Sampling probe radius in Angstroms.
            Default is 0.1 A

    Returns:
        dict of free sphere parameters with keys 'inc_sph_max_dia',
        'free_sph_max_dia' and 'inc_sph_along_free_sph_path_max_dia'
    """
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        rad_flag = False

        if rad_dict:
            rad_file = name + ".rad"
            rad_flag = True
            with open(rad_file, 'w+') as fp:
                for el in rad_dict.keys():
                    fp.write("{} {}\n".format(el, rad_dict[el].real))

        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        out_file = "temp.res"
        atmnet.calculate_free_sphere_parameters(out_file)
        if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
            with open(out_file, "rt") as fp:
                output = fp.readline()
        else:
            output = ""
    fields = [val.strip() for val in output.split()][1:4]
    free_sphere_params = None
    if len(fields) == 3:
        fields = [float(field) for field in fields]

        free_sphere_params = {'inc_sph_max_dia': fields[0],
                              'free_sph_max_dia': fields[1],
                              'inc_sph_along_free_sph_path_max_dia': fields[2]}
    return free_sphere_params
Analyze the void space in the input structure using voronoi decomposition. Calls Zeo++ for Voronoi decomposition.

Args:
    structure: pymatgen.core.structure.Structure
    rad_dict (optional): Dictionary of radii of elements in structure.
        If not given, Zeo++ default values are used.
        Note: Zeo++ uses atomic radii of elements.
        For ionic structures, pass rad_dict with ionic radii
    probe_rad (optional): Sampling probe radius in Angstroms.
        Default is 0.1 A

Returns:
    dict of free sphere parameters with keys 'inc_sph_max_dia',
    'free_sph_max_dia' and 'inc_sph_along_free_sph_path_max_dia'
def generate_files(engine, crypto_factory, min_dt=None, max_dt=None, logger=None):
    """
    Create a generator of decrypted files.

    Files are yielded in ascending order of their timestamp.

    This function selects all current files (optionally, falling within a
    datetime range), decrypts them, and returns a generator yielding dicts,
    each containing a decoded file and metadata including the user,
    filepath, and timestamp.

    Parameters
    ----------
    engine : SQLAlchemy.engine
        Engine encapsulating database connections.
    crypto_factory : function[str -> Any]
        A function from user_id to an object providing the interface
        required by PostgresContentsManager.crypto.  Results of this
        will be used for decryption of the selected files.
    min_dt : datetime.datetime, optional
        Minimum last modified datetime at which a file will be included.
    max_dt : datetime.datetime, optional
        Last modified datetime at and after which a file will be excluded.
    logger : Logger, optional
    """
    return _generate_notebooks(files, files.c.created_at, engine,
                               crypto_factory, min_dt, max_dt, logger)
Create a generator of decrypted files.

Files are yielded in ascending order of their timestamp.

This function selects all current files (optionally, falling within a datetime range), decrypts them, and returns a generator yielding dicts, each containing a decoded file and metadata including the user, filepath, and timestamp.

Parameters
----------
engine : SQLAlchemy.engine
    Engine encapsulating database connections.
crypto_factory : function[str -> Any]
    A function from user_id to an object providing the interface required by PostgresContentsManager.crypto. Results of this will be used for decryption of the selected files.
min_dt : datetime.datetime, optional
    Minimum last modified datetime at which a file will be included.
max_dt : datetime.datetime, optional
    Last modified datetime at and after which a file will be excluded.
logger : Logger, optional
def nv_tuple_list_replace(l, v):
    """ Replace a (name, value) tuple in a tuple list, matching on the
    first element; append it if no existing tuple matches. """
    _found = False
    for i, x in enumerate(l):
        if x[0] == v[0]:
            l[i] = v
            _found = True

    if not _found:
        l.append(v)
Replace a (name, value) tuple in a tuple list, matching on the first element; append it if no existing tuple matches.
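The list is mutated in place; a quick sketch derived from the matching logic above:
pairs = [('host', 'localhost'), ('port', 80)]
nv_tuple_list_replace(pairs, ('port', 8080))   # first element matches: replaced
nv_tuple_list_replace(pairs, ('debug', True))  # no match: appended
# pairs == [('host', 'localhost'), ('port', 8080), ('debug', True)]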
def _hybrid_select_metrics(self, dup_bam, bait_file, target_file): """Generate metrics for hybrid selection efficiency. """ metrics = self._check_metrics_file(dup_bam, "hs_metrics") if not file_exists(metrics): with bed_to_interval(bait_file, dup_bam) as ready_bait: with bed_to_interval(target_file, dup_bam) as ready_target: with file_transaction(metrics) as tx_metrics: opts = [("BAIT_INTERVALS", ready_bait), ("TARGET_INTERVALS", ready_target), ("INPUT", dup_bam), ("OUTPUT", tx_metrics)] try: self._picard.run("CollectHsMetrics", opts) # HsMetrics fails regularly with memory errors # so we catch and skip instead of aborting the # full process except subprocess.CalledProcessError: return None return metrics
Generate metrics for hybrid selection efficiency.
def deleted(message): """Create a Deleted response builder with specified message.""" def deleted(value, _context, **_params): return Deleted(value, message) return deleted
Create a Deleted response builder with specified message.
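A sketch of how the builder composes, taken directly from the closure above; the value and context arguments are illustrative.
# Usage sketch: `deleted` returns a factory that ignores context and params
# and wraps the value in a Deleted response with the fixed message.
build = deleted("user removed")
response = build("user-42", None)  # -> Deleted("user-42", "user removed")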
def get_software_package_compilation_timestamp(cls,calc,**kwargs): """ Returns the timestamp of package/program compilation in ISO 8601 format. """ from dateutil.parser import parse try: date = calc.out.job_info.get_dict()['compiled'] return parse(date.replace('_', ' ')).isoformat() except Exception: return None
Returns the timestamp of package/program compilation in ISO 8601 format.
def update_features(self, poly): """Evaluate wavelength at xpos using the provided polynomial.""" for feature in self.features: feature.wavelength = poly(feature.xpos)
Evaluate wavelength at xpos using the provided polynomial.
def error_and_result(f):
    """
    Format task result into json dictionary `{'data': task return value}`
    if no exception was raised during the task execution.

    If an exception was raised during task execution, formats the task
    result into dictionary `{'error': exception message with traceback}`.
    """
    @wraps(f)
    def error_and_result_decorator(*args, **kwargs):
        return error_and_result_decorator_inner_fn(f, False, *args, **kwargs)
    return error_and_result_decorator
Format task result into json dictionary `{'data': task return value}` if no exception was raised during the task execution. If an exception was raised during task execution, formats the task result into dictionary `{'error': exception message with traceback}`.
def dict_from_prefix(cls, prefix, dictionary): """ >>> from collections import OrderedDict >>> od = OrderedDict() >>> od["problem[q0][a]"]=1 >>> od["problem[q0][b][c]"]=2 >>> od["problem[q1][first]"]=1 >>> od["problem[q1][second]"]=2 >>> AdminCourseEditTask.dict_from_prefix("problem",od) OrderedDict([('q0', OrderedDict([('a', 1), ('b', OrderedDict([('c', 2)]))])), ('q1', OrderedDict([('first', 1), ('second', 2)]))]) """ o_dictionary = OrderedDict() for key, val in dictionary.items(): if key.startswith(prefix): o_dictionary[key[len(prefix):].strip()] = val dictionary = o_dictionary if len(dictionary) == 0: return None elif len(dictionary) == 1 and "" in dictionary: return dictionary[""] else: return_dict = OrderedDict() for key, val in dictionary.items(): ret = re.search(r"^\[([^\]]+)\](.*)$", key) if ret is None: continue return_dict[ret.group(1)] = cls.dict_from_prefix("[{}]".format(ret.group(1)), dictionary) return return_dict
>>> from collections import OrderedDict >>> od = OrderedDict() >>> od["problem[q0][a]"]=1 >>> od["problem[q0][b][c]"]=2 >>> od["problem[q1][first]"]=1 >>> od["problem[q1][second]"]=2 >>> AdminCourseEditTask.dict_from_prefix("problem",od) OrderedDict([('q0', OrderedDict([('a', 1), ('b', OrderedDict([('c', 2)]))])), ('q1', OrderedDict([('first', 1), ('second', 2)]))])
def OnPasteFormat(self, event): """Paste format event handler""" with undo.group(_("Paste format")): self.grid.actions.paste_format() self.grid.ForceRefresh() self.grid.update_attribute_toolbar() self.grid.actions.zoom()
Paste format event handler
def iteritems(self, indices=None): 'Iterate through items in the ``indices`` (defaults to all indices)' if indices is None: indices = force_list(self.indices.keys()) for x in self.itervalues(indices): yield x
Iterate through items in the ``indices`` (defaults to all indices)
def gen_cartesian_product(*args): """ generate cartesian product for lists Args: args (list of list): lists to be generated with cartesian product Returns: list: cartesian product in list Examples: >>> arg1 = [{"a": 1}, {"a": 2}] >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}] >>> args = [arg1, arg2] >>> gen_cartesian_product(*args) >>> # same as below >>> gen_cartesian_product(arg1, arg2) [ {'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122} ] """ if not args: return [] elif len(args) == 1: return args[0] product_list = [] for product_item_tuple in itertools.product(*args): product_item_dict = {} for item in product_item_tuple: product_item_dict.update(item) product_list.append(product_item_dict) return product_list
generate cartesian product for lists Args: args (list of list): lists to be generated with cartesian product Returns: list: cartesian product in list Examples: >>> arg1 = [{"a": 1}, {"a": 2}] >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}] >>> args = [arg1, arg2] >>> gen_cartesian_product(*args) >>> # same as below >>> gen_cartesian_product(arg1, arg2) [ {'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122} ]
def AddContract(self, contract): """ Add a contract to the database. Args: contract(neo.SmartContract.Contract): a Contract instance. """ super(UserWallet, self).AddContract(contract) try: db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes()) db_contract.delete_instance() except Exception as e: logger.debug("contract does not exist yet") sh = bytes(contract.ScriptHash.ToArray()) address, created = Address.get_or_create(ScriptHash=sh) address.IsWatchOnly = False address.save() db_contract = Contract.create(RawData=contract.ToArray(), ScriptHash=contract.ScriptHash.ToBytes(), PublicKeyHash=contract.PublicKeyHash.ToBytes(), Address=address, Account=self.__dbaccount) logger.debug("Creating db contract %s " % db_contract) db_contract.save()
Add a contract to the database. Args: contract(neo.SmartContract.Contract): a Contract instance.
def load_conf(cfg_path): """ Try to load the given conf file. """ global config try: cfg = open(cfg_path, 'r') except Exception as ex: if verbose: print("Unable to open {0}".format(cfg_path)) print(str(ex)) return False # Read the entire contents of the conf file cfg_json = cfg.read() cfg.close() # print(cfg_json) # Try to parse the conf file into a Python structure try: config = json.loads(cfg_json) except Exception as ex: print("Unable to parse configuration file as JSON") print(str(ex)) return False # This config was successfully loaded return True
Try to load the given conf file.
def add_keywords_from_list(self, keyword_list):
    """To add keywords from a list

    Args:
        keyword_list (list(str)): List of keywords to add

    Examples:
        >>> keyword_processor.add_keywords_from_list(["java", "python"])

    Raises:
        AttributeError: If `keyword_list` is not a list.

    """
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")

    for keyword in keyword_list:
        self.add_keyword(keyword)
To add keywords from a list

Args:
    keyword_list (list(str)): List of keywords to add

Examples:
    >>> keyword_processor.add_keywords_from_list(["java", "python"])

Raises:
    AttributeError: If `keyword_list` is not a list.
def start_adc_comparator(self, channel, high_threshold, low_threshold,
                         gain=1, data_rate=None, active_low=True,
                         traditional=True, latching=False, num_readings=1):
    """Start continuous ADC conversions on the specified channel (0-3) with
    the comparator enabled.  When enabled, the comparator will check if the
    ADC value is within the high_threshold & low_threshold values (both
    should be signed 16-bit integers) and trigger the ALERT pin.  The
    behavior can be controlled by the following parameters:
      - active_low: Boolean that indicates if ALERT is pulled low or high
                    when active/triggered.  Default is true, active low.
      - traditional: Boolean that indicates if the comparator is in traditional
                     mode where it fires when the value is within the threshold,
                     or in window mode where it fires when the value is _outside_
                     the threshold range.  Default is true, traditional mode.
      - latching: Boolean that indicates if the alert should be held until
                  get_last_result() is called to read the value and clear
                  the alert.  Default is false, non-latching.
      - num_readings: The number of readings that match the comparator before
                      triggering the alert.  Can be 1, 2, or 4.  Default is 1.
    Will return an initial conversion result, then call the get_last_result()
    function continuously to read the most recent conversion result.  Call
    stop_adc() to stop conversions.
    """
    assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
    # Start continuous reads with comparator and set the mux value to the
    # channel plus the highest bit (bit 3) set.
    return self._read_comparator(channel + 0x04, gain, data_rate,
                                 ADS1x15_CONFIG_MODE_CONTINUOUS,
                                 high_threshold, low_threshold,
                                 active_low, traditional, latching,
                                 num_readings)
Start continuous ADC conversions on the specified channel (0-3) with the comparator enabled. When enabled, the comparator will check if the ADC value is within the high_threshold & low_threshold values (both should be signed 16-bit integers) and trigger the ALERT pin. The behavior can be controlled by the following parameters:
  - active_low: Boolean that indicates if ALERT is pulled low or high when active/triggered. Default is true, active low.
  - traditional: Boolean that indicates if the comparator is in traditional mode where it fires when the value is within the threshold, or in window mode where it fires when the value is _outside_ the threshold range. Default is true, traditional mode.
  - latching: Boolean that indicates if the alert should be held until get_last_result() is called to read the value and clear the alert. Default is false, non-latching.
  - num_readings: The number of readings that match the comparator before triggering the alert. Can be 1, 2, or 4. Default is 1.
Will return an initial conversion result, then call the get_last_result() function continuously to read the most recent conversion result. Call stop_adc() to stop conversions.
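A usage sketch against the Adafruit ADS1x15 driver this method appears to belong to; the package name, wiring, and threshold values are illustrative assumptions, while get_last_result() and stop_adc() are named in the docstring.
# Usage sketch: fire the ALERT pin per the comparator, poll the last result.
import time
import Adafruit_ADS1x15  # assumed package name

adc = Adafruit_ADS1x15.ADS1115()
adc.start_adc_comparator(0, high_threshold=20000, low_threshold=5000,
                         gain=1, num_readings=4)
try:
    while True:
        print(adc.get_last_result())
        time.sleep(0.5)
finally:
    adc.stop_adc()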
def po_to_ods(languages, locale_root, po_files_path, temp_file_path):
    """
    Converts po files to an ODS spreadsheet in a GDocs-readable format.
    :param languages: list of language codes
    :param locale_root: path to locale root folder containing directories
                        with languages
    :param po_files_path: path from lang directory to po file
    :param temp_file_path: path where temporary files will be saved
    """
    title_row = ['file', 'comment', 'msgid']
    title_row += map(lambda s: s + ':msgstr', languages)

    ods = ODS()
    _prepare_ods_columns(ods, title_row)

    po_files = _get_all_po_filenames(locale_root, languages[0], po_files_path)

    i = 1
    for po_filename in po_files:
        po_file_path = os.path.join(locale_root, languages[0],
                                    po_files_path, po_filename)
        start_row = i
        po = polib.pofile(po_file_path)
        for entry in po:
            meta = dict(entry.__dict__)
            meta.pop('msgid', None)
            meta.pop('msgstr', None)
            meta.pop('tcomment', None)

            ods.content.getSheet(1)
            ods.content.getCell(0, i).stringValue(
                str(meta)).setCellColor(settings.EVEN_COLUMN_BG_COLOR)

            ods.content.getSheet(0)
            ods.content.getCell(0, i) \
                .stringValue(po_filename) \
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            ods.content.getCell(1, i) \
                .stringValue(_escape_apostrophe(entry.tcomment)) \
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            ods.content.getCell(2, i) \
                .stringValue(_escape_apostrophe(entry.msgid)) \
                .setCellColor(settings.EVEN_COLUMN_BG_COLOR)
            ods.content.getCell(3, i) \
                .stringValue(_escape_apostrophe(entry.msgstr)) \
                .setCellColor(settings.ODD_COLUMN_BG_COLOR)
            i += 1

        _write_trans_into_ods(ods, languages, locale_root,
                              po_files_path, po_filename, start_row)

    ods.save(temp_file_path)
Converts po files to an ODS spreadsheet in a GDocs-readable format.
:param languages: list of language codes
:param locale_root: path to locale root folder containing directories with languages
:param po_files_path: path from lang directory to po file
:param temp_file_path: path where temporary files will be saved
def ParseOptions(cls, options, analysis_plugin): """Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (ViperAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when unable to connect to Viper instance. """ if not isinstance(analysis_plugin, viper.ViperAnalysisPlugin): raise errors.BadConfigObject( 'Analysis plugin is not an instance of ViperAnalysisPlugin') lookup_hash = cls._ParseStringOption( options, 'viper_hash', default_value=cls._DEFAULT_HASH) analysis_plugin.SetLookupHash(lookup_hash) host = cls._ParseStringOption( options, 'viper_host', default_value=cls._DEFAULT_HOST) analysis_plugin.SetHost(host) port = cls._ParseNumericOption( options, 'viper_port', default_value=cls._DEFAULT_PORT) analysis_plugin.SetPort(port) protocol = cls._ParseStringOption( options, 'viper_protocol', default_value=cls._DEFAULT_PROTOCOL) protocol = protocol.lower().strip() analysis_plugin.SetProtocol(protocol) if not analysis_plugin.TestConnection(): raise errors.BadConfigOption( 'Unable to connect to Viper {0:s}:{1:d}'.format(host, port))
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (ViperAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when unable to connect to Viper instance.
def to_string(value, ctx): """ Tries conversion of any value to a string """ if isinstance(value, bool): return "TRUE" if value else "FALSE" elif isinstance(value, int): return str(value) elif isinstance(value, Decimal): return format_decimal(value) elif isinstance(value, str): return value elif type(value) == datetime.date: return value.strftime(ctx.get_date_format(False)) elif isinstance(value, datetime.time): return value.strftime('%H:%M') elif isinstance(value, datetime.datetime): return value.astimezone(ctx.timezone).isoformat() raise EvaluationError("Can't convert '%s' to a string" % str(value))
Tries conversion of any value to a string
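A sketch of the conversions above; the Ctx class below is a minimal stand-in for the evaluation context (an assumption), supplying only the get_date_format(bool) hook and timezone attribute the branches require.
# Usage sketch: each branch of to_string exercised with a stub context.
import datetime
from decimal import Decimal

class Ctx:  # minimal stand-in for the real evaluation context
    timezone = datetime.timezone.utc
    def get_date_format(self, incl_time):
        return '%d-%m-%Y'

ctx = Ctx()
print(to_string(True, ctx))                        # "TRUE"
print(to_string(Decimal('3.50'), ctx))             # via format_decimal
print(to_string(datetime.date(2020, 1, 31), ctx))  # "31-01-2020"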
def start(self, historics_id): """ Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('start', data=dict(id=historics_id))
Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def authorize():
    """Authorize to twitter.

    Use PIN authentication.

    :returns: Token to authenticate with Twitter.
    :rtype: :class:`autotweet.twitter.OAuthToken`

    """
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    url = auth.get_authorization_url()

    print('Open this url on your webbrowser: {0}'.format(url))
    webbrowser.open(url)

    pin = input('Input verification number here: ').strip()
    token_key, token_secret = auth.get_access_token(verifier=pin)

    return OAuthToken(token_key, token_secret)
Authorize to twitter. Use PIN authentication.

:returns: Token to authenticate with Twitter.
:rtype: :class:`autotweet.twitter.OAuthToken`
def install_extensions(extensions, **connection_parameters): """Install Postgres extension if available. Notes ----- - superuser is generally required for installing extensions. - Currently does not support specific schema. """ from postpy.connections import connect conn = connect(**connection_parameters) conn.autocommit = True for extension in extensions: install_extension(conn, extension)
Install Postgres extension if available. Notes ----- - superuser is generally required for installing extensions. - Currently does not support specific schema.
def _compile(cls, lines): '''Return both variable names used in the #for loop in the current line.''' m = cls.RE_FOR.match(lines.current) if m is None: raise DefineBlockError( 'Incorrect block definition at line {}, {}\nShould be ' 'something like: #for @item in @items:' .format(lines.pos, lines.current)) return m.group(1), m.group(2).replace('.', '-')
Return both variable names used in the #for loop in the current line.
def get_loader(vm_, **kwargs): ''' Returns the information on the loader for a given vm :param vm_: name of the domain :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults CLI Example: .. code-block:: bash salt '*' virt.get_loader <domain> .. versionadded:: 2019.2.0 ''' conn = __get_conn(**kwargs) loader = _get_loader(_get_domain(conn, vm_)) conn.close() return loader
Returns the information on the loader for a given vm :param vm_: name of the domain :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults CLI Example: .. code-block:: bash salt '*' virt.get_loader <domain> .. versionadded:: 2019.2.0
def get_default_config(self): """ Returns the default collector settings """ config = super(MonitCollector, self).get_default_config() config.update({ 'host': '127.0.0.1', 'port': 2812, 'user': 'monit', 'passwd': 'monit', 'path': 'monit', 'byte_unit': ['byte'], 'send_totals': False, }) return config
Returns the default collector settings
def _epd_residual(coeffs, mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd): ''' This is the residual function to minimize using scipy.optimize.leastsq. ''' f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd) residual = mags - f return residual
This is the residual function to minimize using scipy.optimize.leastsq.
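A sketch of the minimization the docstring refers to; the initial coefficients and the data arrays are assumed to be prepared elsewhere, and only the residual signature above is taken from the source.
# Usage sketch: fit EPD coefficients by least squares.
from scipy.optimize import leastsq

fit_coeffs, ier = leastsq(
    _epd_residual, init_coeffs,
    args=(mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)
)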
def add(self, post_id):
    '''
    Add a reply to a post.
    '''
    post_data = self.get_post_data()
    post_data['user_name'] = self.userinfo.user_name
    post_data['user_id'] = self.userinfo.uid
    post_data['post_id'] = post_id

    replyid = MReply.create_reply(post_data)
    if replyid:
        out_dic = {'pinglun': post_data['cnt_reply'], 'uid': replyid}
        logger.info('add reply result dic: {0}'.format(out_dic))
        return json.dump(out_dic, self)
Add a reply to a post.
def add_bias(self, name, b, input_name, output_name, shape_bias = [1]): """ Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale """ spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.bias #add bias and its shape bias = spec_layer_params.bias spec_layer_params.shape.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter")
Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale
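A usage sketch for a coremltools-style NeuralNetworkBuilder; the builder instance and the blob names are illustrative, and only the add_bias signature is taken from the source.
# Usage sketch: append a per-channel bias layer to an existing builder.
import numpy as np

builder.add_bias(name='bias_1',
                 b=np.array([0.5, -0.5, 1.0]),
                 input_name='conv_out',
                 output_name='biased_out',
                 shape_bias=[3])  # np.prod(shape_bias) must equal b.size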
def receive_request(self, transaction):
    """
    Manage the observe option in the request and eventually initialize the
    client, adding it to the list of observers or removing it from the list.

    :type transaction: Transaction
    :param transaction: the transaction that owns the request
    :rtype : Transaction
    :return: the modified transaction
    """
    if transaction.request.observe == 0:
        # Observe request
        host, port = transaction.request.source
        key_token = hash(str(host) + str(port) + str(transaction.request.token))
        non_counter = 0
        if key_token in self._relations:
            # Renew registration
            allowed = True
        else:
            allowed = False
        self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction)
    elif transaction.request.observe == 1:
        host, port = transaction.request.source
        key_token = hash(str(host) + str(port) + str(transaction.request.token))
        logger.info("Remove Subscriber")
        try:
            del self._relations[key_token]
        except KeyError:
            pass

    return transaction
Manage the observe option in the request and eventually initialize the client, adding it to the list of observers or removing it from the list.

:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction
def get_route_shape_segments(cur, route_id):
    """
    Given a route_id, return the points of its shape in sequence order.

    Parameters
    ----------
    cur: sqlite3.Cursor
        cursor to a GTFS database
    route_id: str
        id of the route

    Returns
    -------
    shape_points: list
        elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
    """
    cur.execute('''SELECT seq, lat, lon
                    FROM (SELECT shape_id
                            FROM route
                            LEFT JOIN trips
                            USING (route_I)
                            WHERE route_id=? limit 1
                         )
                    JOIN shapes
                    USING (shape_id)
                    ORDER BY seq''', (route_id,))
    shape_points = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in cur]
    return shape_points
Given a route_id, return the points of its shape in sequence order.

Parameters
----------
cur: sqlite3.Cursor
    cursor to a GTFS database
route_id: str
    id of the route

Returns
-------
shape_points: list
    elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
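A usage sketch against a GTFS sqlite database; the file path and route_id below are illustrative.
# Usage sketch: fetch and print the shape of one route.
import sqlite3

conn = sqlite3.connect('gtfs.sqlite')
points = get_route_shape_segments(conn.cursor(), route_id='1001')
for p in points:
    print(p['seq'], p['lat'], p['lon'])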
def info(path): ''' Display synchronization information. ''' output, err = cli_syncthing_adapter.info(folder=path) if err: click.echo(output, err=err) else: stat = output['status'] click.echo("State: %s" % stat['state']) click.echo("\nTotal Files: %s" % stat['localFiles']) click.echo("Files Needed: %s" % stat['needFiles']) click.echo("\nTotal Bytes: %s" % stat['localBytes']) click.echo("Bytes Needed: %s" % stat['needBytes']) progress = output['files_needed']['progress'] queued = output['files_needed']['queued'] rest = output['files_needed']['rest'] if len(progress) or len(queued) or len(rest): click.echo("\nFiles Needed:") for f in progress: click.echo(" " + f['name']) for f in queued: click.echo(" " + f['name']) for f in rest: click.echo(" " + f['name']) click.echo("\nDevices Authorized:\n%s" % output['auth_ls'])
Display synchronization information.
def parse_response(self, connection, command_name, **options): """ Parses a response from the ssdb server """ response = connection.read_response() if command_name in self.response_callbacks and len(response): status = nativestr(response[0]) if status == RES_STATUS.OK: return self.response_callbacks[command_name](response[1:], **options) elif status == RES_STATUS.NOT_FOUND: return None else: raise DataError(RES_STATUS_MSG[status]+':'.join(response)) #raise DataError('Not Found') return response
Parses a response from the ssdb server
def send_media_group(
    self,
    media: str,
    disable_notification: bool = False,
    reply_to_message_id: int = None,
    **options
):
    """
    Send a group of photos or videos as an album

    :param media: A JSON-serialized array describing photos and videos to be sent,
        must include 2–10 items
    :param disable_notification: Sends the messages silently.
        Users will receive a notification with no sound.
    :param reply_to_message_id: If the messages are a reply, ID of the original message
    :param options: Additional sendMediaGroup options (see
        https://core.telegram.org/bots/api#sendmediagroup)

    :Example:

    >>> from json import dumps
    >>> photos_urls = [
    >>>     "https://telegram.org/img/t_logo.png",
    >>>     "https://telegram.org/img/SiteAndroid.jpg?1",
    >>>     "https://telegram.org/img/SiteiOs.jpg?1",
    >>>     "https://telegram.org/img/SiteWP.jpg?2"
    >>> ]
    >>> tg_album = []
    >>> count = len(photos_urls)
    >>> for i, p in enumerate(photos_urls):
    >>>     tg_album.append({
    >>>         'type': 'photo',
    >>>         'media': p,
    >>>         'caption': f'{i} of {count}'
    >>>     })
    >>> await chat.send_media_group(dumps(tg_album))
    """
    return self.bot.api_call(
        "sendMediaGroup",
        chat_id=str(self.id),
        media=media,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to_message_id,
        **options
    )
Send a group of photos or videos as an album

:param media: A JSON-serialized array describing photos and videos to be sent, must include 2–10 items
:param disable_notification: Sends the messages silently. Users will receive a notification with no sound.
:param reply_to_message_id: If the messages are a reply, ID of the original message
:param options: Additional sendMediaGroup options (see https://core.telegram.org/bots/api#sendmediagroup)

:Example:

>>> from json import dumps
>>> photos_urls = [
>>>     "https://telegram.org/img/t_logo.png",
>>>     "https://telegram.org/img/SiteAndroid.jpg?1",
>>>     "https://telegram.org/img/SiteiOs.jpg?1",
>>>     "https://telegram.org/img/SiteWP.jpg?2"
>>> ]
>>> tg_album = []
>>> count = len(photos_urls)
>>> for i, p in enumerate(photos_urls):
>>>     tg_album.append({
>>>         'type': 'photo',
>>>         'media': p,
>>>         'caption': f'{i} of {count}'
>>>     })
>>> await chat.send_media_group(dumps(tg_album))
def displayEmptyInputWarningBox(display=True, parent=None): """ Displays a warning box for the 'input' parameter. """ if sys.version_info[0] >= 3: from tkinter.messagebox import showwarning else: from tkMessageBox import showwarning if display: msg = 'No valid input files found! '+\ 'Please check the value for the "input" parameter.' showwarning(parent=parent,message=msg, title="No valid inputs!") return "yes"
Displays a warning box for the 'input' parameter.
def _main(args): """Batch compression. args contains: * input - path to input directory * output - path to output directory or None * apikey - TinyPNG API key * overwrite - boolean flag """ if not args.apikey: print("\nPlease provide TinyPNG API key") print("To obtain key visit https://api.tinypng.com/developers\n") sys.exit(1) input_dir = realpath(args.input) if not args.output: output_dir = input_dir + "-output" else: output_dir = realpath(args.output) if input_dir == output_dir: print("\nPlease specify different output directory\n") sys.exit(1) handler = ScreenHandler() try: process_directory(input_dir, output_dir, args.apikey, handler) except KeyboardInterrupt: handler.on_finish(output_dir=output_dir)
Batch compression. args contains: * input - path to input directory * output - path to output directory or None * apikey - TinyPNG API key * overwrite - boolean flag
def setup_ui(self, ): """Create the layouts and set some attributes of the ui :returns: None :rtype: None :raises: None """ grid = QtGui.QGridLayout(self) grid.setContentsMargins(0, 0, 0, 0) self.setLayout(grid)
Create the layouts and set some attributes of the ui :returns: None :rtype: None :raises: None
def construct_mail(self): """ compiles the information contained in this envelope into a :class:`email.Message`. """ # Build body text part. To properly sign/encrypt messages later on, we # convert the text to its canonical format (as per RFC 2015). canonical_format = self.body.encode('utf-8') textpart = MIMEText(canonical_format, 'plain', 'utf-8') # wrap it in a multipart container if necessary if self.attachments: inner_msg = MIMEMultipart() inner_msg.attach(textpart) # add attachments for a in self.attachments: inner_msg.attach(a.get_mime_representation()) else: inner_msg = textpart if self.sign: plaintext = inner_msg.as_bytes(policy=email.policy.SMTP) logging.debug('signing plaintext: %s', plaintext) try: signatures, signature_str = crypto.detached_signature_for( plaintext, [self.sign_key]) if len(signatures) != 1: raise GPGProblem("Could not sign message (GPGME " "did not return a signature)", code=GPGCode.KEY_CANNOT_SIGN) except gpg.errors.GPGMEError as e: if e.getcode() == gpg.errors.BAD_PASSPHRASE: # If GPG_AGENT_INFO is unset or empty, the user just does # not have gpg-agent running (properly). if os.environ.get('GPG_AGENT_INFO', '').strip() == '': msg = "Got invalid passphrase and GPG_AGENT_INFO\ not set. Please set up gpg-agent." raise GPGProblem(msg, code=GPGCode.BAD_PASSPHRASE) else: raise GPGProblem("Bad passphrase. Is gpg-agent " "running?", code=GPGCode.BAD_PASSPHRASE) raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_SIGN) micalg = crypto.RFC3156_micalg_from_algo(signatures[0].hash_algo) unencrypted_msg = MIMEMultipart( 'signed', micalg=micalg, protocol='application/pgp-signature') # wrap signature in MIMEcontainter stype = 'pgp-signature; name="signature.asc"' signature_mime = MIMEApplication( _data=signature_str.decode('ascii'), _subtype=stype, _encoder=encode_7or8bit) signature_mime['Content-Description'] = 'signature' signature_mime.set_charset('us-ascii') # add signed message and signature to outer message unencrypted_msg.attach(inner_msg) unencrypted_msg.attach(signature_mime) unencrypted_msg['Content-Disposition'] = 'inline' else: unencrypted_msg = inner_msg if self.encrypt: plaintext = unencrypted_msg.as_bytes(policy=email.policy.SMTP) logging.debug('encrypting plaintext: %s', plaintext) try: encrypted_str = crypto.encrypt( plaintext, list(self.encrypt_keys.values())) except gpg.errors.GPGMEError as e: raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_ENCRYPT) outer_msg = MIMEMultipart('encrypted', protocol='application/pgp-encrypted') version_str = 'Version: 1' encryption_mime = MIMEApplication(_data=version_str, _subtype='pgp-encrypted', _encoder=encode_7or8bit) encryption_mime.set_charset('us-ascii') encrypted_mime = MIMEApplication( _data=encrypted_str.decode('ascii'), _subtype='octet-stream', _encoder=encode_7or8bit) encrypted_mime.set_charset('us-ascii') outer_msg.attach(encryption_mime) outer_msg.attach(encrypted_mime) else: outer_msg = unencrypted_msg headers = self.headers.copy() # add Message-ID if 'Message-ID' not in headers: headers['Message-ID'] = [email.utils.make_msgid()] if 'User-Agent' in headers: uastring_format = headers['User-Agent'][0] else: uastring_format = settings.get('user_agent').strip() uastring = uastring_format.format(version=__version__) if uastring: headers['User-Agent'] = [uastring] # copy headers from envelope to mail for k, vlist in headers.items(): for v in vlist: outer_msg.add_header(k, v) return outer_msg
compiles the information contained in this envelope into a :class:`email.Message`.
def dict_merge(a, b, dict_boundary):
    """
    Recursively merges dicts. Not just simple a['key'] = b['key']: if both a
    and b have a key whose value is a dict, then dict_merge is called on both
    values and the result is stored in the returned dictionary. Also, if keys
    contain `dict_boundary`, they will be split into sub dictionaries.
    :param a: base dict
    :param b: dict whose values take precedence
    :param dict_boundary: separator marking nested keys within a flat key
    :return: merged dict
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for k, v in b.items():
        exploded_k = k.split(dict_boundary)
        if len(exploded_k) > 1:
            new_dict = None
            # @todo: seem to be clobbering existing data with this one... fix it
            for key in reversed(exploded_k):
                if not key:
                    continue
                if not new_dict:
                    new_dict = OrderedDict([(key, v)])
                else:
                    new_dict = OrderedDict([(key, deepcopy(new_dict))])
            result = dict_merge(result, new_dict, dict_boundary)
        elif k in result and isinstance(result[k], dict):
            result[k] = dict_merge(result[k], v, dict_boundary)
        else:
            result[k] = deepcopy(v)

    return result
Recursively merges dicts. Not just simple a['key'] = b['key']: if both a and b have a key whose value is a dict, then dict_merge is called on both values and the result is stored in the returned dictionary. Also, if keys contain `dict_boundary`, they will be split into sub dictionaries.
:param a: base dict
:param b: dict whose values take precedence
:param dict_boundary: separator marking nested keys within a flat key
:return: merged dict
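A sketch of both behaviors, recursive merging and boundary splitting, traced against the code above:
# Usage sketch: 'db.user' is exploded into a nested dict before merging.
a = {'db': {'host': 'localhost', 'port': 5432}}
b = {'db': {'port': 6432}, 'db.user': 'admin'}
merged = dict_merge(a, b, dict_boundary='.')
# -> {'db': {'host': 'localhost', 'port': 6432, 'user': 'admin'}}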
def init_common(app): """Post initialization.""" if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']: security_ext = app.extensions['security'] security_ext.confirm_register_form = confirm_register_form_factory( security_ext.confirm_register_form) security_ext.register_form = register_form_factory( security_ext.register_form)
Post initialization.
def _attribute_iterator(self, mapped_class, key): """ Returns an iterator over the attributes in this mapping for the given mapped class and attribute key. If this is a pruning mapping, attributes that are ignored because of a custom configuration or because of the default ignore rules are skipped. """ for attr in \ itervalues_(self.__get_attribute_map(mapped_class, key, 0)): if self.is_pruning: do_ignore = attr.should_ignore(key) else: do_ignore = False if not do_ignore: yield attr
Returns an iterator over the attributes in this mapping for the given mapped class and attribute key. If this is a pruning mapping, attributes that are ignored because of a custom configuration or because of the default ignore rules are skipped.
def __prepare_dataset_parameter(self, dataset): """ Processes the dataset parameter for type correctness. Returns it as an SFrame. """ # Translate the dataset argument into the proper type if not isinstance(dataset, _SFrame): def raise_dataset_type_exception(): raise TypeError("The dataset parameter must be either an SFrame, " "or a dictionary of (str : list) or (str : value).") if type(dataset) is dict: if not all(type(k) is str for k in _six.iterkeys(dataset)): raise_dataset_type_exception() if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)): dataset = _SFrame(dataset) else: dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)}) else: raise_dataset_type_exception() return dataset
Processes the dataset parameter for type correctness. Returns it as an SFrame.