positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def create_user(backend, details, response, uid, username, user=None, *args, **kwargs):
    """Create a new social-auth user.

    Depends on the get_username pipeline step having produced ``username``.
    Returns a pipeline dict containing the new user, or ``None`` to halt the
    pipeline when no username is available.

    :raises AuthFailed: when the social account provides no email address
    """
    # A user was already resolved earlier in the pipeline: nothing to create.
    if user:
        return {'user': user}
    if not username:
        return None
    email = details.get('email')
    original_email = None
    # email is required
    if not email:
        message = _("""your social account needs to have a verified email address in order to proceed.""")
        raise AuthFailed(backend, message)
    # Avoid hitting field max length (75 chars). The over-long address is
    # preserved in 'original_email' so the caller can recover it.
    if email and len(email) > 75:
        original_email = email
        email = ''
    return {
        'user': UserSocialAuth.create_user(username=username, email=email, sync_emailaddress=False),
        'original_email': original_email,
        'is_new': True
    }
Creates user. Depends on get_username pipeline.
def _region_code_for_number_from_list(numobj, regions):
    """Find the region in a list that matches a number.

    :param numobj: the parsed phone number object
    :param regions: candidate region codes to test, in order
    :return: the first region code that matches, or None
    """
    national_number = national_significant_number(numobj)
    for region_code in regions:
        # If leading_digits is present, use this. Otherwise, do full
        # validation.
        # Metadata cannot be None because the region codes come from
        # the country calling code map.
        metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
        # Defensive check, despite the note above.
        if metadata is None:
            continue
        if metadata.leading_digits is not None:
            leading_digit_re = re.compile(metadata.leading_digits)
            match = leading_digit_re.match(national_number)
            if match:
                return region_code
        elif _number_type_helper(national_number, metadata) != PhoneNumberType.UNKNOWN:
            return region_code
    return None
Find the region in a list that matches a number
def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
    """
    Initialize the ghost boxes.

    This function only needs to be called if boundary conditions other
    than "none" or "open" are used. In such a case the number of
    ghostboxes must be known and is set with this function.

    Parameters
    ----------
    nghostx, nghosty, nghostz : int
        The number of ghost boxes in each direction. All values default
        to 0 (no ghost boxes).
    """
    # NOTE(review): these assignments set attributes on the ctypes library
    # handle; presumably they mirror C globals of the same names -- confirm
    # against the C library's API.
    clibrebound.nghostx = c_int(nghostx)
    clibrebound.nghosty = c_int(nghosty)
    clibrebound.nghostz = c_int(nghostz)
    return
Initialize the ghost boxes. This function only needs to be called it boundary conditions other than "none" or "open" are used. In such a case the number of ghostboxes must be known and is set with this function. Parameters ---------- nghostx, nghosty, nghostz : int The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
def waitForResponse(self, timeOut=None):
    """Block until the response arrives or the timeout is reached.

    :param timeOut: seconds to wait, or None to wait indefinitely
    :return: the ``result`` field of the received response
    :raises Timeout: if no response arrived within ``timeOut``
    :raises Exception: if the response carries a truthy ``error`` field
    """
    self.__evt.wait(timeOut)
    # Still marked as waiting after wait() returned => the event never fired,
    # so the wait above must have timed out.
    if self.waiting():
        raise Timeout()
    else:
        if self.response["error"]:
            raise Exception(self.response["error"])
        else:
            return self.response["result"]
blocks until the response arrived or timeout is reached.
def upvoters(self):
    """Fetch the users who upvoted this answer, as a generator.

    :return: upvoting users
    :rtype: Author.Iterable
    """
    self._make_soup()
    next_req = '/answer/' + str(self.aid) + '/voters_profile'
    # Follow the paginated API until the 'next' link comes back empty.
    while next_req != '':
        data = self._session.get(Zhihu_URL + next_req).json()
        next_req = data['paging']['next']
        for html in data['payload']:
            soup = BeautifulSoup(html)
            yield self._parse_author_soup(soup)
获取答案点赞用户,返回生成器. :return: 点赞用户 :rtype: Author.Iterable
def resolve_att(a, fallback):
    """Substitute fallback colors wherever *a* uses '' or 'default'.

    Returns *fallback* unchanged when *a* is None; otherwise builds a new
    AttrSpec whose foreground/background come from *a* unless they are
    placeholders.
    """
    if a is None:
        return fallback
    placeholders = ('default', '')
    bg = fallback.background if a.background in placeholders else a.background
    fg = fallback.foreground if a.foreground in placeholders else a.foreground
    return AttrSpec(fg, bg)
replace '' and 'default' by fallback values
def pierson(self, ddof=0):
    """Matrix of Pearson linear correlation coefficients (rho values) for
    each pair of columns. (The historical spelling ``pierson`` is kept for
    API compatibility.)

    https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient

    :param ddof: delta degrees of freedom forwarded to ``self.cov``
    :return: symmetric NxN list-of-lists with 1.0 on the diagonal
    """
    cov_matrix = self.cov(ddof=ddof)
    size = len(cov_matrix)
    # Start from an identity-like matrix: diagonal correlations are 1.
    rho = [[1.] * size for _ in range(size)]
    for row in range(size):
        for col in range(row + 1, size):
            # Guard against zero variance with the `or 1.` fallback.
            denom = (cov_matrix[row][row] * cov_matrix[col][col] or 1.) ** 0.5
            value = cov_matrix[row][col] / denom
            rho[row][col] = value
            rho[col][row] = value
    return rho
Matrix of pierson linear correlation coefficients (rho values) for each pair of columns https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient >>> Columns([[1, 2, 3], [4, 5, 6]]).pierson() [[1.0, 1.0], [1.0, 1.0]] >>> Columns([[1, 2, 3], [2.5, 3.5, 4.5]], transpose=True).pierson() [[1.0, 1.0], [1.0, 1.0]] >>> Columns([[1, 3, 2], [4, 5, 7]], transpose=1).pierson() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [[1.0, 0.327326835...], [0.327326835..., 1.0]]
def _latch_file_info(self):
    """Internal function to update the dictionaries keeping track of input
    and output files.
    """
    # Rebuild the file map from scratch using the current arguments.
    self.files.file_dict.clear()
    self.files.latch_file_info(self.args)
Internal function to update the dictionaries keeping track of input and output files
def abort_thread():
    """Raise ThreadAbortError if the current CauldronThread was aborted.

    This function checks to see if the user has indicated that they want the
    currently running execution to stop prematurely by marking the running
    thread as aborted. It only applies to operations that are run within
    CauldronThreads and not the main thread.

    :raises ThreadAbortError: when the executing thread is flagged to abort
    """
    thread = threading.current_thread()
    # Only CauldronThreads participate in the abort protocol.
    if not isinstance(thread, CauldronThread):
        return
    if thread.is_executing and thread.abort:
        raise ThreadAbortError('User Aborted Execution')
This function checks to see if the user has indicated that they want the currently running execution to stop prematurely by marking the running thread as aborted. It only applies to operations that are run within CauldronThreads and not the main thread.
def write_data(self, buf):
    """Send data to the device.

    If the write fails for any reason, an :obj:`IOError` exception
    is raised.

    :param buf: the data to send.
    :type buf: list(int)
    :return: success status.
    :rtype: bool
    """
    # Class-specific OUT control request directed at the interface.
    bmRequestType = usb.util.build_request_type(
        usb.util.ENDPOINT_OUT, usb.util.CTRL_TYPE_CLASS,
        usb.util.CTRL_RECIPIENT_INTERFACE)
    result = self.dev.ctrl_transfer(
        bmRequestType=bmRequestType,
        bRequest=usb.REQ_SET_CONFIGURATION,
        data_or_wLength=buf,
        wValue=0x200,
        timeout=50)
    # A short write (fewer bytes transferred than requested) is a failure.
    if result != len(buf):
        raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
    return True
Send data to the device. If the write fails for any reason, an :obj:`IOError` exception is raised. :param buf: the data to send. :type buf: list(int) :return: success status. :rtype: bool
def get(self, key, default=None):
    """Retrieve the first value for a marker or None.

    :param key: marker name to look up
    :param default: value returned when no pair matches
    """
    matches = (value for marker, value in self if marker == key)
    return next(matches, default)
Retrieve the first value for a marker or None.
def is_user_in_group(self, user, group):
    """Test for whether a user is in a group.

    There is also the ability in the API to test for whether multiple users
    are members of an LDAP group, but you should just call is_user_in_group
    over an enumerated list of users.

    Args:
        user: String username.
        group: String group name.

    Returns bool.
    """
    search_url = "%s/%s/%s/%s/%s" % (self.url, "group", group, "user", user)
    response = self.jss.get(search_url)
    # Sanity check
    length = len(response)
    result = False
    if length == 1:
        # User doesn't exist. Use default False value.
        pass
    elif length == 2:
        if response.findtext("ldap_user/username") == user:
            if response.findtext("ldap_user/is_member") == "Yes":
                result = True
    elif len(response) >= 2:
        # NOTE(review): this branch can only trigger for length > 2, since
        # length == 2 is consumed above; it flags unexpected multi-element
        # responses. A length of 0 silently returns False -- confirm intended.
        raise JSSGetError("Unexpected response.")
    return result
Test for whether a user is in a group. There is also the ability in the API to test for whether multiple users are members of an LDAP group, but you should just call is_user_in_group over an enumerated list of users. Args: user: String username. group: String group name. Returns bool.
def get_user(
    self, identified_with, identifier, req, resp, resource, uri_kwargs
):
    """Return the statically configured user object.

    All arguments are part of the authentication-backend interface but are
    ignored here: this backend always resolves to the same stored user.
    """
    return self.user
Return default user object.
def get_symmetric_image(self):
    """Creates a new DirectedHypergraph object that is the symmetric
    image of this hypergraph (i.e., identical hypergraph with all
    edge directions reversed).
    Copies of each of the nodes' and hyperedges' attributes are stored
    and used in the new hypergraph.

    :returns: DirectedHypergraph -- a new hypergraph that is the symmetric
              image of the current hypergraph.
    """
    new_H = self.copy()
    # No change to _node_attributes necessary, as nodes remain the same
    # Reverse the tail and head (and __frozen_tail and __frozen_head) for
    # every hyperedge
    for hyperedge_id in self.get_hyperedge_id_set():
        attr_dict = new_H._hyperedge_attributes[hyperedge_id]
        attr_dict["tail"], attr_dict["head"] = \
            attr_dict["head"], attr_dict["tail"]
        attr_dict["__frozen_tail"], attr_dict["__frozen_head"] = \
            attr_dict["__frozen_head"], attr_dict["__frozen_tail"]
    # Reverse the definition of forward star and backward star
    new_H._forward_star, new_H._backward_star = \
        new_H._backward_star, new_H._forward_star
    # Reverse the definition of successor and predecessor
    new_H._successors, new_H._predecessors = \
        new_H._predecessors, new_H._successors
    return new_H
Creates a new DirectedHypergraph object that is the symmetric image of this hypergraph (i.e., identical hypergraph with all edge directions reversed). Copies of each of the nodes' and hyperedges' attributes are stored and used in the new hypergraph. :returns: DirectedHypergraph -- a new hypergraph that is the symmetric image of the current hypergraph.
def some(args):
    """
    %prog some idsfile afastq [bfastq]

    Select a subset of the reads with ids present in the idsfile.
    `bfastq` is optional (only if reads are paired)
    """
    p = OptionParser(some.__doc__)
    opts, args = p.parse_args(args)
    if len(args) not in (2, 3):
        sys.exit(not p.print_help())
    idsfile, afastq, = args[:2]
    bfastq = args[2] if len(args) == 3 else None
    ids = DictFile(idsfile, valuepos=None)
    # Walk both fastq files in lockstep so paired records stay in sync.
    ai = iter_fastq(open(afastq))
    arec = next(ai)
    if bfastq:
        bi = iter_fastq(open(bfastq))
        brec = next(bi)
    while arec:
        # Record names carry a leading marker character; strip it for lookup.
        if arec.name[1:] in ids:
            print(arec)
            if bfastq:
                print(brec)
        arec = next(ai)
        if bfastq:
            brec = next(bi)
%prog some idsfile afastq [bfastq] Select a subset of the reads with ids present in the idsfile. `bfastq` is optional (only if reads are paired)
def handleOACK(self, pkt):
    """This method handles an OACK from the server, syncing any accepted
    options.

    :param pkt: the received OACK packet
    :raises TftpException: if option negotiation fails or the OACK
        contains no options
    """
    if len(pkt.options.keys()) > 0:
        if pkt.match_options(self.context.options):
            log.info("Successful negotiation of options")
            # Set options to OACK options
            self.context.options = pkt.options
            for key in self.context.options:
                log.info(" %s = %s" % (key, self.context.options[key]))
        else:
            log.error("Failed to negotiate options")
            raise TftpException("Failed to negotiate options")
    else:
        raise TftpException("No options found in OACK")
This method handles an OACK from the server, syncing any accepted options.
def _clean_java_out(version_str): """Remove extra environmental information reported in java when querying for versions. Java will report information like _JAVA_OPTIONS environmental variables in the output. """ out = [] for line in version_str.decode().split("\n"): if line.startswith("Picked up"): pass if line.find("setlocale") > 0: pass else: out.append(line) return "\n".join(out)
Remove extra environmental information reported in java when querying for versions. Java will report information like _JAVA_OPTIONS environmental variables in the output.
def custom_update(self, data, pred, obj):
    '''Updates existing entity property based on the predicate input.

    :param data: the entity record being updated (mutated in place)
    :param pred: the predicate/field name to update
    :param obj: the new value for that predicate
    :return: the updated ``data``, or the result of ``test_check`` on error
    '''
    if isinstance(data[pred], str):
        # for all simple properties of str value
        data[pred] = str(obj)
    else:
        # synonyms, superclasses, and existing_ids have special requirements
        if pred == 'synonyms':
            literals = [d['literal'] for d in data[pred]]
            if obj not in literals:
                data[pred].append({'literal': obj})  # synonyms req for post
        elif pred == 'superclasses':
            ilx_ids = [d['ilx'] for d in data[pred]]
            if obj not in ilx_ids:
                _obj = obj.replace('ILX:', 'ilx_')
                super_data, success = self.get_data_from_ilx(ilx_id=_obj)
                super_data = super_data['data']
                if success:
                    # superclass req post
                    data[pred].append({'id': super_data['id'], 'ilx': _obj})
                else:
                    return self.test_check('Your superclass ILX ID ' + _obj + ' does not exist.')
        elif pred == 'existing_ids':
            # FIXME need to autogenerate curies from a map
            iris = [d['iri'] for d in data[pred]]
            if obj not in iris:
                if 'http' not in obj:
                    return self.test_check('exisiting id value must be a uri containing "http"')
                data[pred].append({
                    'curie': self.qname(obj),
                    'iri': obj,
                    'preferred': '0'  # preferred is auto generated by preferred_change
                })
                #data[pred] = []
                data = self.preferred_change(data)  # One ex id is determined to be preferred
        else:
            # Somehow broke this code
            return self.test_check(pred + ' Has slipped through the cracks')
    return data
Updates existing entity proprty based on the predicate input
def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """
    Computes the (batch_size,) denominator term for the log-likelihood, which is the
    sum of the likelihoods across all possible state sequences.

    ``logits`` is (batch_size, sequence_length, num_tags); ``mask`` is
    (batch_size, sequence_length) with 1 for valid positions.
    """
    batch_size, sequence_length, num_tags = logits.size()
    # Transpose batch size and sequence dimensions
    mask = mask.float().transpose(0, 1).contiguous()
    logits = logits.transpose(0, 1).contiguous()
    # Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
    # transitions to the initial states and the logits for the first timestep.
    if self.include_start_end_transitions:
        alpha = self.start_transitions.view(1, num_tags) + logits[0]
    else:
        alpha = logits[0]
    # For each i we compute logits for the transitions from timestep i-1 to timestep i.
    # We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
    # (instance, current_tag, next_tag)
    for i in range(1, sequence_length):
        # The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
        emit_scores = logits[i].view(batch_size, 1, num_tags)
        # Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
        transition_scores = self.transitions.view(1, num_tags, num_tags)
        # Alpha is for the current_tag, so we broadcast along the next_tag axis.
        broadcast_alpha = alpha.view(batch_size, num_tags, 1)
        # Add all the scores together and logexp over the current_tag axis
        inner = broadcast_alpha + emit_scores + transition_scores
        # In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension
        # of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.
        alpha = (util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
                 alpha * (1 - mask[i]).view(batch_size, 1))
    # Every sequence needs to end with a transition to the stop_tag.
    if self.include_start_end_transitions:
        stops = alpha + self.end_transitions.view(1, num_tags)
    else:
        stops = alpha
    # Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
    return util.logsumexp(stops)
Computes the (batch_size,) denominator term for the log-likelihood, which is the sum of the likelihoods across all possible state sequences.
def prepare(self, rule):
    """Parse and/or compile the given rule into a rule tree.

    :param rule: Filtering grammar rule.
    :return: Parsed and/or compiled rule.
    """
    prepared = rule
    # Each stage is optional; whichever is configured is applied in order.
    if self.parser:
        prepared = self.parser.parse(prepared)
    if self.compiler:
        prepared = self.compiler.compile(prepared)
    return prepared
Parse and/or compile given rule into rule tree. :param rule: Filtering grammar rule. :return: Parsed and/or compiled rule.
def solar_zenith(self, dateandtime, latitude, longitude):
    """Calculates the solar zenith angle (degrees from vertical).

    The zenith is simply the complement of the solar elevation.

    :param dateandtime: The date and time for which to calculate the angle.
    :type dateandtime:  :class:`~datetime.datetime`
    :param latitude:    Latitude - Northern latitudes should be positive
    :type latitude:     float
    :param longitude:   Longitude - Eastern longitudes should be positive
    :type longitude:    float
    :return: The zenith angle in degrees from vertical.
    :rtype: float

    If `dateandtime` is a naive Python datetime then it is assumed to be
    in the UTC timezone.
    """
    elevation = self.solar_elevation(dateandtime, latitude, longitude)
    return 90.0 - elevation
Calculates the solar zenith angle. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :return: The zenith angle in degrees from vertical. :rtype: float If `dateandtime` is a naive Python datetime then it is assumed to be in the UTC timezone.
def replacebranch(idf, loop, branch, listofcomponents, fluid=None, debugsave=False, testing=None):
    """It will replace the components in the branch with components in
    listofcomponents.

    :param idf: the IDF model being edited
    :param loop: the loop object (AIRLOOPHVAC / PLANTLOOP / CONDENSERLOOP)
    :param branch: the BRANCH object whose components are replaced
    :param listofcomponents: replacement components (component, node-suffix pairs)
    :param fluid: 'Water' or 'Air'; defaults to '' when None
    :param debugsave: when True, save intermediate copies of the IDF
    :param testing: test hook consumed by doingtesting()
    :return: the modified branch
    """
    if fluid is None:
        fluid = ''
    # -------- testing ---------
    testn = 0
    # -------- testing ---------
    # join them into a branch
    # -----------------------
    # np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
    # change the node names in the component
    # empty the old branch
    # fill in the new components with the node names into this branch
    listofcomponents = _clean_listofcomponents(listofcomponents)
    components = [item[0] for item in listofcomponents]
    connectcomponents(idf, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh3.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fields = SomeFields.a_fields
    thebranch = branch
    componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh4.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    if debugsave:
        idf.savecopy("hhh7.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # check for the end nodes of the loop
    if loop.key == 'AIRLOOPHVAC':
        fields = SomeFields.a_fields
    if loop.key == 'PLANTLOOP':
        fields = SomeFields.p_fields
    if loop.key == 'CONDENSERLOOP':
        fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]
    if fluid.upper() == 'WATER':
        supplyconlistname = loop[flnames[3]]
        # Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
    elif fluid.upper() == 'AIR':
        supplyconlistname = loop[flnames[1]]  # Connector_List_Name'
    supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)
    for i in range(1, 100000):  # large range to hit end
        try:
            fieldname = 'Connector_%s_Object_Type' % (i,)
            ctype = supplyconlist[fieldname]
        except bunch_subclass.BadEPFieldError:
            break
        if ctype.strip() == '':
            break
        fieldname = 'Connector_%s_Name' % (i,)
        cname = supplyconlist[fieldname]
        connector = idf.getobject(ctype.upper(), cname)
        if connector.key == 'CONNECTOR:SPLITTER':
            firstbranchname = connector.Inlet_Branch_Name
            cbranchname = firstbranchname
            isfirst = True
        if connector.key == 'CONNECTOR:MIXER':
            lastbranchname = connector.Outlet_Branch_Name
            cbranchname = lastbranchname
            isfirst = False
        if cbranchname == thebranch.Name:
            # rename end nodes
            comps = getbranchcomponents(idf, thebranch)
            if isfirst:
                comp = comps[0]
                inletnodename = getnodefieldname(
                    comp, "Inlet_Node_Name", fluid)
                comp[inletnodename] = [
                    comp[inletnodename],
                    loop[flnames[0]]]  # Plant_Side_Inlet_Node_Name
            else:
                comp = comps[-1]
                outletnodename = getnodefieldname(
                    comp, "Outlet_Node_Name", fluid)
                comp[outletnodename] = [
                    comp[outletnodename],
                    loop[flnames[1]]]  # .Plant_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if fluid.upper() == 'WATER':
        demandconlistname = loop[flnames[7]]  # .Demand_Side_Connector_List_Name
        demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
        for i in range(1, 100000):  # large range to hit end
            try:
                fieldname = 'Connector_%s_Object_Type' % (i,)
                ctype = demandconlist[fieldname]
            except bunch_subclass.BadEPFieldError:
                break
            if ctype.strip() == '':
                break
            fieldname = 'Connector_%s_Name' % (i,)
            cname = demandconlist[fieldname]
            connector = idf.getobject(ctype.upper(), cname)
            if connector.key == 'CONNECTOR:SPLITTER':
                firstbranchname = connector.Inlet_Branch_Name
                cbranchname = firstbranchname
                isfirst = True
            if connector.key == 'CONNECTOR:MIXER':
                lastbranchname = connector.Outlet_Branch_Name
                cbranchname = lastbranchname
                isfirst = False
            if cbranchname == thebranch.Name:
                # rename end nodes
                comps = getbranchcomponents(idf, thebranch)
                if isfirst:
                    comp = comps[0]
                    inletnodename = getnodefieldname(
                        comp, "Inlet_Node_Name", fluid)
                    comp[inletnodename] = [
                        comp[inletnodename],
                        loop[flnames[4]]]  # .Demand_Side_Inlet_Node_Name
                if not isfirst:
                    comp = comps[-1]
                    outletnodename = getnodefieldname(
                        comp, "Outlet_Node_Name", fluid)
                    comp[outletnodename] = [
                        comp[outletnodename],
                        loop[flnames[5]]]  # .Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh8.idf")
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh9.idf")
    return thebranch
It will replace the components in the branch with components in listofcomponents
def get_logs(self, request):
    """Get logs from log service.

    An unsuccessful operation will cause a LogException. Note: for larger
    volumes of data (e.g. > 1 million logs), use get_log_all.

    :type request: GetLogsRequest
    :param request: the GetLogs request parameters class.
    :return: GetLogsResponse
    :raise: LogException
    """
    # Unpack the request object and delegate to the positional API.
    return self.get_log(request.get_project(),
                        request.get_logstore(),
                        request.get_from(),
                        request.get_to(),
                        request.get_topic(),
                        request.get_query(),
                        request.get_reverse(),
                        request.get_offset(),
                        request.get_line())
Get logs from log service. Unsuccessful opertaion will cause an LogException. Note: for larger volume of data (e.g. > 1 million logs), use get_log_all :type request: GetLogsRequest :param request: the GetLogs request parameters class. :return: GetLogsResponse :raise: LogException
def add(self, date_range, library_name): """ Adds the library with the given date range to the underlying collection of libraries used by this store. The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function and the rest of the class. Arguments: date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries, that is the start must be add midnight and the end must be 1 millisecond before midnight. library_name: The name of the underlying library. This must be the name of a valid Arctic library """ # check that the library is valid try: self._arctic_lib.arctic[library_name] except Exception as e: logger.error("Could not load library") raise e assert date_range.start and date_range.end, "Date range should have start and end properties {}".format(date_range) start = date_range.start.astimezone(mktz('UTC')) if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz('UTC')) end = date_range.end.astimezone(mktz('UTC')) if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz('UTC')) assert start.time() == time.min and end.time() == end_time_min, "Date range should fall on UTC day boundaries {}".format(date_range) # check that the date range does not overlap library_metadata = self._get_library_metadata(date_range) if len(library_metadata) > 1 or (len(library_metadata) == 1 and library_metadata[0] != library_name): raise OverlappingDataException("""There are libraries that overlap with the date range: library: {} overlapping libraries: {}""".format(library_name, [l.library for l in library_metadata])) self._collection.update_one({'library_name': library_name}, {'$set': {'start': start, 'end': end}}, upsert=True)
Adds the library with the given date range to the underlying collection of libraries used by this store. The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function and the rest of the class. Arguments: date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries, that is the start must be add midnight and the end must be 1 millisecond before midnight. library_name: The name of the underlying library. This must be the name of a valid Arctic library
def cleanup_deployments(self):
    """
    Delete every deployment living in a namespace managed by this backend.

    :return: None
    """
    for candidate in self.list_deployments():
        if candidate.namespace in self.managed_namespaces:
            candidate.delete()
Delete all deployments created in namespaces associated with this backend :return: None
def get_parent_bank_ids(self, bank_id):
    """Gets the parent ``Ids`` of the given bank.

    arg:    bank_id (osid.id.Id): a bank ``Id``
    return: (osid.id.IdList) - the parent ``Ids`` of the bank
    raise:  NotFound - ``bank_id`` is not found
    raise:  NullArgument - ``bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_parent_bin_ids
    if self._catalog_session is None:
        return self._hierarchy_session.get_parents(id_=bank_id)
    return self._catalog_session.get_parent_catalog_ids(catalog_id=bank_id)
Gets the parent ``Ids`` of the given bank. arg: bank_id (osid.id.Id): a bank ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the bank raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def discrete_random_draw(data, nb=1):
    """Draw ``nb`` indices at random, weighted by ``data``.

    When every weight is zero, a uniform distribution is used instead.
    (Code from Steve Nguyen)

    :param data: sequence of non-negative weights
    :param nb: number of draws
    :return: array of drawn indices
    """
    weights = np.array(data)
    if not weights.any():
        # All-zero weights would make normalisation divide by zero.
        weights = np.ones_like(weights)
    weights = weights / weights.sum()
    support = np.arange(len(weights))
    dist = stats.rv_discrete(name='custm', values=(support, weights))
    return dist.rvs(size=nb)
Code from Steve Nguyen
def head(self, n=10):
    """Display the top of the file.

    Args:
        n (int): Number of lines to display
    """
    lines = repr(self).split('\n')
    # Trailing space instead of newline mirrors pandas-style previews.
    print('\n'.join(lines[:n]), end=' ')
Display the top of the file. Args: n (int): Number of lines to display
def _represent_undefined(self, data):
    """Raise RepresenterError for objects that cannot be represented.

    :param data: the object for which no representer was found
    :raises RepresenterError: always; the message lists the registered
        representers to aid debugging
    """
    raise RepresenterError(
        _format("Cannot represent an object: {0!A} of type: {1}; "
                "yaml_representers: {2!A}, "
                "yaml_multi_representers: {3!A}",
                data, type(data), self.yaml_representers.keys(),
                self.yaml_multi_representers.keys()))
Raises flag for objects that cannot be represented
def is_uuid(value, **kwargs):
    """Indicate whether ``value`` contains a :class:`UUID <python:uuid.UUID>`

    :param value: The value to evaluate.

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator

    """
    try:
        validators.uuid(value, **kwargs)
    except SyntaxError:
        # Programming errors in the keyword arguments must surface.
        raise
    except Exception:
        return False
    else:
        return True
Indicate whether ``value`` contains a :class:`UUID <python:uuid.UUID>` :param value: The value to evaluate. :returns: ``True`` if ``value`` is valid, ``False`` if it is not. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def fields(self, *fields):
    ''' Only return the specified fields from the object.  Accessing a \
        field that was not specified in ``fields`` will result in a \
        :class:``ommongo.document.FieldNotRetrieved`` exception being \
        raised

        :param fields: Instances of :class:``ommongo.query.QueryField`` specifying \
            which fields to return
    '''
    if self._fields is None:
        self._fields = set()
    for f in fields:
        f = resolve_name(self.type, f)
        self._fields.add(f)
    # The document id is always retrieved regardless of the selection.
    self._fields.add(self.type.mongo_id)
    return self
Only return the specified fields from the object. Accessing a \ field that was not specified in ``fields`` will result in a \ :class:``ommongo.document.FieldNotRetrieved`` exception being \ raised :param fields: Instances of :class:``ommongo.query.QueryField`` specifying \ which fields to return
def run_process(self, slug, inputs):
    """Run a new process from a running process.

    Emits ``export`` statements for any file-path inputs, then a ``run``
    statement with the process slug and JSON-encoded inputs.
    """
    def _export_inputs(value):
        """Recursively emit export statements for file paths in *value*."""
        if isinstance(value, str) and os.path.isfile(value):
            # TODO: Use the protocol to export files and get the
            # process schema to check field type.
            print("export {}".format(value))
        elif isinstance(value, dict):
            for nested in value.values():
                _export_inputs(nested)
        elif isinstance(value, list):
            for nested in value:
                _export_inputs(nested)

    _export_inputs(inputs)
    payload = json.dumps({'process': slug, 'input': inputs}, separators=(',', ':'))
    print('run {}'.format(payload))
Run a new process from a running process.
def _build_hash_string(self): """Function for build password hash string. Raises: PybooruError: When isn't provide hash string. PybooruError: When aren't provide username or password. PybooruError: When Pybooru can't add password to hash strring. """ # Build AUTENTICATION hash_string # Check if hash_string exists if self.site_name in SITE_LIST or self.hash_string: if self.username and self.password: try: hash_string = self.hash_string.format(self.password) except TypeError: raise PybooruError("Pybooru can't add 'password' " "to 'hash_string'") # encrypt hashed_string to SHA1 and return hexdigest string self.password_hash = hashlib.sha1( hash_string.encode('utf-8')).hexdigest() else: raise PybooruError("Specify the 'username' and 'password' " "parameters of the Pybooru object, for " "setting 'password_hash' attribute.") else: raise PybooruError( "Specify the 'hash_string' parameter of the Pybooru" " object, for the functions that requires login.")
Function for build password hash string. Raises: PybooruError: When isn't provide hash string. PybooruError: When aren't provide username or password. PybooruError: When Pybooru can't add password to hash strring.
def snapshots(self, owner=None, restorable_by=None):
    """
    Get all snapshots related to this volume.  Note that this requires
    that all available snapshots for the account be retrieved from EC2
    first and then the list is filtered client-side to contain only
    those for this volume.

    :type owner: str
    :param owner: If present, only the snapshots owned by the specified user
        will be returned.  Valid values are:
        self | amazon | AWS Account ID

    :type restorable_by: str
    :param restorable_by: If present, only the snapshots that are restorable
        by the specified account id will be returned.

    :rtype: list of L{boto.ec2.snapshot.Snapshot}
    :return: The requested Snapshot objects
    """
    all_snapshots = self.connection.get_all_snapshots(
        owner=owner, restorable_by=restorable_by)
    return [snap for snap in all_snapshots if snap.volume_id == self.id]
Get all snapshots related to this volume. Note that this requires that all available snapshots for the account be retrieved from EC2 first and then the list is filtered client-side to contain only those for this volume. :type owner: str :param owner: If present, only the snapshots owned by the specified user will be returned. Valid values are: self | amazon | AWS Account ID :type restorable_by: str :param restorable_by: If present, only the snapshots that are restorable by the specified account id will be returned. :rtype: list of L{boto.ec2.snapshot.Snapshot} :return: The requested Snapshot objects
def get_init_container(self,
                       init_command,
                       init_args,
                       env_vars,
                       context_mounts,
                       persistence_outputs,
                       persistence_data):
    """Build the pod init container that prepares the outputs path.

    :param init_command: container command; defaults to ``["/bin/sh", "-c"]``.
    :param init_args: container args; defaults to the generated output args.
    :param env_vars: environment variables for the init container.
    :param context_mounts: extra volume mounts to attach.
    :param persistence_outputs: persistence definition for outputs.
    :param persistence_data: persistence definition for data (not used here).
    :return: list with a single ``client.V1Container``, or an empty list
        when resuming a cloned experiment.
    """
    env_vars = to_list(env_vars, check_none=True)
    # Resumed clones keep using the original outputs path; nothing to init.
    if self.original_name is not None and self.cloning_strategy == CloningStrategy.RESUME:
        return []
    if self.original_name is not None and self.cloning_strategy == CloningStrategy.COPY:
        # Copy clones duplicate the original experiment's outputs.
        command = InitCommands.COPY
        original_outputs_path = stores.get_experiment_outputs_path(
            persistence=persistence_outputs,
            experiment_name=self.original_name)
    else:
        command = InitCommands.CREATE
        original_outputs_path = None

    outputs_path = stores.get_experiment_outputs_path(
        persistence=persistence_outputs,
        experiment_name=self.experiment_name)
    _, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
    volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True)
    init_command = init_command or ["/bin/sh", "-c"]
    init_args = init_args or to_list(
        get_output_args(command=command,
                        outputs_path=outputs_path,
                        original_outputs_path=original_outputs_path))
    init_args += to_list(get_auth_context_args(entity='experiment',
                                               entity_name=self.experiment_name))
    return [
        client.V1Container(
            name=self.init_container_name,
            image=self.init_docker_image,
            image_pull_policy=self.init_docker_image_pull_policy,
            command=init_command,
            # joined into a single string — presumably one shell command
            # for the `sh -c` entrypoint; confirm with get_output_args
            args=[''.join(init_args)],
            env=env_vars,
            volume_mounts=volume_mounts)
    ]
Pod init container for setting outputs path.
def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True):
    """
    Returns the image url and background position for use in a single
    shorthand property.

    :param map: sprite map expression; rendered to look up the cache.
    :param sprite: name of the sprite inside the map.
    :param offset_x, offset_y: optional offsets; values in (-1, 1)
        (and not '%') are treated as relative and shifted by the
        sprite's position in the sheet.
    :param cache_buster: append the map timestamp as a query string.
    :return: List([url, x, y]) or List([0, 0]) when the sprite is missing.
    """
    map = map.render()
    sprite_maps = _get_cache('sprite_maps')
    sprite_map = sprite_maps.get(map)
    sprite_name = String.unquoted(sprite).value
    # sprite becomes the (cached) sprite record, or None/False when absent
    sprite = sprite_map and sprite_map.get(sprite_name)
    if not sprite_map:
        log.error("No sprite map found: %s", map, extra={'stack': True})
    elif not sprite:
        # '*n*' holds the sprite map's name — presumably; confirm cache layout
        log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
    if sprite:
        # '*f*' is the sprite sheet file, '*t*' its timestamp
        url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*'])
        if cache_buster:
            url += '?_=%s' % sprite_map['*t*']
        x = Number(offset_x or 0, 'px')
        y = Number(offset_y or 0, 'px')
        # Offsets with |value| >= 1 (or zero) that are not percentages are
        # shifted by the sprite's position (indices 2/3) in the sheet.
        if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
            x -= Number(sprite[2], 'px')
        if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
            y -= Number(sprite[3], 'px')
        url = "url(%s)" % escape(url)
        return List([String.unquoted(url), x, y])
    return List([Number(0), Number(0)])
Returns the image and background position for use in a single shorthand property
def sbo(self, name):
    """Recursively resolve all dependencies of a SlackBuild package.

    :param name: package whose requires are resolved.
    :return: the accumulated ``self.dep_results`` (list of dependency
        lists), or ``[]`` when the package has no requires.
    """
    # Only resolve when dependency resolution is enabled and not
    # disabled on the command line.
    if (self.meta.rsl_deps in ["on", "ON"] and
            "--resolve-off" not in self.flag):
        # deep dependency chains can recurse far
        sys.setrecursionlimit(10000)
        dependencies = []
        requires = SBoGrep(name).requires()
        if requires:
            for req in requires:
                status(0.03)
                # toolbar_width = status(index, toolbar_width, 1)
                # avoid to add %README% as dependency and
                # if require in blacklist
                if "%README%" not in req and req not in self.blacklist:
                    dependencies.append(req)
            if dependencies:
                self.dep_results.append(dependencies)
                for dep in dependencies:
                    self.sbo(dep)
                return self.dep_results
            # NOTE(review): when every require is blacklisted this falls
            # through and returns None (not []) — confirm callers handle it.
        else:
            return []
Build all dependencies of a package
def results(self):
    """If successfully created, add the cleaned `CifData` and `StructureData`
    as output nodes to the workchain.

    The filter and select calculations were successful, so we return the
    cleaned CifData node. If the `group_cif` was defined in the inputs, the
    node is added to it. If the structure should have been parsed, verify
    that it was put in the context by the `parse_cif_structure` step and add
    it to the group and outputs, otherwise return the finish status that
    should correspond to the exit code of the `primitive_structure_from_cif`
    function.
    """
    self.out('cif', self.ctx.cif)

    if 'group_cif' in self.inputs:
        self.inputs.group_cif.add_nodes([self.ctx.cif])

    if 'group_structure' in self.inputs:
        try:
            # set by the parse_cif_structure step on success
            structure = self.ctx.structure
        except AttributeError:
            # structure parsing failed; propagate its exit code
            return self.ctx.exit_code
        else:
            self.inputs.group_structure.add_nodes([structure])
            self.out('structure', structure)

    self.report('workchain finished successfully')
If successfully created, add the cleaned `CifData` and `StructureData` as output nodes to the workchain. The filter and select calculations were successful, so we return the cleaned CifData node. If the `group_cif` was defined in the inputs, the node is added to it. If the structure should have been parsed, verify that it is was put in the context by the `parse_cif_structure` step and add it to the group and outputs, otherwise return the finish status that should correspond to the exit code of the `primitive_structure_from_cif` function.
def get_session_id(self):
    """Return a quasi-unique session id (short digit string) used to
    aggregate log records from multiple sources for the life of the
    running program.

    The id is derived from the last session recorded in
    ``<log_folder>/_sessions.txt`` plus a random increment, and is
    appended to that file.

    WARNING - this can give duplicate sessions when 2 apps hit it at
    the same time (file access is not locked).

    :return: 9-character zero-padded numeric string.
    """
    sessions_path = self.log_folder + os.sep + '_sessions.txt'
    max_session = '0'
    try:
        with open(sessions_path, 'r') as f:
            # Bug fix: the old loop mixed iteration with readline() and
            # skipped every other line; track the last non-empty line.
            for line in f:
                line = line.strip('\n')
                if line != '':
                    max_session = line
    except OSError:
        # first run - no sessions file yet
        max_session = '1'
    # random increment is not a great way to ensure uniqueness - TODO FIX
    this_session = str(int(max_session) + random.randint(9, 100)).zfill(9)
    with open(sessions_path, 'a') as f2:
        f2.write(this_session + '\n')
    return this_session
get a unique id (shortish string) to allow simple aggregation of log records from multiple sources. This id is used for the life of the running program to allow extraction from all logs. WARNING - this can give duplicate sessions when 2 apps hit it at the same time.
def sg_aconv(tensor, opt): r"""Applies a 2-D atrous (or dilated) convolution. Args: tensor: A 4-D `Tensor` (automatically passed by decorator). opt: size: A tuple/list of positive integers of length 2 representing `[kernel height, kernel width]`. Can be an integer if both values are the same. If not specified, (3, 3) is set automatically. rate: A positive integer. The stride with which we sample input values across the `height` and `width` dimensions. Default is 2. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`. """ # default options opt += tf.sg_opt(size=(3, 3), rate=2, pad='SAME') opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size] # parameter tf.sg_initializer w = tf.sg_initializer.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim), regularizer=opt.regularizer, summary=opt.summary) b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0 # apply convolution out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate, padding=opt.pad) + b return out
r"""Applies a 2-D atrous (or dilated) convolution. Args: tensor: A 4-D `Tensor` (automatically passed by decorator). opt: size: A tuple/list of positive integers of length 2 representing `[kernel height, kernel width]`. Can be an integer if both values are the same. If not specified, (3, 3) is set automatically. rate: A positive integer. The stride with which we sample input values across the `height` and `width` dimensions. Default is 2. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`.
def number(self, p_todo):
    """Return the identifier of a todo item.

    Depending on the configuration this is either the todo's text ID
    or its line number.
    """
    wants_text_ids = config().identifiers() == "text"
    return self.uid(p_todo) if wants_text_ids else self.linenumber(p_todo)
Returns the line number or text ID of a todo (depends on the configuration).
def _build_doc(self):
    """
    Raises
    ------
    ValueError
        * If a URL that lxml cannot parse is passed.

    Exception
        * Any other ``Exception`` thrown. For example, trying to parse a
          URL that is syntactically correct on a machine with no internet
          connection will fail.

    See Also
    --------
    pandas.io.html._HtmlFrameParser._build_doc
    """
    from lxml.html import parse, fromstring, HTMLParser
    from lxml.etree import XMLSyntaxError

    # recover=True lets lxml best-effort parse malformed markup
    parser = HTMLParser(recover=True, encoding=self.encoding)

    try:
        if _is_url(self.io):
            with urlopen(self.io) as f:
                r = parse(f, parser=parser)
        else:
            # try to parse the input in the simplest way
            r = parse(self.io, parser=parser)
        try:
            # lxml may return an ElementTree; unwrap to the root element
            r = r.getroot()
        except AttributeError:
            pass
    except (UnicodeDecodeError, IOError) as e:
        # if the input is a blob of html goop, parse it as a string
        if not _is_url(self.io):
            r = fromstring(self.io, parser=parser)
            try:
                r = r.getroot()
            except AttributeError:
                pass
        else:
            # a URL that failed to open is a genuine error: re-raise
            raise e
    else:
        # success path: the result must expose text_content to be usable
        if not hasattr(r, 'text_content'):
            raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
    return r
Raises ------ ValueError * If a URL that lxml cannot parse is passed. Exception * Any other ``Exception`` thrown. For example, trying to parse a URL that is syntactically correct on a machine with no internet connection will fail. See Also -------- pandas.io.html._HtmlFrameParser._build_doc
def _build_model_factories(store):
    """Generate warmongo model factories from a schema store.

    :param store: mapping of schema name -> entry expected to contain a
        'schema' key.
    :return: dict mapping schema name -> warmongo model factory
        (schemata that fail are logged and omitted).
    """
    result = {}
    for schemaname in store:
        schema = None
        try:
            schema = store[schemaname]['schema']
        except KeyError:
            schemata_log("No schema found for ", schemaname, lvl=critical, exc=True)
        try:
            # NOTE(review): when the lookup above failed, schema is still
            # None here and model_factory(None) is attempted — the failure
            # is only logged below; confirm this best-effort is intended.
            result[schemaname] = warmongo.model_factory(schema)
        except Exception as e:
            schemata_log("Could not create factory for schema ", schemaname, schema, lvl=critical, exc=True)
    return result
Generate factories to construct objects from schemata
def __list_uniques(self, date_range, field_name): """Retrieve a list of unique values in a given field within a date range. :param date_range: :param field_name: :return: list of unique values. """ # Get project list s = Search(using=self._es_conn, index=self._es_index) s = s.filter('range', **date_range) # from:to parameters (=> from: 0, size: 0) s = s[0:0] s.aggs.bucket('uniques', 'terms', field=field_name, size=1000) response = s.execute() uniques_list = [] for item in response.aggregations.uniques.buckets: uniques_list.append(item.key) return uniques_list
Retrieve a list of unique values in a given field within a date range. :param date_range: :param field_name: :return: list of unique values.
def get_regex(regex):
    """
    Ensure we have a compiled regular expression object.

    Strings are compiled; compiled patterns pass through unchanged;
    anything else raises TypeError.

    >>> import re
    >>> get_regex('string')  # doctest: +ELLIPSIS
    re.compile('string')
    >>> pattern = re.compile(r'string')
    >>> get_regex(pattern) is pattern
    True
    >>> get_regex(3)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    TypeError: Invalid regex type: 3
    """
    # Bug fix: `basestring` and `re._pattern_type` do not exist on
    # Python 3.7+; use `str` and the public `re.Pattern` instead.
    if isinstance(regex, str):
        return re.compile(regex)
    elif not isinstance(regex, re.Pattern):
        raise TypeError("Invalid regex type: %r" % (regex,))
    return regex
Ensure we have a compiled regular expression object. >>> import re >>> get_regex('string') # doctest: +ELLIPSIS <_sre.SRE_Pattern object at 0x...> >>> pattern = re.compile(r'string') >>> get_regex(pattern) is pattern True >>> get_regex(3) # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: Invalid regex type: 3
def replace_u_end_month(month):
    """Find the latest legitimate month."""
    month = month.lstrip('-')
    # Fully or mostly unspecified months map to fixed answers.
    exact = {'uu': '12', '1u': '12', 'u0': '10', '0u': '09'}
    if month in exact:
        return exact[month]
    if month[1] in ('1', '2'):
        # 'u1' -> '11', 'u2' -> '12'
        return month.replace('u', '1')
    # Otherwise it should match r'u[3-9]': only '0X' is a valid month.
    return month.replace('u', '0')
Find the latest legitimate month.
def get_string(ea): """Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic. """ # We get the item-head because the `GetStringType` function only works on the head of an item. string_type = idc.GetStringType(idaapi.get_item_head(ea)) if string_type is None: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) string = idc.GetString(ea, strtype=string_type) if not string: raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea)) return string
Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic.
def _repr(self, *args, **kwargs): """Return a __repr__ string from the arguments provided to __init__. @param args: list of arguments to __init__ @param kwargs: dictionary of keyword arguments to __init__ @return: __repr__ string """ # Remove unnecessary empty keywords arguments and sort the arguments kwargs = {k: v for k, v in kwargs.items() if v is not None} kwargs = OrderedDict(sorted(kwargs.items())) # Build the __repr__ string pieces args_repr = ', '.join(repr(arg) for arg in args) kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items()) if args_repr and kwargs_repr: kwargs_repr = ', ' + kwargs_repr name = self.__class__.__name__ return "{}({}{})".format(name, args_repr, kwargs_repr)
Return a __repr__ string from the arguments provided to __init__. @param args: list of arguments to __init__ @param kwargs: dictionary of keyword arguments to __init__ @return: __repr__ string
def decode(self, query):
    """I transform query parameters into an L{OpenIDRequest}.

    If the query does not seem to be an OpenID request at all, I return
    C{None}.

    @param query: The query parameters as a dictionary with each
        key mapping to one value.
    @type query: dict

    @raises ProtocolError: When the query does not seem to be a valid
        OpenID request.

    @returntype: L{OpenIDRequest}
    """
    if not query:
        return None

    try:
        message = Message.fromPostArgs(query)
    except InvalidOpenIDNamespace, err:
        # It's useful to have a Message attached to a ProtocolError, so we
        # override the bad ns value to build a Message out of it.  Kinda
        # kludgy, since it's made of lies, but the parts that aren't lies
        # are more useful than a 'None'.
        query = query.copy()
        query['openid.ns'] = OPENID2_NS
        message = Message.fromPostArgs(query)
        raise ProtocolError(message, str(err))

    mode = message.getArg(OPENID_NS, 'mode')
    if not mode:
        fmt = "No mode value in message %s"
        raise ProtocolError(message, text=fmt % (message,))

    # Dispatch on mode; unknown modes fall back to the default decoder.
    handler = self._handlers.get(mode, self.defaultDecoder)
    return handler(message, self.server.op_endpoint)
I transform query parameters into an L{OpenIDRequest}. If the query does not seem to be an OpenID request at all, I return C{None}. @param query: The query parameters as a dictionary with each key mapping to one value. @type query: dict @raises ProtocolError: When the query does not seem to be a valid OpenID request. @returntype: L{OpenIDRequest}
def prj_create_user(self, *args, **kwargs):
    """Create a new user for the current project and add it to the
    project-user tree model.

    No-op when no project is currently selected.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    user = self.create_user(projects=[self.cur_prj])
    if user:
        # wrap the new user and insert it under the model root
        userdata = djitemdata.UserItemData(user)
        treemodel.TreeItem(userdata, self.prj_user_model.root)
Create a new project :returns: None :rtype: None :raises: None
def set_ytick_labels(self, row, column, labels):
    """Manually specify the y-axis tick labels of one subplot.

    :param row,column: specify the subplot.
    :param labels: list of tick labels.
    """
    target = self.get_subplot_at(row, column)
    target.set_ytick_labels(labels)
Manually specify the y-axis tick labels. :param row,column: specify the subplot. :param labels: list of tick labels.
def add_transform_columns(self):
    """Add transformed-value columns (``<col>_trans``) to the
    Pst.parameter_data attribute.

    For each of parval1/parlbnd/parubnd/increment present, the
    transformed value is ``value * scale + offset``, additionally
    log10'd for log-transformed parameters.
    """
    for col in ["parval1","parlbnd","parubnd","increment"]:
        if col not in self.parameter_data.columns:
            continue
        # linear transform: scale then offset
        self.parameter_data.loc[:,col+"_trans"] = (self.parameter_data.loc[:,col] *
                                                   self.parameter_data.scale) +\
                                                  self.parameter_data.offset
        #isnotfixed = self.parameter_data.partrans != "fixed"
        # log-transformed parameters additionally get log10 applied
        islog = self.parameter_data.partrans == "log"
        self.parameter_data.loc[islog,col+"_trans"] = \
            self.parameter_data.loc[islog,col+"_trans"].\
            apply(lambda x:np.log10(x))
add transformed values to the Pst.parameter_data attribute
def put(self, path=None, url_kwargs=None, **kwargs):
    """Send a PUT request.

    :param path: The HTTP path (either absolute or relative).
    :param url_kwargs: Parameters to override in the generated URL.
        See `~hyperlink.URL`.
    :param **kwargs: Optional arguments that ``request`` takes.
    :return: response object
    """
    target_url = self._url(path, url_kwargs)
    return self._session.put(target_url, **kwargs)
Sends a PUT request. :param path: The HTTP path (either absolute or relative). :param url_kwargs: Parameters to override in the generated URL. See `~hyperlink.URL`. :param **kwargs: Optional arguments that ``request`` takes. :return: response object
async def login(cls, credentials: AuthenticationCredentials,
                config: Config) -> 'Session':
    """Checks the given credentials for a valid login and returns a new
    session.

    The mailbox data is shared between concurrent and future sessions,
    but only for the lifetime of the process (it lives in
    ``config.set_cache``).

    :param credentials: credentials presented by the client.
    :param config: server configuration (holds passwords and the cache).
    :raises InvalidAuth: when identity or secret does not check out.
    """
    user = credentials.authcid
    password = cls._get_password(config, user)
    # authcid must match the asserted identity, then the secret must match
    if user != credentials.identity:
        raise InvalidAuth()
    elif not credentials.check_secret(password):
        raise InvalidAuth()
    mailbox_set, filter_set = config.set_cache.get(user, (None, None))
    if not mailbox_set or not filter_set:
        # first login for this user in this process: create fresh sets
        mailbox_set = MailboxSet()
        filter_set = FilterSet()
        if config.demo_data:
            await cls._load_demo(mailbox_set, filter_set)
        config.set_cache[user] = (mailbox_set, filter_set)
    return cls(credentials.identity, config, mailbox_set, filter_set)
Checks the given credentials for a valid login and returns a new session. The mailbox data is shared between concurrent and future sessions, but only for the lifetime of the process.
def permissions(self, perms):
    """A decorator that sets a list of permissions for a function.

    Classes in the list are instantiated; instances are used as-is.

    :param perms: The list of permission instances or classes
        (or a single one).
    :return: A function
    """
    if not isinstance(perms, (list, tuple)):
        perms = [perms]
    resolved = [perm() if isclass(perm) else perm for perm in perms]

    def decorator(func):
        func.permissions = resolved
        return func

    return decorator
A decorator that sets a list of permissions for a function. :param perms: The list of permission instances or classes. :return: A function
def get_regions(self):
    """GetRegions.

    [Preview API] Fetch the profile regions from the Azure DevOps
    profile-regions endpoint and deserialize the response.

    :rtype: :class:`<ProfileRegions> <azure.devops.v5_1.profile-regions.models.ProfileRegions>`
    """
    # location_id identifies the profile-regions REST resource
    response = self._send(http_method='GET',
                          location_id='b129ca90-999d-47bb-ab37-0dcf784ee633',
                          version='5.1-preview.1')
    return self._deserialize('ProfileRegions', response)
GetRegions. [Preview API] :rtype: :class:`<ProfileRegions> <azure.devops.v5_1.profile-regions.models.ProfileRegions>`
def required_opts(opt, parser, opt_list, required_by=None):
    """Check that all the opts are defined

    Parameters
    ----------
    opt : object
        Result of option parsing
    parser : object
        OptionParser instance.
    opt_list : list of strings
    required_by : string, optional
        the option that requires these options (if applicable)
    """
    for name in opt_list:
        # '--some-opt' -> attribute 'some_opt'
        attr = name[2:].replace('-', '_')
        if getattr(opt, attr, None) is None:
            err_str = "%s is missing " % name
            if required_by is not None:
                err_str += ", required by %s" % required_by
            parser.error(err_str)
Check that all the opts are defined Parameters ---------- opt : object Result of option parsing parser : object OptionParser instance. opt_list : list of strings required_by : string, optional the option that requires these options (if applicable)
def preprocessing(self, algorithms):
    """Apply preprocessing algorithms to this handwritten-data object.

    Parameters
    ----------
    algorithms : a list of objects
        Preprocessing algorithms which get applied in order; each is
        called with this object as its only argument.
    """
    assert type(algorithms) is list
    for step in algorithms:
        step(self)
Apply preprocessing algorithms. Parameters ---------- algorithms : a list objects Preprocessing allgorithms which get applied in order. Examples -------- >>> import preprocessing >>> a = HandwrittenData(...) >>> preprocessing_queue = [(preprocessing.scale_and_shift, []), ... (preprocessing.connect_strokes, []), ... (preprocessing.douglas_peucker, ... {'EPSILON': 0.2}), ... (preprocessing.space_evenly, ... {'number': 100, ... 'KIND': 'cubic'})] >>> a.preprocessing(preprocessing_queue)
def previous_row(self):
    """Select the row above the currently selected one, wrapping
    around to the last row when already at the top."""
    current = self.currentIndex().row()
    total = self.source_model.rowCount()
    self.selectRow(current - 1 if current else total - 1)
Move to previous row from currently selected row.
def get_available_tf_versions(include_prerelease=False):
    """Return available Terraform versions, newest first.

    :param include_prerelease: when True, keep pre-release versions
        (those with a ``-`` in the version string).
    :return: list of version strings, sorted descending.
    """
    # The Hashicorp releases index lists every published version.
    tf_releases = json.loads(
        requests.get('https://releases.hashicorp.com/index.json').text
    )['terraform']
    # NOTE(review): LooseVersion comes from distutils, which is
    # deprecated on modern Pythons — confirm runtime version.
    tf_versions = sorted([k  # descending
                          for k, _v in tf_releases['versions'].items()],
                         key=LooseVersion,
                         reverse=True)
    if include_prerelease:
        return tf_versions
    # Pre-release versions (e.g. 0.12.0-beta1) contain a hyphen.
    return [i for i in tf_versions if '-' not in i]
Return available Terraform versions.
def load_raw_rules(cls, url):
    """Load raw adblock rules from a URL, falling back to a bundled
    package file with the same basename when the fetch fails.

    :param url: rules list URL (basename doubles as the packaged
        fallback filename, e.g. easylist.txt).
    :return: list of stripped rule lines (bytes when fetched online,
        str when read from the package file).
    """
    raw_rules = []
    filename = url.split('/')[-1]  # e.g.: easylist.txt
    try:
        with closing(request.get(url, stream=True)) as file:
            file.raise_for_status()
            # lines = 0  # to be removed
            for rule in file.iter_lines():
                raw_rules.append(rule.strip())
                # lines += 1  # tbr
                # if lines == 2500: break  # tbr, only for windoze with no re2
            logger.info("Adblock online %s: %d", filename, len(raw_rules))
    except:  # NOTE(review): bare except — ANY failure (server down, bad
             # url, HTTP error) silently triggers the offline fallback.
        with open(resource_filename('summary', filename), 'r') as file:
            for rule in file:
                raw_rules.append(rule.strip())
        logger.info("Adblock offline %s: %d", filename, len(raw_rules))
    return raw_rules
Load raw rules from url or package file.
def initialize_ocean_and_thresholds(world, ocean_level=1.0):
    """
    Calculate the ocean, the sea depth and the elevation thresholds
    :param world: a world having elevation but not thresholds
    :param ocean_level: the elevation representing the ocean level
    :return: nothing, the world will be changed
    """
    e = world.layers['elevation'].data
    ocean = fill_ocean(e, ocean_level)
    hl = find_threshold_f(e, 0.10)  # the highest 10% of all (!) land are declared hills
    ml = find_threshold_f(e, 0.03)  # the highest 3% are declared mountains
    e_th = [('sea', ocean_level),
            ('plain', hl),
            ('hill', ml),
            ('mountain', None)]
    # smooth elevations near the coast before storing results
    harmonize_ocean(ocean, e, ocean_level)
    world.ocean = ocean
    world.elevation = (e, e_th)
    world.sea_depth = sea_depth(world, ocean_level)
Calculate the ocean, the sea depth and the elevation thresholds :param world: a world having elevation but not thresholds :param ocean_level: the elevation representing the ocean level :return: nothing, the world will be changed
def output(memory, ofile=None):
    """ Filters the output removing useless preprocessor #directives
    and writes it to the given file or to the screen if no file is passed

    :param memory: iterable of output lines.
    :param ofile: writable file object, or None to print to stdout.
    """
    for m in memory:
        m = m.rstrip('\r\n\t ')  # Ensures no trailing newlines (might with upon includes)

        if m and m[0] == '#':  # Preprocessor directive?
            if ofile is None:
                print(m)
            else:
                ofile.write('%s\n' % m)
            continue

        # Prints a 4 spaces "tab" for non labels
        if m and ':' not in m:
            if ofile is None:
                # NOTE(review): the trailing comma is Python 2 print-statement
                # syntax to suppress the newline; under Python 3 this prints a
                # newline anyway — confirm the intended interpreter.
                print(' '),
            else:
                ofile.write('\t')

        if ofile is None:
            print(m)
        else:
            ofile.write('%s\n' % m)
Filters the output removing useless preprocessor #directives and writes it to the given file or to the screen if no file is passed
def listProcessingEras(self, processing_version=''):
    """Return all processing eras in dbs, optionally filtered by
    processing version."""
    connection = self.dbi.connection()
    try:
        return self.pelst.execute(connection, processing_version)
    finally:
        # always release the connection, even on error
        if connection:
            connection.close()
Returns all processing eras in dbs
def limit_pos(p, se_pos, nw_pos):
    """ Limits position p to stay inside containing state

    :param p: Position to limit
    :param se_pos: Bottom/Right boundary
    :param nw_pos: Top/Left boundary
    :return:
    """
    # NOTE(review): _update presumably mutates p in place to the clamped
    # boundary value — confirm with _update's definition.
    if p > se_pos:
        _update(p, se_pos)
    elif p < nw_pos:
        _update(p, nw_pos)
Limits position p to stay inside containing state :param p: Position to limit :param se_pos: Bottom/Right boundary :param nw_pos: Top/Left boundary :return:
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None,
                      show_diff=None, dowrite=None):
    """
    Helper that writes a file if -w is given on the command line,
    otherwise it just prints it out. It has the option of comparing a
    diff to the file.

    :param fpath: destination path for the autogenerated code.
    :param autogen_text: the generated source text.
    :param codetype: lexer name used for syntax-highlighted printing.
    :param fullprint: print the full text to stdout (default: --print flag).
    :param show_diff: show a diff against the current file (default: --diff).
    :param dowrite: write the file (default: -w/--write flag).
    """
    import utool as ut
    if dowrite is None:
        dowrite = ut.get_argflag(('-w', '--write'))
    if show_diff is None:
        show_diff = ut.get_argflag('--diff')
    # --diff may carry an int value: the number of diff context lines.
    # (the original computed this twice; once is enough)
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True

    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                # fall back to an ASCII-only rendering of the diff
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)
        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
Helper that writes a file if -w is given on the command line, otherwise it just prints it out. It has the option of comparing a diff to the file.
def _strip_column_name(col_name, keep_paren_contents=True): """ Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']]) """ # start with input new_col_name = col_name # replace meaningful punctuation with text equivalents # surround each with whitespace to enforce consistent use of _ punctuation_to_text = { '<=': 'le', '>=': 'ge', '=<': 'le', '=>': 'ge', '<': 'lt', '>': 'gt', '#': 'num' } for punctuation, punctuation_text in punctuation_to_text.items(): new_col_name = new_col_name.replace(punctuation, punctuation_text) # remove contents within () if not(keep_paren_contents): new_col_name = re.sub('\([^)]*\)', '', new_col_name) # replace remaining punctuation/whitespace with _ punct_pattern = '[\W_]+' punct_replacement = '_' new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name) # remove leading/trailing _ if it exists (if last char was punctuation) new_col_name = new_col_name.strip("_") # TODO: check for empty string # return lower-case version of column name return new_col_name.lower()
Utility script applying several regexs to a string. Intended to be used by `strip_column_names`. This function will: 1. replace informative punctuation components with text 2. (optionally) remove text within parentheses 3. replace remaining punctuation/whitespace with _ 4. strip leading/trailing punctuation/whitespace Parameters ---------- col_name (str): input character string keep_paren_contents (logical): controls behavior of within-paren elements of text - if True, (the default) all text within parens retained - if False, text within parens will be removed from the field name Returns -------- modified string for new field name Examples -------- > print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture.

    :param image: ``(n, m, 3)`` RGB array.
    :raises TypeError: if ``image`` is not a NumPy array.
    :raises AssertionError: if the array is not 3-D with 3 channels.
    """
    if not isinstance(image, np.ndarray):
        raise TypeError('Unknown input type ({})'.format(type(image)))
    if image.ndim != 3 or image.shape[2] != 3:
        raise AssertionError('Input image must be nn by nm by RGB')
    # grid dimensions are (width, height, 1): note the axis swap
    grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1))
    # Transpose and flip so the rows match VTK's point ordering, then
    # flatten to an (n*m, 3) point array in Fortran order.
    grid.point_arrays['Image'] = np.flip(image.swapaxes(0,1), axis=1).reshape((-1, 3), order='F')
    grid.set_active_scalar('Image')
    return image_to_texture(grid)
Convert a NumPy image array to a vtk.vtkTexture
def click_chain(self, selectors_list, by=By.CSS_SELECTOR,
                timeout=settings.SMALL_TIMEOUT, spacing=0):
    """Click each element in ``selectors_list`` in succession.

    :param selectors_list: selectors to click, in order.
    :param by: selector strategy (default: CSS selector).
    :param timeout: per-click wait timeout in seconds.
    :param spacing: the amount of time to wait between clicks (sec).
    """
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        # scale the default timeout when a multiplier is configured
        timeout = self.__get_new_timeout(timeout)
    for selector in selectors_list:
        self.click(selector, by=by, timeout=timeout)
        if spacing > 0:
            time.sleep(spacing)
This method clicks on a list of elements in succession. 'spacing' is the amount of time to wait between clicks. (sec)
def abi_splitext(filename):
    """
    Split the ABINIT extension from a filename.
    "Extension" are found by searching in an internal database.

    Returns "(root, ext)" where ext is the registered ABINIT extension
    The final ".nc" is included (if any)

    >>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')

    >>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
    """
    filename = os.path.basename(filename)
    is_ncfile = False
    # strip a trailing .nc before matching, re-append it at the end
    if filename.endswith(".nc"):
        is_ncfile = True
        filename = filename[:-3]

    known_extensions = abi_extensions()

    # Try suffixes of decreasing length until one is a known extension.
    # This algorithm fails if we have two files
    # e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
    for i in range(len(filename)-1, -1, -1):
        ext = filename[i:]
        if ext in known_extensions:
            break
    else:
        raise ValueError("Cannot find a registered extension in %s" % filename)

    root = filename[:i]
    if is_ncfile:
        ext += ".nc"

    return root, ext
Split the ABINIT extension from a filename. "Extension" are found by searching in an internal database. Returns "(root, ext)" where ext is the registered ABINIT extension The final ".nc" is included (if any) >>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK') >>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
def __cyrillic_to_roman(self, word):
    """
    Transliterate a Russian word into the Roman alphabet.

    A Russian word whose letters consist of the Cyrillic
    alphabet are transliterated into the Roman alphabet
    in order to ease the forthcoming stemming process.

    :param word: The word that is transliterated.
    :type word: unicode
    :return: the transliterated word.
    :rtype: unicode

    :note: This helper method is invoked by the stem method of
           the subclass RussianStemmer. It is not to be invoked directly!
    """
    # Each entry maps the upper- and lower-case forms of one Cyrillic
    # letter to its Roman representation.  A single str.translate pass
    # replaces the original chain of ~66 str.replace calls (one full scan
    # of the word per call).  No Roman output contains a Cyrillic letter,
    # so the one-pass translation is behaviorally identical.
    pairs = [
        ("\u0410\u0430", "a"), ("\u0411\u0431", "b"),
        ("\u0412\u0432", "v"), ("\u0413\u0433", "g"),
        ("\u0414\u0434", "d"), ("\u0415\u0435", "e"),
        ("\u0401\u0451", "e"), ("\u0416\u0436", "zh"),
        ("\u0417\u0437", "z"), ("\u0418\u0438", "i"),
        ("\u0419\u0439", "i`"), ("\u041A\u043A", "k"),
        ("\u041B\u043B", "l"), ("\u041C\u043C", "m"),
        ("\u041D\u043D", "n"), ("\u041E\u043E", "o"),
        ("\u041F\u043F", "p"), ("\u0420\u0440", "r"),
        ("\u0421\u0441", "s"), ("\u0422\u0442", "t"),
        ("\u0423\u0443", "u"), ("\u0424\u0444", "f"),
        ("\u0425\u0445", "kh"), ("\u0426\u0446", "t^s"),
        ("\u0427\u0447", "ch"), ("\u0428\u0448", "sh"),
        ("\u0429\u0449", "shch"), ("\u042A\u044A", "''"),
        ("\u042B\u044B", "y"), ("\u042C\u044C", "'"),
        ("\u042D\u044D", "e`"), ("\u042E\u044E", "i^u"),
        ("\u042F\u044F", "i^a"),
    ]
    table = {ord(ch): roman for cyrillic, roman in pairs for ch in cyrillic}
    return word.translate(table)
Transliterate a Russian word into the Roman alphabet. A Russian word whose letters consist of the Cyrillic alphabet are transliterated into the Roman alphabet in order to ease the forthcoming stemming process. :param word: The word that is transliterated. :type word: unicode :return: the transliterated word. :rtype: unicode :note: This helper method is invoked by the stem method of the subclass RussianStemmer. It is not to be invoked directly!
def adapt(self, d, x):
    """
    Update the filter weights from one desired value and its input (RLS).

    **Args:**

    * `d` : desired value (float)

    * `x` : input array (1-dimensional array)
    """
    # a-priori error for this sample
    err = d - np.dot(self.w, x)
    # Recursive update of the inverse autocorrelation matrix R
    Rx = np.dot(self.R, x)
    denom = self.mu + np.dot(np.dot(x, self.R), x.T)
    self.R = 1 / self.mu * (self.R - np.dot(np.dot(Rx, x.T), self.R) / denom)
    # weight update scaled by the updated R
    self.w += np.dot(self.R, x.T) * err
Adapt weights according one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array)
def delegate(self, success, **kwargs):
    """Dispatch to ``succeeded`` or ``failed`` depending on *success*."""
    handler = self.succeeded if success else self.failed
    handler(**kwargs)
Delegate success/failure to the right method.
def get_variables(self) -> Set[str]:
    """Collect the variable names referenced in the command format strings.

    Scans every command in ``self._cmd`` and returns the set of
    brace-delimited field names found by the formatter, excluding the
    special class values ``creates`` and ``requires``.
    """
    names = set()
    for cmd in self._cmd:
        for parsed in self.__formatter.parse(cmd):
            logger.debug("Checking variable: %s", parsed)
            # parse() yields (literal, field, spec, conversion);
            # field is None for literal-only segments.
            field = parsed[1]
            if field is not None and field not in ("creates", "requires"):
                names.add(field)
    return names
Find all the variables specified in a format string. This returns a list of all the different variables specified in a format string, that is the variables inside the braces.
def list_certificates(self, **kwargs):
    """List certificates registered to organisation.

    Currently returns partially populated certificates. To obtain the full
    certificate object:
    `[get_certificate(certificate_id=cert['id']) for cert in list_certificates]`

    :param int limit: The number of certificates to retrieve.
    :param str order: The ordering direction, ascending (asc) or
        descending (desc).
    :param str after: Get certificates after/starting at given `certificate_id`.
    :param dict filters: Dictionary of filters to apply: type (eq), expire (eq),
        owner (eq)
    :return: list of :py:class:`Certificate` objects
    :rtype: Certificate
    """
    kwargs = self._verify_sort_options(kwargs)
    kwargs = self._verify_filters(kwargs, Certificate)
    if "service__eq" in kwargs:
        service = kwargs["service__eq"]
        if service == CertificateType.developer:
            # Developer certificates are selected through the execution-mode
            # flag rather than the service filter.
            kwargs["device_execution_mode__eq"] = 1
            kwargs.pop("service__eq")
        elif service not in (CertificateType.bootstrap, CertificateType.lwm2m):
            raise CloudValueError(
                "Incorrect value for CertificateType filter: %s" % (service)
            )
    owner = kwargs.pop('owner_id__eq', None)
    if owner is not None:
        kwargs['owner__eq'] = owner
    api = self._get_api(iam.DeveloperApi)
    return PaginatedResponse(api.get_all_certificates, lwrap_type=Certificate, **kwargs)
List certificates registered to organisation. Currently returns partially populated certificates. To obtain the full certificate object: `[get_certificate(certificate_id=cert['id']) for cert in list_certificates]` :param int limit: The number of certificates to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc). :param str after: Get certificates after/starting at given `certificate_id`. :param dict filters: Dictionary of filters to apply: type (eq), expire (eq), owner (eq) :return: list of :py:class:`Certificate` objects :rtype: Certificate
def make_data(self, message):
    """Serialize *message* according to the configured transport content type.

    Non-:class:`Message` values are passed through unchanged.

    Returns:
        str: message data
    """
    if isinstance(message, Message):
        return message.export(self.transport_content_type)
    return message
make data string from message according to transport_content_type Returns: str: message data
def add(self, count, timestamp=None):
    """Record *count* ready work items in every tracked interval.

    :param count: The number of work items ready at the specified time.
    :param timestamp: The timestamp to add. Defaults to None, meaning
        current time. It should be strictly greater (newer) than the
        last added timestamp.
    """
    if timestamp is None:
        timestamp = time.time()
    last = self.last_data
    # Timestamps must be strictly increasing.
    if last >= timestamp:
        raise ValueError("Time {} >= {} in load average calculation".format(last, timestamp))
    self.last_data = timestamp
    for tracker in self.intervals.values():
        tracker.push(count, timestamp)
Add a value at the specified time to the series. :param count: The number of work items ready at the specified time. :param timestamp: The timestamp to add. Defaults to None, meaning current time. It should be strictly greater (newer) than the last added timestamp.
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    # Scalar attributes are copied verbatim when present and non-None.
    for attr in ('document_id', 'field', 'start_offset', 'end_offset'):
        value = getattr(self, attr, None)
        if value is not None:
            _dict[attr] = value
    # Nested entities serialize themselves recursively.
    if getattr(self, 'entities', None) is not None:
        _dict['entities'] = [x._to_dict() for x in self.entities]
    return _dict
Return a json dictionary representing this model.
def find_version(file_path):
    """Scrape version information from specified file path."""
    with open(file_path, 'r') as fh:
        contents = fh.read()
    match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("unable to find version string")
    return match.group(1)
Scrape version information from specified file path.
def cmd_output(self, args):
    '''handle output commands'''
    # No arguments (or an explicit "list") shows the current outputs.
    if not args or args[0] == "list":
        self.cmd_output_list()
        return
    subcommand = args[0]
    if subcommand == "add":
        if len(args) != 2:
            print("Usage: output add OUTPUT")
            return
        self.cmd_output_add(args[1:])
    elif subcommand == "remove":
        if len(args) != 2:
            print("Usage: output remove OUTPUT")
            return
        self.cmd_output_remove(args[1:])
    elif subcommand == "sysid":
        if len(args) != 3:
            print("Usage: output sysid SYSID OUTPUT")
            return
        self.cmd_output_sysid(args[1:])
    else:
        print("usage: output <list|add|remove|sysid>")
handle output commands
def _inform_if_path_does_not_exist(path):
    """Print a notice when *path* does not exist on disk.

    Intended to help users who supply a custom path that, e.g.,
    cannot be found.
    """
    expanded = get_expanded_path(path)
    if os.path.exists(expanded):
        return
    print('Could not find custom path at: {}'.format(expanded))
If the path does not exist, print a message saying so. This is intended to be helpful to users if they specify a custom path that, e.g., cannot be found.
def lookup(self, short_url):
    '''
        Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url

        :param short_url: the url shortened with .gd service
        :type short_url: str.
        :returns: str. -- The original url that was shortened with .gd service
        :raises: **IOError** when timeout with .gd service occurs
                 **ValueError** if .gd response is malformed
                 :class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
                 :class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
                 :class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
                 :class:`gdshortener.GDGenericError` in case of generic error from .gd service (maintenance)
    '''
    # Reject anything that is not a non-empty string before hitting the network.
    if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
        raise GDMalformedURLError('The shortened URL must be a non empty string')
    # Build data for the POST to the service's forward endpoint.
    data = {
        'format': 'json',
        'shorturl': short_url
    }
    opener = urllib2.build_opener()
    headers = { 'User-Agent' : self._user_agent }
    req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
    f_desc = opener.open(req, timeout = self._timeout)
    response = json.loads(f_desc.read())
    if 'url' in response:
        # Success!  The service returns a percent-encoded, HTML-escaped URL;
        # undo both layers before returning it.
        return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
    else:
        # Error: map the service's numeric error codes onto the module's
        # exception hierarchy.
        error_code = int(response['errorcode'])
        error_description = str(response['errormessage'])
        if error_code == 1:
            raise GDMalformedURLError(error_description)
        if error_code == 2:
            raise GDShortURLError(error_description)
        if error_code == 3:
            raise GDRateLimitError(error_description)
        if error_code == 4:
            raise GDGenericError(error_description)
        # NOTE(review): an unknown error code falls through and returns None —
        # confirm whether raising would be more appropriate here.
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url :param short_url: the url shortened with .gd service :type short_url: str. :returns: str. -- The original url that was shortened with .gd service :raises: **IOError** when timeout with .gd service occurs **ValueError** if .gd response is malformed :class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed :class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service :class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service :class:`gdshortener.GDGenericError` in case of generic error from .gd service (maintenance)
def ShowInfo(self):
    """Shows information about available hashers, parsers, plugins, etc."""
    self._output_writer.Write(
        '{0:=^80s}\n'.format(' log2timeline/plaso information '))
    # One two-column table per plugin category.
    for section_title, entries in self._GetPluginData().items():
        table_view = views.ViewsFactory.GetTableView(
            self._views_format_type, column_names=['Name', 'Description'],
            title=section_title)
        for entry_name, entry_description in sorted(entries):
            table_view.AddRow([entry_name, entry_description])
        table_view.Write(self._output_writer)
Shows information about available hashers, parsers, plugins, etc.
def triggerHapticPulse(self, unControllerDeviceIndex, unAxisId, usDurationMicroSec):
    """Fire a single haptic pulse on the given controller axis.

    After this call the application may not trigger another haptic pulse on
    this controller and axis combination for 5ms.

    This function is deprecated in favor of the new IVRInput system.
    """
    self.function_table.triggerHapticPulse(
        unControllerDeviceIndex, unAxisId, usDurationMicroSec)
Trigger a single haptic pulse on a controller. After this call the application may not trigger another haptic pulse on this controller and axis combination for 5ms. This function is deprecated in favor of the new IVRInput system.
def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=None):
    '''
    Deletes a certificate from Amazon.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.delete_server_cert mycert_name
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        result = conn.delete_server_cert(cert_name)
    except boto.exception.BotoServerError as exc:
        # Log the failure and signal it to the caller with False.
        log.debug(exc)
        log.error('Failed to delete certificate %s.', cert_name)
        return False
    return result
Deletes a certificate from Amazon. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.delete_server_cert mycert_name
def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None, content_encoding="aes128gcm", curl=False, timeout=None): """Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple """ # Encode the data. if headers is None: headers = dict() encoded = {} headers = CaseInsensitiveDict(headers) if data: encoded = self.encode(data, content_encoding) if "crypto_key" in encoded: # Append the p256dh to the end of any existing crypto-key crypto_key = headers.get("crypto-key", "") if crypto_key: # due to some confusion by a push service provider, we # should use ';' instead of ',' to append the headers. 
# see # https://github.com/webpush-wg/webpush-encryption/issues/6 crypto_key += ';' crypto_key += ( "dh=" + encoded["crypto_key"].decode('utf8')) headers.update({ 'crypto-key': crypto_key }) if "salt" in encoded: headers.update({ 'encryption': "salt=" + encoded['salt'].decode('utf8') }) headers.update({ 'content-encoding': content_encoding, }) if gcm_key: # guess if it is a legacy GCM project key or actual FCM key # gcm keys are all about 40 chars (use 100 for confidence), # fcm keys are 153-175 chars if len(gcm_key) < 100: endpoint = 'https://android.googleapis.com/gcm/send' else: endpoint = 'https://fcm.googleapis.com/fcm/send' reg_ids = [] if not reg_id: reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1] reg_ids.append(reg_id) gcm_data = dict() gcm_data['registration_ids'] = reg_ids if data: gcm_data['raw_data'] = base64.b64encode( encoded.get('body')).decode('utf8') gcm_data['time_to_live'] = int( headers['ttl'] if 'ttl' in headers else ttl) encoded_data = json.dumps(gcm_data) headers.update({ 'Authorization': 'key='+gcm_key, 'Content-Type': 'application/json', }) else: encoded_data = encoded.get('body') endpoint = self.subscription_info['endpoint'] if 'ttl' not in headers or ttl: headers['ttl'] = str(ttl or 0) # Additionally useful headers: # Authorization / Crypto-Key (VAPID headers) if curl: return self.as_curl(endpoint, encoded_data, headers) return self.requests_method.post(endpoint, data=encoded_data, headers=headers, timeout=timeout)
Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple
def read(self, num):
    """Read ``num`` number of bytes from the stream. Note that this will
    automatically resets/ends the current bit-reading if it does not
    end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
    True, then the entire stream is treated as a bitstream.

    :num: number of bytes to read
    :returns: the read bytes, or empty string if EOF has been reached
    """
    start = self.tell()
    if self.padded:
        # Discard any partially-consumed bits before touching the raw stream.
        self._bits.clear()
        result = utils.binary(self._stream.read(num))
    else:
        # Unpadded mode: pull bits and re-pack them into bytes.
        result = utils.binary(bits_to_bytes(self.read_bits(num * 8)))
    self._update_consumed_ranges(start, self.tell())
    return result
Read ``num`` number of bytes from the stream. Note that this will automatically resets/ends the current bit-reading if it does not end on an even byte AND ``self.padded`` is True. If ``self.padded`` is True, then the entire stream is treated as a bitstream. :num: number of bytes to read :returns: the read bytes, or empty string if EOF has been reached
def calculate_tensor_to_probability_map_output_shapes(operator):
    '''
    Allowed input/output patterns are
        ONNX < 1.2
            1. [1, C] ---> ---> A map
            2. [1, C_1, ..., C_n] ---> A map
        ONNX >= 1.2
            1. [N, C] ---> ---> A sequence of maps
            2. [N, C_1, ..., C_n] ---> A sequence of maps

    Note that N must be 1 currently if you're using ONNX<1.2 because old
    ZipMap doesn't produce a sequence of maps

    If the input is not [N, C], it will be reshaped into
    [N, C_1 x C_2, x ... x C_n] before being fed into ONNX ZipMap.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])
    # Only neural-network classifiers carry class labels in this conversion.
    model_type = operator.raw_operator.WhichOneof('Type')
    if model_type == 'neuralNetworkClassifier':
        class_label_type = operator.raw_operator.neuralNetworkClassifier.WhichOneof('ClassLabels')
    else:
        raise TypeError('%s has no class label' % model_type)
    # Batch size and any user-supplied output doc string are preserved.
    N = operator.inputs[0].type.shape[0]
    doc_string = operator.outputs[0].type.doc_string
    if class_label_type == 'stringClassLabels':
        # Opset < 7: single string-keyed map; opset >= 7: sequence of maps.
        if operator.target_opset < 7:
            operator.outputs[0].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]), doc_string)
        else:
            operator.outputs[0].type = \
                SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N, doc_string)
    elif class_label_type == 'int64ClassLabels':
        # Same structure as above but keyed by int64 labels.
        if operator.target_opset < 7:
            operator.outputs[0].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]), doc_string)
        else:
            operator.outputs[0].type = \
                SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N, doc_string)
    else:
        raise ValueError('Unsupported label type')
Allowed input/output patterns are ONNX < 1.2 1. [1, C] ---> ---> A map 2. [1, C_1, ..., C_n] ---> A map ONNX >= 1.2 1. [N, C] ---> ---> A sequence of maps 2. [N, C_1, ..., C_n] ---> A sequence of maps Note that N must be 1 currently if you're using ONNX<1.2 because old ZipMap doesn't produce a sequence of maps If the input is not [N, C], it will be reshaped into [N, C_1 x C_2, x ... x C_n] before being fed into ONNX ZipMap.
def create_matching_kernel(source_psf, target_psf, window=None): """ Create a kernel to match 2D point spread functions (PSF) using the ratio of Fourier transforms. Parameters ---------- source_psf : 2D `~numpy.ndarray` The source PSF. The source PSF should have higher resolution (i.e. narrower) than the target PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. target_psf : 2D `~numpy.ndarray` The target PSF. The target PSF should have lower resolution (i.e. broader) than the source PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. window : callable, optional The window (or taper) function or callable class instance used to remove high frequency noise from the PSF matching kernel. Some examples include: * `~photutils.psf.matching.HanningWindow` * `~photutils.psf.matching.TukeyWindow` * `~photutils.psf.matching.CosineBellWindow` * `~photutils.psf.matching.SplitCosineBellWindow` * `~photutils.psf.matching.TopHatWindow` For more information on window functions and example usage, see :ref:`psf_matching`. Returns ------- kernel : 2D `~numpy.ndarray` The matching kernel to go from ``source_psf`` to ``target_psf``. The output matching kernel is normalized such that it sums to 1. """ # inputs are copied so that they are not changed when normalizing source_psf = np.copy(np.asanyarray(source_psf)) target_psf = np.copy(np.asanyarray(target_psf)) if source_psf.shape != target_psf.shape: raise ValueError('source_psf and target_psf must have the same shape ' '(i.e. registered with the same pixel scale).') # ensure input PSFs are normalized source_psf /= source_psf.sum() target_psf /= target_psf.sum() source_otf = fftshift(fft2(source_psf)) target_otf = fftshift(fft2(target_psf)) ratio = target_otf / source_otf # apply a window function in frequency space if window is not None: ratio *= window(target_psf.shape) kernel = np.real(fftshift((ifft2(ifftshift(ratio))))) return kernel / kernel.sum()
Create a kernel to match 2D point spread functions (PSF) using the ratio of Fourier transforms. Parameters ---------- source_psf : 2D `~numpy.ndarray` The source PSF. The source PSF should have higher resolution (i.e. narrower) than the target PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. target_psf : 2D `~numpy.ndarray` The target PSF. The target PSF should have lower resolution (i.e. broader) than the source PSF. ``source_psf`` and ``target_psf`` must have the same shape and pixel scale. window : callable, optional The window (or taper) function or callable class instance used to remove high frequency noise from the PSF matching kernel. Some examples include: * `~photutils.psf.matching.HanningWindow` * `~photutils.psf.matching.TukeyWindow` * `~photutils.psf.matching.CosineBellWindow` * `~photutils.psf.matching.SplitCosineBellWindow` * `~photutils.psf.matching.TopHatWindow` For more information on window functions and example usage, see :ref:`psf_matching`. Returns ------- kernel : 2D `~numpy.ndarray` The matching kernel to go from ``source_psf`` to ``target_psf``. The output matching kernel is normalized such that it sums to 1.
def type_errors(self, context=None):
    """Get a list of type errors which can occur during inference.

    Each TypeError is represented by a :class:`BadUnaryOperationMessage`,
    which holds the original exception.

    :returns: The list of possible type errors.
    :rtype: list(BadUnaryOperationMessage)
    """
    try:
        results = self._infer_unaryop(context=context)
        # Inference may yield both real values and error markers; keep
        # only the error messages.
        return [
            result
            for result in results
            if isinstance(result, util.BadUnaryOperationMessage)
        ]
    except exceptions.InferenceError:
        # Inference itself failed -> no type errors to report.
        return []
Get a list of type errors which can occur during inference. Each TypeError is represented by a :class:`BadBinaryOperationMessage`, which holds the original exception. :returns: The list of possible type errors. :rtype: list(BadBinaryOperationMessage)
def plot_summary_distributions(df,ax=None,label_post=False,label_prior=False,
                               subplots=False,figsize=(11,8.5),pt_color='b'):
    """ helper function to plot gaussian distributions from prior and posterior
    means and standard deviations

    Parameters
    ----------
    df : pandas.DataFrame
        a dataframe and csv file.  Must have columns named:
        'prior_mean','prior_stdev','post_mean','post_stdev'.  If loaded
        from a csv file, column 0 is assumed to be the index
    ax: matplotlib.pyplot.axis
        If None, and not subplots, then one is created
        and all distributions are plotted on a single plot
    label_post: bool
        flag to add text labels to the peak of the posterior
    label_prior: bool
        flag to add text labels to the peak of the prior
    subplots: (boolean)
        flag to use subplots.  If True, then 6 axes per page
        are used and a single prior and posterior is plotted on each
    figsize: tuple
        matplotlib figure size
    pt_color: str
        matplotlib color used to fill the posterior distributions

    Returns
    -------
    figs : list
        list of figures
    axes : list
        list of axes

    Note
    ----
    This is useful for demystifying FOSM results

    if subplots is False, a single axis is returned

    Example
    -------
    ``>>>import matplotlib.pyplot as plt``

    ``>>>import pyemu``

    ``>>>pyemu.helpers.plot_summary_distributions("pest.par.usum.csv")``

    ``>>>plt.show()``
    """
    # Local import so matplotlib is only required when plotting.
    import matplotlib.pyplot as plt
    if isinstance(df,str):
        df = pd.read_csv(df,index_col=0)
    if ax is None and not subplots:
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(111)
        ax.grid()
    # Accept variance columns by converting them to standard deviations,
    # and mean columns by aliasing them to the expected-value names.
    if "post_stdev" not in df.columns and "post_var" in df.columns:
        df.loc[:,"post_stdev"] = df.post_var.apply(np.sqrt)
    if "prior_stdev" not in df.columns and "prior_var" in df.columns:
        df.loc[:,"prior_stdev"] = df.prior_var.apply(np.sqrt)
    if "prior_expt" not in df.columns and "prior_mean" in df.columns:
        df.loc[:,"prior_expt"] = df.prior_mean
    if "post_expt" not in df.columns and "post_mean" in df.columns:
        df.loc[:,"post_expt"] = df.post_mean
    if subplots:
        # Paged layout: 6 axes per figure, a new figure when a page fills up.
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(2,3,1)
        ax_per_page = 6
        ax_count = 0
        axes = []
        figs = []
    for name in df.index:
        # Filled curve: posterior; dashed grey curve: prior.
        x,y = gaussian_distribution(df.loc[name,"post_expt"],
                                    df.loc[name,"post_stdev"])
        ax.fill_between(x,0,y,facecolor=pt_color,edgecolor="none",alpha=0.25)
        if label_post:
            mx_idx = np.argmax(y)
            xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
            ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
        x,y = gaussian_distribution(df.loc[name,"prior_expt"],
                                    df.loc[name,"prior_stdev"])
        ax.plot(x,y,color='0.5',lw=3.0,dashes=(2,1))
        if label_prior:
            mx_idx = np.argmax(y)
            xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
            ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
        #ylim = list(ax.get_ylim())
        #ylim[1] *= 1.2
        #ylim[0] = 0.0
        #ax.set_ylim(ylim)
        if subplots:
            ax.set_title(name)
            ax_count += 1
            ax.set_yticklabels([])
            axes.append(ax)
            if name == df.index[-1]:
                break
            if ax_count >= ax_per_page:
                # Page is full: bank the figure and start a new one.
                figs.append(fig)
                fig = plt.figure(figsize=figsize)
                ax_count = 0
            ax = plt.subplot(2,3,ax_count+1)
    if subplots:
        figs.append(fig)
        return figs, axes
    # Single-axis mode: pad the y-limits and hide the tick labels.
    ylim = list(ax.get_ylim())
    ylim[1] *= 1.2
    ylim[0] = 0.0
    ax.set_ylim(ylim)
    ax.set_yticklabels([])
    return ax
helper function to plot gaussian distributions from prior and posterior means and standard deviations Parameters ---------- df : pandas.DataFrame a dataframe and csv file. Must have columns named: 'prior_mean','prior_stdev','post_mean','post_stdev'. If loaded from a csv file, column 0 is assumed to be the index ax: matplotlib.pyplot.axis If None, and not subplots, then one is created and all distributions are plotted on a single plot label_post: bool flag to add text labels to the peak of the posterior label_prior: bool flag to add text labels to the peak of the prior subplots: (boolean) flag to use subplots. If True, then 6 axes per page are used and a single prior and posterior is plotted on each figsize: tuple matplotlib figure size Returns ------- figs : list list of figures axes : list list of axes Note ---- This is useful for demystifying FOSM results if subplots is False, a single axis is returned Example ------- ``>>>import matplotlib.pyplot as plt`` ``>>>import pyemu`` ``>>>pyemu.helpers.plot_summary_distributions("pest.par.usum.csv")`` ``>>>plt.show()``
def _update_limits_from_api(self):
    """
    Query EC2's DescribeAccountAttributes API action and update the
    network interface limit, as needed. Updates ``self.limits``.

    More info on the network interface limit, from the docs:
    'This limit is the greater of either the default limit (350) or your
    On-Demand Instance limit multiplied by 5.
    The default limit for On-Demand Instances is 20.'
    """
    self.connect()
    self.connect_resource()
    logger.info("Querying EC2 DescribeAccountAttributes for limits")
    attribs = self.conn.describe_account_attributes()
    for attrib in attribs['AccountAttributes']:
        if attrib['AttributeName'] != 'max-instances':
            continue
        val = attrib['AttributeValues'][0]['AttributeValue']
        # ENI limit is 5x the instance limit when that exceeds the default.
        if int(val) * 5 > DEFAULT_ENI_LIMIT:
            limit_name = 'Network interfaces per Region'
            self.limits[limit_name]._set_api_limit(int(val) * 5)
    logger.debug("Done setting limits from API")
Query EC2's DescribeAccountAttributes API action and update the network interface limit, as needed. Updates ``self.limits``. More info on the network interface limit, from the docs: 'This limit is the greater of either the default limit (350) or your On-Demand Instance limit multiplied by 5. The default limit for On-Demand Instances is 20.'
def apply_to_event(self, event, hint=None):
    # type: (Dict[str, Any], Dict[str, Any]) -> Optional[Dict[str, Any]]
    """Applies the information contained on the scope to the given event."""
    def _drop(event, cause, ty):
        # type: (Dict[str, Any], Callable, str) -> Optional[Any]
        # Log which processor discarded the event, then signal the drop
        # by returning None to the caller.
        logger.info("%s (%s) dropped event (%s)", ty, cause, event)
        return None
    # Scope-level fields only fill in values the event does not already
    # carry (except level and breadcrumbs, which always apply).
    if self._level is not None:
        event["level"] = self._level
    event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
    if event.get("user") is None and self._user is not None:
        event["user"] = self._user
    if event.get("transaction") is None and self._transaction is not None:
        event["transaction"] = self._transaction
    if event.get("fingerprint") is None and self._fingerprint is not None:
        event["fingerprint"] = self._fingerprint
    if self._extras:
        event.setdefault("extra", {}).update(object_to_json(self._extras))
    if self._tags:
        event.setdefault("tags", {}).update(self._tags)
    if self._contexts:
        event.setdefault("contexts", {}).update(self._contexts)
    if self._span is not None:
        event.setdefault("contexts", {})["trace"] = {
            "trace_id": self._span.trace_id,
            "span_id": self._span.span_id,
        }
    exc_info = hint.get("exc_info") if hint is not None else None
    if exc_info is not None:
        # Error processors see the original exception info and may veto
        # the event by returning None.
        for processor in self._error_processors:
            new_event = processor(event, exc_info)
            if new_event is None:
                return _drop(event, processor, "error processor")
            event = new_event
    for processor in chain(global_event_processors, self._event_processors):
        # Pre-seed new_event so that if the processor raises (swallowed by
        # capture_internal_exceptions) the unmodified event is kept.
        new_event = event
        with capture_internal_exceptions():
            new_event = processor(event, hint)
        if new_event is None:
            return _drop(event, processor, "event processor")
        event = new_event
    return event
Applies the information contained on the scope to the given event.
def _parse_signal_lines(signal_lines):
    """
    Extract fields from a list of signal line strings into a dictionary.

    Parameters
    ----------
    signal_lines : list
        One header signal-specification line (str) per channel; each must
        match the ``_rx_signal`` regex.

    Returns
    -------
    signal_fields : dict
        Maps each field name in ``SIGNAL_SPECS.index`` to a list with one
        entry per channel.  Missing (empty-string) values are replaced by
        the field's read default; numeric fields are typecast.
    """
    n_sig = len(signal_lines)
    # Dictionary for signal fields
    signal_fields = {}

    # Each dictionary field is a list
    for field in SIGNAL_SPECS.index:
        signal_fields[field] = n_sig * [None]

    # Read string fields from signal line
    for ch in range(n_sig):
        # All 14 capture groups of _rx_signal, in header-line order.
        (signal_fields['file_name'][ch], signal_fields['fmt'][ch],
         signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],
         signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],
         signal_fields['baseline'][ch], signal_fields['units'][ch],
         signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],
         signal_fields['init_value'][ch], signal_fields['checksum'][ch],
         signal_fields['block_size'][ch],
         signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0]

        for field in SIGNAL_SPECS.index:
            # Replace empty strings with their read defaults (which are mostly None)
            # Note: Never set a field to None. [None]* n_sig is accurate, indicating
            # that different channels can be present or missing.
            if signal_fields[field][ch] == '':
                signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']

                # Special case: missing baseline defaults to ADCzero if present
                # NOTE(review): this compares adc_zero against '' — whether
                # adc_zero has already been defaulted/typecast at this point
                # depends on the iteration order of SIGNAL_SPECS.index;
                # confirm 'baseline' precedes 'adc_zero' in the spec index.
                if field == 'baseline' and signal_fields['adc_zero'][ch] != '':
                    signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])
            # Typecast non-empty strings for numerical fields
            else:
                if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:
                    signal_fields[field][ch] = int(signal_fields[field][ch])
                elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:
                    signal_fields[field][ch] = float(signal_fields[field][ch])
                    # Special case: adc_gain of 0 means 200
                    if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:
                        signal_fields['adc_gain'][ch] = 200.

    return signal_fields
Extract fields from a list of signal line strings into a dictionary.
def create(self, friendly_name=None, description=None):
    """Creates the Dataset with the specified friendly name and description.

    Args:
      friendly_name: (optional) the friendly name for the dataset if it is being created.
      description: (optional) a description for the dataset if it is being created.
    Returns:
      The Dataset.
    Raises:
      Exception if the Dataset could not be created.
    """
    if not self.exists():
        # API errors propagate unchanged.  The former
        # ``except Exception as e: raise e`` was a no-op re-raise that
        # added nothing and shortened tracebacks.
        response = self._api.datasets_insert(self._name_parts,
                                             friendly_name=friendly_name,
                                             description=description)
        if 'selfLink' not in response:
            raise Exception("Could not create dataset %s" % self._full_name)
    return self
Creates the Dataset with the specified friendly name and description. Args: friendly_name: (optional) the friendly name for the dataset if it is being created. description: (optional) a description for the dataset if it is being created. Returns: The Dataset. Raises: Exception if the Dataset could not be created.
def clone(self, config, **kwargs):
    """Make a clone of this analysis instance.

    A new ``GTAnalysis`` is built from *config* (plus any extra keyword
    arguments) and receives a deep copy of this instance's ROI.
    """
    duplicate = GTAnalysis(config, **kwargs)
    duplicate._roi = copy.deepcopy(self.roi)
    return duplicate
Make a clone of this analysis instance.
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
    """
    Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.

    :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
    :param num_inducing: number of inducing variables (only used for 'FITC' or 'DTC').
    :type num_inducing: int
    :param seed: seed value for data generation.
    :type seed: int
    :param kernel: kernel to use in the model
    :type kernel: a GPy kernel
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the fitted classifier.
    :returns: the fitted model, or None if the ``pods`` dataset package is unavailable.
    """
    try:
        import pods
    except ImportError:
        print('pods unavailable, see https://github.com/sods/ods for example datasets')
        # Bail out here: previously execution continued and crashed with a
        # NameError on `pods` a line later.
        return None

    data = pods.datasets.crescent_data(seed=seed)
    Y = data['Y']
    # Re-code labels from {-1, 1} to {0, 1} for the Bernoulli likelihood.
    Y[Y.flatten() == -1] = 0

    if model_type == 'Full':
        m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
    elif model_type == 'DTC':
        m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel,
                                              num_inducing=num_inducing)
        m['.*len'] = 10.
    elif model_type == 'FITC':
        m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel,
                                          num_inducing=num_inducing)
        m['.*len'] = 3.
    else:
        # Previously an unknown model_type fell through to an UnboundLocalError.
        raise ValueError("model_type must be 'Full', 'DTC' or 'FITC', got %r"
                         % model_type)

    if optimize:
        m.optimize(messages=1)
    if plot:
        m.plot()

    print(m)
    return m
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. :param model_type: type of model to fit ['Full', 'FITC', 'DTC']. :param num_inducing: number of inducing variables (only used for 'FITC' or 'DTC'). :type num_inducing: int :param seed: seed value for data generation. :type seed: int :param kernel: kernel to use in the model :type kernel: a GPy kernel
def _get_converter(self, converter_str):
    """find converter function reference by name

    find converter by name, converter name follows this convention:

        Class.method

    or:

        method

    The first type of converter class/function must be available in
    current module.

    The second type of converter must be available in `__builtin__`
    (or `builtins` in python3) module.

    :param converter_str: string representation of the converter func
    :return: function reference
    :raises ValueError: if a converter name was given but no lookup matched
    """
    ret = None
    if converter_str is not None:
        converter_desc_list = converter_str.split('.')
        # NOTE(review): only the single-name form is resolved here; a
        # dotted 'Class.method' string falls through with ret=None and
        # raises below — confirm that is intended.
        if len(converter_desc_list) == 1:
            converter = converter_desc_list[0]
            # default to `converter`
            ret = getattr(cvt, converter, None)
            if ret is None:
                # try module converter
                ret = self.get_converter(converter)
            if ret is None:
                # fall back through resource class, enum, then parser
                # config lookups, in that order
                ret = self.get_resource_clz_by_name(converter)
            if ret is None:
                ret = self.get_enum_by_name(converter)
            if ret is None:
                # try parser config
                ret = self.get(converter)
    # A name was supplied but nothing resolved: fail loudly.  When
    # converter_str is None, returning None is the expected result.
    if ret is None and converter_str is not None:
        raise ValueError(
            'Specified converter not supported: {}'.format(
                converter_str))
    return ret
find converter function reference by name find converter by name, converter name follows this convention: Class.method or: method The first type of converter class/function must be available in current module. The second type of converter must be available in `__builtin__` (or `builtins` in python3) module. :param converter_str: string representation of the converter func :return: function reference
def query(cls, volume=None, state=None, offset=None, limit=None, api=None):
    """
    Query (List) exports.

    :param volume: Optional volume identifier.
    :param state: Optional import state.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    # Normalize the volume argument to an identifier when one was given;
    # falsy values are passed through untouched.
    volume_param = Transform.to_volume(volume) if volume else volume
    return super(Export, cls)._query(
        url=cls._URL['query'],
        volume=volume_param,
        state=state,
        offset=offset,
        limit=limit,
        fields='_all',
        api=api,
    )
Query (List) exports. :param volume: Optional volume identifier. :param state: Optional import state. :param api: Api instance. :return: Collection object.
def host(value):
    """
    Validates that the value is a valid network location

    Accepts an empty/missing value, otherwise requires exactly one colon
    separating a host part from a numeric port part.  Returns a
    ``(ok, message)`` tuple where ``message`` is empty on success.
    """
    if not value:
        return (True, "")

    pieces = value.split(":")
    # Exactly two pieces mirrors the original two-target unpack.
    if len(pieces) != 2:
        return (False, "value needs to be <host>:<port>")

    try:
        int(pieces[1])
    except ValueError:
        return (False, "port component of the host address needs to be a number")

    return (True, "")
Validates that the value is a valid network location