def _parse_world_info(self, world_info_table):
    """
    Parses the World Information table from Tibia.com and adds the found values to the object.

    Parameters
    ----------
    world_info_table: :class:`list`[:class:`bs4.Tag`]
    """
    world_info = {}
    for row in world_info_table:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        field, value = cols
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        value = value.replace("\xa0", " ")
        world_info[field] = value
    try:
        self.online_count = int(world_info.pop("players_online"))
    except KeyError:
        self.online_count = 0
    self.location = try_enum(WorldLocation, world_info.pop("location"))
    self.pvp_type = try_enum(PvpType, world_info.pop("pvp_type"))
    self.transfer_type = try_enum(TransferType, world_info.pop("transfer_type", None),
                                  TransferType.REGULAR)
    m = record_regexp.match(world_info.pop("online_record"))
    if m:
        self.record_count = int(m.group("count"))
        self.record_date = parse_tibia_datetime(m.group("date"))
    if "world_quest_titles" in world_info:
        self.world_quest_titles = [q.strip() for q
                                   in world_info.pop("world_quest_titles").split(",")]
    self.experimental = world_info.pop("game_world_type") != "Regular"
    self._parse_battleye_status(world_info.pop("battleye_status"))
    self.premium_only = "premium_type" in world_info
    month, year = world_info.pop("creation_date").split("/")
    month = int(month)
    year = int(year)
    if year > 90:
        year += 1900
    else:
        year += 2000
    self.creation_date = "%d-%02d" % (year, month)
    for k, v in world_info.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            pass
def _rotate_and_chop(self, verts, normal, axis=[0, 0, 1]):
    r"""
    Method to rotate a set of vertices (or coords) to align with an axis
    points must be coplanar and normal must be given
    Chops axis coord to give vertices back in 2D
    Used to prepare verts for printing or calculating convex hull in order
    to arrange them in hull order for calculations and printing
    """
    xaxis = [1, 0, 0]
    yaxis = [0, 1, 0]
    zaxis = [0, 0, 1]
    angle = tr.angle_between_vectors(normal, axis)
    if angle == 0.0 or angle == np.pi:
        # We are already aligned
        facet = verts
    else:
        M = tr.rotation_matrix(tr.angle_between_vectors(normal, axis),
                               tr.vector_product(normal, axis))
        try:
            facet = np.dot(verts, M[:3, :3].T)
        except ValueError:
            # Fall back to the unrotated vertices so facet is always defined
            facet = verts
    try:
        x = facet[:, 0]
        y = facet[:, 1]
        z = facet[:, 2]
    except IndexError:
        x = facet[0]
        y = facet[1]
        z = facet[2]
    # Work out span of points and set axes scales to cover this and be
    # equal in both dimensions
    if axis == xaxis:
        output = np.column_stack((y, z))
    elif axis == yaxis:
        output = np.column_stack((x, z))
    elif axis == zaxis:
        output = np.column_stack((x, y))
    else:
        output = facet
    return output
r""" Method to rotate a set of vertices (or coords) to align with an axis points must be coplanar and normal must be given Chops axis coord to give vertices back in 2D Used to prepare verts for printing or calculating convex hull in order to arrange them in hull order for calculations and printing
def split_certificate(certificate_path, destination_folder, password=None):
    """Splits a PKCS12 certificate into Base64-encoded DER certificate and key.

    This method splits a potentially password-protected
    `PKCS12 <https://en.wikipedia.org/wiki/PKCS_12>`_ certificate
    (format ``.p12`` or ``.pfx``) into one certificate and one key part, both in
    `pem <https://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions>`_
    format.

    :returns: Tuple of paths to the generated certificate and key files.
    :rtype: tuple
    """
    try:
        # Attempt Linux and Darwin call first.
        p = subprocess.Popen(
            ["openssl", "version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        sout, serr = p.communicate()
        openssl_executable_version = sout.decode().lower()
        if not (
            openssl_executable_version.startswith("openssl")
            or openssl_executable_version.startswith("libressl")
        ):
            raise BankIDError(
                "OpenSSL executable could not be found. "
                "Splitting cannot be performed."
            )
        openssl_executable = "openssl"
    except Exception:
        # Attempt to call on standard Git for Windows path.
        p = subprocess.Popen(
            ["C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe", "version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        sout, serr = p.communicate()
        if not sout.decode().lower().startswith("openssl"):
            raise BankIDError(
                "OpenSSL executable could not be found. "
                "Splitting cannot be performed."
            )
        openssl_executable = "C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe"

    if not os.path.exists(os.path.abspath(os.path.expanduser(destination_folder))):
        os.makedirs(os.path.abspath(os.path.expanduser(destination_folder)))

    # Paths to output files.
    out_cert_path = os.path.join(
        os.path.abspath(os.path.expanduser(destination_folder)), "certificate.pem"
    )
    out_key_path = os.path.join(
        os.path.abspath(os.path.expanduser(destination_folder)), "key.pem"
    )

    # Use openssl for converting to pem format.
    pipeline_1 = [
        openssl_executable, "pkcs12",
        "-in", "{0}".format(certificate_path),
        "-passin" if password is not None else "",
        "pass:{0}".format(password) if password is not None else "",
        "-out", "{0}".format(out_cert_path),
        "-clcerts", "-nokeys",
    ]
    p = subprocess.Popen(
        list(filter(None, pipeline_1)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    p.communicate()

    pipeline_2 = [
        openssl_executable, "pkcs12",
        "-in", "{0}".format(certificate_path),
        "-passin" if password is not None else "",
        "pass:{0}".format(password) if password is not None else "",
        "-out", "{0}".format(out_key_path),
        "-nocerts", "-nodes",
    ]
    p = subprocess.Popen(
        list(filter(None, pipeline_2)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    p.communicate()

    # Return the paths to the two generated files.
    return out_cert_path, out_key_path
def click(self, force_click=False):
    """
    Clicks the element

    @type force_click:  bool
    @param force_click: force a click on the element using javascript, skipping webdriver

    @rtype:             WebElementWrapper
    @return:            Returns itself
    """
    js_executor = self.driver_wrapper.js_executor

    def click_element():
        """Wrapper to call click"""
        return self.element.click()

    def force_click_element():
        """Javascript wrapper to force_click the element"""
        js_executor.execute_template('clickElementTemplate', {}, self.element)
        return True

    if force_click:
        self.execute_and_handle_webelement_exceptions(force_click_element,
                                                      'click element by javascript')
    else:
        self.execute_and_handle_webelement_exceptions(click_element, 'click')
    return self
def default_unmarshaller(instance, state):
    """
    Restore the state of an object.

    If the ``__setstate__()`` method exists on the instance, it is called with the state
    object as the argument. Otherwise, the instance's ``__dict__`` is replaced with ``state``.

    :param instance: an uninitialized instance
    :param state: the state object, as returned by :func:`default_marshaller`
    """
    if hasattr(instance, '__setstate__'):
        instance.__setstate__(state)
    else:
        try:
            instance.__dict__.update(state)
        except AttributeError:
            raise TypeError('{!r} has no __dict__ attribute and does not implement __setstate__()'
                            .format(instance.__class__.__name__))
def GroupSizer(field_number, is_repeated, is_packed):
    """Returns a sizer for a group field."""
    # Groups are encoded with both a start and an end tag, hence the doubled
    # tag size.
    tag_size = _TagSize(field_number) * 2
    assert not is_packed
    if is_repeated:
        def RepeatedFieldSize(value):
            result = tag_size * len(value)
            for element in value:
                result += element.ByteSize()
            return result
        return RepeatedFieldSize
    else:
        def FieldSize(value):
            return tag_size + value.ByteSize()
        return FieldSize
def marvcli_undiscard(datasets):
    """Undiscard DATASETS previously discarded."""
    create_app()
    setids = parse_setids(datasets, discarded=True)
    dataset = Dataset.__table__
    stmt = dataset.update()\
                  .where(dataset.c.setid.in_(setids))\
                  .values(discarded=False)
    db.session.execute(stmt)
    db.session.commit()
def gap_to_sorl(time_gap):
    """
    Convert an ISO 8601 duration to Solr's gap format, e.g. P1D to +1DAY.

    :param time_gap: an ISO 8601 duration string.
    :return: solr's format duration.
    """
    quantity, unit = parse_ISO8601(time_gap)
    if unit[0] == "WEEKS":
        # Solr has no week unit, so express weeks as days.
        return "+{0}DAYS".format(quantity * 7)
    else:
        return "+{0}{1}".format(quantity, unit[0])
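# Usage sketch (hypothetical values, assuming parse_ISO8601 returns pairs
# such as (1, ("DAYS",)) for "P1D" and (2, ("WEEKS",)) for "P2W"):
# gap_to_sorl("P1D")  -> "+1DAYS"
# gap_to_sorl("P2W")  -> "+14DAYS"   (weeks are rewritten as days for Solr)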
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize API XML doc to an ElementTree.

    Args:
        xml_str: bytes
            DataONE API XML doc

        encoding: str
            Decoder to use when converting the XML doc ``bytes`` to a Unicode str.

    Returns:
        ElementTree: Matching the API version of the XML doc.
    """
    parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    return xml.etree.ElementTree.fromstring(xml_str, parser=parser)
def zs(inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   zs(inlist)
    """
    zscores = []
    for item in inlist:
        zscores.append(z(inlist, item))
    return zscores
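# A minimal sketch of the z() helper this relies on (an assumption, not the
# library's implementation; the real z() may use the population rather than
# the sample standard deviation):
def _z_sketch(inlist, score):
    mean = sum(inlist) / len(inlist)
    sd = (sum((x - mean) ** 2 for x in inlist) / (len(inlist) - 1)) ** 0.5
    return (score - mean) / sd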
def list_namespaced_cron_job(self, namespace, **kwargs):
    """
    list or watch objects of kind CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V2alpha1CronJobList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
    else:
        (data) = self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
        return data
def describe_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
                                      region=None, key=None, keyid=None, profile=None):
    '''
    Get an integration response for a given method in a given API

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_api_integration_response restApiId resourcePath httpMethod statusCode
    '''
    try:
        resource = describe_api_resource(restApiId, resourcePath, region=region,
                                         key=key, keyid=keyid, profile=profile).get('resource')
        if resource:
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            response = conn.get_integration_response(restApiId=restApiId,
                                                     resourceId=resource['id'],
                                                     httpMethod=httpMethod,
                                                     statusCode=statusCode)
            return {'response': _convert_datetime_str(response)}
        return {'error': 'no such resource'}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
def add(self, item):
    """
    Transactional implementation of :func:`List.add(item)
    <hazelcast.proxy.list.List.add>`

    :param item: (object), the new item to be added.
    :return: (bool), ``true`` if the item is added successfully, ``false`` otherwise.
    """
    check_not_none(item, "item can't be none")
    return self._encode_invoke(transactional_list_add_codec, item=self._to_data(item))
def _from_args(args):
    """Factory method to create a new instance from command line args.

    :param args: instance of :class:`argparse.Namespace`
    """
    return bugzscout.BugzScout(args.url, args.user, args.project, args.area)
import numpy as np
import scipy.ndimage


def movingaverage(arr, window):
    """
    Calculates the moving average ("rolling mean") of an array of a certain
    window size.
    """
    m = np.ones(int(window)) / int(window)
    return scipy.ndimage.convolve1d(arr, m, axis=0, mode='reflect')
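# Usage sketch: a window-3 rolling mean; edges are reflect-padded by
# scipy.ndimage.convolve1d, so the output has the same length as the input.
data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(movingaverage(data, 3))  # -> [1.333... 2. 3. 4. 4.666...]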
def riseset(self, crd, ev="5deg"):
    """This will give the rise/set times of a source.

    It needs the position in the frame, and a time. If the latter is not
    set, the current time will be used.

    :param crd: a direction measure
    :param ev: the elevation limit as a quantity or string
    :returns: The returned value is a `dict` with a
              'solved' key, which is `False` if the source is always
              below or above the horizon. In that case the rise and set
              fields will all have a string value. The `dict` also returns
              a rise and set `dict`, with 'last' and 'utc' keys showing
              the rise and set times as epochs.
    """
    a = self.rise(crd, ev)
    if isinstance(a['rise'], str):
        # Source never crosses the horizon; propagate the descriptive strings.
        return {"rise": {"last": a['rise'], "utc": a['rise']},
                "set": {"last": a['set'], "utc": a['set']},
                "solved": False}
    ofe = self.measure(self._framestack["epoch"], "utc")
    if not is_measure(ofe):
        ofe = self.epoch('utc', 'today')
    x = a.copy()
    for k in x:
        x[k] = self.measure(
            self.epoch("last", a[k].totime(),
                       off=self.epoch("r_utc",
                                      (dq.quantity(ofe["m0"])
                                       + dq.quantity("0.5d")))),
            "utc")
    return {"rise": {"last": self.epoch("last", a["rise"].totime()),
                     "utc": x["rise"]},
            "set": {"last": self.epoch("last", a["set"].totime()),
                    "utc": x["set"]},
            "solved": True}
def prepare(args):
    """
    %prog prepare pairsfile cdsfile [pepfile] -o paired.cds.fasta

    Pick sequences from cdsfile to form pairs, ready to be calculated. The
    pairsfile can be generated from formats.blast.cscore(). The first two
    columns contain the pair.
    """
    from jcvi.formats.fasta import Fasta

    p = OptionParser(prepare.__doc__)
    p.set_outfile()

    opts, args = p.parse_args(args)
    outfile = opts.outfile

    if len(args) == 2:
        pairsfile, cdsfile = args
        pepfile = None
    elif len(args) == 3:
        pairsfile, cdsfile, pepfile = args
    else:
        sys.exit(not p.print_help())

    f = Fasta(cdsfile)
    fp = open(pairsfile)
    fw = must_open(outfile, "w")
    if pepfile:
        assert outfile != "stdout", "Please specify outfile name."
        f2 = Fasta(pepfile)
        fw2 = must_open(outfile + ".pep", "w")

    for row in fp:
        if row[0] == '#':
            continue
        a, b = row.split()[:2]
        if a == b:
            logging.debug("Self pairs found: {0} - {1}. Ignored".format(a, b))
            continue

        if a not in f:
            a = find_first_isoform(a, f)
            assert a, a
        if b not in f:
            b = find_first_isoform(b, f)
            assert b, b

        acds = f[a]
        bcds = f[b]
        SeqIO.write((acds, bcds), fw, "fasta")
        if pepfile:
            apep = f2[a]
            bpep = f2[b]
            SeqIO.write((apep, bpep), fw2, "fasta")
    fw.close()
    if pepfile:
        fw2.close()
def find_path(self, node_source, node_target, type='nodes'):
    """Determines the shortest path

    Determines the shortest path from `node_source` to `node_target` in
    _graph using networkx' shortest path algorithm.

    Args
    ----
    node_source: GridDing0
        source node, member of _graph
    node_target: GridDing0
        target node, member of _graph
    type : str
        Specify if nodes or edges should be returned. Default is `nodes`

    Returns
    -------
    :any:`list` of :obj:`GridDing0`
        path: shortest path from `node_source` to `node_target`
        (list of nodes in _graph)

    Notes
    -----
    WARNING: The shortest path is calculated using the count of hops, not
    the actual line lengths! As long as the circuit breakers are open, this
    works fine since there's only one path. But if they are closed, there
    are 2 possible paths. The result is a path which has the min. count of
    hops but might have a longer total path length than the second one.
    See networkx' shortest_path() function for details on how the path is
    calculated.
    """
    if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
        path = nx.shortest_path(self._graph, node_source, node_target)
    else:
        raise Exception('At least one of the nodes is not a member of graph.')
    if type == 'nodes':
        return path
    elif type == 'edges':
        return [_ for _ in self._graph.edges(nbunch=path, data=True)
                if (_[0] in path and _[1] in path)]
    else:
        raise ValueError('Please specify type as nodes or edges')
def manage_admins():
    """Page for viewing and managing build admins."""
    build = g.build

    # Do not show cached data
    db.session.add(build)
    db.session.refresh(build)

    add_form = forms.AddAdminForm()
    if add_form.validate_on_submit():
        invitation_user_id = '%s:%s' % (
            models.User.EMAIL_INVITATION, add_form.email_address.data)

        invitation_user = models.User.query.get(invitation_user_id)
        if not invitation_user:
            invitation_user = models.User(
                id=invitation_user_id,
                email_address=add_form.email_address.data)
            db.session.add(invitation_user)

        db.session.add(build)
        db.session.add(invitation_user)
        db.session.refresh(build, lockmode='update')

        build.owners.append(invitation_user)
        save_admin_log(build, invited_new_admin=True,
                       message=invitation_user.email_address)

        db.session.commit()

        logging.info('Added user=%r as owner to build_id=%r',
                     invitation_user.id, build.id)
        return redirect(url_for('manage_admins', build_id=build.id))

    add_form.build_id.data = build.id

    revoke_form_list = []
    for user in build.owners:
        form = forms.RemoveAdminForm()
        form.user_id.data = user.id
        form.build_id.data = build.id
        form.revoke.data = True
        revoke_form_list.append((user, form))

    return render_template(
        'view_admins.html',
        build=build,
        add_form=add_form,
        revoke_form_list=revoke_form_list)
def _load_same_codes(self, refresh=False):
    """Loads the Same Codes into this object"""
    if refresh is True:
        self._get_same_codes()
    else:
        self._cached_same_codes()
def remove_actor(self, actor, reset_camera=False):
    """
    Removes an actor from the Renderer.

    Parameters
    ----------
    actor : vtk.vtkActor
        Actor that has previously been added to the Renderer.

    reset_camera : bool, optional
        Resets camera so all actors can be seen.

    Returns
    -------
    success : bool
        True when actor removed.  False when actor has not been removed.
    """
    name = None
    if isinstance(actor, str):
        name = actor
        keys = list(self._actors.keys())
        names = []
        for k in keys:
            if k.startswith('{}-'.format(name)):
                names.append(k)
        if len(names) > 0:
            self.remove_actor(names, reset_camera=reset_camera)
        try:
            actor = self._actors[name]
        except KeyError:
            # If actor of that name is not present then return success
            return False
    # collections.abc.Iterable is the Python 3 location of this ABC
    if isinstance(actor, collections.abc.Iterable):
        success = False
        for a in actor:
            rv = self.remove_actor(a, reset_camera=reset_camera)
            if rv or success:
                success = True
        return success
    if actor is None:
        return False

    # First remove this actor's mapper from _scalar_bar_mappers
    _remove_mapper_from_plotter(self.parent, actor, False)
    self.RemoveActor(actor)

    if name is None:
        for k, v in self._actors.items():
            if v == actor:
                name = k
    self._actors.pop(name, None)
    self.update_bounds_axes()
    if reset_camera:
        self.reset_camera()
    elif not self.camera_set and reset_camera is None:
        self.reset_camera()
    else:
        self.parent._render()
    return True
def has_neigh(tag_name, params=None, content=None, left=True):
    """
    This function generates functions, which matches all tags with
    neighbours defined by parameters.

    Args:
        tag_name (str): Tag has to have neighbour with this tagname.
        params (dict): Tag has to have neighbour with this parameters.
        content (str): Tag has to have neighbour with this content.
        left (bool, default True): Tag has to have neighbour on the left,
             or right (set to ``False``).

    Returns:
        bool: True for every matching tag.

    Note:
        This function can be used as parameter for ``.find()`` method in
        HTMLElement.
    """
    def has_neigh_closure(element):
        if not element.parent \
           or not (element.isTag() and not element.isEndTag()):
            return False

        # filter only visible tags/neighbours; list() so len()/index() work
        # under Python 3 as well
        childs = element.parent.childs
        childs = list(filter(
            lambda x: (x.isTag() and not x.isEndTag())
                      or x.getContent().strip() or x is element,
            childs
        ))
        if len(childs) <= 1:
            return False

        ioe = childs.index(element)
        if left and ioe > 0:
            return is_equal_tag(childs[ioe - 1], tag_name, params, content)

        if not left and ioe + 1 < len(childs):
            return is_equal_tag(childs[ioe + 1], tag_name, params, content)

        return False
    return has_neigh_closure
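# Usage sketch (hypothetical, assuming a dhtmlparser-style DOM where
# HTMLElement.find() accepts an `fn` predicate): match every <td> whose
# left neighbour is a <th> containing "Name".
# matches = dom.find("td", fn=has_neigh("th", content="Name", left=True))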
def query_order(self, transaction_id=None, out_trade_no=None):
    """
    Query order API.

    :param transaction_id: Either this or out_trade_no. The WeChat order
        number; takes priority when provided.
    :param out_trade_no: Either this or transaction_id. The merchant order
        number from the merchant's own system; required when
        transaction_id is not provided.
    :return: the result data returned by the API.
    """
    if not transaction_id and not out_trade_no:
        raise ValueError("Either transaction_id or out_trade_no must be provided.")
    data = {
        "appid": self.appid,
        "mch_id": self.mch_id,
        "transaction_id": transaction_id,
        "out_trade_no": out_trade_no,
    }
    return self._post("pay/paporderquery", data=data)
def set_webhook(self, webhook_path: Optional[str] = None,
                request_handler: Any = WebhookRequestHandler,
                route_name: str = DEFAULT_ROUTE_NAME,
                web_app: Optional[Application] = None):
    """
    Set webhook for bot

    :param webhook_path: Optional[str] (default: None)
    :param request_handler: Any (default: WebhookRequestHandler)
    :param route_name: str Name of webhook handler route (default: 'webhook_handler')
    :param web_app: Optional[Application] (default: None)
    :return:
    """
    self._prepare_webhook(webhook_path, request_handler, route_name, web_app)
    self.loop.run_until_complete(self._startup_webhook())
def findall(self, title=None):
    """Return a list of worksheets with the given title.

    Args:
        title(str): title/name of the worksheets to return, or ``None`` for all

    Returns:
        list: list of contained worksheet instances (possibly empty)
    """
    if title is None:
        return list(self._sheets)
    if title not in self._titles:
        return []
    return list(self._titles[title])
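# Usage sketch (hypothetical `book` container exposing this method):
# book.findall()            # every contained worksheet
# book.findall("Expenses")  # only worksheets titled "Expenses"; [] if none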
def predict(self, u):
    '''Predicts the output value at u from the fitted polynomial expansion.
    Therefore the method train() must be called first.

    :param numpy.ndarray u: input value at which to predict the output.
    :return: q_approx - the predicted value of the output at u
    :rtype: float

    *Sample Usage*::

        >>> thePC = PolySurrogate(dimensions=2)
        >>> U = thePC.getQuadraturePoints()
        >>> Q = [myFunc(u) for u in U]
        >>> thePC.train(U, Q)
        >>> thePC.predict([0, 1])
    '''
    y, ysub = 0, np.zeros(self.N_poly)
    for ip in range(self.N_poly):
        inds = tuple(self.index_polys[ip])
        ysub[ip] = self.coeffs[inds] * eval_poly(u, inds, self.J_list)
        y += ysub[ip]
    self.response_components = ysub
    return y
def delimited(items, character='|'):
    """Returns a character delimited version of the provided list as a Python string"""
    # Join on the requested delimiter rather than a hard-coded '|'.
    return character.join(items) if type(items) in (list, tuple, set) else items
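# Usage sketch:
# delimited(['a', 'b', 'c'])        -> 'a|b|c'
# delimited(('a', 'b', 'c'), ', ')  -> 'a, b, c'
# delimited('already-a-string')     -> 'already-a-string'  (passed through)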
def write(self, writer=None, encoding='utf-8', indent=0, newline='',
          omit_declaration=False, node_depth=0, quote_char='"'):
    """
    Serialize this node and its descendants to text, writing
    the output to a given *writer* or to stdout.

    :param writer: an object such as a file or stream to which XML text
        is sent. If *None* text is sent to :attr:`sys.stdout`.
    :type writer: a file, stream, etc or None
    :param string encoding: the character encoding for serialized text.
    :param indent: indentation prefix to apply to descendent nodes for
        pretty-printing. The value can take many forms:

        - *int*: the number of spaces to indent. 0 means no indent.
        - *string*: a literal prefix for indented nodes, such as ``\\t``.
        - *bool*: no indent if *False*, four spaces indent if *True*.
        - *None*: no indent
    :type indent: string, int, bool, or None
    :param newline: the string value used to separate lines of output.
        The value can take a number of forms:

        - *string*: the literal newline value, such as ``\\n`` or ``\\r``.
          An empty string means no newline.
        - *bool*: no newline if *False*, ``\\n`` newline if *True*.
        - *None*: no newline.
    :type newline: string, bool, or None
    :param boolean omit_declaration: if *True* the XML declaration header
        is omitted, otherwise it is included. Note that the declaration is
        only output when serializing an :class:`xml4h.nodes.Document` node.
    :param int node_depth: the indentation level to start at, such as 2 to
        indent output as if the given *node* has two ancestors.
        This parameter will only be useful if you need to output XML text
        fragments that can be assembled into a document.  This parameter
        has no effect unless indentation is applied.
    :param string quote_char: the character that delimits quoted content.
        You should never need to mess with this.

    Delegates to :func:`xml4h.writer.write_node` applied to this node.
    """
    xml4h.write_node(self,
                     writer=writer, encoding=encoding, indent=indent,
                     newline=newline, omit_declaration=omit_declaration,
                     node_depth=node_depth, quote_char=quote_char)
def Open(self, file_object):
    """Opens the CPIO archive file.

    Args:
        file_object (FileIO): a file-like object.

    Raises:
        IOError: if the file format signature is not supported.
        OSError: if the file format signature is not supported.
    """
    file_object.seek(0, os.SEEK_SET)
    signature_data = file_object.read(6)

    self.file_format = None
    if len(signature_data) > 2:
        if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
            self.file_format = 'bin-big-endian'
        elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
            self.file_format = 'bin-little-endian'
        elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
            self.file_format = 'odc'
        elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
            self.file_format = 'newc'
        elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
            self.file_format = 'crc'

    if self.file_format is None:
        raise IOError('Unsupported CPIO format.')

    self._file_object = file_object
    self._file_size = file_object.get_size()

    self._ReadFileEntries(self._file_object)
def forget_exporter(name):
    '''
    forget_exporter(name) yields True if an exporter of type name was
    successfully forgotten from the neuropythy exporters list and false
    otherwise. This function must be called before an exporter can be
    replaced.
    '''
    global exporters
    name = name.lower()
    if name in exporters:
        exporters = exporters.discard(name)
        delattr(save, name)
        return True
    else:
        return False
def legislator_inactive(request, abbr, legislator):
    '''
    Context:
        - vote_preview_row_template
        - old_roles
        - abbr
        - metadata
        - legislator
        - sources
        - sponsored_bills
        - legislator_votes
        - has_votes
        - nav_active

    Templates:
        - billy/web/public/legislator.html
        - billy/web/public/vote_preview_row.html
    '''
    sponsored_bills = legislator.sponsored_bills(
        limit=6, sort=[('action_dates.first', pymongo.DESCENDING)])

    legislator_votes = list(legislator.votes_6_sorted())
    has_votes = bool(legislator_votes)

    return render(
        request, templatename('legislator'),
        dict(vote_preview_row_template=templatename('vote_preview_row'),
             old_roles=legislator.old_roles_manager,
             abbr=abbr,
             metadata=legislator.metadata,
             legislator=legislator,
             sources=legislator['sources'],
             sponsored_bills=list(sponsored_bills),
             legislator_votes=legislator_votes,
             has_votes=has_votes,
             nav_active='legislators'))
def solve_let(expr, vars):
    """Solves a let-form by calling RHS with nested scope."""
    lhs_value = solve(expr.lhs, vars).value
    if not isinstance(lhs_value, structured.IStructured):
        raise errors.EfilterTypeError(
            root=expr.lhs, query=expr.original,
            message="The LHS of 'let' must evaluate to an IStructured. Got %r."
                    % (lhs_value,))
    return solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
def initialize_page(title, style, script, header=None):
    """
    A function that returns a markup.py page object with the required html
    header.
    """
    page = markup.page(mode="strict_html")
    page._escape = False
    page.init(title=title, css=style, script=script, header=header)
    return page
def task_done(self, **kws):
    """Indicate that a formerly enqueued task is complete.

    Used by Queue consumer threads.  For each get() used to fetch a task,
    a subsequent call to task_done() tells the queue that the processing
    on the task is complete.

    If a join() is currently blocking, it will resume when all items have
    been processed (meaning that a task_done() call was received for every
    item that had been put() into the queue).

    Raises a ValueError if called more times than there were items placed
    in the queue.
    """
    unfinished = self.unfinished_tasks - 1
    op = None
    if unfinished <= 0:
        if unfinished < 0:
            raise ValueError('task_done() called too many times')
        op = QDone(self, **kws)
    self.unfinished_tasks = unfinished
    return op
def do_display(self, arg):
    """
    display expression

    Add expression to the display list; expressions in this list are
    evaluated at each step, and printed every time its value changes.

    WARNING: since the expressions are evaluated multiple times, pay
    attention not to put expressions with side-effects in the display
    list.
    """
    try:
        value = self._getval_or_undefined(arg)
    except:
        return
    self._get_display_list()[arg] = value
def remove(self, func):
    """Remove any provisioned log sink if auto created"""
    if not self.data['name'].startswith(self.prefix):
        return

    parent = self.get_parent(self.get_log())
    _, sink_path, _ = self.get_sink()
    client = self.session.client(
        'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
    try:
        client.execute_command(
            'delete', {'sinkName': sink_path})
    except HttpError as e:
        if e.resp.status != 404:
            raise
def evaluate_binop_comparison(self, operation, left, right, **kwargs):
    """
    Evaluate given comparison binary operation with given operands.
    """
    if operation not in self.binops_comparison:
        raise ValueError("Invalid comparison binary operation '{}'".format(operation))

    if left is None or right is None:
        return None
    if not isinstance(left, (list, ListIP)):
        left = [left]
    if not isinstance(right, (list, ListIP)):
        right = [right]
    if not left or not right:
        return None

    if operation in ['OP_IS']:
        res = self.binops_comparison[operation](left, right)
        if res:
            return True
    elif operation in ['OP_IN']:
        for iteml in left:
            res = self.binops_comparison[operation](iteml, right)
            if res:
                return True
    else:
        for iteml in left:
            if iteml is None:
                continue
            for itemr in right:
                if itemr is None:
                    continue
                res = self.binops_comparison[operation](iteml, itemr)
                if res:
                    return True
    return False
def append_once(cls, code, **kwargs):
    """One-off code generation using append.

    If keyword args are provided, initialized using
    :meth:`with_id_processor`.
    """
    if kwargs:
        g = cls.with_id_processor()
        g._append_context(kwargs)
    else:
        g = cls()
    g.append(code)
    return g.code
def get_loaded_project(self, project_id):
    """
    Returns a project or raises a 404 error.

    If the project has not finished loading, wait for it.
    """
    project = self.get_project(project_id)
    yield from project.wait_loaded()
    return project
def _geometric_intersect(nodes1, degree1, nodes2, degree2, verify):
    r"""Find all intersections among edges of two surfaces.

    .. note::

       There is also a Fortran implementation of this function, which
       will be used if it can be built.

    Uses :func:`generic_intersect` with the
    :attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy.

    Args:
        nodes1 (numpy.ndarray): The nodes defining the first surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree1 (int): The degree of the surface given by ``nodes1``.
        nodes2 (numpy.ndarray): The nodes defining the second surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree2 (int): The degree of the surface given by ``nodes2``.
        verify (Optional[bool]): Indicates if duplicate intersections
            should be checked.

    Returns:
        Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of

        * List of "edge info" lists. Each list represents a curved polygon
          and contains 3-tuples of edge index, start and end (see the
          output of :func:`ends_to_curve`).
        * "Contained" boolean. If not :data:`None`, indicates that one of
          the surfaces is contained in the other.
        * The nodes of three edges of the first surface being intersected
          followed by the nodes of the three edges of the second.
    """
    all_intersections = _geometric_intersection.all_intersections
    return generic_intersect(
        nodes1, degree1, nodes2, degree2, verify, all_intersections
    )
r"""Find all intersections among edges of two surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Uses :func:`generic_intersect` with the :attr:`~.IntersectionStrategy.GEOMETRIC` intersection strategy. Args: nodes1 (numpy.ndarray): The nodes defining the first surface in the intersection (assumed in :math:\mathbf{R}^2`). degree1 (int): The degree of the surface given by ``nodes1``. nodes2 (numpy.ndarray): The nodes defining the second surface in the intersection (assumed in :math:\mathbf{R}^2`). degree2 (int): The degree of the surface given by ``nodes2``. verify (Optional[bool]): Indicates if duplicate intersections should be checked. Returns: Tuple[Optional[list], Optional[bool], tuple]: 3-tuple of * List of "edge info" lists. Each list represents a curved polygon and contains 3-tuples of edge index, start and end (see the output of :func:`ends_to_curve`). * "Contained" boolean. If not :data:`None`, indicates that one of the surfaces is contained in the other. * The nodes of three edges of the first surface being intersected followed by the nodes of the three edges of the second.
def to_utf8(value):
    """Returns a string encoded using UTF-8.

    This function comes from `Tornado`_.

    :param value:
        A unicode or string to be encoded.
    :returns:
        The encoded string.
    """
    if isinstance(value, unicode):
        return value.encode('utf-8')

    assert isinstance(value, str)
    return value
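# Usage sketch (Python 2 only, since this relies on the built-in `unicode`
# type):
# to_utf8(u'caf\xe9')   -> 'caf\xc3\xa9'  (UTF-8 bytes)
# to_utf8('plain str')  -> 'plain str'    (returned unchanged)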
def push_script(self, scriptable, script, callback=None):
    """Run the script and add it to the list of threads."""
    if script in self.threads:
        self.threads[script].finish()
    thread = Thread(self.run_script(scriptable, script), scriptable, callback)
    self.new_threads[script] = thread
    return thread
def run(self, argv):
    """Dispatch the given command."""
    command = argv[0]
    handlers = {
        'cancel': self.cancel,
        'create': self.create,
        'events': self.events,
        'finalize': self.finalize,
        'list': self.list,
        'pause': self.pause,
        'preview': self.preview,
        'results': self.results,
        'searchlog': self.searchlog,
        'summary': self.summary,
        'perf': self.perf,
        'timeline': self.timeline,
        'touch': self.touch,
        'unpause': self.unpause,
    }
    handler = handlers.get(command, None)
    if handler is None:
        error("Unrecognized command: %s" % command, 2)
    handler(argv[1:])
def dict2pb(cls, adict, strict=False):
    """
    Takes a class representing the ProtoBuf Message and fills it with data from
    the dict.
    """
    obj = cls()
    for field in obj.DESCRIPTOR.fields:
        if not field.label == field.LABEL_REQUIRED:
            continue
        if not field.has_default_value:
            continue
        if not field.name in adict:
            raise ConvertException('Field "%s" missing from descriptor dictionary.'
                                   % field.name)

    field_names = set([field.name for field in obj.DESCRIPTOR.fields])
    if strict:
        for key in adict.keys():
            if key not in field_names:
                raise ConvertException(
                    'Key "%s" can not be mapped to field in %s class.'
                    % (key, type(obj)))

    for field in obj.DESCRIPTOR.fields:
        if not field.name in adict:
            continue
        msg_type = field.message_type
        if field.label == FD.LABEL_REPEATED:
            if field.type == FD.TYPE_MESSAGE:
                for sub_dict in adict[field.name]:
                    item = getattr(obj, field.name).add()
                    item.CopyFrom(dict2pb(msg_type._concrete_class, sub_dict))
            else:
                # Python 3 fix: map() is lazy, so force evaluation with list()
                list(map(getattr(obj, field.name).append, adict[field.name]))
        else:
            if field.type == FD.TYPE_MESSAGE:
                value = dict2pb(msg_type._concrete_class, adict[field.name])
                getattr(obj, field.name).CopyFrom(value)
            elif field.type in [FD.TYPE_UINT64, FD.TYPE_INT64, FD.TYPE_SINT64]:
                setattr(obj, field.name, int(adict[field.name]))
            else:
                setattr(obj, field.name, adict[field.name])
    return obj
import numpy as np


def calculate_up_moves(high_data):
    """
    Up Move.

    Formula:
    UPMOVE = Ht - Ht-1
    """
    up_moves = [high_data[idx] - high_data[idx - 1]
                for idx in range(1, len(high_data))]
    # Pad with NaN so the result aligns with the input series.
    return [np.nan] + up_moves
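# Usage sketch: daily highs of 3, 5, 4 give up-moves of +2 and -1; the first
# slot is NaN because there is no previous high to compare against.
print(calculate_up_moves([3.0, 5.0, 4.0]))  # -> [nan, 2.0, -1.0]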
import copy


def dict2tlist(this_dict, **kwargs):
    '''
    Convert a dict to a list of (key, value) tuples.
    Note: the original key ordering is not guaranteed.

        d = {'a':'b','c':'d'}
        dict2tlist(d)
    '''
    if 'check' in kwargs:
        check = kwargs['check']
    else:
        check = 1
    if check:
        if not isinstance(this_dict, dict):
            return None
    if 'deepcopy' in kwargs:
        deepcopy = kwargs['deepcopy']
    else:
        deepcopy = 1
    tuple_list = []
    if deepcopy:
        new = copy.deepcopy(this_dict)
    else:
        new = this_dict
    # Iterate over the (possibly deep-copied) mapping so the returned tuples
    # do not share nested structure with the input when deepcopy=1.
    for key in new:
        value = new[key]
        tuple_list.append((key, value))
    return tuple_list
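# Usage sketch:
# dict2tlist({'a': 'b', 'c': 'd'})  -> [('a', 'b'), ('c', 'd')]  (order not guaranteed)
# dict2tlist('not-a-dict')          -> None  (type check enabled by default)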
def associations(self, association_resource):
    """Retrieve Association for this resource of the type in association_resource.

    This method will return all *resources* (group, indicators, task, victims,
    etc) for this resource that are associated with the provided association
    resource_type.

    **Example Endpoints URI's**

    +--------+----------------------------------------------------------------------+
    | Method | API Endpoint URI's                                                   |
    +========+======================================================================+
    | {base} | /v2/{resourceClass}/{resourceType}/{resourceId}                      |
    +--------+----------------------------------------------------------------------+
    | GET    | {base}/{assoc resourceClass}/{assoc resourceType}                    |
    +--------+----------------------------------------------------------------------+
    | POST   | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
    +--------+----------------------------------------------------------------------+
    | DELETE | {base}/{assoc resourceClass}/{assoc resourceType}/{assoc resourceId} |
    +--------+----------------------------------------------------------------------+

    + resourceClass - Groups/Indicators
    + resourceType - Adversary, Incident, etc / Address, EmailAddress, etc
    + resourceId - Group Id / Indicator Value

    Args:
        association_resource (Resource Instance): A resource object with optional resource_id.

    Return:
        (instance): A copy of this resource instance cleaned and updated for associations.
    """
    resource = self.copy()
    resource._request_entity = association_resource.api_entity
    resource._request_uri = '{}/{}'.format(
        resource._request_uri, association_resource.request_uri
    )
    return resource
def result(self, r=None, **kwargs):
    '''
    Validates a result, stores it in self.results and prints it.
    Accepts the same kwargs as the binwalk.core.module.Result class.

    @r - An existing instance of binwalk.core.module.Result.

    Returns an instance of binwalk.core.module.Result.
    '''
    if r is None:
        r = Result(**kwargs)

    # Add the name of the current module to the result
    r.module = self.__class__.__name__

    # Any module that is reporting results, valid or not, should be marked
    # as enabled
    if not self.enabled:
        self.enabled = True

    self.validate(r)
    self._plugins_result(r)

    # Update the progress status automatically if it is not being done
    # manually by the module
    if r.offset and r.file and self.AUTO_UPDATE_STATUS:
        self.status.total = r.file.length
        self.status.completed = r.offset
        self.status.fp = r.file

    for dependency in self.dependencies:
        try:
            getattr(self, dependency.attribute).callback(r)
        except AttributeError:
            continue

    if r.valid:
        self.results.append(r)

        if r.display:
            display_args = self._build_display_args(r)
            if display_args:
                self.config.display.format_strings(self.HEADER_FORMAT,
                                                   self.RESULT_FORMAT)
                self.config.display.result(*display_args)

    return r
def _consume_add_and_get_tag(self, consume_rpc_result):
    """Add the tag to the channel and return it.

    :param dict consume_rpc_result:

    :rtype: str
    """
    consumer_tag = consume_rpc_result['consumer_tag']
    self._channel.add_consumer_tag(consumer_tag)
    return consumer_tag
Add the tag to the channel and return it. :param dict consume_rpc_result: :rtype: str
def find_bookmark_file (): """Return the bookmark file of the Opera profile. Returns absolute filename if found, or empty string if no bookmark file could be found. """ try: dirname = get_profile_dir() if os.path.isdir(dirname): for name in OperaBookmarkFiles: fname = os.path.join(dirname, name) if os.path.isfile(fname): return fname except Exception: pass return u""
Return the bookmark file of the Opera profile. Returns absolute filename if found, or empty string if no bookmark file could be found.
def train():
    """Training process"""
    start_pipeline_time = time.time()

    # Training/Testing
    best_valid_acc = 0
    stop_early = 0
    for epoch in range(args.epochs):
        # Epoch training stats
        start_epoch_time = time.time()
        epoch_L = 0.0
        epoch_sent_num = 0
        epoch_wc = 0
        # Log interval training stats
        start_log_interval_time = time.time()
        log_interval_wc = 0
        log_interval_sent_num = 0
        log_interval_L = 0.0

        for i, ((data, valid_length), label) in enumerate(train_dataloader):
            data = mx.nd.transpose(data.as_in_context(context))
            label = label.as_in_context(context)
            valid_length = valid_length.as_in_context(context).astype(np.float32)
            wc = valid_length.sum().asscalar()
            log_interval_wc += wc
            epoch_wc += wc
            log_interval_sent_num += data.shape[1]
            epoch_sent_num += data.shape[1]
            with autograd.record():
                output = net(data, valid_length)
                L = loss(output, label).mean()
            L.backward()
            # Clip gradient
            if args.clip is not None:
                grads = [p.grad(context) for p in net.collect_params().values()]
                gluon.utils.clip_global_norm(grads, args.clip)
            # Update parameter
            trainer.step(1)
            log_interval_L += L.asscalar()
            epoch_L += L.asscalar()
            if (i + 1) % args.log_interval == 0:
                print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
                    epoch, i + 1, len(train_dataloader),
                    log_interval_L / log_interval_sent_num,
                    log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
                # Clear log interval training stats
                start_log_interval_time = time.time()
                log_interval_wc = 0
                log_interval_sent_num = 0
                log_interval_L = 0
        end_epoch_time = time.time()
        valid_avg_L, valid_acc = evaluate(valid_dataloader)
        test_avg_L, test_acc = evaluate(test_dataloader)
        print('[Epoch %d] train avg loss %g, '
              'valid acc %.4f, valid avg loss %g, '
              'test acc %.4f, test avg loss %g, throughput %gK wps' % (
                  epoch, epoch_L / epoch_sent_num, valid_acc, valid_avg_L,
                  test_acc, test_avg_L,
                  epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))

        if valid_acc < best_valid_acc:
            print('No Improvement.')
            stop_early += 1
            if stop_early == 3:
                break
        else:
            # Reset stop_early when the validation accuracy reaches a new high
            print('Observed Improvement.')
            stop_early = 0
            net.save_parameters(args.save_prefix + '_{:04d}.params'.format(epoch))
            best_valid_acc = valid_acc

    # glob returns files in arbitrary order; sort so the checkpoint with the
    # highest epoch number (the best one saved last) is loaded
    net.load_parameters(sorted(glob.glob(args.save_prefix + '_*.params'))[-1], context)
    valid_avg_L, valid_acc = evaluate(valid_dataloader)
    test_avg_L, test_acc = evaluate(test_dataloader)
    print('Best validation loss %g, validation acc %.4f' % (valid_avg_L, valid_acc))
    print('Best test loss %g, test acc %.4f' % (test_avg_L, test_acc))
    print('Total time cost %.2fs' % (time.time() - start_pipeline_time))
Training process
def class_sealer(fields, defaults, base=__base__, make_init_func=make_init_func, initializer=True, comparable=True, printable=True, convertible=False, pass_kwargs=False): """ This sealer makes a normal container class. It's mutable and supports arguments with default values. """ baseclass_name = 'FieldsBase_for__{0}'.format('__'.join(fields)) if pass_kwargs: options = dict( header_end=', **__fields_kwargs__):\n', super_call_end=', **__fields_kwargs__)\n', super_call_pass_allargs=False, ) else: options = {} if initializer: global_namespace, local_namespace = make_init_func(fields, defaults, baseclass_name, **options) class FieldsBase(base): if initializer: __init__ = local_namespace['__init__'] if comparable: def __eq__(self, other): if isinstance(other, self.__class__): return tuple(getattr(self, a) for a in fields) == tuple(getattr(other, a) for a in fields) else: return NotImplemented def __ne__(self, other): result = self.__eq__(other) if result is NotImplemented: return NotImplemented else: return not result def __lt__(self, other): if isinstance(other, self.__class__): return tuple(getattr(self, a) for a in fields) < tuple(getattr(other, a) for a in fields) else: return NotImplemented def __le__(self, other): if isinstance(other, self.__class__): return tuple(getattr(self, a) for a in fields) <= tuple(getattr(other, a) for a in fields) else: return NotImplemented def __gt__(self, other): if isinstance(other, self.__class__): return tuple(getattr(self, a) for a in fields) > tuple(getattr(other, a) for a in fields) else: return NotImplemented def __ge__(self, other): if isinstance(other, self.__class__): return tuple(getattr(self, a) for a in fields) >= tuple(getattr(other, a) for a in fields) else: return NotImplemented def __hash__(self): return hash(tuple(getattr(self, a) for a in fields)) if printable: def __repr__(self): return "{0}({1})".format( self.__class__.__name__, ", ".join("{0}={1}".format(attr, repr(getattr(self, attr))) for attr in fields) ) if convertible: @property def as_dict(self): return dict((attr, getattr(self, attr)) for attr in fields) @property def as_tuple(self): return tuple(getattr(self, attr) for attr in fields) if initializer: global_namespace[baseclass_name] = FieldsBase return FieldsBase
This sealer makes a normal container class. It's mutable and supports arguments with default values.
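Exercising class_sealer directly requires the library's make_init_func and __base__ plumbing, so here is a minimal hand-written sketch of the class it generates for fields=('a', 'b'). The tuple-based equality and repr mirror the generated methods above; the Pair name is ours, and the real sealer also emits ordering, hashing, and conversion methods.

class Pair(object):
    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __eq__(self, other):
        # Same tuple-vs-tuple comparison the sealer generates
        if isinstance(other, self.__class__):
            return (self.a, self.b) == (other.a, other.b)
        return NotImplemented

    def __repr__(self):
        return "Pair(a={0!r}, b={1!r})".format(self.a, self.b)

print(Pair(1, 2) == Pair(1, 2))  # True
print(Pair(1, 2))                # Pair(a=1, b=2)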
def send_request(self, method, action, body=None, headers=None, ipaddr=None):
    """Perform the HTTP request.

    The response is in either JSON format or plain text. A GET method
    returns a JSON response, while a PUT/POST/DELETE returns a message
    from the server in plain text format. An exception is raised when
    the server replies with an INTERNAL SERVER ERROR status code (500),
    i.e. an error has occurred on the server, or SERVICE UNAVAILABLE
    (503), i.e. the server is not reachable.

    :param method: type of the HTTP request. POST, GET, PUT or DELETE
    :param action: path to which the client makes request
    :param body: dict of arguments which are sent as part of the request
    :param headers: header for the HTTP request
    :param ipaddr: server IP address for the HTTP request.
    :returns: JSON or plain text in HTTP response
    """

    action = ''.join([self.scheme, '://%s/', action])
    if netaddr.valid_ipv6(ipaddr):
        # Enclose IPv6 address in [] in the URL
        action = action % ("[%s]" % ipaddr)
    else:
        # IPv4 address
        action = action % ipaddr

    config = (action + " : " + body) if body else action

    # if cookie needed and one not previously created
    if self.request_cookie:
        cookie, verify = self._get_cookie(ipaddr, config)
        headers = {"Content-type": "application/json",
                   "Accept": "text/plain",
                   "Cookie": cookie}
    else:
        if ipaddr not in self.credentials:
            raise cexc.NexusCredentialNotFound(switch_ip=ipaddr)
        else:
            headers = {'Content-Type': 'application/json'}
            security_data = self.credentials[ipaddr]
            verify = security_data[const.HTTPS_CERT_TUPLE]
            if not verify:
                verify = security_data[const.HTTPS_VERIFY_TUPLE]
            self.session.auth = (security_data[0], security_data[1])

    if self.status != requests.codes.OK:
        return {}

    for attempt in range(self.max_retries + 1):
        try:
            LOG.debug("[Nexus %(ipaddr)s attempt %(id)s]: Connecting.." %
                      {"ipaddr": ipaddr, "id": attempt})
            response = self.session.request(
                method,
                action,
                data=body,
                headers=headers,
                verify=verify,
                timeout=self.timeout)
            if (self.request_cookie and
                    response.status_code in CREDENTIAL_EXPIRED):
                # if need new cookie
                cookie, verify = self._get_cookie(
                    ipaddr, config, refresh=True)
                headers = {"Content-type": "application/json",
                           "Accept": "text/plain",
                           "Cookie": cookie}
                continue
        except Exception as e:
            LOG.error(
                "Exception raised %(err)s for Rest/NXAPI %(cfg)s",
                {'err': str(e), 'cfg': config})
            raise cexc.NexusConfigFailed(nexus_host=ipaddr,
                                         config=config,
                                         exc=e)
        else:
            break

    status_string = requests.status_codes._codes[response.status_code][0]
    if response.status_code in self.accepted_codes:
        LOG.debug(
            "Good status %(status)s(%(code)d) returned for %(url)s",
            {'status': status_string,
             'code': response.status_code,
             'url': action})
        # 'text/json' used with nxapi else application/json with restapi
        output = {}
        if ('application/json' in response.headers['content-type'] or
                'text/json' in response.headers['content-type']):
            try:
                output = response.json()
            except Exception:
                LOG.exception(
                    "Unexpected error encountered extracting "
                    "json body from response.")
        if 'ins_api' in output:
            # do special nxapi response handling
            try:
                cli_resp = output['ins_api']['outputs']['output']
            except Exception:
                cli_resp = []

            # Check results for each command
            for cli in cli_resp:
                try:
                    status = int(cli['code'])
                except ValueError:
                    status = 'bad_status %s' % cli['code']
                if status not in self.accepted_codes:
                    excpt = ("ins_api CLI failure occurred "
                             "with cli return code %s" % str(status))
                    raise cexc.NexusConfigFailed(
                        nexus_host=ipaddr, config=config,
                        exc=excpt)
        return output
    else:
        LOG.error(
            "Bad status %(status)s(%(code)d) returned for %(url)s",
            {'status': status_string,
             'code': response.status_code,
             'url': action})
        LOG.error("Response text: %(txt)s",
                  {'txt': response.text})
        raise cexc.NexusConfigFailed(nexus_host=ipaddr,
                                     config=config,
                                     exc=response.text)
Perform the HTTP request.

The response is in either JSON format or plain text. A GET method returns a JSON response, while a PUT/POST/DELETE returns a message from the server in plain text format. An exception is raised when the server replies with an INTERNAL SERVER ERROR status code (500), i.e. an error has occurred on the server, or SERVICE UNAVAILABLE (503), i.e. the server is not reachable.

:param method: type of the HTTP request. POST, GET, PUT or DELETE
:param action: path to which the client makes request
:param body: dict of arguments which are sent as part of the request
:param headers: header for the HTTP request
:param ipaddr: server IP address for the HTTP request.
:returns: JSON or plain text in HTTP response
def specialspaceless(parser, token):
    """
    Removes whitespace between HTML tags, and introduces a whitespace
    after buttons and inputs, necessary for Bootstrap to place them
    correctly in the layout.
    """
    nodelist = parser.parse(('endspecialspaceless',))
    parser.delete_first_token()

    return SpecialSpacelessNode(nodelist)
Removes whitespace between HTML tags, and introduces a whitespace after buttons and inputs, necessary for Bootstrap to place them correctly in the layout.
def get_state(self): """ Create state from sensors and battery """ # Include battery level in state battery = self.player.stats['battery']/100 # Create observation from sensor proximities # TODO: Have state persist, then update columns by `sensed_type` # Multi-channel; detecting `items` if len(self.mode['items']) > 0: observation = [] for sensor in self.player.sensors: col = [] # Always include range in channel 0 col.append(sensor.proximity_norm()) for item_type in self.mode['items']: if sensor.sensed_type == item_type: col.append(sensor.proximity_norm()) else: # Default to 1 (`max_range/max_range`) col.append(1) observation.append(col) if 'battery' in self.mode: observation.append([battery,1,1]) # Single-channel; walls only else: observation = [o.proximity_norm() for o in self.player.sensors] if 'battery' in self.mode: observation.append(battery) return observation
Create state from sensors and battery
def get_cash_balance(self):
    """
    Returns the account cash balance available for investing

    Returns
    -------
    float
        The cash balance in your account.
    """
    cash = False
    try:
        response = self.session.get('/browse/cashBalanceAj.action')
        json_response = response.json()

        if self.session.json_success(json_response):
            self.__log('Cash available: {0}'.format(json_response['cashBalance']))
            cash_value = json_response['cashBalance']

            # Convert currency to float value
            # Match values like $1,000.12 or 1,000$
            cash_match = re.search(r'^[^0-9]?([0-9.,]+)[^0-9]?', cash_value)
            if cash_match:
                cash_str = cash_match.group(1)
                cash_str = cash_str.replace(',', '')
                cash = float(cash_str)
        else:
            self.__log('Could not get cash balance: {0}'.format(response.text))

    except Exception as e:
        self.__log('Could not get the cash balance on the account: Error: {0}\nJSON: {1}'.format(str(e), response.text))
        raise e

    return cash
Returns the account cash balance available for investing Returns ------- float The cash balance in your account.
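The currency-parsing regex above can be checked in isolation; this standalone snippet (the parse_cash name is ours) shows how it skips a single leading or trailing currency symbol and strips the thousands separators.

import re

def parse_cash(cash_value):
    # Same pattern as get_cash_balance: optional non-digit on either side
    m = re.search(r'^[^0-9]?([0-9.,]+)[^0-9]?', cash_value)
    return float(m.group(1).replace(',', '')) if m else None

print(parse_cash('$1,000.12'))  # 1000.12
print(parse_cash('1,000$'))     # 1000.0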
def reply_video(
    self,
    video: str,
    quote: bool = None,
    caption: str = "",
    parse_mode: str = "",
    duration: int = 0,
    width: int = 0,
    height: int = 0,
    thumb: str = None,
    supports_streaming: bool = True,
    disable_notification: bool = None,
    reply_to_message_id: int = None,
    reply_markup: Union[
        "pyrogram.InlineKeyboardMarkup",
        "pyrogram.ReplyKeyboardMarkup",
        "pyrogram.ReplyKeyboardRemove",
        "pyrogram.ForceReply"
    ] = None,
    progress: callable = None,
    progress_args: tuple = ()
) -> "Message":
    """Bound method *reply_video* of :obj:`Message <pyrogram.Message>`.

    Use as a shortcut for:

    .. code-block:: python

        client.send_video(
            chat_id=message.chat.id,
            video=video
        )

    Example:
        .. code-block:: python

            message.reply_video(video)

    Args:
        video (``str``):
            Video to send.
            Pass a file_id as string to send a video that exists on the Telegram servers,
            pass an HTTP URL as a string for Telegram to get a video from the Internet, or
            pass a file path as string to upload a new video that exists on your local machine.

        quote (``bool``, *optional*):
            If ``True``, the message will be sent as a reply to this message.
            If *reply_to_message_id* is passed, this parameter will be ignored.
            Defaults to ``True`` in group chats and ``False`` in private chats.

        caption (``str``, *optional*):
            Video caption, 0-1024 characters.

        parse_mode (``str``, *optional*):
            Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>`
            if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption.
            Defaults to Markdown.

        duration (``int``, *optional*):
            Duration of sent video in seconds.

        width (``int``, *optional*):
            Video width.

        height (``int``, *optional*):
            Video height.

        thumb (``str``, *optional*):
            Thumbnail of the video sent.
            The thumbnail should be in JPEG format and less than 200 KB in size.
            A thumbnail's width and height should not exceed 90 pixels.
            Thumbnails can't be reused and can be only uploaded as a new file.

        supports_streaming (``bool``, *optional*):
            Pass True, if the uploaded video is suitable for streaming.

        disable_notification (``bool``, *optional*):
            Sends the message silently.
            Users will receive a notification with no sound.

        reply_to_message_id (``int``, *optional*):
            If the message is a reply, ID of the original message.

        reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
            Additional interface options. An object for an inline keyboard, custom reply keyboard,
            instructions to remove reply keyboard or to force a reply from the user.

        progress (``callable``, *optional*):
            Pass a callback function to view the upload progress.
            The function must take *(client, current, total, \*args)* as positional arguments (look at the section
            below for a detailed description).

        progress_args (``tuple``, *optional*):
            Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
            a chat_id and a message_id in order to edit a message with the updated progress.

    Other Parameters:
        client (:obj:`Client <pyrogram.Client>`):
            The Client itself, useful when you want to call other API methods inside the callback function.

        current (``int``):
            The amount of bytes uploaded so far.

        total (``int``):
            The size of the file.

        *args (``tuple``, *optional*):
            Extra custom arguments as defined in the *progress_args* parameter.
            You can either keep *\*args* or add every single extra argument in your function signature.

    Returns:
        On success, the sent :obj:`Message <pyrogram.Message>` is returned.
        In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    if quote is None:
        quote = self.chat.type != "private"

    if reply_to_message_id is None and quote:
        reply_to_message_id = self.message_id

    return self._client.send_video(
        chat_id=self.chat.id,
        video=video,
        caption=caption,
        parse_mode=parse_mode,
        duration=duration,
        width=width,
        height=height,
        thumb=thumb,
        supports_streaming=supports_streaming,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to_message_id,
        reply_markup=reply_markup,
        progress=progress,
        progress_args=progress_args
    )
Bound method *reply_video* of :obj:`Message <pyrogram.Message>`. Use as a shortcut for: .. code-block:: python client.send_video( chat_id=message.chat.id, video=video ) Example: .. code-block:: python message.reply_video(video) Args: video (``str``): Video to send. Pass a file_id as string to send a video that exists on the Telegram servers, pass an HTTP URL as a string for Telegram to get a video from the Internet, or pass a file path as string to upload a new video that exists on your local machine. quote (``bool``, *optional*): If ``True``, the message will be sent as a reply to this message. If *reply_to_message_id* is passed, this parameter will be ignored. Defaults to ``True`` in group chats and ``False`` in private chats. caption (``str``, *optional*): Video caption, 0-1024 characters. parse_mode (``str``, *optional*): Use :obj:`MARKDOWN <pyrogram.ParseMode.MARKDOWN>` or :obj:`HTML <pyrogram.ParseMode.HTML>` if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your caption. Defaults to Markdown. duration (``int``, *optional*): Duration of sent video in seconds. width (``int``, *optional*): Video width. height (``int``, *optional*): Video height. thumb (``str``, *optional*): Thumbnail of the video sent. The thumbnail should be in JPEG format and less than 200 KB in size. A thumbnail's width and height should not exceed 90 pixels. Thumbnails can't be reused and can be only uploaded as a new file. supports_streaming (``bool``, *optional*): Pass True, if the uploaded video is suitable for streaming. disable_notification (``bool``, *optional*): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (``int``, *optional*): If the message is a reply, ID of the original message. reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*): Additional interface options. An object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. progress (``callable``, *optional*): Pass a callback function to view the upload progress. The function must take *(client, current, total, \*args)* as positional arguments (look at the section below for a detailed description). progress_args (``tuple``, *optional*): Extra custom arguments for the progress callback function. Useful, for example, if you want to pass a chat_id and a message_id in order to edit a message with the updated progress. Other Parameters: client (:obj:`Client <pyrogram.Client>`): The Client itself, useful when you want to call other API methods inside the callback function. current (``int``): The amount of bytes uploaded so far. total (``int``): The size of the file. *args (``tuple``, *optional*): Extra custom arguments as defined in the *progress_args* parameter. You can either keep *\*args* or add every single extra argument in your function signature. Returns: On success, the sent :obj:`Message <pyrogram.Message>` is returned. In case the upload is deliberately stopped with :meth:`stop_transmission`, None is returned instead. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
def select_hcons(xmrs, hi=None, relation=None, lo=None): """ Return the list of matching HCONS for *xmrs*. :class:`~delphin.mrs.components.HandleConstraint` objects for *xmrs* match if their `hi` matches *hi*, `relation` matches *relation*, and `lo` matches *lo*. The *hi*, *relation*, and *lo* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query hi (str, optional): hi handle (hole) to match relation (str, optional): handle constraint relation to match lo (str, optional): lo handle (label) to match Returns: list: matching HCONS """ hcmatch = lambda hc: ( (hi is None or hc.hi == hi) and (relation is None or hc.relation == relation) and (lo is None or hc.lo == lo)) return list(filter(hcmatch, xmrs.hcons()))
Return the list of matching HCONS for *xmrs*. :class:`~delphin.mrs.components.HandleConstraint` objects for *xmrs* match if their `hi` matches *hi*, `relation` matches *relation*, and `lo` matches *lo*. The *hi*, *relation*, and *lo* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query hi (str, optional): hi handle (hole) to match relation (str, optional): handle constraint relation to match lo (str, optional): lo handle (label) to match Returns: list: matching HCONS
def acosh(x):
    """ acosh(x)
    Inverse hyperbolic cosine function.
    """
    _math = infer_math(x)
    if _math is math:
        return _math.acosh(x)
    else:
        return _math.arccosh(x)
acosh(x)
Inverse hyperbolic cosine function.
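infer_math is not shown above; a plausible stand-in (an assumption on our part, the real helper may dispatch differently) is to pick the backend by input type, which makes the branch above observable.

import math
import numpy

def infer_math(x):
    # Hypothetical helper: scalars go to the stdlib, everything else to numpy
    return math if isinstance(x, (int, float)) else numpy

print(acosh(2.0))                       # math.acosh path -> 1.3169...
print(acosh(numpy.array([1.0, 2.0])))   # numpy.arccosh path -> [0.  1.3169...]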
def get_reader(self): """Return a read-only :py:class:`~zipfile.ZipFile`.""" assert self._closed, "Archive not closed" buf = io.BytesIO(self.get_bytes()) return zipfile.ZipFile(buf, mode='r')
Return a read-only :py:class:`~zipfile.ZipFile`.
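The get_bytes/BytesIO round trip that get_reader relies on can be demonstrated without the surrounding archive class:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, mode='w') as zf:
    zf.writestr('hello.txt', 'hi')

# Re-open the finished bytes read-only, as get_reader does
reader = zipfile.ZipFile(io.BytesIO(buf.getvalue()), mode='r')
print(reader.read('hello.txt'))  # b'hi'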
def merge_files(context):
    """
    Given a context containing path to template, env, and service:
    merge config into template and output the result to stdout
    Args:
      context: a populated context object
    """
    resolver = EFTemplateResolver(
        profile=context.profile,
        region=context.region,
        env=context.env,
        service=context.service
    )

    try:
        with open(context.template_path, 'r') as f:
            template_body = f.read()
    except IOError as error:
        raise IOError("Error loading template file: {} {}".format(context.template_path, repr(error)))

    if not context.no_params:
        try:
            with open(context.param_path, 'r') as f:
                param_body = f.read()
        except IOError as error:
            raise IOError("Error loading param file: {} {}".format(context.param_path, repr(error)))

        dest = yaml.safe_load(param_body)["dest"]

        # if 'dest' for the current object contains an 'environments' list, check it
        if "environments" in dest:
            if not resolver.resolved["ENV_SHORT"] in dest["environments"]:
                print("Environment: {} not enabled for {}".format(resolver.resolved["ENV_SHORT"], context.template_path))
                return

        # Process the template_body - apply context + parameters
        resolver.load(template_body, param_body)
    else:
        resolver.load(template_body)
    rendered_body = resolver.render()

    if not resolver.resolved_ok():
        raise RuntimeError("Couldn't resolve all symbols; template has leftover {{ or }}: {}".format(resolver.unresolved_symbols()))

    if context.lint:
        if context.template_path.endswith(".json"):
            try:
                json.loads(rendered_body, strict=False)
                print("JSON passed linting process.")
            except ValueError as e:
                fail("JSON failed linting process.", e)
        elif context.template_path.endswith((".yml", ".yaml")):
            conf = yamllint_config.YamlLintConfig(content='extends: relaxed')
            lint_output = yamllinter.run(rendered_body, conf)
            lint_level = 'error'
            lint_errors = [issue for issue in lint_output if issue.level == lint_level]
            if lint_errors:
                split_body = rendered_body.splitlines()
                for error in lint_errors:
                    print(error)
                    # printing line - 1 because lists start at 0, but files at 1
                    print("\t", split_body[error.line - 1])
                fail("YAML failed linting process.")

    if context.verbose:
        print(context)
        if context.no_params:
            print('no_params flag set to true!')
            print('Inline template resolution based on external symbol lookup only and no destination for file write.\n')
        else:
            dir_path = normpath(dirname(dest["path"]))
            print("make directories: {} {}".format(dir_path, dest["dir_perm"]))
            print("chmod file to: " + dest["file_perm"])
            user, group = dest["user_group"].split(":")
            print("chown last directory in path to user: {}, group: {}".format(user, group))
            print("chown file to user: {}, group: {}\n".format(user, group))
        print("template body:\n{}\nrendered body:\n{}\n".format(template_body, rendered_body))
    elif context.silent:
        print("Config template rendered successfully.")
    else:
        print(rendered_body)
Given a context containing path to template, env, and service: merge config into template and output the result to stdout Args: context: a populated context object
def mctransp(I, J, K, c, d, M):
    """mctransp -- model for solving the Multi-commodity Transportation Problem
    Parameters:
        - I: set of customers
        - J: set of facilities
        - K: set of commodities
        - c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
        - d[i,k]: demand for commodity k at node i
        - M[j]: capacity
    Returns a model, ready to be solved.
    """
    model = Model("multi-commodity transportation")

    # Create variables
    x = {}
    for (i, j, k) in c:
        x[i, j, k] = model.addVar(vtype="C", name="x(%s,%s,%s)" % (i, j, k), obj=c[i, j, k])

    # tuplelist is a Gurobi data structure to manage lists of equal-sized tuples - try itertools as alternative
    arcs = tuplelist([(i, j, k) for (i, j, k) in x])

    # Demand constraints
    for i in I:
        for k in K:
            model.addCons(sum(x[i, j, k] for (i, j, k) in arcs.select(i, "*", k)) == d[i, k],
                          "Demand(%s,%s)" % (i, k))

    # Capacity constraints
    for j in J:
        model.addCons(sum(x[i, j, k] for (i, j, k) in arcs.select("*", j, "*")) <= M[j],
                      "Capacity(%s)" % j)

    model.data = x
    return model
mctransp -- model for solving the Multi-commodity Transportation Problem
Parameters:
    - I: set of customers
    - J: set of facilities
    - K: set of commodities
    - c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
    - d[i,k]: demand for commodity k at node i
    - M[j]: capacity
Returns a model, ready to be solved.
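A minimal sketch of driving the model, assuming PySCIPOpt's Model and a Gurobi-style tuplelist are importable in the module as the code above expects; the toy data here is ours.

I = [1, 2]                                             # customers
J = [1, 2]                                             # facilities
K = [1]                                                # one commodity
c = {(i, j, k): 1.0 for i in I for j in J for k in K}  # unit costs
d = {(i, k): 10.0 for i in I for k in K}               # demands
M = {j: 100.0 for j in J}                              # capacities

model = mctransp(I, J, K, c, d, M)
model.optimize()
if model.getStatus() == "optimal":
    print("Total cost:", model.getObjVal())  # 20.0 for this toy instance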
def _unpack_lookupswitch(bc, offset): """ function for unpacking the lookupswitch op arguments """ jump = (offset % 4) if jump: offset += (4 - jump) (default, npairs), offset = _unpack(_struct_ii, bc, offset) switches = list() for _index in range(npairs): pair, offset = _unpack(_struct_ii, bc, offset) switches.append(pair) return (default, switches), offset
function for unpacking the lookupswitch op arguments
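The padding arithmetic above 4-byte aligns the operands relative to the start of the bytecode, as the JVM spec requires for lookupswitch; it can be checked standalone:

for offset in range(1, 9):
    jump = offset % 4
    aligned = offset + (4 - jump) if jump else offset
    print(offset, '->', aligned)  # 1,2,3 -> 4; 4 -> 4; 5,6,7 -> 8; 8 -> 8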
def goto_line(self, line_number): """Go to specified line number in current active editor.""" if line_number: line_number = int(line_number) try: self.plugin.go_to_line(line_number) except AttributeError: pass
Go to specified line number in current active editor.
def computePreRec(cm, class_names):
    '''
    This function computes the precision, recall and f1 measures,
    given a confusion matrix, and returns them as the tuple
    (recall, precision, f1), with one entry per class in class_names.
    '''
    n_classes = cm.shape[0]
    if len(class_names) != n_classes:
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
        return
    precision = []
    recall = []
    f1 = []
    for i, c in enumerate(class_names):
        precision.append(cm[i, i] / numpy.sum(cm[:, i]))
        recall.append(cm[i, i] / numpy.sum(cm[i, :]))
        f1.append(2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1]))
    return recall, precision, f1
This function computes the precision, recall and f1 measures, given a confusion matrix, and returns them as the tuple (recall, precision, f1), with one entry per class in class_names.
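A quick check with a 2x2 confusion matrix, assuming the function above (and its numpy import) is in scope; note the returned order is recall first.

import numpy

cm = numpy.array([[50., 10.],
                  [5., 35.]])
recall, precision, f1 = computePreRec(cm, ['cat', 'dog'])
print(precision)  # [50/55 ~ 0.909, 35/45 ~ 0.778]
print(recall)     # [50/60 ~ 0.833, 35/40 = 0.875]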
def import_pipeline(conf, args): """Import a pipeline from json.""" with open(args.pipeline_json) as pipeline_json: dst = conf.config['instances'][args.dst_instance] dst_url = api.build_pipeline_url(build_instance_url(dst)) dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'], conf.creds['instances'][args.dst_instance]['pass']]) parsed_json = json.load(pipeline_json) verify_ssl = dst.get('verify_ssl', True) return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
Import a pipeline from json.
def __setitem(self, chunk, key, keys, value, extend=False): """Helper function to fill up the dictionary.""" def setitem(chunk): if keys: return self.__setitem(chunk, keys[0], keys[1:], value, extend) else: return value if key in ['.', ']']: chunk[key] = value elif ']' in key: # list key = int(key[:-1].replace('n', '-1')) if extend: if chunk is None: chunk = [None, ] else: if not isinstance(chunk, list): chunk = [chunk, ] if key != -1: chunk.insert(key, None) else: chunk.append(None) else: if chunk is None: chunk = [None, ] chunk[key] = setitem(chunk[key]) else: # dict if extend: if chunk is None: chunk = {} chunk[key] = None chunk[key] = setitem(chunk[key]) elif key not in chunk: chunk[key] = None chunk[key] = setitem(chunk[key]) else: if keys: chunk[key] = setitem(chunk[key]) else: if not isinstance(chunk[key], list): chunk[key] = [chunk[key], ] chunk[key].append(None) chunk[key][-1] = setitem(chunk[key][-1]) else: if chunk is None: chunk = {} if key not in chunk: chunk[key] = None chunk[key] = setitem(chunk[key]) return chunk
Helper function to fill up the dictionary.
def register_post_processor(func): """ Register a post processor function to be run as the final step in serialization. The data passed in will already have gone through the sideloading processor. Usage: @register_post_processor def my_post_processor(data): # do stuff with `data` return data """ global POST_PROCESSORS key = func.__name__ POST_PROCESSORS[key] = func return func
Register a post processor function to be run as the final step in serialization. The data passed in will already have gone through the sideloading processor. Usage: @register_post_processor def my_post_processor(data): # do stuff with `data` return data
def write_file_to_zip_with_neutral_metadata(zfile, filename, content): """ Write the string `content` to `filename` in the open ZipFile `zfile`. Args: zfile (ZipFile): open ZipFile to write the content into filename (str): the file path within the zip file to write into content (str): the content to write into the zip Returns: None """ info = zipfile.ZipInfo(filename, date_time=(2015, 10, 21, 7, 28, 0)) info.compress_type = zipfile.ZIP_DEFLATED info.comment = "".encode() info.create_system = 0 zfile.writestr(info, content)
Write the string `content` to `filename` in the open ZipFile `zfile`. Args: zfile (ZipFile): open ZipFile to write the content into filename (str): the file path within the zip file to write into content (str): the content to write into the zip Returns: None
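Because the timestamp is pinned and the comment/create_system fields are zeroed, two archives built from the same content hash identically; a small self-check:

import hashlib
import io
import zipfile

def build_digest():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        write_file_to_zip_with_neutral_metadata(zf, 'a.txt', 'hello')
    return hashlib.md5(buf.getvalue()).hexdigest()

print(build_digest() == build_digest())  # True: output is deterministic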
def sprite_map_name(map):
    """
    Returns the name of a sprite map.
    The name is derived from the folder that contains the sprites.
    """
    map = map.render()
    sprite_maps = _get_cache('sprite_maps')
    sprite_map = sprite_maps.get(map)
    if not sprite_map:
        log.error("No sprite map found: %s", map, extra={'stack': True})
    if sprite_map:
        return String.unquoted(sprite_map['*n*'])
    return String.unquoted('')
Returns the name of a sprite map.
The name is derived from the folder that contains the sprites.
def afw_union(afw_1: dict, afw_2: dict) -> dict:
    """ Returns an AFW that reads the union of the languages read by input AFWs.

    Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 ,
    s^0_2, ρ_2 , F_2 )` be alternating automata accepting the languages
    :math:`L( A_1)` and :math:`L(A_2)`. Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪
    {root}, ρ_∪ , root , F_1 ∪ F_2 )` with :math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root,
    a): ρ(s^0_1 , a) ∨ ρ(s^0_2 , a)]` accepts :math:`L(A_1) ∪ L(A_2)`.

    Pay attention to avoid having the AFWs with state names in common; if
    needed, use the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.

    :param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
    :return: *(dict)* representing the united AFW.
    """
    # make sure new root state is unique
    initial_state = 'root'
    i = 0
    while initial_state in afw_1['states'] or initial_state in afw_2['states']:
        initial_state = 'root' + str(i)
        i += 1

    union = {
        'alphabet': afw_1['alphabet'].union(afw_2['alphabet']),
        'states': afw_1['states'].union(afw_2['states']).union({initial_state}),
        'initial_state': initial_state,
        'accepting_states': afw_1['accepting_states'].union(afw_2['accepting_states']),
        'transitions': deepcopy(afw_1['transitions'])
    }

    # add also afw_2 transitions
    union['transitions'].update(afw_2['transitions'])

    # if at least one initial state is accepting, the new one is too
    if afw_1['initial_state'] in afw_1['accepting_states'] \
            or afw_2['initial_state'] in afw_2['accepting_states']:
        union['accepting_states'].add(union['initial_state'])

    # copy the transitions of both initial states, joined in a disjunction,
    # into the new initial state
    for action in union['alphabet']:
        if (afw_1['initial_state'], action) in afw_1['transitions']:
            union['transitions'][initial_state, action] = \
                '(' + \
                afw_1['transitions'][afw_1['initial_state'], action] + \
                ')'
            if (afw_2['initial_state'], action) in afw_2['transitions']:
                union['transitions'][initial_state, action] += \
                    ' or (' + \
                    afw_2['transitions'][afw_2['initial_state'], action] + \
                    ')'
        elif (afw_2['initial_state'], action) in afw_2['transitions']:
            union['transitions'][initial_state, action] = \
                '(' + \
                afw_2['transitions'][afw_2['initial_state'], action] + \
                ')'

    return union
Returns an AFW that reads the union of the languages read by input AFWs.

Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s^0_2, ρ_2 , F_2 )` be alternating automata accepting the languages :math:`L( A_1)` and :math:`L(A_2)`. Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root , F_1 ∪ F_2 )` with :math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∨ ρ(s^0_2 , a)]` accepts :math:`L(A_1) ∪ L(A_2)`.

Pay attention to avoid having the AFWs with state names in common; if needed, use the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.

:param dict afw_1: first input AFW;
:param dict afw_2: second input AFW.
:return: *(dict)* representing the united AFW.
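A tiny worked example, assuming deepcopy is imported in the module as the code requires; the two one-state AFWs here are ours.

afw_a = {'alphabet': {'a'}, 'states': {'q0'}, 'initial_state': 'q0',
         'accepting_states': {'q0'}, 'transitions': {('q0', 'a'): 'q0'}}
afw_b = {'alphabet': {'a'}, 'states': {'p0'}, 'initial_state': 'p0',
         'accepting_states': set(), 'transitions': {('p0', 'a'): 'p0'}}

u = afw_union(afw_a, afw_b)
print(u['initial_state'])               # 'root'
print(u['transitions'][('root', 'a')])  # '(q0) or (p0)'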
def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job,
                   num_tries=1, stagger=0):
    """Submit a batch of reading jobs

    Parameters
    ----------
    input_fname : str
        The name of the file containing the ids to be read.
    start_ix : int
        The line index of the first item in the list to read.
    end_ix : int
        The line index of the last item in the list to be read.
    ids_per_job : int
        The number of ids to be given to each job.
    num_tries : int
        The number of times a job may be attempted.
    stagger : float
        The number of seconds to wait between job submissions.

    Returns
    -------
    job_list : list[dict]
        A list of dicts of the form ``{'jobId': <job id string>}``, one
        per submitted job.
    """
    # stash this for later.
    self.ids_per_job = ids_per_job

    # Upload the pmid_list to Amazon S3
    id_list_key = 'reading_results/%s/%s' % (self.basename,
                                             self._s3_input_name)
    s3_client = boto3.client('s3')
    s3_client.upload_file(input_fname, bucket_name, id_list_key)

    # If no end index is specified, read all the PMIDs
    if end_ix is None:
        with open(input_fname, 'rt') as f:
            lines = f.readlines()
            end_ix = len(lines)

    if start_ix is None:
        start_ix = 0

    # Get environment variables
    environment_vars = get_environment()

    # Iterate over the list of PMIDs and submit the job in chunks
    batch_client = boto3.client('batch', region_name='us-east-1')
    job_list = []
    for job_start_ix in range(start_ix, end_ix, ids_per_job):
        sleep(stagger)
        job_end_ix = job_start_ix + ids_per_job
        if job_end_ix > end_ix:
            job_end_ix = end_ix
        job_name, cmd = self._make_command(job_start_ix, job_end_ix)
        command_list = get_batch_command(cmd, purpose=self._purpose,
                                         project=self.project_name)
        logger.info('Command list: %s' % str(command_list))
        job_info = batch_client.submit_job(
            jobName=job_name,
            jobQueue=self._job_queue,
            jobDefinition=self._job_def,
            containerOverrides={
                'environment': environment_vars,
                'command': command_list},
            retryStrategy={'attempts': num_tries}
        )
        logger.info("submitted...")
        job_list.append({'jobId': job_info['jobId']})
    self.job_list = job_list
    return job_list
Submit a batch of reading jobs

Parameters
----------
input_fname : str
    The name of the file containing the ids to be read.
start_ix : int
    The line index of the first item in the list to read.
end_ix : int
    The line index of the last item in the list to be read.
ids_per_job : int
    The number of ids to be given to each job.
num_tries : int
    The number of times a job may be attempted.
stagger : float
    The number of seconds to wait between job submissions.

Returns
-------
job_list : list[dict]
    A list of dicts of the form ``{'jobId': <job id string>}``, one per
    submitted job.
def initialize_grid(world_size, inner): """ Creates an empty grid (2d list) with the dimensions specified in world_size. Each element is initialized to the inner argument. """ data = [] for i in range(world_size[1]): data.append([]) for j in range(world_size[0]): data[i].append(deepcopy(inner)) return data
Creates an empty grid (2d list) with the dimensions specified in world_size. Each element is initialized to the inner argument.
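The deepcopy of inner is what keeps cells independent, which is easy to verify (this assumes deepcopy is imported in the module, as the function requires):

grid = initialize_grid((3, 2), inner=[])
grid[0][0].append('x')
print(grid[0][0], grid[0][1])  # ['x'] [] -- cells do not share the inner list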
def updateData(self,exten,data): """ Write out updated data and header to the original input file for this object. """ _extnum=self._interpretExten(exten) fimg = fileutil.openImage(self._filename, mode='update', memmap=False) fimg[_extnum].data = data fimg[_extnum].header = self._image[_extnum].header fimg.close()
Write out updated data and header to the original input file for this object.
def cmd(send, _, args): """Shows the currently guarded nicks. Syntax: {command} """ guarded = args['handler'].guarded if not guarded: send("Nobody is guarded.") else: send(", ".join(guarded))
Shows the currently guarded nicks. Syntax: {command}
def get_expanded_path(self, path):
    """
    Expands a path that starts with a ``~`` to an absolute path.

    :param path: the path to expand
    :return: the expanded path
    """
    if path.startswith('~'):
        return os.path.expanduser('~') + path[1:]
    else:
        return path
Expands a path that starts with a ``~`` to an absolute path.

:param path: the path to expand
:return: the expanded path
def load(self): """Return our application to be run.""" app = util.import_app("dallinger.experiment_server.sockets:app") if self.options.get("mode") == "debug": app.debug = True return app
Return our application to be run.
def activate(self, span, finish_on_close):
    """
    Make a :class:`~opentracing.Span` instance active.

    :param span: the :class:`~opentracing.Span` that should become active.
    :param finish_on_close: whether *span* should automatically be
        finished when :meth:`Scope.close()` is called.

    If no :func:`tracer_stack_context()` is detected, thread-local
    storage will be used to store the :class:`~opentracing.Scope`.
    Observe that in this case the active :class:`~opentracing.Span`
    will not be automatically propagated to the child coroutines.

    :return: a :class:`~opentracing.Scope` instance to control the end
        of the active period for the :class:`~opentracing.Span`. It is a
        programming error to neglect to call :meth:`Scope.close()` on the
        returned instance.
    """
    context = self._get_context()
    if context is None:
        return super(TornadoScopeManager, self).activate(span,
                                                         finish_on_close)

    scope = _TornadoScope(self, span, finish_on_close)
    context.active = scope

    return scope
Make a :class:`~opentracing.Span` instance active.

:param span: the :class:`~opentracing.Span` that should become active.
:param finish_on_close: whether *span* should automatically be finished when :meth:`Scope.close()` is called.

If no :func:`tracer_stack_context()` is detected, thread-local storage will be used to store the :class:`~opentracing.Scope`. Observe that in this case the active :class:`~opentracing.Span` will not be automatically propagated to the child coroutines.

:return: a :class:`~opentracing.Scope` instance to control the end of the active period for the :class:`~opentracing.Span`. It is a programming error to neglect to call :meth:`Scope.close()` on the returned instance.
def parse(fp): """ Parse the contents of the `~io.IOBase.readline`-supporting file-like object ``fp`` as a simple line-oriented ``.properties`` file and return a generator of ``(key, value, original_lines)`` triples for every entry in ``fp`` (including duplicate keys) in order of occurrence. The third element of each triple is the concatenation of the unmodified lines in ``fp`` (including trailing newlines) from which the key and value were extracted. The generator also includes comments and blank/all-whitespace lines found in ``fp``, one triple per line, with the first two elements of the triples set to `None`. This is the only way to extract comments from a ``.properties`` file with this library. ``fp`` may be either a text or binary filehandle, with or without universal newlines enabled. If it is a binary filehandle, its contents are decoded as Latin-1. .. versionchanged:: 0.5.0 Invalid ``\\uXXXX`` escape sequences will now cause an `InvalidUEscapeError` to be raised :param fp: the file from which to read the ``.properties`` document :type fp: file-like object :rtype: generator of triples of text strings :raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence occurs in the input """ def lineiter(): while True: ln = fp.readline() if isinstance(ln, binary_type): ln = ln.decode('iso-8859-1') if ln == '': return for l in ascii_splitlines(ln): yield l liter = lineiter() for source in liter: line = source if re.match(r'^[ \t\f]*(?:[#!]|\r?\n?$)', line): yield (None, None, source) continue line = line.lstrip(' \t\f').rstrip('\r\n') while re.search(r'(?<!\\)(?:\\\\)*\\$', line): line = line[:-1] nextline = next(liter, '') source += nextline line += nextline.lstrip(' \t\f').rstrip('\r\n') if line == '': # series of otherwise-blank lines with continuations yield (None, None, source) continue m = re.search(r'(?<!\\)(?:\\\\)*([ \t\f]*[=:]|[ \t\f])[ \t\f]*', line) if m: yield (unescape(line[:m.start(1)]),unescape(line[m.end():]),source) else: yield (unescape(line), '', source)
Parse the contents of the `~io.IOBase.readline`-supporting file-like object ``fp`` as a simple line-oriented ``.properties`` file and return a generator of ``(key, value, original_lines)`` triples for every entry in ``fp`` (including duplicate keys) in order of occurrence. The third element of each triple is the concatenation of the unmodified lines in ``fp`` (including trailing newlines) from which the key and value were extracted. The generator also includes comments and blank/all-whitespace lines found in ``fp``, one triple per line, with the first two elements of the triples set to `None`. This is the only way to extract comments from a ``.properties`` file with this library. ``fp`` may be either a text or binary filehandle, with or without universal newlines enabled. If it is a binary filehandle, its contents are decoded as Latin-1. .. versionchanged:: 0.5.0 Invalid ``\\uXXXX`` escape sequences will now cause an `InvalidUEscapeError` to be raised :param fp: the file from which to read the ``.properties`` document :type fp: file-like object :rtype: generator of triples of text strings :raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence occurs in the input
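A short run, assuming the module's helpers (ascii_splitlines, unescape, binary_type) are importable alongside parse; it shows both comment passthrough and backslash line continuations.

import io

doc = io.StringIO(
    "# a comment line\n"
    "key = value\n"
    "long.key = first \\\n"
    "    continued\n"
)
for key, value, source in parse(doc):
    print(repr(key), repr(value))
# None None
# 'key' 'value'
# 'long.key' 'first continued'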
def _parse_call_args(self,*args,**kwargs): """Helper function to parse the arguments to the __call__ and related functions, return [6,nobj] array of frequencies (:3) and angles (3:)""" interp= kwargs.get('interp',self._useInterp) if len(args) == 5: raise IOError("Must specify phi for streamdf") elif len(args) == 6: if kwargs.get('aAInput',False): if isinstance(args[0],(int,float,numpy.float32,numpy.float64)): out= numpy.empty((6,1)) else: out= numpy.empty((6,len(args[0]))) for ii in range(6): out[ii,:]= args[ii] return out else: return self._approxaA(*args,interp=interp) elif isinstance(args[0],Orbit): o= args[0] return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(), interp=interp) elif isinstance(args[0],list) and isinstance(args[0][0],Orbit): R, vR, vT, z, vz, phi= [], [], [], [], [], [] for o in args[0]: R.append(o.R()) vR.append(o.vR()) vT.append(o.vT()) z.append(o.z()) vz.append(o.vz()) phi.append(o.phi()) return self._approxaA(numpy.array(R),numpy.array(vR), numpy.array(vT),numpy.array(z), numpy.array(vz),numpy.array(phi), interp=interp)
Helper function to parse the arguments to the __call__ and related functions, return [6,nobj] array of frequencies (:3) and angles (3:)
def post_stats(cls, stats_url, stats, timeout=2, auth_provider=None):
    """POST stats to the given url.

    :return: True if upload was successful, False otherwise.
    """
    def error(msg):
        # Report already closed, so just print error.
        print('WARNING: Failed to upload stats to {} due to {}'.format(stats_url, msg),
              file=sys.stderr)
        return False

    # TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
    # values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
    # But this will first require changing the upload receiver at every shop that uses this.
    params = {k: cls._json_dump_options(v) for (k, v) in stats.items()}
    cookies = Cookies.global_instance()
    auth_provider = auth_provider or '<provider>'

    # We can't simply let requests handle redirects, as we only allow them for specific codes:
    # 307 and 308 indicate that the redirected request must use the same method, POST in this case.
    # So they indicate a true redirect of the POST itself, and we allow them.
    # The other redirect codes either must, or in practice do, cause the user agent to switch the
    # method to GET. So when they are encountered on a POST, it indicates an auth problem (a
    # redirection to a login page).
    def do_post(url, num_redirects_allowed):
        if num_redirects_allowed < 0:
            return error('too many redirects.')
        r = requests.post(url, data=params, timeout=timeout,
                          cookies=cookies.get_cookie_jar(), allow_redirects=False)
        if r.status_code in {307, 308}:
            return do_post(r.headers['location'], num_redirects_allowed - 1)
        elif r.status_code != 200:
            error('HTTP error code: {}. Reason: {}.'.format(r.status_code, r.reason))
            if 300 <= r.status_code < 400 or r.status_code == 401:
                print('Use `path/to/pants login --to={}` to authenticate against the stats '
                      'upload service.'.format(auth_provider), file=sys.stderr)
            return False
        return True

    try:
        return do_post(stats_url, num_redirects_allowed=6)
    except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
        return error('Error: {}'.format(e))
POST stats to the given url. :return: True if upload was successful, False otherwise.
def wrap_case_result(raw, expr):
    """Wrap a CASE statement result in a Series and handle returning scalars.

    Parameters
    ----------
    raw : ndarray[T]
        The raw results of executing the ``CASE`` expression
    expr : ValueExpr
        The expression from which `raw` was computed

    Returns
    -------
    Union[scalar, Series]
    """
    raw_1d = np.atleast_1d(raw)
    if np.any(pd.isnull(raw_1d)):
        result = pd.Series(raw_1d)
    else:
        result = pd.Series(
            raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
        )
    if result.size == 1 and isinstance(expr, ir.ScalarExpr):
        return result.item()
    return result
Wrap a CASE statement result in a Series and handle returning scalars.

Parameters
----------
raw : ndarray[T]
    The raw results of executing the ``CASE`` expression
expr : ValueExpr
    The expression from which `raw` was computed

Returns
-------
Union[scalar, Series]
def get_variables(self, variables=None): """Get runtime variables for policy interpolation. Runtime variables are merged with the passed in variables if any. """ # Global policy variable expansion, we have to carry forward on # various filter/action local vocabularies. Where possible defer # by using a format string. # # See https://github.com/capitalone/cloud-custodian/issues/2330 if not variables: variables = {} if 'mode' in self.data: if 'role' in self.data['mode'] and not self.data['mode']['role'].startswith("arn:aws"): self.data['mode']['role'] = "arn:aws:iam::%s:role/%s" % \ (self.options.account_id, self.data['mode']['role']) variables.update({ # standard runtime variables for interpolation 'account': '{account}', 'account_id': self.options.account_id, 'region': self.options.region, # non-standard runtime variables from local filter/action vocabularies # # notify action 'policy': self.data, 'event': '{event}', # mark for op action 'op': '{op}', 'action_date': '{action_date}', # tag action pyformat-date handling 'now': utils.FormatDate(datetime.utcnow()), # account increase limit action 'service': '{service}', # s3 set logging action :-( see if we can revisit this one. 'bucket_region': '{bucket_region}', 'bucket_name': '{bucket_name}', 'source_bucket_name': '{source_bucket_name}', 'target_bucket_name': '{target_bucket_name}', 'target_prefix': '{target_prefix}', 'LoadBalancerName': '{LoadBalancerName}' }) return variables
Get runtime variables for policy interpolation. Runtime variables are merged with the passed in variables if any.
def _add_vector_layer(self, vector_layer, layer_name, save_style=False):
    """Add a vector layer to the folder.

    :param vector_layer: The layer to add.
    :type vector_layer: QgsVectorLayer

    :param layer_name: The name of the layer in the datastore.
    :type layer_name: str

    :param save_style: If we have to save a QML too. Default to False.
    :type save_style: bool

    :returns: A two-tuple. The first element will be True if we could add
        the layer to the datastore. The second element will be the layer
        name which has been used or the error message.
    :rtype: (bool, str)

    .. versionadded:: 4.0
    """
    if not self.is_writable():
        return False, 'The destination is not writable.'

    output = QFileInfo(
        self.uri.filePath(layer_name + '.' + self._default_vector_format))

    driver_mapping = {
        'shp': 'ESRI Shapefile',
        'kml': 'KML',
        'geojson': 'GeoJSON',
    }

    QgsVectorFileWriter.writeAsVectorFormat(
        vector_layer,
        output.absoluteFilePath(),
        'utf-8',
        QgsCoordinateTransform(),  # No transformation
        driver_mapping[self._default_vector_format])

    if save_style:
        style_path = QFileInfo(self.uri.filePath(layer_name + '.qml'))
        vector_layer.saveNamedStyle(style_path.absoluteFilePath())

    assert output.exists()
    return True, output.baseName()
Add a vector layer to the folder. :param vector_layer: The layer to add. :type vector_layer: QgsVectorLayer :param layer_name: The name of the layer in the datastore. :type layer_name: str :param save_style: If we have to save a QML too. Default to False. :type save_style: bool :returns: A two-tuple. The first element will be True if we could add the layer to the datastore. The second element will be the layer name which has been used or the error message. :rtype: (bool, str) .. versionadded:: 4.0
def keep_only_positive_boxes(boxes):
    """
    Given a set of BoxList containing the `labels` field,
    return a set of BoxList for which `labels > 0`.

    Arguments:
        boxes (list of BoxList)
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        inds = inds_mask.nonzero().squeeze(1)
        positive_boxes.append(boxes_per_image[inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds
Given a set of BoxList containing the `labels` field, return a set of BoxList for which `labels > 0`. Arguments: boxes (list of BoxList)
def hotp(key, counter, format='dec6', hash=hashlib.sha1):
    '''
    Compute a HOTP value as prescribed by RFC4226

    :param key: the HOTP secret key given as an hexadecimal string
    :param counter: the OTP generation counter
    :param format: the output format, can be:
        - hex, for a variable length hexadecimal format,
        - hex-notrunc, for a 40 characters hexadecimal non-truncated format,
        - dec4, for a 4 characters decimal format,
        - dec6, dec7 or dec8, for 6, 7 or 8 characters decimal formats,
        - dec, for the full truncated value as a variable length decimal
          string, or
        - bin, for the raw binary HMAC value;
        it defaults to dec6.
    :param hash: the hash module (usually from the hashlib package) to use,
        it defaults to hashlib.sha1.

    :returns:
        a string representation of the OTP value (as instructed by the
        format parameter), or raw bytes for the bin format.

    Examples:

     >>> hotp('343434', 2, format='dec6')
         '791903'
    '''
    bin_hotp = __hotp(key, counter, hash)

    if format == 'dec4':
        return dec(bin_hotp, 4)
    elif format == 'dec6':
        return dec(bin_hotp, 6)
    elif format == 'dec7':
        return dec(bin_hotp, 7)
    elif format == 'dec8':
        return dec(bin_hotp, 8)
    elif format == 'hex':
        return '%x' % truncated_value(bin_hotp)
    elif format == 'hex-notrunc':
        return _utils.tohex(bin_hotp)
    elif format == 'bin':
        return bin_hotp
    elif format == 'dec':
        return str(truncated_value(bin_hotp))
    else:
        raise ValueError('unknown format')
Compute a HOTP value as prescribed by RFC4226

:param key: the HOTP secret key given as an hexadecimal string
:param counter: the OTP generation counter
:param format: the output format, can be:
    - hex, for a variable length hexadecimal format,
    - hex-notrunc, for a 40 characters hexadecimal non-truncated format,
    - dec4, for a 4 characters decimal format,
    - dec6, dec7 or dec8, for 6, 7 or 8 characters decimal formats,
    - dec, for the full truncated value as a variable length decimal string, or
    - bin, for the raw binary HMAC value;
    it defaults to dec6.
:param hash: the hash module (usually from the hashlib package) to use, it defaults to hashlib.sha1.

:returns:
    a string representation of the OTP value (as instructed by the format parameter), or raw bytes for the bin format.

Examples:

 >>> hotp('343434', 2, format='dec6')
     '791903'
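Assuming the module's __hotp/dec helpers implement RFC 4226 faithfully, the first Appendix D test vector (the ASCII secret "12345678901234567890", counter 0) should reproduce:

secret_hex = '3132333435363738393031323334353637383930'
assert hotp(secret_hex, 0) == '755224'  # RFC 4226 Appendix D, count 0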
def rental_report(self, address, zipcode, format_type="json"): """Call the rental_report component Rental Report only supports a single address. Args: - address - zipcode Kwargs: - format_type - "json", "xlsx" or "all". Default is "json". """ # only json is supported by rental report. query_params = { "format": format_type, "address": address, "zipcode": zipcode } return self._api_client.fetch_synchronous("property/rental_report", query_params)
Call the rental_report component Rental Report only supports a single address. Args: - address - zipcode Kwargs: - format_type - "json", "xlsx" or "all". Default is "json".
def _safe_gremlin_string(value): """Sanitize and represent a string argument in Gremlin.""" if not isinstance(value, six.string_types): if isinstance(value, bytes): # should only happen in py3 value = value.decode('utf-8') else: raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: ' u'{}'.format(value)) # Using JSON encoding means that all unicode literals and special chars # (e.g. newlines and backslashes) are replaced by appropriate escape sequences. # However, the quoted result is wrapped in double quotes, and $ signs are not escaped, # so that would allow arbitrary code execution in Gremlin. # We will therefore turn the double-quoted string into a single-quoted one to avoid this risk. escaped_and_quoted = json.dumps(value) # Double-quoted string literals in Gremlin/Groovy allow # arbitrary code execution via string interpolation and closures. # To avoid this, we perform the following steps: # - we strip the wrapping double quotes; # - we un-escape any double-quotes in the string, by replacing \" with "; # - we escape any single-quotes in the string, by replacing ' with \'; # - finally, we wrap the string in single quotes. # http://www.groovy-lang.org/syntax.html#_double_quoted_string if not escaped_and_quoted[0] == escaped_and_quoted[-1] == '"': raise AssertionError(u'Unreachable state reached: {} {}'.format(value, escaped_and_quoted)) no_quotes = escaped_and_quoted[1:-1] re_escaped = no_quotes.replace('\\"', '"').replace('\'', '\\\'') final_escaped_value = '\'' + re_escaped + '\'' return final_escaped_value
Sanitize and represent a string argument in Gremlin.
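A quick look at the escaping behaviour, assuming the module (with its json and six imports) is in scope. Note the $ survives untouched but ends up inside single quotes, where Groovy does not interpolate:

print(_safe_gremlin_string('it\'s "quoted" $dangerous'))
# 'it\'s "quoted" $dangerous'  -- a single-quoted Groovy literal, $ inert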
def filetime_from_git(content, git_content): ''' Update modification and creation times from git ''' if not content.settings['GIT_FILETIME_FROM_GIT']: # Disabled for everything return if not string_to_bool(content.metadata.get('gittime', 'yes')): # Disable for this content return path = content.source_path fs_creation_time = datetime_from_timestamp(os.stat(path).st_ctime, content) fs_modified_time = datetime_from_timestamp(os.stat(path).st_mtime, content) # 1. file is not managed by git # date: fs time # 2. file is staged, but has no commits # date: fs time # 3. file is managed, and clean # date: first commit time, update: last commit time or None # 4. file is managed, but dirty # date: first commit time, update: fs time if git_content.is_managed_by_git(): if git_content.is_committed(): content.date = git_content.get_oldest_commit_date() if git_content.is_modified(): content.modified = fs_modified_time else: content.modified = git_content.get_newest_commit_date() else: # File isn't committed content.date = fs_creation_time else: # file is not managed by git content.date = fs_creation_time # Clean up content attributes if not hasattr(content, 'modified'): content.modified = content.date if hasattr(content, 'date'): content.locale_date = strftime(content.date, content.date_format) if hasattr(content, 'modified'): content.locale_modified = strftime( content.modified, content.date_format)
Update modification and creation times from git
def sigfig_round(values, sigfig=1):
    """
    Round a single value to a specified number of significant figures.

    Parameters
    ----------
    values: float, value to be rounded
    sigfig: int, number of significant figures to reduce to

    Returns
    ----------
    rounded: values, but rounded to the specified number of significant figures

    Examples
    ----------
    In [1]: sigfig_round(-232453.00014045456, 1)
    Out[1]: -200000.0

    In [2]: sigfig_round(.00014045456, 1)
    Out[2]: 0.0001

    In [3]: sigfig_round(.00014045456, 4)
    Out[3]: 0.0001405
    """
    as_int, multiplier = sigfig_int(values, sigfig)
    rounded = as_int * (10 ** multiplier)

    return rounded
Round a single value to a specified number of significant figures. Parameters ---------- values: float, value to be rounded sigfig: int, number of significant figures to reduce to Returns ---------- rounded: values, but rounded to the specified number of significant figures Examples ---------- In [1]: trimesh.util.sigfig_round(-232453.00014045456, 1) Out[1]: -200000.0 In [2]: trimesh.util.sigfig_round(.00014045456, 1) Out[2]: 0.0001 In [3]: trimesh.util.sigfig_round(.00014045456, 4) Out[3]: 0.0001405
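sigfig_round delegates to a sigfig_int helper defined elsewhere. A minimal scalar sketch of what that decomposition plausibly looks like, assuming it returns an integer mantissa plus a power-of-ten exponent:

import numpy as np

def sigfig_int(value, sigfig):
    # Hypothetical sketch: split value into (mantissa, exponent) so that
    # mantissa * 10**exponent keeps `sigfig` significant figures.
    if value == 0:
        return 0, 0
    # exponent of the most significant digit
    magnitude = int(np.floor(np.log10(abs(value))))
    exponent = magnitude - (sigfig - 1)
    mantissa = int(round(value / (10.0 ** exponent)))
    return mantissa, exponent

# sigfig_round(-232453.00014045456, 1) -> -2 * 10**5 == -200000.0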
def convert_softmax(builder, layer, input_names, output_names, keras_layer):
    """Convert a softmax layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = (input_names[0], output_names[0])
    builder.add_softmax(name=layer, input_name=input_name,
                        output_name=output_name)
Convert a softmax layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object.
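A hedged usage sketch wiring the converter into a builder by hand. The feature names and shapes are made up; only add_softmax's (name, input_name, output_name) signature is taken from the code above, and keras_layer can be a placeholder since this converter never touches it.

from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# Hypothetical 10-class head: the converter only needs the builder
# and the edge names.
builder = NeuralNetworkBuilder(
    input_features=[('dense_out', datatypes.Array(10))],
    output_features=[('probs', datatypes.Array(10))])
convert_softmax(builder, 'softmax_1', ['dense_out'], ['probs'],
                keras_layer=None)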
def __default_emit_trace(self, data): """ Writes the given tracing data to Python Logging. :param data: The tracing data to be written. """ message = format_trace_data(data) self.io.logger.info(message)
Writes the given tracing data to Python Logging. :param data: The tracing data to be written.
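format_trace_data is defined elsewhere in the library; a stand-in sketch, assuming data is a flat mapping of trace fields:

def format_trace_data(data):
    # Hypothetical stand-in: render trace fields as key=value pairs.
    return ' '.join('{}={!r}'.format(k, v) for k, v in sorted(data.items()))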
def interpol_hist2d(h2d, oversamp_factor=10):
    """Sample the interpolator of a root 2d hist.

    Root's hist2d has a weird internal interpolation routine, also using
    neighbouring bins.
    """
    from rootpy import ROOTError
    xlim = h2d.bins(axis=0)
    ylim = h2d.bins(axis=1)
    # evaluate ROOT's interpolator on a grid oversampled by oversamp_factor
    xn = h2d.nbins(0) * oversamp_factor
    yn = h2d.nbins(1) * oversamp_factor
    x = np.linspace(xlim[0], xlim[1], xn)
    y = np.linspace(ylim[0], ylim[1], yn)
    mat = np.zeros((xn, yn))
    for xi in range(xn):
        for yi in range(yn):
            try:
                mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
            except ROOTError:
                continue
    return mat, x, y
Sample the interpolator of a root 2d hist. Root's hist2d has a weird internal interpolation routine, also using neighbouring bins.
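A usage sketch, assuming rootpy's Hist2D constructor takes (nbinsx, xlow, xhigh, nbinsy, ylow, yhigh); the fill data is random and purely illustrative.

import numpy as np
from rootpy.plotting import Hist2D

h = Hist2D(20, 0.0, 1.0, 20, 0.0, 1.0)
for vx, vy in np.random.rand(1000, 2):
    h.Fill(vx, vy)
mat, x, y = interpol_hist2d(h, oversamp_factor=5)  # mat is 100 x 100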
def get_wide_interims(self):
    """Returns a dictionary with the analyses services from the current
    worksheet which have at least one interim with 'Wide' attribute set to
    true and that have not yet been submitted

    The structure of the returned dictionary is the following:
    <Analysis_keyword>: {
        'analysis': <Analysis_name>,
        'keyword': <Analysis_keyword>,
        'interims': {
            <Interim_keyword>: {
                'value': <Interim_default_value>,
                'keyword': <Interim_key>,
                'title': <Interim_title>
            }
        }
    }
    """
    outdict = {}
    allowed_states = ['assigned', 'unassigned']
    for analysis in self.context.getAnalyses():
        # TODO Workflow - Analysis Use a query instead of this
        if api.get_workflow_status_of(analysis) not in allowed_states:
            continue

        if analysis.getKeyword() in outdict:
            continue

        calculation = analysis.getCalculation()
        if not calculation:
            continue

        andict = {
            "analysis": analysis.Title(),
            "keyword": analysis.getKeyword(),
            "interims": {}
        }

        # Analysis Service interim defaults
        for field in analysis.getInterimFields():
            if field.get("wide", False):
                andict["interims"][field["keyword"]] = field

        # Interims from calculation
        for field in calculation.getInterimFields():
            if field["keyword"] not in andict["interims"] \
                    and field.get("wide", False):
                andict["interims"][field["keyword"]] = field

        if andict["interims"]:
            outdict[analysis.getKeyword()] = andict
    return outdict
Returns a dictionary with the analyses services from the current worksheet which have at least one interim with 'Wide' attribute set to true and that have not yet been submitted The structure of the returned dictionary is the following: <Analysis_keyword>: { 'analysis': <Analysis_name>, 'keyword': <Analysis_keyword>, 'interims': { <Interim_keyword>: { 'value': <Interim_default_value>, 'keyword': <Interim_key>, 'title': <Interim_title> } } }
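Illustrative consumption of the returned structure; the `view` name is hypothetical, since in practice this runs inside the worksheet view instance itself.

# Hypothetical caller: walk every 'wide' interim across the worksheet.
for keyword, info in view.get_wide_interims().items():
    for interim in info['interims'].values():
        print(keyword, interim['keyword'], interim.get('value'))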
def register_cmdfinalization_hook(self, func: Callable[[plugin.CommandFinalizationData], plugin.CommandFinalizationData]) -> None: """Register a hook to be called after a command is completed, whether it completes successfully or not.""" self._validate_cmdfinalization_callable(func) self._cmdfinalization_hooks.append(func)
Register a hook to be called after a command is completed, whether it completes successfully or not.
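A usage sketch inside a cmd2 application; the hook name and output are made up, while the registration call and the CommandFinalizationData type come from the method above.

import cmd2
from cmd2 import plugin

class App(cmd2.Cmd):
    def __init__(self):
        super().__init__()
        self.register_cmdfinalization_hook(self.after_command)

    def after_command(
            self, data: plugin.CommandFinalizationData
    ) -> plugin.CommandFinalizationData:
        # Runs after every command, whether it succeeded or raised.
        self.poutput('command finished')
        return data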
def auto_correlation(sequence):
    """
    Test for the autocorrelation of a sequence between t and t - 1.
    As the 'auto_correlation' increases, it is less likely that the
    sequence was generated randomly.

    :param sequence: any iterable with at most 2 values that can be turned
                     into a float via float(). e.g.
                     '1001001'
                     [1, 0, 1, 0, 1]
                     [1.2, .1, .5, 1]
    :rtype: returns a dict of the linear regression stats of sequence[1:]
            vs. sequence[:-1]

    >>> result = auto_correlation('00000001111111111100000000')
    >>> result['p'] < 0.05
    True
    >>> result['auto_correlation']
    0.83766233766233755
    """
    if isinstance(sequence, str):
        sequence = [int(c) for c in sequence]
    seq = np.array(list(sequence), dtype=float)
    dseq = np.column_stack((seq[1:], seq[:-1]))
    slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
    cc = np.corrcoef(dseq, rowvar=0)[0][1]
    return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
            'p': ttp, 'see': see, 'auto_correlation': cc}
Test for the autocorrelation of a sequence between t and t - 1. As the 'auto_correlation' increases, it is less likely that the sequence was generated randomly. :param sequence: any iterable with at most 2 values that can be turned into a float via float(). e.g. '1001001' [1, 0, 1, 0, 1] [1.2, .1, .5, 1] :rtype: returns a dict of the linear regression stats of sequence[1:] vs. sequence[:-1] >>> result = auto_correlation('00000001111111111100000000') >>> result['p'] < 0.05 True >>> result['auto_correlation'] 0.83766233766233755
def dpi(self):
    """ Physical resolution of the document coordinate system (dots per inch). """
    if self._dpi is not None:
        return self._dpi
    if self._canvas is None:
        return None
    return self.canvas.dpi
Physical resolution of the document coordinate system (dots per inch).
def rescale_field(self, new_dim): """ Changes the discretization of the potential field by linear interpolation. This is necessary if the potential field obtained from DFT is strangely skewed, or is too fine or coarse. Obeys periodic boundary conditions at the edges of the cell. Alternatively useful for mixing potentials that originally are on different grids. :param new_dim: tuple giving the numpy shape of the new grid """ v_dim = self.__v.shape padded_v = np.lib.pad(self.__v, ((0, 1), (0, 1), (0, 1)), mode='wrap') ogrid_list = np.array([list(c) for c in list( np.ndindex(v_dim[0] + 1, v_dim[1] + 1, v_dim[2] + 1))]) v_ogrid = padded_v.reshape( ((v_dim[0] + 1) * (v_dim[1] + 1) * (v_dim[2] + 1), -1)) ngrid_a, ngrid_b, ngrid_c = np.mgrid[0: v_dim[0]: v_dim[0] / new_dim[0], 0: v_dim[1]: v_dim[1] / new_dim[1], 0: v_dim[2]: v_dim[2] / new_dim[2]] v_ngrid = scipy.interpolate.griddata(ogrid_list, v_ogrid, (ngrid_a, ngrid_b, ngrid_c), method='linear').reshape( (new_dim[0], new_dim[1], new_dim[2])) self.__v = v_ngrid
Changes the discretization of the potential field by linear interpolation. This is necessary if the potential field obtained from DFT is strangely skewed, or is too fine or coarse. Obeys periodic boundary conditions at the edges of the cell. Alternatively useful for mixing potentials that originally are on different grids. :param new_dim: tuple giving the numpy shape of the new grid
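The same technique outside the class, on a toy field. A minimal sketch assuming a 4x4x4 grid resampled to 8x8x8; the array and dimensions are made up for illustration.

import numpy as np
import scipy.interpolate

v = np.random.rand(4, 4, 4)          # toy potential on a 4x4x4 grid
new_dim = (8, 8, 8)
# wrap-pad one layer so interpolation obeys periodic boundaries
padded = np.pad(v, ((0, 1), (0, 1), (0, 1)), mode='wrap')
points = np.array(list(np.ndindex(5, 5, 5)))
values = padded.reshape(-1)
grid_a, grid_b, grid_c = np.mgrid[0:4:4 / new_dim[0],
                                  0:4:4 / new_dim[1],
                                  0:4:4 / new_dim[2]]
resampled = scipy.interpolate.griddata(
    points, values, (grid_a, grid_b, grid_c), method='linear')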
def close(self) -> None: """Close the task. .. versionadded:: 1.0 This method must be called when the task is no longer needed. """ self.__data_channel_buffer.stop() self.__data_channel_buffer.close() self.__data_channel_buffer = None if not self.__was_playing: self.__hardware_source.stop_playing()
Close the task. .. versionadded:: 1.0 This method must be called when the task is no longer needed.
def fit(sim_mat, D_len, cidx):
    """
    Algorithm maximizes energy between clusters, which is the
    distinguishing feature of this algorithm. The distance matrix contains
    mostly zeros, which are overlooked during the search for maximal
    distances. The algorithm does not try to retain k clusters.

    sim_mat: dict of dicts - sparse symmetric distance matrix, mapping
        row index -> {column index: distance}
    D_len: int - number of rows (items) in the distance matrix
    cidx: list - indices of the initial cluster centers
    """
    min_energy = np.inf
    for j in range(3):
        # select indices in each sample that maximizes its dimension
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx])
                for idy in range(D_len) if idy in sim_mat]
        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(inds == i)[0]  # find indices for every cluster
            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers
        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx
    return inds_min, cidx_min
Algorithm maximizes energy between clusters, which is the distinguishing feature of this algorithm. The distance matrix contains mostly zeros, which are overlooked during the search for maximal distances. The algorithm does not try to retain k clusters. sim_mat: dict of dicts - sparse symmetric distance matrix, mapping row index -> {column index: distance} D_len: int - number of rows (items) in the distance matrix cidx: list - indices of the initial cluster centers
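A toy invocation, assuming the sparse dict-of-dicts layout described above; the distances and seed centers are made up.

import numpy as np

# Four items, two seed centers; distances stored sparsely.
sim_mat = {
    0: {1: 0.1, 3: 0.9},
    1: {0: 0.1, 3: 0.8},
    2: {3: 0.2},
    3: {0: 0.9, 2: 0.2},
}
inds, centers = fit(sim_mat, D_len=4, cidx=[0, 3])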