code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def groups_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a private channel.

    Args:
        channel (str): The channel id, e.g. 'C1234567890'.
        thread_ts (str): Timestamp of an existing message with 0 or more
            replies, e.g. '1234567890.123456'.
    """
    # The groups.* API family requires a legacy user (xoxp) token.
    self._validate_xoxp_token()
    params = dict(kwargs, channel=channel, thread_ts=thread_ts)
    return self.api_call("groups.replies", http_verb="GET", params=params)
Retrieve a thread of messages posted to a private channel Args: channel (str): The channel id. e.g. 'C1234567890' thread_ts (str): The timestamp of an existing message with 0 or more replies. e.g. '1234567890.123456'
def on(self, event_name, *args, **kwargs):
    """Decorator shortcut for ``add_event_handler``.

    Args:
        event_name: Event to attach the handler to.
        *args: Optional positional args forwarded to the handler registration.
        **kwargs: Optional keyword args forwarded to the handler registration.
    """
    def decorator(handler):
        # Register, then hand the function back unchanged so it can still
        # be called (or decorated again) directly.
        self.add_event_handler(event_name, handler, *args, **kwargs)
        return handler
    return decorator
Decorator shortcut for add_event_handler. Args: event_name: An event to attach the handler to. Valid events are from :class:`~ignite.engine.Events` or any `event_name` added by :meth:`~ignite.engine.Engine.register_events`. *args: optional args to be passed to `handler`. **kwargs: optional keyword args to be passed to `handler`.
def write_chunk(outfile, tag, data=b''):
    """Write a PNG chunk to *outfile*, including length and checksum."""
    payload = bytes(data)
    outfile.write(struct.pack("!I", len(payload)))
    outfile.write(tag)
    outfile.write(payload)
    # The CRC covers tag and data, but not the length field; mask to
    # 32 bits for a consistent unsigned value.
    crc = zlib.crc32(payload, zlib.crc32(tag)) & 0xFFFFFFFF
    outfile.write(struct.pack("!I", crc))
Write a PNG chunk to the output file, including length and checksum.
def copy_openapi_specs(output_path, component):
    """Copy generated and validated OpenAPI specs to the reana-commons module.

    :param output_path: Path to the generated ``openapi.json`` file.
    :param component: REANA component name the spec belongs to.
    :raises ValueError: if *component* is not one of the known components
        (the original code left ``file`` unbound and crashed with NameError).
    """
    filenames = {
        'reana-server': 'reana_server.json',
        'reana-workflow-controller': 'reana_workflow_controller.json',
        'reana-job-controller': 'reana_job_controller.json',
    }
    try:
        spec_filename = filenames[component]
    except KeyError:
        raise ValueError('Unknown component: {0}'.format(component))
    # REANA_SRCDIR overrides the default '..' source checkout location.
    reana_srcdir = os.environ.get('REANA_SRCDIR') or os.path.join('..')
    try:
        reana_commons_specs_path = os.path.join(
            reana_srcdir, 'reana-commons', 'reana_commons',
            'openapi_specifications')
        if os.path.exists(reana_commons_specs_path) and \
                os.path.isfile(output_path):
            shutil.copy(output_path,
                        os.path.join(reana_commons_specs_path, spec_filename))
            shutil.copy(output_path, os.path.join('docs', 'openapi.json'))
    except Exception as e:
        click.echo('Something went wrong, could not copy openapi '
                   'specifications to reana-commons \n{0}'.format(e))
Copy generated and validated openapi specs to reana-commons module.
def get_src_address_from_data(self, decoded=True):
    """Return the SRC address of the message, or None when the message
    type carries no SRC_ADDRESS label.

    :param decoded: read from the decoded bits instead of the plain bits
    :return: hex string or None
    """
    label = next((lbl for lbl in self.message_type
                  if lbl.field_type
                  and lbl.field_type.function == FieldType.Function.SRC_ADDRESS),
                 None)
    if label is None:
        return None
    start, end = self.get_label_range(label, view=1, decode=decoded)
    source = self.decoded_hex_str if decoded else self.plain_hex_str
    return source[start:end]
Return the SRC address of a message if SRC_ADDRESS label is present in message type of the message Return None otherwise :param decoded: :return:
def difference(self, other, sort=None):
    """Return a new Index with elements from the index that are not in
    `other` (the set difference of two Index objects).

    Parameters
    ----------
    other : Index or array-like
    sort : False or None, default None
        Whether to sort the resulting index; with ``None`` a sort is
        attempted and TypeErrors from incomparable elements are ignored.

    Returns
    -------
    difference : Index
    """
    self._validate_sort_keyword(sort)
    self._assert_can_do_setop(other)
    # Equal indexes difference to an empty index of the same type.
    if self.equals(other):
        return self._shallow_copy(self._data[:0])
    other, result_name = self._convert_can_do_setop(other)
    this = self._get_unique_index()
    indexer = this.get_indexer(other)
    indexer = indexer.take((indexer != -1).nonzero()[0])
    # Positions of unique labels not matched by anything in `other`.
    label_diff = np.setdiff1d(np.arange(this.size), indexer,
                              assume_unique=True)
    the_diff = this.values.take(label_diff)
    if sort is None:
        try:
            the_diff = sorting.safe_sort(the_diff)
        except TypeError:
            pass
    return this._shallow_copy(the_diff, name=result_name, freq=None)
Return a new Index with elements from the index that are not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- difference : Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Int64Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64')
def create(self):
    """Create a single notebook instance via the jupyterhub helm chart."""
    helm("repo", "add", "jupyterhub", self.helm_repo)
    helm("repo", "update")
    secret_yaml = self.get_security_yaml()
    out = helm(
        "upgrade", "--install", self.release, "jupyterhub/jupyterhub",
        namespace=self.namespace,
        version=self.version,
        input=secret_yaml,
    )
    # Surface helm's own diagnostics either way.
    if out.returncode != 0:
        print(out.stderr)
    else:
        print(out.stdout)
Create a single instance of notebook.
def filename(self):
    """Return the name of the configuration file to use.

    The global configuration name is used unless either a filename or a
    root path has been set on this instance.
    """
    # Normalize missing attributes to None so the check below is uniform.
    self._filename = getattr(self, '_filename', None)
    self._root_path = getattr(self, '_root_path', None)
    if self._filename is None and self._root_path is None:
        return self._filename_global()
    return self._filename_projects()
Defines the name of the configuration file to use.
def distance_to_contact(D, alpha=1):
    """Compute a contact matrix from an input distance matrix.

    Distance values of zero are given the largest contact count otherwise
    inferred from the non-zero distance values.

    Parameters
    ----------
    D : numpy.ndarray
        Input distance matrix.
    alpha : callable or array-like, optional
        Either the distance->contact mapping itself, or the exponent ``a``
        of ``1 / d**(1/a)``.
    """
    if callable(alpha):
        to_contact = alpha
    else:
        try:
            exponent = np.float64(alpha)

            def to_contact(x):
                return 1 / (x ** (1 / exponent))
        except TypeError:
            print("Alpha parameter must be callable or an array-like")
            raise
        except ZeroDivisionError:
            raise ValueError("Alpha parameter must be non-zero")
    nonzero = D != 0
    # Zero distances get the maximum contact value seen elsewhere.
    fill_value = np.max(to_contact(D[nonzero]))
    contacts = np.zeros(D.shape)
    contacts[nonzero] = to_contact(D[nonzero])
    contacts[~nonzero] = fill_value
    return contacts
Compute contact matrix from input distance matrix. Distance values of zeroes are given the largest contact count otherwise inferred non-zero distance values.
def remove_file(profile, branch, file_path, commit_message=None):
    """Remove a file from a branch.

    Args:
        profile: Auth profile from ``simplygithub.authentication.profile``
            (carries the ``repo`` and ``token``).
        branch: The name of a branch.
        file_path: The path of the file to delete.
        commit_message: Optional commit message for the deletion.

    Returns:
        A dict with data about the branch's new ref (including the new
        HEAD SHA after the commit).
    """
    branch_sha = get_branch_sha(profile, branch)
    tree = get_files_in_branch(profile, branch_sha)
    pruned_tree = remove_file_from_tree(tree, file_path)
    tree_sha = trees.create_tree(profile, pruned_tree).get("sha")
    if not commit_message:
        commit_message = "Deleted " + file_path + "."
    commit_data = commits.create_commit(profile, commit_message, tree_sha,
                                        [branch_sha])
    return refs.update_ref(profile, "heads/" + branch, commit_data.get("sha"))
Remove a file from a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch The name of a branch. file_path The path of the file to delete. commit_message A commit message to give to the commit. Returns: A dict with data about the branch's new ref (it includes the new SHA the branch's HEAD points to, after committing the new file).
def infer_shape(self, node, input_shapes):
    """Return a list of output shapes based on ``input_shapes``.

    Functionals produce scalars; otherwise the shape of the wrapped
    operator's range is reported, without evaluating the Op.
    """
    if isinstance(self.operator, Functional):
        return [()]
    return [tuple(native(dim) for dim in self.operator.range.shape)]
Return a list of output shapes based on ``input_shapes``. This method is optional. It allows to compute the shape of the output without having to evaluate. Parameters ---------- node : `theano.gof.graph.Apply` The node of this Op in the computation graph. input_shapes : 1-element list of `theano.compile.ops.Shape` Symbolic shape of the input. Returns ------- output_shapes : 1-element list of tuples Fixed shape of the output determined by `odl_op`.
def component(self, **kwargs):
    """Return a key specifying the data sub-selection, or None when a
    required format field is missing."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    try:
        return NameFactory.component_format.format(**merged)
    except KeyError:
        return None
Return a key that specifies data the sub-selection
def delete(self):
    """Delete this model from the database.

    Each field's own ``delete`` is invoked so special cases clean up after
    themselves; a 'delete' event is published when notifications are on.
    """
    redis = type(self).get_redis()
    for _fieldname, field in self.proxy:
        field.delete(redis)
    redis.delete(self.key())
    redis.srem(type(self).members_key(), self.id)
    if isinstance(self, PermissionHolder):
        redis.delete(self.allow_key())
    if self.notify:
        payload = json.dumps({
            'event': 'delete',
            'data': self.to_json(),
        })
        redis.publish(type(self).cls_key(), payload)
        redis.publish(self.key(), payload)
    return self
Deletes this model from the database, calling delete in each field to properly delete special cases
def loadJSON(self, jdata):
    """Load the given JSON information for this column.

    :param jdata: <dict>
    """
    super(ReferenceColumn, self).loadJSON(jdata)
    # Keep the current values when the JSON carries no overrides.
    self.__reference = jdata.get('reference') or self.__reference
    self.__removeAction = jdata.get('removeAction') or self.__removeAction
Loads the given JSON information for this column. :param jdata: <dict>
def skip_signatures_and_duplicates_concat_well_known_metadata(cls, default_dup_action=None, additional_rules=None):
    """Produce a rule set useful in many deploy jar creation contexts.

    Duplicate entries are skipped by default (1st encountered wins), with
    special handling: jar signature metadata is dropped, INDEX.LIST files
    are dropped, and ServiceLoader provider-configuration files are
    concatenated in the order encountered.

    :param default_dup_action: Optional default action for duplicates,
        ``Duplicate.SKIP`` when not given.
    :param additional_rules: Optional extra ``Duplicate``/``Skip`` rules.
    :returns: JarRules
    """
    default_dup_action = Duplicate.validate_action(
        default_dup_action or Duplicate.SKIP)
    additional_rules = assert_list(additional_rules,
                                   expected_type=(Duplicate, Skip))
    base_rules = [
        Skip(r'^META-INF/[^/]+\.SF$'),
        Skip(r'^META-INF/[^/]+\.DSA$'),
        Skip(r'^META-INF/[^/]+\.RSA$'),
        Skip(r'^META-INF/INDEX.LIST$'),
        Duplicate(r'^META-INF/services/', Duplicate.CONCAT_TEXT),
    ]
    return JarRules(rules=base_rules + additional_rules,
                    default_dup_action=default_dup_action)
Produces a rule set useful in many deploy jar creation contexts. The rule set skips duplicate entries by default, retaining the 1st encountered. In addition it has the following special handling: - jar signature metadata is dropped - jar indexing files INDEX.LIST are dropped - ``java.util.ServiceLoader`` provider-configuration files are concatenated in the order encountered :param default_dup_action: An optional default action to take for duplicates. Defaults to `Duplicate.SKIP` if not specified. :param additional_rules: Optionally one or more jar rules to add to those described above. :returns: JarRules
def stop(self):
    """Terminate the WAMP session.

    Raises NotRunningError when this instance was never started.
    """
    if not self._started:
        raise NotRunningError("This AutobahnSync instance is not started")
    self._callbacks_runner.stop()
    self._started = False
Terminate the WAMP session .. note:: If the :meth:`AutobahnSync.run` has been run with ``blocking=True``, it will returns then.
def upsert_module_file(module_ident, fileid, filename):
    """Upsert a module_files entry linking ``fileid``/``filename`` to the
    content at ``module_ident``."""
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("SELECT true FROM module_files "
                           "WHERE module_ident = %s "
                           "AND filename = %s", (module_ident, filename,))
            try:
                cursor.fetchone()[0]
            except (IndexError, TypeError):
                # No existing row: insert a fresh association.
                cursor.execute("INSERT INTO module_files "
                               "(module_ident, fileid, filename) "
                               "VALUES (%s, %s, %s)",
                               (module_ident, fileid, filename,))
            else:
                # Row exists: repoint it at the new file.
                cursor.execute("UPDATE module_files "
                               "SET (fileid) = (%s) "
                               "WHERE module_ident = %s AND filename = %s",
                               (fileid, module_ident, filename,))
Upsert a file associated with ``fileid`` with ``filename`` as a module_files entry associated with content at ``module_ident``.
def similarity(w1, w2, threshold=0.5):
    """Compare two strings case-insensitively and return their similarity
    ratio, or 0 when the ratio does not exceed *threshold*.

    NOTE: if the results look like junk, raise the threshold.
    """
    score = SM(None, str(w1).lower(), str(w2).lower()).ratio()
    return score if score > threshold else 0
Compare two string "words" and return their similarity ratio if it is larger than the threshold, or 0 otherwise. NOTE: if the results look more like junk, increase the threshold value.
def http_request(self, verb, uri, data=None, headers=None, files=None, response_format=None, is_rdf=True, stream=False):
    """Primary route for all HTTP requests to the repository.

    Args:
        verb (str): HTTP verb, e.g. GET, PUT, POST, HEAD, PATCH.
        uri (rdflib.term.URIRef, str): target URI.
        data (str, file): optional request payload.
        headers (dict): optional headers passed to requests.
        files (dict): optional files passed to requests.
        response_format (str): desired serialization of the payload,
            e.g. 'application/rdf+xml', 'text/turtle'.
        is_rdf (bool): when True, derive the Accept header for GETs.
        stream (bool): passed through to requests.

    Returns:
        requests.models.Response
    """
    if is_rdf and verb == 'GET':
        # Accept headers only matter for reads of RDF resources.
        if not response_format:
            response_format = self.repo.default_serialization
        if headers and 'Accept' not in headers.keys():
            headers['Accept'] = response_format
        else:
            # NOTE(review): when headers already contains Accept, the whole
            # dict is replaced here, dropping other headers — confirm intent.
            headers = {'Accept': response_format}
    if type(uri) == rdflib.term.URIRef:
        uri = uri.toPython()
    logger.debug("%s request for %s, format %s, headers %s" %
                 (verb, uri, response_format, headers))
    session = requests.Session()
    request = requests.Request(
        verb, uri,
        auth=(self.repo.username, self.repo.password),
        data=data, headers=headers, files=files)
    prepped_request = session.prepare_request(request)
    return session.send(prepped_request, stream=stream)
Primary route for all HTTP requests to repository. Ability to set most parameters for requests library, with some additional convenience parameters as well. Args: verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc. uri (rdflib.term.URIRef,str): input URI data (str,file): payload of data to send for request, may be overridden in preperation of request headers (dict): optional dictionary of headers passed directly to requests.request files (dict): optional dictionary of files passed directly to requests.request response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc. is_rdf (bool): if True, set Accept header based on combination of response_format and headers stream (bool): passed directly to requests.request for stream parameter Returns: requests.models.Response
def touch(self):
    """Touch all of the related models of the relationship."""
    key = self.get_related().get_key_name()
    columns = self.get_related_fresh_update()
    related_ids = self.get_related_ids()
    if len(related_ids) > 0:
        self.get_related().new_query().where_in(key, related_ids).update(columns)
Touch all of the related models of the relationship.
def predict(self, Xstar):
    """Predict the posterior mean on Xstar."""
    KV = self._update_cache()
    self.covar.setXstar(Xstar)
    Kstar = self.covar.Kcross()
    return SP.dot(Kstar, KV['alpha'])
predict on Xstar
def _calculate_unique_id(self, xblock):
    """Provide a default value for fields with ``default=UNIQUE_ID``.

    The returned string is a SHA1 hex digest deterministically derived
    from the field's key in its given scope.
    """
    scope = scope_key(self, xblock)
    return hashlib.sha1(scope.encode('utf-8')).hexdigest()
Provide a default value for fields with `default=UNIQUE_ID`. Returned string is a SHA1 hex digest that is deterministically calculated for the field in its given scope.
def extract_datetime_hour(cls, datetime_str):
    """Extract a `datetime` (hour resolution) from the given string.

    Raises `DateTimeFormatterException` when the extraction fails.
    """
    if not datetime_str:
        raise DateTimeFormatterException('datetime_str must a valid string')
    try:
        return cls._extract_timestamp(datetime_str, cls.DATETIME_HOUR_FORMAT)
    except (TypeError, ValueError):
        raise DateTimeFormatterException(
            'Invalid datetime string {}.'.format(datetime_str))
Tries to extract a `datetime` object from the given string, including only hours. Raises `DateTimeFormatterException` if the extraction fails.
def _term(self, term):
    """Append *term* to the query's ``q`` string.

    Arguments:
        term (str): The term to add.

    Returns:
        SearchHelper: Self
    """
    text = str(term)
    if text:
        self.__query["q"] += text
    return self
Add a term to the query. Arguments: term (str): The term to add. Returns: SearchHelper: Self
def c_module_relocs(self):
    """Build the (x86, x64) relocation snippets for the module variable."""
    if self.opts.no_structs or self.opts.windll:
        return '', ''
    struct_name = self._c_struct_names()[1]
    x86 = reloc_var(self.name, struct_name, self.opts.reloc_delta,
                    self._c_uses_pointer())
    if self._c_uses_pointer():
        x64 = '{0} *{1} = &_{1};\n'.format(struct_name, self.name)
    else:
        x64 = ''
    return x86, x64
Build relocation for the module variable.
def parent_workspace(context):
    """Return the containing workspace, or None when not found."""
    if IWorkspaceFolder.providedBy(context):
        return context
    for ancestor in aq_chain(context):
        if IWorkspaceFolder.providedBy(ancestor):
            return ancestor
Return containing workspace Returns None if not found.
def add_field(self, field):
    """Add the received field to the model, replacing any same-named
    field and registering its default (callables are resolved lazily)."""
    self.remove_field(field.name)
    self._fields[field.name] = field
    if field.default is None:
        return
    if six.callable(field.default):
        self._default_callables[field.key] = field.default
    else:
        self._defaults[field.key] = field.default
Add the received field to the model.
def apply_async(self, args=None, kwargs=None, **options):
    """Skip the task while a penalty counter is active (decrementing it);
    otherwise delegate to the regular ``apply_async``."""
    key = self._get_cache_key(args, kwargs)
    counter, penalty = cache.get(key, (0, 0))
    if not counter:
        return super(PenalizedBackgroundTask, self).apply_async(
            args=args, kwargs=kwargs, **options)
    cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME)
    logger.info('The task %s will not be executed due to the penalty.'
                % self.name)
    return self.AsyncResult(options.get('task_id') or str(uuid4()))
Checks whether task must be skipped and decreases the counter in that case.
def generate_blocks(self, assume_complete_blocks=None):
    """Identify and extract all blocks from the input tables.

    Blocks are logical identifiers for where related information resides
    in the original table; each can later be converted into a row-titled
    table and stitched together with others into a full data set.

    Args:
        assume_complete_blocks: Optimizes block lookups by not allowing
            titles to be extended; blocks must be perfectly dense to be
            found when active. Optional, defaults to the constructor value.
    """
    saved_assume = self.assume_complete_blocks
    try:
        # Idiom fix: compare against None with `is`/`is not`, not !=/==.
        if assume_complete_blocks is not None:
            self.assume_complete_blocks = assume_complete_blocks
        if self.processed_tables is None:
            self.preprocess()
        self.processed_blocks = []
        for worksheet in range(len(self.processed_tables)):
            ptable = self.processed_tables[worksheet]
            flags = self.flags_by_table[worksheet]
            units = self.units_by_table[worksheet]
            if not self.assume_complete_blocks:
                self.fill_in_table(ptable, worksheet, flags)
            self.processed_blocks.extend(self._find_blocks(
                ptable, worksheet, flags, units, {'worksheet': worksheet}))
        return self.processed_blocks
    finally:
        # Always restore the constructor-configured value.
        self.assume_complete_blocks = saved_assume
Identifies and extracts all blocks from the input tables. These blocks are logical identifiers for where related information resides in the original table. Any block can be converted into a row-titled table which can then be stitched together with other tables from other blocks to form a fully converted data set. Args: assume_complete_blocks: Optimizes block loopups by not allowing titles to be extended. Blocks should be perfectly dense to be found when active. Optional, defaults to constructor value.
def get_subdomain(url):
    """Get the subdomain(s) of the given URL (empty string when none).

    Args:
        url (str): The URL to get the subdomain from.

    Returns:
        str: The subdomain(s)
    """
    # Memoize parsed URLs in the class-level cache.
    if url not in URLHelper.__cache:
        URLHelper.__cache[url] = urlparse(url)
    host_parts = URLHelper.__cache[url].netloc.split(".")
    return ".".join(host_parts[:-2])
Get the subdomain of the given URL. Args: url (str): The URL to get the subdomain from. Returns: str: The subdomain(s)
def connect(url):
    """Connect to a UNIX or TCP socket.

    *url* is either ``tcp://<host>:<port>`` or ``ipc://<path>``.

    Returns:
        (socket, hostname) tuple: the connected socket plus the local
        host name for TCP, or 'localhost' for IPC.

    Raises:
        ValueError: for any other URL scheme.
    """
    # Local import keeps this fix self-contained regardless of how the
    # module originally imported socket names.
    import socket as socket_module

    parsed = urlparse(url)
    if parsed.scheme == 'tcp':
        sock = socket_module.socket()
        host, _, port = parsed.netloc.rpartition(':')
        # bug fix: connect() requires an int port, not the raw string
        # (and gethostname lives on the module, not the socket class).
        address = (host, int(port))
        hostname = socket_module.gethostname()
    elif parsed.scheme == 'ipc':
        sock = socket_module.socket(socket_module.AF_UNIX)
        address = parsed.path
        hostname = 'localhost'
    else:
        raise ValueError('unknown socket type: %s' % parsed.scheme)
    sock.connect(address)
    return sock, hostname
Connect to UNIX or TCP socket. url can be either tcp://<host>:port or ipc://<path>
def get_attribute(self, attrkey, as_string=False, as_list=False):
    """Get the value of an attribute.

    ID and single-valued attributes come back as strings; multi-valued
    attributes as a sorted list. ``as_string`` forces a comma-separated
    string, ``as_list`` forces a list; the two are mutually exclusive.
    """
    assert not as_string or not as_list
    if attrkey not in self._attrs:
        return None
    # ID is stored as a plain string, never a set of values.
    if attrkey == 'ID':
        return self._attrs[attrkey]
    values = sorted(self._attrs[attrkey])
    if len(values) == 1 and not as_list:
        return values[0]
    if as_string:
        return ','.join(values)
    return values
Get the value of an attribute. By default, returns a string for ID and attributes with a single value, and a list of strings for attributes with multiple values. The `as_string` and `as_list` options can be used to force the function to return values as a string (comma-separated in case of multiple values) or a list.
def matches_pattern(self, other):
    """Test whether this instance matches a template instance.

    A key matches when either side holds None (wildcard) or both values
    are equal; every key must match for a True result.
    """
    is_match = False
    if isinstance(other, Userdata):
        for key in self._userdata:
            if self._userdata[key] is None or other[key] is None:
                is_match = True
            elif self._userdata[key] == other[key]:
                is_match = True
            else:
                is_match = False
                break
    return is_match
Test if the current instance matches a template instance.
def WriteBlobs(self, blob_id_data_map, cursor=None):
    """Writes the given blobs, chunked and batch-inserted into 'blobs'."""
    all_chunks = []
    for blob_id, blob_data in iteritems(blob_id_data_map):
        all_chunks.extend(_BlobToChunks(blob_id.AsBytes(), blob_data))
    for chunk_values in _PartitionChunks(all_chunks):
        _Insert(cursor, "blobs", chunk_values)
Writes given blobs.
def client_end(request, socket, context):
    """Handle cleanup when a session ends for the given client triple.

    Sends unsubscribe and finish events, unsubscribes from all channels,
    and removes the client triple from CLIENTS.
    """
    for channel in socket.channels:
        events.on_unsubscribe.send(request, socket, context, channel)
    events.on_finish.send(request, socket, context)
    # Iterate over a copy: unsubscribe() mutates socket.channels.
    for channel in socket.channels[:]:
        socket.unsubscribe(channel)
    del CLIENTS[socket.session.session_id]
Handles cleanup when a session ends for the given client triple. Sends unsubscribe and finish events, actually unsubscribes from any channels subscribed to, and removes the client triple from CLIENTS.
def count(self, with_limit_and_skip=False):
    """Get the size of the results set for this query.

    :meth:`limit` and :meth:`skip` are ignored unless
    *with_limit_and_skip* is True. Any maxTimeMS, comment, hint, or
    collation set on the cursor is forwarded to the count command.
    Raises :class:`~pymongo.errors.OperationFailure` on a database error.
    """
    validate_boolean("with_limit_and_skip", with_limit_and_skip)
    cmd = SON([("count", self.__collection.name), ("query", self.__spec)])
    if self.__max_time_ms is not None:
        cmd["maxTimeMS"] = self.__max_time_ms
    if self.__comment:
        cmd["$comment"] = self.__comment
    if self.__hint is not None:
        cmd["hint"] = self.__hint
    if with_limit_and_skip:
        if self.__limit:
            cmd["limit"] = self.__limit
        if self.__skip:
            cmd["skip"] = self.__skip
    return self.__collection._count(cmd, self.__collation)
Get the size of the results set for this query. Returns the number of documents in the results set for this query. Does not take :meth:`limit` and :meth:`skip` into account by default - set `with_limit_and_skip` to ``True`` if that is the desired behavior. Raises :class:`~pymongo.errors.OperationFailure` on a database error. When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` applied to the query. In the following example the hint is passed to the count command: collection.find({'field': 'value'}).hint('field_1').count() The :meth:`count` method obeys the :attr:`~pymongo.collection.Collection.read_preference` of the :class:`~pymongo.collection.Collection` instance on which :meth:`~pymongo.collection.Collection.find` was called. :Parameters: - `with_limit_and_skip` (optional): take any :meth:`limit` or :meth:`skip` that has been applied to this cursor into account when getting the count .. note:: The `with_limit_and_skip` parameter requires server version **>= 1.1.4-** .. versionchanged:: 2.8 The :meth:`~count` method now supports :meth:`~hint`.
def set_connection_logging(self, loadbalancer, val):
    """Set connection logging on or off for the given load balancer."""
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    req_body = {"connectionLogging": {"enabled": str(val).lower()}}
    resp, body = self.api.method_put(uri, body=req_body)
    return body
Sets the connection logging for the given load balancer.
def create_window(self, pane, name=None, set_active=True):
    """Create a new window that contains just this pane.

    :param pane: The :class:`.Pane` instance to put in the new window.
    :param name: If given, name for the new window.
    :param set_active: When True, focus the new window.
    """
    assert isinstance(pane, Pane)
    assert name is None or isinstance(name, six.text_type)
    # Pick the lowest free index at or above base_index.
    taken_indexes = [w.index for w in self.windows]
    index = self.base_index
    while index in taken_indexes:
        index += 1
    window = Window(index)
    window.add_pane(pane)
    self.windows.append(window)
    self.windows = sorted(self.windows, key=lambda w: w.index)
    app = get_app(return_none=True)
    if app is not None and set_active:
        self.set_active_window(window)
    if name is not None:
        window.chosen_name = name
    assert window.active_pane == pane
    assert window._get_parent(pane)
Create a new window that contains just this pane. :param pane: The :class:`.Pane` instance to put in the new window. :param name: If given, name for the new window. :param set_active: When True, focus the new window.
def openid_authorization_validator(self, request):
    """Additional validation when following the Authorization Code flow:
    hybrid response types carrying a code must supply a nonce."""
    request_info = super(HybridGrant, self).openid_authorization_validator(request)
    if not request_info:
        return request_info
    if request.response_type in ["code id_token", "code id_token token"]:
        if not request.nonce:
            raise InvalidRequestError(
                request=request,
                description='Request is missing mandatory nonce parameter.'
            )
    return request_info
Additional validation when following the Authorization Code flow.
def param_upload(field, path):
    """Pack upload metadata into a dict, or None when *path* is empty."""
    if not path:
        return None
    return {'field': field, 'path': path}
Pack upload metadata.
def xAxisIsMinor(self):
    """Return True if the minor axis is parallel to the X axis."""
    rx, ry = self.radius.x, self.radius.y
    return min(rx, ry) == rx
Returns True if the minor axis is parallel to the X axis, boolean.
def non_parallel(self, vector):
    """Return True if the vectors are non-parallel: neither parallel nor
    perpendicular to each other."""
    parallel = self.is_parallel(vector) is True
    perpendicular = self.is_perpendicular(vector) is True
    return not parallel and not perpendicular
Return True if vectors are non-parallel. Non-parallel vectors are vectors which are neither parallel nor perpendicular to each other.
def __set_date(self, value):
    """Set the date of the payment.

    @param value: datetime
    """
    if not issubclass(value.__class__, date):
        raise ValueError('Invalid date value')
    self.__date = value
Sets the date of the payment. @param value:datetime
def _find_column(input_, token):
    """Return the column in the file where the error occurred.

    ``token.lexpos`` is converted to a position on the current line by
    scanning back to the previous end-of-line.
    """
    pos = token.lexpos
    while pos > 0:
        if input_[pos] == '\n':
            break
        pos -= 1
    return token.lexpos - pos - 1
Find the column in file where error occured. This is taken from token.lexpos converted to the position on the current line by finding the previous EOL.
def widen(self):
    """Increase the interval size by the x scaling coefficient."""
    center = self.time
    half = self.half_duration * self.scaling_coeff_x
    self.set_interval((center - half, center + half))
Increase the interval size.
# Write handler for 0xff03 -> PIA 0 B side Control register.
# Bit 0 toggles the VSYNC IRQ: when set, the CPU IRQ is enabled and bit 7
# (the IRQ 1 flag) is set in the stored value; when clear, the IRQ is
# disabled. Bit 2 selects whether $FF02 acts as DDR (0) or as the normal
# peripheral data lines (1) via select_pdr()/deselect_pdr().
# NOTE(review): full bit-7 IRQ-flag handling beyond the enable path looks
# unfinished (see the accompanying TODO in the docs) — confirm.
def write_PIA0_B_control(self, cpu_cycles, op_address, address, value): log.critical( "%04x| write $%02x (%s) to $%04x -> PIA 0 B side Control reg.\t|%s", op_address, value, byte2bit_string(value), address, self.cfg.mem_info.get_shortest(op_address) ) if is_bit_set(value, bit=0): log.critical( "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: enable\t|%s", op_address, value, byte2bit_string(value), address, self.cfg.mem_info.get_shortest(op_address) ) self.cpu.irq_enabled = True value = set_bit(value, bit=7) else: log.critical( "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: disable\t|%s", op_address, value, byte2bit_string(value), address, self.cfg.mem_info.get_shortest(op_address) ) self.cpu.irq_enabled = False if not is_bit_set(value, bit=2): self.pia_0_B_control.select_pdr() else: self.pia_0_B_control.deselect_pdr() self.pia_0_B_control.set(value)
write to 0xff03 -> PIA 0 B side Control reg. TODO: Handle IRQ bit 7 | IRQ 1 (VSYNC) flag bit 6 | IRQ 2 flag(not used) bit 5 | Control line 2 (CB2) is an output = 1 bit 4 | Control line 2 (CB2) set by bit 3 = 1 bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
def make_inst2():
    """Create example data set 2: demands (I, d), facilities (J, M) and
    transport costs c."""
    I, d = multidict({1: 45, 2: 20, 3: 30, 4: 30})
    J, M = multidict({1: 35, 2: 50, 3: 40})
    c = {
        (1, 1): 8,  (1, 2): 9,  (1, 3): 14,
        (2, 1): 6,  (2, 2): 12, (2, 3): 9,
        (3, 1): 10, (3, 2): 13, (3, 3): 16,
        (4, 1): 9,  (4, 2): 7,  (4, 3): 5,
    }
    return I, J, c, d, M
creates example data set 2
def XanyKXany(self):
    """Compute the self covariance for 'any'.

    Returns a (P, F, F) stack where entry p equals
    ``(Fstar_any * D[:, p]).T @ Fstar_any``.
    """
    n_cols = self.F_any.shape[1]
    result = np.empty((self.P, n_cols, n_cols), order='C')
    for p in range(self.P):
        weighted = self.Fstar_any * self.D[:, p:p + 1]
        result[p] = weighted.T.dot(self.Fstar_any)
    return result
compute self covariance for any
def _parse_identifier(self, identifier, zone=None):
    """Parse a record identifier into (type, name, content).

    Long identifiers encode 'type/name/content' directly; short ones are
    resolved by scanning the zone's records for a matching id. All three
    fields are None when no associated record is found.
    """
    rdtype = name = content = None
    if len(identifier) > 7:
        parts = identifier.split('/')
        rdtype, name = parts[0], parts[1]
        content = '/'.join(parts[2:])
    else:
        for record in self._list_records_in_zone(zone):
            if record['id'] == identifier:
                rdtype = record['type']
                name = record['name'] + '.'
                content = record['content']
    return rdtype, name, content
Parses the record identifier and returns type, name & content of the associated record as tuple. The tuple is empty if no associated record found.
def _calculate_degree_days(temperature_equivalent, base_temperature, cooling=False):
    """Calculate degree days from a series of temperature equivalents.

    Parameters
    ----------
    temperature_equivalent : pandas.Series
    base_temperature : float
    cooling : bool
        Set True for cooling degree days instead of heating degree days.

    Returns
    -------
    pandas.Series named ``HDD_<base>`` (heating) or ``CDD_<base>``
    (cooling); negative values are clipped to 0.
    """
    if cooling:
        degree_days = temperature_equivalent - base_temperature
    else:
        degree_days = base_temperature - temperature_equivalent
    degree_days[degree_days < 0] = 0
    prefix = 'CDD' if cooling else 'HDD'
    degree_days.name = '{}_{}'.format(prefix, base_temperature)
    return degree_days
Calculates degree days, starting with a series of temperature equivalent values Parameters ---------- temperature_equivalent : Pandas Series base_temperature : float cooling : bool Set True if you want cooling degree days instead of heating degree days Returns ------- Pandas Series called HDD_base_temperature for heating degree days or CDD_base_temperature for cooling degree days.
def content_break(self, el):
    """Return True when *el* marks a content boundary — for ODP documents,
    a ``draw:page`` element."""
    if self.type != 'odp':
        return False
    return bool(el.name == 'page' and el.namespace
                and el.namespace == self.namespaces['draw'])
Break on specified boundaries.
def stop(self):
    """Stop the background emulation loop.

    Must not be called from inside the loop thread itself; raises
    ArgumentError when the loop was never started.
    """
    if self._started is False:
        raise ArgumentError("EmulationLoop.stop() called without calling start()")
    self.verify_calling_thread(False, "Cannot call EmulationLoop.stop() from inside the event loop")
    if self._thread.is_alive():
        # Schedule the shutdown coroutine on the loop, then wait for it.
        self._loop.call_soon_threadsafe(self._loop.create_task,
                                        self._clean_shutdown())
        self._thread.join()
Stop the background emulation loop.
def address(self, street, city=None, state=None, zipcode=None, **kwargs):
    """Geocode an address via the 'address' endpoint."""
    fields = {
        'street': street,
        'city': city,
        'state': state,
        'zip': zipcode,
    }
    return self._fetch('address', fields, **kwargs)
Geocode an address.
def _type_check_pointers(utype):
    """Return members of the user-derived type that declare non-nullified
    pointer arrays in its base definition."""
    offenders = []
    for member in utype.members.values():
        if ("pointer" in member.modifiers and member.D > 0
                and (member.default is None or "null" not in member.default)):
            offenders.append(member)
    return offenders
Checks the user-derived type for non-nullified pointer array declarations in its base definition. Returns (list of offending members).
def _loop_no_cache(self, helper_function, num, fragment):
    """Synthesize one fragment without using the cache.

    Returns (True, audio_data) on success, (False, None) otherwise.
    """
    self.log([u"Examining fragment %d (no cache)...", num])
    voice_code = self._language_to_voice_code(fragment.language)
    self.log(u"Calling helper function")
    succeeded, data = helper_function(
        text=fragment.filtered_text,
        voice_code=voice_code,
        output_file_path=None,
        return_audio_data=True,
    )
    if not succeeded:
        self.log_crit(u"An unexpected error occurred in helper_function")
        return (False, None)
    self.log([u"Examining fragment %d (no cache)... done", num])
    return (True, data)
Synthesize all fragments without using the cache
def to_array(self):
    """Serialize this MaskPosition to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    data = super(MaskPosition, self).to_array()
    data['point'] = u(self.point)
    # Shift/scale values are normalized to floats for the wire format.
    data['x_shift'] = float(self.x_shift)
    data['y_shift'] = float(self.y_shift)
    data['scale'] = float(self.scale)
    return data
Serializes this MaskPosition to a dictionary. :return: dictionary representation of this object. :rtype: dict
def generate_host_meta(template=None, *args, **kwargs):
    """Generate a host-meta XRD document.

    Template specific key-value pairs need to be passed as ``kwargs``.

    :arg template: Ready template to fill with args, e.g. "diaspora" (optional)
    :returns: Rendered XRD document (str)
    """
    meta_cls = DiasporaHostMeta if template == "diaspora" else BaseHostMeta
    return meta_cls(*args, **kwargs).render()
Generate a host-meta XRD document. Template specific key-value pairs need to be passed as ``kwargs``, see classes. :arg template: Ready template to fill with args, for example "diaspora" (optional) :returns: Rendered XRD document (str)
def averageAbove(requestContext, seriesList, n):
    """Takes one metric or a wildcard seriesList followed by an integer N.

    Draws only the metrics with an average value of at least N for the
    requested time period.

    Example::

        &target=averageAbove(server*.instance*.threads.busy,25)
    """
    kept = []
    for series in seriesList:
        avg = safeAvg(series)
        if avg is not None and avg >= n:
            kept.append(series)
    return kept
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value above N for the time period specified. Example:: &target=averageAbove(server*.instance*.threads.busy,25) Draws the servers with average values above 25.
# Save signed blobs to the datastore under binary_urn, overwriting any
# existing binary with that URN. Writes go to the legacy AFF4 datastore
# and/or the relational datastore depending on which is enabled; in the
# relational path each blob is content-addressed by hash and referenced
# with its running offset and size.
def WriteSignedBinaryBlobs(binary_urn, blobs, token = None): if _ShouldUseLegacyDatastore(): aff4.FACTORY.Delete(binary_urn, token=token) with data_store.DB.GetMutationPool() as mutation_pool: with aff4.FACTORY.Create( binary_urn, collects.GRRSignedBlob, mode="w", mutation_pool=mutation_pool, token=token) as fd: for blob in blobs: fd.Add(blob, mutation_pool=mutation_pool) if data_store.RelationalDBEnabled(): blob_references = rdf_objects.BlobReferences() current_offset = 0 for blob in blobs: blob_id = data_store.BLOBS.WriteBlobWithUnknownHash( blob.SerializeToString()) blob_references.items.Append( rdf_objects.BlobReference( offset=current_offset, size=len(blob.data), blob_id=blob_id)) current_offset += len(blob.data) data_store.REL_DB.WriteSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn), blob_references)
Saves signed blobs to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: RDFURN that should serve as a unique identifier for the binary. blobs: An Iterable of signed blobs to write to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
def new_item(self, hash_key, range_key=None, attrs=None):
    """Return a new, unsaved Item which can later be PUT to DynamoDB."""
    return Item(self, hash_key, range_key, attrs)
Return an new, unsaved Item which can later be PUT to Amazon DynamoDB.
def _process_params(self):
    """Fill in the default row size and, on Python 2, UTF-8-encode any
    string parameter values.

    Returns:
        Processed self._solr_params dict.
    """
    self._sort_to_str()
    params = self._solr_params
    if 'rows' not in params:
        params['rows'] = self._cfg['row_size']
    for key, val in params.items():
        if isinstance(val, str) and six.PY2:
            params[key] = val.encode(encoding='UTF-8')
    return params
Adds default row size if it's not given in the query. Converts param values into unicode strings. Returns: Processed self._solr_params dict.
def _find_current_phase(self, global_step):
    """Determine the current phase based on the global step.

    This ensures continuing the correct phase after restoring checkpoints.

    Args:
        global_step: Global number of steps performed across all phases.

    Returns:
        Tuple of phase object, epoch number, and phase steps within the epoch.
    """
    epoch_size = sum(phase.steps for phase in self._phases)
    epoch = int(global_step // epoch_size)
    remaining = global_step % epoch_size
    for phase in self._phases:
        if remaining < phase.steps:
            return phase, epoch, remaining
        remaining -= phase.steps
Determine the current phase based on the global step. This ensures continuing the correct phase after restoring checkoints. Args: global_step: The global number of steps performed across all phases. Returns: Tuple of phase object, epoch number, and phase steps within the epoch.
def show_one(request, post_process_fun, object_class, id, template='common_json.html'):
    """Return object of the given type with the specified identifier.

    GET parameters:
      user: identifier of the current user
      stats: turn on the enrichment of the objects by some statistics
      html: turn on the HTML version of the API
    """
    instance = get_object_or_404(object_class, pk=id)
    payload = post_process_fun(request, instance)
    # The docstring doubles as the endpoint's help text.
    return render_json(request, payload, template=template,
                       help_text=show_one.__doc__)
Return object of the given type with the specified identifier. GET parameters: user: identifier of the current user stats: turn on the enrichment of the objects by some statistics html: turn on the HTML version of the API
def _ssweek_num_weeks(ssweek_year):
    """Return the number of Sunday-starting weeks in the given year."""
    this_start = _ssweek_year_start(ssweek_year)
    next_start = _ssweek_year_start(ssweek_year + 1)
    return (next_start - this_start).days // 7
Get the number of Sunday-starting weeks in this year
def settle(self, reserveTransactionId, transactionAmount=None):
    """Charge for a previously reserved payment.

    :param reserveTransactionId: id of the reserve transaction to settle.
    :param transactionAmount: optional amount to charge; omitted from the
        request when None (presumably the full reserved amount is then
        charged -- confirm against the FPS API documentation).
    :returns: a ResultSet parsed from the XML response on HTTP 200.
    :raises FPSResponseError: for any non-200 response.
    """
    params = {}
    params['ReserveTransactionId'] = reserveTransactionId
    if(transactionAmount != None):
        params['TransactionAmount'] = transactionAmount
    response = self.make_request("Settle", params)
    body = response.read()
    if(response.status == 200):
        # Parse the XML body into a ResultSet via SAX.
        rs = ResultSet()
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs
    else:
        raise FPSResponseError(response.status, response.reason, body)
Charges for a reserved payment.
def user_addmedia(userids, active, mediatypeid, period, sendto, severity, **kwargs):
    """Add new media to multiple users via the Zabbix API.

    .. versionadded:: 2016.3.0

    :param userids: ID (or list of IDs) of the users that use the media
    :param active: whether the media is enabled (0 enabled, 1 disabled)
    :param mediatypeid: ID of the media type used by the media
    :param period: time when notifications can be sent, as a time period
    :param sendto: address, user name or other identifier of the recipient
    :param severity: trigger severities to send notifications about
    :param _connection_user: optional zabbix user (can also be set in opts
        or pillar, see module's docstring)
    :param _connection_password: optional zabbix password (likewise)
    :param _connection_url: optional url of zabbix frontend (likewise)
    :return: IDs of the created media, or {} when no connection could be
        established or the API reply lacked the expected keys.
    """
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'user.addmedia'
            params = {"users": []}
            # Accept either a single user id or a list of them.
            if not isinstance(userids, list):
                userids = [userids]
            for user in userids:
                params['users'].append({"userid": user})
            params['medias'] = [{"active": active,
                                 "mediatypeid": mediatypeid,
                                 "period": period,
                                 "sendto": sendto,
                                 "severity": severity}, ]
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['mediaids']
        else:
            # No connection args: jump to the empty-dict return below.
            raise KeyError
    except KeyError:
        return ret
Add new media to multiple users. .. versionadded:: 2016.3.0 :param userids: ID of the user that uses the media :param active: Whether the media is enabled (0 enabled, 1 disabled) :param mediatypeid: ID of the media type used by the media :param period: Time when the notifications can be sent as a time period :param sendto: Address, user name or other identifier of the recipient :param severity: Trigger severities to send notifications about :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: IDs of the created media. CLI Example: .. code-block:: bash salt '*' zabbix.user_addmedia 4 active=0 mediatypeid=1 period='1-7,00:00-24:00' sendto='support2@example.com' severity=63
def _auditpol_cmd(cmd):
    """Helper function for running the auditpol command.

    Args:
        cmd (str): the auditpol arguments to run

    Returns:
        list: each line of the command's stdout (splitlines)

    Raises:
        CommandExecutionError: if the command returns a non-zero retcode
    """
    ret = salt.modules.cmdmod.run_all(cmd='auditpol {0}'.format(cmd),
                                      python_shell=True)
    if ret['retcode'] == 0:
        return ret['stdout'].splitlines()

    msg = 'Error executing auditpol command: {0}\n'.format(cmd)
    # Bug fix: ret['stdout'] is a string, so the original
    # '\n'.join(ret['stdout']) inserted a newline between every single
    # character of the output. Append the output as-is instead.
    msg += ret['stdout']
    raise CommandExecutionError(msg)
Helper function for running the auditpol command Args: cmd (str): the auditpol command to run Returns: list: A list containing each line of the return (splitlines) Raises: CommandExecutionError: If the command encounters an error
def input_list_parser(infile_list):
    """Always return a list of files with varying input.

    Directory arguments expand to the full paths of their entries; file
    arguments pass through unchanged; anything else is silently dropped.

    >>> input_list_parser(['/path/to/file.txt'])
    ['/path/to/file.txt']

    Args:
        infile_list: List of path arguments (files and/or directories)

    Returns:
        list: Standardized list of files
    """
    final_list_of_files = []
    for path in infile_list:
        if op.isdir(path):
            # Bug fix: the original os.chdir()'d into each directory and
            # glob'd '*', which (a) permanently changed the process cwd,
            # (b) broke later relative-path isfile() checks, and (c)
            # returned bare basenames despite the documented full-path
            # contract. Join instead of chdir.
            final_list_of_files.extend(glob.glob(op.join(path, '*')))
        elif op.isfile(path):
            final_list_of_files.append(path)
    return final_list_of_files
Always return a list of files with varying input. >>> input_list_parser(['/path/to/folder/']) ['/path/to/folder/file1.txt', '/path/to/folder/file2.txt', '/path/to/folder/file3.txt'] >>> input_list_parser(['/path/to/file.txt']) ['/path/to/file.txt'] >>> input_list_parser(['file1.txt']) ['file1.txt'] Args: infile_list: List of arguments Returns: list: Standardized list of files
def dumps(self):
    """Return a dict mapping each table name in self.TABLES to that
    table's dumps() output."""
    result = {}
    for name in self.TABLES:
        result[name] = getattr(self, name).dumps()
    return result
Return a dictionary of current tables
def show_mode_indicator(viewer, tf, corner='ur'):
    """Show or hide a keyboard-mode indicator in a corner of the viewer.

    Parameters
    ----------
    viewer : an ImageView subclass instance
        The viewer whose private canvas hosts the indicator.

    tf : bool
        If True, show the indicator; else remove it if present.

    corner : str
        One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
        The default is 'ur'.
    """
    tag = '_$mode_indicator'
    canvas = viewer.get_private_canvas()
    try:
        # An existing indicator means we either remove it or reposition it.
        indic = canvas.get_object_by_tag(tag)
        if not tf:
            canvas.delete_object_by_tag(tag)
        else:
            indic.corner = corner
    except KeyError:
        # No indicator present yet; create one if requested.
        if tf:
            # Redraw whenever the keyboard mode changes so the indicator
            # always reflects the current mode.
            bm = viewer.get_bindmap()
            bm.add_callback('mode-set', lambda *args: viewer.redraw(whence=3))
            Indicator = canvas.get_draw_class('modeindicator')
            canvas.add(Indicator(corner=corner), tag=tag, redraw=False)
    canvas.update_canvas(whence=3)
Show a keyboard mode indicator in one of the corners. Parameters ---------- viewer : an ImageView subclass instance The viewer whose private canvas will host the indicator. tf : bool If True, show the mark; else remove it if present. corner : str One of 'll', 'lr', 'ul' or 'ur' selecting a corner. The default is 'ur'.
def _init_modules_stub(self, **_):
    """Initialize the App Engine modules stub from the current yaml files.

    Builds a fake dispatcher that knows every configured module/version
    (all mapped to localhost:8080) before initializing the stub, which
    works around InvalidModuleError in testbed unit tests. Implements the
    solution from
    http://stackoverflow.com/questions/28166558/invalidmoduleerror-when-using-testbed-to-unit-test-google-app-engine
    """
    from google.appengine.api import request_info
    # Build name -> versions / default version / hostname maps from the
    # loaded module configuration.
    all_versions = {}
    def_versions = {}
    m2h = {}
    for module in self.configuration.modules:
        # Fall back to App Engine defaults when unset.
        module_name = module._module_name or 'default'
        module_version = module._version or '1'
        all_versions[module_name] = [module_version]
        def_versions[module_name] = module_version
        m2h[module_name] = {module_version: 'localhost:8080'}
    # Install the fake dispatcher so module lookups resolve during tests.
    request_info._local_dispatcher = request_info._LocalFakeDispatcher(
        module_names=list(all_versions),
        module_name_to_versions=all_versions,
        module_name_to_default_versions=def_versions,
        module_name_to_version_to_hostname=m2h)
    self.testbed.init_modules_stub()
Initializes the modules stub based off of your current yaml files Implements solution from http://stackoverflow.com/questions/28166558/invalidmoduleerror-when-using-testbed-to-unit-test-google-app-engine
def push_activations(activations, from_layer, to_layer):
    """Project activations recorded at one layer onto another using the
    prerecorded correlation (covariance) statistics between them."""
    # Whiten the activations with the source layer's inverse covariance.
    inv_cov = layer_inverse_covariance(from_layer)
    decorrelated = np.dot(inv_cov, activations.T).T
    # Re-correlate into the target layer's space.
    cross_cov = layer_covariance(from_layer, to_layer)
    return np.dot(decorrelated, cross_cov)
Push activations from one model to another using prerecorded correlations
def _finalize_metadata(self, node): final = {} for key, val in iter(node.metadata.items()): final[key] = list(val) node.metadata = final return node
Convert node metadata back into a standard dictionary and list.
def eval(self):
    """Expand self.parts into a list of Command objects.

    The commands can be evaluated as their string values. Each command
    tracks its preliminary dependencies, but those values should not be
    depended on for running commands.
    """
    max_size = _get_max_size(self.parts)
    # Seed the parts list and a ticker sized to the largest part group.
    parts_list = _grow([[]], max_size-1)
    counter = Ticker(max_size)
    parts = self.parts[:]
    # Consume parts via _get_parts_list until none remain.
    # NOTE(review): termination relies on _get_parts_list shrinking
    # `parts` -- confirm against the helper's implementation.
    while len(parts) > 0:
        parts_list, counter = _get_parts_list(parts, parts_list, counter)
    commands = []
    for i, parts in enumerate(parts_list):
        # Aliases are 1-based.
        alias = self._get_alias(i+1)
        # Deep-copy so each Command owns its parts independently.
        new_parts = copy.deepcopy(parts)
        commands.append(Command(alias=alias, parts=new_parts))
    return commands
Returns a list of Command objects that can be evaluated as their string values. Each command will track its preliminary dependencies, but these values should not be depended on for running commands.
def move_mouse(self, x, y, screen=0):
    """Move the mouse to a specific location.

    :param x: the target X coordinate on the screen, in pixels.
    :param y: the target Y coordinate on the screen, in pixels.
    :param screen: the screen (number) you want to move on.
    """
    _libxdo.xdo_move_mouse(
        self._xdo, ctypes.c_int(x), ctypes.c_int(y), ctypes.c_int(screen))
Move the mouse to a specific location. :param x: the target X coordinate on the screen in pixels. :param y: the target Y coordinate on the screen in pixels. :param screen: the screen (number) you want to move on.
def _get_names(self): for line in self.content: line = line.strip() if line and re.search("^[a-zA-Z0-9]", line): yield line
Get the names of the objects to include in the table. :returns: The names of the objects to include. :rtype: generator(str)
def key_from_file(filename, passphrase):
    """Calculate a convergent encryption key for *filename*.

    When *passphrase* is None, DEFAULT_HMAC_PASSPHRASE is used instead --
    note that the default leaves you vulnerable to confirmation attacks
    and learn-partial-information attacks.

    :param filename: The file to create a key for.
    :type filename: str
    :param passphrase: Passphrase used to key the hash, or None.
    :type passphrase: str or None
    :returns: A convergent encryption key.
    :rtype: str
    """
    digest = sha256_file(filename)
    key_passphrase = DEFAULT_HMAC_PASSPHRASE if passphrase is None else passphrase
    return keyed_hash(digest, key_passphrase)
Calculate convergent encryption key. This takes a filename and an optional passphrase. If no passphrase is given, a default is used. Using the default passphrase means you will be vulnerable to confirmation attacks and learn-partial-information attacks. :param filename: The filename you want to create a key for. :type filename: str :param passphrase: The passphrase you want to use to encrypt the file. :type passphrase: str or None :returns: A convergent encryption key. :rtype: str
def compose_all(tups):
    """Compose all given tuples together into a single tuple."""
    from . import ast
    # Fold left over the tuples, starting from the empty tuple.
    result = ast.make_tuple({})
    for tup in tups:
        result = result.compose(ast.make_tuple(tup))
    return result
Compose all given tuples together.
def _get_decimal128(data, position, dummy0, dummy1, dummy2):
    """Decode the 16 bytes at *position* as a bson.decimal128.Decimal128.

    Returns the decoded value and the position just past it.
    """
    end = position + 16
    value = Decimal128.from_bid(data[position:end])
    return value, end
Decode a BSON decimal128 to bson.decimal128.Decimal128.
def treynor_ratio(self, benchmark, rf=0.02):
    """Return over `rf` per unit of systematic risk (beta).

    A risk-adjusted performance measure relating the portfolio's excess
    return to its beta against `benchmark`.

    Parameters
    ----------
    benchmark : {pd.Series, TSeries, 1d np.ndarray}
        The single benchmark security to which `self` is compared.
    rf : {float, TSeries, pd.Series}, default 0.02
        Compounded annualized risk-free rate (if float), or a time series
        of periodic risk-free returns.

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If `benchmark` is not one-dimensional after squeezing.
    """
    benchmark = _try_to_squeeze(benchmark)
    if benchmark.ndim > 1:
        raise ValueError("Treynor ratio requires a single benchmark")
    rf = self._validate_rf(rf)
    beta = self.beta(benchmark)
    # (annualized return - risk-free) scaled by systematic risk.
    return (self.anlzd_ret() - rf) / beta
Return over `rf` per unit of systematic risk. A measure of risk-adjusted performance that relates a portfolio's excess returns to the portfolio's beta. [Source: CFA Institute] Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. rf : {float, TSeries, pd.Series}, default 0.02 If float, this represents an *compounded annualized* risk-free rate; 2.0% is the default. If a TSeries or pd.Series, this represents a time series of periodic returns to a risk-free security. To download a risk-free rate return series using 3-month US T-bill yields, see:`pyfinance.datasets.load_rf`. Returns ------- float
def get_pval_field(self):
    """Return the results field used for determining significance.

    Preference order: the user-specified pval_field (normalized to a
    'p_' prefix), then 'p_<method>' when exactly one method is in use,
    otherwise 'p_uncorrected'.
    """
    pval_fld = self.args.pval_field
    # User-specified field name; normalize to the 'p_' prefix carried by
    # result objects.
    if pval_fld is not None:
        if pval_fld[:2] != 'p_':
            pval_fld = 'p_' + pval_fld
    # Exactly one correction method in use: take its field.
    elif len(self.methods) == 1:
        pval_fld = 'p_' + self.methods[0]
    # Multiple methods and none chosen: fall back to uncorrected p-values.
    else:
        pval_fld = 'p_uncorrected'
    if self.results_all:
        # Sanity-check that result objects actually carry this field,
        # listing the available 'p_*' attributes on failure.
        assert hasattr(next(iter(self.results_all)), pval_fld), \
            'NO PVAL({P}). EXPECTED ONE OF: {E}'.format(
                P=self.args.pval_field,
                E=" ".join([k for k in dir(next(iter(self.results_all))) if k[:2] == 'p_']))
    return pval_fld
Get 'p_uncorrected' or the user-specified field for determining significant results.
def count_items(self, unique=True):
    """Count items in the cart.

    Parameters
    ----------
    unique : bool-convertible, optional
        If truthy (default), count distinct items; otherwise sum the
        quantities of all items.

    Returns
    -------
    int
    """
    if unique:
        return len(self.items)
    # Sum quantities with a generator expression instead of building an
    # intermediate list.
    return sum(item.quantity for item in self.items.values())
Count items in the cart. Parameters ---------- unique : bool-convertible, optional Returns ------- int If `unique` is truthy, then the result is the number of items in the cart. Otherwise, it's the sum of all item quantities.
def _save_file(self, data): if platform.system() == 'Windows': with open(self.file, "w") as outfile: json.dump(data, outfile) else: newpath = self.file + '.new' with open(newpath, "w") as outfile: json.dump(data, outfile) os.rename( os.path.realpath(newpath), os.path.realpath(self.file) )
Attempt to atomically save file by saving and then moving into position The goal is to make it difficult for a crash to corrupt our data file since the move operation can be made atomic if needed on mission critical filesystems.
def resource_listing(cls, request) -> [(200, 'Ok', ResourceListingModel)]:
    """Return the list of all available (non-private) resources.

    Resources are filtered by their `private` flag, so querying this as
    different users may bear different results.
    """
    apis = [api.get_swagger_fragment() for api in Api if not api.private]
    # NOTE(review): Respond's result is discarded here; if Respond builds
    # and returns a response object rather than sending/raising it, this
    # call needs a `return` -- confirm against the framework's contract.
    Respond(200, {
        'apiVersion': cls.api.version,
        'swaggerVersion': cls.api.swagger_version,
        'apis': apis
    })
Return the list of all available resources on the system. Resources are filtered according to the permission system, so querying this resource as different users may bare different results.
def load(filepath, update=True):
    """Load eggs, fried eggs and example data.

    Parameters
    ----------
    filepath : str
        Location of the file, or one of the bundled dataset names
        'automatic', 'example', 'manual', 'naturalistic'.
    update : bool
        If True, updates the egg to the latest format.

    Returns
    -------
    data : quail.Egg or quail.FriedEgg
        Data loaded from disk.

    Raises
    ------
    ValueError
        If `filepath` is neither a bundled dataset name nor an
        .egg/.fegg file.
    """
    data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
    if filepath == 'automatic' or filepath == 'example':
        return load_egg(os.path.join(data_dir, 'automatic.egg'))
    elif filepath == 'manual':
        return load_egg(os.path.join(data_dir, 'manual.egg'), update=False)
    elif filepath == 'naturalistic':
        # Bug fix: this branch previously computed the path but fell
        # through without returning, so load('naturalistic') gave None.
        return load_egg(os.path.join(data_dir, 'naturalistic.egg'))
    elif filepath.split('.')[-1] == 'egg':
        return load_egg(filepath, update=update)
    elif filepath.split('.')[-1] == 'fegg':
        return load_fegg(filepath, update=False)
    else:
        raise ValueError('Could not load file.')
Loads eggs, fried eggs ands example data Parameters ---------- filepath : str Location of file update : bool If true, updates egg to latest format Returns ---------- data : quail.Egg or quail.FriedEgg Data loaded from disk
def longitude(self, value=0.0):
    """Set IDD Field `longitude`.

    - is West, + is East; degree minutes are represented in decimal
    (i.e. 30 minutes is .5).

    Args:
        value (float): value for IDD Field `longitude`.
            Unit: deg. Default: 0.0. Must satisfy -180.0 <= value <= 180.0.
            None is stored unchecked as a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    # Guard clause: None bypasses all validation.
    if value is None:
        self._longitude = value
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `longitude`'.format(value))
    if value < -180.0:
        raise ValueError('value need to be greater or equal -180.0 '
                         'for field `longitude`')
    if value > 180.0:
        raise ValueError('value need to be smaller 180.0 '
                         'for field `longitude`')
    self._longitude = value
Corresponds to IDD Field `longitude` - is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5) Args: value (float): value for IDD Field `longitude` Unit: deg Default value: 0.0 value >= -180.0 value <= 180.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def _http_req_apply_default_headers(self, request_headers, content_type, body):
    """Set default values for common HTTP request headers.

    Fills in Accept (all supported content types), Content-Type (only
    when a body is present), and Correlation-Id (from this object or the
    inbound request). Existing header values are never overwritten.

    :param dict request_headers: The HTTP request headers (may be falsy)
    :param content_type: The mime-type used in the request/response
    :type content_type: :py:class:`ietfparse.datastructures.ContentType`
        or str
    :param mixed body: The request body
    :rtype: dict
    """
    if not request_headers:
        request_headers = {}
    request_headers.setdefault(
        'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES]))
    if body:
        # NOTE(review): str(None) is 'None', which is truthy, so the
        # msgpack fallback only triggers for empty-string content types --
        # confirm this is intended.
        request_headers.setdefault(
            'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK))
    # Propagate a correlation id: prefer one set on this object, then the
    # one on the inbound request, if any.
    if hasattr(self, 'correlation_id'):
        request_headers.setdefault(
            'Correlation-Id', self.correlation_id)
    elif hasattr(self, 'request') and \
            self.request.headers.get('Correlation-Id'):
        request_headers.setdefault(
            'Correlation-Id', self.request.headers['Correlation-Id'])
    return request_headers
Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict
def json_encoder_default(obj):
    """Handle more data types than the default JSON encoder.

    Unwraps single-element numpy scalars to Python numbers, serializes a
    `set` or a `numpy.array` as a `list`, and honors `to_native()` /
    `tolist()` hooks. Anything unhandled is returned unchanged.

    Example usage: ``json.dumps(obj, default=json_encoder_default)``
    """
    numpy_like = (np is not None and hasattr(obj, 'size')
                  and hasattr(obj, 'dtype'))
    if numpy_like and obj.size == 1:
        # Unwrap 0-d / single-element numpy values to plain numbers.
        if np.issubdtype(obj.dtype, np.integer):
            return int(obj)
        if np.issubdtype(obj.dtype, np.floating):
            return float(obj)
    if isinstance(obj, set):
        return list(obj)
    if hasattr(obj, 'to_native'):
        return obj.to_native()
    if hasattr(obj, 'tolist') and hasattr(obj, '__iter__'):
        return obj.tolist()
    return obj
Handle more data types than the default JSON encoder. Specifically, it treats a `set` and a `numpy.array` like a `list`. Example usage: ``json.dumps(obj, default=json_encoder_default)``
def set_prior_probs(self, statements):
    """Set the prior belief probabilities for de-duplicated Statements.

    Each Statement's prior probability is calculated by this object's
    scorer from the Statement's Evidence, and its `belief` attribute is
    updated in place.

    Parameters
    ----------
    statements : list[indra.statements.Statement]
        Statements whose belief scores are to be calculated.
    """
    self.scorer.check_prior_probs(statements)
    for statement in statements:
        statement.belief = self.scorer.score_statement(statement)
Sets the prior belief probabilities for a list of INDRA Statements. The Statements are assumed to be de-duplicated. In other words, each Statement in the list passed to this function is assumed to have a list of Evidence objects that support it. The prior probability of each Statement is calculated based on the number of Evidences it has and their sources. Parameters ---------- statements : list[indra.statements.Statement] A list of INDRA Statements whose belief scores are to be calculated. Each Statement object's belief attribute is updated by this function.
def values(self, predicate=None):
    """Return a list clone of this map's values, optionally filtered.

    **Warning: the list is NOT backed by the map**, so changes to the map
    are not reflected in the list, and vice-versa.

    :param predicate: (Predicate), predicate to filter the entries
        (optional).
    :return: (Sequence), a list of clones of the values contained in
        this map.
    """
    if predicate:
        # Serialize the predicate and use the filtered codec.
        predicate_data = self._to_data(predicate)
        return self._encode_invoke(map_values_with_predicate_codec,
                                   predicate=predicate_data)
    else:
        return self._encode_invoke(map_values_codec)
Returns a list clone of the values contained in this map or values of the entries which are filtered with the predicate if provided. **Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa.** :param predicate: (Predicate), predicate to filter the entries (optional). :return: (Sequence), a list of clone of the values contained in this map. .. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
def _retryable_write(self, retryable, func, session):
    """Internal retryable write helper.

    Runs `func` under retryable-write semantics inside a session scope --
    presumably `_tmp_session` yields `session` unchanged when one is
    supplied, or a short-lived implicit session otherwise (confirm in
    its definition).
    """
    with self._tmp_session(session) as s:
        return self._retry_with_session(retryable, func, s, None)
Internal retryable write helper.
def connection(self):
    """Return an established connection, creating it lazily.

    The connection class is chosen from the BOSH service URL scheme
    (http or https) and cached on self._connection for reuse.
    """
    if self._connection:
        return self._connection
    self.log.debug('Initializing connection to %s' % (self.bosh_service.netloc))
    scheme = self.bosh_service.scheme
    if scheme == 'http':
        connection_class = httplib.HTTPConnection
    elif scheme == 'https':
        connection_class = httplib.HTTPSConnection
    else:
        raise Exception('Invalid URL scheme %s' % scheme)
    self._connection = connection_class(self.bosh_service.netloc, timeout=10)
    self.log.debug('Connection initialized')
    return self._connection
Returns an established connection
def make_simple_gt_aware_merged_vcf_with_no_combinations(self, ref_seq):
    """Merge all records in this cluster into a single record, using the
    called allele of each variant (genotype-aware).

    Assumes one ALT per variant. If any record intersects the running
    merge, the cluster is left unchanged (merging is aborted).

    Args:
        ref_seq: the reference sequence the records lie on.
    """
    # Nothing to merge for empty or single-record clusters.
    if len(self) <= 1:
        return
    merged_vcf_record = self.vcf_records[0]
    for i in range(1, len(self.vcf_records), 1):
        if self.vcf_records[i].intersects(merged_vcf_record):
            # Overlapping records cannot be merged simply; bail out
            # without modifying self.vcf_records.
            return
        else:
            merged_vcf_record = merged_vcf_record.gt_aware_merge(self.vcf_records[i], ref_seq)
    self.vcf_records = [merged_vcf_record]
Does a simple merging of all variants in this cluster. Assumes one ALT in each variant. Uses the called allele for each variant, making one new vcf_record that has all the variants put together
def _translate_config_path(location):
    """Translate a location into a full path per asset specification.

    `location` may be 'package:path' for package-relative paths, or
    simply a plain path.

    :param str location: resource location
    :returns: fullpath
    :rtype: str
    """
    package_name, filename = resolve_asset_spec(location.strip())
    if not package_name:
        # Plain path: nothing to resolve.
        return filename
    package = __import__(package_name)
    return os.path.join(package_path(package), filename)
Translate location into fullpath according asset specification. Might be package:path for package related paths, or simply path :param str location: resource location :returns: fullpath :rtype: str
def get_last_month_range():
    """Return the first and last day of the previous complete month.

    :returns: A tuple of two date objects: (first_day, last_day) of the
        previous month.
    """
    # The day before this month's first day is the last day of last month.
    first_of_this_month = snap_to_beginning_of_month(date.today())
    last_day = first_of_this_month - timedelta(days=1)
    first_day = snap_to_beginning_of_month(last_day)
    return (first_day, last_day)
Gets the date for the first and the last day of the previous complete month. :returns: A tuple containing two date objects, for the first and the last day of the month respectively.
def filter(self, qs, value):
    """Convert the Relay global ID filter value to a primary key, then
    delegate the actual filtering to the parent class."""
    pk = None
    if value is not None:
        _, pk = from_global_id(value)
    return super(GlobalIDFilter, self).filter(qs, pk)
Convert the filter value to a primary key before filtering
def extract_lambda_package(self, package_name, path):
    """Extract the precompiled lambda package for the current runtime
    into `path`, replacing any previous extraction.

    Assumes the package exists in lambda_packages for self.runtime.
    """
    lambda_package = lambda_packages[package_name][self.runtime]
    # Drop any stale copy first so removed files do not linger.
    shutil.rmtree(os.path.join(path, package_name), ignore_errors=True)
    # Bug fix: close the tarfile when done -- the original leaked the
    # open file handle.
    with tarfile.open(lambda_package['path'], mode="r:gz") as tar:
        for member in tar.getmembers():
            tar.extract(member, path)
Extracts the lambda package into a given path. Assumes the package exists in lambda packages.
def has_type(type, names):
    """Return `type` (or a base type of it) whose name is in `names`.

    Searches `type` itself, its embedded 'type' substatements, and --
    unless the typedef chain is marked circular -- the type its typedef
    resolves to. Returns None if no match is found.
    """
    if type.arg in names:
        return type
    # Recurse into member types (e.g. union members).
    for t in type.search('type'):
        r = has_type(t, names)
        if r is not None:
            return r
    if not hasattr(type, 'i_typedef'):
        return None
    # Follow the typedef, guarding against circular definitions.
    if (type.i_typedef is not None and
            hasattr(type.i_typedef, 'i_is_circular') and
            type.i_typedef.i_is_circular == False):
        t = type.i_typedef.search_one('type')
        if t is not None:
            return has_type(t, names)
    return None
Return type with name if `type` has name as one of its base types, and name is in the `names` list. otherwise, return None.
def wait(self, progress=None):
    """Allow background threads to process until the task queue is empty.

    If there are no threads, in theory the queue should always be empty
    as processing happens immediately on the main thread.

    Optional:
        progress: (bool or str) show a tqdm progress bar, optionally
            with a description if a string is provided.

    Returns:
        self (for chaining)

    Raises:
        The first exception received from threads.
    """
    if not len(self._threads):
        return self
    desc = None
    if type(progress) is str:
        desc = progress
    last = self._inserted
    with tqdm(total=self._inserted, disable=(not progress), desc=desc) as pbar:
        # Poll until the queue drains, advancing the bar by however many
        # items were consumed since the last check.
        while not self._queue.empty():
            size = self._queue.qsize()
            delta = last - size
            if delta != 0:
                pbar.update(delta)
            last = size
            self._check_errors()
            time.sleep(0.1)
        # Wait for in-flight tasks (dequeued but not yet task_done).
        self._queue.join()
        self._check_errors()
        # Account for items consumed between the last poll and join().
        final = self._inserted - last
        if final:
            pbar.update(final)
    # Reset the insert counter only once everything has been processed.
    if self._queue.empty():
        self._inserted = 0
    return self
Allow background threads to process until the task queue is empty. If there are no threads, in theory the queue should always be empty as processing happens immediately on the main thread. Optional: progress: (bool or str) show a tqdm progress bar optionally with a description if a string is provided Returns: self (for chaining) Raises: The first exception received from threads
def find_rotation_scale(im0, im1, isccs=False):
    """Compare the images and return the best guess for the rotation
    angle and scale difference.

    Parameters
    ----------
    im0: 2d array
        First image
    im1: 2d array
        Second image
    isccs: boolean, default False
        Set to True if the images are already DFT and in CCS
        representation

    Returns
    -------
    angle: number
        The angle difference
    scale: number
        The scale difference

    Notes
    -----
    Uses find_shift_dft on the log-polar spectra: rotation becomes a
    shift along the angular axis and scaling a shift along the
    log-radius axis.
    """
    im0 = np.asarray(im0, dtype=np.float32)
    im1 = np.asarray(im1, dtype=np.float32)
    truesize = None
    # For CCS input, remember the true size and work on the centered
    # magnitude spectra.
    if isccs:
        truesize = im0.shape
        im0 = centered_mag_sq_ccs(im0)
        im1 = centered_mag_sq_ccs(im1)
    # Log-polar spectra; im0 is sampled on the same (nangle, radiimax)
    # grid as im1 so the two are comparable.
    lp1, log_base = polar_fft(im1, logpolar=True, isshiftdft=isccs,
                              logoutput=True, truesize=truesize)
    lp0, log_base = polar_fft(im0, logpolar=True, isshiftdft=isccs,
                              logoutput=True, truesize=truesize,
                              nangle=lp1.shape[0], radiimax=lp1.shape[1])
    # A shift in log-polar space corresponds to rotation (rows) and
    # scaling (columns).
    angle, scale = find_shift_dft(lp0, lp1)
    # Map the row shift onto [0, pi) radians; undo the log of the radius.
    angle *= np.pi / lp1.shape[0]
    scale = log_base ** (scale)
    return angle, scale
Compares the images and return the best guess for the rotation angle, and scale difference. Parameters ---------- im0: 2d array First image im1: 2d array Second image isccs: boolean, default False Set to True if the images are alredy DFT and in CCS representation Returns ------- angle: number The angle difference scale: number The scale difference Notes ----- Uses find_shift_dft