code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def maybe_from_tuple(tup_or_range):
    """Convert a tuple into a range, passing ranges through unchanged.

    Useful to guarantee range semantics (``.start``/``.stop`` access,
    constant-time containment checks) regardless of the input form.

    Parameters
    ----------
    tup_or_range : tuple or range
        A tuple to hand to from_tuple, or a range to return as-is.

    Returns
    -------
    range
        The input converted to (or already) a range.

    Raises
    ------
    ValueError
        If the input is neither a tuple nor a range (from_tuple may also
        raise for tuples of the wrong length).
    """
    if isinstance(tup_or_range, range):
        return tup_or_range
    if isinstance(tup_or_range, tuple):
        return from_tuple(tup_or_range)
    raise ValueError(
        'maybe_from_tuple expects a tuple or range, got %r: %r' % (
            type(tup_or_range).__name__,
            tup_or_range,
        ),
    )
Convert a tuple into a range but pass ranges through silently. This is useful to ensure that input is a range so that attributes may be accessed with `.start`, `.stop` or so that containment checks are constant time. Parameters ---------- tup_or_range : tuple or range A tuple to pass to from_tuple or a range to return. Returns ------- range : range The input to convert to a range. Raises ------ ValueError Raised when the input is not a tuple or a range. ValueError is also raised if the input is a tuple whose length is not 2 or 3.
def authorization_code(self, code, redirect_uri):
    """Retrieve an access token via the ``authorization_code`` grant.

    https://tools.ietf.org/html/rfc6749#section-4.1.3

    :param str code: authorization code received from the authorization server.
    :param str redirect_uri: identical value of the "redirect_uri" parameter
        used in the authorization request.
    :rtype: dict
    :return: access token response.
    """
    return self._token_request(
        grant_type='authorization_code',
        code=code,
        redirect_uri=redirect_uri,
    )
Retrieve access token by `authorization_code` grant. https://tools.ietf.org/html/rfc6749#section-4.1.3 :param str code: The authorization code received from the authorization server. :param str redirect_uri: the identical value of the "redirect_uri" parameter in the authorization request. :rtype: dict :return: Access token response
def get(self, sid):
    """Construct a TranscriptionContext for one resource.

    :param sid: unique string identifying the resource.
    :rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
    """
    return TranscriptionContext(
        self._version,
        account_sid=self._solution['account_sid'],
        sid=sid,
    )
Constructs a TranscriptionContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext :rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
def _check_devices(self):
    "Enumerate OpenVR tracked devices and lazily build meshes for new ones"
    for index in range(1, len(self.poses)):
        pose = self.poses[index]
        # Only devices that are both connected and tracking are of interest.
        if not pose.bDeviceIsConnected:
            continue
        if not pose.bPoseIsValid:
            continue
        if self.show_controllers_only:
            device_class = openvr.VRSystem().getTrackedDeviceClass(index)
            if device_class != openvr.TrackedDeviceClass_Controller:
                continue
        model_name = openvr.VRSystem().getStringTrackedDeviceProperty(
            index, openvr.Prop_RenderModelName_String)
        if model_name not in self.meshes:
            # First sighting of this render model: create its mesh.
            self.meshes[model_name] = TrackedDeviceMesh(model_name)
Enumerate OpenVR tracked devices and check whether any need to be initialized
def _get_container_infos(config, container):
    """Inspect ``container`` and return its info dict, or None on any failure.

    container
        Image Id / grain name
    return: dict
    """
    client = _get_client(config)
    try:
        # best-effort: any inspection error is treated as "no info"
        return _set_id(client.inspect_container(container))
    except Exception:
        return None
Get container infos container Image Id / grain name return: dict
def fromProfileName(cls, name):
    """Return an `Origin` for the configuration profile named ``name``.

    :see: `ProfileStore`.
    """
    return cls(bones.SessionAPI.fromProfileName(name))
Return an `Origin` from a given configuration profile name. :see: `ProfileStore`.
def _read(path, encoding="utf-8", comment=";;;"):
    """Yield non-empty, non-comment lines, decoded to Unicode.

    ``path`` may be a filesystem path, a newline-separated string, or any
    iterable of lines.
    """
    if not path:
        return
    if isinstance(path, basestring) and os.path.exists(path):
        # A real file on disk: open with the right API for this interpreter.
        if PY2:
            stream = codecs.open(path, 'r', encoding='utf-8')
        else:
            stream = open(path, 'r', encoding='utf-8')
    elif isinstance(path, basestring):
        stream = path.splitlines()
    else:
        stream = path
    for i, raw in enumerate(stream):
        # Drop a UTF-8 BOM from the very first line when it arrived as bytes.
        if i == 0 and isinstance(raw, binary_type):
            raw = raw.strip(codecs.BOM_UTF8)
        text = decode_utf8(raw.strip(), encoding)
        if text and not (comment and text.startswith(comment)):
            yield text
Returns an iterator over the lines in the file at the given path, stripping comments and decoding each line to Unicode.
def check_version(current_version: str):
    """Check periodically for a new release.

    Loops forever: attempts one version check, reports HTTP or value
    errors in red, then sleeps CHECK_VERSION_INTERVAL before retrying.
    """
    app_version = parse_version(current_version)
    while True:
        try:
            _do_check_version(app_version)
        except requests.exceptions.HTTPError as herr:
            click.secho('Error while checking for version', fg='red')
            print(herr)
        except ValueError as verr:
            click.secho('Error while checking the version', fg='red')
            print(verr)
        finally:
            # Sleep even after an error so the polling cadence is kept.
            gevent.sleep(CHECK_VERSION_INTERVAL)
Check periodically for a new release
def get_swift_codename(version):
    """Determine the OpenStack codename that corresponds to a swift version."""
    matches = [name for name, vers in six.iteritems(SWIFT_CODENAMES)
               if version in vers]
    if len(matches) > 1:
        # Ambiguous: consult apt policy to see which codename/release the
        # host is actually configured for, newest first.
        for name in reversed(matches):
            release = [k for k, v in six.iteritems(UBUNTU_OPENSTACK_RELEASE)
                       if name in v]
            policy = subprocess.check_output(['apt-cache', 'policy', 'swift'])
            if six.PY3:
                policy = policy.decode('UTF-8')
            if name in policy or release[0] in policy:
                return name
    elif len(matches) == 1:
        return matches[0]
    # Fall back to matching on the major.minor prefix of the version.
    m = re.match(r'^(\d+)\.(\d+)', version)
    if m:
        prefix = m.group(0)
        for name, vers in six.iteritems(SWIFT_CODENAMES):
            for rel_version in vers:
                if rel_version.startswith(prefix):
                    return name
    return None
Determine OpenStack codename that corresponds to swift version.
def reverseCommit(self):
    """Restore the original widget text together with its styling.

    The original text carries styling information; the replaced text did not.
    """
    original_text = self.oldText
    self.baseClass.setText(original_text)
    # Re-apply the saved style bytes from the start of the document.
    self.qteWidget.SCISetStylingEx(0, 0, self.style)
Replace the current widget content with the original text. Note that the original text has styling information available, whereas the new text does not.
def schema_import(conn, dbpath):
    """Import profiles from the database at ``dbpath`` into ``conn``.

    Existing profiles in the target are never overwritten: source rows whose
    names collide with target rows are skipped (INSERT OR IGNORE), as are
    rows with NULL data.

    :param conn: SQLite3 connection to the destination database.
    :param dbpath: Filesystem path of the source SQLite3 database.
    """
    conn.execute("ATTACH DATABASE ? AS source", (str(dbpath),))
    conn.execute(
        "INSERT OR IGNORE INTO profiles (name, data)"
        " SELECT name, data FROM source.profiles"
        " WHERE data IS NOT NULL")
    conn.commit()
    conn.execute("DETACH DATABASE source")
Import profiles from another database. This does not overwrite existing profiles in the target database. Profiles in the source database that share names with those in the target database are ignored. :param conn: A connection to an SQLite3 database into which to copy profiles. :param dbpath: The filesystem path to the source SQLite3 database.
def close(self, status=1000, reason=u''):
    """Send a Close frame (status code + reason) to the client.

    The underlying socket is only closed once the client acknowledges the
    Close frame; ``self.closed`` is set regardless of send success.
    """
    try:
        if self.closed is False:
            payload = bytearray(struct.pack("!H", status))
            if _check_unicode(reason):
                payload.extend(reason.encode('utf-8'))
            else:
                payload.extend(reason)
            self._send_message(False, CLOSE, payload)
    finally:
        self.closed = True
Send Close frame to the client. The underlying socket is only closed when the client acknowledges the Close frame. status is the closing identifier. reason is the reason for the close.
def _get_annotation_heading(self, handler, route, heading=None): if hasattr(handler, '_doctor_heading'): return handler._doctor_heading heading = '' handler_path = str(handler) try: handler_file_name = handler_path.split('.')[-2] except IndexError: handler_file_name = 'handler' if handler_file_name.startswith('handler'): class_name = handler_path.split('.')[-1] internal = False for word in CAMEL_CASE_RE.findall(class_name): if word == 'Internal': internal = True continue elif word.startswith(('List', 'Handler', 'Resource')): break heading += '%s ' % (word,) if internal: heading = heading.strip() heading += ' (Internal)' else: heading = ' '.join(handler_file_name.split('_')).title() if 'internal' in route: heading += ' (Internal)' return heading.strip()
Returns the heading text for an annotation. Attempts to get the name of the heading from the handler attribute `schematic_title` first. If `schematic_title` it is not present, it attempts to generate the title from the class path. This path: advertiser_api.handlers.foo_bar.FooListHandler would translate to 'Foo Bar' If the file name with the resource is generically named handlers.py or it doesn't have a full path then we attempt to get the resource name from the class name. So FooListHandler and FooHandler would translate to 'Foo'. If the handler class name starts with 'Internal', then that will be appended to the heading. So InternalFooListHandler would translate to 'Foo (Internal)' :param mixed handler: The handler class. Will be a flask resource class :param str route: The route to the handler. :returns: The text for the heading as a string.
def scene_add(frames):
    """Parse a scene.add message.

    Returns (animation_id, name, color, velocity, config) with the color
    bytes normalized to [0, 1] floats and velocity scaled to units/second.
    """
    reader = MessageReader(frames)
    msg = reader.string("command").uint32("animation_id").string("name").uint8_3("color").uint32("velocity").string("config").get()
    if msg.command != "scene.add":
        raise MessageParserError("Command is not 'scene.add'")
    color = np.array([component / 255 for component in msg.color[:3]])
    return (msg.animation_id, msg.name, color, msg.velocity / 1000, msg.config)
parse a scene.add message
def WriteHuntOutputPluginsStates(self, hunt_id, states, cursor=None):
    """Writes hunt output plugin states for a given hunt.

    One row is inserted per state, keyed by (hunt_id, plugin index); an
    integrity error is mapped to UnknownHuntError.
    """
    columns = ", ".join(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS)
    placeholders = mysql_utils.Placeholders(
        2 + len(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS))
    hunt_id_int = db_utils.HuntIDToInt(hunt_id)
    for index, state in enumerate(states):
        query = ("INSERT INTO hunt_output_plugins_states "
                 "(hunt_id, plugin_id, {columns}) "
                 "VALUES {placeholders}".format(
                     columns=columns, placeholders=placeholders))
        descriptor = state.plugin_descriptor
        args = [hunt_id_int, index, descriptor.plugin_name]
        if descriptor.plugin_args is None:
            args.append(None)
        else:
            args.append(descriptor.plugin_args.SerializeToString())
        args.append(state.plugin_state.SerializeToString())
        try:
            cursor.execute(query, args)
        except MySQLdb.IntegrityError as e:
            raise db.UnknownHuntError(hunt_id=hunt_id, cause=e)
Writes hunt output plugin states for a given hunt.
def get_samples(self, init_points_count):
    """Generate a design mixing random non-continuous values with a multigrid
    over the continuous dimensions.

    May return fewer points than requested: the total is the closest n^d
    not exceeding the selected amount of points.
    """
    count = self._adjust_init_points_count(init_points_count)
    samples = np.empty((count, self.space.dimensionality))
    random_design = RandomDesign(self.space)
    random_design.fill_noncontinous_variables(samples)
    if self.space.has_continuous():
        grid = multigrid(self.space.get_continuous_bounds(),
                         self.data_per_dimension)
        samples[:, self.space.get_continuous_dims()] = grid
    return samples
This method may return less points than requested. The total number of generated points is the smallest closest integer of n^d to the selected amount of points.
def _load(self, **kwargs):
    """GET this resource from its container URI and build an instance.

    Wrapped with ``load``; override in a subclass to customize.

    :raises URICreationCollision: if a 'uri' is already recorded, since it
        must not be reassigned.
    """
    if 'uri' in self._meta_data:
        error = "There was an attempt to assign a new uri to this "\
                "resource, the _meta_data['uri'] is %s and it should"\
                " not be changed." % (self._meta_data['uri'])
        raise URICreationCollision(error)
    requests_params = self._handle_requests_params(kwargs)
    self._check_load_parameters(**kwargs)
    kwargs['uri_as_parts'] = True
    session = self._meta_data['bigip']._meta_data['icr_session']
    base_uri = self._meta_data['container']._meta_data['uri']
    kwargs.update(requests_params)
    # Collapse boolean pairs and escape python keywords before the request.
    for key1, key2 in self._meta_data['reduction_forcing_pairs']:
        kwargs = self._reduce_boolean_pair(kwargs, key1, key2)
    kwargs = self._check_for_python_keywords(kwargs)
    response = session.get(base_uri, **kwargs)
    return self._produce_instance(response)
wrapped with load, override that in a subclass to customize
def s_find_first(pred, first, lst):
    """Return ``first`` when ``pred(first)`` holds; otherwise recur on the
    unquoted head of ``lst``; None when the list is exhausted.

    :param pred: a predicate.
    :param first: a promise.
    :param lst: a list of quoted promises.
    :return: the first element for which the predicate is true, or None.
    """
    if pred(first):
        return first
    if not lst:
        return None
    head, tail = lst[0], lst[1:]
    return s_find_first(pred, unquote(head), tail)
Evaluate `first`; if predicate `pred` succeeds on the result of `first`, return the result; otherwise recur on the first element of `lst`. :param pred: a predicate. :param first: a promise. :param lst: a list of quoted promises. :return: the first element for which predicate is true.
def _getDefaultCombinedL4Params(self, numInputBits, inputSize, numExternalInputBits, externalInputSize, L2CellCount): sampleSize = numExternalInputBits + numInputBits activationThreshold = int(max(numExternalInputBits, numInputBits) * .6) minThreshold = activationThreshold return { "columnCount": inputSize, "cellsPerColumn": 16, "learn": True, "learnOnOneCell": False, "initialPermanence": 0.41, "connectedPermanence": 0.6, "permanenceIncrement": 0.1, "permanenceDecrement": 0.02, "minThreshold": minThreshold, "basalPredictedSegmentDecrement": 0.001, "apicalPredictedSegmentDecrement": 0.0, "reducedBasalThreshold": int(activationThreshold*0.6), "activationThreshold": activationThreshold, "sampleSize": sampleSize, "implementation": "ApicalTiebreak", "seed": self.seed, "basalInputWidth": inputSize*16 + externalInputSize, "apicalInputWidth": L2CellCount, }
Returns a good default set of parameters to use in a combined L4 region.
def get_attribute(self, node, column):
    """Return the Node attribute associated to the given column, or None.

    Column 0 (the node name column) and out-of-range columns yield None.

    :param node: Node (AbstractCompositeNode or GraphModelNode).
    :param column: Column index.
    :type column: int
    :return: Attribute or None.
    """
    if 0 < column < len(self.__horizontal_headers):
        # list() so the positional lookup also works on Python 3, where
        # dict.keys() returns a non-indexable view.
        header = list(self.__horizontal_headers.keys())[column]
        return node.get(self.__horizontal_headers[header], None)
Returns the given Node attribute associated to the given column. :param node: Node. :type node: AbstractCompositeNode or GraphModelNode :param column: Column. :type column: int :return: Attribute. :rtype: Attribute
def stop_playback(self):
    """Flush pending audio, stop the sink, and mark playback inactive."""
    sink = self._sink
    sink.flush()
    sink.stop()
    self._playing = False
Stop playback from the audio sink.
def instruction_size(op, opc):
    """Size in bytes of an ``op`` instruction for opcode module ``opc``.

    From Python 3.6 every instruction is one 2-byte word. Before 3.6 an
    instruction is 1 byte, plus 2 operand bytes when ``op`` takes an
    argument (3 bytes total).
    """
    if opc.version >= 3.6:
        return 2
    return 1 if op < opc.HAVE_ARGUMENT else 3
For a given opcode, `op`, in opcode module `opc`, return the size, in bytes, of an `op` instruction. This is the size of the opcode (1 byte) and any operand it has. In Python before version 3.6 this will be either 1 or 3 bytes. In Python 3.6 or later, it is 2 bytes or a "word".
def convert(cls, obj, parent):
    """Convert ``obj`` to its registered tracked type, if any.

    When ``type(obj)`` appears in the registered type mapping, the object
    is wrapped in the matching tracked type, its ``parent`` set, and the
    wrapper returned; otherwise ``obj`` is returned unchanged.
    """
    tracked_type = cls._type_mapping.get(type(obj))
    if tracked_type is None:
        return obj
    wrapped = tracked_type(obj)
    wrapped.parent = parent
    return wrapped
Converts objects to registered tracked types This checks the type of the given object against the registered tracked types. When a match is found, the given object will be converted to the tracked type, its parent set to the provided parent, and returned. If its type does not occur in the registered types mapping, the object is returned unchanged.
def do_cleanup(cleanup):
    """Best-effort removal of clone domain leftovers.

    Each entry is a dict with 'what' ('domain' or 'volume') and 'item' (the
    matching libvirt object). Domains are force-stopped and undefined,
    volumes deleted; all libvirt errors are ignored.

    :param cleanup: list of {'what': ..., 'item': ...} dicts.
    """
    log.info('Cleaning up after exception')
    for leftover in cleanup:
        what, item = leftover['what'], leftover['item']
        if what == 'domain':
            log.info('Cleaning up %s %s', what, item.name())
            try:
                item.destroy()
                log.debug('%s %s forced off', what, item.name())
            except libvirtError:
                pass
            try:
                item.undefineFlags(
                    libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE +
                    libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA +
                    libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
                log.debug('%s %s undefined', what, item.name())
            except libvirtError:
                pass
        if what == 'volume':
            try:
                item.delete()
                log.debug('%s %s cleaned up', what, item.name())
            except libvirtError:
                pass
Clean up clone domain leftovers as much as possible. Extra robust clean up in order to deal with some small changes in libvirt behavior over time. Passed in volumes and domains are deleted, any errors are ignored. Used when cloning/provisioning a domain fails. :param cleanup: list containing dictionaries with two keys: 'what' and 'item'. If 'what' is domain the 'item' is a libvirt domain object. If 'what' is volume then the item is a libvirt volume object. Returns: none .. versionadded: 2017.7.3
def update(self):
    """Redraw the displayed image for the current time frame."""
    if self.single_channel:
        frame = self.data[self.ind, :, :]
    else:
        frame = self.data[self.ind, :, :, :]
    self.im.set_data(frame)
    self.ax.set_ylabel('time frame %s' % self.ind)
    self.im.axes.figure.canvas.draw()
Updates image to be displayed with new time frame.
def register_views(*args):
    """Register a view for each resource found in the pages config."""
    config = args[0]
    models_config = config.get_settings()[CONFIG_MODELS]
    for resource in resources_of_config(models_config):
        # Skip bare table resources that have no model to attach to.
        if hasattr(resource, '__table__') and not hasattr(resource, 'model'):
            continue
        resource.model.pyramid_pages_template = resource.template
        config.add_view(resource.view,
                        attr=resource.attr,
                        route_name=PREFIX_PAGE,
                        renderer=resource.template,
                        context=resource,
                        permission=PREFIX_PAGE)
Registration view for each resource from config.
def statuses_show(self, id, trim_user=None, include_my_retweet=None,
                  include_entities=None):
    """Return a single Tweet, specified by the id parameter.

    https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid

    :param str id: (*required*) numerical ID of the desired tweet.
    :param bool trim_user: trim the user object to just its numerical ID.
    :param bool include_my_retweet: annotate tweets the authenticated user
        has retweeted with a ``current_user_retweet`` node.
    :param bool include_entities: when False, omit the ``entities`` node.
    :returns: a tweet dict.
    """
    params = {'id': id}
    for name, value in (('trim_user', trim_user),
                        ('include_my_retweet', include_my_retweet),
                        ('include_entities', include_entities)):
        set_bool_param(params, name, value)
    return self._get_api('statuses/show.json', params)
Returns a single Tweet, specified by the id parameter. https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid :param str id: (*required*) The numerical ID of the desired tweet. :param bool trim_user: When set to ``True``, the tweet's user object includes only the status author's numerical ID. :param bool include_my_retweet: When set to ``True``, any Tweet returned that has been retweeted by the authenticating user will include an additional ``current_user_retweet`` node, containing the ID of the source status for the retweet. :param bool include_entities: When set to ``False``, the ``entities`` node will not be included. :returns: A tweet dict.
def get_culprit(omit_top_frames=1):
    """Return (filename, lineno) of the real caller ("culprit").

    Skips ``omit_top_frames`` frames from the top of the stack, plus any
    decorator frames or frames inside this file, so intermediate helpers
    are not blamed.

    Returns
    -------
    (filename: str, lineno: int)
        Falls back to ('unknown', -1) when the stack cannot be read.
    """
    try:
        frames = stack()[omit_top_frames:]
        while frames:
            frame = frames.pop(0)
            filename = frame[1]
            # Stop at the first frame that is neither a decorator shim nor
            # part of this module.
            if '<decorator' not in filename and __file__ not in filename:
                break
        lineno = frame[2]
        del frames, frame
    except OSError:
        # inspect.stack() can fail while reading source files
        filename = 'unknown'
        lineno = -1
    return filename, lineno
get the filename and line number calling this. Parameters ---------- omit_top_frames: int, default=1 omit n frames from top of the stack. Purpose is to get the real culprit and not intermediate functions on the stack. Returns ------- (filename: str, fileno: int) filename and line number of the culprit.
def su(self) -> 'Gate':
    """Project the gate tensor onto the special unitary group (det == 1)."""
    dim = 2 ** self.qubit_nb
    matrix = asarray(self.asoperator())
    # Divide out the dim-th root of the determinant to normalize it to 1.
    matrix /= np.linalg.det(matrix) ** (1 / dim)
    return Gate(tensor=matrix, qubits=self.qubits)
Convert gate tensor to the special unitary group.
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    """Size of the simulation output, as reported by the wrapped cell."""
    cell = self._cell
    return cell.output_size
Returns the simulation output size.
def value_to_sql_str(v):
    """Transform a python value to its SQL literal representation.

    None -> 'null'; numbers unquoted; strings single-quoted with embedded
    quotes backslash-escaped; datetime/date quoted in ISO-like format;
    anything else falls back to str().

    NOTE(review): the quote escaping here is naive — prefer parameterized
    queries for untrusted input.
    """
    if v is None:
        return 'null'
    # bool is an int subclass; handle it first so True/False render via
    # str() exactly as the legacy type()-based checks did.
    if isinstance(v, bool):
        return str(v)
    # isinstance replaces the Python-2-only types.IntType/FloatType/LongType
    # and StringType/UnicodeType aliases, which no longer exist on Python 3.
    if isinstance(v, (int, float)):
        return str(v)
    if isinstance(v, str):
        return "'%s'" % (v.replace(u"'", u"\\'"))
    if isinstance(v, datetime):
        return "'%s'" % (v.strftime("%Y-%m-%d %H:%M:%S"))
    if isinstance(v, date):
        return "'%s'" % (v.strftime("%Y-%m-%d"))
    return str(v)
transform a python variable to the appropriate representation in SQL
def _GetTableNames(self, database): table_names = [] for esedb_table in database.tables: table_names.append(esedb_table.name) return table_names
Retrieves the table names in a database. Args: database (pyesedb.file): ESE database. Returns: list[str]: table names.
def calculate_file_access_time(workflow_workspace):
    """Map every file path under ``workflow_workspace`` to its access time."""
    access_times = {}
    for dirpath, _, filenames in os.walk(workflow_workspace):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            access_times[full_path] = os.stat(full_path).st_atime
    return access_times
Calculate access times of files in workspace.
def get_file_extension(filepath):
    """Return the extension of ``filepath`` without its leading dot.

    Copy in case anyconfig.utils.get_file_extension is not available.

    >>> get_file_extension("/a/b/c")
    ''
    >>> get_file_extension("/a/b.txt")
    'txt'
    >>> get_file_extension("/a/b/c.tar.xz")
    'xz'
    """
    ext = os.path.splitext(filepath)[-1]
    if not ext:
        return ''
    return ext[1:] if ext[0] == '.' else ext
Copy if anyconfig.utils.get_file_extension is not available. >>> get_file_extension("/a/b/c") '' >>> get_file_extension("/a/b.txt") 'txt' >>> get_file_extension("/a/b/c.tar.xz") 'xz'
def start_msstitch(exec_drivers, sysargs):
    """Dispatch a command-line invocation to the matching driver.

    Builds a parser from all executable drivers, parses ``sysargs`` (minus
    the program name), and runs the selected driver with the parsed options.
    """
    args = populate_parser(exec_drivers).parse_args(sysargs[1:])
    args.func(**vars(args))
Passed all drivers of executable, checks which command is passed to the executable and then gets the options for a driver, parses them from command line and runs the driver
def fit_df(self, dfs, pstate_col=PSTATE_COL):
    """Convenience wrapper: fit the model from a list of dataframes.

    ``pstate_col`` names the state column; the columns listed in
    ``self.emission_name`` are treated as observations.
    """
    obs_cols = list(self.emission_name)
    observations = [
        df[df.columns.difference([pstate_col])][obs_cols].values
        for df in dfs
    ]
    state_sequences = [df[pstate_col].values for df in dfs]
    return self.fit(observations, state_sequences)
Convenience function to fit a model from a list of dataframes
def pick(self, req_authn_context=None):
    """Find zero or more places the user could be sent next, ordered
    according to security level.

    :param req_authn_context: the requested context as a
        RequestedAuthnContext instance (None means unspecified/minimum).
    :return: An URL
    """
    if req_authn_context is None:
        return self._pick_by_class_ref(UNSPECIFIED, "minimum")

    comparison = req_authn_context.comparison or "exact"
    if req_authn_context.authn_context_class_ref:
        if comparison == 'exact':
            # Collect matches for every requested class ref.
            matches = []
            for cls_ref in req_authn_context.authn_context_class_ref:
                matches += self._pick_by_class_ref(cls_ref.text, comparison)
            return matches
        return self._pick_by_class_ref(
            req_authn_context.authn_context_class_ref[0].text, comparison)
    elif req_authn_context.authn_context_decl_ref:
        return self._pick_by_class_ref(
            req_authn_context.authn_context_decl_ref, comparison)
Given the authentication context find zero or more places where the user could be sent next. Ordered according to security level. :param req_authn_context: The requested context as an RequestedAuthnContext instance :return: An URL
def get(self, sid):
    """Construct an AuthCallsIpAccessControlListMappingContext for one resource.

    :param sid: unique string identifying the resource.
    :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_ip_access_control_list_mapping.AuthCallsIpAccessControlListMappingContext
    """
    return AuthCallsIpAccessControlListMappingContext(
        self._version,
        account_sid=self._solution['account_sid'],
        domain_sid=self._solution['domain_sid'],
        sid=sid,
    )
Constructs a AuthCallsIpAccessControlListMappingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_ip_access_control_list_mapping.AuthCallsIpAccessControlListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_ip_access_control_list_mapping.AuthCallsIpAccessControlListMappingContext
def set_custom_getter_compose(custom_getter):
    """Set a custom getter in the current variable scope.

    The existing custom getter (if any) is not overwritten; the new getter
    is composed with it.

    Args:
      custom_getter: a custom getter.
    """
    scope = tf.get_variable_scope()
    scope.set_custom_getter(
        _compose_custom_getters(scope.custom_getter, custom_getter))
Set a custom getter in the current variable scope. Do not overwrite the existing custom getter - rather compose with it. Args: custom_getter: a custom getter.
def help(*args):
    """Prints help."""
    # NOTE: the docstring above doubles as the parser description, so it is
    # kept byte-identical.
    from . import commands
    parser = argparse.ArgumentParser(
        prog="%s %s" % (__package__, help.__name__),
        description=help.__doc__)
    parser.add_argument('COMMAND', help="command to show help for",
                        nargs="?", choices=__all__)
    parsed = parser.parse_args(args)
    if not parsed.COMMAND:
        # No command given: show this parser's own help (exits).
        parser.parse_args(['-h'])
    else:
        for line in getattr(commands, parsed.COMMAND)('-h'):
            yield line
Prints help.
def rotate_image(filename, line, sdir, image_list):
    """Rotate an image file per an angle=/rotate= command found in ``line``.

    :param filename: name of the file as specified in the TeX source.
    :param line: the line where the rotate command was found.
    :return: True if the file on disk was rotated.
    """
    file_loc = get_image_location(filename, sdir, image_list)
    commands = re.findall('(angle=[-\\d]+|rotate=[-\\d]+)', line)
    if not commands:
        return False
    degrees = commands[0].split('=')[-1].strip()
    if file_loc is None or file_loc == 'ERROR' or \
            not re.match('-*\\d+', degrees):
        return False
    if degrees:
        try:
            degrees = int(degrees)
        except (ValueError, TypeError):
            return False
        if not os.path.exists(file_loc):
            return False
        # Rotate a clone and overwrite the original in place.
        with Image(filename=file_loc) as image:
            with image.clone() as rotated:
                rotated.rotate(degrees)
                rotated.save(filename=file_loc)
        return True
    return False
Rotate an image. Given a filename and a line, figure out what it is that the author wanted to do wrt changing the rotation of the image and convert the file so that this rotation is reflected in its presentation. :param: filename (string): the name of the file as specified in the TeX :param: line (string): the line where the rotate command was found :output: the image file rotated in accordance with the rotate command :return: True if something was rotated
def hashmodel(model, library=None):
    """Calculate the hash id of metaclass ``meta`` and register the model."""
    library = library or 'python-stdnet'
    meta = model._meta
    digest = hashlib.sha1(to_bytes('{0}({1})'.format(library, meta)))
    hash = digest.hexdigest()[:8]
    meta.hash = hash
    if hash in _model_dict:
        raise KeyError('Model "{0}" already in hash table.\
 Rename your model or the module containing the model.'.format(meta))
    _model_dict[hash] = model
Calculate the Hash id of metaclass ``meta``
def fit_transform(self, X, y, step_size=0.1, init_weights=None,
                  warm_start=False):
    """Fit the optimizer to ``X``, then return ``transform(X)``.

    See `fit` and `transform` for further explanation.
    """
    self.fit(X=X, y=y, step_size=step_size,
             init_weights=init_weights, warm_start=warm_start)
    return self.transform(X=X)
Fit optimizer to X, then transforms X. See `fit` and `transform` for further explanation.
def send_event(self, name, *args, **kwargs):
    """Queue an event for the native handler; sends are batched.

    The first event in an empty queue schedules a deferred flush;
    ``now=True`` in kwargs, or an overdue batch, forces an immediate send.

    Parameters
    ----------
    name : str
        Event name to be processed by MainActivity.processMessages.
    *args: args
        Arguments required by the event.
    **kwargs: kwargs
        Options for sending; ``now`` (bool) sends the event immediately.
    """
    pending = len(self._bridge_queue)
    self._bridge_queue.append((name, args))
    if pending == 0:
        # First event of the batch: schedule a deferred flush.
        self._bridge_last_scheduled = time()
        self.deferred_call(self._bridge_send)
    elif kwargs.get('now'):
        self._bridge_send(now=True)
    else:
        elapsed = time() - self._bridge_last_scheduled
        if elapsed > self._bridge_max_delay:
            self._bridge_send(now=True)
Send an event to the native handler. This call is queued and batched. Parameters ---------- name : str The event name to be processed by MainActivity.processMessages. *args: args The arguments required by the event. **kwargs: kwargs Options for sending. These are: now: boolean Send the event now
def approx_contains(self, other, atol):
    """Test if ``other`` belongs to this grid up to a tolerance.

    Parameters
    ----------
    other : `array-like` or float
        Object to test for membership in this grid.
    atol : float
        Allowed absolute deviation per vector entry.
    """
    point = np.atleast_1d(other)
    if point.shape != (self.ndim,):
        return False
    return all(
        np.any(np.isclose(vector, coordinate, atol=atol, rtol=0.0))
        for vector, coordinate in zip(self.coord_vectors, point))
Test if ``other`` belongs to this grid up to a tolerance. Parameters ---------- other : `array-like` or float The object to test for membership in this grid atol : float Allow deviations up to this number in absolute value per vector entry. Examples -------- >>> g = RectGrid([0, 1], [-1, 0, 2]) >>> g.approx_contains([0, 0], atol=0.0) True >>> [0, 0] in g # equivalent True >>> g.approx_contains([0.1, -0.1], atol=0.0) False >>> g.approx_contains([0.1, -0.1], atol=0.15) True
def repo(name: str, owner: str) -> snug.Query[dict]:
    """a repo lookup by owner and name"""
    url = f'https://api.github.com/repos/{owner}/{name}'
    response = yield snug.GET(url)
    return json.loads(response.content)
a repo lookup by owner and name
def _check_span_id(self, span_id):
    """Validate that ``span_id`` is a 16-character hex string.

    Logs a warning, clears ``self.from_header`` and returns None when the
    id is all-zero or malformed.

    :type span_id: str
    :param span_id: Identifier for the span, unique within a span.
    :rtype: str
    :returns: span_id for the current span, or None if invalid.
    """
    if span_id is None:
        return None

    assert isinstance(span_id, six.string_types)

    # BUGFIX: compare by value, not identity ('is'); two equal strings are
    # not guaranteed to be the same object, so 'is' let invalid ids through.
    if span_id == INVALID_SPAN_ID:
        logging.warning(
            'Span_id {} is invalid (cannot be all zero)'.format(span_id))
        self.from_header = False
        return None

    match = SPAN_ID_PATTERN.match(span_id)
    if match:
        return span_id

    logging.warning(
        'Span_id {} does not the match the '
        'required format'.format(span_id))
    self.from_header = False
    return None
Check the format of the span_id to ensure it is 16-character hex value representing a 64-bit number. If span_id is invalid, logs a warning message and returns None :type span_id: str :param span_id: Identifier for the span, unique within a span. :rtype: str :returns: Span_id for the current span.
def events(self):
    """Filter the event-reflector down to this pod's events.

    Returns all events whose involved object matches ``self.pod_name``
    since ``self._last_event`` (if defined); the running list is reset
    whenever the marker event is encountered. ``_last_event`` is set at
    the beginning of ``.start()``.
    """
    if not self.event_reflector:
        return []
    selected = []
    for event in self.event_reflector.events:
        if event.involved_object.name != self.pod_name:
            continue
        if self._last_event and event.metadata.uid == self._last_event:
            # Everything up to and including the marker is already known.
            selected = []
        else:
            selected.append(event)
    return selected
Filter event-reflector to just our events Returns list of all events that match our pod_name since our ._last_event (if defined). ._last_event is set at the beginning of .start().
def main(ctx, connection):
    """Command line interface for PyBEL."""
    manager = Manager(connection=connection)
    ctx.obj = manager
    manager.bind()
Command line interface for PyBEL.
def run_stop_backup(cls):
    """Stop a hot backup, if it was running, or error.

    Returns the last WAL file name and position required to gain
    consistency on the captured heap.
    """
    def handler(popen):
        # psql exiting non-zero means no hot backup was in progress.
        assert popen.returncode != 0
        raise UserException('Could not stop hot backup')

    query = (
        "SELECT file_name, "
        " lpad(file_offset::text, 8, '0') AS file_offset "
        "FROM pg_{0}file_name_offset("
        " pg_stop_backup())".format(cls._wal_name()))
    return cls._dict_transform(psql_csv_run(query, error_handler=handler))
Stop a hot backup, if it was running, or error Return the last WAL file name and position that is required to gain consistency on the captured heap.
def get_out_of_order(list_of_numbers):
    """Return adjacent pairs that break the monotonically non-decreasing trend.

    Used to find "out-of-order" global step values that may trigger
    TensorBoard event discarding logic.

    Args:
      list_of_numbers: A list of numbers.

    Returns:
      A list of (previous, current) tuples of adjacent elements where the
      second element is lower than the first.
    """
    # zip over the list and its tail replaces the index-based loop.
    return [
        (previous, current)
        for previous, current in zip(list_of_numbers, list_of_numbers[1:])
        if current < previous
    ]
Returns elements that break the monotonically non-decreasing trend. This is used to find instances of global step values that are "out-of-order", which may trigger TensorBoard event discarding logic. Args: list_of_numbers: A list of numbers. Returns: A list of tuples in which each tuple are two elements are adjacent, but the second element is lower than the first.
def _choose_capture_side(self):
    """Run capture on the best candidate node.

    Preference order: a local always-running node type (capture will not be
    cut off), then any always-running node, then any started local node,
    then any started node with a type.

    :returns: node dict where the capture should run.
    :raises aiohttp.web.HTTPConflict: when no running device is available.
    """
    ALWAYS_RUNNING_NODES_TYPE = ("cloud", "nat", "ethernet_switch", "ethernet_hub")

    for candidate in self._nodes:
        node = candidate["node"]
        if (node.compute.id == "local"
                and node.node_type in ALWAYS_RUNNING_NODES_TYPE
                and node.status == "started"):
            return candidate
    for candidate in self._nodes:
        node = candidate["node"]
        if node.node_type in ALWAYS_RUNNING_NODES_TYPE and node.status == "started":
            return candidate
    for candidate in self._nodes:
        node = candidate["node"]
        if node.compute.id == "local" and node.status == "started":
            return candidate
    for candidate in self._nodes:
        node = candidate["node"]
        if node.node_type and node.status == "started":
            return candidate
    raise aiohttp.web.HTTPConflict(text="Cannot capture because there is no running device on this link")
Run capture on the best candidate. The ideal candidate is a node who on controller server and always running (capture will not be cut off) :returns: Node where the capture should run
def verify(path):
    """Verify that ``path`` is a zip file containing Phasics TIFF files.

    :param path: candidate archive path.
    :return: True when at least one "SID PHA*.tif" member passes
        SingleTifPhasics.verify.
    """
    try:
        zf = zipfile.ZipFile(path)
    except (zipfile.BadZipfile, IsADirectoryError):
        return False
    # Context manager ensures the archive is closed even if a member read
    # or the TIFF check raises (the original leaked the handle there).
    with zf:
        candidates = sorted(
            nn for nn in zf.namelist()
            if nn.endswith(".tif") and nn.startswith("SID PHA"))
        for name in candidates:
            with zf.open(name) as pt:
                fd = io.BytesIO(pt.read())
            if SingleTifPhasics.verify(fd):
                return True
    return False
Verify that `path` is a zip file with Phasics TIFF files
def visit_global(self, node, parent):
    """visit a Global node to become astroid"""
    newnode = nodes.Global(
        node.names,
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent,
    )
    if self._global_names:
        # Register each declared name against the innermost frame's map.
        scope = self._global_names[-1]
        for name in node.names:
            scope.setdefault(name, []).append(newnode)
    return newnode
visit a Global node to become astroid
def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key,
                    entityIV, isJavaV1):
    """Decrypt the listed entity properties (AES-256 CBC, 128-bit PKCS7 padding).

    Each property in ``encrypted_properties_list`` is decrypted with a
    per-property IV derived from ``entityIV`` and decoded to a UTF-8 string;
    the client-encryption metadata properties are then stripped.

    :param entity: entity being retrieved and decrypted (dict or entity object).
    :param list encrypted_properties_list: names of the encrypted properties.
    :param bytes content_encryption_key: key used internally to encrypt the
        entity (extracted from the entity metadata).
    :param bytes entityIV: initialization vector seed (from entity metadata).
    :return: the decrypted entity.
    :rtype: Entity
    :raises AzureException: on any decryption failure.
    """
    _validate_not_none('entity', entity)

    decrypted_entity = deepcopy(entity)
    try:
        for name in entity.keys():
            if name in encrypted_properties_list:
                value = entity[name]
                propertyIV = _generate_property_iv(entityIV,
                                                  entity['PartitionKey'],
                                                  entity['RowKey'],
                                                  name, isJavaV1)
                cipher = _generate_AES_CBC_cipher(content_encryption_key,
                                                  propertyIV)
                # Decrypt, then strip the PKCS7 padding.
                decryptor = cipher.decryptor()
                decrypted_data = (decryptor.update(value.value) +
                                  decryptor.finalize())
                unpadder = PKCS7(128).unpadder()
                decrypted_data = (unpadder.update(decrypted_data) +
                                  unpadder.finalize())
                decrypted_entity[name] = decrypted_data.decode('utf-8')
        decrypted_entity.pop('_ClientEncryptionMetadata1')
        decrypted_entity.pop('_ClientEncryptionMetadata2')
        return decrypted_entity
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; real failures still map to a uniform error.
        raise AzureException(_ERROR_DECRYPTION_FAILURE)
Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK using either the specified KEK or the key returned by the key_resolver. Properties specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings. :param entity: The entity being retrieved and decrypted. Could be a dict or an entity object. :param list encrypted_properties_list: The encrypted list of all the properties that are encrypted. :param bytes[] content_encryption_key: The key used internally to encrypt the entity. Extrated from the entity metadata. :param bytes[] entityIV: The intialization vector used to seed the encryption algorithm. Extracted from the entity metadata. :return: The decrypted entity :rtype: Entity
def parse_feature(obj):
    """Attempt to build a GeoJSON-like Feature from a python object.

    Accepts objects exposing ``__geo_interface__``, WKT/WKB strings, and
    GeoJSON-like mappings; raises ValueError for anything else.
    """
    if hasattr(obj, '__geo_interface__'):
        gi = obj.__geo_interface__
        if gi['type'] in geom_types:
            return wrap_geom(gi)
        elif gi['type'] == 'Feature':
            return gi

    # WKT string?
    try:
        return wrap_geom(wkt.loads(obj).__geo_interface__)
    except (ReadingError, TypeError, AttributeError):
        pass

    # WKB payload?
    try:
        return wrap_geom(wkb.loads(obj).__geo_interface__)
    except (ReadingError, TypeError):
        pass

    # GeoJSON-like mapping?
    try:
        if obj['type'] in geom_types:
            return wrap_geom(obj)
        elif obj['type'] == 'Feature':
            return obj
    except (AssertionError, TypeError):
        pass

    raise ValueError("Can't parse %s as a geojson Feature object" % obj)
Given a python object, attempt to build a GeoJSON-like Feature from it
def complete_contexts(self):
    """Return a list of interfaces that have satisfied contexts (cached)."""
    if not self._complete_contexts:
        # context() populates self._complete_contexts as a side effect.
        self.context()
    return self._complete_contexts
Return a list of interfaces that have satisfied contexts.
def remove_component(self, entity, component_type):
    """Remove the component of ``component_type`` from ``entity``.

    Long-hand for :func:`essence.Entity.remove`.

    :param entity: entity to disassociate.
    :type entity: :class:`essence.Entity`
    :param component_type: the :class:`type` of a :class:`Component` subclass.
    """
    mapping = self._get_relation(component_type)
    mapping.pop(entity)
    self._entities_with(component_type).remove(entity)
Remove the component of component_type from entity. Long-hand for :func:`essence.Entity.remove`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component_type: Type of component :type component_type: The :class:`type` of a :class:`Component` subclass
def run_check(self, check, argument_names):
    """Run a check plugin.

    :param check: callable implementing the check.
    :param argument_names: attribute names on ``self`` whose values are
        passed positionally to *check*, in order.
    :return: whatever *check* returns.
    """
    args = [getattr(self, name) for name in argument_names]
    return check(*args)
Run a check plugin.
def dem(bounds, src_crs, dst_crs, out_file, resolution):
    """Dump the BC DEM for *bounds* to a TIFF at *out_file*.

    Defaults the destination CRS to BC Albers (EPSG:3005) when none is
    given, then delegates to ``bcdata.get_dem``.
    """
    dst_crs = dst_crs or "EPSG:3005"
    bcdata.get_dem(
        bounds,
        out_file=out_file,
        src_crs=src_crs,
        dst_crs=dst_crs,
        resolution=resolution,
    )
Dump BC DEM to TIFF
def attach(self, image_in, sampler=None, show=True):
    """Attach the relevant cross-sections of ``image_in`` to each axis.

    Parameters
    ----------
    image_in : ndarray
        Image to be attached to the collage; must be at least 3D.
    sampler : str or list or callable, optional
        Slice-selection strategy; falls back to ``self.sampler`` when None.
    show : bool
        Whether to display the collage immediately after attaching.

    Raises
    ------
    ValueError
        If the image is not at least 3D, or its slices cannot be attached
        to the current collage.
    """
    if len(image_in.shape) < 3:
        raise ValueError('Image must be atleast 3D')

    temp_sampler = self.sampler if sampler is None else sampler

    slicer = SlicePicker(image_in=image_in,
                         view_set=self.view_set,
                         num_slices=self.num_slices,
                         sampler=temp_sampler)

    try:
        for img_obj, slice_data in zip(self.images, slicer.get_slices()):
            img_obj.set_data(slice_data)
    except Exception as exc:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch real errors only and keep the original cause.
        self._data_attached = False
        raise ValueError(
            'unable to attach the given image data to current collage') from exc
    else:
        self._data_attached = True

    if show:
        self.show()
Attaches the relevant cross-sections to each axis. Parameters ---------- attach_image : ndarray The image to be attached to the collage, once it is created. Must be at least 3d. sampler : str or list or callable selection strategy: to identify the type of sampling done to select the slices to return. All sampling is done between the first and last non-empty slice in that view/dimension. - if 'linear' : linearly spaced slices - if list, it is treated as set of percentages at which slices to be sampled (must be in the range of [1-100], not [0-1]). This could be used to sample more/all slices in the middle e.g. range(40, 60, 5) or at the end e.g. [ 5, 10, 15, 85, 90, 95] - if callable, it must take a 2D image of arbitrary size, return True/False to indicate whether to select that slice or not. Only non-empty slices (at least one non-zero voxel) are provided as input. Simple examples for callable could be based on 1) percentage of non-zero voxels > x etc 2) presence of desired texture ? 3) certain properties of distribution (skew: dark/bright, energy etc) etc If the sampler returns more than requested `num_slices`, only the first num_slices will be selected. show : bool Flag to request immediate display of collage
def install(self, pip_args=None):
    """Install the program and put links in place.

    Does nothing (beyond a warning) when the environment directory
    already exists; otherwise creates the env, installs the program into
    it and creates the launcher links.
    """
    if path.isdir(self.env):
        print_pretty("<FG_RED>This seems to already be installed.<END>")
        return
    print_pretty("<FG_BLUE>Creating environment {}...<END>\n".format(self.env))
    self.create_env()
    self.install_program(pip_args)
    self.create_links()
Install the program and put links in place.
async def create_virtual_environment(loop=None):
    """Create a throwaway virtualenv for test-installing a package.

    :return: the virtualenv directory, the path to its python binary,
        and its site-packages path (as computed by
        ``install_dependencies``).
    """
    venv_dir = os.path.join(tempfile.mkdtemp(), VENV_NAME)

    # Build the env with the external ``virtualenv`` tool.
    proc = await asyncio.create_subprocess_shell(
        'virtualenv {}'.format(venv_dir), loop=loop)
    await proc.communicate()

    # Windows keeps scripts under Scripts/, POSIX under bin/.
    if sys.platform == 'win32':
        python = os.path.join(venv_dir, 'Scripts', 'python.exe')
    else:
        python = os.path.join(venv_dir, 'bin', 'python')

    venv_site_pkgs = install_dependencies(python)
    log.info("Created virtual environment at {}".format(venv_dir))
    return venv_dir, python, venv_site_pkgs
Create a virtual environment, and return the path to the virtual env directory, which should contain a "bin" directory with the `python` and `pip` binaries that can be used to do a test install of a software package. :return: the path to the virtual environment, its python, and its site pkgs
def run(self, messages, env=None):
    """Run gradeable tests and print results.

    Does nothing when scoring, unlocking or testing modes are active.
    When ``--suite`` is given, restricts each test to that suite and,
    optionally, to the ``--case`` numbers within it.
    """
    if self.args.score or self.args.unlock or self.args.testing:
        return

    tests = self.assignment.specified_tests
    for test in tests:
        if self.args.suite and hasattr(test, 'suites'):
            test.run_only = int(self.args.suite)
            try:
                suite = test.suites[int(self.args.suite) - 1]
            except IndexError:
                sys.exit(('python3 ok: error: '
                          'Suite number must be valid.({})'.format(len(test.suites))))
            # BUG FIX: ``suite`` is only bound inside this branch, so the
            # --case filter must live here too; previously passing --case
            # without --suite raised NameError.
            if self.args.case:
                suite.run_only = [int(c) for c in self.args.case]
    grade(tests, messages, env, verbose=self.args.verbose)
Run gradeable tests and print results and return analytics. RETURNS: dict; a mapping of test name -> JSON-serializable object. It is up to each test to determine what kind of data it wants to return as significant for analytics. However, all tests must include the number passed, the number of locked tests and the number of failed tests.
def get_external_account(resource_root, name, view=None):
    """Look up an external account by name.

    @param resource_root: The root Resource object.
    @param name: Account name
    @param view: View
    @return: An ApiExternalAccount object
    """
    params = dict(view=view) if view else None
    return call(resource_root.get,
                EXTERNAL_ACCOUNT_FETCH_PATH % ("account", name,),
                ApiExternalAccount, False, params=params)
Lookup an external account by name @param resource_root: The root Resource object. @param name: Account name @param view: View @return: An ApiExternalAccount object
def stop(self):
    """Stop the worker threads and block until they have all finished."""
    self.working = False
    for worker in self.workers:
        worker.join()
    self.workers = []
Stops the worker threads and waits for them to finish
def focus(self, force_first=False, force_last=False, force_column=None, force_widget=None):
    """Give this Layout the input focus.

    :param force_first: force focus to the first focusable widget.
    :param force_last: force focus to the last focusable widget.
    :param force_column: mandate the new column index.
    :param force_widget: mandate the new widget index.

    ``force_column`` and ``force_widget`` must be set together or they
    are otherwise ignored.

    :raises IndexError: if a force option specifies a bad column or
        widget, or if the whole Layout is read-only.
    """
    self._has_focus = True
    if force_widget is not None and force_column is not None:
        # Caller picked an exact (column, widget) location.
        self._live_col = force_column
        self._live_widget = force_widget
    elif force_first:
        # Start just before the first widget and scan forward to the
        # first focusable one.
        self._live_col = 0
        self._live_widget = -1
        self._find_next_widget(1)
    elif force_last:
        # Start just past the last widget and scan backward.
        self._live_col = len(self._columns) - 1
        self._live_widget = len(self._columns[self._live_col])
        self._find_next_widget(-1)
    self._columns[self._live_col][self._live_widget].focus()
Call this to give this Layout the input focus. :param force_first: Optional parameter to force focus to first widget. :param force_last: Optional parameter to force focus to last widget. :param force_column: Optional parameter to mandate the new column index. :param force_widget: Optional parameter to mandate the new widget index. The force_column and force_widget parameters must both be set together or they will otherwise be ignored. :raises IndexError: if a force option specifies a bad column or widget, or if the whole Layout is readonly.
def get_query_schema(self, job_id):
    """Retrieve the schema of a completed query by job id.

    Parameters
    ----------
    job_id : str
        The job_id that references a BigQuery query.

    Returns
    -------
    list
        A list of dicts that represent the schema.

    Raises
    ------
    UnfinishedQueryException
        If the referenced job has not completed yet.
    """
    reply = self.get_query_results(job_id, offset=0, limit=0)
    if not reply['jobComplete']:
        logger.warning('BigQuery job %s not complete' % job_id)
        raise UnfinishedQueryException()
    return reply['schema']['fields']
Retrieve the schema of a query by job id. Parameters ---------- job_id : str The job_id that references a BigQuery query Returns ------- list A ``list`` of ``dict`` objects that represent the schema.
def dbmax_stddev(self, value=None):
    """Corresponds to IDD Field `dbmax_stddev`.

    Standard deviation of extreme annual maximum dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmax_stddev` (unit: C).
            ``None`` means a missing value and is stored unchecked.

    Raises:
        ValueError: if `value` cannot be interpreted as a float.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `dbmax_stddev`'.format(value))
    self._dbmax_stddev = value
Corresponds to IDD Field `dbmax_stddev` Standard deviation of extreme annual maximum dry-bulb temperature Args: value (float): value for IDD Field `dbmax_stddev` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def attachable(name, path=None):
    """
    Return True if the named container can be attached to via the
    lxc-attach command.

    path
        path to the container parent directory
        default: /var/lib/lxc (system default)

    CLI Example:

    .. code-block:: bash

        salt 'minion' lxc.attachable ubuntu
    """
    # Results are cached per (name, path) in __context__ for the life of
    # this process, so the probe command only runs once per container.
    cachekey = 'lxc.attachable{0}{1}'.format(name, path)
    try:
        return __context__[cachekey]
    except KeyError:
        _ensure_exists(name, path=path)
        log.debug('Checking if LXC container %s is attachable', name)
        cmd = 'lxc-attach'
        if path:
            cmd += ' -P {0}'.format(pipes.quote(path))
        # Probe with a harmless command; return code 0 means attach works.
        cmd += ' --clear-env -n {0} -- /usr/bin/env'.format(name)
        result = __salt__['cmd.retcode'](cmd,
                                         python_shell=False,
                                         output_loglevel='quiet',
                                         ignore_retcode=True) == 0
        __context__[cachekey] = result
        return __context__[cachekey]
Return True if the named container can be attached to via the lxc-attach command path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt 'minion' lxc.attachable ubuntu
def _try_fetch(self, size=None):
    """Try to start fetching data, if not yet started.

    Mutates self to indicate that iteration has started.

    :raises exceptions.InterfaceError: if ``execute()`` has not been
        called before fetching.
    """
    if self._query_job is None:
        raise exceptions.InterfaceError(
            "No query results: execute() must be called before fetch."
        )

    # DML statements (INSERT/UPDATE/...) produce no row set; present an
    # empty iterator instead of trying to list a destination table.
    is_dml = (
        self._query_job.statement_type
        and self._query_job.statement_type.upper() != "SELECT"
    )
    if is_dml:
        self._query_data = iter([])
        return

    if self._query_data is None:
        client = self.connection._client
        # Stream rows from the query's destination table, paging by the
        # DB-API ``arraysize`` hint.
        rows_iter = client.list_rows(
            self._query_job.destination,
            selected_fields=self._query_job._query_results.schema,
            page_size=self.arraysize,
        )
        self._query_data = iter(rows_iter)
Try to start fetching data, if not yet started. Mutates self to indicate that iteration has started.
def conj(self, out=None):
    """Complex conjugate of this element.

    Parameters
    ----------
    out : `DiscreteLpElement`, optional
        Element of this element's space to write the conjugate into.

    Returns
    -------
    out : `DiscreteLpElement`
        The complex conjugate element; a reference to ``out`` when it
        was provided.
    """
    if out is not None:
        self.tensor.conj(out=out.tensor)
        return out
    return self.space.element(self.tensor.conj())
Complex conjugate of this element. Parameters ---------- out : `DiscreteLpElement`, optional Element to which the complex conjugate is written. Must be an element of this element's space. Returns ------- out : `DiscreteLpElement` The complex conjugate element. If ``out`` is provided, the returned object is a reference to it. Examples -------- >>> discr = uniform_discr(0, 1, 4, dtype=complex) >>> x = discr.element([5+1j, 3, 2-2j, 1j]) >>> y = x.conj() >>> print(y) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] The out parameter allows you to avoid a copy: >>> z = discr.element() >>> z_out = x.conj(out=z) >>> print(z) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> z_out is z True It can also be used for in-place conjugation: >>> x_out = x.conj(out=x) >>> print(x) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> x_out is x True
def unpack_rgb(packed):
    """Unpack a single integer or array of integers into 24-bit RGB values.

    Returns an (r, g, b) tuple for a scalar input, or an array of shape
    ``orig_shape + (3,)`` for an ndarray input.
    """
    shape = None
    if isinstance(packed, np.ndarray):
        assert packed.dtype == int
        shape = packed.shape
        packed = packed.reshape((-1, 1))
    channels = ((packed >> 16) & 0xff,
                (packed >> 8) & 0xff,
                packed & 0xff)
    if shape is None:
        return channels
    return np.hstack(channels).reshape(shape + (3,))
Unpacks a single integer or array of integers into one or more 24-bit RGB values.
def as_string(self):
    """Get the underlying message object as a string.

    When only headers were loaded so far, the full content is fetched
    first and cached on ``self.msgobj``.
    """
    if self.headers_only:
        # NOTE(review): headers_only is not cleared here — presumably
        # _get_content() takes care of that; confirm against the class.
        self.msgobj = self._get_content()
    from email.generator import Generator
    fp = StringIO()
    # maxheaderlen controls where long headers are folded when rendering.
    g = Generator(fp, maxheaderlen=60)
    g.flatten(self.msgobj)
    text = fp.getvalue()
    return text
Get the underlying message object as a string
def _initLayerCtors(self):
    """Register the built-in Layer constructors."""
    self.layrctors.update(
        lmdb=s_lmdblayer.LmdbLayer,
        remote=s_remotelayer.RemoteLayer,
    )
Registration for built-in Layer ctors
def parse_multipart_upload_result(data):
    """Parse a complete-multipart-upload response.

    :param data: Response data for complete multipart upload.
    :return: :class:`MultipartUploadResult <MultipartUploadResult>`.
    """
    root = S3Element.fromstring('CompleteMultipartUploadResult', data)
    bucket = root.get_child_text('Bucket')
    key = root.get_child_text('Key')
    location = root.get_child_text('Location')
    return MultipartUploadResult(bucket, key, location, root.get_etag_elem())
Parser for complete multipart upload response. :param data: Response data for complete multipart upload. :return: :class:`MultipartUploadResult <MultipartUploadResult>`.
def integrate(self, function, lower_bound, upper_bound):
    """Integrate *function* over [lower_bound, upper_bound].

    Uses the composite trapezoidal rule with ``self.nsteps`` steps,
    logging each evaluation point.
    """
    steps = self.nsteps
    width = (float(upper_bound) - float(lower_bound)) / float(steps)
    self.log_info("xStep" + str(width))
    left = function(lower_bound)
    self.log_info("val1: " + str(left))
    total = 0.0
    for step in range(steps):
        right_x = (step + 1) * width + lower_bound
        self.log_info("x: " + str(right_x))
        right = function(right_x)
        self.log_info("val2: " + str(right))
        total += 0.5 * width * (left + right)
        left = right
    return total
Calculates the integral of the given one dimensional function in the interval from lower_bound to upper_bound, using the composite trapezoidal rule.
def drdlat(r, lon, lat):
    """Compute the Jacobian of the transformation from latitudinal to
    rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdlat_c.html

    :param r: Distance of a point from the origin.
    :param lon: Angle of the point from the XZ plane in radians.
    :param lat: Angle of the point from the XY plane in radians.
    :return: 3x3 matrix of partial derivatives.
    """
    jacobi = stypes.emptyDoubleMatrix()
    libspice.drdlat_c(ctypes.c_double(r),
                      ctypes.c_double(lon),
                      ctypes.c_double(lat),
                      jacobi)
    return stypes.cMatrixToNumpy(jacobi)
Compute the Jacobian of the transformation from latitudinal to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/drdlat_c.html :param r: Distance of a point from the origin. :type r: float :param lon: Angle of the point from the XZ plane in radians. :type lon: float :param lat: Angle of the point from the XY plane in radians. :type lat: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats
def aa_counts(aln, weights=None, gap_chars='-.'):
    """Calculate the amino acid frequencies in a set of SeqRecords.

    :param aln: alignment (iterable of SeqRecords).
    :param weights: per-sequence weights (list/tuple, usually from
        ``sequence_weights``), or ``True`` to compute them here, or
        ``None`` for plain unweighted counts.
    :param gap_chars: characters treated as gaps and dropped from counts.
    :return: mapping of residue -> count (floats when weighted).
    """
    if weights is None:
        counts = Counter()
        for rec in aln:
            counts.update(Counter(str(rec.seq)))
    else:
        # BUG FIX: compare with ``is True`` — ``weights == True`` would
        # also match e.g. the scalar weight 1.
        if weights is True:
            weights = sequence_weights(aln)
        else:
            assert len(weights) == len(aln), (
                "Length mismatch: weights = %d, alignment = %d"
                % (len(weights), len(aln)))
        counts = defaultdict(float)
        for col in zip(*aln):
            for aa, wt in zip(col, weights):
                counts[aa] += wt
    # Drop gap characters from the totals.
    for gap_char in gap_chars:
        if gap_char in counts:
            del counts[gap_char]
    return counts
Calculate the amino acid frequencies in a set of SeqRecords. Weights for each sequence in the alignment can be given as a list/tuple, usually calculated with the sequence_weights function. For convenience, you can also pass "weights=True" and the weights will be calculated with sequence_weights here.
def join(self, timeout_s=None):
    """Block until the interval ends or the timeout is reached.

    Args:
        timeout_s: seconds to wait; ``None`` waits forever.

    Returns:
        True if the interval is still running when we stop waiting;
        False when there was no thread to join.
    """
    thread = self.thread
    if not thread:
        return False
    thread.join(timeout_s)
    return self.running
Joins blocking until the interval ends or until timeout is reached. Args: timeout_s: The time in seconds to wait, defaults to forever. Returns: True if the interval is still running and we reached the timeout.
def to_text(sentence):
    """Convert a Sentence protobuf to a string from its tokens.

    The leading ``before`` of the first token is skipped so the text
    starts at the first word.
    """
    pieces = []
    for index, token in enumerate(sentence.token):
        if index:
            pieces.append(token.before)
        pieces.append(token.word)
    return "".join(pieces)
Helper routine that converts a Sentence protobuf to a string from its tokens.
def login(self, username, password=None, blob=None, zeroconf=None):
    """Authenticate to Spotify's servers.

    Exactly one of ``password``, ``blob`` or ``zeroconf`` must be given
    alongside ``username``.

    :raises AttributeError: when no login method is specified.
    """
    username = utils.to_char(username)
    if password is not None:
        spotifyconnect.Error.maybe_raise(
            lib.SpConnectionLoginPassword(username, utils.to_char(password)))
    elif blob is not None:
        spotifyconnect.Error.maybe_raise(
            lib.SpConnectionLoginBlob(username, utils.to_char(blob)))
    elif zeroconf is not None:
        spotifyconnect.Error.maybe_raise(
            lib.SpConnectionLoginZeroConf(username, *zeroconf))
    else:
        raise AttributeError(
            "Must specify a login method (password, blob or zeroconf)")
Authenticate to Spotify's servers. You can login with one of three combinations: - ``username`` and ``password`` - ``username`` and ``blob`` - ``username`` and ``zeroconf`` To get the ``blob`` string, you must once log in with ``username`` and ``password``. You'll then get the ``blob`` string passed to the :attr:`~ConnectionCallbacks.new_credentials` callback.
def get_labels(self, depth=None):
    """Return a list with a copy of the labels in this cell.

    Parameters
    ----------
    depth : integer or ``None``
        If not ``None``, defines from how many reference levels to
        retrieve labels from.

    Returns
    -------
    out : list of ``Label``
        List containing the labels in this cell and its references.
    """
    labels = libCopy.deepcopy(self.labels)
    if depth is None or depth > 0:
        # CellReference and CellArray were handled by two byte-identical
        # branches; merge them into a single isinstance check.
        for element in self.elements:
            if isinstance(element, (CellReference, CellArray)):
                labels.extend(
                    element.get_labels(None if depth is None else depth - 1))
    return labels
Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references.
def get(self, name, hint):
    """Get the canonical name for a symbol.

    A user-specified *name* wins; otherwise a fresh name is generated as
    *hint* plus a per-hint counter (``hint0``, ``hint1``, ...).

    Parameters
    ----------
    name : str or None
        The name specified by the user.
    hint : str
        A hint string used to generate a name.

    Returns
    -------
    str
        A canonical name for the symbol.
    """
    if name:
        return name
    index = self._counter.setdefault(hint, 0)
    self._counter[hint] = index + 1
    return '%s%d' % (hint, index)
Get the canonical name for a symbol. This is the default implementation. If the user specifies a name, the user-specified name will be used. When user does not specify a name, we automatically generate a name based on the hint string. Parameters ---------- name : str or None The name specified by the user. hint : str A hint string, which can be used to generate name. Returns ------- full_name : str A canonical name for the symbol.
def distill(p, K):
    """Error-free vector transformation for summation (Algorithm 4.3).

    The vector p is transformed without changing the sum, and p_n is
    replaced by float(sum(p)). Kahan calls this a 'distillation
    algorithm.'
    """
    flat = p.reshape(p.shape[0], -1)
    for _ in range(K):
        _accupy.distill(flat)
    return flat.reshape(p.shape)
Algorithm 4.3. Error-free vector transformation for summation. The vector p is transformed without changing the sum, and p_n is replaced by float(sum(p)). Kahan [21] calls this a 'distillation algorithm.'
def reset(self):
    """Remove all the components of the block."""
    # Fresh, empty (ordered) component registry.
    self._components = OrderedDict()
    self.clear_selections()
    self._logger.info("<block: %s> reset component list" % (self.name))
Removes all the components of the block
def handle_call_response(self, result, node):
    """Process an RPC result for *node*.

    A response means the node is alive: welcome it into the routing
    table if new. No response means it is removed from the table.
    Returns *result* unchanged either way.
    """
    if result[0]:
        log.info("got successful response from %s", node)
        self.welcome_if_new(node)
    else:
        log.warning("no response from %s, removing from router", node)
        self.router.remove_contact(node)
    return result
If we get a response, add the node to the routing table. If we get no response, make sure it's removed from the routing table.
def pop_event(self):
    """Pop the next queued event from the queue.

    :raise ValueError: If there is no event queued.
    """
    with self.lock:
        if self.events:
            return self.events.popleft()
        raise ValueError('no events queued')
Pop the next queued event from the queue. :raise ValueError: If there is no event queued.
def create_env_section(pairs, name):
    """Build an environment section as a list of lines.

    Each KEY=VALUE pair is prefixed with ``export``; the section is
    headed by ``%name``.

    Parameters
    ==========
    pairs: the environment KEY=VALUE strings to export
    name: the name of the section to write (e.g., environment)
    """
    return ['%' + name] + ["export %s" % pair for pair in pairs]
environment key value pairs need to be joined by an equal sign, and exported at the end. Parameters ========== pairs: the list of KEY=VALUE environment strings to export name: the name of the section to write (e.g., environment)
def decimal_format(value, TWOPLACES=Decimal(100) ** -2): 'Format a decimal.Decimal like to 2 decimal places.' if not isinstance(value, Decimal): value = Decimal(str(value)) return value.quantize(TWOPLACES)
Format a decimal.Decimal like to 2 decimal places.
def expiring_memoize(obj):
    """Like memoize, but forgets entries after 10 seconds.

    NOTE(review): ``last_access`` is refreshed on *every* call, so an
    entry actually expires 10 seconds after its last access, not 10
    seconds after it was computed — confirm this TTL semantic is
    intended.
    """
    cache = obj.cache = {}
    last_access = obj.last_access = defaultdict(int)

    @wraps(obj)
    def memoizer(*args, **kwargs):
        # NOTE(review): str()-based keys can collide for distinct
        # arguments with equal string representations.
        key = str(args) + str(kwargs)
        # Evict the entry when it has been idle for more than 10 seconds.
        if last_access[key] and last_access[key] + 10 < time():
            if key in cache:
                del cache[key]
        last_access[key] = time()
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
Like memoize, but forgets after 10 seconds.
def run_simulation(c1=None, c2=None):
    """Run a battle simulation between two characters.

    BUG FIX: the passed-in characters were previously discarded and
    always regenerated; random characters are now only generated when an
    argument is omitted (defaults keep the old no-argument behavior).

    :param c1: first character; generated randomly when None.
    :param c2: second character; generated randomly when None.
    """
    print('running simulation...')
    traits = character.CharacterCollection(character.fldr)
    if c1 is None:
        c1 = traits.generate_random_character()
    if c2 is None:
        c2 = traits.generate_random_character()
    print(c1)
    print(c2)
    rules = battle.BattleRules(battle.rules_file)
    b = battle.Battle(c1, c2, traits, rules, print_console='Yes')
    print(b.status)
using character and planet, run the simulation
def extract_sort(self, params):
    """Extract and build the sort query from request parameters.

    Pops ``sort`` from *params* (a string or list of strings; a leading
    ``-`` means descending) and stores the adapter-mapped sort specs on
    ``self.sorts``, skipping fields the adapter does not know about.
    """
    sorts = params.pop('sort', [])
    # BUG FIX: ``basestring`` is Python 2 only; ``str`` is the Python 3
    # equivalent for detecting a single sort field.
    if isinstance(sorts, str):
        sorts = [sorts]
    sorts = [(s[1:], 'desc') if s.startswith('-') else (s, 'asc')
             for s in sorts]
    self.sorts = [{self.adapter.sorts[s]: d}
                  for s, d in sorts if s in self.adapter.sorts]
Extract and build sort query from parameters
def results(self, use_cache=True, dialect=None, billing_tier=None):
    """Materialize the view synchronously.

    For finer control over execution, use execute() or execute_async().

    Args:
        use_cache: whether to use cached results or not.
        dialect: 'legacy' or 'standard' BigQuery SQL dialect.
        billing_tier: optional per-query billing tier cap.

    Returns:
        A QueryResultsTable containing the result set.

    Raises:
        Exception if the query could not be executed or the response
        was malformed.
    """
    materialization = self._materialization
    return materialization.results(use_cache=use_cache,
                                   dialect=dialect,
                                   billing_tier=billing_tier)
Materialize the view synchronously. If you require more control over the execution, use execute() or execute_async(). Args: use_cache: whether to use cached results or not. dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing the result set. Raises: Exception if the query could not be executed or query response was malformed.
def minifyspace(parser, token):
    """Template tag that strips whitespace (including tabs and newlines)
    between ``{% minifyspace %}`` and ``{% endminifyspace %}``.

    Do not use around ``<pre>`` content.
    """
    inner = parser.parse(('endminifyspace',))
    parser.delete_first_token()
    return MinifiedNode(inner)
Removes whitespace including tab and newline characters. Do not use this if you are using a <pre> tag. Example usage:: {% minifyspace %} <p> <a title="foo" href="foo/"> Foo </a> </p> {% endminifyspace %} This example would return this HTML:: <p><a title="foo" href="foo/">Foo</a></p>
def _drop_remaining_rules(self, *rules): if rules: for rule in rules: try: self._remaining_rules.remove(rule) except ValueError: pass else: self._remaining_rules = []
Drops rules from the queue of the rules that still need to be evaluated for the currently processed field. If no arguments are given, the whole queue is emptied.
def outer_product_sum(A, B=None):
    """Compute the sum of the outer products of the rows in A and B.

        P = sum_i outer(A[i], B[i])

    Vectorized replacement for::

        P = 0
        for y, z in zip(A, B):
            P += np.outer(y, z)

    as used for sigma-point covariance sums in the UKF, ensemble Kalman
    filter, etc.

    Parameters
    ----------
    A : np.array, shape (M, N)
        rows of N-vectors to have the outer product summed
    B : np.array, shape (M, N), optional
        rows of N-vectors to have the outer product summed;
        defaults to A.

    Returns
    -------
    P : np.array, shape (N, N)
        sum of the outer products of the rows of A and B
    """
    if B is None:
        B = A
    # Contract over the row axis directly instead of materializing the
    # (M, N, N) stack of outer products and summing it afterwards.
    return np.einsum('ij,ik->jk', A, B)
Computes the sum of the outer products of the rows in A and B P = \Sum {A[i] B[i].T} for i in 0..N Notionally: P = 0 for y in A: P += np.outer(y, y) This is a standard computation for sigma points used in the UKF, ensemble Kalman filter, etc., where A would be the residual of the sigma points and the filter's state or measurement. The computation is vectorized, so it is much faster than the for loop for large A. Parameters ---------- A : np.array, shape (M, N) rows of N-vectors to have the outer product summed B : np.array, shape (M, N) rows of N-vectors to have the outer product summed If it is `None`, it is set to A. Returns ------- P : np.array, shape(N, N) sum of the outer product of the rows of A and B Examples -------- Here sigmas is of shape (M, N), and x is of shape (N). The two sets of code compute the same thing. >>> P = outer_product_sum(sigmas - x) >>> >>> P = 0 >>> for s in sigmas: >>> y = s - x >>> P += np.outer(y, y)
def iterate(self):
    """Drive one iteration; must be called regularly when using an
    external event loop.

    :raises RuntimeError: if the run loop has not been started, or if a
        driver-owned run loop is active.
    """
    if not self._inLoop:
        raise RuntimeError('run loop not started')
    if self._driverLoop:
        raise RuntimeError('iterate not valid in driver run loop')
    self.proxy.iterate()
Must be called regularly when using an external event loop.
def is_super_admin(self, req):
    """Return True if the admin specified in the request represents
    the .super_admin.

    :param req: The swob.Request to check.
    """
    headers = req.headers
    return headers.get('x-auth-admin-user') == '.super_admin' and \
        self.super_admin_key and \
        headers.get('x-auth-admin-key') == self.super_admin_key
Returns True if the admin specified in the request represents the .super_admin. :param req: The swob.Request to check. :param returns: True if .super_admin.
def has_parent_vaults(self, vault_id):
    """Tests if the ``Vault`` has any parents.

    arg:    vault_id (osid.id.Id): a vault ``Id``
    return: (boolean) - ``true`` if the vault has parents, ``false``
            otherwise

    *compliance: mandatory -- This method must be implemented.*
    """
    session = self._catalog_session
    if session is not None:
        return session.has_parent_catalogs(catalog_id=vault_id)
    return self._hierarchy_session.has_parents(id_=vault_id)
Tests if the ``Vault`` has any parents. arg: vault_id (osid.id.Id): a vault ``Id`` return: (boolean) - ``true`` if the vault has parents, ``false`` otherwise raise: NotFound - ``vault_id`` is not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*