code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def GetDomain(self):
    """Return the parametric domain of the B-Spline as a (start, end) tuple."""
    knots = self.knots
    degree = self.degree
    lower = knots[degree - 1]
    upper = knots[len(knots) - degree]
    return (lower, upper)
Returns the domain of the B-Spline
def DEFINE_choice(self, name, default, choices, help, constant=False):
    """A helper for defining choice string options."""
    option = type_info.Choice(
        name=name,
        default=default,
        choices=choices,
        description=help,
    )
    self.AddOption(option, constant=constant)
A helper for defining choice string options.
def connectionLost(self, reason):
    """
    Mostly handles clean-up of node + candidate structures. Avoids
    memory exhaustion for a large number of connections.

    Called by Twisted when the transport drops; `reason` is the
    disconnect reason (unused here).
    """
    try:
        self.connected = False
        if debug:
            print(self.log_entry("CLOSED =", "none"))

        # Periodic cleanup: only runs when at least `self.cleanup` seconds
        # have elapsed since the last sweep (comment says five minutes --
        # presumably self.cleanup == 300; TODO confirm).
        t = time.time()
        if time.time() - self.factory.last_cleanup >= self.cleanup:
            self.factory.last_cleanup = t

            # Delete old passive nodes.
            # Two-phase delete (collect then remove) so the dict is not
            # mutated while iterating.
            old_node_ips = []
            for node_ip in list(self.factory.nodes["passive"]):
                passive_node = self.factory.nodes["passive"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - passive_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["passive"][node_ip]

            # Delete old simultaneous nodes (same two-phase pattern).
            old_node_ips = []
            for node_ip in list(self.factory.nodes["simultaneous"]):
                simultaneous_node = \
                    self.factory.nodes["simultaneous"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - simultaneous_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["simultaneous"][node_ip]

            # Delete old candidates and candidate structs.
            old_node_ips = []
            for node_ip in list(self.factory.candidates):
                # Record old candidates.
                old_candidates = []
                for candidate in self.factory.candidates[node_ip]:
                    # Hole punching is ms time sensitive.
                    # Candidates older than 5x the challenge timeout are
                    # safe to assume no longer needed -- but only for
                    # nodes that are no longer in the simultaneous table.
                    if node_ip not in self.factory.nodes["simultaneous"] \
                            and t - candidate["time"] >= self.challenge_timeout * 5:
                        old_candidates.append(candidate)

                # Remove old candidates.
                for candidate in old_candidates:
                    self.factory.candidates[node_ip].remove(candidate)

                # Record node IPs whose candidate list is now empty and
                # which are not simultaneous nodes anymore.
                if not len(self.factory.candidates[node_ip]) and \
                        node_ip not in self.factory.nodes["simultaneous"]:
                    old_node_ips.append(node_ip)

            # Remove old node IPs.
            for node_ip in old_node_ips:
                del self.factory.candidates[node_ip]
    except Exception as e:
        # Best-effort: cleanup failures are logged, never propagated,
        # so connection teardown always completes.
        error = parse_exception(e)
        log_exception(error_log_path, error)
        print(self.log_entry("ERROR =", error))
Mostly handles clean-up of node + candidate structures. Avoids memory exhaustion for a large number of connections.
def consume_token(self, tokens, index, tokens_len):
    """Consume a token.

    Returns a tuple of (tokens, tokens_len, index) when consumption
    is completed and tokens have been merged together.
    """
    del tokens_len  # unused, accepted for interface compatibility
    if tokens[index].type != TokenType.EndInlineRST:
        return None
    return _paste_tokens_line_by_line(tokens,
                                      TokenType.RST,
                                      self.begin,
                                      index + 1)
Consume a token. Returns a tuple of (tokens, tokens_len, index) when consumption is completed and tokens have been merged together.
def boolean_flag(parser, name, default=False, help=None):
    """Add a boolean flag to argparse parser.

    Parameters
    ----------
    parser: argparse.Parser
        parser to add the flag to
    name: str
        --<name> will enable the flag, while --no-<name> will disable it
    default: bool or None
        default value of the flag
    help: str
        help string for the flag
    """
    dest_name = name.replace('-', '_')
    enable_flag = "--" + name
    disable_flag = "--no-" + name
    parser.add_argument(enable_flag, action="store_true", default=default,
                        dest=dest_name, help=help)
    parser.add_argument(disable_flag, action="store_false", dest=dest_name)
Add a boolean flag to argparse parser. Parameters ---------- parser: argparse.Parser parser to add the flag to name: str --<name> will enable the flag, while --no-<name> will disable it default: bool or None default value of the flag help: str help string for the flag
def del_permission_role(self, role, perm_view):
    """
    Remove a PermissionViewMenu object from a Role.

    :param role: The role object
    :param perm_view: The PermissionViewMenu object
    """
    if perm_view not in role.permissions:
        return
    try:
        role.permissions.remove(perm_view)
        self.get_session.merge(role)
        self.get_session.commit()
        log.info(
            c.LOGMSG_INF_SEC_DEL_PERMROLE.format(str(perm_view), role.name)
        )
    except Exception as e:
        log.error(c.LOGMSG_ERR_SEC_DEL_PERMROLE.format(str(e)))
        self.get_session.rollback()
Remove permission-ViewMenu object from Role :param role: The role object :param perm_view: The PermissionViewMenu object
def worker_id(self):
    """A unique identifier for this queue instance and the items it owns."""
    cached = self._worker_id
    if cached is None:
        # Not cached yet: derive the id from a fresh connection.
        return self._get_worker_id(self._conn())
    return cached
A unique identifier for this queue instance and the items it owns.
def one(self, filter_by=None):
    """
    Parameters
    ----------
    filter_by: callable, default None
        Callable must take one argument (a record of table), and return True
        to keep record, or False to skip it.
        Example : .one(lambda x: x.name == "my_name").
        If None, records are not filtered.

    Returns
    -------
    Record instance if one and only one record is found. Else raises.

    Raises
    ------
    RecordDoesNotExistError if no record is found
    MultipleRecordsReturnedError if multiple records are found
    """
    queryset = Queryset(self, records=self._records.values())
    return queryset.one(filter_by=filter_by)
Parameters ---------- filter_by: callable, default None Callable must take one argument (a record of table), and return True to keep record, or False to skip it. Example : .one(lambda x: x.name == "my_name"). If None, records are not filtered. Returns ------- Record instance if one and only one record is found. Else raises. Raises ------ RecordDoesNotExistError if no record is found MultipleRecordsReturnedError if multiple records are found
def gradient(self, ts):
    """
    Find the gradient of the log likelihood with respect to the given
    time series.

    Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf

    Returns a 3-element array containing the gradient for the alpha,
    beta, and omega parameters.
    """
    dense_ts = Vectors.dense(ts)
    java_gradient = self._jmodel.gradient(_py2java(self._ctx, dense_ts))
    return _java2py(self._ctx, java_gradient)
Find the gradient of the log likelihood with respect to the given time series. Based on http://www.unc.edu/~jbhill/Bollerslev_GARCH_1986.pdf Returns a 3-element array containing the gradient for the alpha, beta, and omega parameters.
def quantity(*args):
    """Create a quantity. This can be from a scalar or vector.

    Example::

        q1 = quantity(1.0, "km/s")
        q2 = quantity("1km/s")
        q1 = quantity([1.0,2.0], "km/s")

    Raises
    ------
    TypeError
        If a single argument of an unsupported type is given.
    """
    if len(args) == 1:
        arg = args[0]
        if isinstance(arg, str):
            # use copy constructor to create quantity from string
            return Quantity(from_string(arg))
        elif isinstance(arg, dict):
            # a dict with a sequence "value" becomes a vector quantity
            if hasattr(arg["value"], "__len__"):
                return QuantVec(from_dict_v(arg))
            else:
                return Quantity(from_dict(arg))
        elif isinstance(arg, (Quantity, QuantVec)):
            # already a quantity: pass through unchanged
            return arg
        else:
            # BUG FIX: the original message was truncated
            # ("Invalid argument type for"); name the offending type.
            raise TypeError(
                "Invalid argument type for quantity(): %r" % type(arg))
    else:
        # (value, unit) form: sequence value becomes a vector quantity
        if hasattr(args[0], "__len__"):
            return QuantVec(*args)
        else:
            return Quantity(*args)
Create a quantity. This can be from a scalar or vector. Example:: q1 = quantity(1.0, "km/s") q2 = quantity("1km/s") q1 = quantity([1.0,2.0], "km/s")
def load_and_init(self, modules):
    """Import, instantiate & "init" the modules we manage

    :param modules: list of the managed modules
    :return: True if no errors
    """
    self.load(modules)
    self.get_instances()
    return not self.configuration_errors
Import, instantiate & "init" the modules we manage :param modules: list of the managed modules :return: True if no errors
def register_hook(self, hook_name, fn):
    """Register a function to be called on a GitHub event."""
    if hook_name in self._hooks:
        # Double registration is a programming error; refuse loudly.
        raise Exception('%s hook already registered' % hook_name)
    self._hooks[hook_name] = fn
Register a function to be called on a GitHub event.
def _create_prelim(self):
    """ Step 0: Register intent to upload files

    Validates self.payload, then POSTs the attachment item templates to
    the Zotero API and copies the server-assigned keys back onto the
    payload entries. Returns the API response dict, or None when the
    payload already carries keys.
    """
    self._verify(self.payload)
    if "key" in self.payload[0] and self.payload[0]["key"]:
        # Mixed payloads (some entries keyed, some not) are ambiguous.
        if next((i for i in self.payload if "key" not in i), False):
            raise ze.UnsupportedParams(
                "Can't pass payload entries with and without keys to Zupload"
            )
        return None  # Don't do anything if payload comes with keys
    liblevel = "/{t}/{u}/items"
    # Create one or more new attachments
    headers = {"Zotero-Write-Token": token(), "Content-Type": "application/json"}
    headers.update(self.zinstance.default_headers())
    # If we have a Parent ID, add it as a parentItem
    if self.parentid:
        for child in self.payload:
            child["parentItem"] = self.parentid
    to_send = json.dumps(self.payload)
    req = requests.post(
        url=self.zinstance.endpoint
        + liblevel.format(
            t=self.zinstance.library_type, u=self.zinstance.library_id
        ),
        data=to_send,
        headers=headers,
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        # Delegates to the library-wide HTTP error translator.
        error_handler(req)
    data = req.json()
    # Copy server-assigned keys back onto the matching payload entries
    # (the "success" map is keyed by payload index as a string).
    for k in data["success"]:
        self.payload[int(k)]["key"] = data["success"][k]
    return data
Step 0: Register intent to upload files
def clear_feature(dev, feature, recipient = None):
    r"""Clear/disable a specific feature.

    dev is the Device object to which the request will be sent to.

    feature is the feature you want to disable.

    The recipient can be None (on which the status will be queried
    from the device), an Interface or Endpoint descriptors.
    """
    if feature != ENDPOINT_HALT:
        bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_OUT)
        dev.ctrl_transfer(
            bmRequestType=bmRequestType,
            bRequest=0x01,
            wIndex=wIndex,
            wValue=feature,
        )
    else:
        # ENDPOINT_HALT has a dedicated convenience call on the device.
        dev.clear_halt(recipient)
r"""Clear/disable a specific feature. dev is the Device object to which the request will be sent to. feature is the feature you want to disable. The recipient can be None (on which the status will be queried from the device), an Interface or Endpoint descriptors.
def check_ace(path, objectType, user, permission=None, acetype=None, propagation=None, exactPermissionMatch=False):
    '''
    Checks a path to verify the ACE (access control entry) specified exists

    Args:
        path:  path to the file/reg key
        objectType:  The type of object (FILE, DIRECTORY, REGISTRY)
        user:  user that the ACL is for
        permission:  permission to test for (READ, FULLCONTROL, etc)
        acetype:  the type of ACE (ALLOW or DENY)
        propagation:  the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc)
        exactPermissionMatch:  the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously)

    Returns (dict): 'Exists' true if the ACE exists, false if it does not

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' win_dacl.check_ace c:\\temp directory <username> fullcontrol
    '''
    ret = {'result': False,
           'Exists': False,
           'comment': ''}

    dc = daclConstants()
    objectTypeBit = dc.getObjectTypeBit(objectType)
    path = dc.processPath(path, objectTypeBit)

    permission = permission.upper() if permission else None
    # BUG FIX: the original tested `permission` here (copy-paste error),
    # which silently dropped the acetype filter whenever no permission
    # was given.
    acetype = acetype.upper() if acetype else None
    propagation = propagation.upper() if propagation else None

    permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None
    acetypebit = dc.getAceTypeBit(acetype) if acetype else None
    propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None

    sidRet = _getUserSid(user)
    if not sidRet['result']:
        return sidRet

    dacls = _get_dacl(path, objectTypeBit)
    ret['result'] = True
    if dacls:
        # Scan every ACE; the first one matching SID + all requested
        # filters sets Exists and short-circuits.
        for counter in range(0, dacls.GetAceCount()):
            ace = dacls.GetAce(counter)
            if ace[2] == sidRet['sid']:
                if not acetypebit or ace[0][0] == acetypebit:
                    if not propagationbit or (ace[0][1] & propagationbit) == propagationbit:
                        if not permissionbit:
                            ret['Exists'] = True
                            return ret
                        if exactPermissionMatch:
                            # Permission mask must match exactly.
                            if ace[1] == permissionbit:
                                ret['Exists'] = True
                                return ret
                        else:
                            # Requested bits must be a subset of the mask.
                            if (ace[1] & permissionbit) == permissionbit:
                                ret['Exists'] = True
                                return ret
    else:
        ret['comment'] = 'No DACL found for object.'
    return ret
Checks a path to verify the ACE (access control entry) specified exists Args: path: path to the file/reg key objectType: The type of object (FILE, DIRECTORY, REGISTRY) user: user that the ACL is for permission: permission to test for (READ, FULLCONTROL, etc) acetype: the type of ACE (ALLOW or DENY) propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc) exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously) Returns (dict): 'Exists' true if the ACE exists, false if it does not CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol
def plot(args):
    """
    %prog plot workdir sample chr1,chr2

    Plot some chromosomes for visual proof. Separate multiple chromosomes
    with comma. Must contain folder workdir/sample-cn/.
    """
    from jcvi.graphics.base import savefig

    p = OptionParser(plot.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")

    if len(args) != 3:
        sys.exit(not p.print_help())

    workdir, sample_key, chrs = args
    chr_list = chrs.split(",")
    hmm = CopyNumberHMM(workdir=workdir)
    hmm.plot(sample_key, chrs=chr_list)
    image_name = "{}_cn.{}".format(sample_key, iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
%prog plot workdir sample chr1,chr2 Plot some chromosomes for visual proof. Separate multiple chromosomes with comma. Must contain folder workdir/sample-cn/.
def construct_channel(self, *args, **kwargs):
    """ Create ChannelNode and build topic tree. """
    # ChannelNode is created from data in self.channel_info
    channel = self.get_channel(*args, **kwargs)
    _build_tree(channel, SAMPLE_TREE)
    raise_for_invalid_channel(channel)
    return channel
Create ChannelNode and build topic tree.
def get_transition_viewset_method(transition_name, **kwargs):
    '''
    Create a viewset method for the provided `transition_name`.

    Returns a DRF detail route that looks up the object, invokes the
    named django-fsm transition with the requesting user, optionally
    saves, and returns the serialized object.
    '''
    @detail_route(methods=['post'], **kwargs)
    def inner_func(self, request, pk=None, **kwargs):
        # Renamed from `object` -- the original shadowed the builtin.
        instance = self.get_object()
        transition_method = getattr(instance, transition_name)
        transition_method(by=self.request.user)

        if self.save_after_transition:
            instance.save()

        serializer = self.get_serializer(instance)
        return Response(serializer.data)

    return inner_func
Create a viewset method for the provided `transition_name`
def _wrap_layer(name, input_layer, build_func, dropout_rate=0.0, trainable=True):
    """Wrap layers with residual, normalization and dropout.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and generates the output tensor.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    build_output = build_func(input_layer)
    dropout_layer = build_output
    if dropout_rate > 0.0:
        dropout_layer = keras.layers.Dropout(
            rate=dropout_rate,
            name='%s-Dropout' % name,
        )(build_output)
    # Multi-input blocks add the residual from the first input only.
    residual_input = input_layer[0] if isinstance(input_layer, list) else input_layer
    add_layer = keras.layers.Add(name='%s-Add' % name)([residual_input, dropout_layer])
    return LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(add_layer)
Wrap layers with residual, normalization and dropout. :param name: Prefix of names for internal layers. :param input_layer: Input layer. :param build_func: A callable that takes the input tensor and generates the output tensor. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer.
def get(self, specification, *args, **kwargs):
    """
    A more convenient version of :py:meth:`acquire()` for when you can
    provide positional arguments in a right order.
    """
    arguments = {position: value for position, value in enumerate(args)}
    arguments.update(kwargs)
    return self.acquire(specification, arguments=arguments)
A more convenient version of :py:meth:`acquire()` for when you can provide positional arguments in a right order.
def contour_mask(self, contour):
    """ Generates a binary image with only the given contour filled in. """
    mask_data = np.zeros(self.data.shape)

    # cv2.fillPoly expects points as (x, y) == (col, row), so swap i/j.
    num_boundary = contour.boundary_pixels.shape[0]
    poly_pts = np.zeros([num_boundary, 1, 2])
    poly_pts[:, 0, 0] = contour.boundary_pixels[:, 1]
    poly_pts[:, 0, 1] = contour.boundary_pixels[:, 0]

    white = (BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL)
    cv2.fillPoly(mask_data, pts=[poly_pts.astype(np.int32)], color=white)

    # Never turn on pixels that were off in the source image.
    orig_zeros = np.where(self.data == 0)
    mask_data[orig_zeros[0], orig_zeros[1]] = 0
    return BinaryImage(mask_data.astype(np.uint8), frame=self._frame)
Generates a binary image with only the given contour filled in.
def cmServiceAbort():
    """CM SERVICE ABORT Section 9.2.7"""
    pd = TpPd(pd=0x5)
    mes_type = MessageType(mesType=0x23)  # 00100011
    return pd / mes_type
CM SERVICE ABORT Section 9.2.7
def step_command_output_should_not_contain_log_records(context):
    """
    Verifies that the command output does not contain the specified
    log records (in any order).

    .. code-block: gherkin

        Then the command output should not contain the following log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    # Use the scenario-specific log format when one was set, otherwise
    # fall back to the configured default.
    format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        # Render the expected log line and delegate to the generic
        # "should not contain" step for each record.
        output = LogRecordTable.make_output_for_row(row, format)
        context.execute_steps(u'''
            Then the command output should not contain:
                """
                {expected_output}
                """
            '''.format(expected_output=output))
Verifies that the command output does not contain the specified log records (in any order). .. code-block: gherkin Then the command output should not contain the following log records: | category | level | message | | bar | CURRENT | xxx |
def OnButtonCell(self, event):
    """Event handler for cell button toggle button"""
    if self.button_cell_button_id == event.GetId():
        # Toggled on -> send the button label; toggled off -> send False.
        text = self._get_button_label() if event.IsChecked() else False
        post_command_event(self, self.ButtonCellMsg, text=text)
    event.Skip()
Event handler for cell button toggle button
def kappa_se_calc(PA, PE, POP):
    """
    Calculate kappa standard error.

    :param PA: observed agreement among raters (overall accuracy)
    :type PA : float
    :param PE: hypothetical probability of chance agreement (random accuracy)
    :type PE : float
    :param POP: population
    :type POP : int
    :return: kappa standard error as float
    """
    try:
        variance = (PA * (1 - PA)) / (POP * ((1 - PE) ** 2))
        return math.sqrt(variance)
    except Exception:
        # Invalid inputs (e.g. PE == 1 or POP == 0) yield the string
        # "None", matching the library-wide error convention.
        return "None"
Calculate kappa standard error. :param PA: observed agreement among raters (overall accuracy) :type PA : float :param PE: hypothetical probability of chance agreement (random accuracy) :type PE : float :param POP: population :type POP:int :return: kappa standard error as float
def syslog(server, enable=True):
    '''
    Configure syslog remote logging, by default syslog will automatically be
    enabled if a server is specified. However, if you want to disable syslog
    you will need to specify a server followed by False

    CLI Example:

    .. code-block:: bash

        salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE]
        salt dell drac.syslog 0.0.0.0 False
    '''
    # NOTE(review): the backslash continuations below are *inside* the
    # string literals, so the command text includes the continuation
    # whitespace -- presumably racadm tolerates it; confirm before
    # reformatting these strings.
    if enable and __execute_cmd('config -g cfgRemoteHosts -o \
            cfgRhostsSyslogEnable 1'):
        # Enable succeeded: point syslog at the requested server.
        return __execute_cmd('config -g cfgRemoteHosts -o \
                cfgRhostsSyslogServer1 {0}'.format(server))

    # Either disabling was requested or enabling failed: turn syslog off.
    return __execute_cmd('config -g cfgRemoteHosts -o cfgRhostsSyslogEnable 0')
Configure syslog remote logging, by default syslog will automatically be enabled if a server is specified. However, if you want to disable syslog you will need to specify a server followed by False CLI Example: .. code-block:: bash salt dell drac.syslog [SYSLOG IP] [ENABLE/DISABLE] salt dell drac.syslog 0.0.0.0 False
def Lewis(D=None, alpha=None, Cp=None, k=None, rho=None):
    r'''Calculates Lewis number or `Le` for a fluid with the given
    parameters.

    .. math::
        Le = \frac{k}{\rho C_p D} = \frac{\alpha}{D}

    Inputs can be either of the following sets:

    * Diffusivity and Thermal diffusivity
    * Diffusivity, heat capacity, thermal conductivity, and density

    Parameters
    ----------
    D : float
        Diffusivity of a species, [m^2/s]
    alpha : float, optional
        Thermal diffusivity, [m^2/s]
    Cp : float, optional
        Heat capacity, [J/kg/K]
    k : float, optional
        Thermal conductivity, [W/m/K]
    rho : float, optional
        Density, [kg/m^3]

    Returns
    -------
    Le : float
        Lewis number []

    Notes
    -----
    .. math::
        Le=\frac{\text{Thermal diffusivity}}{\text{Mass diffusivity}}
        = \frac{Sc}{Pr}

    An error is raised if none of the required input sets are provided.

    Examples
    --------
    >>> Lewis(D=22.6E-6, alpha=19.1E-6)
    0.8451327433628318
    >>> Lewis(D=22.6E-6, rho=800., k=.2, Cp=2200)
    0.00502815768302494

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers'
       Handbook, Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York:: Springer, 2010.
    '''
    # Explicit None checks: the original truthiness test (`if k and Cp
    # and rho`) would wrongly reject a legitimate zero value.
    if k is not None and Cp is not None and rho is not None:
        alpha = k/(rho*Cp)
    elif alpha is not None:
        pass
    else:
        # ValueError (a subclass of Exception, so backward-compatible)
        # instead of the original bare Exception.
        raise ValueError('Insufficient information provided for Le calculation')
    return alpha/D
r'''Calculates Lewis number or `Le` for a fluid with the given parameters. .. math:: Le = \frac{k}{\rho C_p D} = \frac{\alpha}{D} Inputs can be either of the following sets: * Diffusivity and Thermal diffusivity * Diffusivity, heat capacity, thermal conductivity, and density Parameters ---------- D : float Diffusivity of a species, [m^2/s] alpha : float, optional Thermal diffusivity, [m^2/s] Cp : float, optional Heat capacity, [J/kg/K] k : float, optional Thermal conductivity, [W/m/K] rho : float, optional Density, [kg/m^3] Returns ------- Le : float Lewis number [] Notes ----- .. math:: Le=\frac{\text{Thermal diffusivity}}{\text{Mass diffusivity}} = \frac{Sc}{Pr} An error is raised if none of the required input sets are provided. Examples -------- >>> Lewis(D=22.6E-6, alpha=19.1E-6) 0.8451327433628318 >>> Lewis(D=22.6E-6, rho=800., k=.2, Cp=2200) 0.00502815768302494 References ---------- .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook, Eighth Edition. McGraw-Hill Professional, 2007. .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and Applications. Boston: McGraw Hill Higher Education, 2006. .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition. Berlin; New York:: Springer, 2010.
def parse_media_type(media_type):
    '''Returns type, subtype, parameter tuple from an http media_type.

    Can be applied to the 'Accept' or 'Content-Type' http header fields.
    '''
    full_type, _, parameter = str(media_type).partition(';')
    main_type, _, subtype = full_type.partition('/')
    parts = (main_type, subtype, parameter)
    # Empty components become None after stripping whitespace.
    return tuple(part.strip() or None for part in parts)
Returns type, subtype, parameter tuple from an http media_type. Can be applied to the 'Accept' or 'Content-Type' http header fields.
def sample_indexes_by_sequence(indexes, sequence):
    """Samples trajectory/time indexes according to the given sequence of states

    Parameters
    ----------
    indexes : list of ndarray( (N_i, 2) )
        For each state, all trajectory and time indexes where this state occurs.
        Each matrix has a number of rows equal to the number of occurrences of
        the corresponding state, with rows consisting of a tuple (i, t), where
        i is the index of the trajectory and t is the time index within the
        trajectory.
    sequence : array of integers
        A sequence of discrete states. For each state, a trajectory/time index
        will be sampled at which dtrajs have an occurrence of this state.

    Returns
    -------
    indexes : ndarray( (N, 2) )
        The sampled index sequence, with one (i, t) row per sequence entry.
    """
    result = np.zeros((len(sequence), 2), dtype=int)
    for position, state in enumerate(sequence):
        # Draw one occurrence of this state uniformly at random.
        pick = np.random.randint(indexes[state].shape[0])
        result[position, :] = indexes[state][pick, :]
    return result
Samples trajectory/time indexes according to the given sequence of states Parameters ---------- indexes : list of ndarray( (N_i, 2) ) For each state, all trajectory and time indexes where this state occurs. Each matrix has a number of rows equal to the number of occurrences of the corresponding state, with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory. sequence : array of integers A sequence of discrete states. For each state, a trajectory/time index will be sampled at which dtrajs have an occurrence of this state Returns ------- indexes : ndarray( (N, 2) ) The sampled index sequence. Index array with a number of rows equal to N=len(sequence), with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
def receive(self):
    '''
    Return the message received and the address.
    '''
    try:
        msg, addr = self.skt.recvfrom(self.buffer_size)
    except socket.error as error:
        log.error('Received listener socket error: %s', error, exc_info=True)
        raise ListenerException(error)
    else:
        log.debug('[%s] Received %s from %s', msg, addr, time.time())
        return msg, addr[0]
Return the message received and the address.
def get_segmentize_value(input_file=None, tile_pyramid=None):
    """
    Return the recommended segmentation value in input file units.

    It is calculated by multiplying the raster pixel size with the tile
    shape in pixels.

    Parameters
    ----------
    input_file : str
        location of a file readable by rasterio
    tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``
        tile pyramid to estimate target tile size

    Returns
    -------
    segmentize value : float
        length suggested of line segmentation to reproject file bounds
    """
    deprecation = DeprecationWarning(
        "get_segmentize_value() has moved to mapchete.io"
    )
    warnings.warn(deprecation)
    # Delegate to the relocated implementation.
    return io.get_segmentize_value(input_file, tile_pyramid)
Return the recommended segmentation value in input file units. It is calculated by multiplying raster pixel size with tile shape in pixels. Parameters ---------- input_file : str location of a file readable by rasterio tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid`` tile pyramid to estimate target tile size Returns ------- segmentize value : float length suggested of line segmentation to reproject file bounds
def main(global_config, **settings):
    """
    Get a PyShop WSGI application configured with settings.
    """
    if sys.version_info[0] < 3:
        # Python 2 only: force utf-8 as the default encoding.
        reload(sys)
        sys.setdefaultencoding('utf-8')

    settings = dict(settings)

    # Scoping sessions for Pyramid ensure session are commit/rollback
    # after the template has been rendered
    create_engine(settings, scoped=True)

    authn_policy = RouteSwitchAuthPolicy(
        secret=settings['pyshop.cookie_key'],
        callback=groupfinder,
    )
    authz_policy = ACLPolicy()

    config = Configurator(
        settings=settings,
        root_factory=RootFactory,
        route_prefix=settings.get('pyshop.route_prefix'),
        locale_negotiator=locale_negotiator,
        authentication_policy=authn_policy,
        authorization_policy=authz_policy,
    )
    config.end()
    return config.make_wsgi_app()
Get a PyShop WSGI application configured with settings.
def clear_messages(self):
    """
    Clears all messages.
    """
    while self._messages:
        msg = self._messages.pop(0)
        user_data = msg.block.userData()
        # Drop any per-block message list in place.
        if user_data and hasattr(user_data, 'messages'):
            user_data.messages[:] = []
        if msg.decoration:
            self.editor.decorations.remove(msg.decoration)
Clears all messages.
def p_block_replace(self, p):
    """ block_decl : identifier t_semicolon
    """
    # NOTE: the docstring above is the PLY grammar rule for this
    # production -- it is read at runtime and must not be edited.
    m = p[1].parse(None)
    block = self.scope.blocks(m.raw())
    if block:
        # A block with this name exists in scope: substitute a copy of
        # its inner rules.
        p[0] = block.copy_inner(self.scope)
    else:
        # fallback to mixin. Allow calls to mixins without parens;
        # resolution is deferred until evaluation time.
        p[0] = Deferred(p[1], None, p.lineno(2))
block_decl : identifier t_semicolon
def model_to_owl(model, fname):
    """Save a BioPAX model object as an OWL file.

    Parameters
    ----------
    model : org.biopax.paxtools.model.Model
        A BioPAX model object (java object).
    fname : str
        The name of the OWL file to save the model in.
    """
    handler_cls = autoclass('org.biopax.paxtools.io.SimpleIOHandler')
    handler = handler_cls(autoclass('org.biopax.paxtools.model.BioPAXLevel').L3)

    try:
        out_stream = autoclass('java.io.FileOutputStream')(fname)
    except JavaException:
        logger.error('Could not open data file %s' % fname)
        return

    # Copy every object into a fresh L3 model before serializing.
    factory = autoclass('org.biopax.paxtools.model.BioPAXLevel').L3.getDefaultFactory()
    model_out = factory.createModel()
    for obj in model.getObjects().toArray():
        model_out.add(obj)

    handler.convertToOWL(model_out, out_stream)
    out_stream.close()
Save a BioPAX model object as an OWL file. Parameters ---------- model : org.biopax.paxtools.model.Model A BioPAX model object (java object). fname : str The name of the OWL file to save the model in.
def create_notification_plan(self, label=None, name=None, critical_state=None,
                             ok_state=None, warning_state=None):
    """
    Creates a notification plan to be executed when a monitoring check
    triggers an alarm.
    """
    manager = self._notification_plan_manager
    return manager.create(label=label, name=name,
                          critical_state=critical_state,
                          ok_state=ok_state,
                          warning_state=warning_state)
Creates a notification plan to be executed when a monitoring check triggers an alarm.
def get_root_subject(self):
    '''Returns the BNode which describes the topmost subject of the graph.'''
    manifest = URIRef(self.manifest)
    if list(self.rdf.triples((manifest, None, None))):
        # The manifest URI itself appears as a subject.
        return manifest
    # BUG FIX: generators have no .next() method in Python 3;
    # use the builtin next() instead.
    return next(self.rdf.subjects(None, self.manifest))
Returns the BNode which describes the topmost subject of the graph.
def config_absent(name):
    '''
    Ensure configuration property is absent in /usbkey/config

    name : string
        name of property
    '''
    name = name.lower()
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # load configuration
    config = _load_config()

    if name in config:
        # delete property
        del config[name]
        ret['changes'][name] = None
        ret['comment'] = 'property {0} deleted'.format(name)
    else:
        # we're good
        ret['comment'] = 'property {0} is absent'.format(name)

    # apply change if needed (and not in test mode)
    if not __opts__['test'] and ret['changes']:
        ret['result'] = _write_config(config)

    return ret
Ensure configuration property is absent in /usbkey/config name : string name of property
def _getfunctionlist(self):
    """(internal use) Return, creating if needed, the handler list for this event."""
    if not hasattr(self.obj, '__eventhandler__'):
        # First registration on this object: attach the handler registry.
        self.obj.__eventhandler__ = {}
    return self.obj.__eventhandler__.setdefault(self.event, [])
(internal use)
def vcenter_interval(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    ET.SubElement(vcenter, "interval").text = kwargs.pop('interval')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def initialize_concept_scheme(rdf, cs, label, language, set_modified):
    """Initialize a concept scheme: Optionally add a label if the concept
    scheme doesn't have a label, and optionally add a dct:modified
    timestamp."""

    # check whether the concept scheme is unlabeled, and label it if possible
    labels = list(rdf.objects(cs, RDFS.label)) + \
        list(rdf.objects(cs, SKOS.prefLabel))
    if not labels:
        if label:
            logging.info(
                "Unlabeled concept scheme detected. Setting label to '%s'" %
                label)
            rdf.add((cs, RDFS.label, Literal(label, language)))
        else:
            logging.warning(
                "Concept scheme has no label(s). "
                "Use --label option to set the concept scheme label.")

    if set_modified:
        # UTC timestamp, truncated to whole seconds.
        curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
        rdf.remove((cs, DCTERMS.modified, None))
        rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime)))
Initialize a concept scheme: Optionally add a label if the concept scheme doesn't have a label, and optionally add a dct:modified timestamp.
def validate_block(self, block: BaseBlock) -> None:
    """
    Validate the given block.

    :param block: the block to validate; must be an instance of this
        VM's block class.
    :raises ValidationError: if the block type, header, transaction
        root, uncle count/hash, or state root check fails.
    """
    if not isinstance(block, self.get_block_class()):
        raise ValidationError(
            "This vm ({0!r}) is not equipped to validate a block of type {1!r}".format(
                self,
                block,
            )
        )

    if block.is_genesis:
        # Genesis blocks have no parent, so only the extra_data length
        # bound can be checked.
        validate_length_lte(block.header.extra_data, 32, title="BlockHeader.extra_data")
    else:
        parent_header = get_parent_header(block.header, self.chaindb)
        self.validate_header(block.header, parent_header)

    # The header must commit to exactly the transactions in the body.
    tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
    if tx_root_hash != block.header.transaction_root:
        raise ValidationError(
            "Block's transaction_root ({0}) does not match expected value: {1}".format(
                block.header.transaction_root, tx_root_hash))

    if len(block.uncles) > MAX_UNCLES:
        raise ValidationError(
            "Blocks may have a maximum of {0} uncles. Found "
            "{1}.".format(MAX_UNCLES, len(block.uncles))
        )

    if not self.chaindb.exists(block.header.state_root):
        raise ValidationError(
            "`state_root` was not found in the db.\n"
            "- state_root: {0}".format(
                block.header.state_root,
            )
        )

    # The header must also commit to the RLP encoding of the uncle list.
    local_uncle_hash = keccak(rlp.encode(block.uncles))
    if local_uncle_hash != block.header.uncles_hash:
        raise ValidationError(
            "`uncles_hash` and block `uncles` do not match.\n"
            " - num_uncles : {0}\n"
            " - block uncle_hash : {1}\n"
            " - header uncle_hash: {2}".format(
                len(block.uncles),
                local_uncle_hash,
                block.header.uncles_hash,
            )
        )
Validate the given block.
def insert(self, context):
    """
    Create connection pool.

    :param resort.engine.execution.Context context:
        Current execution context.
    """
    payload = {
        "id": self.__name,
        "resType": self.__res_type,
        "datasourceClassname": self.__ds_classname,
        "property": props_value(self.__props)
    }
    # POST the pool definition; the endpoint returns a status and message.
    status_code, msg = self.__endpoint.post(
        "/resources/jdbc-connection-pool", data=payload)
    self.__available = True
Create connection pool. :param resort.engine.execution.Context context: Current execution context.
def convert_advanced_relu(builder, layer, input_names, output_names, keras_layer): """ Convert an ReLU layer with maximum value from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names input_name, output_name = (input_names[0], output_names[0]) if keras_layer.max_value is None: builder.add_activation(layer, 'RELU', input_name, output_name) return # No direct support of RELU with max-activation value - use negate and # clip layers relu_output_name = output_name + '_relu' builder.add_activation(layer, 'RELU', input_name, relu_output_name) # negate it neg_output_name = relu_output_name + '_neg' builder.add_activation(layer+'__neg__', 'LINEAR', relu_output_name, neg_output_name,[-1.0, 0]) # apply threshold clip_output_name = relu_output_name + '_clip' builder.add_unary(layer+'__clip__', neg_output_name, clip_output_name, 'threshold', alpha = -keras_layer.max_value) # negate it back builder.add_activation(layer+'_neg2', 'LINEAR', clip_output_name, output_name,[-1.0, 0])
Convert an ReLU layer with maximum value from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object.
def set_key(cls, k, v):
    """Attach stateless information to the class using the flask session
    dict.

    The key is namespaced with the class name so that different classes
    sharing one session cannot collide.
    """
    namespaced = cls.__name__ + "__" + k
    session[namespaced] = v
Allows attaching stateless information to the class using the flask session dict
def request_ride(
    self,
    ride_type=None,
    start_latitude=None,
    start_longitude=None,
    start_address=None,
    end_latitude=None,
    end_longitude=None,
    end_address=None,
    primetime_confirmation_token=None,
):
    """Request a ride on behalf of a Lyft user.

    Parameters
        ride_type (str)
            Name of the type of ride you're requesting.
            E.g., lyft, lyft_plus
        start_latitude (float)
            Latitude component of a start location.
        start_longitude (float)
            Longitude component of a start location.
        start_address (str)
            Optional pickup address.
        end_latitude (float)
            Optional latitude component of a end location.
            Destination would be NULL in this case.
        end_longitude (float)
            Optional longitude component of a end location.
            Destination would be NULL in this case.
        end_address (str)
            Optional destination address.
        primetime_confirmation_token (str)
            Optional string containing the Prime Time confirmation token
            to book rides having Prime Time Pricing.

    Returns
        (Response)
            A Response object containing the ride request ID and other
            details about the requested ride.
    """
    origin = {
        'lat': start_latitude,
        'lng': start_longitude,
        'address': start_address,
    }
    destination = {
        'lat': end_latitude,
        'lng': end_longitude,
        'address': end_address,
    }
    args = {
        'ride_type': ride_type,
        'origin': origin,
        'destination': destination,
        'primetime_confirmation_token': primetime_confirmation_token,
    }

    return self._api_call('POST', 'v1/rides', args=args)
Request a ride on behalf of a Lyft user. Parameters ride_type (str) Name of the type of ride you're requesting. E.g., lyft, lyft_plus start_latitude (float) Latitude component of a start location. start_longitude (float) Longitude component of a start location. start_address (str) Optional pickup address. end_latitude (float) Optional latitude component of a end location. Destination would be NULL in this case. end_longitude (float) Optional longitude component of a end location. Destination would be NULL in this case. end_address (str) Optional destination address. primetime_confirmation_token (str) Optional string containing the Prime Time confirmation token to book rides having Prime Time Pricing. Returns (Response) A Response object containing the ride request ID and other details about the requested ride.
def cli(env, identifier):
    """Remove SSL certificate."""
    manager = SoftLayer.SSLManager(env.client)
    # Confirm unless confirmations are explicitly skipped.
    confirmed = env.skip_confirmations or formatting.no_going_back('yes')
    if not confirmed:
        raise exceptions.CLIAbort("Aborted.")
    manager.remove_certificate(identifier)
Remove SSL certificate.
def delete_instance(self, instance_id, project_id=None):
    """
    Deletes the specified Cloud Bigtable instance.
    Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
    not exist.

    :param project_id: Optional, Google Cloud Platform project ID where the
        BigTable exists. If set to None or missing,
        the default project_id from the GCP connection is used.
    :type project_id: str
    :param instance_id: The ID of the Cloud Bigtable instance.
    :type instance_id: str
    """
    instance = self.get_instance(instance_id=instance_id,
                                 project_id=project_id)
    if not instance:
        # Nothing to delete; log and bail out.
        self.log.info("The instance '%s' does not exist in project '%s'. Exiting",
                      instance_id, project_id)
        return
    instance.delete()
Deletes the specified Cloud Bigtable instance. Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does not exist. :param project_id: Optional, Google Cloud Platform project ID where the BigTable exists. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :param instance_id: The ID of the Cloud Bigtable instance. :type instance_id: str
def fix_whitespace(tokens, start, result):
    """Fix whitespace around hyphens and commas. Can be used to remove
    whitespace tokenization artefacts."""
    for element in result:
        for child in element.iter():
            text = child.text.replace(' , ', ', ')
            # Collapse spaces around every known hyphen variant.
            for hyphen in HYPHENS:
                text = text.replace(' %s ' % hyphen, '%s' % hyphen)
            # Tighten single characters wrapped by spaced hyphens.
            child.text = re.sub(r'- (.) -', r'-\1-', text)
    return result
Fix whitespace around hyphens and commas. Can be used to remove whitespace tokenization artefacts.
def authenticate(self, bound_route, actual_params) -> bool:
    """
    Run the pre-defined authentication service.

    :param bound_route str route matched
    :param actual_params dict actual url parameters
    :rtype: bool
    """
    if self.__auth_service is None:
        # No service configured: everything is allowed.
        return True

    auth_route = "{0}_{1}{2}".format(self.__method, self.__route, bound_route)
    auth_data = self.__auth_service.authenticate(
        self.__request, auth_route, actual_params)
    if auth_data is not True:
        return False
    self.app.auth_data = self.__auth_service.auth_data
    return True
Runs the pre-defined authentication service :param bound_route str route matched :param actual_params dict actual url parameters :rtype: bool
def read_api_service_status(self, name, **kwargs):  # noqa: E501
    """read_api_service_status  # noqa: E501

    read status of the specified APIService  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_api_service_status(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the APIService (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1APIService
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    call = self.read_api_service_status_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: the caller gets the request thread back.
        return call(name, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    data = call(name, **kwargs)  # noqa: E501
    return data
read_api_service_status # noqa: E501 read status of the specified APIService # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_api_service_status(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the APIService (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1beta1APIService If the method is called asynchronously, returns the request thread.
def get_states(self, action_name, config_name, instances=None, map_name=None, **kwargs):
    """
    Returns a generator of states in relation to the indicated action.

    :param action_name: Action name.
    :type action_name: unicode | str
    :param config_name: Name(s) of container configuration(s) or MapConfigId tuple(s).
    :type config_name: unicode | str | collections.Iterable[unicode | str] |
      dockermap.map.input.InputConfigId | collections.Iterable[dockermap.map.input.InputConfigId]
    :param instances: Optional instance names, where applicable but not included in ``config_name``.
    :type instances: unicode | str | collections.Iterable[unicode | str]
    :param map_name: Optional map name, where not included in ``config_name``.
    :param kwargs: Additional kwargs for state generation, action generation, runner, or the client action.
    :return: Resulting states of the configurations.
    :rtype: collections.Iterable[dockermap.map.state.ConfigState]
    """
    policy = self.get_policy()
    # Forced-update ids are propagated to the state generator through kwargs.
    _set_forced_update_ids(kwargs, policy.container_maps, map_name or self._default_map, instances)
    state_generator = self.get_state_generator(action_name, policy, kwargs)
    log.debug("Remaining kwargs passed to client actions: %s", kwargs)
    # Resolve all names/tuples into concrete map config ids before generating states.
    config_ids = get_map_config_ids(config_name, policy.container_maps, map_name or self._default_map,
                                    instances)
    log.debug("Generating states for configurations: %s", config_ids)
    return state_generator.get_states(config_ids)
Returns a generator of states in relation to the indicated action. :param action_name: Action name. :type action_name: unicode | str :param config_name: Name(s) of container configuration(s) or MapConfigId tuple(s). :type config_name: unicode | str | collections.Iterable[unicode | str] | dockermap.map.input.InputConfigId | collections.Iterable[dockermap.map.input.InputConfigId] :param instances: Optional instance names, where applicable but not included in ``config_name``. :type instances: unicode | str | collections.Iterable[unicode | str] :param map_name: Optional map name, where not included in ``config_name``. :param kwargs: Additional kwargs for state generation, action generation, runner, or the client action. :return: Resulting states of the configurations. :rtype: collections.Iterable[dockermap.map.state.ConfigState]
def combine_HSPs(a):
    """
    Combine HSPs into a single BlastLine.

    The first HSP in ``a`` is mutated in place to absorb the remaining
    ones, then returned.
    """
    merged, rest = a[0], a[1:]
    if not rest:
        return merged

    for hsp in rest:
        assert merged.query == hsp.query
        assert merged.subject == hsp.subject
        merged.hitlen += hsp.hitlen
        merged.nmismatch += hsp.nmismatch
        merged.ngaps += hsp.ngaps
        merged.qstart = min(merged.qstart, hsp.qstart)
        merged.qstop = max(merged.qstop, hsp.qstop)
        merged.sstart = min(merged.sstart, hsp.sstart)
        merged.sstop = max(merged.sstop, hsp.sstop)
        if merged.has_score:
            merged.score += hsp.score

    # Recompute percent identity over the combined alignment length.
    merged.pctid = 100 - (merged.nmismatch + merged.ngaps) * 100. / merged.hitlen
    return merged
Combine HSPs into a single BlastLine.
def set_status(self, value):
    """
    Set the status of the motor to the specified value if not already set.
    """
    if self._status == value:
        # Already in the requested state; nothing to do.
        return
    previous = self._status
    self._status = value
    logger.info("{} changing status from {} to {}".format(
        self, previous.name, value.name))
    self._statusChanged(previous, value)
Set the status of the motor to the specified value if not already set.
def full_y(self, Y):
    """Add self(shunt) into full Jacobian Y

    Accumulates this component's shunt admittance (g + jb per element,
    masked by the status vector u) onto the diagonal entries of the
    admittance matrix Y indexed by the bus indices in self.a.
    """
    if not self.n:
        # No elements of this type; nothing to add.
        return
    # Complex shunt admittance per element.
    Ysh = matrix(self.g, (self.n, 1), 'd') + \
        1j * matrix(self.b, (self.n, 1), 'd')
    # Zero out entries for out-of-service elements via u.
    uYsh = mul(self.u, Ysh)
    Y += spmatrix(uYsh, self.a, self.a, Y.size, 'z')
Add self(shunt) into full Jacobian Y
def targets(self):
    """
    Search the targets folder for FASTA files, create the multi-FASTA file of
    all targets if necessary, and populate objects
    """
    logging.info('Performing analysis with {} targets folder'.format(self.analysistype))
    for sample in self.runmetadata:
        sample[self.analysistype].runanalysis = True
        # Targets live in a per-genus subfolder of the target path.
        sample[self.analysistype].targetpath = (os.path.join(self.targetpath,
                                                             sample[self.analysistype].pointfindergenus))
        # There is a relatively strict databasing scheme necessary for the custom targets. Eventually,
        # there will be a helper script to combine individual files into a properly formatted combined file
        try:
            sample[self.analysistype].baitfile = glob(os.path.join(sample[self.analysistype].targetpath,
                                                                   '*.fasta'))[0]
        # If the fasta file is missing, raise a custom error
        except IndexError:
            # Combine any .tfa files in the directory into a combined targets .fasta file
            fsafiles = glob(os.path.join(sample[self.analysistype].targetpath, '*.fsa'))
            if fsafiles:
                combinetargets(fsafiles, sample[self.analysistype].targetpath)
            try:
                sample[self.analysistype].baitfile = glob(os.path.join(sample[self.analysistype].targetpath,
                                                                       '*.fasta'))[0]
            except IndexError as e:
                # noinspection PyPropertyAccess
                e.args = [
                    'Cannot find the combined fasta file in {}. Please note that the file must have a '
                    '.fasta extension'.format(sample[self.analysistype].targetpath)]
                # Only re-raise when the target directory actually exists;
                # otherwise mark the sample as not analysable.
                if os.path.isdir(sample[self.analysistype].targetpath):
                    raise
                else:
                    sample[self.analysistype].runanalysis = False
    for sample in self.runmetadata:
        # Set the necessary attributes
        sample[self.analysistype].outputdir = os.path.join(sample.run.outputdirectory, self.analysistype)
        make_path(sample[self.analysistype].outputdir)
        sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir,
                                                        'logout.txt')
        sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir,
                                                        'logerr.txt')
        sample[self.analysistype].baitedfastq = \
            os.path.join(sample[self.analysistype].outputdir,
                         '{at}_targetMatches.fastq.gz'.format(at=self.analysistype))
Search the targets folder for FASTA files, create the multi-FASTA file of all targets if necessary, and populate objects
def put(self, resource, obj, operation_timeout=None,
        max_envelope_size=None, locale=None):
    """
    resource can be a URL or a ResourceLocator

    NOTE(review): headers are not built yet and most parameters
    (resource, operation_timeout, max_envelope_size, locale) are
    currently unused; the call is forwarded directly to the service.
    """
    return self.service.invoke(None, obj)
resource can be a URL or a ResourceLocator
def facets(self, *args, **kwargs):
    """
    Returns a dictionary with the requested facets.

    The facets function supports string args, and keyword args.

    q.facets('field_1', 'field_2') will return facets for field_1
    and field_2.

    q.facets(field_1={'limit': 0}, field_2={'limit': 10}) will return
    all facets for field_1 and 10 facets for field_2.
    """
    # Positional names get default (empty) facet options; keyword args
    # carry explicit per-field options.
    facet_spec = {field: {} for field in args}
    facet_spec.update(kwargs)

    if not facet_spec:
        raise AttributeError('Faceting requires at least one field')
    for field in facet_spec:
        if not isinstance(field, six.string_types):
            raise AttributeError('Facet field arguments must be strings')

    # Execute a zero-row query so only facet data is computed.
    query = self._clone()
    query._limit = 0
    query.execute(offset=0, facets=facet_spec)
    return query._response.get('facets')
Returns a dictionary with the requested facets. The facets function supports string args, and keyword args. q.facets('field_1', 'field_2') will return facets for field_1 and field_2. q.facets(field_1={'limit': 0}, field_2={'limit': 10}) will return all facets for field_1 and 10 facets for field_2.
def stop(self):
    """
    Stops the ``Pipers`` according to pipeline topology.

    Input pipers are force-stopped first, then all output pipers are
    drained until exhaustion, then the remaining (non-input) pipers are
    stopped in postorder, and finally the input pipers' imaps are
    stopped.

    NOTE: this is Python 2 code (``except Exception, excp`` syntax and
    ``piper.next()``).
    """
    self.log.debug('%s begins stopping routine' % repr(self))
    self.log.debug('%s triggers stopping in input pipers' % repr(self))
    inputs = self.get_inputs()
    for piper in inputs:
        piper.stop(forced=True)
    self.log.debug('%s pulls output pipers until stop' % repr(self))
    outputs = self.get_outputs()
    while outputs:
        for piper in outputs:
            try:
                # for i in xrange(stride)?
                piper.next()
            except StopIteration:
                # Exhausted: drop it from the drain list.
                # NOTE(review): removing from ``outputs`` while iterating
                # it relies on the subsequent ``continue``/loop restart.
                outputs.remove(piper)
                self.log.debug("%s stopped output piper: %s" % \
                               (repr(self), repr(piper)))
                continue
            except Exception, excp:
                # Best-effort drain: log and keep pulling other pipers.
                self.log.debug("%s %s raised an exception: %s" % \
                               (repr(self), piper, excp))
    self.log.debug("%s stops the remaining pipers" % repr(self))
    postorder = self.postorder()
    for piper in postorder:
        if piper not in inputs:
            piper.stop(ends=[0])
    self.log.debug("%s finishes stopping of input pipers" % repr(self))
    for piper in inputs:
        if hasattr(piper.imap, 'stop'):
            piper.imap.stop(ends=[0])
    self.log.debug('%s finishes stopping routine' % repr(self))
Stops the ``Pipers`` according to pipeline topology.
def lookup(self, asn=None, inc_raw=False, retry_count=3, response=None,
           field_list=None, asn_alts=None, asn_methods=None):
    """
    The function for retrieving and parsing ASN origin whois information
    via port 43/tcp (WHOIS).

    Args:
        asn (:obj:`str`): The ASN (required).
        inc_raw (:obj:`bool`): Whether to include the raw results in the
            returned dictionary. Defaults to False.
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.
        response (:obj:`str`): Optional response object, this bypasses the
            Whois lookup. Defaults to None.
        field_list (:obj:`list`): If provided, fields to parse:
            ['description', 'maintainer', 'updated', 'source']
            If None, defaults to all.
        asn_alts (:obj:`list`): Additional lookup types to attempt if the
            ASN whois lookup fails. If None, defaults to all ['http'].
            *WARNING* deprecated in favor of new argument asn_methods.
        asn_methods (:obj:`list`): ASN lookup types to attempt, in order.
            If None, defaults to all ['whois', 'http'].

    Returns:
        dict: The ASN origin lookup results

        ::

            {
                'query' (str) - The Autonomous System Number
                'nets' (list) - Dictionaries containing network
                    information which consists of the fields listed in the
                    ASN_ORIGIN_WHOIS dictionary.
                'raw' (str) - Raw ASN origin whois results if the inc_raw
                    parameter is True.
            }

    Raises:
        ValueError: methods argument requires one of whois, http.
        ASNOriginLookupError: ASN origin lookup failed.
    """
    # Normalize bare numbers to the 'AS<number>' form.
    if asn[0:2] != 'AS':
        asn = 'AS{0}'.format(asn)

    if asn_methods is None:
        if asn_alts is None:
            lookups = ['whois', 'http']
        else:
            # Deprecated path: translate asn_alts into the method list.
            from warnings import warn
            warn('ASNOrigin.lookup() asn_alts argument has been deprecated'
                 ' and will be removed. You should now use the asn_methods'
                 ' argument.')
            lookups = ['whois'] + asn_alts
    else:
        if {'whois', 'http'}.isdisjoint(asn_methods):
            raise ValueError('methods argument requires at least one of '
                             'whois, http.')
        lookups = asn_methods

    # Create the return dictionary.
    results = {
        'query': asn,
        'nets': [],
        'raw': None
    }

    is_http = False

    # Only fetch the response if we haven't already.
    if response is None:
        for index, lookup_method in enumerate(lookups):
            if lookup_method == 'whois':
                try:
                    log.debug('Response not given, perform ASN origin '
                              'WHOIS lookup for {0}'.format(asn))
                    # Retrieve the whois data.
                    response = self._net.get_asn_origin_whois(
                        asn=asn, retry_count=retry_count
                    )
                except (WhoisLookupError, WhoisRateLimitError) as e:
                    # Fall through to the next configured method.
                    log.debug('ASN origin WHOIS lookup failed: {0}'
                              ''.format(e))
                    pass
            elif lookup_method == 'http':
                try:
                    log.debug('Response not given, perform ASN origin '
                              'HTTP lookup for: {0}'.format(asn))
                    tmp = ASN_ORIGIN_HTTP['radb']['form_data']
                    tmp[str(ASN_ORIGIN_HTTP['radb']['form_data_asn_field']
                            )] = asn
                    response = self._net.get_http_raw(
                        url=ASN_ORIGIN_HTTP['radb']['url'],
                        retry_count=retry_count,
                        request_type='POST',
                        form_data=tmp
                    )
                    is_http = True  # pragma: no cover
                except HTTPLookupError as e:
                    log.debug('ASN origin HTTP lookup failed: {0}'
                              ''.format(e))
                    pass

        if response is None:
            raise ASNOriginLookupError('ASN origin lookup failed with no '
                                       'more methods to try.')

    # If inc_raw parameter is True, add the response to return dictionary.
    if inc_raw:
        results['raw'] = response

    nets = []
    nets_response = self.get_nets_radb(response, is_http)
    nets.extend(nets_response)

    # Field templates differ between the HTTP and WHOIS sources.
    if is_http:  # pragma: no cover
        fields = ASN_ORIGIN_HTTP
    else:
        fields = ASN_ORIGIN_WHOIS

    # Iterate through all of the network sections and parse out the
    # appropriate fields for each.
    log.debug('Parsing ASN origin data')

    for index, net in enumerate(nets):
        # Each section is parsed up to the start of the following one.
        section_end = None
        if index + 1 < len(nets):
            section_end = nets[index + 1]['start']

        temp_net = self.parse_fields(
            response,
            fields['radb']['fields'],
            section_end,
            net['end'],
            field_list
        )

        # Merge the net dictionaries.
        net.update(temp_net)

        # The start and end values are no longer needed.
        del net['start'], net['end']

    # Add the networks to the return dictionary.
    results['nets'] = nets

    return results
The function for retrieving and parsing ASN origin whois information via port 43/tcp (WHOIS). Args: asn (:obj:`str`): The ASN (required). inc_raw (:obj:`bool`): Whether to include the raw results in the returned dictionary. Defaults to False. retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. response (:obj:`str`): Optional response object, this bypasses the Whois lookup. Defaults to None. field_list (:obj:`list`): If provided, fields to parse: ['description', 'maintainer', 'updated', 'source'] If None, defaults to all. asn_alts (:obj:`list`): Additional lookup types to attempt if the ASN whois lookup fails. If None, defaults to all ['http']. *WARNING* deprecated in favor of new argument asn_methods. asn_methods (:obj:`list`): ASN lookup types to attempt, in order. If None, defaults to all ['whois', 'http']. Returns: dict: The ASN origin lookup results :: { 'query' (str) - The Autonomous System Number 'nets' (list) - Dictionaries containing network information which consists of the fields listed in the ASN_ORIGIN_WHOIS dictionary. 'raw' (str) - Raw ASN origin whois results if the inc_raw parameter is True. } Raises: ValueError: methods argument requires one of whois, http. ASNOriginLookupError: ASN origin lookup failed.
def simulate(self, data, mime=None):
    """Simulate the arrival of feeddata into the feed. Useful if the remote
    Thing doesn't publish very often.

    `data` (mandatory) (as applicable) The data you want to use to simulate
    the arrival of remote feed data

    `mime` (optional) (string) The mime type of your data. See:
    [share()](./Point.m.html#IoticAgent.IOT.Point.Feed.share)
    """
    # Forward the synthetic sample to the agent client as though the
    # remote thing had shared it.
    self._client.simulate_feeddata(self.__pointid, data, mime)
Simulate the arrival of feeddata into the feed. Useful if the remote Thing doesn't publish very often. `data` (mandatory) (as applicable) The data you want to use to simulate the arrival of remote feed data `mime` (optional) (string) The mime type of your data. See: [share()](./Point.m.html#IoticAgent.IOT.Point.Feed.share)
def idxterms(self):
    """List of index terms, or None when the record carries none."""
    try:
        entries = listify(self._json.get("idxterms", {}).get('mainterm', []))
    except AttributeError:  # idxterms is empty
        return None
    try:
        return [entry['$'] for entry in entries]
    except AttributeError:
        return None
List of index terms.
def readadd(file, system):
    """read DYR file

    Parses PSS/E dynamic data records (terminated by '/') from ``file``,
    groups them by model name (the second token of each record), and adds
    the supported models to ``system`` via ``add_dyn``. Unsupported model
    names are reported with a warning.

    :param file: path of the DYR file to read
    :param system: target system that receives the dynamic models
    :return: True (always)
    """
    dyr = {}
    data = []
    end = 0
    retval = True
    sep = ','

    fid = open(file, 'r')
    for line in fid.readlines():
        # '/' terminates a record; anything after it is discarded.
        if line.find('/') >= 0:
            line = line.split('/')[0]
            end = 1
        if line.find(',') >= 0:  # mixed comma and space splitter not allowed
            line = [to_number(item.strip()) for item in line.split(sep)]
        else:
            line = [to_number(item.strip()) for item in line.split()]
        if not line:
            end = 0
            continue
        data.extend(line)
        if end == 1:
            # Record complete: file it under its model name (second token).
            field = data[1]
            if field not in dyr.keys():
                dyr[field] = []
            dyr[field].append(data)
            end = 0
            data = []
    fid.close()

    # elem_add device elements to system
    supported = [
        'GENROU',
        'GENCLS',
        'ESST3A',
        'ESDC2A',
        'SEXS',
        'EXST1',
        'ST2CUT',
        'IEEEST',
        'TGOV1',
    ]
    used = list(supported)
    for model in supported:
        if model not in dyr.keys():
            used.remove(model)
            continue
        for data in dyr[model]:
            add_dyn(system, model, data)

    # Report any model names present in the file but not supported.
    needed = list(dyr.keys())
    for i in supported:
        if i in needed:
            needed.remove(i)
    logger.warning('Models currently unsupported: {}'.format(
        ', '.join(needed)))

    return retval
read DYR file
def _start_console(self):
    """
    Start streaming the console via telnet.

    Bridges a local telnet server to the Docker attach websocket:
    bytes typed into telnet are buffered and pushed to the websocket,
    and container output is fed into the telnet reader stream.

    NOTE(review): uses ``@asyncio.coroutine``, ``yield from`` and
    ``asyncio.async`` — this only runs on Python < 3.7, where ``async``
    is not yet a keyword.
    """

    class InputStream:
        # Minimal writer shim: buffers telnet input and flushes it to the
        # attach websocket on drain().

        def __init__(self):
            self._data = b""

        def write(self, data):
            self._data += data

        @asyncio.coroutine
        def drain(self):
            if not self.ws.closed:
                self.ws.send_bytes(self._data)
                self._data = b""

    output_stream = asyncio.StreamReader()
    input_stream = InputStream()

    telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True)
    self._telnet_servers.append(
        (yield from asyncio.start_server(telnet.run,
                                         self._manager.port_manager.console_host,
                                         self.console)))

    # Attach to the container's stdio over a websocket.
    self._console_websocket = yield from self.manager.websocket_query(
        "containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
    input_stream.ws = self._console_websocket

    output_stream.feed_data(
        self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")

    # Pump container output into the telnet stream in the background.
    asyncio.async(self._read_console_output(self._console_websocket, output_stream))
Start streaming the console via telnet
def damerau_levenshtein_distance(self, s1, s2):
    """
    Compute the Damerau-Levenshtein distance between two strings.

    Derived algorithm from the following website:
    https://www.guyrutenberg.com/2008/12/15/damerau-levenshtein-distance-in-python/
    Gives us the distance between two words.

    This is the optimal-string-alignment variant: it counts insertions,
    deletions, substitutions and transpositions of adjacent characters.

    :param s1: first string
    :param s2: second string
    :return: integer edit distance
    """
    # Fix: use range() instead of Python-2-only xrange() so the method
    # works on both Python 2 and 3.
    d = {}
    lenstr1 = len(s1)
    lenstr2 = len(s2)

    # Seed the matrix border: distance from the empty prefix.
    for i in range(-1, lenstr1 + 1):
        d[(i, -1)] = i + 1
    for j in range(-1, lenstr2 + 1):
        d[(-1, j)] = j + 1

    for i in range(lenstr1):
        for j in range(lenstr2):
            if s1[i] == s2[j]:
                cost = 0
            else:
                cost = 1
            d[(i, j)] = min(
                d[(i - 1, j)] + 1,         # deletion
                d[(i, j - 1)] + 1,         # insertion
                d[(i - 1, j - 1)] + cost,  # substitution
            )
            if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
                # transposition of adjacent characters
                d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost)

    return d[lenstr1 - 1, lenstr2 - 1]
Derived algorithm from the following website: https://www.guyrutenberg.com/2008/12/15/damerau-levenshtein-distance-in-python/ Gives us the distance between two words.
def render_math(self, token):
    """
    Ensure Math tokens are all enclosed in two dollar signs.
    """
    rendered = self.render_raw_text(token)
    # Display math ($$...$$) is passed through untouched; anything else
    # is wrapped as inline math.
    if token.content.startswith('$$'):
        return rendered
    return '${}$'.format(rendered)
Ensure Math tokens are all enclosed in two dollar signs.
def update_firewall_rule(self, server_name, name, start_ip_address,
                         end_ip_address):
    '''
    Update a firewall rule for an Azure SQL Database server.

    server_name:
        Name of the server to set the firewall rule on.
    name:
        The name of the firewall rule to update.
    start_ip_address:
        The lowest IP address in the range of the server-level firewall
        setting. IP addresses equal to or greater than this can attempt to
        connect to the server. The lowest possible IP address is 0.0.0.0.
    end_ip_address:
        The highest IP address in the range of the server-level firewall
        setting. IP addresses equal to or less than this can attempt to
        connect to the server. The highest possible IP address is
        255.255.255.255.
    '''
    # Reject missing arguments up front.
    for arg_name, arg_value in (('server_name', server_name),
                                ('name', name),
                                ('start_ip_address', start_ip_address),
                                ('end_ip_address', end_ip_address)):
        _validate_not_none(arg_name, arg_value)

    body = _SqlManagementXmlSerializer.update_firewall_rule_to_xml(
        name, start_ip_address, end_ip_address
    )
    return self._perform_put(
        self._get_firewall_rules_path(server_name, name),
        body
    )
Update a firewall rule for an Azure SQL Database server. server_name: Name of the server to set the firewall rule on. name: The name of the firewall rule to update. start_ip_address: The lowest IP address in the range of the server-level firewall setting. IP addresses equal to or greater than this can attempt to connect to the server. The lowest possible IP address is 0.0.0.0. end_ip_address: The highest IP address in the range of the server-level firewall setting. IP addresses equal to or less than this can attempt to connect to the server. The highest possible IP address is 255.255.255.255.
def delete_webhook(self, webhook):
    """
    Deletes the specified webhook from this policy.

    Delegates to the manager, which needs both the owning scaling group
    and this policy to locate the webhook.
    """
    mgr = self.manager
    return mgr.delete_webhook(self.scaling_group, self, webhook)
Deletes the specified webhook from this policy.
def _redirect(self, request, response): """Generic redirect for item editor.""" if '_addanother' in request.POST: return HttpResponseRedirect('../item_add/') elif '_save' in request.POST: return HttpResponseRedirect('../') elif '_continue' in request.POST: return response return HttpResponseRedirect('')
Generic redirect for item editor.
def visit_Include(self, node, frame):
    """Handles includes.

    Emits Python source that loads the included template at render time
    and streams its events, optionally wrapped in a try/except when
    ``ignore missing`` was specified.
    """
    if node.ignore_missing:
        # Wrap the lookup in try/except TemplateNotFound.
        self.writeline('try:')
        self.indent()

    # Pick the environment lookup function based on what the template
    # expression statically is: a single name, a list of candidates, or
    # unknown (resolved at runtime).
    func_name = 'get_or_select_template'
    if isinstance(node.template, nodes.Const):
        if isinstance(node.template.value, string_types):
            func_name = 'get_template'
        elif isinstance(node.template.value, (tuple, list)):
            func_name = 'select_template'
    elif isinstance(node.template, (nodes.Tuple, nodes.List)):
        func_name = 'select_template'

    self.writeline('template = environment.%s(' % func_name, node)
    self.visit(node.template, frame)
    self.write(', %r)' % self.name)
    if node.ignore_missing:
        self.outdent()
        self.writeline('except TemplateNotFound:')
        self.indent()
        self.writeline('pass')
        self.outdent()
        self.writeline('else:')
        self.indent()

    skip_event_yield = False
    if node.with_context:
        # Render within the current context so included templates see
        # the caller's variables.
        loop = self.environment.is_async and 'async for' or 'for'
        self.writeline('%s event in template.root_render_func('
                       'template.new_context(context.get_all(), True, '
                       '%s)):' % (loop, self.dump_local_context(frame)))
    elif self.environment.is_async:
        self.writeline('for event in (await '
                       'template._get_default_module_async())'
                       '._body_stream:')
    else:
        if supports_yield_from:
            # Delegate the whole stream in one statement.
            self.writeline('yield from template._get_default_module()'
                           '._body_stream')
            skip_event_yield = True
        else:
            self.writeline('for event in template._get_default_module()'
                           '._body_stream:')

    if not skip_event_yield:
        self.indent()
        self.simple_write('event', frame)
        self.outdent()

    if node.ignore_missing:
        self.outdent()
Handles includes.
def read_file(filepath, **kwargs):
    """
    Read a data file into a DataFrameModel.

    :param filepath: The rows/columns filepath to read.
    :param kwargs:
        xls/x files - see pandas.read_excel(**kwargs)
        .csv/.txt/etc - see pandas.read_csv(**kwargs)
    :return: DataFrameModel
    """
    frame = superReadFile(filepath, **kwargs)
    return DataFrameModel(dataFrame=frame, filePath=filepath)
Read a data file into a DataFrameModel. :param filepath: The rows/columns filepath to read. :param kwargs: xls/x files - see pandas.read_excel(**kwargs) .csv/.txt/etc - see pandas.read_csv(**kwargs) :return: DataFrameModel
def calc_acceleration_bca(jackknife_replicates):
    """
    Calculate the acceleration constant for the Bias Corrected and
    Accelerated (BCa) bootstrap confidence intervals.

    Parameters
    ----------
    jackknife_replicates : 2D ndarray.
        Each row should correspond to a different jackknife parameter
        sample, formed by deleting a particular observation and then
        re-estimating the desired model. Each column should correspond to
        an element of the parameter vector being estimated.

    Returns
    -------
    acceleration : 1D ndarray.
        One element per parameter; the acceleration factor for each
        component of the parameter vector.

    References
    ----------
    Efron, Bradley, and Robert J. Tibshirani. An Introduction to the
    Bootstrap. CRC press, 1994. Section 14.3, Equation 14.15.
    """
    # Mean across the jackknife samples, kept 2D for broadcasting.
    replicate_means = jackknife_replicates.mean(axis=0)[None, :]
    # Deviations of each jackknife sample from that mean.
    deviations = replicate_means - jackknife_replicates

    numerator = (deviations**3).sum(axis=0)
    denominator = 6 * ((deviations**2).sum(axis=0))**1.5

    # Guard against division by zero. This shouldn't distort results since
    # the numerator is zero whenever the denominator is zero.
    zero_positions = np.where(denominator == 0)
    denominator[zero_positions] = MIN_COMP_VALUE

    return numerator / denominator
Calculate the acceleration constant for the Bias Corrected and Accelerated (BCa) bootstrap confidence intervals. Parameters ---------- jackknife_replicates : 2D ndarray. Each row should correspond to a different jackknife parameter sample, formed by deleting a particular observation and then re-estimating the desired model. Each column should correspond to an element of the parameter vector being estimated. Returns ------- acceleration : 1D ndarray. There will be one element for each element in `mle_estimate`. Elements denote the acceleration factors for each component of the parameter vector. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 14.3, Equation 14.15.
def bytes_array(self):
    '''Get the param as an array of raw byte strings.'''
    # Only 2D params (fixed-length records) can be viewed this way.
    assert len(self.dimensions) == 2, \
        '{}: cannot get value as bytes array!'.format(self.name)
    record_len, record_count = self.dimensions
    # Slice the flat byte buffer into `record_count` chunks of
    # `record_len` bytes each.
    return [self.bytes[idx * record_len:(idx + 1) * record_len]
            for idx in range(record_count)]
Get the param as an array of raw byte strings.
def reload(self, client=None):
    """Update this notification from the server configuration.

    See:
    https://cloud.google.com/storage/docs/json_api/v1/notifications/get

    If :attr:`user_project` is set on the bucket, bills the API request
    to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use.  If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: NoneType
    :returns: ``None``; the notification's properties are updated in
              place.  (The previous docstring claimed a ``bool`` return,
              but no value was ever returned.)

    :raises ValueError: if the notification has no ID.
    """
    if self.notification_id is None:
        # BUG FIX: corrected misspelled error message
        # ("intialized" -> "initialized").
        raise ValueError("Notification not initialized by server")

    client = self._require_client(client)

    query_params = {}
    if self.bucket.user_project is not None:
        # Bill the request to the bucket's configured user project.
        query_params["userProject"] = self.bucket.user_project

    response = client._connection.api_request(
        method="GET", path=self.path, query_params=query_params
    )
    self._set_properties(response)
Update this notification from the server configuration. See: https://cloud.google.com/storage/docs/json_api/v1/notifications/get If :attr:`user_project` is set on the bucket, bills the API request to that project. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :returns: ``None``; the notification's properties are updated in place. :raises ValueError: if the notification has no ID.
def new_parallel(self, function, *params): ''' Register a new thread executing a parallel method. ''' # Create a pool if not created (processes or Gevent...) if self.ppool is None: if core_type == 'thread': from multiprocessing.pool import ThreadPool self.ppool = ThreadPool(500) else: from gevent.pool import Pool self.ppool = Pool(500) # Add the new task to the pool self.ppool.apply_async(function, *params)
Register a new thread executing a parallel method.
def new(params, event_size, num_components, dtype=None, validate_args=False,
        name=None):
    """Create the distribution instance from a `params` vector.

    Args:
        params: Parameter tensor; split by `MixtureSameFamily.new` between
            the mixture weights and the per-component one-hot categorical
            parameters.
        event_size: Size of the one-hot event space.
        num_components: Number of mixture components.
        dtype: Unused in this body.
            # NOTE(review): presumably accepted for signature symmetry with
            # sibling `new` factories — confirm.
        validate_args: Python `bool`; whether to validate arguments at
            runtime.
        name: Python `str` name for ops created by this call.

    Returns:
        A `MixtureSameFamily` distribution with `_mean` and `log_mean`
        helpers attached that evaluate prob/log-prob over all one-hot
        events.
    """
    with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical',
                                 [params, event_size, num_components]):
        dist = MixtureSameFamily.new(
            params,
            num_components,
            OneHotCategorical(
                event_size,
                validate_args=False,  # So we can eval on simplex interior.
                name=name),
            validate_args=validate_args,
            name=name)
        # pylint: disable=protected-access
        # Attach helpers that evaluate prob / log-prob across every one-hot
        # event, partially applied to this distribution instance.
        dist._mean = functools.partial(
            _eval_all_one_hot, tfd.Distribution.prob, dist)
        dist.log_mean = functools.partial(
            _eval_all_one_hot, tfd.Distribution.log_prob, dist)
        # pylint: enable=protected-access
        return dist
Create the distribution instance from a `params` vector.
def encode_request(name, items):
    """ Encode request into client_message"""
    # Size the payload up front, then fill in the fixed header fields.
    message = ClientMessage(payload_size=calculate_size(name, items))
    message.set_message_type(REQUEST_TYPE)
    message.set_retryable(RETRYABLE)
    # Body: the target name, the item count, then each item's raw data.
    message.append_str(name)
    message.append_int(len(items))
    for entry in items:
        message.append_data(entry)
    message.update_frame_length()
    return message
Encode request into client_message
def encrypt(self, data, nounce=None):
    """Encrypt data with counter or specified nounce."""
    if nounce is None:
        # Derive an 8-byte little-endian nonce from the running outbound
        # counter, then advance the counter for the next message.
        nounce = self._out_counter.to_bytes(length=8, byteorder='little')
        self._out_counter += 1
    # The cipher expects a 12-byte nonce: four zero bytes + 8-byte value.
    padded_nonce = b'\x00\x00\x00\x00' + nounce
    return self._enc_out.seal(padded_nonce, data, bytes())
Encrypt data with counter or specified nounce.
def at_line(self, line: FileLine) -> Iterator[InsertionPoint]:
    """
    Returns an iterator over all of the insertion points located at a
    given line.
    """
    logger.debug("finding insertion points at line: %s", str(line))
    filename = line.filename  # type: str
    line_num = line.num  # type: int
    # Scan every insertion point in the file and keep those whose
    # location matches the requested line number.
    for point in self.in_file(filename):
        if point.location.line == line_num:
            logger.debug("found insertion point at line [%s]: %s",
                         str(line), point)
            yield point
Returns an iterator over all of the insertion points located at a given line.
def cli(obj, roles):
    """List users."""
    client = obj['client']
    # One ('roles', <role>) query pair per requested role filter.
    query = [('roles', r) for r in roles]
    if obj['output'] == 'json':
        # JSON output: dump the raw users payload from the API.
        r = client.http.get('/users', query)
        click.echo(json.dumps(r['users'], sort_keys=True, indent=4, ensure_ascii=False))
    else:
        # Tabular output: render each user via its tabular() view in the
        # configured timezone, using the requested table format.
        timezone = obj['timezone']
        # Mapping of user attribute -> column header label.
        headers = {'id': 'ID', 'name': 'USER', 'email': 'EMAIL', 'roles': 'ROLES',
                   'status': 'STATUS', 'text': 'TEXT', 'createTime': 'CREATED',
                   'updateTime': 'LAST UPDATED', 'lastLogin': 'LAST LOGIN',
                   'email_verified': 'VERIFIED'}
        click.echo(
            tabulate([u.tabular(timezone) for u in client.get_users(query)],
                     headers=headers, tablefmt=obj['output'])
        )
List users.
def delete_tag(context, id, tag_id):
    """delete_tag(context, id, tag_id)

    Delete a tag from a job.

    >>> dcictl job-delete-tag [OPTIONS]

    :param string id: ID of the job to remove the tag from [required]
    :param string tag_id: ID of the tag to be removed from the job [required]
    """
    result = job.delete_tag(context, id=id, tag_id=tag_id)
    # 204 No Content signals a successful deletion.
    if result.status_code != 204:
        utils.format_output(result, context.format)
    else:
        utils.print_json({'id': id, 'message': 'Tag removed.'})
delete_tag(context, id, tag_id) Delete a tag from a job. >>> dcictl job-delete-tag [OPTIONS] :param string id: ID of the job to remove the tag from [required] :param string tag_id: ID of the tag to be removed from the job [required]
def split_comma_argument(comma_sep_str):
    """Split a comma separated option into a list.

    Empty segments (produced by leading, trailing, or doubled commas) are
    dropped.

    :param comma_sep_str: the raw comma-separated option string.
    :return: list of the non-empty terms, in order.
    """
    # Idiom fix: a filtering comprehension replaces the manual
    # split-and-append loop.
    return [term for term in comma_sep_str.split(',') if term]
Split a comma separated option into a list.
def _mkdirs_impacket(path, share='C$', conn=None, host=None, username=None, password=None):
    '''
    Recursively create a directory structure on an SMB share

    Paths should be passed in with forward-slash delimiters, and should not
    start with a forward-slash.
    '''
    # Reuse an existing connection when given; otherwise open a new one.
    # get_conn may return False on failure, which we propagate.
    if conn is None:
        conn = get_conn(host, username, password)

    if conn is False:
        return False

    comps = path.split('/')
    pos = 1
    # Walk the path one component at a time, creating each missing level.
    # NOTE(review): `comp` itself is unused; the loop only advances `pos`.
    for comp in comps:
        # Backslash-joined prefix of the path up to the current depth.
        cwd = '\\'.join(comps[0:pos])
        try:
            # Probe the directory; listPath raises when it does not exist.
            conn.listPath(share, cwd)
        except (smbSessionError, smb3SessionError):
            log.exception('Encountered error running conn.listPath')
            # Missing (or unlistable) level: create it and continue deeper.
            conn.createDirectory(share, cwd)
        pos += 1
Recursively create a directory structure on an SMB share Paths should be passed in with forward-slash delimiters, and should not start with a forward-slash.
def concatenate(x, other):
    """Returns the concatenation of the dimension in `x` and `other`.

    *Note:* If either `x` or `other` is completely unknown, concatenation
    will discard information about the other shape. In future, we might
    support concatenation that preserves this information for use with
    slicing.

    For more details, see `help(tf.TensorShape.concatenate)`.

    Args:
      x: object representing a shape; convertible to `tf.TensorShape`.
      other: object representing a shape; convertible to `tf.TensorShape`.

    Returns:
      new_shape: an object like `x` whose elements are the concatenation of
        the dimensions in `x` and `other`.
    """
    # Concatenate via TensorShape, then convert back to x's own type so
    # the caller gets the same kind of shape object it passed in.
    combined = tf.TensorShape(x).concatenate(other)
    return type(x)(combined)
Returns the concatenation of the dimension in `x` and `other`. *Note:* If either `x` or `other` is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. For more details, see `help(tf.TensorShape.concatenate)`. Args: x: object representing a shape; convertible to `tf.TensorShape`. other: object representing a shape; convertible to `tf.TensorShape`. Returns: new_shape: an object like `x` whose elements are the concatenation of the dimensions in `x` and `other`.
def transform(testtype):
    '''
    A lot of these transformations are from tasks before task labels and
    some of them are if we grab data directly from Treeherder jobs endpoint
    instead of runnable jobs API.
    '''
    # XXX: Evaluate which of these transformations are still valid
    if testtype.startswith('[funsize'):
        return None

    # Drop the platform/build-type prefix, then normalize.
    result = testtype.split('/opt-')[-1]
    result = result.split('/debug-')[-1]

    # this is plain-reftests for android
    result = result.replace('plain-', '').strip()

    # https://bugzilla.mozilla.org/show_bug.cgi?id=1313844
    # mochitest-gl-1 <-- Android 4.3 armv7 API 15+ mozilla-inbound opt test mochitest-gl-1
    # mochitest-webgl-9 <-- test-android-4.3-arm7-api-15/opt-mochitest-webgl-9
    for old, new in (('browser-chrome-e10s', 'e10s-browser-chrome'),
                     ('devtools-chrome-e10s', 'e10s-devtools-chrome'),
                     ('[TC] Android 4.3 API15+ ', ''),
                     ('webgl-', 'gl-')):
        result = result.replace(old, new)

    return result
A lot of these transformations are from tasks before task labels and some of them are if we grab data directly from Treeherder jobs endpoint instead of runnable jobs API.
def map_aliases_to_device_objects(self):
    """
    A device object knows its rid, but not its alias. A portal object
    knows its device rids and aliases. This function adds an
    'portals_aliases' key to all of the device objects so they can be
    sorted by alias.
    """
    all_devices = self.get_all_devices_in_portal()
    for dev_o in all_devices:
        # Look up this device's aliases in the portal, keyed by device rid.
        # NOTE(review): the [2][1] indexing assumes a fixed tuple layout in
        # get_portal_by_name()'s return value ending in an 'info' dict —
        # confirm against that function's contract.
        # NOTE(review): the portal lookup is loop-invariant and could be
        # hoisted; it currently re-fetches the portal for every device.
        dev_o['portals_aliases'] = self.get_portal_by_name(
            self.portal_name()
        )[2][1]['info']['aliases'][
            dev_o['rid']
        ]
    return all_devices
A device object knows its rid, but not its alias. A portal object knows its device rids and aliases. This function adds an 'portals_aliases' key to all of the device objects so they can be sorted by alias.
def predict_proba(self, X): """Predict the distances for X to center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects distances to center of the training set. """ # Check is fit had been called check_is_fitted(self, ['inverse_influence_matrix']) # Check that X have correct shape X = check_array(X) return self.__find_leverages(X, self.inverse_influence_matrix)
Predict the distances for X to center of the training set. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- leverages: array of shape = [n_samples] The objects distances to center of the training set.
def turn_on(host, did, token=None):
    """Turn on bulb or fixture.

    :param host: hostname or IP of the gateway.
    :param did: device ID to switch on.
    :param token: auth token; when omitted, plain HTTP with the default
        token is used instead of HTTPS.
    :return: True when the gateway answered 200 OK, else False.
    """
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        scheme = "http"
        token = "1234567890"
    url = (
        scheme + '://' + host
        + '/gwr/gop.php?cmd=DeviceSendCommand&data=<gip><version>1</version><token>'
        + token + '</token><did>' + did + '</did><value>1</value></gip>&fmt=xml')
    response = requests.get(url, verify=False)
    # BUG FIX: requests exposes status_code as an int; comparing it to the
    # string '200' was always False, so this function always returned False.
    return response.status_code == 200
Turn on bulb or fixture
async def async_delete_all_keys(session, host, port, api_key, api_keys=None):
    """Delete all API keys except for the ones provided to the method.

    BUG FIX: the previous signature used a mutable default argument
    (``api_keys=[]``) and appended to it, so keys preserved in one call
    leaked into every later call sharing the default list. The default is
    now ``None`` and the caller's list is copied before being extended.

    :param session: aiohttp-style session used for the requests.
    :param host: gateway host.
    :param port: gateway port.
    :param api_key: key used to authenticate; always preserved.
    :param api_keys: additional keys to preserve (not deleted).
    """
    url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
    response = await async_request(session.get, url)

    # Work on a copy so the caller's list is never mutated.
    keys_to_keep = list(api_keys) if api_keys is not None else []
    keys_to_keep.append(api_key)

    for key in response['whitelist'].keys():
        if key not in keys_to_keep:
            await async_delete_api_key(session, host, port, key)
Delete all API keys except for the ones provided to the method.
def cov_dvrpmllbb_to_vxyz(d,e_d,e_vr,pmll,pmbb,cov_pmllbb,l,b,
                          plx=False,degree=False):
    """
    NAME:

       cov_dvrpmllbb_to_vxyz

    PURPOSE:

       propagate distance, radial velocity, and proper motion uncertainties to
       Galactic coordinates

    INPUT:

       d - distance [kpc, as/mas for plx]

       e_d - distance uncertainty [kpc, [as/mas] for plx]

       e_vr  - low velocity uncertainty [km/s]

       pmll - proper motion in l (*cos(b)) [ [as/mas]/yr ]

       pmbb - proper motion in b [ [as/mas]/yr ]

       cov_pmllbb - uncertainty covariance for proper motion [pmll is pmll x cos(b)]

       l - Galactic longitude

       b - Galactic latitude

    KEYWORDS:

       plx - if True, d is a parallax, and e_d is a parallax uncertainty

       degree - if True, l and b are given in degree

    OUTPUT:

       cov(vx,vy,vz) [3,3] or [:,3,3]

    HISTORY:

       2010-04-12 - Written - Bovy (NYU)
    """
    if plx:
        # Convert parallax to distance and propagate its uncertainty via
        # |d(1/p)/dp| = 1/p^2 = d^2.
        d= 1./d
        e_d*= d**2.
    if degree:
        # NOTE(review): these in-place multiplications mutate the caller's
        # arrays when l and b are ndarrays — confirm callers expect that.
        l*= _DEGTORAD
        b*= _DEGTORAD
    if sc.array(d).shape == ():
        # Scalar input: delegate directly to the single-point routine.
        return cov_dvrpmllbb_to_vxyz_single(d,e_d,e_vr,pmll,pmbb,cov_pmllbb,
                                            l,b)
    else:
        # Array input: compute one 3x3 covariance matrix per data point.
        ndata= len(d)
        out= sc.zeros((ndata,3,3))
        for ii in range(ndata):
            out[ii,:,:]= cov_dvrpmllbb_to_vxyz_single(d[ii],e_d[ii],e_vr[ii],
                                                      pmll[ii],pmbb[ii],
                                                      cov_pmllbb[ii,:,:],
                                                      l[ii],b[ii])
        return out
NAME: cov_dvrpmllbb_to_vxyz PURPOSE: propagate distance, radial velocity, and proper motion uncertainties to Galactic coordinates INPUT: d - distance [kpc, as/mas for plx] e_d - distance uncertainty [kpc, [as/mas] for plx] e_vr - low velocity uncertainty [km/s] pmll - proper motion in l (*cos(b)) [ [as/mas]/yr ] pmbb - proper motion in b [ [as/mas]/yr ] cov_pmllbb - uncertainty covariance for proper motion [pmll is pmll x cos(b)] l - Galactic longitude b - Galactic latitude KEYWORDS: plx - if True, d is a parallax, and e_d is a parallax uncertainty degree - if True, l and b are given in degree OUTPUT: cov(vx,vy,vz) [3,3] or [:,3,3] HISTORY: 2010-04-12 - Written - Bovy (NYU)
def mission_count_send(self, target_system, target_component, count, force_mavlink1=False):
    '''
    This message is emitted as response to MISSION_REQUEST_LIST by the
    MAV and to initiate a write transaction. The GCS can then request
    the individual mission item based on the knowledge of the total
    number of MISSIONs.

    target_system             : System ID (uint8_t)
    target_component          : Component ID (uint8_t)
    count                     : Number of mission items in the sequence (uint16_t)
    '''
    # Encode the MISSION_COUNT message, then hand it to the transport.
    encoded = self.mission_count_encode(target_system, target_component, count)
    return self.send(encoded, force_mavlink1=force_mavlink1)
This message is emitted as response to MISSION_REQUEST_LIST by the MAV and to initiate a write transaction. The GCS can then request the individual mission item based on the knowledge of the total number of MISSIONs. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) count : Number of mission items in the sequence (uint16_t)
def r(op, rc=None, r=None, iq=None, ico=None, pl=None):
    # pylint: disable=redefined-outer-name, invalid-name, invalid-name
    """
    Wrapper for :meth:`~pywbem.WBEMConnection.References`.

    Instance-level use (when `op` is a :class:`~pywbem.CIMInstanceName`):
    retrieve the association instances referencing a source instance.

    Class-level use (when `op` is a :class:`~pywbem.CIMClassName`):
    retrieve the association classes referencing a source class.

    Parameters:

      op (:class:`~pywbem.CIMInstanceName` or :class:`~pywbem.CIMClassName`):
        Source instance path or source class path; selects instance-level
        vs. class-level use.

      rc (:term:`string`):
        ResultClass filter: include only traversals across this association
        (result) class. `None` means this filter is not applied.

      r (:term:`string`):
        Role filter: include only traversals from this role (= reference
        name) in the source object. `None` means this filter is not applied.

      iq (:class:`py:bool`):
        IncludeQualifiers flag. `None` uses the server default of `False`.
        Deprecated in :term:`DSP0200`: clients cannot rely on qualifiers
        being returned by this operation.

      ico (:class:`py:bool`):
        IncludeClassOrigin flag. `None` uses the server default of `False`.
        Deprecated in :term:`DSP0200` for instance-level use.

      pl (:term:`string` or :term:`py:iterable` of :term:`string`):
        PropertyList: names of properties to be included (if not otherwise
        excluded). An empty iterable includes no properties; `None`
        includes all properties.

    Returns:

      For instance-level use, a list of :class:`~pywbem.CIMInstance`
      objects with their `path` attribute set (classname, keybindings,
      namespace, and host when the server returned it).

      For class-level use, a list of (classpath, class) tuples, where
      classpath is a :class:`~pywbem.CIMClassName` and class is the
      :class:`~pywbem.CIMClass` representation.
    """
    return CONN.References(op,
                           ResultClass=rc,
                           Role=r,
                           IncludeQualifiers=iq,
                           IncludeClassOrigin=ico,
                           PropertyList=pl)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.References`. Instance-level use: Retrieve the association instances referencing a source instance. Class-level use: Retrieve the association classes referencing a source class. Parameters: op (:class:`~pywbem.CIMInstanceName`): Source instance path; select instance-level use. op (:class:`~pywbem.CIMClassName`): Source class path; select class-level use. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. iq (:class:`py:bool`): IncludeQualifiers flag: Include qualifiers. `None` will cause the server default of `False` to be used. Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to be returned in this operation. ico (:class:`py:bool`): IncludeClassOrigin flag: Include class origin information for the properties in the retrieved instances or for the properties and methods in the retrieved classes. `None` will cause the server default of `False` to be used. Deprecated in :term:`DSP0200` for instance-level use: WBEM servers may either implement this parameter as specified, or may treat any specified value as `False`. pl (:term:`string` or :term:`py:iterable` of :term:`string`): PropertyList: Names of properties to be included (if not otherwise excluded). An empty iterable indicates to include no properties. If `None`, all properties will be included. Returns: list of result objects: * For instance-level use, a list of :class:`~pywbem.CIMInstance` objects representing the retrieved instances, with their `path` attribute being a :class:`~pywbem.CIMInstanceName` object with its attributes set as follows: * `classname`: Name of the creation class of the instance. * `keybindings`: Keybindings of the instance. 
* `namespace`: Name of the CIM namespace containing the instance. * `host`: Host and optionally port of the WBEM server containing the CIM namespace, or `None` if the server did not return host information. * For class-level use, a list of tuple (classpath, class) representing the retrieved classes, with the following (unnamed) tuple items: * classpath (:class:`~pywbem.CIMClassName`): Class path with its attributes set as follows: * `classname`: Name of the class. * `namespace`: Name of the CIM namespace containing the class. * `host`: Host and optionally port of the WBEM server containing the CIM namespace, or `None` if the server did not return host information. * class (:class:`~pywbem.CIMClass`): The representation of the class.
def get_configs(
        config_filepath,
        local_filepath_override='',
):
    """go and fetch the global/local configs from file and load them with
    configparser

    Args:
        config_filepath (str): path to config
        local_filepath_override (str): secondary place to locate config file

    Returns:
        ConfigParser: global_config
        ConfigParser: local_config

    """
    global_config = read_config(config_filepath)

    # Derive the companion local-config path, unless the caller supplied
    # an explicit override location.
    local_filepath = get_local_config_filepath(config_filepath, True)
    if local_filepath_override:
        local_filepath = local_filepath_override
    local_config = read_config(local_filepath)

    return global_config, local_config
go and fetch the global/local configs from file and load them with configparser Args: config_filepath (str): path to config local_filepath_override (str): secondary place to locate config file Returns: ConfigParser: global_config ConfigParser: local_config
def setup_logger(log_level, log_file=None):
    """setup root logger with ColoredFormatter."""
    # Map the textual level ("debug", "INFO", ...) to the numeric constant;
    # unknown names yield None and abort with an error message.
    # NOTE(review): a valid "NOTSET" maps to 0, which is falsy and would be
    # rejected here as invalid — confirm that is intended.
    level = getattr(logging, log_level.upper(), None)
    if not level:
        color_print("Invalid log level: %s" % log_level, "RED")
        sys.exit(1)

    # hide traceback when log level is INFO/WARNING/ERROR/CRITICAL
    # NOTE(review): this mutates process-wide state (sys.tracebacklimit),
    # affecting all traceback printing, not just this logger.
    if level >= logging.INFO:
        sys.tracebacklimit = 0

    formatter = ColoredFormatter(
        u"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(message)s",
        datefmt=None,
        reset=True,
        log_colors=log_colors_config
    )

    # Log to a file (UTF-8) when requested, otherwise to the console.
    if log_file:
        handler = logging.FileHandler(log_file, encoding="utf-8")
    else:
        handler = logging.StreamHandler()

    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
setup root logger with ColoredFormatter.
def _set_property(self, name, value):
    """
    Set property `name` to `value`, but only if it is part of the mapping
    returned from `worker_mapping` (ie - data transported to frontend).

    This method is used from the REST API DB, so it knows what to set and
    what not, to prevent users from setting internal values.

    Args:
        name (str): Name of the property to set.
        value (obj): Any python value.

    Raises:
        KeyError: If `name` can't be set.
    """
    # Guard clause; membership is tested against the mapping directly
    # (dicts support `in` natively — no need for `.keys()`).
    if name not in worker_mapping():
        raise KeyError("Can't set `%s`!" % name)
    setattr(self, name, value)
Set property `name` to `value`, but only if it is part of the mapping returned from `worker_mapping` (ie - data transported to frontend). This method is used from the REST API DB, so it knows what to set and what not, to prevent users from setting internal values. Args: name (str): Name of the property to set. value (obj): Any python value. Raises: KeyError: If `name` can't be set.
def convex_conj(self):
    """The convex conjugate functional of the group L1-norm."""
    # The conjugate of a norm is the indicator of the dual-norm unit ball,
    # with the conjugate (dual) exponent.
    dual_exponent = conj_exponent(self.pointwise_norm.exponent)
    return IndicatorGroupL1UnitBall(self.domain, exponent=dual_exponent)
The convex conjugate functional of the group L1-norm.
def xywh_from_points(points):
    """
    Constructs an dict representing a rectangle with keys x, y, w, h

    `points` is a space-separated list of comma-separated "x,y" pairs,
    e.g. "0,0 10,5 3,7"; the result is the bounding box of all points.

    BUG FIX: the previous version initialized the running maxima to 0, so
    a point set lying entirely at negative coordinates produced a wrong
    (too wide/tall) box. min()/max() over the actual coordinates is used
    instead.
    """
    pairs = [pair.split(',') for pair in points.split(' ')]
    # Only the first two components of each pair are coordinates.
    xs = [int(p[0]) for p in pairs]
    ys = [int(p[1]) for p in pairs]
    return {
        'x': min(xs),
        'y': min(ys),
        'w': max(xs) - min(xs),
        'h': max(ys) - min(ys),
    }
Constructs an dict representing a rectangle with keys x, y, w, h
def set_password(self, password, user='', note=None):
    """Sets the password for the current user or passed-in user.

    As a side effect, installs the "password" package.

    @param user:        username to set the password for. Defaults to '' (i.e. current user)
    @param password:    password to set for the user
    @param note:        See send()
    """
    shutit = self.shutit
    shutit.handle_note(note)
    if isinstance(password, str):
        # Register the password as a secret so it is redacted from logs.
        shutit_global.shutit_global_object.secret_words_set.add(password)
    self.install('passwd')
    # Each install type (distro family) words its passwd prompts slightly
    # differently, so drive the interactive dialogue with matching expect
    # strings: command -> new password -> confirmation.
    if self.current_environment.install_type == 'apt':
        self.send(ShutItSendSpec(self,
                                 send='passwd ' + user,
                                 expect='Enter new',
                                 check_exit=False,
                                 ignore_background=True))
        # echo=False keeps the password itself out of the session log.
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect='Retype new',
                                 check_exit=False,
                                 echo=False,
                                 ignore_background=True
                                 ))
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect=self.default_expect,
                                 echo=False,
                                 ignore_background=True,
                                 ))
    elif self.current_environment.install_type == 'yum':
        # yum-based distros prompt with "New password:"/"Retype new
        # password:", matched loosely via 'ew password'.
        self.send(ShutItSendSpec(self,
                                 send='passwd ' + user,
                                 expect='ew password',
                                 check_exit=False,
                                 ignore_background=True))
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect='ew password',
                                 check_exit=False,
                                 echo=False,
                                 ignore_background=True))
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect=self.default_expect,
                                 echo=False,
                                 ignore_background=True))
    else:
        # Fallback for other install types: same dialogue as apt.
        self.send(ShutItSendSpec(self,
                                 send='passwd ' + user,
                                 expect='Enter new',
                                 check_exit=False,
                                 ignore_background=True))
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect='Retype new',
                                 check_exit=False,
                                 echo=False,
                                 ignore_background=True))
        self.send(ShutItSendSpec(self,
                                 send=password,
                                 expect=self.default_expect,
                                 echo=False,
                                 ignore_background=True))
    shutit.handle_note_after(note=note)
    return True
Sets the password for the current user or passed-in user. As a side effect, installs the "password" package. @param user: username to set the password for. Defaults to '' (i.e. current user) @param password: password to set for the user @param note: See send()
def geoframe(self, *args, **kwargs):
    """Return a Geo dataframe"""
    from geopandas import GeoDataFrame
    import geopandas as gpd
    from shapely.geometry.polygon import BaseGeometry
    from shapely.wkt import loads

    gdf = None

    # Try increasingly generic sources for a ready-made GeoDataFrame: the
    # resolved URL itself, its geo generator, then the row generator.
    try:
        gdf = self.resolved_url.geoframe(*args, **kwargs)
    except AttributeError:
        pass

    if gdf is None:
        try:
            gdf = self.resolved_url.geo_generator.geoframe(*args, **kwargs)
        except AttributeError:
            pass

    if gdf is None:
        try:
            gdf = self.row_generator.geoframe(*args, **kwargs)
        except AttributeError:
            pass

    if gdf is None:
        # Last resort: build a GeoDataFrame from the plain dataframe and
        # coerce its 'geometry' column into shapely objects.
        try:
            gdf = GeoDataFrame(self.dataframe(*args, **kwargs))

            # Inspect the first geometry value to decide how to convert.
            first = next(gdf.iterrows())[1]['geometry']

            if isinstance(first, str):
                # We have a GeoDataframe, but the geometry column is still
                # strings, so it must be converted (parsed as WKT).
                shapes = [loads(row['geometry']) for i, row in gdf.iterrows()]

            elif not isinstance(first, BaseGeometry):
                # If we are reading a metatab package, the geometry column's
                # type should be 'geometry' which will give the geometry
                # values class type of rowpipe.valuetype.geo.ShapeValue.
                # However, there are other types of objects that have a
                # 'shape' property.
                shapes = [row['geometry'].shape for i, row in gdf.iterrows()]

            else:
                shapes = gdf['geometry']

            gdf['geometry'] = gpd.GeoSeries(shapes)
            gdf.set_geometry('geometry')

            # Wild guess. This case should be most often for Metatab
            # processed geo files, which are all 4326
            if gdf.crs is None:
                gdf.crs = {'init': 'epsg:4326'}

        except KeyError as e:
            raise ResourceError("Failed to create GeoDataFrame for resource '{}': No geometry column".format(self.name))
        # NOTE(review): KeyError below is unreachable — the clause above
        # already catches it; only TypeError reaches this handler.
        except (KeyError,TypeError) as e:
            raise ResourceError("Failed to create GeoDataFrame for resource '{}': {}".format(self.name, str(e)))

    assert gdf.crs is not None
    return gdf
Return a Geo dataframe
def ListFileEntries(self, base_path_specs, output_writer):
    """Lists file entries in the base path specification.

    Args:
        base_path_specs (list[dfvfs.PathSpec]): source path specifications.
        output_writer (StdoutWriter): output writer.
    """
    for base_path_spec in base_path_specs:
        file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
        file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
        if file_entry is None:
            logging.warning(
                'Unable to open base path specification:\n{0:s}'.format(
                    base_path_spec.comparable))
            # NOTE(review): this aborts the whole listing on the first
            # unopenable path spec; `continue` would skip just this one —
            # confirm which behavior is intended.
            return

        # Recursively list this entry, starting from an empty parent path.
        self._ListFileEntry(file_system, file_entry, '', output_writer)
Lists file entries in the base path specification. Args: base_path_specs (list[dfvfs.PathSpec]): source path specification. output_writer (StdoutWriter): output writer.