Columns: positive — string (lengths 100 to 30.3k); anchor — string (lengths 1 to 15k)
def main(): """Main entry point for script.""" parser = argparse.ArgumentParser( description='Auto-generate a RESTful API service ' 'from an existing database.' ) parser.add_argument( 'URI', help='Database URI in the format ' 'postgresql+psycopg2://user:password@host/database') parser.add_argument( '-d', '--debug', help='Turn on debug logging', action='store_true', default=False) parser.add_argument( '-p', '--port', help='Port for service to listen on', default=5000) parser.add_argument( '-l', '--local-only', help='Only provide service on localhost (will not be accessible' ' from other machines)', action='store_true', default=False) parser.add_argument( '-r', '--read-only', help='Make all database resources read-only (i.e. only the HTTP GET method is supported)', action='store_true', default=False) parser.add_argument( '-s', '--schema', help='Use this named schema instead of default', default=None) args = parser.parse_args() app = get_app(args.URI, read_only=args.read_only, schema=args.schema) if args.debug: app.config['DEBUG'] = True if args.local_only: host = '127.0.0.1' else: host = '0.0.0.0' app.config['SECRET_KEY'] = '42' app.run(host=host, port=int(args.port))
Main entry point for script.
def __parse_direct_mention(self, message_text):
    """
    Finds a direct mention (a mention that is at the beginning) in message text
    and returns the user ID which was mentioned along with the remaining message.
    If there is no direct mention, returns (None, None).
    """
    matches = re.search(MENTION_REGEX, message_text)
    # the first group contains the username,
    # the second group contains the remaining message
    return (matches.group(1), listify(matches.group(2).strip())) if matches else (None, None)
Finds a direct mention (a mention that is at the beginning) in message text and returns the user ID which was mentioned along with the remaining message. If there is no direct mention, returns (None, None)
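A minimal sketch of how this parses a message. MENTION_REGEX and listify are not shown in the snippet above, so the pattern below (commonly used in Slack-bot tutorials) and the trivial listify stand-in are assumptions:

    import re

    MENTION_REGEX = "^<@(|[WU].+?)>(.*)"   # assumed; not defined in the original

    def listify(text):
        return text.split()                # hypothetical stand-in for the real helper

    matches = re.search(MENTION_REGEX, "<@U012ABCDEF> deploy staging")
    print(matches.group(1))                       # U012ABCDEF
    print(listify(matches.group(2).strip()))      # ['deploy', 'staging']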
def create_activity(self, activity_name, desc=None, started_on=None, ended_on=None):
    """
    Send POST to /activities creating a new activity with the specified name and desc.
    Raises DataServiceError on error.
    :param activity_name: str name of the activity
    :param desc: str description of the activity (optional)
    :param started_on: str datetime when the activity started (optional)
    :param ended_on: str datetime when the activity ended (optional)
    :return: requests.Response containing the successful result
    """
    data = {
        "name": activity_name,
        "description": desc,
        "started_on": started_on,
        "ended_on": ended_on
    }
    return self._post("/activities", data)
Send POST to /activities creating a new activity with the specified name and desc. Raises DataServiceError on error. :param activity_name: str name of the activity :param desc: str description of the activity (optional) :param started_on: str datetime when the activity started (optional) :param ended_on: str datetime when the activity ended (optional) :return: requests.Response containing the successful result
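A usage sketch; the client object and its constructor are not shown above, so `client` here is hypothetical:

    # client is hypothetical; only create_activity() comes from the snippet above
    response = client.create_activity(
        "nightly-run",
        desc="Scheduled pipeline execution",
        started_on="2019-01-01T00:00:00Z")
    print(response.json())  # the server's representation of the new activity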
def next_turn(self, *args):
    """Advance time by one turn, if it's not blocked.

    Block time by setting ``engine.universal['block'] = True``
    """
    if self.tmp_block:
        return
    eng = self.app.engine
    dial = self.dialoglayout
    if eng.universal.get('block'):
        Logger.info("MainScreen: next_turn blocked, delete universal['block'] to unblock")
        return
    if dial.idx < len(dial.todo):
        Logger.info("MainScreen: not advancing time while there's a dialog")
        return
    self.tmp_block = True
    self.app.unbind(
        branch=self.app._push_time,
        turn=self.app._push_time,
        tick=self.app._push_time
    )
    eng.next_turn(cb=self._update_from_next_turn)
Advance time by one turn, if it's not blocked. Block time by setting ``engine.universal['block'] = True``
def get_control(self, right=None, left=None):
    """
    Returns joint velocities to control the robot after the target end effector
    positions and orientations are updated from arguments @left and @right.
    If no arguments are provided, joint velocities will be computed based
    on the previously recorded target.

    Args:
        left (dict): A dictionary to control the left end effector with these keys.
            dpos (numpy array): a 3 dimensional array corresponding to the desired
                change in x, y, and z left end effector position.
            rotation (numpy array): a rotation matrix of shape (3, 3) corresponding
                to the desired orientation of the left end effector.
        right (dict): A dictionary to control the right end effector with these keys.
            dpos (numpy array): a 3 dimensional array corresponding to the desired
                change in x, y, and z right end effector position.
            rotation (numpy array): a rotation matrix of shape (3, 3) corresponding
                to the desired orientation of the right end effector.

    Returns:
        velocities (numpy array): a flat array of joint velocity commands to apply
            to try and achieve the desired input control.
    """
    # Sync joint positions for IK.
    self.sync_ik_robot(self.robot_jpos_getter())

    # Compute new target joint positions if arguments are provided
    if (right is not None) and (left is not None):
        self.commanded_joint_positions = self.joint_positions_for_eef_command(
            right, left
        )

    # P controller from joint positions (from IK) to velocities
    velocities = np.zeros(14)
    deltas = self._get_current_error(
        self.robot_jpos_getter(), self.commanded_joint_positions
    )
    for i, delta in enumerate(deltas):
        velocities[i] = -2 * delta
    velocities = self.clip_joint_velocities(velocities)
    self.commanded_joint_velocities = velocities
    return velocities
Returns joint velocities to control the robot after the target end effector positions and orientations are updated from arguments @left and @right. If no arguments are provided, joint velocities will be computed based on the previously recorded target. Args: left (dict): A dictionary to control the left end effector with these keys. dpos (numpy array): a 3 dimensional array corresponding to the desired change in x, y, and z left end effector position. rotation (numpy array): a rotation matrix of shape (3, 3) corresponding to the desired orientation of the left end effector. right (dict): A dictionary to control the right end effector with these keys. dpos (numpy array): a 3 dimensional array corresponding to the desired change in x, y, and z right end effector position. rotation (numpy array): a rotation matrix of shape (3, 3) corresponding to the desired orientation of the right end effector. Returns: velocities (numpy array): a flat array of joint velocity commands to apply to try and achieve the desired input control.
def intersperse(lis, value):
    """Put value between each existing item in list.

    Parameters
    ----------
    lis : list
        List to intersperse.
    value : object
        Value to insert.

    Returns
    -------
    list
        interspersed list
    """
    out = [value] * (len(lis) * 2 - 1)
    out[0::2] = lis
    return out
Put value between each existing item in list. Parameters ---------- lis : list List to intersperse. value : object Value to insert. Returns ------- list interspersed list
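A quick check of the slicing trick, using the function exactly as defined above:

    >>> intersperse(['a', 'b', 'c'], '-')
    ['a', '-', 'b', '-', 'c']
    >>> intersperse([1], 0)
    [1]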
def start(cls, ev=None):
    """
    Start the analysis.
    """
    ViewController.log_view.add("Beginning AnalysisRunner request..")

    # reset all inputs
    ViewController.reset_bars()

    # read the urlbox
    url = ViewController.url.strip()

    # make sure that `url` was filled
    if not url:
        # Czech: "The URL must be filled in."
        ViewController.urlbox_error.show("URL musí být vyplněna.")
        return

    if is_issn(url):
        ViewController.url_progressbar.hide()
        ViewController.url = ""
        ViewController.issn = url
        AlephISSNReaderAdapter.start()
        return

    ViewController.urlbox_error.hide()

    # normalize the `url`
    if not (url.startswith("http://") or url.startswith("https://")):
        url = "http://" + url

    ViewController.url = url  # store normalized url back to input

    make_request(
        url=join(settings.API_PATH, "analyze"),
        data={'url': url},
        on_complete=cls.on_complete
    )
Start the analysis.
def _policy_loss(
        self, old_policy, policy, action, advantage, length):
    """Compute the policy loss composed of multiple components.

    1. The policy gradient loss is importance sampled from the
       data-collecting policy at the beginning of training.
    2. The second term is a KL penalty between the policy at the beginning
       of training and the current policy.
    3. Additionally, if this KL already changed more than twice the target
       amount, we activate a strong penalty discouraging further divergence.

    Args:
      old_policy: Action distribution of the behavioral policy.
      policy: Sequences of distribution params of the current policy.
      action: Sequences of actions.
      advantage: Sequences of advantages.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('policy_loss'):
        kl = tf.contrib.distributions.kl_divergence(old_policy, policy)
        # Infinite values in the KL, even for padding frames that we mask out,
        # cause NaN gradients since TensorFlow computes gradients with respect
        # to the whole input tensor.
        kl = tf.check_numerics(kl, 'kl')
        kl = tf.reduce_mean(self._mask(kl, length), 1)
        policy_gradient = tf.exp(
            policy.log_prob(action) - old_policy.log_prob(action))
        surrogate_loss = -tf.reduce_mean(self._mask(
            policy_gradient * tf.stop_gradient(advantage), length), 1)
        surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')
        kl_penalty = self._penalty * kl
        cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
        cutoff_count = tf.reduce_sum(
            tf.cast(kl > cutoff_threshold, tf.int32))
        with tf.control_dependencies([tf.cond(
                cutoff_count > 0,
                lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
            kl_cutoff = (
                self._config.kl_cutoff_coef *
                tf.cast(kl > cutoff_threshold, tf.float32) *
                (kl - cutoff_threshold) ** 2)
        policy_loss = surrogate_loss + kl_penalty + kl_cutoff
        entropy = tf.reduce_mean(policy.entropy(), axis=1)
        if self._config.entropy_regularization:
            policy_loss -= self._config.entropy_regularization * entropy
        summary = tf.summary.merge([
            tf.summary.histogram('entropy', entropy),
            tf.summary.histogram('kl', kl),
            tf.summary.histogram('surrogate_loss', surrogate_loss),
            tf.summary.histogram('kl_penalty', kl_penalty),
            tf.summary.histogram('kl_cutoff', kl_cutoff),
            tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
            tf.summary.histogram('policy_loss', policy_loss),
            tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
            tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
            tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
        policy_loss = tf.reduce_mean(policy_loss, 0)
        return tf.check_numerics(policy_loss, 'policy_loss'), summary
Compute the policy loss composed of multiple components. 1. The policy gradient loss is importance sampled from the data-collecting policy at the beginning of training. 2. The second term is a KL penalty between the policy at the beginning of training and the current policy. 3. Additionally, if this KL already changed more than twice the target amount, we activate a strong penalty discouraging further divergence. Args: old_policy: Action distribution of the behavioral policy. policy: Sequences of distribution params of the current policy. action: Sequences of actions. advantage: Sequences of advantages. length: Batch of sequence lengths. Returns: Tuple of loss tensor and summary tensor.
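The three components are easy to reproduce outside TensorFlow. Below is a minimal NumPy sketch of the per-sequence arithmetic for a single toy sequence; the coefficient values are illustrative assumptions, not the original config defaults:

    import numpy as np

    # toy per-timestep quantities for one sequence
    logp_old = np.array([-1.0, -1.2, -0.9])
    logp_new = np.array([-0.9, -1.3, -0.8])
    advantage = np.array([0.5, -0.2, 0.1])
    kl_steps = np.array([0.01, 0.05, 0.02])       # KL(old, new) per step

    penalty_coef = 1.0                            # stands in for self._penalty
    kl_target, cutoff_factor, cutoff_coef = 0.01, 2.0, 1000.0  # illustrative

    ratio = np.exp(logp_new - logp_old)           # importance weights
    surrogate_loss = -np.mean(ratio * advantage)  # component 1
    kl = np.mean(kl_steps)                        # per-sequence KL, as in the original
    kl_penalty = penalty_coef * kl                # component 2
    threshold = kl_target * cutoff_factor
    kl_cutoff = cutoff_coef * max(0.0, kl - threshold) ** 2  # component 3

    print(surrogate_loss + kl_penalty + kl_cutoff)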
def peekuntil(self, token, size=0):
    """
    Peeks for token in the FIFO.

    Performs the same function as readuntil() without removing data from
    the FIFO. See readuntil() for further information.
    """
    self.__append()
    i = self.buf.find(token, self.pos)
    if i < 0:
        index = max(len(token) - 1, size)
        newpos = max(len(self.buf) - index, self.pos)
        return False, self.buf[self.pos:newpos]
    newpos = i + len(token)
    return True, self.buf[self.pos:newpos]
Peeks for token in the FIFO. Performs the same function as readuntil() without removing data from the FIFO. See readuntil() for further information.
def _create(self):
    """Creates a new and empty database."""
    from .tools import makedirs_safe

    # create directory for sql database
    makedirs_safe(os.path.dirname(self._database))

    # create all the tables
    Base.metadata.create_all(self._engine)
    logger.debug("Created new empty database '%s'" % self._database)
Creates a new and empty database.
def kwargs(self):
    """Returns a dict of the kwargs for this Struct which were not
    interpreted by the baseclass.

    This excludes fields like `extends`, `merges`, and `abstract`, which are
    consumed by SerializableFactory.create and Validatable.validate.
    """
    return {k: v for k, v in self._kwargs.items() if k not in self._INTERNAL_FIELDS}
Returns a dict of the kwargs for this Struct which were not interpreted by the baseclass. This excludes fields like `extends`, `merges`, and `abstract`, which are consumed by SerializableFactory.create and Validatable.validate.
def _propagate(self, path, val):
    """
    Propagate the value up to the root node.
    """
    if val == '_DELETE':
        if path in self.data:
            del self.data[path]
        else:
            items = [(key, v) for key, v in self.data.items()
                     if not all(k == p for k, p in zip(key, path))]
            self.data = OrderedDict(items)
    else:
        self.data[path] = val
    if self.parent is not None:
        self.parent._propagate((self.identifier,) + path, val)
Propagate the value up to the root node.
def _set_ipv6_address(self, v, load=False):
    """
    Setter method for ipv6_address, mapped from YANG variable
    /interface/fortygigabitethernet/ipv6/ipv6_config/address/ipv6_address (list)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_ipv6_address is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_ipv6_address() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType("address", ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}),
            is_container='list',
            yang_name="ipv6-address",
            rest_name="ipv6-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}},
            namespace='urn:brocade.com:mgmt:brocade-ipv6-config',
            defining_module='brocade-ipv6-config',
            yang_type='list',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_address must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)""",
        })

    self.__ipv6_address = t
    if hasattr(self, '_set'):
        self._set()
Setter method for ipv6_address, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_config/address/ipv6_address (list) If this variable is read-only (config: false) in the source YANG file, then _set_ipv6_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ipv6_address() directly.
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
    """See Also: setObsoletedByResponse()

    Args:
      pid:
      obsoletedByPid:
      serialVersion:
      vendorSpecific:

    Returns:
    """
    response = self.setObsoletedByResponse(
        pid, obsoletedByPid, serialVersion, vendorSpecific
    )
    return self._read_boolean_response(response)
See Also: setObsoletedByResponse() Args: pid: obsoletedByPid: serialVersion: vendorSpecific: Returns:
def entrance_beveled(Di, l, angle, method='Rennels'):
    r'''Returns loss coefficient for a beveled or chamfered entrance to a pipe
    flush with the wall of a reservoir. This calculation has two methods
    available.

    The 'Rennels' and 'Idelchik' methods have similar trends, but the
    'Rennels' formulation is centered around a straight loss coefficient of
    0.57, so it is normally at least 0.07 higher.

    The Rennels [1]_ formulas are:

    .. math::
        K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2

    .. math::
        \lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d}
        \right)^{\frac{1-(l/d)^{1/4}}{2}}\right]

    .. math::
        C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90}
        \right)^{\frac{1}{1+l/d}}

    .. figure:: fittings/flush_mounted_beveled_entrance.png
       :scale: 30 %
       :alt: Beveled entrance mounted straight; after [1]_

    Parameters
    ----------
    Di : float
        Inside diameter of pipe, [m]
    l : float
        Length of bevel measured parallel to the pipe length, [m]
    angle : float
        Angle of bevel with respect to the pipe length, [degrees]
    method : str, optional
        One of 'Rennels', or 'Idelchik', [-]

    Returns
    -------
    K : float
        Loss coefficient [-]

    Notes
    -----
    A cheap way of getting a lower pressure drop. Little credible data is
    available.

    The table of data in [2]_ uses the angle for both bevels, so it runs from
    0 to 180 degrees; this function follows the convention in [1]_ which uses
    only one angle, with the angle varying from 0 to 90 degrees.

    .. plot:: plots/entrance_beveled.py

    Examples
    --------
    >>> entrance_beveled(Di=0.1, l=0.003, angle=45)
    0.45086864221916984
    >>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
    0.3995000000000001

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel'chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
       Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
       Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
       Treniya). National Technical Information Service, 1966.
    '''
    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        Cb = (1 - angle/90.)*(angle/90.)**(1./(1 + l/Di))
        lbd = 1 + 0.622*(1 - 1.5*Cb*(l/Di)**((1 - (l/Di)**0.25)/2.))
        return 0.0696*(1 - Cb*l/Di)*lbd**2 + (lbd - 1.)**2
    elif method == 'Idelchik':
        return float(entrance_beveled_Idelchik_obj(angle*2.0, l/Di))
    else:
        raise ValueError('Specified method not recognized; methods are %s'
                         % (entrance_beveled_methods))
r'''Returns loss coefficient for a beveled or chamfered entrance to a pipe flush with the wall of a reservoir. This calculation has two methods available. The 'Rennels' and 'Idelchik' methods have similar trends, but the 'Rennels' formulation is centered around a straight loss coefficient of 0.57, so it is normally at least 0.07 higher. The Rennels [1]_ formulas are: .. math:: K = 0.0696\left(1 - C_b\frac{l}{d}\right)\lambda^2 + (\lambda-1)^2 .. math:: \lambda = 1 + 0.622\left[1-1.5C_b\left(\frac{l}{d} \right)^{\frac{1-(l/d)^{1/4}}{2}}\right] .. math:: C_b = \left(1 - \frac{\theta}{90}\right)\left(\frac{\theta}{90} \right)^{\frac{1}{1+l/d}} .. figure:: fittings/flush_mounted_beveled_entrance.png :scale: 30 % :alt: Beveled entrance mounted straight; after [1]_ Parameters ---------- Di : float Inside diameter of pipe, [m] l : float Length of bevel measured parallel to the pipe length, [m] angle : float Angle of bevel with respect to the pipe length, [degrees] method : str, optional One of 'Rennels', or 'Idelchik', [-] Returns ------- K : float Loss coefficient [-] Notes ----- A cheap way of getting a lower pressure drop. Little credible data is available. The table of data in [2]_ uses the angle for both bevels, so it runs from 0 to 180 degrees; this function follows the convention in [1]_ which uses only one angle, with the angle varying from 0 to 90 degrees. .. plot:: plots/entrance_beveled.py Examples -------- >>> entrance_beveled(Di=0.1, l=0.003, angle=45) 0.45086864221916984 >>> entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik') 0.3995000000000001 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. .. [2] Idel'chik, I. E. Handbook of Hydraulic Resistance: Coefficients of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya Treniya). National Technical Information Service, 1966.
def get_waveform_end_frequency(template=None, **kwargs):
    """Return the stop frequency of a template
    """
    input_params = props(template, **kwargs)
    approximant = kwargs['approximant']

    if approximant in _filter_ends:
        return _filter_ends[approximant](**input_params)
    else:
        return None
Return the stop frequency of a template
def logstream_policy():
    """Policy needed for logspout -> kinesis log streaming."""
    p = Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=["*"],
                Action=[
                    kinesis.CreateStream, kinesis.DescribeStream,
                    Action(kinesis.prefix, "AddTagsToStream"),
                    Action(kinesis.prefix, "PutRecords")
                ])])
    return p
Policy needed for logspout -> kinesis log streaming.
def qteAbort(self, msgObj):
    """
    Restore the original cursor position because the user hit abort.
    """
    self.qteWidget.setCursorPosition(*self.cursorPosOrig)
    self.qteMain.qtesigAbort.disconnect(self.qteAbort)
Restore the original cursor position because the user hit abort.
def separate(polylines, f_mx_dist=2, mn_group_len=4):
    """
    split polylines wherever crinkles are found
    """
    s = []
    for n in range(len(polylines) - 1, -1, -1):
        c = polylines[n]
        separated = False
        start = 0
        for m in range(mn_group_len, len(c) - 1):
            if m - start < mn_group_len:
                continue
            m += 1
            group = c[m - mn_group_len:m]
            x, y = group[:, 0], group[:, 1]
            asc, offs, _, _, _ = linregress(x, y)
            yfit = asc * x + offs
            # check whether next point would fit in:
            p1 = c[m]
            l = (x[0], yfit[0], p1[-1], asc * p1[-1] + offs)
            std = np.mean([line.distance(l, g) for g in group])
            dist = line.distance(l, p1)
            if dist > 2 and dist > f_mx_dist * std:
                separated = True
                s.append(c[start:m - 1])
                start = m - 1
        if separated:
            if len(c) - start >= 2:
                s.append(c[start:])
            polylines.pop(n)
    polylines.extend(s)
    return polylines
split polylines wherever crinkles are found
def mac2eui64(mac, prefix=None):
    '''
    Convert a MAC address to a EUI64 identifier
    or, with prefix provided, a full IPv6 address
    '''
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    eui64 = re.sub(r'[.:-]', '', mac).lower()
    eui64 = eui64[0:6] + 'fffe' + eui64[6:]
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]

    if prefix is None:
        return ':'.join(re.findall(r'.{4}', eui64))
    else:
        try:
            net = ipaddress.ip_network(prefix, strict=False)
            euil = int('0x{0}'.format(eui64), 16)
            return '{0}/{1}'.format(net[euil], net.prefixlen)
        except Exception:
            return
Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address
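A worked sketch on a sample MAC, following the arithmetic above (the prefix form assumes the ipaddress module is importable, as the function itself does):

    >>> mac2eui64('00:16:3e:12:34:56')
    '0216:3eff:fe12:3456'
    >>> mac2eui64('00:16:3e:12:34:56', prefix='2001:db8::/64')
    '2001:db8::216:3eff:fe12:3456/64'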
def get_source_files(target, build_context) -> list:
    """Return list of source files for `target`."""
    all_sources = list(target.props.sources)
    for proto_dep_name in target.props.protos:
        proto_dep = build_context.targets[proto_dep_name]
        all_sources.extend(proto_dep.artifacts.get(AT.gen_cc).keys())
    return all_sources
Return list of source files for `target`.
def registerWebAdaptor(self, webAdaptorURL, machineName, machineIP,
                       isAdminEnabled, description, httpPort, httpsPort):
    """
    You can use this operation to register the ArcGIS Web Adaptor from your
    ArcGIS Server. By registering the Web Adaptor with the server, you are
    telling the server to trust requests (including security credentials)
    that have been submitted through this Web Adaptor.

    Inputs:
       webAdaptorURL - The URL of the web adaptor through which ArcGIS
                       resources will be accessed.
       machineName - The machine name on which the web adaptor is installed.
       machineIP - The local IP address of the machine on which the web
                   adaptor is installed.
       isAdminEnabled - A boolean flag to indicate if administrative access
                        is allowed through the web adaptor. The default is
                        false.
       description - An optional description for the web adaptor.
       httpPort - An optional parameter to indicate the HTTP port of the
                  web adaptor. If this parameter is not provided, it is
                  derived from the URL.
       httpsPort - An optional parameter to indicate the HTTPS port of the
                   web adaptor. If this parameter is not provided, it is
                   derived from the URL.
    """
    url = self._url + "/webadaptors/register"
    params = {
        "f": "json",
        "webAdaptorURL": webAdaptorURL,
        "machineName": machineName,
        "machineIP": machineIP,
        "isAdminEnabled": isAdminEnabled,
        "description": description,
        "httpPort": httpPort,
        "httpsPort": httpsPort
    }
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
You can use this operation to register the ArcGIS Web Adaptor from your ArcGIS Server. By registering the Web Adaptor with the server, you are telling the server to trust requests (including security credentials) that have been submitted through this Web Adaptor. Inputs: webAdaptorURL - The URL of the web adaptor through which ArcGIS resources will be accessed. machineName - The machine name on which the web adaptor is installed. machineIP - The local IP address of the machine on which the web adaptor is installed. isAdminEnabled - A boolean flag to indicate if administrative access is allowed through the web adaptor. The default is false. description - An optional description for the web adaptor. httpPort - An optional parameter to indicate the HTTP port of the web adaptor. If this parameter is not provided, it is derived from the URL. httpsPort - An optional parameter to indicate the HTTPS port of the web adaptor. If this parameter is not provided, it is derived from the URL.
def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs):
    """
    Renders the given CSS class if the request path matches the path of the view.
    :param context: The context where the tag was called. Used to access the request object.
    :param viewnames: The name of the view or views separated by || (include namespaces if any).
    :param css_class: The CSS class to render.
    :param strict: If True, the tag will perform an exact match with the request path.
    :return:
    """
    if css_class is None:
        css_class = getattr(settings, 'ACTIVE_LINK_CSS_CLASS', 'active')
    if strict is None:
        strict = getattr(settings, 'ACTIVE_LINK_STRICT', False)
    request = context.get('request')
    if request is None:
        # Can't work without the request object.
        return ''
    active = False
    views = viewnames.split('||')
    for viewname in views:
        path = reverse(viewname.strip(), args=args, kwargs=kwargs)
        request_path = escape_uri_path(request.path)
        if strict:
            active = request_path == path
        else:
            active = request_path.find(path) == 0
        if active:
            break
    if active:
        return css_class
    return ''
Renders the given CSS class if the request path matches the path of the view. :param context: The context where the tag was called. Used to access the request object. :param viewnames: The name of the view or views separated by || (include namespaces if any). :param css_class: The CSS class to render. :param strict: If True, the tag will perform an exact match with the request path. :return:
def files_rm(self, path, recursive=False, **kwargs):
    """Removes a file from the MFS.

    .. code-block:: python

        >>> c.files_rm("/bla/file")
        b''

    Parameters
    ----------
    path : str
        Filepath within the MFS
    recursive : bool
        Recursively remove directories?
    """
    kwargs.setdefault("opts", {"recursive": recursive})

    args = (path,)
    return self._client.request('/files/rm', args, **kwargs)
Removes a file from the MFS. .. code-block:: python >>> c.files_rm("/bla/file") b'' Parameters ---------- path : str Filepath within the MFS recursive : bool Recursively remove directories?
def next(self):
    """
    (internal) returns the next result from the ``itertools.tee`` object for
    the wrapped ``Piper`` instance or re-raises an ``Exception``.
    """
    # do not acquire lock if NuMap is not finished.
    if self.finished:
        raise StopIteration
    # get per-tee lock
    self.piper.tee_locks[self.i].acquire()
    # get result or exception
    exception = True
    try:
        result = self.piper.tees[self.i].next()
        exception = False
    except StopIteration, result:  # Python 2 except syntax
        self.finished = True
    except Exception, result:
        pass
    # release per-tee lock, either self or next
    if self.s == self.stride or self.finished:
        self.s = 1
        self.piper.tee_locks[(self.i + 1) % len(self.piper.tees)].release()
    else:
        self.s += 1
        self.piper.tee_locks[self.i].release()
    if exception:
        raise result
    else:
        return result
(internal) returns the next result from the ``itertools.tee`` object for the wrapped ``Piper`` instance or re-raises an ``Exception``.
def _get_src(tree_base, source, saltenv='base'):
    '''
    Get the named sources and place them into the tree_base
    '''
    parsed = _urlparse(source)
    sbase = os.path.basename(source)
    dest = os.path.join(tree_base, sbase)
    if parsed.scheme:
        __salt__['cp.get_url'](source, dest, saltenv=saltenv)
    else:
        shutil.copy(source, dest)
Get the named sources and place them into the tree_base
def initialize_logging(args):
    """Configure the root logger with some sensible defaults."""
    log_handler = logging.StreamHandler()
    log_formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s")
    log_handler.setFormatter(log_formatter)
    root_logger = logging.getLogger()
    root_logger.addHandler(log_handler)
    root_logger.setLevel(getattr(logging, args.loglevel))
Configure the root logger with some sensible defaults.
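A runnable usage sketch; args can be any object with a loglevel attribute, so a bare Namespace stands in for a parsed argparse result here:

    import argparse
    import logging

    # equivalent to what parser.parse_args() would return for --loglevel INFO
    initialize_logging(argparse.Namespace(loglevel="INFO"))
    logging.getLogger("demo").info("configured")  # INFO ... demo:00NN - configured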
def _add_partial_key_entity_pb(self):
    """Adds a new mutation for an entity with a partial key.

    :rtype: :class:`.entity_pb2.Entity`
    :returns: The newly created entity protobuf that will be
              updated and sent with a commit.
    """
    new_mutation = _datastore_pb2.Mutation()
    self._mutations.append(new_mutation)
    return new_mutation.insert
Adds a new mutation for an entity with a partial key. :rtype: :class:`.entity_pb2.Entity` :returns: The newly created entity protobuf that will be updated and sent with a commit.
def parse_map_Ks(self):
    """Specular color map"""
    # the original names this `Kd` (a leftover from the diffuse-map parser);
    # the value is simply the specular texture path
    path = os.path.join(self.dir, " ".join(self.values[1:]))
    self.this_material.set_texture_specular_color(path)
Specular color map
def grab_selenium_driver(driver_name=None):
    """
    pip install selenium -U
    """
    from selenium import webdriver
    if driver_name is None:
        driver_name = 'firefox'
    if driver_name.lower() == 'chrome':
        grab_selenium_chromedriver()
        return webdriver.Chrome()
    elif driver_name.lower() == 'firefox':
        # grab_selenium_chromedriver()
        return webdriver.Firefox()
    else:
        raise AssertionError('unknown name = %r' % (driver_name,))
pip install selenium -U
def _set_security_profile(self, v, load=False):
    """
    Setter method for security_profile, mapped from YANG variable
    /port_profile/security_profile (container)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_security_profile is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_security_profile() directly.

    YANG Description: The Security profile.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=security_profile.security_profile,
            is_container='container',
            presence=True,
            yang_name="security-profile",
            rest_name="security-profile",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}},
            namespace='urn:brocade.com:mgmt:brocade-port-profile',
            defining_module='brocade-port-profile',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """security_profile must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=security_profile.security_profile, is_container='container', presence=True, yang_name="security-profile", rest_name="security-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'info': u'Security profile', u'callpoint': u'security-profile-config'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)""",
        })

    self.__security_profile = t
    if hasattr(self, '_set'):
        self._set()
Setter method for security_profile, mapped from YANG variable /port_profile/security_profile (container) If this variable is read-only (config: false) in the source YANG file, then _set_security_profile is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_security_profile() directly. YANG Description: The Security profile.
def iterkeys(self, key_type=None, return_all_keys=False):
    """ Returns an iterator over the dictionary's keys.

    @param key_type if specified, iterator for a dictionary of this type
        will be used. Otherwise (if not specified) tuples containing all
        (multiple) keys for this dictionary will be generated.
    @param return_all_keys if set to True - tuple of keys is returned
        instead of a key of this type.
    """
    if key_type is not None:
        the_key = str(key_type)
        if the_key in self.__dict__:
            for key in self.__dict__[the_key].keys():
                if return_all_keys:
                    yield self.__dict__[the_key][key]
                else:
                    yield key
    else:
        for keys in self.items_dict.keys():
            yield keys
Returns an iterator over the dictionary's keys. @param key_type if specified, iterator for a dictionary of this type will be used. Otherwise (if not specified) tuples containing all (multiple) keys for this dictionary will be generated. @param return_all_keys if set to True - tuple of keys is returned instead of a key of this type.
def encode(self,
           data: mx.sym.Symbol,
           data_length: Optional[mx.sym.Symbol],
           seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """
    Encodes data given sequence lengths of individual examples and maximum sequence length.

    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    factor_embeddings = []  # type: List[mx.sym.Symbol]
    if self.is_source:
        data, *data_factors = mx.sym.split(data=data,
                                           num_outputs=self.config.num_factors,
                                           axis=2,
                                           squeeze_axis=True,
                                           name=self.prefix + "factor_split")

        if self.config.factor_configs is not None:
            for i, (factor_data, factor_config, factor_weight) in enumerate(
                    zip(data_factors, self.config.factor_configs, self.embed_factor_weights)):
                factor_embeddings.append(mx.sym.Embedding(data=factor_data,
                                                          input_dim=factor_config.vocab_size,
                                                          weight=factor_weight,
                                                          output_dim=factor_config.num_embed,
                                                          name=self.prefix + "factor%d_embed" % i))

    embedding = mx.sym.Embedding(data=data,
                                 input_dim=self.config.vocab_size,
                                 weight=self.embed_weight,
                                 output_dim=self.config.num_embed,
                                 name=self.prefix + "embed")

    if self.config.factor_configs is not None:
        if self.config.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT:
            embedding = mx.sym.concat(embedding, *factor_embeddings, dim=2,
                                      name=self.prefix + "embed_plus_factors")
        else:
            embedding = mx.sym.add_n(embedding, *factor_embeddings,
                                     name=self.prefix + "embed_plus_factors")

    if self.config.dropout > 0:
        embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout,
                                   name="source_embed_dropout")

    return embedding, data_length, seq_len
Encodes data given sequence lengths of individual examples and maximum sequence length. :param data: Input data. :param data_length: Vector with sequence lengths. :param seq_len: Maximum sequence length. :return: Encoded versions of input data (data, data_length, seq_len).
def errors(self):
    """
    Get the errors of the tag.
    If invalid then the list will consist of errors containing each a code
    and message explaining the error. Each error also refers to the
    respective (sub)tag(s).

    :return: list of errors of the tag. If the tag is valid, it returns an empty list.
    """
    errors = []
    data = self.data
    error = self.error

    # Check if the tag is grandfathered and if the grandfathered tag is deprecated (e.g. no-nyn).
    if 'record' in data:
        if 'Deprecated' in data['record']:
            errors.append(error(self.ERR_DEPRECATED))
        # Only check every subtag if the tag is not explicitly listed as grandfathered or redundant.
        return errors

    # Check that all subtag codes are meaningful.
    codes = data['tag'].split('-')
    for i, code in enumerate(codes):
        # Ignore anything after a singleton (break)
        if len(code) < 2:
            # Check that each private-use subtag is within the maximum allowed length.
            for code in codes[i + 1:]:
                if len(code) > 8:
                    errors.append(error(self.ERR_TOO_LONG, code))
            break
        if code not in index:
            errors.append(error(self.ERR_UNKNOWN, code))
            # Continue to the next item.
            continue

    # Check that first tag is a language tag.
    subtags = self.subtags
    if not len(subtags):
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors
    elif subtags[0].type != 'language':
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors

    # Check for more than one of some types and for deprecation.
    found = dict(language=[], extlang=[], variant=[], script=[], region=[])
    for subtag in subtags:
        type = subtag.type
        if subtag.deprecated:
            errors.append(error(self.ERR_SUBTAG_DEPRECATED, subtag))
        if type in found:
            found[type].append(subtag)
        if 'language' == type:
            if len(found['language']) > 1:
                errors.append(error(self.ERR_EXTRA_LANGUAGE, subtag))
        elif 'region' == type:
            if len(found['region']) > 1:
                errors.append(error(self.ERR_EXTRA_REGION, subtag))
        elif 'extlang' == type:
            if len(found['extlang']) > 1:
                errors.append(error(self.ERR_EXTRA_EXTLANG, subtag))
        elif 'script' == type:
            if len(found['script']) > 1:
                errors.append(error(self.ERR_EXTRA_SCRIPT, subtag))
            # Check if script is same as language suppress-script.
            else:
                script = subtags[0].script
                if script:
                    if script.format == subtag.format:
                        errors.append(error(self.ERR_SUPPRESS_SCRIPT, subtag))
        elif 'variant' == type:
            if len(found['variant']) > 1:
                for variant in found['variant']:
                    if variant.format == subtag.format:
                        errors.append(error(self.ERR_DUPLICATE_VARIANT, subtag))
                        break

    # Check for correct order.
    if len(subtags) > 1:
        priority = dict(language=4, extlang=5, script=6, region=7, variant=8)
        for i, subtag in enumerate(subtags[0:len(subtags) - 1]):
            next = subtags[i + 1]
            if next:
                if priority[subtag.type] > priority[next.type]:
                    errors.append(error(self.ERR_WRONG_ORDER, [subtag, next]))

    return errors
Get the errors of the tag. If invalid then the list will consist of errors containing each a code and message explaining the error. Each error also refers to the respective (sub)tag(s). :return: list of errors of the tag. If the tag is valid, it returns an empty list.
def createHeadingPointer(self):
    '''Creates the pointer for the current heading.'''
    self.headingTri = patches.RegularPolygon((0.0, 0.80), 3, 0.05,
                                             color='k', zorder=4)
    self.axes.add_patch(self.headingTri)
    self.headingText = self.axes.text(0.0, 0.675, '0',
                                      color='k', size=self.fontSize,
                                      horizontalalignment='center',
                                      verticalalignment='center',
                                      zorder=4)
Creates the pointer for the current heading.
def register_multi_flags_validator(flag_names,
                                   multi_flags_checker,
                                   message='Flags validation failed',
                                   flag_values=FLAGS):
    """Adds a constraint to multiple flags.

    The constraint is validated when flags are initially parsed, and after
    each change of the corresponding flag's value.

    Args:
      flag_names: [str], a list of the flag names to be checked.
      multi_flags_checker: callable, a function to validate the flag.
          input - dictionary, with keys() being flag_names, and value for
              each key being the value of the corresponding flag (string,
              boolean, etc).
          output - Boolean. Must return True if validator constraint is
              satisfied. If constraint is not satisfied, it should either
              return False or raise gflags.ValidationError.
      message: Error text to be shown to the user if checker returns False.
          If checker raises gflags.ValidationError, message from the raised
          error will be shown.
      flag_values: An optional FlagValues instance to validate against.

    Raises:
      AttributeError: If a flag is not registered as a valid flag name.
    """
    v = gflags_validators.MultiFlagsValidator(
        flag_names, multi_flags_checker, message)
    _add_validator(flag_values, v)
Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name.
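A usage sketch based on the docstring's contract; the flag names below are made up for illustration and must already be defined via gflags:

    # hypothetical flags; any registered gflags names work here
    def _checker(flags_dict):
        # flags_dict maps each name in flag_names to its parsed value
        return flags_dict['min_size'] <= flags_dict['max_size']

    register_multi_flags_validator(
        ['min_size', 'max_size'],
        _checker,
        message='min_size must not exceed max_size')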
def hid_device_path_exists(device_path, guid=None):
    """Test if required device_path is still valid
    (HID device connected to host)
    """
    # expecting HID devices
    if not guid:
        guid = winapi.GetHidGuid()

    info_data = winapi.SP_DEVINFO_DATA()
    info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA)

    with winapi.DeviceInterfaceSetInfo(guid) as h_info:
        for interface_data in winapi.enum_device_interfaces(h_info, guid):
            test_device_path = winapi.get_device_path(h_info,
                                                      interface_data,
                                                      byref(info_data))
            if test_device_path == device_path:
                return True
    # No device with that path is present now
    return False
Test if required device_path is still valid (HID device connected to host)
def pick_cert_for_twisted(netloc, possible):
    """
    Pick the right client key/certificate to use for the given server and
    return it in the form Twisted wants.

    :param NetLocation netloc: The location of the server to consider.
    :param dict[TLSCredentials] possible: The available credentials from
        which to choose.

    :return: A two-tuple. If no credentials were found, the elements are
        ``None`` and ``[]``. Otherwise, the first element is a
        ``twisted.internet.ssl.PrivateCertificate`` instance representing the
        client certificate to use and the second element is a ``tuple`` of
        ``twisted.internet.ssl.Certificate`` instances representing the rest
        of the chain necessary to validate the client certificate.
    """
    try:
        creds = possible[netloc]
    except KeyError:
        return (None, ())

    key = ssl.KeyPair.load(creds.key.as_bytes(), FILETYPE_PEM)
    return (
        ssl.PrivateCertificate.load(
            creds.chain.certificates[0].as_bytes(), key, FILETYPE_PEM,
        ),
        tuple(
            ssl.Certificate.load(cert.as_bytes(), FILETYPE_PEM)
            for cert in creds.chain.certificates[1:]
        ),
    )
Pick the right client key/certificate to use for the given server and return it in the form Twisted wants. :param NetLocation netloc: The location of the server to consider. :param dict[TLSCredentials] possible: The available credentials from which to choose. :return: A two-tuple. If no credentials were found, the elements are ``None`` and ``[]``. Otherwise, the first element is a ``twisted.internet.ssl.PrivateCertificate`` instance representing the client certificate to use and the second element is a ``tuple`` of ``twisted.internet.ssl.Certificate`` instances representing the rest of the chain necessary to validate the client certificate.
def get_data():
    """Retrieve static data from the game."""
    run_config = run_configs.get()
    with run_config.start(want_rgb=False) as controller:
        m = maps.get("Sequencer")  # Arbitrary ladder map.
        create = sc_pb.RequestCreateGame(local_map=sc_pb.LocalMap(
            map_path=m.path, map_data=m.data(run_config)))
        create.player_setup.add(type=sc_pb.Participant)
        create.player_setup.add(type=sc_pb.Computer, race=sc_common.Random,
                                difficulty=sc_pb.VeryEasy)
        join = sc_pb.RequestJoinGame(race=sc_common.Random,
                                     options=sc_pb.InterfaceOptions(raw=True))

        controller.create_game(create)
        controller.join_game(join)
        return controller.data()
Retrieve static data from the game.
def identity_to_string(identity_dict):
    """Dump Identity dictionary into its string representation."""
    result = []
    if identity_dict.get('proto'):
        result.append(identity_dict['proto'] + '://')
    if identity_dict.get('user'):
        result.append(identity_dict['user'] + '@')
    result.append(identity_dict['host'])
    if identity_dict.get('port'):
        result.append(':' + identity_dict['port'])
    if identity_dict.get('path'):
        result.append(identity_dict['path'])
    log.debug('identity parts: %s', result)
    return ''.join(result)
Dump Identity dictionary into its string representation.
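A round-trip sketch of the string it builds, assuming the module-level `log` used inside the function is configured:

    >>> identity_to_string({'proto': 'ssh', 'user': 'git',
    ...                     'host': 'github.com', 'port': '22', 'path': '/repo'})
    'ssh://git@github.com:22/repo'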
def _state_stopped(self):
    """
    The service is not running.

    This is the initial state, and the state after L{stopService} was
    called. To get out of this state, call L{startService}. If there is a
    current connection, we disconnect.
    """
    if self._reconnectDelayedCall:
        self._reconnectDelayedCall.cancel()
        self._reconnectDelayedCall = None
    self.loseConnection()
The service is not running. This is the initial state, and the state after L{stopService} was called. To get out of this state, call L{startService}. If there is a current connection, we disconnect.
def log(args, number=None, oneline=False, quiet=False):
    """Run a "git log ..." command, and return stdout

    args is anything which can be added after a normal "git log ..."
        it can be blank
    number, if true-ish, will be added as a "-n" option
    oneline, if true-ish, will add the "--oneline" option
    """
    options = ' '.join([
        number and str('-n %s' % number) or '',
        oneline and '--oneline' or ''
    ])
    try:
        return run('log %s %s' % (options, args), quiet=quiet)
    except UnknownRevision:
        return ''
Run a "git log ..." command, and return stdout args is anything which can be added after a normal "git log ..." it can be blank number, if true-ish, will be added as a "-n" option oneline, if true-ish, will add the "--oneline" option
def update(self, update_cameras=False, update_base_station=False):
    """Refresh object."""
    self._authenticate()

    # update attributes in all cameras to avoid duped queries
    if update_cameras:
        url = DEVICES_ENDPOINT
        response = self.query(url)
        if not response or not isinstance(response, dict):
            return

        for camera in self.cameras:
            for dev_info in response.get('data'):
                if dev_info.get('deviceName') == camera.name:
                    _LOGGER.debug("Refreshing %s attributes", camera.name)
                    camera.attrs = dev_info

            # preload cached videos
            # the user is still able to force a new query by
            # calling the Arlo.video()
            camera.make_video_cache()

    # force update base_station
    if update_base_station:
        for base in self.base_stations:
            base.update()
Refresh object.
def end_of_month(val):
    """
    Return a new datetime.datetime object with values that represent
    the end of a month.
    :param val: Date to ...
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    if val.month == 12:
        return start_of_month(val).replace(year=val.year + 1, month=1) \
            - timedelta(microseconds=1)
    else:
        return start_of_month(val).replace(month=val.month + 1) \
            - timedelta(microseconds=1)
Return a new datetime.datetime object with values that represent the end of a month. :param val: Date to ... :type val: datetime.datetime | datetime.date :rtype: datetime.datetime
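A worked example, assuming the companion start_of_month() (referenced above but not shown) zeroes the day and time fields: the function rolls forward to the first of the next month and steps back one microsecond.

    >>> end_of_month(date(2020, 2, 14))
    datetime.datetime(2020, 2, 29, 23, 59, 59, 999999)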
def set(self, mode, disable):
    """ create logger object, enable or disable logging
    """
    global logger
    try:
        if logger:
            if disable:
                logger.disabled = True
        else:
            if mode in ('STREAM', 'FILE'):
                logger = logd.getLogger(mode, __version__)
    except Exception as e:
        # note: if logger was never created, this call will itself fail
        logger.exception(
            '%s: Problem incurred during logging setup' %
            inspect.stack()[0][3]
        )
        return False
    return True
create logger object, enable or disable logging
def _init_metadata(self):
    """stub"""
    self._files_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'files'),
        'element_label': 'Files',
        'instructions': 'enter a file id with optional label',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_object_values': [{}],
        'syntax': 'OBJECT',
        'object_set': []
    }
    self._file_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'file'),
        'element_label': 'File',
        'instructions': 'accepts an Asset Id',
        'required': True,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_id_values': [''],
        'syntax': 'ID',
        'id_set': []
    }
    self._label_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'label'),
        'element_label': 'Label',
        'instructions': 'enter a string',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_string_values': [str(ObjectId())],
        'syntax': 'STRING',
        'minimum_string_length': 0,
        'maximum_string_length': 128,
        'string_set': []
    }
stub
def computePCsPlink(plink_path, k, out_dir, bfile, ffile):
    """
    computing principal components via plink
    """
    print("Using plink to compute principal components")
    cmd = '%s --bfile %s --pca %d ' % (plink_path, bfile, k)
    cmd += '--out %s' % (os.path.join(out_dir, 'plink'))
    subprocess.call(cmd, shell=True)
    plink_fn = os.path.join(out_dir, 'plink.eigenvec')
    M = sp.loadtxt(plink_fn, dtype=str)
    U = sp.array(M[:, 2:], dtype=float)
    U -= U.mean(0)
    U /= U.std(0)
    sp.savetxt(ffile, U)
computing principal components via plink
def hpforest(self, data: ['SASdata', str] = None,
             freq: str = None,
             id: str = None,
             input: [str, list, dict] = None,
             save: str = None,
             score: [str, bool, 'SASdata'] = True,
             target: [str, list, dict] = None,
             procopts: str = None,
             stmtpassthrough: str = None,
             **kwargs: dict) -> 'SASresults':
    """
    Python method to call the HPFOREST procedure

    Documentation link:
    https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf

    :param data: SASdata object or string. This parameter is required.
    :param freq: The freq variable can only be a string type.
    :param id: The id variable can only be a string type.
    :param input: The input variable can be a string, list or dict type. It refers to the independent, x, or input variables. This parameter is required
    :param save: The save variable can only be a string type.
    :param score: The score variable can only be a string type.
    :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required
    :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
    :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
    :return: SAS Result Object
    """
Python method to call the HPFOREST procedure Documentation link: https://support.sas.com/documentation/solutions/miner/emhp/14.1/emhpprcref.pdf :param data: SASdata object or string. This parameter is required. :param freq: The freq variable can only be a string type. :param id: The id variable can only be a string type. :param input: The input variable can be a string, list or dict type. It refers to the independent, x, or input variables. This parameter is required :param save: The save variable can only be a string type. :param score: The score variable can only be a string type. :param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable. This parameter is required :param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object
def _thumbnail_s3(self, original_filename, thumb_filename, thumb_size,
                  thumb_url, bucket_name, crop=None, bg=None, quality=85):
    """Finds or creates a thumbnail for the specified image on Amazon S3."""
    scheme = self.app.config.get('THUMBNAIL_S3_USE_HTTPS') and 'https' or 'http'

    thumb_url_full = url_for_s3(
        'static',
        bucket_name=self.app.config.get('THUMBNAIL_S3_BUCKET_NAME'),
        filename=thumb_url,
        scheme=scheme)
    original_url_full = url_for_s3(
        'static',
        bucket_name=bucket_name,
        filename=self._get_s3_path(original_filename).replace('static/', ''),
        scheme=scheme)

    # Return the thumbnail URL now if it already exists on S3.
    # HTTP HEAD request saves us actually downloading the image
    # for this check.
    # Thanks to:
    # http://stackoverflow.com/a/16778749/2066849
    try:
        resp = httplib2.Http().request(thumb_url_full, 'HEAD')
        resp_status = int(resp[0]['status'])
        assert(resp_status < 400)
        return thumb_url_full
    except Exception:
        pass

    # Thanks to:
    # http://stackoverflow.com/a/12020860/2066849
    try:
        fd = urllib.urlopen(original_url_full)
        temp_file = BytesIO(fd.read())
        image = Image.open(temp_file)
    except Exception:
        return ''

    img = self._thumbnail_resize(image, thumb_size, crop=crop, bg=bg)

    temp_file = BytesIO()
    img.save(temp_file, image.format, quality=quality)

    conn = S3Connection(self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_ID'),
                        self.app.config.get('THUMBNAIL_S3_ACCESS_KEY_SECRET'))
    bucket = conn.get_bucket(self.app.config.get('THUMBNAIL_S3_BUCKET_NAME'))

    path = self._get_s3_path(thumb_filename)
    k = bucket.new_key(path)

    try:
        k.set_contents_from_string(temp_file.getvalue())
        k.set_acl(self.app.config.get('THUMBNAIL_S3_ACL', 'public-read'))
    except S3ResponseError:
        return ''

    return thumb_url_full
Finds or creates a thumbnail for the specified image on Amazon S3.
def remove(self, builder, model):
    """
    Remove the scope from a given query builder.

    :param builder: The query builder
    :type builder: eloquent.orm.builder.Builder

    :param model: The model
    :type model: eloquent.orm.Model
    """
    column = model.get_qualified_deleted_at_column()

    query = builder.get_query()

    wheres = []
    for where in query.wheres:
        # If the where clause is a soft delete date constraint,
        # we will remove it from the query and reset the keys
        # on the wheres. This allows the developer to include
        # deleted model in a relationship result set that is lazy loaded.
        if not self._is_soft_delete_constraint(where, column):
            wheres.append(where)

    query.wheres = wheres
Remove the scope from a given query builder. :param builder: The query builder :type builder: eloquent.orm.builder.Builder :param model: The model :type model: eloquent.orm.Model
def replace(self, left=None, lower=None, upper=None, right=None, ignore_inf=True):
    """
    Create a new interval based on the current one and the provided values.

    Callable can be passed instead of values. In that case, it is called with
    the current corresponding value except if ignore_inf is set (default) and
    the corresponding bound is an infinity.

    :param left: (a function of) left boundary.
    :param lower: (a function of) value of the lower bound.
    :param upper: (a function of) value of the upper bound.
    :param right: (a function of) right boundary.
    :param ignore_inf: ignore infinities if functions are provided (default is True).
    :return: an Interval instance
    """
    if callable(left):
        left = left(self._left)
    else:
        left = self._left if left is None else left

    if callable(lower):
        lower = self._lower if ignore_inf and self._lower in [-inf, inf] else lower(self._lower)
    else:
        lower = self._lower if lower is None else lower

    if callable(upper):
        upper = self._upper if ignore_inf and self._upper in [-inf, inf] else upper(self._upper)
    else:
        upper = self._upper if upper is None else upper

    if callable(right):
        right = right(self._right)
    else:
        right = self._right if right is None else right

    return AtomicInterval(left, lower, upper, right)
Create a new interval based on the current one and the provided values. Callable can be passed instead of values. In that case, it is called with the current corresponding value except if ignore_inf is set (default) and the corresponding bound is an infinity. :param left: (a function of) left boundary. :param lower: (a function of) value of the lower bound. :param upper: (a function of) value of the upper bound. :param right: (a function of) right boundary. :param ignore_inf: ignore infinities if functions are provided (default is True). :return: an Interval instance
def addPharLapPaths(env):
    """This function adds the path to the Phar Lap binaries, includes,
    and libraries, if they are not already there."""
    ph_path = getPharLapPath()

    try:
        env_dict = env['ENV']
    except KeyError:
        env_dict = {}
        env['ENV'] = env_dict
    SCons.Util.AddPathIfNotExists(env_dict, 'PATH',
                                  os.path.join(ph_path, 'bin'))
    SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE',
                                  os.path.join(ph_path, 'include'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, 'lib'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, os.path.normpath('lib/vclib')))

    env['PHARLAP_PATH'] = getPharLapPath()
    env['PHARLAP_VERSION'] = str(getPharLapVersion())
This function adds the path to the Phar Lap binaries, includes, and libraries, if they are not already there.
def generalize_sql(sql):
    """
    Removes most variables from an SQL query and replaces them with X or N for numbers.

    Based on Mediawiki's DatabaseBase::generalizeSQL

    :type sql: str|None
    :rtype: str
    """
    if sql is None:
        return None

    # multiple spaces
    sql = re.sub(r'\s{2,}', ' ', sql)

    # MW comments
    # e.g. /* CategoryDataService::getMostVisited N.N.N.N */
    sql = remove_comments_from_sql(sql)

    # handle LIKE statements
    sql = normalize_likes(sql)

    sql = re.sub(r"\\\\", '', sql)
    sql = re.sub(r"\\'", '', sql)
    sql = re.sub(r'\\"', '', sql)
    sql = re.sub(r"'[^\']*'", 'X', sql)
    sql = re.sub(r'"[^\"]*"', 'X', sql)

    # All newlines, tabs, etc replaced by single space
    sql = re.sub(r'\s+', ' ', sql)

    # All numbers => N
    sql = re.sub(r'-?[0-9]+', 'N', sql)

    # WHERE foo IN ('880987','882618','708228','522330')
    sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)

    return sql.strip()
Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql: str|None :rtype: str
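For illustration, a minimal example of the rewriting the regexes above produce (a sketch; the exact output also depends on the comment- and LIKE-handling helpers, which are not shown here):

sql = "SELECT *  FROM page WHERE page_id = 123 AND title = 'Foo'"
print(generalize_sql(sql))
# SELECT * FROM page WHERE page_id = N AND title = X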
def analyze(self, handle, filename):
    """Submit a file for analysis.

    :type  handle:   File handle
    :param handle:   Handle to file to upload for analysis.
    :type  filename: str
    :param filename: File name.

    :rtype:  str
    :return: Job ID as a string
    """
    # multipart post files.
    files = {"file": (filename, handle)}

    # ensure the handle is at offset 0.
    handle.seek(0)

    response = self._request("/submit/file", method='POST', files=files)

    try:
        if response.status_code == 201:
            # good response
            return response.json()['job_id']
        else:
            raise sandboxapi.SandboxError("api error in analyze: {r}".format(r=response.content.decode('utf-8')))
    except (ValueError, KeyError) as e:
        raise sandboxapi.SandboxError("error in analyze: {e}".format(e=e))
Submit a file for analysis. :type handle: File handle :param handle: Handle to file to upload for analysis. :type filename: str :param filename: File name. :rtype: str :return: Job ID as a string
def rotateImage(image, angle): """ rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw """ image = [list(row) for row in image] for n in range(angle % 4): image = list(zip(*image[::-1])) return image
rotates a 2d array to a multiple of 90 deg. 0 = default 1 = 90 deg. cw 2 = 180 deg. 3 = 90 deg. ccw
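A small worked example (note that rotated rows come back as tuples, since only the input rows are converted to lists):

grid = [[1, 2],
        [3, 4]]
print(rotateImage(grid, 1))  # [(3, 1), (4, 2)] -- 90 deg. clockwise
print(rotateImage(grid, 0))  # [[1, 2], [3, 4]] -- unchanged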
def extract_chunks(blob):
    """Splits the blob into chunks grouped by kind."""
    chunks = []
    stream = BytesIO(blob.bytes)

    current_pos = stream.tell()
    stream.seek(0, 2)
    length = stream.tell()
    stream.seek(current_pos, 0)

    while stream.tell() < length:
        chunks.append(read_chunk(stream))

    return chunks
Splits the blob into chunks grouped by kind.
def getExtensions(self): """returns objects for all map service extensions""" extensions = [] if isinstance(self.supportedExtensions, list): for ext in self.supportedExtensions: extensionURL = self._url + "/exts/%s" % ext if ext == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions else: extensionURL = self._url + "/exts/%s" % self.supportedExtensions if self.supportedExtensions == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions
returns objects for all map service extensions
def sync_out(self, file_name=None, force=False): """Synchronize from objects to records""" self.log('---- Sync Out ----') from ambry.bundle.files import BuildSourceFile self.dstate = self.STATES.BUILDING for f in self.build_source_files.list_records(): if (f.sync_dir() == BuildSourceFile.SYNC_DIR.RECORD_TO_FILE or f.record.path == file_name) or force: self.log('Sync: {}'.format(f.record.path)) f.record_to_fs() self.commit()
Synchronize from objects to records
def reset(self): """Resets the internal evaluation result to initial state.""" self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0
Resets the internal evaluation result to initial state.
def register_dn(self):
    """Called by WorkerThread objects to deregister themselves.

    Acquire the condition variable for the WorkerThread objects.
    Decrement the running-thread count. If we are the last thread to
    terminate, mark the pool as down and notify the ThreadPool thread
    waiting for shutdown.
    """
    with self.regcond:
        self.runningcount -= 1
        tid = thread.get_ident()
        self.tids.remove(tid)
        self.logger.debug("register_dn: count_dn is %d" % self.runningcount)
        self.logger.debug("register_dn: remaining: %s" % str(self.tids))
        if self.runningcount == 0:
            self.status = 'down'
            self.regcond.notify()
Called by WorkerThread objects to deregister themselves. Acquire the condition variable for the WorkerThread objects. Decrement the running-thread count. If we are the last thread to terminate, mark the pool as down and notify the ThreadPool thread waiting for shutdown.
def get_result_category(expected_results, result, properties):
    '''
    This function determines the relation between actual result and expected result
    for the given task and properties.
    @param expected_results: The expected results, keyed by property file name.
    @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized).
    @param properties: The list of property names to check.
    @return One of the CATEGORY_* strings.
    '''
    result_class = get_result_classification(result)
    if result_class == RESULT_CLASS_OTHER:
        if result == RESULT_UNKNOWN:
            return CATEGORY_UNKNOWN
        elif result == RESULT_DONE:
            return CATEGORY_MISSING
        else:
            return CATEGORY_ERROR

    if not properties:
        # Without property we cannot return correct or wrong results.
        return CATEGORY_MISSING

    # For now, we have at most one property
    assert len(properties) == 1, properties
    prop = properties[0]

    expected_result = expected_results.get(prop.filename)
    if not expected_result or expected_result.result is None:
        # expected result of task is unknown
        return CATEGORY_MISSING

    if prop.is_well_known:
        # for well-known properties, only support hard-coded results
        is_valid_result = result in _VALID_RESULTS_PER_PROPERTY[prop.name]
    elif expected_result.subproperty:
        is_valid_result = result in {
            RESULT_TRUE_PROP, RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")"}
    else:
        is_valid_result = (result == RESULT_TRUE_PROP) or result.startswith(RESULT_FALSE_PROP)
    if not is_valid_result:
        return CATEGORY_UNKNOWN  # result does not match property

    if expected_result.result:
        return CATEGORY_CORRECT if result_class == RESULT_CLASS_TRUE else CATEGORY_WRONG
    else:
        if expected_result.subproperty:
            return CATEGORY_CORRECT if result == RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")" else CATEGORY_WRONG
        else:
            return CATEGORY_CORRECT if result_class == RESULT_CLASS_FALSE else CATEGORY_WRONG
This function determines the relation between actual result and expected result for the given task and properties. @param expected_results: The expected results, keyed by property file name. @param result: The result given by the tool (needs to be one of the RESULT_* strings to be recognized). @param properties: The list of property names to check. @return One of the CATEGORY_* strings.
def listify(value): """ Wrap the given value into a list, with the below provisions: * If the value is a list or a tuple, it's coerced into a new list. * If the value is None, an empty list is returned. * Otherwise, a single-element list is returned, containing the value. :param value: A value. :return: a list! :rtype: list """ if value is None: return [] if isinstance(value, (list, tuple)): return list(value) return [value]
Wrap the given value into a list, with the below provisions: * If the value is a list or a tuple, it's coerced into a new list. * If the value is None, an empty list is returned. * Otherwise, a single-element list is returned, containing the value. :param value: A value. :return: a list! :rtype: list
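A few concrete calls illustrating each branch:

listify(None)        # []
listify(('a', 'b'))  # ['a', 'b']
listify(['a', 'b'])  # ['a', 'b'] (a new list)
listify('a')         # ['a']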
def name(self, new_name): """ Sets the name of this node. :param new_name: name """ log.info("{module}: {name} [{id}] renamed to {new_name}".format(module=self.manager.module_name, name=self.name, id=self.id, new_name=new_name)) self._name = new_name
Sets the name of this node. :param new_name: name
def find_candidates(candidates):
    """
    Find candidate addresses using the LCS algorithm and perform a scoring
    based on how often a candidate appears in a longer candidate

    Input is something like
    ------------------------
    ['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62',
     '1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000']

    Output like
    -----------
    {'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1,
     '57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1,
     '78e289': 4, '1b603300': 3}

    :type candidates: list of CommonRange
    :return:
    """
    result = defaultdict(int)

    for i, c_i in enumerate(candidates):
        for j in range(i, len(candidates)):
            lcs = util.longest_common_substring(c_i.hex_value, candidates[j].hex_value)
            if lcs:
                result[lcs] += 1

    return result
Find candidate addresses using the LCS algorithm and perform a scoring based on how often a candidate appears in a longer candidate Input is something like ------------------------ ['1b6033', '1b6033fd57', '701b603378e289', '20701b603378e289000c62', '1b603300', '78e289757e', '7078e2891b6033000000', '207078e2891b6033000000'] Output like ----------- {'1b6033': 18, '1b6033fd57': 1, '701b603378e289': 2, '207078e2891b6033000000': 1, '57': 1, '7078e2891b6033000000': 2, '78e289757e': 1, '20701b603378e289000c62': 1, '78e289': 4, '1b603300': 3} :type candidates: list of CommonRange :return:
def convert(self, string): """Return a copy of string converted to case. Parameters ---------- string : `str` Returns ------- `str` Examples -------- >>> CharCase.LOWER.convert('sTr InG') 'str ing' >>> CharCase.UPPER.convert('sTr InG') 'STR ING' >>> CharCase.TITLE.convert('sTr InG') 'Str ing' >>> CharCase.PRESERVE.convert('sTr InG') 'sTr InG' """ if self == self.__class__.TITLE: return capitalize(string) if self == self.__class__.UPPER: return string.upper() if self == self.__class__.LOWER: return string.lower() return string
Return a copy of string converted to case. Parameters ---------- string : `str` Returns ------- `str` Examples -------- >>> CharCase.LOWER.convert('sTr InG') 'str ing' >>> CharCase.UPPER.convert('sTr InG') 'STR ING' >>> CharCase.TITLE.convert('sTr InG') 'Str ing' >>> CharCase.PRESERVE.convert('sTr InG') 'sTr InG'
def _compute_f5(self, C, pga_rock): """ Compute f5 term (non-linear soil response) """ return C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
Compute f5 term (non-linear soil response)
def stop(self): """Stop Modis and log it out of Discord.""" self.button_toggle_text.set("Start Modis") self.state = "off" logger.info("Stopping Discord Modis") from ._client import client asyncio.run_coroutine_threadsafe(client.logout(), client.loop) self.status_bar.set_status(0)
Stop Modis and log it out of Discord.
def slice_cardinal(self, key):
    """
    Slice the container according to its (primary) cardinal axis.

    The "cardinal" axis can have any name so long as the name matches a
    data object attached to the container. The index name for this object
    should also match the value of the cardinal axis.

    The algorithm builds a network graph representing the data
    relationships (including information about the type of relationship)
    and then traverses the edge tree (starting from the cardinal table).
    Each subsequent child object in the tree is sliced based on its
    relationship with its parent.

    Note:
        Breadth first traversal is performed.

    Warning:
        This function does not make a copy (if possible): to ensure a new
        object is created (a copy) use
        :func:`~exa.core.container.Container.copy` after slicing.

        .. code-block:: Python

            myslice = mycontainer[::2].copy()

    See Also:
        For data network generation, see
        :func:`~exa.core.container.Container.network`. For information
        about relationships between data objects see
        :mod:`~exa.core.numerical`.
    """
    if self._cardinal:
        cls = self.__class__
        key = check_key(self[self._cardinal], key, cardinal=True)
        g = self.network(fig=False)
        kwargs = {self._cardinal: self[self._cardinal].ix[key],
                  'name': self.name, 'description': self.description,
                  'meta': self.meta}
        # Next, traverse all data objects breadth first
        for parent, child in nx.bfs_edges(g, self._cardinal):
            if child in kwargs:
                continue
            typ = g.edge_types[(parent, child)]
            if self._cardinal in self[child].columns and hasattr(self[child], 'slice_cardinal'):
                kwargs[child] = self[child].slice_cardinal(key)
            elif typ == 'index-index':
                # Select from the child on the parent's index (the parent is
                # in the kwargs already).
                kwargs[child] = self[child].ix[kwargs[parent].index.values]
            elif typ == 'index-column':
                # Select from the child where the column (of the same name as
                # the parent) is in the parent's index values
                cdf = self[child]
                kwargs[child] = cdf[cdf[parent].isin(kwargs[parent].index.values)]
            elif typ == 'column-index':
                # Select from the child where the child's index is in the
                # column of the parent. Note that this relationship may span
                # several digit-suffixed columns of the parent (e.g. 'atom0',
                # 'atom1'), which is what the suffix check below handles.
                cdf = self[child]
                cin = cdf.index.name
                cols = [col for col in kwargs[parent]
                        if cin == col or (cin == col[:-1] and col[-1].isdigit())]
                index = kwargs[parent][cols].stack().astype(np.int64).values
                kwargs[child] = cdf[cdf.index.isin(index)]
        return cls(**kwargs)
Slice the container according to its (primary) cardinal axis. The "cardinal" axis can have any name so long as the name matches a data object attached to the container. The index name for this object should also match the value of the cardinal axis. The algorithm builds a network graph representing the data relationships (including information about the type of relationship) and then traverses the edge tree (starting from the cardinal table). Each subsequent child object in the tree is sliced based on its relationship with its parent. Note: Breadth first traversal is performed. Warning: This function does not make a copy (if possible): to ensure a new object is created (a copy) use :func:`~exa.core.container.Container.copy` after slicing. .. code-block:: Python myslice = mycontainer[::2].copy() See Also: For data network generation, see :func:`~exa.core.container.Container.network`. For information about relationships between data objects see :mod:`~exa.core.numerical`.
def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]: """Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples. """ entity_db = self._entities for entity in self._components.get(component_type, []): yield entity, entity_db[entity][component_type]
Get an iterator for Entity, Component pairs. :param component_type: The Component type to retrieve. :return: An iterator for (Entity, Component) tuples.
def rebuild(self): """ Rebuilds the widget based on the position and current size/location of its parent. """ if not self.isVisible(): return self.raise_() max_size = self.maximumPixmapSize() min_size = self.minimumPixmapSize() widget = self.window() rect = widget.rect() rect.setBottom(rect.bottom() - widget.statusBar().height()) rect.setTop(widget.menuBar().height()) offset = self.padding() # align this widget to the north if self.position() == XDockToolbar.Position.North: self.move(rect.left(), rect.top()) self.resize(rect.width(), min_size.height() + offset) # align this widget to the east elif self.position() == XDockToolbar.Position.East: self.move(rect.left(), rect.top()) self.resize(min_size.width() + offset, rect.height()) # align this widget to the south elif self.position() == XDockToolbar.Position.South: self.move(rect.left(), rect.top() - min_size.height() - offset) self.resize(rect.width(), min_size.height() + offset) # align this widget to the west else: self.move(rect.right() - min_size.width() - offset, rect.top()) self.resize(min_size.width() + offset, rect.height())
Rebuilds the widget based on the position and current size/location of its parent.
def inc(self, exception=None): # type: (Optional[ParseError.__class__]) -> bool """ Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance. """ try: self._idx, self._current = next(self._chars) return True except StopIteration: self._idx = len(self) self._current = self.EOF if exception: raise self.parse_error(exception) return False
Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance.
def manager(self, obj): """Returns the :class:`AttachmentsManager` instance for this object.""" manager = getattr(obj, _MANAGER_ATTR, None) if manager is None: manager = AttachmentsManager() setattr(obj.__class__, _MANAGER_ATTR, manager) return manager
Returns the :class:`AttachmentsManager` instance for this object.
def find_api_id(self): """Given API name, find API ID.""" allapis = self.client.get_rest_apis() api_name = self.trigger_settings['api_name'] api_id = None for api in allapis['items']: if api['name'] == api_name: api_id = api['id'] self.log.info("Found API for: %s", api_name) break else: api_id = self.create_api() return api_id
Given API name, find API ID.
def to_css(self): ''' Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"`` ''' if self.a == 1.0: return "rgb(%d, %d, %d)" % (self.r, self.g, self.b) else: return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
Generate the CSS representation of this RGB color. Returns: str, ``"rgb(...)"`` or ``"rgba(...)"``
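Expected output, as a sketch. It assumes an RGB color type whose r, g, b attributes are 0-255 integers and whose a is a float in [0, 1], as the format strings above imply; the RGB constructor used here is illustrative:

RGB(255, 0, 0).to_css()         # 'rgb(255, 0, 0)'
RGB(255, 0, 0, 0.5).to_css()    # 'rgba(255, 0, 0, 0.5)'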
def get_all_keys(tweet, parent_key=''):
    """
    Takes a tweet object and recursively returns a list of all keys contained
    in this level and all nested levels of the tweet.

    Args:
        tweet (Tweet): the tweet dict
        parent_key (str): key from which this process will start, e.g., you
            can get keys only under some key that is not the top-level key.

    Returns:
        list of all keys in nested dicts.

    Example:
        >>> import tweet_parser.tweet_checking as tc
        >>> tweet = {"created_at": 124125125125, "text": "just setting up my twttr",
        ...          "nested_field": {"nested_1": "field", "nested_2": "field2"}}
        >>> tc.get_all_keys(tweet)
        ['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2']
    """
    items = []
    for k, v in tweet.items():
        new_key = parent_key + " " + k
        if isinstance(v, dict):
            items.extend(get_all_keys(v, parent_key=new_key))
        else:
            items.append(new_key.strip(" "))
    return items
Takes a tweet object and recursively returns a list of all keys contained in this level and all nested levels of the tweet. Args: tweet (Tweet): the tweet dict parent_key (str): key from which this process will start, e.g., you can get keys only under some key that is not the top-level key. Returns: list of all keys in nested dicts. Example: >>> import tweet_parser.tweet_checking as tc >>> tweet = {"created_at": 124125125125, "text": "just setting up my twttr", ... "nested_field": {"nested_1": "field", "nested_2": "field2"}} >>> tc.get_all_keys(tweet) ['created_at', 'text', 'nested_field nested_1', 'nested_field nested_2']
def _check_and_apply_deprecations(self, scope, values):
    """Checks whether a ScopeInfo has options specified in a deprecated scope.

    There are two related cases here. Either:
      1) The ScopeInfo has an associated deprecated_scope that was replaced with a non-deprecated
         scope, meaning that the options temporarily live in two locations.
      2) The entire ScopeInfo is deprecated (as in the case of deprecated SubsystemDependencies),
         meaning that the options live in one location.

    In the first case, this method has the side effect of merging option values from
    deprecated scopes into the given values.
    """
    si = self.known_scope_to_info[scope]

    # If this Scope is itself deprecated, report that.
    if si.removal_version:
        explicit_keys = self.for_scope(scope, inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            warn_or_error(
                removal_version=si.removal_version,
                deprecated_entity_description='scope {}'.format(scope),
                hint=si.removal_hint,
            )

    # Check if we're the new name of a deprecated scope, and clone values from that scope.
    # Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's
    # Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must
    # check that scope != deprecated_scope to prevent infinite recursion.
    deprecated_scope = si.deprecated_scope
    if deprecated_scope is not None and scope != deprecated_scope:
        # Do the deprecation check only on keys that were explicitly set on the deprecated scope
        # (and not on its enclosing scopes).
        explicit_keys = self.for_scope(deprecated_scope,
                                       inherit_from_enclosing_scope=False).get_explicit_keys()
        if explicit_keys:
            # Update our values with those of the deprecated scope (now including values inherited
            # from its enclosing scope).
            # Note that a deprecated val will take precedence over a val of equal rank.
            # This makes the code a bit neater.
            values.update(self.for_scope(deprecated_scope))

            warn_or_error(
                removal_version=self.known_scope_to_info[scope].deprecated_scope_removal_version,
                deprecated_entity_description='scope {}'.format(deprecated_scope),
                hint='Use scope {} instead (options: {})'.format(scope, ', '.join(explicit_keys))
            )
Checks whether a ScopeInfo has options specified in a deprecated scope. There are two related cases here. Either: 1) The ScopeInfo has an associated deprecated_scope that was replaced with a non-deprecated scope, meaning that the options temporarily live in two locations. 2) The entire ScopeInfo is deprecated (as in the case of deprecated SubsystemDependencies), meaning that the options live in one location. In the first case, this method has the side effect of merging option values from deprecated scopes into the given values.
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext """ if self._context is None: self._context = WorkspaceStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkspaceStatisticsContext for this WorkspaceStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.workspace_statistics.WorkspaceStatisticsContext
def get_resources(minify=False): """Find all resources which subclass ResourceBase. Keyword arguments: minify -- select minified resources if available. Returns: Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts with css and js keys, and tuples of resources as values. """ all_resources = dict() subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__() for resource in subclasses: obj = resource(minify) all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js)) return all_resources
Find all resources which subclass ResourceBase. Keyword arguments: minify -- select minified resources if available. Returns: Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts with css and js keys, and tuples of resources as values.
def get_desc2nts(self, **kws_usr): """Return grouped, sorted namedtuples in either format: flat, sections.""" # desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj) # keys_nts: hdrgo_prt section_prt top_n use_sections kws_nts = {k:v for k, v in kws_usr.items() if k in self.keys_nts} return self.get_desc2nts_fnc(**kws_nts)
Return grouped, sorted namedtuples in either format: flat, sections.
def ping(): ''' Is the marathon api responding? ''' try: response = salt.utils.http.query( "{0}/ping".format(CONFIG[CONFIG_BASE_URL]), decode_type='plain', decode=True, ) log.debug( 'marathon.info returned successfully: %s', response, ) if 'text' in response and response['text'].strip() == 'pong': return True except Exception as ex: log.error( 'error calling marathon.info with base_url %s: %s', CONFIG[CONFIG_BASE_URL], ex, ) return False
Is the marathon api responding?
def minimum_spanning_tree_kruskal(self, display = None, components = None):
    '''
    API:
        minimum_spanning_tree_kruskal(self, display = None, components = None)
    Description:
        Determines a minimum spanning tree using Kruskal's Algorithm.
    Input:
        display: Display method.
        components: DisjointSet instance used to track components (a new
            one is created if not provided).
    Post:
        'color' attribute of nodes and edges may change.
    Return:
        Returns list of edges where edges are tuples in (source,sink) format.
    '''
    if display == None:
        display = self.attr['display']
    else:
        self.set_display_mode(display)
    if components is None:
        components = DisjointSet(display = display, layout = 'dot',
                                 optimize = False)
    sorted_edge_list = sorted(self.get_edge_list(), key=self.get_edge_cost)
    edges = []
    for n in self.get_node_list():
        components.add([n])
    components.display()
    for e in sorted_edge_list:
        if len(edges) == len(self.get_node_list()) - 1:
            break
        self.set_edge_attr(e[0], e[1], 'color', 'yellow')
        self.display()
        if components.union(e[0], e[1]):
            self.set_edge_attr(e[0], e[1], 'color', 'green')
            self.display()
            edges.append(e)
        else:
            self.set_edge_attr(e[0], e[1], 'color', 'black')
            self.display()
    components.display()
    return edges
API: minimum_spanning_tree_kruskal(self, display = None, components = None) Description: Determines a minimum spanning tree using Kruskal's Algorithm. Input: display: Display method. components: DisjointSet instance used to track components (a new one is created if not provided). Post: 'color' attribute of nodes and edges may change. Return: Returns list of edges where edges are tuples in (source,sink) format.
def clean_dataframes(dfs):
    """Fill NaNs with the previous value, the next value or if all are NaN then 1.0

    TODO:
      Linear interpolation and extrapolation

    Arguments:
      dfs (list of dataframes): list of dataframes that contain NaNs to be removed

    Returns:
      list of dataframes: list of dataframes with NaNs replaced by interpolated values
    """
    if isinstance(dfs, list):
        # Rebinding the loop variable would discard the cleaned frames,
        # so collect the results instead.
        return [clean_dataframe(df) for df in dfs]
    else:
        return [clean_dataframe(dfs)]
Fill NaNs with the previous value, the next value or if all are NaN then 1.0 TODO: Linear interpolation and extrapolation Arguments: dfs (list of dataframes): list of dataframes that contain NaNs to be removed Returns: list of dataframes: list of dataframes with NaNs replaced by interpolated values
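A short usage sketch (clean_dataframe itself is not shown; this assumes it returns a cleaned copy, as the single-frame branch implies):

import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [1.0, np.nan, 3.0]})
cleaned_list = clean_dataframes([df])   # list in, list of cleaned frames out
cleaned_one, = clean_dataframes(df)     # a bare frame is wrapped in a list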
def _fetch(self, statement, commit, max_attempts=5):
    """
    Execute a SQL query and return a result.

    Disconnect and reconnect to the database, retrying the statement,
    if an error occurs.
    """
    if self._auto_reconnect:
        attempts = 0
        while attempts < max_attempts:
            try:
                # Execute statement
                self._cursor.execute(statement)
                fetch = self._cursor.fetchall()
                rows = self._fetch_rows(fetch)
                if commit:
                    self._commit()

                # Return a single item if the list only has one item
                return rows[0] if len(rows) == 1 else rows
            except Exception as e:
                # Count the failed attempt before checking the limit,
                # otherwise the final error is silently swallowed and
                # the function falls through to return None.
                attempts += 1
                if attempts >= max_attempts:
                    raise e
                self.reconnect()
                continue
    else:
        # Execute statement
        self._cursor.execute(statement)
        fetch = self._cursor.fetchall()
        rows = self._fetch_rows(fetch)
        if commit:
            self._commit()

        # Return a single item if the list only has one item
        return rows[0] if len(rows) == 1 else rows
Execute a SQL query and return a result. Disconnect and reconnect to the database, retrying the statement, if an error occurs.
def process_config(config, config_data): """ Populates config with data from the configuration data dict. It handles components, data, log, management and session sections from the configuration data. :param config: The config reference of the object that will hold the configuration data from the config_data. :param config_data: The configuration data loaded from a configuration file. """ if 'components' in config_data: process_components_config_section(config, config_data['components']) if 'data' in config_data: process_data_config_section(config, config_data['data']) if 'log' in config_data: process_log_config_section(config, config_data['log']) if 'management' in config_data: process_management_config_section(config, config_data['management']) if 'session' in config_data: process_session_config_section(config, config_data['session'])
Populates config with data from the configuration data dict. It handles components, data, log, management and session sections from the configuration data. :param config: The config reference of the object that will hold the configuration data from the config_data. :param config_data: The configuration data loaded from a configuration file.
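A minimal sketch of the expected input shape. The keys inside each section are placeholders; the real ones depend on the process_*_config_section helpers, which are not shown, and `config` is whatever object the application uses to hold settings:

config_data = {
    'log': {'level': 'DEBUG'},       # hypothetical section contents
    'session': {'type': 'file'},     # hypothetical section contents
}
process_config(config, config_data)  # only the sections present are applied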
def make_symlink(source, link_path): """ Create a symlink at `link_path` referring to `source`. """ if not supports_symlinks(): dbt.exceptions.system_error('create a symbolic link') return os.symlink(source, link_path)
Create a symlink at `link_path` referring to `source`.
def uninstall_package_and_data(
        package_name,
        service_name=None,
        role=None,
        principal=None,
        zk_node=None,
        timeout_sec=600):
    """ Uninstall a package via the DC/OS library, wait for completion,
        and delete any persistent data

        :param package_name: name of the package
        :type package_name: str
        :param service_name: unique service name for the package
        :type service_name: str
        :param role: role to use when deleting data, or <service_name>-role if unset
        :type role: str, or None
        :param principal: principal to use when deleting data, or <service_name>-principal if unset
        :type principal: str, or None
        :param zk_node: zk node to delete, or dcos-service-<service_name> if unset
        :type zk_node: str, or None
        :param timeout_sec: number of seconds to wait for task completion
        :type timeout_sec: int
    """
    start = time.time()

    if service_name is None:
        pkg = _get_package_manager().get_package_version(package_name, None)
        service_name = _get_service_name(package_name, pkg)
    print('\n{}uninstalling/deleting {}'.format(shakedown.cli.helpers.fchr('>>'), service_name))
    try:
        uninstall_package_and_wait(package_name, service_name=service_name, timeout_sec=timeout_sec)
    except (errors.DCOSException, ValueError) as e:
        print('Got exception when uninstalling package, '
              'continuing with janitor anyway: {}'.format(e))

    data_start = time.time()

    if (not role or not principal or not zk_node) and service_name is None:
        raise DCOSException('service_name must be provided when data params are missing AND the package isn\'t installed')
    if not role:
        role = '{}-role'.format(service_name)
    if not zk_node:
        zk_node = 'dcos-service-{}'.format(service_name)
    delete_persistent_data(role, zk_node)

    finish = time.time()

    print('\n{}uninstall/delete done after pkg({}) + data({}) = total({})\n'.format(
        shakedown.cli.helpers.fchr('>>'),
        pretty_duration(data_start - start),
        pretty_duration(finish - data_start),
        pretty_duration(finish - start)))
Uninstall a package via the DC/OS library, wait for completion, and delete any persistent data :param package_name: name of the package :type package_name: str :param service_name: unique service name for the package :type service_name: str :param role: role to use when deleting data, or <service_name>-role if unset :type role: str, or None :param principal: principal to use when deleting data, or <service_name>-principal if unset :type principal: str, or None :param zk_node: zk node to delete, or dcos-service-<service_name> if unset :type zk_node: str, or None :param timeout_sec: number of seconds to wait for task completion :type timeout_sec: int
def get_element_type(_list, dimens): """ Given the dimensions of a nested list and the list, returns the type of the elements in the inner list. """ elem = _list for _ in range(len(dimens)): elem = elem[0] return type(elem)
Given the dimensions of a nested list and the list, returns the type of the elements in the inner list.
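A worked example, descending three levels to the innermost scalars:

nested = [[[1, 2], [3, 4]]]                 # dimensions (1, 2, 2)
print(get_element_type(nested, (1, 2, 2)))  # <class 'int'>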
def query(self, coords, mode='random_sample', return_flags=False, pct=None):
    """
    Returns reddening at the requested coordinates. There are several
    different query modes, which handle the probabilistic nature of the map
    differently.

    Args:
        coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
        mode (Optional[:obj:`str`]): Seven different query modes are available:
            'random_sample', 'random_sample_per_pix', 'samples', 'median',
            'mean', 'best' and 'percentile'. The :obj:`mode` determines how the
            output will reflect the probabilistic nature of the Bayestar dust
            maps.
        return_flags (Optional[:obj:`bool`]): If :obj:`True`, then QA flags will be
            returned in a second numpy structured array. That is, the query
            will return :obj:`ret`, :obj:`flags`, where :obj:`ret` is the normal
            return value, containing reddening. Defaults to :obj:`False`.
        pct (Optional[:obj:`float` or list/array of :obj:`float`]): If the mode is
            :obj:`percentile`, then :obj:`pct` specifies which percentile(s) is
            (are) returned.

    Returns:
        Reddening at the specified coordinates, in magnitudes of reddening.

        The conversion to E(B-V) (or other reddening units) depends on
        whether :obj:`version='bayestar2017'` (the default) or
        :obj:`'bayestar2015'` was selected when the :obj:`BayestarQuery` object
        was created. To convert Bayestar2017 to Pan-STARRS 1 extinctions,
        multiply by the coefficients given in Table 1 of Green et al. (2018).
        Conversion to extinction in non-PS1 passbands depends on the choice
        of extinction law. To convert Bayestar2015 to extinction in various
        passbands, multiply by the coefficients in Table 6 of Schlafly &
        Finkbeiner (2011). See Green et al. (2015, 2018) for more detailed
        discussion of how to convert the Bayestar dust maps into reddenings
        or extinctions in different passbands.

        The shape of the output depends on the :obj:`mode`, and on whether
        :obj:`coords` contains distances.

        If :obj:`coords` does not specify distance(s), then the shape of the
        output begins with :obj:`coords.shape`. If :obj:`coords` does specify
        distance(s), then the shape of the output begins with
        :obj:`coords.shape + ([number of distance bins],)`.

        If :obj:`mode` is :obj:`'random_sample'`, then at each
        coordinate/distance, a random sample of reddening is given.

        If :obj:`mode` is :obj:`'random_sample_per_pix'`, then the sample chosen
        for each angular pixel of the map will be consistent. For example, if
        two query coordinates lie in the same map pixel, then the same random
        sample will be chosen from the map for both query coordinates.

        If :obj:`mode` is :obj:`'median'`, then at each coordinate/distance, the
        median reddening is returned.

        If :obj:`mode` is :obj:`'mean'`, then at each coordinate/distance, the
        mean reddening is returned.

        If :obj:`mode` is :obj:`'best'`, then at each coordinate/distance, the
        maximum posterior density reddening is returned (the "best fit").

        If :obj:`mode` is :obj:`'percentile'`, then an additional keyword
        argument, :obj:`pct`, must be specified. At each coordinate/distance,
        the requested percentiles (in :obj:`pct`) will be returned. If
        :obj:`pct` is a list/array, then the last axis of the output will
        correspond to different percentiles.

        Finally, if :obj:`mode` is :obj:`'samples'`, then at each
        coordinate/distance, all samples are returned. The last axis of the
        output will correspond to different samples.

        If :obj:`return_flags` is :obj:`True`, then in addition to reddening, a
        structured array containing QA flags will be returned.
        If the input coordinates include distances, the QA flags will be
        :obj:`"converged"` (whether or not the line-of-sight fit converged in
        a given pixel) and :obj:`"reliable_dist"` (whether or not the
        requested distance is within the range considered reliable, based on
        the inferred stellar distances). If the input coordinates do not
        include distances, then instead of :obj:`"reliable_dist"`, the flags
        will include :obj:`"min_reliable_distmod"` and
        :obj:`"max_reliable_distmod"`, the minimum and maximum reliable
        distance moduli in the given pixel.
    """
    # Check that the query mode is supported
    self._raise_on_mode(mode)

    # Validate percentile specification
    pct, scalar_pct = self._interpret_percentile(mode, pct)

    # Get number of coordinates requested
    n_coords_ret = coords.shape[0]

    # Determine if distance has been requested
    has_dist = hasattr(coords.distance, 'kpc')
    d = coords.distance.kpc if has_dist else None

    # Extract the correct angular pixel(s)
    # t0 = time.time()
    pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
    in_bounds_idx = (pix_idx != -1)
    # t1 = time.time()

    # Extract the correct samples
    if mode == 'random_sample':
        # A different sample in each queried coordinate
        samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
        n_samp_ret = 1
    elif mode == 'random_sample_per_pix':
        # Choose same sample in all coordinates that fall in same angular
        # HEALPix pixel
        samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
        n_samp_ret = 1
    elif mode == 'best':
        samp_idx = slice(None)
        n_samp_ret = 1
    else:
        # Return all samples in each queried coordinate
        samp_idx = slice(None)
        n_samp_ret = self._n_samples

    # t2 = time.time()

    if mode == 'best':
        val = self._best_fit
    else:
        val = self._samples

    # Create empty array to store flags
    if return_flags:
        if has_dist:
            # If distances are provided in query, return only convergence and
            # whether or not this distance is reliable
            dtype = [('converged', 'bool'),
                     ('reliable_dist', 'bool')]
            # shape = (n_coords_ret)
        else:
            # Return convergence and reliable distance ranges
            dtype = [('converged', 'bool'),
                     ('min_reliable_distmod', 'f4'),
                     ('max_reliable_distmod', 'f4')]
        flags = np.empty(n_coords_ret, dtype=dtype)
        # samples = self._samples[pix_idx, samp_idx]
        # samples[pix_idx == -1] = np.nan

    # t3 = time.time()

    # Extract the correct distance bin (possibly using linear interpolation)
    if has_dist:  # Distance has been provided
        # Determine ceiling bin index for each coordinate
        dm = 5. * (np.log10(d) + 2.)
bin_idx_ceil = np.searchsorted(self._DM_bin_edges, dm) # Create NaN-filled return arrays if isinstance(samp_idx, slice): ret = np.full((n_coords_ret, n_samp_ret), np.nan, dtype='f4') else: ret = np.full((n_coords_ret,), np.nan, dtype='f4') # d < d(nearest distance slice) idx_near = (bin_idx_ceil == 0) & in_bounds_idx if np.any(idx_near): a = 10.**(0.2 * (dm[idx_near] - self._DM_bin_edges[0])) if isinstance(samp_idx, slice): ret[idx_near] = ( a[:,None] * val[pix_idx[idx_near], samp_idx, 0]) else: # print('idx_near: {} true'.format(np.sum(idx_near))) # print('ret[idx_near].shape = {}'.format(ret[idx_near].shape)) # print('val.shape = {}'.format(val.shape)) # print('pix_idx[idx_near].shape = {}'.format(pix_idx[idx_near].shape)) ret[idx_near] = ( a * val[pix_idx[idx_near], samp_idx[idx_near], 0]) # d > d(farthest distance slice) idx_far = (bin_idx_ceil == self._n_distances) & in_bounds_idx if np.any(idx_far): # print('idx_far: {} true'.format(np.sum(idx_far))) # print('pix_idx[idx_far].shape = {}'.format(pix_idx[idx_far].shape)) # print('ret[idx_far].shape = {}'.format(ret[idx_far].shape)) # print('val.shape = {}'.format(val.shape)) if isinstance(samp_idx, slice): ret[idx_far] = val[pix_idx[idx_far], samp_idx, -1] else: ret[idx_far] = val[pix_idx[idx_far], samp_idx[idx_far], -1] # d(nearest distance slice) < d < d(farthest distance slice) idx_btw = ~idx_near & ~idx_far & in_bounds_idx if np.any(idx_btw): DM_ceil = self._DM_bin_edges[bin_idx_ceil[idx_btw]] DM_floor = self._DM_bin_edges[bin_idx_ceil[idx_btw]-1] a = (DM_ceil - dm[idx_btw]) / (DM_ceil - DM_floor) if isinstance(samp_idx, slice): ret[idx_btw] = ( (1.-a[:,None]) * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]] + a[:,None] * val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]-1] ) else: ret[idx_btw] = ( (1.-a) * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]] + a * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]-1] ) # Flag: distance in reliable range? if return_flags: dm_min = self._pixel_info['DM_reliable_min'][pix_idx] dm_max = self._pixel_info['DM_reliable_max'][pix_idx] flags['reliable_dist'] = ( (dm >= dm_min) & (dm <= dm_max) & np.isfinite(dm_min) & np.isfinite(dm_max)) flags['reliable_dist'][~in_bounds_idx] = False else: # No distances provided ret = val[pix_idx, samp_idx, :] # Return all distances ret[~in_bounds_idx] = np.nan # Flag: reliable distance bounds if return_flags: dm_min = self._pixel_info['DM_reliable_min'][pix_idx] dm_max = self._pixel_info['DM_reliable_max'][pix_idx] flags['min_reliable_distmod'] = dm_min flags['max_reliable_distmod'] = dm_max flags['min_reliable_distmod'][~in_bounds_idx] = np.nan flags['max_reliable_distmod'][~in_bounds_idx] = np.nan # t4 = time.time() # Flag: convergence if return_flags: flags['converged'] = ( self._pixel_info['converged'][pix_idx].astype(np.bool)) flags['converged'][~in_bounds_idx] = False # t5 = time.time() # Reduce the samples in the requested manner if mode == 'median': ret = np.median(ret, axis=1) elif mode == 'mean': ret = np.mean(ret, axis=1) elif mode == 'percentile': ret = np.nanpercentile(ret, pct, axis=1) if not scalar_pct: # (percentile, pixel) -> (pixel, percentile) # (pctile, pixel, distance) -> (pixel, distance, pctile) ret = np.moveaxis(ret, 0, -1) elif mode == 'best': # Remove "samples" axis s = ret.shape ret.shape = s[:1] + s[2:] elif mode == 'samples': # Swap sample and distance axes to be consistent with other 3D dust # maps. The output shape will be (pixel, distance, sample). 
        if not has_dist:
            # np.swapaxes returns a view without modifying its input, so the
            # result must be reassigned or the swap is silently discarded.
            ret = np.swapaxes(ret, 1, 2)

    # t6 = time.time()
    #
    # print('')
    # print('time inside bayestar.query: {:.4f} s'.format(t6-t0))
    # print('{: >7.4f} s : {: >6.4f} s : _find_data_idx'.format(t1-t0, t1-t0))
    # print('{: >7.4f} s : {: >6.4f} s : sample slice spec'.format(t2-t0, t2-t1))
    # print('{: >7.4f} s : {: >6.4f} s : create empty return flag array'.format(t3-t0, t3-t2))
    # print('{: >7.4f} s : {: >6.4f} s : extract results'.format(t4-t0, t4-t3))
    # print('{: >7.4f} s : {: >6.4f} s : convergence flag'.format(t5-t0, t5-t4))
    # print('{: >7.4f} s : {: >6.4f} s : reduce'.format(t6-t0, t6-t5))
    # print('')

    if return_flags:
        return ret, flags

    return ret
Returns reddening at the requested coordinates. There are several different query modes, which handle the probabilistic nature of the map differently. Args: coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query. mode (Optional[:obj:`str`]): Seven different query modes are available: 'random_sample', 'random_sample_per_pix', 'samples', 'median', 'mean', 'best' and 'percentile'. The :obj:`mode` determines how the output will reflect the probabilistic nature of the Bayestar dust maps. return_flags (Optional[:obj:`bool`]): If :obj:`True`, then QA flags will be returned in a second numpy structured array. That is, the query will return :obj:`ret`, :obj:`flags`, where :obj:`ret` is the normal return value, containing reddening. Defaults to :obj:`False`. pct (Optional[:obj:`float` or list/array of :obj:`float`]): If the mode is :obj:`percentile`, then :obj:`pct` specifies which percentile(s) is (are) returned. Returns: Reddening at the specified coordinates, in magnitudes of reddening. The conversion to E(B-V) (or other reddening units) depends on whether :obj:`version='bayestar2017'` (the default) or :obj:`'bayestar2015'` was selected when the :obj:`BayestarQuery` object was created. To convert Bayestar2017 to Pan-STARRS 1 extinctions, multiply by the coefficients given in Table 1 of Green et al. (2018). Conversion to extinction in non-PS1 passbands depends on the choice of extinction law. To convert Bayestar2015 to extinction in various passbands, multiply by the coefficients in Table 6 of Schlafly & Finkbeiner (2011). See Green et al. (2015, 2018) for more detailed discussion of how to convert the Bayestar dust maps into reddenings or extinctions in different passbands. The shape of the output depends on the :obj:`mode`, and on whether :obj:`coords` contains distances. If :obj:`coords` does not specify distance(s), then the shape of the output begins with :obj:`coords.shape`. If :obj:`coords` does specify distance(s), then the shape of the output begins with :obj:`coords.shape + ([number of distance bins],)`. If :obj:`mode` is :obj:`'random_sample'`, then at each coordinate/distance, a random sample of reddening is given. If :obj:`mode` is :obj:`'random_sample_per_pix'`, then the sample chosen for each angular pixel of the map will be consistent. For example, if two query coordinates lie in the same map pixel, then the same random sample will be chosen from the map for both query coordinates. If :obj:`mode` is :obj:`'median'`, then at each coordinate/distance, the median reddening is returned. If :obj:`mode` is :obj:`'mean'`, then at each coordinate/distance, the mean reddening is returned. If :obj:`mode` is :obj:`'best'`, then at each coordinate/distance, the maximum posterior density reddening is returned (the "best fit"). If :obj:`mode` is :obj:`'percentile'`, then an additional keyword argument, :obj:`pct`, must be specified. At each coordinate/distance, the requested percentiles (in :obj:`pct`) will be returned. If :obj:`pct` is a list/array, then the last axis of the output will correspond to different percentiles. Finally, if :obj:`mode` is :obj:`'samples'`, then at each coordinate/distance, all samples are returned. The last axis of the output will correspond to different samples. If :obj:`return_flags` is :obj:`True`, then in addition to reddening, a structured array containing QA flags will be returned. 
If the input coordinates include distances, the QA flags will be :obj:`"converged"` (whether or not the line-of-sight fit converged in a given pixel) and :obj:`"reliable_dist"` (whether or not the requested distance is within the range considered reliable, based on the inferred stellar distances). If the input coordinates do not include distances, then instead of :obj:`"reliable_dist"`, the flags will include :obj:`"min_reliable_distmod"` and :obj:`"max_reliable_distmod"`, the minimum and maximum reliable distance moduli in the given pixel.
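A typical call, as a sketch. It assumes the dustmaps package this method appears to come from, with the Bayestar map data already downloaded:

import astropy.units as u
from astropy.coordinates import SkyCoord
from dustmaps.bayestar import BayestarQuery

coords = SkyCoord([120., 180.] * u.deg, [10., 30.] * u.deg,
                  distance=[0.5, 1.0] * u.kpc, frame='galactic')
bayestar = BayestarQuery(version='bayestar2017')
reddening, flags = bayestar.query(coords, mode='median', return_flags=True)
print(reddening.shape, flags['converged'])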
def checkSetSaveChildren(self, doSave=True): """Check, then set, then save the parameter settings for all child (pset) windows. Prompts if any problems are found. Returns None on success, list of bad entries on failure. """ if self.isChild: return # Need to get all the entries and verify them. # Save the children in backwards order to coincide with the # display of the dialogs (LIFO) for n in range (len(self.top.childList)-1, -1, -1): self.badEntriesList = self.top.childList[n]. \ checkSetSaveEntries(doSave=doSave) if self.badEntriesList: ansOKCANCEL = self.processBadEntries(self.badEntriesList, self.top.childList[n].taskName) if not ansOKCANCEL: return self.badEntriesList # If there were no invalid entries or the user says OK, # close down the child and increment to the next child self.top.childList[n].top.focus_set() self.top.childList[n].top.withdraw() del self.top.childList[n] # all windows saved successfully return
Check, then set, then save the parameter settings for all child (pset) windows. Prompts if any problems are found. Returns None on success, list of bad entries on failure.
def add_path(tdict, path): """ Create or extend an argument tree `tdict` from `path`. :param tdict: a dictionary representing a argument tree :param path: a path list :return: a dictionary Convert a list of items in a 'path' into a nested dict, where the second to last item becomes the key for the final item. The remaining items in the path become keys in the nested dict around that final pair of items. For example, for input values of: tdict={} path = ['assertion', 'subject', 'subject_confirmation', 'method', 'urn:oasis:names:tc:SAML:2.0:cm:bearer'] Returns an output value of: {'assertion': {'subject': {'subject_confirmation': {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}} Another example, this time with a non-empty tdict input: tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}, path=['subject_confirmation_data', 'in_response_to', '_012345'] Returns an output value of: {'subject_confirmation_data': {'in_response_to': '_012345'}, 'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'} """ t = tdict for step in path[:-2]: try: t = t[step] except KeyError: t[step] = {} t = t[step] t[path[-2]] = path[-1] return tdict
Create or extend an argument tree `tdict` from `path`. :param tdict: a dictionary representing a argument tree :param path: a path list :return: a dictionary Convert a list of items in a 'path' into a nested dict, where the second to last item becomes the key for the final item. The remaining items in the path become keys in the nested dict around that final pair of items. For example, for input values of: tdict={} path = ['assertion', 'subject', 'subject_confirmation', 'method', 'urn:oasis:names:tc:SAML:2.0:cm:bearer'] Returns an output value of: {'assertion': {'subject': {'subject_confirmation': {'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}}}} Another example, this time with a non-empty tdict input: tdict={'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}, path=['subject_confirmation_data', 'in_response_to', '_012345'] Returns an output value of: {'subject_confirmation_data': {'in_response_to': '_012345'}, 'method': 'urn:oasis:names:tc:SAML:2.0:cm:bearer'}
def complex_mult(A, B, shifts, start):
    """ Generate shift-and-add multiplier that can shift and add multiple bits per clock cycle.
    Uses substantially more space than `simple_mult()` but is much faster.

    :param WireVector A, B: two input wires for the multiplication
    :param int shifts: number of spaces Register is to be shifted per clk cycle
        (cannot be greater than the length of `A` or `B`)
    :param bool start: start signal
    :returns: Register containing the product; the "done" signal
    """
    alen = len(A)
    blen = len(B)
    areg = pyrtl.Register(alen)
    breg = pyrtl.Register(alen + blen)
    accum = pyrtl.Register(alen + blen)
    done = (areg == 0)  # Multiplication is finished when a becomes 0

    if (shifts > alen) or (shifts > blen):
        raise pyrtl.PyrtlError("shift is larger than one or both of the parameters A or B, "
                               "please choose a smaller shift")

    # During multiplication, shift a right every cycle 'shift' times,
    # shift b left every cycle 'shift' times
    with pyrtl.conditional_assignment:
        with start:  # initialization
            areg.next |= A
            breg.next |= B
            accum.next |= 0
        with ~done:  # don't run when there's no work to do
            # "Multiply" shifted breg by LSB of areg by cond. adding
            areg.next |= libutils._shifted_reg_next(areg, 'r', shifts)  # right shift
            breg.next |= libutils._shifted_reg_next(breg, 'l', shifts)  # left shift
            accum.next |= accum + _one_cycle_mult(areg, breg, shifts)

    return accum, done
Generate shift-and-add multiplier that can shift and add multiple bits per clock cycle. Uses substantially more space than `simple_mult()` but is much faster. :param WireVector A, B: two input wires for the multiplication :param int shifts: number of spaces Register is to be shifted per clk cycle (cannot be greater than the length of `A` or `B`) :param bool start: start signal :returns: Register containing the product; the "done" signal
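A usage sketch, using PyRTL's standard Input/Output/Simulation API; the wire widths and simulation loop here are illustrative:

import pyrtl

a, b = pyrtl.Input(8, 'a'), pyrtl.Input(8, 'b')
start = pyrtl.Input(1, 'start')
product, done = complex_mult(a, b, shifts=2, start=start)
prod_o, done_o = pyrtl.Output(name='product'), pyrtl.Output(name='done')
prod_o <<= product
done_o <<= done

sim = pyrtl.Simulation()
sim.step({'a': 12, 'b': 5, 'start': 1})    # load the operands
while sim.inspect('done') == 0:            # run until the multiplier finishes
    sim.step({'a': 0, 'b': 0, 'start': 0})
print(sim.inspect('product'))              # expect 12 * 5 = 60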
def main(args):
    '''
    surface_to_ribbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include exactly one subject id and one output
    filename. Additionally one or two surface input filenames must be given. The surface files
    are projected into the ribbon and written to the output filename. For more information see
    the string stored in surface_to_image.info.
    '''
    # Parse the arguments
    (args, opts) = _surface_to_ribbon_parser(args)
    # First, help?
    if opts['help']:
        print(info, file=sys.stdout)
        return 1
    # and if we are verbose, lets setup a note function
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s, file=sys.stdout)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # figure out our arguments:
    (lhfl, rhfl) = (opts['lh_file'], opts['rh_file'])
    if len(args) == 0:
        raise ValueError('Not enough arguments provided!')
    elif len(args) == 1:
        # must be that the subject is in the env?
        sub = find_subject_path(os.getenv('SUBJECT'))
        outfl = args[0]
    elif len(args) == 2:
        sbpth = find_subject_path(args[0])
        if sbpth is not None:
            sub = sbpth
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Given arg is not a subject: %s' % args[0])
        outfl = args[1]
    elif len(args) == 3:
        sbpth0 = find_subject_path(args[0])
        sbpth1 = find_subject_path(args[1])
        if sbpth0 is not None:
            sub = sbpth0
            if lhfl is not None: rhfl = args[1]
            elif rhfl is not None: lhfl = args[1]
            else: raise ValueError('Too many arguments given: %s' % args[1])
        elif sbpth1 is not None:
            sub = sbpth1
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Too many arguments given: %s' % args[0])
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None or rhfl is not None:
                raise ValueError('Too many arguments and no subject given')
            (lhfl, rhfl) = args
        outfl = args[2]
    elif len(args) == 4:
        if lhfl is not None or rhfl is not None:
            raise ValueError('Too many arguments and no subject given')
        subidx = next((i for (i,a) in enumerate(args) if find_subject_path(a) is not None), None)
        if subidx is None: raise ValueError('No subject given')
        sub = find_subject_path(args[subidx])
        del args[subidx]
        (lhfl, rhfl, outfl) = args
    else:
        raise ValueError('Too many arguments provided!')
    if sub is None: raise ValueError('No subject specified or found in $SUBJECT')
    if lhfl is None and rhfl is None: raise ValueError('No surfaces provided')
    # check the method
    method = opts['method'].lower()
    if method not in ['linear', 'lines', 'nearest', 'auto']:
        raise ValueError('Unsupported method: %s' % method)
    # and the datatype
    if opts['dtype'] is None: dtyp = None
    elif opts['dtype'].lower() == 'float': dtyp = np.float32
    elif opts['dtype'].lower() == 'int': dtyp = np.int32
    else: raise ValueError('Type argument must be float or int')
    if method == 'auto':
        if dtyp is np.float32: method = 'linear'
        elif dtyp is np.int32: method = 'nearest'
        else: method = 'linear'
    # Now, load the data:
    note('Reading surfaces...')
    (lhdat, rhdat) = (None, None)
    if lhfl is not None:
        note(' - Reading LH file: %s' % lhfl)
        lhdat = read_surf_file(lhfl)
    if rhfl is not None:
        note(' - Reading RH file: %s' % rhfl)
        rhdat = read_surf_file(rhfl)
    (dat, hemi) = (rhdat, 'rh') if lhdat is None else \
                  (lhdat, 'lh') if rhdat is None else \
                  ((lhdat, rhdat), None)
    sub = subject(sub)
    # okay, make the volume...
    note('Generating volume...')
    vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts['fill'], dtype=dtyp)
    # and write out the file
    note('Exporting volume file: %s' % outfl)
    save(outfl, vol, affine=sub.voxel_to_native_matrix)
    note('surface_to_image complete!')
    return 0
surface_to_ribbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these arguments may include any options and must include exactly one subject id and one output filename. Additionally one or two surface input filenames must be given. The surface files are projected into the ribbon and written to the output filename. For more information see the string stored in surface_to_image.info.
def find_one(self, *args, **kwargs): """ Get a single document from the database. """ doc = self._collection_with_options(kwargs).find_one(*args, **kwargs) if doc is None: return None return doc
Get a single document from the database.
def main(self) -> None: """The main function for generating the config file""" path = ask_path("where should the config be stored?", ".snekrc") conf = configobj.ConfigObj() tools = self.get_tools() for tool in tools: conf[tool] = getattr(self, tool)() # pylint: disable=assignment-from-no-return conf.filename = path conf.write() print("Written config file!") if "pylint" in tools: print( "Please also run `pylint --generate-rcfile` to complete setup")
The main function for generating the config file
def _assert_sframe_equal(sf1, sf2, check_column_names=True, check_column_order=True, check_row_order=True, float_column_delta=None): """ Assert the two SFrames are equal. The default behavior of this function uses the strictest possible definition of equality, where all columns must be in the same order, with the same names and have the same data in the same order. Each of these stipulations can be relaxed individually and in concert with another, with the exception of `check_column_order` and `check_column_names`, we must use one of these to determine which columns to compare with one another. Parameters ---------- sf1 : SFrame sf2 : SFrame check_column_names : bool If true, assert if the data values in two columns are the same, but they have different names. If False, column order is used to determine which columns to compare. check_column_order : bool If true, assert if the data values in two columns are the same, but are not in the same column position (one is the i-th column and the other is the j-th column, i != j). If False, column names are used to determine which columns to compare. check_row_order : bool If true, assert if all rows in the first SFrame exist in the second SFrame, but they are not in the same order. float_column_delta : float The acceptable delta that two float values can be and still be considered "equal". When this is None, only exact equality is accepted. This is the default behavior since columns of all Nones are often of float type. Applies to all float columns. """ from .. import SFrame as _SFrame if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame): raise TypeError("Cannot function on types other than SFrames.") if not check_column_order and not check_column_names: raise ValueError("Cannot ignore both column order and column names.") sf1.__materialize__() sf2.__materialize__() if sf1.num_columns() != sf2.num_columns(): raise AssertionError("Number of columns mismatched: " + str(sf1.num_columns()) + " != " + str(sf2.num_columns())) s1_names = sf1.column_names() s2_names = sf2.column_names() sorted_s1_names = sorted(s1_names) sorted_s2_names = sorted(s2_names) if check_column_names: if (check_column_order and (s1_names != s2_names)) or (sorted_s1_names != sorted_s2_names): raise AssertionError("SFrame does not have same column names: " + str(sf1.column_names()) + " != " + str(sf2.column_names())) if sf1.num_rows() != sf2.num_rows(): raise AssertionError("Number of rows mismatched: " + str(sf1.num_rows()) + " != " + str(sf2.num_rows())) if not check_row_order and (sf1.num_rows() > 1): sf1 = sf1.sort(s1_names) sf2 = sf2.sort(s2_names) names_to_check = None if check_column_names: names_to_check = list(zip(sorted_s1_names, sorted_s2_names)) else: names_to_check = list(zip(s1_names, s2_names)) for i in names_to_check: col1 = sf1[i[0]] col2 = sf2[i[1]] if col1.dtype != col2.dtype: raise AssertionError("Columns " + str(i) + " types mismatched.") compare_ary = None if col1.dtype == float and float_column_delta is not None: dt = float_column_delta compare_ary = ((col1 > col2-dt) & (col1 < col2+dt)) else: compare_ary = (sf1[i[0]] == sf2[i[1]]) if not compare_ary.all(): count = 0 for j in compare_ary: if not j: first_row = count break count += 1 raise AssertionError("Columns " + str(i) + " are not equal! First differing element is at row " + str(first_row) + ": " + str((col1[first_row],col2[first_row])))
Assert the two SFrames are equal. The default behavior of this function uses the strictest possible definition of equality, where all columns must be in the same order, with the same names and have the same data in the same order. Each of these stipulations can be relaxed individually and in concert with one another, with the exception of `check_column_order` and `check_column_names`: at least one of these must be True, since one of them is needed to determine which columns to compare with one another. Parameters ---------- sf1 : SFrame sf2 : SFrame check_column_names : bool If True, raise an AssertionError if the data values in two columns are the same, but they have different names. If False, column order is used to determine which columns to compare. check_column_order : bool If True, raise an AssertionError if the data values in two columns are the same, but are not in the same column position (one is the i-th column and the other is the j-th column, i != j). If False, column names are used to determine which columns to compare. check_row_order : bool If True, raise an AssertionError if all rows in the first SFrame exist in the second SFrame, but they are not in the same order. float_column_delta : float The acceptable delta that two float values can be and still be considered "equal". When this is None, only exact equality is accepted. This is the default behavior since columns of all Nones are often of float type. Applies to all float columns.
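A hedged usage sketch of the assertion helper, assuming it is importable from turicreate alongside SFrame (the import path is an assumption; the function is private upstream):

import turicreate as tc
from turicreate.util import _assert_sframe_equal  # assumed import path

a = tc.SFrame({'x': [1, 2], 'y': [1.0, 2.0]})
b = tc.SFrame({'y': [1.0, 2.0], 'x': [1, 2]})  # same data, columns reordered

# Passes: columns are matched by name, so their order may differ.
_assert_sframe_equal(a, b, check_column_order=False)

# Strict mode additionally requires identical column order, so this fails.
try:
    _assert_sframe_equal(a, b)
except AssertionError as exc:
    print("strict comparison failed:", exc)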
def _copy(self, other, copy_func):
    """
    Copies the contents of another Asn1Value object to itself

    :param other:
        Another instance of the same class

    :param copy_func:
        A reference to copy.copy() or copy.deepcopy() to use when copying
        lists, dicts and objects
    """
    if self.__class__ != other.__class__:
        raise TypeError(unwrap(
            '''
            Can not copy values from %s object to %s object
            ''',
            type_name(other),
            type_name(self)
        ))

    self.contents = other.contents
    self._native = copy_func(other._native)
Copies the contents of another Asn1Value object to itself :param other: Another instance of the same class :param copy_func: A reference to copy.copy() or copy.deepcopy() to use when copying lists, dicts and objects
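A short usage sketch with asn1crypto, whose public copy() method is assumed to delegate to this _copy() hook (Integer is a real Asn1Value subclass; the value 42 is arbitrary):

from asn1crypto.core import Integer

original = Integer(42)
duplicate = original.copy()   # assumed to route through _copy() internally
print(duplicate.native)       # 42
print(duplicate is original)  # False: contents and _native were duplicated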
def execute_builtin_action(self, p_action_str, p_size=None):
    """
    Executes built-in action specified in p_action_str.

    Currently supported actions are: 'up', 'down', 'home', 'end',
    'first_column', 'last_column', 'prev_column', 'next_column',
    'append_column', 'insert_column', 'edit_column', 'delete_column',
    'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s',
    'pri', 'mark', 'mark_all', 'reset' and 'repeat'.
    """
    column_actions = ['first_column',
                      'last_column',
                      'prev_column',
                      'next_column',
                      'append_column',
                      'insert_column',
                      'edit_column',
                      'delete_column',
                      'copy_column',
                      'swap_left',
                      'swap_right',
                      'reset',
                      ]

    if p_action_str in column_actions:
        urwid.emit_signal(self, 'column_action', p_action_str)
    elif p_action_str in ['up', 'down']:
        self.listbox.keypress(p_size, p_action_str)
    elif p_action_str == 'home':
        self._scroll_to_top(p_size)
    elif p_action_str == 'end':
        self._scroll_to_bottom(p_size)
    elif p_action_str in ['postpone', 'postpone_s']:
        pass
    elif p_action_str == 'pri':
        pass
    elif p_action_str == 'mark':
        self._toggle_marked_status()
    elif p_action_str == 'mark_all':
        self._mark_all()
    elif p_action_str == 'repeat':
        self._repeat_cmd()
Executes built-in action specified in p_action_str. Currently supported actions are: 'up', 'down', 'home', 'end', 'first_column', 'last_column', 'prev_column', 'next_column', 'append_column', 'insert_column', 'edit_column', 'delete_column', 'copy_column', 'swap_right', 'swap_left', 'postpone', 'postpone_s', 'pri', 'mark', 'mark_all', 'reset' and 'repeat'.
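A hedged sketch of how a caller might route key presses to these built-in actions; the keymap below is hypothetical (topydo resolves keys through its own configuration), and p_size follows urwid's (columns, rows) convention:

# Hypothetical key-to-action table for illustration only.
KEYMAP = {'j': 'down', 'k': 'up', 'g': 'home', 'G': 'end', 'm': 'mark'}

def dispatch_key(widget, p_key, p_size=(80, 24)):
    """Route a pressed key to the widget's built-in action, if one is bound."""
    action = KEYMAP.get(p_key)
    if action is not None:
        widget.execute_builtin_action(action, p_size)
        return True   # key consumed
    return False      # fall through to urwid's default keypress handling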
def add_webhook(self, name, url, key, **attrs):
    """
    Add a new Webhook and return a :class:`Webhook` object.

    :param name: name of the :class:`Webhook`
    :param url: payload url of the :class:`Webhook`
    :param key: secret key of the :class:`Webhook`
    :param attrs: optional attributes for :class:`Webhook`
    """
    return Webhooks(self.requester).create(
        self.id, name, url, key, **attrs
    )
Add a new Webhook and return a :class:`Webhook` object. :param name: name of the :class:`Webhook` :param url: payload url of the :class:`Webhook` :param key: secret key of the :class:`Webhook` :param attrs: optional attributes for :class:`Webhook`
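A hedged usage sketch with the python-taiga client (host, credentials, project id, and webhook values are placeholders):

from taiga import TaigaAPI

api = TaigaAPI(host='https://api.taiga.io')
api.auth(username='user', password='secret')  # placeholder credentials

project = api.projects.get(1)                 # placeholder project id
hook = project.add_webhook(
    'ci-notify',                              # name
    'https://example.com/taiga-hook',         # payload url
    's3cr3t-key',                             # secret key
)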
def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    .. versionchanged:: 2016.3.5
        Prior to this version, only the ``file_name`` argument was considered
        for filename matches in the hash file. This would be problematic for
        cases in which the user was relying on a remote checksum file that
        they do not control, and they wished to use a different name for that
        file on the minion from the filename on the remote server (and in the
        checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
        remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
        :py:func:`file.managed <salt.states.file.managed>` state now also
        passes this function the source URI as well as the
        ``source_hash_name`` (if specified). In cases where
        ``source_hash_name`` is specified, it takes precedence over both the
        ``file_name`` and ``source``. When it is not specified, ``file_name``
        takes precedence over ``source``. This allows for more flexible hash
        matching.

    .. versionchanged:: 2016.11.0
        File name and source URI matches are no longer disregarded when
        ``source_hash_name`` is specified. They will be used as fallback
        matches if there is no match to the ``source_hash_name`` value.

    This routine is called from the :mod:`file.managed
    <salt.states.file.managed>` state to pull a hash from a remote file.
    Regular expressions are used line by line on the ``source_hash`` file, to
    find a potential candidate of the indicated hash type. This avoids many
    problems of arbitrary file layout rules. It specifically permits pulling
    hash codes from Debian ``*.dsc`` files.

    If no exact match of a hash and filename is found, then the first hash
    found (if any) will be returned. If no hashes at all are found, then
    ``None`` will be returned.

    For example:

    .. code-block:: yaml

        openerp_7.0-latest-1.tar.gz:
          file.managed:
            - name: /tmp/openerp_7.0-20121227-075624-1_all.deb
            - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
            - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc

    CLI Example:

    .. code-block:: bash

        salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = six.text_type(hash_len)

    filename_separators = string.whitespace + r'\/'

    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = six.text_type(source_hash_name)
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = six.text_type(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = six.text_type(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
                if source_hash_name
                else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )

    partial = None
    found = {}

    with salt.utils.files.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line.strip())
            hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but it's not of the correct length
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}

            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue

            if partial is None:
                partial = matched

            def _add_to_matches(found, line, match_type, value, matched):
                log.debug(
                    'file.extract_hash: Line \'%s\' matches %s \'%s\'',
                    line, match_type, value
                )
                found.setdefault(match_type, []).append(matched)

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r'\s+', line):
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r'\s+', line):
                    _add_to_matches(found, line, 'source',
                                    source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )

    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret

    if partial:
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial

    log.debug('file.extract_hash: No matches, returning None')
    return None
.. versionchanged:: 2016.3.5 Prior to this version, only the ``file_name`` argument was considered for filename matches in the hash file. This would be problematic for cases in which the user was relying on a remote checksum file that they do not control, and they wished to use a different name for that file on the minion from the filename on the remote server (and in the checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the remote file was at ``https://mydomain.tld/different_name.tar.gz``. The :py:func:`file.managed <salt.states.file.managed>` state now also passes this function the source URI as well as the ``source_hash_name`` (if specified). In cases where ``source_hash_name`` is specified, it takes precedence over both the ``file_name`` and ``source``. When it is not specified, ``file_name`` takes precedence over ``source``. This allows for more flexible hash matching. .. versionchanged:: 2016.11.0 File name and source URI matches are no longer disregarded when ``source_hash_name`` is specified. They will be used as fallback matches if there is no match to the ``source_hash_name`` value. This routine is called from the :mod:`file.managed <salt.states.file.managed>` state to pull a hash from a remote file. Regular expressions are used line by line on the ``source_hash`` file, to find a potential candidate of the indicated hash type. This avoids many problems of arbitrary file layout rules. It specifically permits pulling hash codes from Debian ``*.dsc`` files. If no exact match of a hash and filename is found, then the first hash found (if any) will be returned. If no hashes at all are found, then ``None`` will be returned. For example: .. code-block:: yaml openerp_7.0-latest-1.tar.gz: file.managed: - name: /tmp/openerp_7.0-20121227-075624-1_all.deb - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc CLI Example: .. code-block:: bash salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
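A hedged illustration of what the matcher consumes and returns. Assuming the hash file on the minion is laid out like sha256sum output (hash, whitespace, filename), a remote invocation via Salt's Python client would yield the dict form (the minion id, paths, and digest below are placeholders):

# Contents of /tmp/remote.dsc on the minion (placeholder digest):
#   0f343b09...5c1fa7  myfile.tar.gz

import salt.client

local = salt.client.LocalClient()
result = local.cmd('minion1', 'file.extract_hash',
                   ['/tmp/remote.dsc', 'sha256', '/tmp/myfile.tar.gz'])
# e.g. {'minion1': {'hsum': '0f343b09...5c1fa7', 'hash_type': 'sha256'}}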
def convert(self, expr):
    """
    EXPAND INSTANCES OF name TO value
    """
    if expr is True or expr == None or expr is False:
        return expr
    elif is_number(expr):
        return expr
    elif expr == ".":
        return "."
    elif is_variable_name(expr):
        return coalesce(self.dimensions[expr], expr)
    elif is_text(expr):
        Log.error("{{name|quote}} is not a valid variable name", name=expr)
    elif isinstance(expr, Date):
        return expr
    elif is_op(expr, QueryOp):
        return self._convert_query(expr)
    elif is_data(expr):
        if expr["from"]:
            return self._convert_query(expr)
        elif len(expr) >= 2:
            # ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
            return wrap({name: self.convert(value) for name, value in expr.leaves()})
        else:
            # ASSUME SINGLE-CLAUSE EXPRESSION
            k, v = expr.items()[0]
            return converter_map.get(k, self._convert_bop)(self, k, v)
    elif is_many(expr):
        return wrap([self.convert(value) for value in expr])
    else:
        return expr
EXPAND INSTANCES OF name TO value
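A simplified, self-contained sketch of the same name-to-value expansion idea; this is a hypothetical stand-in, not the library's API, and the dimension table below is invented:

DIMENSIONS = {"user": "payload.user.id"}  # hypothetical dimension table

def convert(expr):
    # Walk the expression tree, substituting known dimension names and
    # leaving literals and unknown names untouched.
    if isinstance(expr, str):
        return DIMENSIONS.get(expr, expr)
    if isinstance(expr, dict):
        return {k: convert(v) for k, v in expr.items()}
    if isinstance(expr, list):
        return [convert(v) for v in expr]
    return expr  # numbers, bools, None, dates

print(convert({"eq": ["user", 42]}))
# {'eq': ['payload.user.id', 42]}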