positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def normalize_topic(topic):
    """Return the canonical form of a Wikipedia topic.

    The topic may carry a parenthesized disambiguation suffix, e.g.
    ``"Mercury (planet)"``.  Returns a ``(name, disambig)`` pair, where
    ``name`` is the normalized topic name and ``disambig`` is either a
    string derived from the disambiguation text or ``None``.
    """
    # Underscores in wiki titles stand for spaces.
    cleaned = topic.replace('_', ' ')
    # Titles of the form "Foo (bar)" carry a disambiguation in "bar".
    parsed = re.match(r'([^(]+) \(([^)]+)\)', cleaned)
    if parsed:
        return normalize(parsed.group(1)), 'n/' + parsed.group(2).strip(' _')
    return normalize(cleaned), None
Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None.
def _errcheck(result, func, arguments): """ Error checker for functions returning an integer indicating success (0) / failure (1). Raises a XdoException in case of error, otherwise just returns ``None`` (returning the original code, 0, would be useless anyways..) """ if result != 0: raise XdoException( 'Function {0} returned error code {1}' .format(func.__name__, result)) return None
Error checker for functions returning an integer indicating success (0) / failure (1). Raises an XdoException in case of error; otherwise just returns ``None`` (returning the original code, 0, would be useless anyway).
def update(self, webhook_url=values.unset, friendly_name=values.unset,
           reachability_webhooks_enabled=values.unset, acl_enabled=values.unset):
    """
    Update the ServiceInstance

    :param unicode webhook_url: A URL that will receive event updates when objects are manipulated.
    :param unicode friendly_name: Human-readable name for this service instance
    :param bool reachability_webhooks_enabled: True or false - controls whether this instance fires webhooks when client endpoints connect to Sync
    :param bool acl_enabled: true or false - determines whether token identities must be granted access to Sync objects via the Permissions API in this Service.

    :returns: Updated ServiceInstance
    :rtype: twilio.rest.sync.v1.service.ServiceInstance
    """
    # Delegate to the context proxy, forwarding every field unchanged
    # (values.unset fields are ignored server-side).
    options = dict(
        webhook_url=webhook_url,
        friendly_name=friendly_name,
        reachability_webhooks_enabled=reachability_webhooks_enabled,
        acl_enabled=acl_enabled,
    )
    return self._proxy.update(**options)
Update the ServiceInstance :param unicode webhook_url: A URL that will receive event updates when objects are manipulated. :param unicode friendly_name: Human-readable name for this service instance :param bool reachability_webhooks_enabled: True or false - controls whether this instance fires webhooks when client endpoints connect to Sync :param bool acl_enabled: true or false - determines whether token identities must be granted access to Sync objects via the Permissions API in this Service. :returns: Updated ServiceInstance :rtype: twilio.rest.sync.v1.service.ServiceInstance
def init_jcrop(min_size=None):
    """Initialize jcrop.

    Builds the inline <script> that wires the jcrop widget to the
    ``#crop-box`` image, keeps the ``#preview-box`` thumbnail in sync,
    and mirrors the selected rectangle into the ``#x``/``#y``/``#w``/``#h``
    form fields.

    :param min_size: The minimal size of crop area.
    """
    # Initial selection rectangle comes from the app configuration.
    init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
    init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
    init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]

    if current_app.config['AVATARS_CROP_MIN_SIZE']:
        # NOTE(review): the fallback uses AVATARS_SIZE_TUPLE[2] rather than
        # the AVATARS_CROP_MIN_SIZE value itself -- confirm this is intended.
        min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
        min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
    else:
        # No minimum size constraint: emit no setOptions call at all.
        min_size_js = ''

    # The %s placeholders are filled with (init_x, init_y, init_size,
    # init_size, min_size_js) below.
    return Markup('''
<script type="text/javascript">
  jQuery(function ($) {
    // Create variables (in this scope) to hold the API and image size
    var jcrop_api,
        boundx,
        boundy,
        // Grab some information about the preview pane
        $preview = $('#preview-box'),
        $pcnt = $('#preview-box .preview-box'),
        $pimg = $('#preview-box .preview-box img'),
        xsize = $pcnt.width(),
        ysize = $pcnt.height();
    $('#crop-box').Jcrop({
      onChange: updatePreview,
      onSelect: updateCoords,
      setSelect: [%s, %s, %s, %s],
      aspectRatio: 1
    }, function () {
      // Use the API to get the real image size
      var bounds = this.getBounds();
      boundx = bounds[0];
      boundy = bounds[1];
      // Store the API in the jcrop_api variable
      jcrop_api = this;
      %s
      jcrop_api.focus();
      // Move the preview into the jcrop container for css positioning
      $preview.appendTo(jcrop_api.ui.holder);
    });

    function updatePreview(c) {
      if (parseInt(c.w) > 0) {
        var rx = xsize / c.w;
        var ry = ysize / c.h;
        $pimg.css({
          width: Math.round(rx * boundx) + 'px',
          height: Math.round(ry * boundy) + 'px',
          marginLeft: '-' + Math.round(rx * c.x) + 'px',
          marginTop: '-' + Math.round(ry * c.y) + 'px'
        });
      }
    }
  });

  function updateCoords(c) {
    $('#x').val(c.x);
    $('#y').val(c.y);
    $('#w').val(c.w);
    $('#h').val(c.h);
  }
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
Initialize jcrop. :param min_size: The minimal size of crop area.
def scalarDecorator(func):
    """Decorator to return scalar outputs as a tuple.

    Wraps *func* so it can be called with scalar arguments: each scalar
    is promoted to a one-element array before the call, and the first
    row of the 2-D result is then unpacked into a plain tuple.  Array
    inputs pass through unchanged and the raw array result is returned.

    NOTE(review): assumes that whenever the first positional argument is
    scalar, all of them are, and that ``func`` returns a 2-D array --
    confirm with callers.
    """
    @wraps(func)
    def scalar_wrapper(*args, **kwargs):
        # A 0-d first argument (shape ()) signals an all-scalar call.
        if nu.array(args[0]).shape == ():
            scalarOut = True
            # Promote every positional argument to a length-1 array.
            newargs = ()
            for ii in range(len(args)):
                newargs = newargs + (nu.array([args[ii]]),)
            args = newargs
        else:
            scalarOut = False
        result = func(*args, **kwargs)
        if scalarOut:
            # Unpack the single result row into a tuple of scalars.
            out = ()
            for ii in range(result.shape[1]):
                out = out + (result[0, ii],)
            return out
        else:
            return result
    return scalar_wrapper
Decorator to return scalar outputs as a tuple
def step_impl10(context):
    """Create application list.

    :param context: test context.
    """
    app_list = context.app_list
    file_list = context.file_list
    assert app_list and len(app_list) > 0, "ENSURE: app list is provided."
    assert file_list and len(file_list) > 0, "ENSURE: file list is provided."
    context.fuzz_executor = FuzzExecutor(app_list, file_list)
    assert context.fuzz_executor, "VERIFY: fuzz executor created."
Create application list. :param context: test context.
def removeFixedEffect(self, index=None):
    """Remove a fixed-effect term from the model.

    F: NxK sample design
    A: LxP sample design
    REML: REML for this term?
    index: index of which fixed effect to remove.  If None, remove the
           last term.  Currently only the last term can be removed;
           any other valid index raises NotImplementedError.
    """
    # NOTE(review): this guard is a no-op -- removal proceeds even when
    # there are no terms, which would make the pop() calls fail; confirm
    # whether an early return was intended.
    if self._n_terms==0:
        pass
    if index is None or index==(self._n_terms-1):
        self._n_terms-=1
        # Drop the per-term bookkeeping in lockstep.
        F = self._F.pop() #= self.F[:-1]
        A = self._A.pop() #= self.A[:-1]
        self._A_identity.pop() #= self.A_identity[:-1]
        REML_term = self._REML_term.pop()# = self.REML_term[:-1]
        self._B.pop()# = self.B[:-1]
        # Shrink the total fixed-effect count by this term's K*L entries.
        self._n_fixed_effs-=F.shape[1]*A.shape[0]
        if REML_term:
            self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]
        pass
    elif index >= self.n_terms:
        raise Exception("index exceeds max index of terms")
    else:
        raise NotImplementedError("currently only last term can be removed")
    pass
    self._rebuild_indicator()
    # Invalidate every cached quantity that depends on the design.
    self.clear_cache('Fstar','Astar','Xstar','Xhat',
                     'Areml','Areml_eigh','Areml_chol','Areml_inv',
                     'beta_hat','B_hat',
                     'LRLdiag_Xhat_tens','Areml_grad',
                     'beta_grad','Xstar_beta_grad','Zstar','DLZ')
set sample and trait designs F: NxK sample design A: LxP sample design REML: REML for this term? index: index of which fixed effect to replace. If None, remove last term.
def with_config_dir(self, dir_path):
    """
    Configure current resolver to use every valid YAML configuration file
    available in the given directory path.

    To be taken into account, a configuration file must conform to the
    following naming convention:
      * 'lexicon.yml' for a global Lexicon config file
        (see with_config_file doc)
      * 'lexicon_[provider].yml' for a DNS provider specific configuration
        file, with [provider] equal to the DNS provider name
        (see with_provider_config_file doc)

    Example:
        $ ls /etc/lexicon
        lexicon.yml             # global Lexicon configuration file
        lexicon_cloudflare.yml  # specific configuration file for the
                                # cloudflare DNS provider
    """
    provider_files = []
    global_files = []
    pattern = re.compile(r'^lexicon(?:_(\w+)|)\.yml$')

    # Partition the directory entries into provider-specific and global
    # configuration files, ignoring anything that is not a regular file.
    for entry in os.listdir(dir_path):
        full_path = os.path.join(dir_path, entry)
        if not os.path.isfile(full_path):
            continue
        match = pattern.search(os.path.basename(full_path))
        if not match:
            continue
        provider = match.group(1)
        if provider:
            provider_files.append((provider, full_path))
        else:
            global_files.append(full_path)

    # Provider-specific files are applied first, then the global ones,
    # matching the original ordering.
    for provider, config_path in provider_files:
        self.with_provider_config_file(provider, config_path)
    for config_path in global_files:
        self.with_config_file(config_path)

    return self
Configure current resolver to use every valid YAML configuration files available in the given directory path. To be taken into account, a configuration file must conform to the following naming convention: * 'lexicon.yml' for a global Lexicon config file (see with_config_file doc) * 'lexicon_[provider].yml' for a DNS provider specific configuration file, with [provider] equals to the DNS provider name (see with_provider_config_file doc) Example: $ ls /etc/lexicon lexicon.yml # global Lexicon configuration file lexicon_cloudflare.yml # specific configuration file for clouflare DNS provder
def _reset(cls): """If we have forked since the watch dictionaries were initialized, all that has is garbage, so clear it.""" if os.getpid() != cls._cls_pid: cls._cls_pid = os.getpid() cls._cls_instances_by_target.clear() cls._cls_thread_by_target.clear()
If we have forked since the watch dictionaries were initialized, everything they hold is garbage, so clear them.
def select(i):
    """
    Input:  {
              dict             - dict with values being dicts with 'name' as string
                                 to display and 'sort' as int (for ordering)
              (title)          - print title
              (error_if_empty) - if 'yes' and Enter, make error
              (skip_sort)      - if 'yes', do not sort array
            }

    Output: {
              return - return code = 0
              string - selected dictionary key
            }
    """
    title = i.get('title', '')
    if title != '':
        out(title)
        out('')

    d = i['dict']

    # Order the keys by their 'sort' weight unless sorting is disabled.
    if i.get('skip_sort', '') != 'yes':
        keys = sorted(d, key=lambda v: d[v].get('sort', 0))
    else:
        keys = d

    # Print a numbered menu and remember which number maps to which key.
    number_to_key = {}
    for j, k in enumerate(keys):
        sj = str(j)
        number_to_key[sj] = k
        out(sj + ') ' + d[k].get('name', ''))

    out('')

    rx = inp({'text': 'Make your selection (or press Enter for 0): '})
    if rx['return'] > 0:
        return rx

    sx = rx['string'].strip()
    if sx == '':
        # Plain Enter selects the first entry unless that is an error.
        if i.get('error_if_empty', '') == 'yes':
            return {'return': 1, 'error': 'selection is empty'}
        s = keys[0]
    else:
        if sx not in number_to_key:
            return {'return': 1, 'error': 'selection is not recognized'}
        s = number_to_key[sx]

    return {'return': 0, 'string': s}
Input: { dict - dict with values being dicts with 'name' as string to display and 'sort' as int (for ordering) (title) - print title (error_if_empty) - if 'yes' and Enter, make error (skip_sort) - if 'yes', do not sort array } Output: { return - return code = 0 string - selected dictionary key }
def present(name,
            running=None,
            source=None,
            profiles=None,
            config=None,
            devices=None,
            architecture='x86_64',
            ephemeral=False,
            restart_on_change=False,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:
             "xenial/amd64"
        or a dict with type "image" with alias:
            {"type": "image",
             "alias": "xenial/amd64"}
        or image with "fingerprint":
            {"type": "image",
             "fingerprint": "SHA-256"}
        or image with "properties":
            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"}}
        or none:
            {"type": "none"}
        or copy:
            {"type": "copy",
             "source": "my-old-container"}

    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).
        Can also be a list:
            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:
            * unknown
            * i686
            * x86_64
            * armv7l
            * aarch64
            * ppc
            * ppc64
            * ppc64le
            * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or its
        devices?

    remote_addr :
        A URL to a remote server; you also have to give cert and key if you
        provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert; True by default, but in most cases you
        want to set it to False as LXD normally uses self-signed
        certificates.
    '''
    if profiles is None:
        profiles = ['default']

    if source is None:
        source = {}

    # State return skeleton; 'changes' is filled in as work is done.
    ret = {
        'name': name,
        'running': running,
        'profiles': profiles,
        'source': source,
        'config': config,
        'devices': devices,
        'architecture': architecture,
        'ephemeral': ephemeral,
        'restart_on_change': restart_on_change,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    container = None
    try:
        container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Profile not found
        pass

    if container is None:
        if __opts__['test']:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{0}"'.format(name)
            ret['changes'] = {
                'created': msg
            }
            if running is True:
                msg = msg + ' and start it.'
                ret['changes']['started'] = (
                    'Would start the container "{0}"'.format(name)
                )
            # NOTE(review): this reassignment discards the 'started' entry
            # added just above -- looks unintended; confirm.
            ret['changes'] = {'created': msg}
            return _unchanged(ret, msg)

        # create the container
        try:
            __salt__['lxd.container_create'](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert
            )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

        msg = 'Created the container "{0}"'.format(name)
        ret['changes'] = {
            'created': msg
        }

        if running is True:
            try:
                __salt__['lxd.container_start'](
                    name, remote_addr, cert, key, verify_cert
                )
            except CommandExecutionError as e:
                return _error(ret, six.text_type(e))

            msg = msg + ' and started it.'
            ret['changes'] = {
                'started': 'Started the container "{0}"'.format(name)
            }

        return _success(ret, msg)

    # Container exists, lets check for differences
    new_profiles = set(map(six.text_type, profiles))
    old_profiles = set(map(six.text_type, container.profiles))

    container_changed = False

    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__['test']:
            profile_changes.append('Removed profile "{0}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{0}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__['test']:
            profile_changes.append('Added profile "{0}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{0}"'.format(k))

    if profile_changes:
        container_changed = True
        ret['changes']['profiles'] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__['lxd.normalize_input_values'](
        config,
        devices
    )
    changes = __salt__['lxd.sync_config_devices'](
        container, config, devices,
        __opts__['test']
    )
    if changes:
        container_changed = True
        ret['changes'].update(changes)

    is_running = \
        container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__['test']:
        try:
            __salt__['lxd.pylxd_save_object'](container)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    # Reconcile the desired running state with the actual one.
    if running != is_running:
        if running is True:
            if __opts__['test']:
                changes['running'] = 'Would start the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and started.').format(name)
                )
            else:
                container.start(wait=True)
                changes['running'] = 'Started the container'
        elif running is False:
            if __opts__['test']:
                changes['stopped'] = 'Would stopped the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and stopped.').format(name)
                )
            else:
                container.stop(wait=True)
                changes['stopped'] = 'Stopped the container'

    # Optionally restart a still-running container when its config or
    # devices changed.
    if ((running is True or running is None) and
            is_running and
            restart_on_change and
            container_changed):
        if __opts__['test']:
            changes['restarted'] = 'Would restart the container'
            return _unchanged(
                ret,
                'Would restart the container "{0}"'.format(name)
            )
        else:
            container.restart(wait=True)
            changes['restarted'] = (
                'Container "{0}" has been restarted'.format(name)
            )
            return _success(
                ret,
                'Container "{0}" has been restarted'.format(name)
            )

    if not container_changed:
        return _success(ret, 'No changes')

    if __opts__['test']:
        return _unchanged(
            ret,
            'Container "{0}" would get changed.'.format(name)
        )

    return _success(ret, '{0} changes'.format(len(ret['changes'].keys())))
Create the named container if it does not exist name The name of the container to be created running : None * If ``True``, ensure that the container is running * If ``False``, ensure that the container is stopped * If ``None``, do nothing with regards to the running state of the container source : None Can be either a string containing an image alias: "xenial/amd64" or an dict with type "image" with alias: {"type": "image", "alias": "xenial/amd64"} or image with "fingerprint": {"type": "image", "fingerprint": "SHA-256"} or image with "properties": {"type": "image", "properties": { "os": "ubuntu", "release": "14.04", "architecture": "x86_64" }} or none: {"type": "none"} or copy: {"type": "copy", "source": "my-old-container"} profiles : ['default'] List of profiles to apply on this container config : A config dict or None (None = unset). Can also be a list: [{'key': 'boot.autostart', 'value': 1}, {'key': 'security.privileged', 'value': '1'}] devices : A device dict or None (None = unset). architecture : 'x86_64' Can be one of the following: * unknown * i686 * x86_64 * armv7l * aarch64 * ppc * ppc64 * ppc64le * s390x ephemeral : False Destroy this container after stop? restart_on_change : False Restart the container when we detect changes on the config or its devices? remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Zertifikate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates.
def start(self):
    """
    This method must be called immediately after the class is instantiated.
    It instantiates the serial (or socket) interface and then performs auto
    pin discovery.  It is intended for use by pymata3 applications that do
    not use asyncio coroutines directly.

    :returns: No return value.
    """
    # check if user specified a socket transport
    if self.ip_address:
        self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)
        self.loop.run_until_complete((self.socket.start()))
        # set the read and write handles
        self.read = self.socket.read
        self.write = self.socket.write
        # Consume the expected handshake bytes from the socket.
        for i in range(0, len(self.ip_handshake)):
            self.loop.run_until_complete((self.read()))
    else:
        try:
            self.serial_port = PymataSerial(self.com_port, 57600,
                                            self.sleep_tune,
                                            self.log_output)
            # set the read and write handles
            self.read = self.serial_port.read
            self.write = self.serial_port.write
        except serial.SerialException:
            if self.log_output:
                log_string = 'Cannot instantiate serial interface: ' \
                             + self.com_port
                logging.exception(log_string)
            else:
                print(
                    'Cannot instantiate serial interface: ' + self.com_port)
                print('To see a list of serial ports, type: "list_serial_ports" in your console.')
            sys.exit(0)

    # wait for arduino to go through a reset cycle if need be
    time.sleep(self.arduino_wait)

    # register the get_command method with the event loop
    # self.loop = asyncio.get_event_loop()
    self.the_task = self.loop.create_task(self._command_dispatcher())

    # get arduino firmware version and print it
    # NOTE(review): logging.exception is used here outside an except
    # handler for informational messages -- logging.info seems intended.
    try:
        firmware_version = self.loop.run_until_complete(self.get_firmware_version())
        if self.log_output:
            log_string = "\nArduino Firmware ID: " + firmware_version
            logging.exception(log_string)
        else:
            print("\nArduino Firmware ID: " + firmware_version)
    except TypeError:
        print('\nIs your serial cable plugged in and do you have the correct Firmata sketch loaded?')
        print('Is the COM port correct?')
        print('To see a list of serial ports, type: "list_serial_ports" in your console.')
        sys.exit(0)

    # try to get an analog pin map. if it comes back as none - shutdown
    report = self.loop.run_until_complete(self.get_analog_map())
    if not report:
        if self.log_output:
            log_string = '*** Analog map retrieval timed out. ***'
            logging.exception(log_string)
            log_string = '\nDo you have Arduino connectivity and do you ' \
                         'have a Firmata sketch uploaded to the board?'
            logging.exception(log_string)
        else:
            print('*** Analog map retrieval timed out. ***')
            print('\nDo you have Arduino connectivity and do you have a '
                  'Firmata sketch uploaded to the board?')
        # Cancel all pending tasks and shut the loop down cleanly.
        # NOTE(review): asyncio.Task.all_tasks is deprecated/removed in
        # modern Python (use asyncio.all_tasks) -- confirm target version.
        try:
            loop = self.loop
            for t in asyncio.Task.all_tasks(loop):
                t.cancel()
            loop.run_until_complete(asyncio.sleep(.1))
            loop.close()
            loop.stop()
            sys.exit(0)
        except RuntimeError:
            # this suppresses the Event Loop Is Running message, which may
            # be a bug in python 3
            sys.exit(0)
        except TypeError:
            sys.exit(0)

    # custom assemble the pin lists: one PinData per digital pin, and an
    # additional PinData for each pin not marked IGNORE (the analog pins).
    for pin in report:
        digital_data = PinData()
        self.digital_pins.append(digital_data)
        if pin != Constants.IGNORE:
            analog_data = PinData()
            self.analog_pins.append(analog_data)

    if self.log_output:
        log_string = 'Auto-discovery complete. Found ' + \
                     str(len(self.digital_pins)) + ' Digital Pins and ' + \
                     str(len(self.analog_pins)) + ' Analog Pins'
        logging.info(log_string)
    else:
        print('{} {} {} {} {}'.format('Auto-discovery complete. Found',
                                      len(self.digital_pins),
                                      'Digital Pins and',
                                      len(self.analog_pins),
                                      'Analog Pins\n\n'))

    # First analog pin number follows the purely-digital pin range.
    self.first_analog_pin = len(self.digital_pins) - len(self.analog_pins)
This method must be called immediately after the class is instantiated. It instantiates the serial interface and then performs auto pin discovery. It is intended for use by pymata3 applications that do not use asyncio coroutines directly. :returns: No return value.
def returnTradeHistory(self, currencyPair, start=None, end=None):
    """Returns the past 200 trades for a given market, or up to 50,000
    trades between a range specified in UNIX timestamps by the "start"
    and "end" GET parameters."""
    # Forward everything to the public-API helper unchanged.
    params = {
        'currencyPair': currencyPair,
        'start': start,
        'end': end,
    }
    return self._public('returnTradeHistory', **params)
Returns the past 200 trades for a given market, or up to 50,000 trades between a range specified in UNIX timestamps by the "start" and "end" GET parameters.
def _get_app_path(url): ''' Extract the app path from a Bokeh server URL Args: url (str) : Returns: str ''' app_path = urlparse(url).path.rstrip("/") if not app_path.startswith("/"): app_path = "/" + app_path return app_path
Extract the app path from a Bokeh server URL Args: url (str) : Returns: str
def fileRefDiscovery(self):
    '''
    Finds the missing components for file nodes by parsing the Doxygen xml
    (which is just the ``doxygen_output_dir/node.refid``).  Additional
    items parsed include adding items whose ``refid`` tag are used in this
    file, the <programlisting> for the file, what it includes and what
    includes it, as well as the location of the file (with respect to the
    *Doxygen* root).

    Care must be taken to only include a refid found with specific tags.
    The parsing of the xml file was done by just looking at some example
    outputs.  It seems to be working correctly, but there may be some
    subtle use cases that break it.

    .. warning::

       Some enums, classes, variables, etc declared in the file will not
       have their associated refid in the declaration of the file, but
       will be present in the <programlisting>.  These are added to the
       files' list of children when they are found, but this parental
       relationship cannot be formed if you set
       ``XML_PROGRAMLISTING = NO`` with Doxygen.  An example of such an
       enum would be an enum declared inside of a namespace within this
       file.
    '''
    if not os.path.isdir(configs._doxygen_xml_output_directory):
        utils.fancyError("The doxygen xml output directory [{0}] is not valid!".format(
            configs._doxygen_xml_output_directory
        ))

    # parse the doxygen xml file and extract all refid's put in it
    # keys: file object, values: list of refid's
    doxygen_xml_file_ownerships = {}
    # innerclass, innernamespace, etc
    ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*')
    # what files this file includes
    inc_regex = re.compile(r'.*<includes.*>(.+)</includes>')
    # what files include this file
    inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>')
    # the actual location of the file
    loc_regex = re.compile(r'.*<location file="(.*)"/>')

    for f in self.files:
        doxygen_xml_file_ownerships[f] = []
        # NOTE(review): bare except below swallows every error (including
        # KeyboardInterrupt) -- narrowing it would be safer; confirm.
        try:
            doxy_xml_path = os.path.join(configs._doxygen_xml_output_directory, "{0}.xml".format(f.refid))
            with codecs.open(doxy_xml_path, "r", "utf-8") as doxy_file:
                processing_code_listing = False  # shows up at bottom of xml
                for line in doxy_file:
                    # see if this line represents the location tag
                    match = loc_regex.match(line)
                    if match is not None:
                        f.location = os.path.normpath(match.groups()[0])
                        continue

                    if not processing_code_listing:
                        # gather included by references
                        match = inc_by_regex.match(line)
                        if match is not None:
                            ref, name = match.groups()
                            f.included_by.append((ref, name))
                            continue
                        # gather includes lines
                        match = inc_regex.match(line)
                        if match is not None:
                            inc = match.groups()[0]
                            f.includes.append(inc)
                            continue
                        # gather any classes, namespaces, etc declared in the file
                        match = ref_regex.match(line)
                        if match is not None:
                            match_refid = match.groups()[0]
                            if match_refid in self.node_by_refid:
                                doxygen_xml_file_ownerships[f].append(match_refid)
                            continue
                        # lastly, see if we are starting the code listing
                        if "<programlisting>" in line:
                            processing_code_listing = True
                    elif processing_code_listing:
                        if "</programlisting>" in line:
                            processing_code_listing = False
                        else:
                            f.program_listing.append(line)
        except:
            utils.fancyError(
                "Unable to process doxygen xml for file [{0}].\n".format(f.name)
            )

    #
    # IMPORTANT: do not set the parent field of anything being added as a child to the file
    #

    # hack to make things work right on RTD
    # TODO: do this at construction rather than as a post process!
    if configs.doxygenStripFromPath is not None:
        for node in itertools.chain(self.files, self.dirs):
            if node.kind == "file":
                manip = node.location
            else:  # node.kind == "dir"
                manip = node.name

            abs_strip_path = os.path.normpath(os.path.abspath(
                configs.doxygenStripFromPath
            ))
            if manip.startswith(abs_strip_path):
                manip = os.path.relpath(manip, abs_strip_path)

            manip = os.path.normpath(manip)
            if node.kind == "file":
                node.location = manip
            else:  # node.kind == "dir"
                node.name = manip

    # now that we have parsed all the listed refid's in the doxygen xml, reparent
    # the nodes that we care about
    allowable_child_kinds = ["struct", "class", "function", "typedef", "define", "enum", "union"]
    for f in self.files:
        for match_refid in doxygen_xml_file_ownerships[f]:
            child = self.node_by_refid[match_refid]
            if child.kind in allowable_child_kinds:
                if child not in f.children:
                    f.children.append(child)
            elif child.kind == "namespace":
                if child not in f.namespaces_used:
                    f.namespaces_used.append(child)

    # last but not least, some different kinds declared in the file that are scoped
    # in a namespace will show up in the programlisting, but not at the toplevel.
    for f in self.files:
        potential_orphans = []
        for n in f.namespaces_used:
            for child in n.children:
                if child.kind == "enum" or child.kind == "variable" or \
                        child.kind == "function" or child.kind == "typedef" or \
                        child.kind == "union":
                    potential_orphans.append(child)

        # now that we have a list of potential orphans, see if this doxygen xml had
        # the refid of a given child present.
        for orphan in potential_orphans:
            unresolved_name = orphan.name.split("::")[-1]
            if f.refid in orphan.refid and any(unresolved_name in line for line in f.program_listing):
                if orphan not in f.children:
                    f.children.append(orphan)

    # Last but not least, make sure all children know where they were defined.
    for f in self.files:
        for child in f.children:
            if child.def_in_file is None:
                child.def_in_file = f
            elif child.def_in_file != f:
                # << verboseBuild
                utils.verbose_log(
                    "Conflicting file definition for [{0}]: both [{1}] and [{2}] found.".format(
                        child.name, child.def_in_file.name, f.name
                    ),
                    utils.AnsiColors.BOLD_RED
                )
Finds the missing components for file nodes by parsing the Doxygen xml (which is just the ``doxygen_output_dir/node.refid``). Additional items parsed include adding items whose ``refid`` tag are used in this file, the <programlisting> for the file, what it includes and what includes it, as well as the location of the file (with respsect to the *Doxygen* root). Care must be taken to only include a refid found with specific tags. The parsing of the xml file was done by just looking at some example outputs. It seems to be working correctly, but there may be some subtle use cases that break it. .. warning:: Some enums, classes, variables, etc declared in the file will not have their associated refid in the declaration of the file, but will be present in the <programlisting>. These are added to the files' list of children when they are found, but this parental relationship cannot be formed if you set ``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would be an enum declared inside of a namespace within this file.
def _json_safe_dump(self, data): """ Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates """ return json.dumps(data).replace(u'<', u'\\u003c') \ .replace(u'>', u'\\u003e') \ .replace(u'&', u'\\u0026') \ .replace(u"'", u'\\u0027')
Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates
def delete(self, *names):
    """
    Remove the key from redis

    :param names: tuple of strings - The keys to remove from redis.
    :return: Future()
    """
    # Qualify each key with this object's namespace before deleting.
    qualified = [self.redis_key(name) for name in names]
    with self.pipe as pipe:
        return pipe.delete(*qualified)
Remove the key from redis :param names: tuple of strings - The keys to remove from redis. :return: Future()
def _finalize_batch(self):
    """
    Method to finalize the batch, this will iterate over the _batches
    dict and create a PmtInf node for each batch. The correct
    information (from the batch_key and batch_totals) will be inserted
    and the batch transaction nodes will be folded. Finally, the
    batches will be added to the main XML.
    """
    for batch_meta, batch_nodes in self._batches.items():
        PmtInf_nodes = self._create_PmtInf_node()
        PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
        PmtInf_nodes['PmtMtdNode'].text = "TRF"
        PmtInf_nodes['BtchBookgNode'].text = "true"
        PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"

        # batch_meta is the requested execution date key; an empty key means
        # no execution date was requested, so the node is dropped entirely.
        if batch_meta:
            PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta
        else:
            del PmtInf_nodes['ReqdExctnDtNode']

        PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name']
        PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN']
        if 'BIC' in self._config:
            PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC']

        PmtInf_nodes['ChrgBrNode'].text = "SLEV"
        # Per-batch transaction count and control sum for this execution date.
        PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes))
        PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta])

        # Fold the prepared nodes together; the append order below follows
        # the schema's required PmtInf element sequence.
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode'])

        PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node'])
        PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode'])

        if 'ReqdExctnDtNode' in PmtInf_nodes:
            PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode'])

        PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode'])

        PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node'])
        PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node'])
        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode'])

        if 'BIC' in self._config:
            PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node'])
            PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node'])
            PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode'])

        PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode'])

        # Attach every transaction node collected for this batch.
        for txnode in batch_nodes:
            PmtInf_nodes['PmtInfNode'].append(txnode)

        # Finally, hang the finished PmtInf under the document root element.
        CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
        CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
Method to finalize the batch, this will iterate over the _batches dict and create a PmtInf node for each batch. The correct information (from the batch_key and batch_totals) will be inserted and the batch transaction nodes will be folded. Finally, the batches will be added to the main XML.
def manifest(self, entry):
    """Returns manifest as a list.

    :param entry: :class:`jicimagelib.image.FileBackend.Entry`
    :returns: list of dicts describing each image file in the entry
    """
    directory = entry.directory
    records = []
    # Walk the files in natural sort order, skipping the manifest itself.
    for fname in self._sorted_nicely(os.listdir(directory)):
        if fname == 'manifest.json':
            continue
        metadata = self.metadata_from_fname(fname)
        records.append({
            "filename": os.path.abspath(os.path.join(directory, fname)),
            "series": metadata.s,
            "channel": metadata.c,
            "zslice": metadata.z,
            "timepoint": metadata.t,
        })
    return records
Returns manifest as a list. :param entry: :class:`jicimagelib.image.FileBackend.Entry` :returns: list
async def peer_delete(self, *, dc=None, address):
    """Remove the server with given address from the Raft configuration

    Parameters:
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
        address (str): "IP:port" of the server to remove.
    Returns:
        bool: ``True`` on success

    There are rare cases where a peer may be left behind in the Raft
    configuration even though the server is no longer present and known
    to the cluster. This endpoint can be used to remove the failed
    server so that it no longer affects the Raft quorum.
    """
    # Accept either a plain string or an object exposing an "Address" field.
    raw_address = extract_attr(address, keys=["Address"])
    response = await self._api.delete(
        "/v1/operator/raft/peer",
        params={"dc": dc, "address": raw_address})
    # Any 2xx/3xx status counts as success.
    return response.status < 400
Remove the server with given address from the Raft configuration Parameters: dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. address (str): "IP:port" of the server to remove. Returns: bool: ``True`` on success There are rare cases where a peer may be left behind in the Raft configuration even though the server is no longer present and known to the cluster. This endpoint can be used to remove the failed server so that it no longer affects the Raft quorum.
def to_dict(self):
    """Returns attributes formatted as a dictionary.

    Entries from ``kvs`` take precedence over the ``id`` and
    ``classes`` keys when a key collides.
    """
    merged = {'id': self.id, 'classes': self.classes}
    for key, value in self.kvs.items():
        merged[key] = value
    return merged
Returns attributes formatted as a dictionary.
def degrees_of_freedom(self):
    """
    Returns the number of degrees of freedom: the total number of data
    points across all data sets minus the number of fit parameters.
    Returns None when no data or no residuals are available.
    """
    if len(self._set_xdata) == 0 or len(self._set_ydata) == 0:
        return None

    # Temporary hack: get the studentized residuals, which uses the massaged data
    # This should later be changed to get_massaged_data()
    r = self.studentized_residuals()

    # Happens if data / functions not defined.
    # Fixed: was `r == None`, which is both unidiomatic (PEP 8) and breaks
    # with an ambiguous-truth-value error if r is ever a numpy array.
    if r is None:
        return None

    # calculate the number of points (kept as a float, as before)
    N = 0.0
    for residuals in r:
        N += len(residuals)

    return N - len(self._pnames)
Returns the number of degrees of freedom.
def to_satoshis(input_quantity, input_type):
    ''' convert to satoshis, no rounding '''
    assert input_type in UNIT_CHOICES, input_type

    # satoshi input needs no scaling; other units are scaled by their
    # satoshis-per-unit factor.
    if input_type == 'satoshi':
        satoshis = input_quantity
    elif input_type in ('btc', 'mbtc', 'bit'):
        per_unit = float(UNIT_MAPPINGS[input_type]['satoshis_per'])
        satoshis = float(input_quantity) * per_unit
    else:
        raise Exception('Invalid Unit Choice: %s' % input_type)

    return int(satoshis)
convert to satoshis, no rounding
def raw(self):
    """Return raw key.

    returns:
        str: raw key (decoded from base64 when no raw form is cached)
    """
    # Prefer the cached raw form; otherwise decode the base64 copy.
    key = self._raw if self._raw else base64decode(self._b64encoded)
    return text_type(key).strip("\r\n")
Return raw key. returns: str: raw key
def check(path_dir, requirements_name='requirements.txt'):
    '''Look for unused packages listed on project requirements'''
    requirements = _load_requirements(requirements_name, path_dir)
    imported_modules = _iter_modules(path_dir)
    installed_packages = _list_installed_packages()
    imported_modules.update(_excluded_imports())

    # Installed but never imported anywhere in the project.
    unused = {pkg for pkg in installed_packages if pkg not in imported_modules}
    with_dependencies, _ = _list_dependencies(unused)

    # Report only the unused packages that the project explicitly requires,
    # each followed by its own dependents (if any).
    for name in sorted(pkg for pkg in unused if pkg in requirements):
        print(' - {}'.format(name))
        dependents = with_dependencies.get(name)
        if dependents:
            for dependent in dependents:
                print('\t - {}'.format(dependent))
Look for unused packages listed on project requirements
def _delete_redundancy_routers(self, context, router_db):
    """To be called in delete_router() BEFORE the router has been
    deleted in the DB. The router should not have any interfaces.

    :param context: request context; elevated internally so the hidden
        redundancy routers can be deleted regardless of tenant.
    :param router_db: DB object of the user-visible router whose
        redundancy (backup) routers are to be removed.
    """
    e_context = context.elevated()
    # Each binding points at one hidden redundancy router; remove them all.
    for binding in router_db.redundancy_bindings:
        self.delete_router(e_context, binding.redundancy_router_id)
        LOG.debug("Deleted redundancy router %s", binding.redundancy_router_id)
    if router_db.gw_port_id:
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, router_db.gw_port_id)
To be called in delete_router() BEFORE the router has been deleted in the DB. The router should not have any interfaces.
def infoObject(object, cat, format, *args):
    """
    Log an informational message in the given category.

    :param object: the object the message concerns (NOTE: parameter name
        shadows the ``object`` builtin -- kept for API compatibility)
    :param cat: category name the message is logged under
    :param format: printf-style format string
    :param args: values interpolated into ``format`` by the logger
    """
    # args is forwarded as a tuple so formatting is deferred to doLog.
    doLog(INFO, object, cat, format, args)
Log an informational message in the given category.
def _standalone_init(self, spark_master_address, pre_20_mode, requests_config, tags):
    """
    Return a dictionary of {app_id: (app_name, tracking_url)} for the
    running Spark applications, as reported by the standalone Spark master.

    :param spark_master_address: base URL of the Spark master
    :param pre_20_mode: when True, query the per-app REST API for the
        application list (Spark < 2.0 behavior)
    :param requests_config: HTTP request configuration passed through to
        the REST helpers
    :param tags: extra tags attached to service checks
    """
    metrics_json = self._rest_request_to_json(
        spark_master_address, SPARK_MASTER_STATE_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
    )

    running_apps = {}

    if metrics_json.get('activeapps'):
        for app in metrics_json['activeapps']:
            app_id = app.get('id')
            app_name = app.get('name')

            # Parse through the HTML to grab the application driver's link
            try:
                app_url = self._get_standalone_app_url(app_id, spark_master_address, requests_config, tags)

                if app_id and app_name and app_url:
                    if pre_20_mode:
                        self.log.debug('Getting application list in pre-20 mode')
                        applist = self._rest_request_to_json(
                            app_url, SPARK_APPS_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags
                        )
                        for appl in applist:
                            aid = appl.get('id')
                            aname = appl.get('name')
                            running_apps[aid] = (aname, app_url)
                    else:
                        running_apps[app_id] = (app_name, app_url)
            except Exception:
                # it's possible for the requests to fail if the job
                # completed since we got the list of apps. Just continue
                pass

    # Report success after gathering metrics from Spark master
    self.service_check(
        SPARK_STANDALONE_SERVICE_CHECK,
        AgentCheck.OK,
        tags=['url:%s' % spark_master_address] + tags,
        message='Connection to Spark master "%s" was successful' % spark_master_address,
    )
    # Fixed: use lazy logger arguments instead of eager %-formatting so the
    # message is only built when INFO logging is enabled.
    self.log.info("Returning running apps %s", running_apps)
    return running_apps
Return a dictionary of {app_id: (app_name, tracking_url)} for the running Spark applications
def get_interface_detail_output_interface_configured_line_speed(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML payload for a get-interface-detail RPC,
    filtering on interface type/name and configured line speed, then
    dispatches it via the callback.

    Consumed kwargs: ``interface_type``, ``interface_name``,
    ``configured_line_speed``, and optionally ``callback``.
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    # NOTE(review): the freshly created "config" element is discarded here and
    # the RPC element itself becomes the payload -- a quirk of the code
    # generator that is consistent across these generated methods.
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    configured_line_speed = ET.SubElement(interface, "configured-line-speed")
    configured_line_speed.text = kwargs.pop('configured_line_speed')

    # Fall back to the session's default callback when none is supplied.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def parseFullScan(self, i, modifications=True):
    """
    Parse scan info into a PeptideObject (spectrum) for plotting.

    Takes significantly longer than the summary path since it has to
    unzip/parse the stored XML.

    :param i: result row tuple of (scan info blob, peptide sequence,
        peptide id)
    :param modifications: when True, also load the peptide's amino-acid
        modifications from the database
    :returns: a populated PeptideObject, or None if the scan info could
        not be decompressed
    """
    scanObj = PeptideObject()
    peptide = str(i[1])
    pid=i[2]
    if modifications:
        # NOTE(review): SQL is built with %-interpolation; pid originates
        # from this local database, but a parameterized query would be
        # safer -- confirm the driver's paramstyle before changing.
        sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'%pid
        for row in self.conn.execute(sql):
            # row = (modification name, position, delta mass)
            scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0])
    scanObj.peptide = peptide
    if self.decompressScanInfo(scanObj, i[0]):
        return scanObj
    return None
Parses scan info to produce a Spectrum object for plotting. Takes significantly longer since it has to unzip/parse the XML.
def natsort_key(val, key, string_func, bytes_func, num_func):
    """
    Key to sort strings and numbers naturally.

    Works by splitting the input into string and number components and
    converting the numbers into actual ints or floats.

    Parameters
    ----------
    val : str | unicode | bytes | int | float | iterable
        The value to build a sort key for.
    key : callable | None
        Applied to *val* before any other processing.
    string_func : callable
        Applied when *val* is a string; must return a tuple.
    bytes_func : callable
        Applied when *val* is bytes; must return a tuple.
    num_func : callable
        Applied when *val* is neither string, bytes, nor iterable; must
        return a tuple.

    Returns
    -------
    out : tuple
        The value split into string and numeric components. Always starts
        with a string and alternates between numbers and strings, except
        when applied recursively to an iterable, in which case it yields
        tuples of such tuples.

    See Also
    --------
    parse_string_factory
    parse_bytes_factory
    parse_number_factory
    """
    if key is not None:
        val = key(val)

    # Strings are the overwhelmingly common case, so try that path first
    # and fall through only when the string handler rejects the value.
    try:
        return string_func(val)
    except (TypeError, AttributeError):
        pass

    # Exact bytes type gets its own handler (subclasses intentionally
    # fall through, matching the original exact-type check).
    if type(val) is bytes:
        return bytes_func(val)

    # Otherwise assume an iterable and recurse (without re-applying key);
    # a TypeError here means it was not iterable, hence a number.
    try:
        return tuple(
            natsort_key(element, None, string_func, bytes_func, num_func)
            for element in val
        )
    except TypeError:
        return num_func(val)
Key to sort strings and numbers naturally. It works by splitting the string into components of strings and numbers, and then converting the numbers into actual ints or floats. Parameters ---------- val : str | unicode | bytes | int | float | iterable key : callable | None A key to apply to the *val* before any other operations are performed. string_func : callable If *val* (or the output of *key* if given) is of type *str*, this function will be applied to it. The function must return a tuple. bytes_func : callable If *val* (or the output of *key* if given) is of type *bytes*, this function will be applied to it. The function must return a tuple. num_func : callable If *val* (or the output of *key* if given) is not of type *bytes*, *str*, nor is iterable, this function will be applied to it. The function must return a tuple. Returns ------- out : tuple The string split into its string and numeric components. It *always* starts with a string, and then alternates between numbers and strings (unless it was applied recursively, in which case it will return tuples of tuples, but the lowest-level tuples will then *always* start with a string etc.). See Also -------- parse_string_factory parse_bytes_factory parse_number_factory
def _set_auth_type(self, v, load=False):
    """
    Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_type() directly.

    YANG Description: Authentication type
    """
    # Unwrap values that carry their underlying YANG type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value into the generated container class.
        t = YANGDynClass(v,base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_type must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
        })
    # Store the validated value and notify the parent config tree, if attached.
    self.__auth_type = t
    if hasattr(self, '_set'):
        self._set()
Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container) If this variable is read-only (config: false) in the source YANG file, then _set_auth_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_auth_type() directly. YANG Description: Authentication type
def sqlwhere(criteria=None):
    """Generates SQL where clause. Returns (sql, values).
    Criteria is a dictionary of {field: value}.

        >>> sqlwhere()
        ('', [])
        >>> sqlwhere({'id': 5})
        ('id=%s', [5])
        >>> sqlwhere({'id': 3, 'name': 'toto'})
        ('id=%s and name=%s', [3, 'toto'])
        >>> sqlwhere({'id': 3, 'name': 'toto', 'createdon': '2013-12-02'})
        ('createdon=%s and id=%s and name=%s', ['2013-12-02', 3, 'toto'])
    """
    if not criteria:
        return ('', [])

    # Deterministic field order keeps the generated SQL stable.
    fields = sorted(criteria)
    validate_names(fields)

    sql = ' and '.join(field + '=%s' for field in fields)
    values = [criteria[field] for field in fields]
    return (sql, values)
Generates SQL where clause. Returns (sql, values). Criteria is a dictionary of {field: value}. >>> sqlwhere() ('', []) >>> sqlwhere({'id': 5}) ('id=%s', [5]) >>> sqlwhere({'id': 3, 'name': 'toto'}) ('id=%s and name=%s', [3, 'toto']) >>> sqlwhere({'id': 3, 'name': 'toto', 'createdon': '2013-12-02'}) ('createdon=%s and id=%s and name=%s', ['2013-12-02', 3, 'toto'])
def auto_populate(mode='all'):
    """
    Overrides translation fields population mode (population mode decides
    which unprovided translations will be filled during model
    construction / loading).

    Example:

        with auto_populate('all'):
            s = Slugged.objects.create(title='foo')
        s.title_en == 'foo'  # True
        s.title_de == 'foo'  # True

    This method may be used to ensure consistency loading untranslated
    fixtures, with non-default language active:

        with auto_populate('required'):
            call_command('loaddata', 'fixture.json')
    """
    previous_mode = settings.AUTO_POPULATE
    settings.AUTO_POPULATE = mode
    try:
        yield
    finally:
        # Always restore the previous mode, even if the body raised.
        settings.AUTO_POPULATE = previous_mode
Overrides translation fields population mode (population mode decides which unprovided translations will be filled during model construction / loading). Example: with auto_populate('all'): s = Slugged.objects.create(title='foo') s.title_en == 'foo' // True s.title_de == 'foo' // True This method may be used to ensure consistency loading untranslated fixtures, with non-default language active: with auto_populate('required'): call_command('loaddata', 'fixture.json')
def submodel_has_python_callbacks(models):
    ''' Traverses submodels to check for Python (event) callbacks.

    Returns True as soon as any collected model carries at least one
    callback or event callback; False otherwise.
    '''
    # any() short-circuits exactly like the original flag-and-break loop,
    # and an empty container is falsy, matching the len(...) > 0 checks.
    return any(
        model._callbacks or model._event_callbacks
        for model in collect_models(models)
    )
Traverses submodels to check for Python (event) callbacks
def request(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None,
            allow_redirects=False):
    """
    Make an HTTP request against this API version.

    :param method: HTTP method (e.g. ``'GET'``, ``'POST'``)
    :param uri: URI relative to this version's base path
    :param params: optional query-string parameters
    :param data: optional request body
    :param headers: optional HTTP headers
    :param auth: optional authentication credentials
    :param timeout: optional request timeout
    :param allow_redirects: whether to follow HTTP redirects
    :returns: whatever the underlying domain client returns
    """
    # Resolve the version-relative URI, then delegate to the domain client.
    url = self.relative_uri(uri)
    return self.domain.request(
        method,
        url,
        params=params,
        data=data,
        headers=headers,
        auth=auth,
        timeout=timeout,
        allow_redirects=allow_redirects
    )
Make an HTTP request.
def fetch_and_parse(url, bodyLines):
    """Takes a url, and returns a dictionary of data with 'bodyLines' lines"""
    # Fetch the page and hand it straight to the parser.
    return parse(url, fetch_page(url), bodyLines)
Takes a url, and returns a dictionary of data with 'bodyLines' lines
def is_public(self):
    """Return True iff this function should be considered public.

    When the module declares ``__all__`` (``dunder_all``), membership in
    it decides; otherwise a leading underscore marks the name private.
    """
    if self.dunder_all is None:
        return not self.name.startswith('_')
    return self.name in self.dunder_all
Return True iff this function should be considered public.
def _anime_add(self, data):
    """
    Adds an anime to a user's list.

    :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
    :raises: SyntaxError on invalid data type
    :raises: ServerError on failure to add
    :rtype: Bool
    :return: True on success
    """
    if isinstance(data, Anime):
        xmlstr = data.to_xml()
        # NOTE(review): this issues a GET with the XML payload in the query
        # string; MAL's add endpoint historically expected POST -- confirm
        # against the API before changing.
        r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id), params={'data': xmlstr},
                         auth=HTTPBasicAuth(self._username, self._password), headers=self.header)
        # 201 Created is the only success status for this endpoint.
        if r.status_code != 201:
            raise ServerError(r.text, r.status_code)
        return True
    else:
        raise SyntaxError(
            "Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data)))
Adds an anime to a user's list. :param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data :raises: SyntaxError on invalid data type :raises: ServerError on failure to add :rtype: Bool :return: True on success
def _keepVol(self, vol):
    """ Mark this volume to be kept in path.

    :param vol: volume to keep, or None (no-op)
    :raises Exception: if the volume is neither an extra volume nor a
        known path of this destination
    """
    if vol is None:
        return

    # A volume queued for deletion as "extra" simply stops being extra.
    if vol in self.extraVolumes:
        del self.extraVolumes[vol]
        return

    if vol not in self.paths:
        raise Exception("%s not in %s" % (vol, self))

    # Choose a receive path among the known basenames for this volume.
    paths = [os.path.basename(path) for path in self.paths[vol]]
    newPath = self.selectReceivePath(paths)

    # _skipDryRun returns a logging callable; a truthy result means this is
    # a dry run and the copy must be skipped.
    if self._skipDryRun(logger, 'INFO')("Copy %s to %s", vol, newPath):
        return

    self.butterVolumes[vol.uuid].copy(newPath)
Mark this volume to be kept in path.
def _set_dscp_to_cos_mapping(self, v, load=False):
    """
    Setter method for dscp_to_cos_mapping, mapped from YANG variable /qos/map/dscp_cos/dscp_to_cos_mapping (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dscp_to_cos_mapping is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dscp_to_cos_mapping() directly.
    """
    # Unwrap values that carry their underlying YANG type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value into the generated YANG list class.
        t = YANGDynClass(v,base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dscp_to_cos_mapping must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",dscp_to_cos_mapping.dscp_to_cos_mapping, yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}), is_container='list', yang_name="dscp-to-cos-mapping", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to CoS value', u'cli-suppress-mode': None, u'cli-suppress-no': None, u'alt-name': u'map', u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'dscp_mark_list_cos'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })
    # Store the validated list and notify the parent config tree, if attached.
    self.__dscp_to_cos_mapping = t
    if hasattr(self, '_set'):
        self._set()
Setter method for dscp_to_cos_mapping, mapped from YANG variable /qos/map/dscp_cos/dscp_to_cos_mapping (list) If this variable is read-only (config: false) in the source YANG file, then _set_dscp_to_cos_mapping is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dscp_to_cos_mapping() directly.
def write_shortstr(self, s):
    """Write a string up to 255 bytes long (after any encoding).

    If passed a unicode string, encode with UTF-8.

    :raises FrameSyntaxError: if the encoded string exceeds 255 bytes
    """
    self._flushbits()
    # Unicode text is encoded first; byte strings pass through untouched.
    data = s.encode('utf-8') if isinstance(s, string) else s
    if len(data) > 255:
        raise FrameSyntaxError(
            'Shortstring overflow ({0} > 255)'.format(len(data)))
    # A shortstr is a one-octet length followed by the raw bytes.
    self.write_octet(len(data))
    self.out.write(data)
Write a string up to 255 bytes long (after any encoding). If passed a unicode string, encode with UTF-8.
def colorize_invoke_command(self, string):
    """
    Apply various heuristics to return a colorized version of the invoke
    command string. If these fail, simply return the string in plaintext.

    Inspired by colorize_log_entry().
    """
    final_string = string

    try:

        # Line headers
        try:
            for token in ['START', 'END', 'REPORT', '[DEBUG]']:
                if token in final_string:
                    format_string = '[{}]'
                    # match whole words only
                    pattern = r'\b{}\b'
                    if token == '[DEBUG]':
                        format_string = '{}'
                        pattern = re.escape(token)
                    repl = click.style(
                        format_string.format(token),
                        bold=True,
                        fg='cyan'
                    )
                    final_string = re.sub(
                        pattern.format(token), repl, final_string
                    )
        except Exception:  # pragma: no cover
            pass

        # Green bold Tokens
        try:
            for token in [
                'Zappa Event:',
                'RequestId:',
                'Version:',
                'Duration:',
                'Billed',
                'Memory Size:',
                'Max Memory Used:'
            ]:
                if token in final_string:
                    final_string = final_string.replace(token, click.style(
                        token,
                        bold=True,
                        fg='green'
                    ))
        except Exception:  # pragma: no cover
            pass

        # UUIDs
        for token in final_string.replace('\t', ' ').split(' '):
            try:
                # Fixed: was `token.count('-') is 4`, identity-comparing an
                # int literal, which only works by CPython's small-int cache.
                if token.count('-') == 4 and token.replace('-', '').isalnum():
                    final_string = final_string.replace(
                        token,
                        click.style(token, fg='magenta')
                    )
            except Exception:  # pragma: no cover
                pass

        return final_string
    except Exception:
        return string
Apply various heuristics to return a colorized version of the invoke command string. If these fail, simply return the string in plaintext. Inspired by colorize_log_entry().
def _onFunction(self, name, line, pos, absPosition, keywordLine, keywordPos,
                colonLine, colonPos, level, isAsync, returnAnnotation):
    """Memorizes a function"""
    self.__flushLevel(level)
    func = Function(name, line, pos, absPosition, keywordLine, keywordPos,
                    colonLine, colonPos, isAsync, returnAnnotation)
    # Attach any decorators collected just before this definition, then
    # clear the buffer so they are not reused for the next object.
    pendingDecorators = self.__lastDecorators
    if pendingDecorators is not None:
        func.decorators = pendingDecorators
        self.__lastDecorators = None
    self.objectsStack.append(func)
Memorizes a function
def _compute_mean(self, C, rup, dists, sites, imt):
    """
    Compute mean value for PGA and pseudo-velocity response spectrum,
    as given in equation 1. Converts also pseudo-velocity response
    spectrum values to SA, using:

    SA = (PSV * W)/ratio(SA_larger/SA_geo_mean)

    W = (2 * pi / T)
    T = period (sec)
    """
    # Sum of the magnitude/distance terms plus site amplification
    # (equation 1 of the reference model).
    mean = (self._compute_term_1_2(rup, C) +
            self._compute_term_3_4(dists, C) +
            self._get_site_amplification(sites, imt, C))

    # convert from m/s**2 to g for PGA and from m/s to g for PSV
    # and divided this value for the ratio(SA_larger/SA_geo_mean)
    if imt.name == "PGA":
        mean = (np.exp(mean) / g) / C['r_SA']
    else:
        # W is the angular frequency used to convert PSV to SA.
        W = (2. * np.pi)/imt.period
        mean = ((np.exp(mean) * W) / g) / C['r_SA']

    return np.log(mean)
Compute mean value for PGA and pseudo-velocity response spectrum, as given in equation 1. Converts also pseudo-velocity response spectrum values to SA, using: SA = (PSV * W)/ratio(SA_larger/SA_geo_mean) W = (2 * pi / T) T = period (sec)
def _eq__(self, other):
    """
    Compare the current place object to another passed to the
    comparison method. The two place objects must have the same
    identification, even if some of their attributes might be
    different.

    @param other: a ``Place`` instance to compare with the current place
        object.

    @return: ``True`` if the given place corresponds to the current
        place; ``False`` otherwise.
    """
    # NOTE(review): the name looks like a typo for the ``__eq__`` special
    # method -- as ``_eq__`` it is never invoked by ``==``. Confirm intent
    # before renaming, since callers may invoke it explicitly.
    # NOTE(review): when either place_id is falsy this returns that falsy
    # value (e.g. ``None``) rather than strictly ``False``.
    return self.place_id and other.place_id \
        and self.place_id == other.place_id
Compare the current place object to another passed to the comparison method. The two place objects must have the same identification, even if some of their attributes might be different. @param other: a ``Place`` instance to compare with the current place object. @return: ``True`` if the given place corresponds to the current place; ``False`` otherwise.
def guess_depth_cutoff(cb_histogram):
    ''' Guesses at an appropriate barcode cutoff from a cellular barcode
    count histogram. Returns the cutoff count, or None if no cutoff can
    be determined.
    '''
    with read_cbhistogram(cb_histogram) as fh:
        cb_vals = [int(p.strip().split()[1]) for p in fh]
    histo = np.histogram(np.log10(cb_vals), bins=50)
    vals = histo[0]
    edges = histo[1]
    # Bin midpoints in log10 space.
    mids = np.array([(edges[i] + edges[i+1])/2 for i in range(edges.size - 1)])
    # Density weighted by the (linear-scale) barcode counts in each bin.
    wdensity = vals * (10**mids) / sum(vals * (10**mids))
    baseline = np.median(wdensity)
    wdensity = list(wdensity)
    # find highest density in upper half of barcode distribution
    # Fixed: floor division -- a float slice index raises TypeError on Python 3.
    peak = wdensity.index(max(wdensity[len(wdensity) // 2:]))
    cutoff = None
    # Walk down from the peak until density falls below twice the baseline.
    # NOTE(review): 'index' is relative to the wdensity[1:peak] slice;
    # confirm the intended offset when it is used to index mids below.
    for index, dens in reversed(list(enumerate(wdensity[1:peak]))):
        if dens < 2 * baseline:
            cutoff = index
            break
    if not cutoff:
        return None
    else:
        # Convert the log10 midpoint back to a linear barcode count.
        cutoff = 10**mids[cutoff]
        # Lazy logger arguments: format only when INFO is enabled.
        logger.info('Setting barcode cutoff to %d', cutoff)
        return cutoff
Guesses at an appropriate barcode cutoff
def _ExpandArtifactFilesSource(self, source, requested):
    """Recursively expands an artifact files source."""
    expanded_source = rdf_artifacts.ExpandedSource(base_source=source)

    # Collect the sources of every referenced artifact, skipping any
    # artifact that has already been processed in this expansion.
    names = []
    if "artifact_list" in source.attributes:
        names = source.attributes["artifact_list"]

    sub_sources = []
    for artifact_name in names:
        if artifact_name in self.processed_artifacts:
            continue
        artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name)
        for expanded_artifact in self.Expand(artifact_obj, requested):
            sub_sources.extend(expanded_artifact.sources)

    expanded_source.artifact_sources = sub_sources
    expanded_source.path_type = self._path_type
    return [expanded_source]
Recursively expands an artifact files source.
def copy(self, sql, data, **kwargs):
    """
    Execute a ``COPY ... FROM STDIN`` statement, streaming *data* to the
    server.

    EXAMPLE:
    >> with open("/tmp/file.csv", "rb") as fs:
    >>     cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''",
    >>                 fs, buffer_size=65536)

    :param sql: the COPY statement to execute
    :param data: bytes, str, or an open file object providing the rows
    :param kwargs: forwarded to the CopyStream message (e.g. buffer_size)
    :raises errors.InterfaceError: if the cursor is closed
    :raises TypeError: if *data* is not bytes, str, or a file object
    :raises errors.QueryError: if the server rejects the statement
    """
    sql = as_text(sql)

    if self.closed():
        raise errors.InterfaceError('Cursor is closed')

    # Drain any pending messages from a previous statement first.
    self.flush_to_query_ready()

    # Wrap raw bytes/str in an in-memory stream; file objects are used as-is.
    if isinstance(data, binary_type):
        stream = BytesIO(data)
    elif isinstance(data, text_type):
        stream = StringIO(data)
    elif isinstance(data, file_type):
        stream = data
    else:
        raise TypeError("Not valid type of data {0}".format(type(data)))

    self.connection.write(messages.Query(sql))

    # Protocol loop: process server messages until ReadyForQuery; when the
    # server signals CopyInResponse, push the data stream and finish it.
    while True:
        message = self.connection.read_message()

        self._message = message
        if isinstance(message, messages.ErrorResponse):
            raise errors.QueryError.from_error_response(message, sql)

        self.connection.process_message(message=message)

        if isinstance(message, messages.ReadyForQuery):
            break
        elif isinstance(message, messages.CopyInResponse):
            self.connection.write(messages.CopyStream(stream, **kwargs))
            self.connection.write(messages.CopyDone())

    if self.error is not None:
        raise self.error
EXAMPLE: >> with open("/tmp/file.csv", "rb") as fs: >> cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''", >> fs, buffer_size=65536)
def free_numeric(self):
    """Free numeric data.

    Releases the numeric factorization handle (if any) and then frees
    the symbolic data as well.
    """
    numeric_handle = self._numeric
    if numeric_handle is not None:
        self.funs.free_numeric(numeric_handle)
        self._numeric = None
    # Symbolic data is always released, whether or not numeric data existed.
    self.free_symbolic()
Free numeric data
def __modify(self, withdrawal_id, **kwargs):
    """Call documentation: `/withdrawal/modify
    <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus
    extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
       ``access_token``, with ``batch_mode=True`` will set `authorization`
       param to it's value.

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`
    """
    # Only the required parameter goes in 'params'; make_call merges the
    # remaining kwargs and derives the endpoint from the bound method.
    params = {
        'withdrawal_id': withdrawal_id
    }
    return self.make_call(self.__modify, params, kwargs)
Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to it's value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay`
def requirements(requirements_file):
    """Return packages mentioned in the given file.

    Args:
        requirements_file (str): path to the requirements file to be parsed.

    Returns:
        (list): 3rd-party package dependencies contained in the file.
    """
    session = pip_download.PipSession()
    parsed = parse_requirements(requirements_file, session=session)
    # Editable/URL-only entries have no `req`; skip them.
    return [str(entry.req) for entry in parsed if entry.req is not None]
def all_selected_options(self):
    """Returns a list of all selected options belonging to this select tag"""
    return [option for option in self.options if option.is_selected()]
def from_eocube(eocube, ji):
    """Create a EOCubeChunk object from an EOCube object."""
    return EOCubeChunk(ji, eocube.df_layers, eocube.chunksize, eocube.wdir)
def render(engine, format, filepath, renderer=None, formatter=None, quiet=False):
    """Render file with Graphviz ``engine`` into ``format``, return result filename.

    Args:
        engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).
        format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
        filepath: Path to the DOT source file to render.
        renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
        formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
        quiet (bool): Suppress ``stderr`` output.

    Returns:
        The (possibly relative) path of the rendered file.

    Raises:
        ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
        graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
        graphviz.ExecutableNotFound: If the Graphviz executable is not found.
        subprocess.CalledProcessError: If the exit status is non-zero.
    """
    # Build the command line first (this validates the arguments), then run it.
    cmdline, output_path = command(engine, format, filepath, renderer, formatter)
    run(cmdline, capture_output=True, check=True, quiet=quiet)
    return output_path
def report(rel):
    """Fires if the machine is running Fedora."""
    if "Fedora" in rel.product:
        return make_pass("IS_FEDORA", product=rel.product)
    # Anything else is reported as a non-Fedora product.
    return make_fail("IS_NOT_FEDORA", product=rel.product)
def get_compression_extension(self):
    """
    Find the filename extension for the 'docker save' output, which
    may or may not be compressed.

    Raises OsbsValidationException if the extension cannot be
    determined due to a configuration error.

    :returns: str including leading dot, or else None if no compression
    """
    build_request = BuildRequest(build_json_store=self.os_conf.get_build_json_store())
    inner = build_request.inner_template
    postbuild_plugins = inner.get('postbuild_plugins', [])
    # Look for the 'compress' post-build plugin; its configured method
    # determines the file extension.
    for plugin in postbuild_plugins:
        if plugin.get('name') == 'compress':
            args = plugin.get('args', {})
            method = args.get('method', 'gzip')
            if method == 'gzip':
                return '.gz'
            elif method == 'lzma':
                return '.xz'
            # A compress plugin with an unrecognized method is a
            # configuration error.
            raise OsbsValidationException("unknown compression method '%s'"
                                          % method)

    # No compress plugin configured: output is uncompressed.
    return None
def ed25519_generate_key_pair_from_secret(secret):
    """
    Generate a new key pair.

    Args:
        secret (:class:`string`): A secret that serves as a seed

    Returns:
        A tuple of (private_key, public_key) encoded in base58.
    """
    # if you want to do this correctly, use a key derivation function!
    # (keccak-256 of the raw secret is NOT a proper KDF; kept as-is.)
    if not isinstance(secret, bytes):
        secret = secret.encode()

    # Derive the signing-key seed deterministically from the secret.
    hash_bytes = sha3.keccak_256(secret).digest()
    sk = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes)

    # Private key
    private_value_base58 = sk.encode(encoding='base58')

    # Public key
    public_value_compressed_base58 = sk.get_verifying_key().encode(encoding='base58')

    return private_value_base58, public_value_compressed_base58
def find_mrms_tracks(self):
    """
    Identify objects from MRMS timesteps and link them together with object matching.

    Returns:
        List of STObjects containing MESH track information.
    """
    obs_objects = []
    tracked_obs_objects = []
    if self.mrms_ew is not None:
        self.mrms_grid.load_data()
        # Require a full day of observations before attempting tracking.
        if len(self.mrms_grid.data) != len(self.hours):
            print('Less than 24 hours of observation data found')
            return tracked_obs_objects

        # Pass 1: segment each hourly grid into candidate objects.
        for h, hour in enumerate(self.hours):
            # Work on a copy with negative (missing) values zeroed out.
            mrms_data = np.zeros(self.mrms_grid.data[h].shape)
            mrms_data[:] = np.array(self.mrms_grid.data[h])
            mrms_data[mrms_data < 0] = 0
            # Smooth, label connected regions, then drop small regions.
            hour_labels = self.mrms_ew.size_filter(
                self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)),
                self.size_filter)
            hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
            obj_slices = find_objects(hour_labels)
            num_slices = len(obj_slices)
            obs_objects.append([])
            if num_slices > 0:
                for sl in obj_slices:
                    obs_objects[-1].append(STObject(mrms_data[sl],
                                                    np.where(hour_labels[sl] > 0, 1, 0),
                                                    self.model_grid.x[sl],
                                                    self.model_grid.y[sl],
                                                    self.model_grid.i[sl],
                                                    self.model_grid.j[sl],
                                                    hour,
                                                    hour,
                                                    dx=self.model_grid.dx))
                    if h > 0:
                        # Estimate motion against the previous hour's grid.
                        dims = obs_objects[-1][-1].timesteps[0].shape
                        obs_objects[-1][-1].estimate_motion(
                            hour, self.mrms_grid.data[h - 1], dims[1], dims[0])

        # Pass 2: link per-hour objects into tracks across hours.
        for h, hour in enumerate(self.hours):
            # Tracks that ended exactly one hour ago are continuation candidates.
            past_time_objs = []
            for obj in tracked_obs_objects:
                if obj.end_time == hour - 1:
                    past_time_objs.append(obj)
            if len(past_time_objs) == 0:
                # Nothing to extend: every current object starts a new track.
                tracked_obs_objects.extend(obs_objects[h])
            elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0:
                assignments = self.object_matcher.match_objects(
                    past_time_objs, obs_objects[h], hour - 1, hour)
                unpaired = list(range(len(obs_objects[h])))
                for pair in assignments:
                    past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
                    unpaired.remove(pair[1])
                # Unmatched current objects begin new tracks.
                if len(unpaired) > 0:
                    for up in unpaired:
                        tracked_obs_objects.append(obs_objects[h][up])
            print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(
                len(tracked_obs_objects), hour))
    return tracked_obs_objects
def update_function_configuration(self, vpc_config):
    """Update existing Lambda function configuration.

    Args:
        vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using
                           a VPC in lambda

    Raises:
        SystemExit: if the configured role lacks the
            ``ec2:CreateNetworkInterface`` permission needed for VPC mode.
    """
    LOG.info('Updating configuration for lambda function: %s', self.app_name)

    try:
        self.lambda_client.update_function_configuration(
            Environment=self.lambda_environment,
            FunctionName=self.app_name,
            Runtime=self.runtime,
            Role=self.role_arn,
            Handler=self.handler,
            Description=self.description,
            Timeout=int(self.timeout),
            MemorySize=int(self.memory),
            VpcConfig=vpc_config)

        # Reserved concurrency: set when configured, otherwise clear any
        # previously-set limit.
        if self.concurrency_limit:
            self.lambda_client.put_function_concurrency(
                FunctionName=self.app_name,
                ReservedConcurrentExecutions=self.concurrency_limit
            )
        else:
            self.lambda_client.delete_function_concurrency(FunctionName=self.app_name)

    except boto3.exceptions.botocore.exceptions.ClientError as error:
        # A missing EC2 permission manifests as a CreateNetworkInterface
        # error; report it clearly and abort.
        if 'CreateNetworkInterface' in error.response['Error']['Message']:
            message = '{0} is missing "ec2:CreateNetworkInterface"'.format(self.role_arn)
            LOG.debug(message)
            raise SystemExit(message)

        # Any other client error is unexpected; re-raise unchanged.
        raise

    LOG.info('Updating Lambda function tags')

    lambda_arn = get_lambda_arn(self.app_name, self.env, self.region)
    self.lambda_client.tag_resource(
        Resource=lambda_arn,
        Tags={'app_group': self.group, 'app_name': self.app_name})

    LOG.info("Successfully updated Lambda configuration.")
def run_gblocks(align_fasta_file, **kwargs):
    """Remove poorly aligned positions and divergent regions with Gblocks.

    :param align_fasta_file: path to the FASTA alignment to clean.
    :param kwargs: forwarded to GblocksCommandline.
    :returns: path of the Gblocks output (input path + "-gb"), or None if
        Gblocks failed or retained 10% or fewer positions.
    """
    cl = GblocksCommandline(aln_file=align_fasta_file, **kwargs)
    r, e = cl.run()
    print("Gblocks:", cl, file=sys.stderr)
    if e:
        print("***Gblocks could not run", file=sys.stderr)
        return None
    else:
        print(r, file=sys.stderr)
        # Extract the "retained positions" percentage from Gblocks' report.
        alignp = re.sub(r'.*Gblocks alignment:.*\(([0-9]{1,3}) %\).*',
                        r'\1', r, flags=re.DOTALL)
        alignp = int(alignp)
        if alignp <= 10:
            # Too little of the alignment survived; treat as a failure so the
            # caller falls back to the original alignment.
            print("** WARNING ** Only %s %% positions retained by Gblocks. "
                  "Results aborted. Using original alignment instead.\n" % alignp,
                  file=sys.stderr)
            return None
        else:
            return align_fasta_file + "-gb"
def kill_mprocess(process):
    """Terminate a child process if it is still running.

    Args:
        process - Popen object for process

    Returns:
        True if the process is no longer alive afterwards.
    """
    needs_kill = bool(process) and proc_alive(process)
    if needs_kill:
        process.terminate()
        # Reap the child so it does not linger as a zombie.
        process.communicate()
    return not proc_alive(process)
def createLoanOffer(self, currency, amount, duration, autoRenew,
                    lendingRate):
    """Creates a loan offer for a given currency. Required POST parameters
    are "currency", "amount", "duration", "autoRenew" (0 or 1), and
    "lendingRate". """
    params = {
        'currency': currency,
        'amount': amount,
        'duration': duration,
        'autoRenew': autoRenew,
        'lendingRate': lendingRate,
    }
    return self._private('createLoanOffer', **params)
def addProxyObject(self, obj, proxied):
    """
    Stores a reference to the unproxied and proxied versions of C{obj} for
    later retrieval.

    @since: 0.6
    """
    # Register both directions so either form can be looked up by identity.
    registry = self.proxied_objects
    registry[id(obj)] = proxied
    registry[id(proxied)] = obj
def to_repr(value, ctx):
    """
    Converts a value back to its representation form, e.g. x -> "x"
    """
    rendered = to_string(value, ctx)

    if isinstance(value, (str, datetime.date, datetime.time)):
        # Quote the rendered text, doubling embedded quotes to escape them.
        return '"%s"' % rendered.replace('"', '""')

    return rendered
def bin_matrix(M, subsampling_factor=3):
    """Bin either sparse or dense matrices.

    Dispatches to ``bin_sparse`` for scipy sparse matrices and to
    ``bin_dense`` otherwise, including when scipy is not installed at all.

    :param M: input matrix (scipy sparse matrix or dense array).
    :param subsampling_factor: binning factor forwarded to the helper.
    :return: the binned matrix produced by ``bin_sparse``/``bin_dense``.
    """
    # The previous implementation raised ImportError inside the try block as
    # a goto into the dense fallback; that also silently swallowed any real
    # ImportError raised by bin_sparse. Test the condition explicitly instead.
    try:
        from scipy.sparse import issparse
    except ImportError:
        return bin_dense(M, subsampling_factor=subsampling_factor)
    if issparse(M):
        return bin_sparse(M, subsampling_factor=subsampling_factor)
    return bin_dense(M, subsampling_factor=subsampling_factor)
def get_overlaps(self, offset, length):
    """Returns chunks overlapped with the given range.

    Args:
      offset (int): Begin offset of the range.
      length (int): Length of the range.

    Returns:
      Overlapped chunks. (:obj:`budou.chunk.ChunkList`)
    """
    # In case entity's offset points to a space just before the entity.
    joined_text = ''.join(chunk.word for chunk in self)
    if joined_text[offset] == ' ':
        offset += 1

    range_end = offset + length
    overlapped = ChunkList()
    position = 0
    for chunk in self:
        chunk_end = position + len(chunk.word)
        # A chunk overlaps the range iff it starts before the range ends and
        # ends after the range starts.
        if offset < chunk_end and position < range_end:
            overlapped.append(chunk)
        position = chunk_end
    return overlapped
def get_devices(self):
    """
    Helper that returns a dict of devices for this server.

    :return:
        Returns a tuple of two elements:
          - dict<tango class name : list of device names>
          - dict<device names : tango class name>
    :rtype: tuple<dict, dict>
    """
    if self.__util is None:
        import tango
        db = tango.Database()
    else:
        db = self.__util.get_database()
    server = self.server_instance
    # get_device_class_list returns a flat list alternating
    # device name, class name, device name, class name, ...
    dev_list = db.get_device_class_list(server)
    class_map, dev_map = {}, {}
    for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]):
        dev_names = class_map.get(class_name)
        if dev_names is None:
            class_map[class_name] = dev_names = []
        # Device names are case-insensitive in Tango; normalize to lowercase.
        dev_name = dev_name.lower()
        dev_names.append(dev_name)
        dev_map[dev_name] = class_name
    return class_map, dev_map
def init(opts):
    '''
    This function gets called when the proxy starts up.

    Validates that ``host``, ``username`` and ``password`` are present under
    ``opts['proxy']``, stores connection details in the module-level
    ``DETAILS`` dict, and verifies connectivity to the CIMC endpoint.

    :param opts: proxy configuration; connection keys live under
        ``opts['proxy']``.
    :return: ``False`` when a required pillar key is missing; otherwise
        ``None`` (``DETAILS['initialized']`` is set to True on success).
    '''
    if 'host' not in opts['proxy']:
        log.critical('No \'host\' key found in pillar for this proxy.')
        return False
    if 'username' not in opts['proxy']:
        log.critical('No \'username\' key found in pillar for this proxy.')
        return False
    if 'password' not in opts['proxy']:
        # Fixed message: the key being checked is 'password' (the previous
        # message said 'passwords').
        log.critical('No \'password\' key found in pillar for this proxy.')
        return False

    DETAILS['url'] = 'https://{0}/nuova'.format(opts['proxy']['host'])
    DETAILS['headers'] = {'Content-Type': 'application/x-www-form-urlencoded',
                          'Content-Length': 62,
                          'USER-Agent': 'lwp-request/2.06'}

    # Set configuration details
    DETAILS['host'] = opts['proxy']['host']
    DETAILS['username'] = opts['proxy'].get('username')
    DETAILS['password'] = opts['proxy'].get('password')

    # Ensure connectivity to the device
    log.debug("Attempting to connect to cimc proxy host.")
    get_config_resolver_class("computeRackUnit")
    log.debug("Successfully connected to cimc proxy host.")

    DETAILS['initialized'] = True
def nodeDumpOutput(self, buf, cur, level, format, encoding):
    """Dump an XML node, recursive behaviour, children are printed
       too. Note that @format = 1 provide node indenting only if
       xmlIndentTreeOutput = 1 or xmlKeepBlanksDefault(0) was
       called """
    # Unwrap the Python wrapper objects to their underlying C pointers,
    # passing NULL through unchanged.
    buf__o = None if buf is None else buf._o
    cur__o = None if cur is None else cur._o
    libxml2mod.xmlNodeDumpOutput(buf__o, self._o, cur__o, level, format, encoding)
def add_application(self, application, sync=True):
    """
    add an application to this OS instance.

    :param application: the application to add on this OS instance
    :param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
    add the application object on list to be added on next save().
    :return:
    """
    LOGGER.debug("OSInstance.add_application")
    if not sync:
        # Deferred mode: queue the application for the next save().
        self.application_2_add.append(application)
    else:
        # The application must exist server-side before linking.
        if application.id is None:
            application.save()
        if self.id is not None and application.id is not None:
            params = {
                'id': self.id,
                'applicationID': application.id
            }
            args = {'http_operation': 'GET',
                    'operation_path': 'update/applications/add',
                    'parameters': params}
            response = OSInstanceService.requester.call(args)
            if response.rc != 0:
                # Server rejected the update; log and leave local state untouched.
                LOGGER.warning(
                    'OSInstance.add_application - Problem while updating OS instance ' +
                    self.name + '. Reason: ' + str(response.response_content) + '-' +
                    str(response.error_message) + " (" + str(response.rc) + ")"
                )
            else:
                # Keep the bidirectional link in sync locally.
                self.application_ids.append(application.id)
                application.osi_ids.append(self.id)
        else:
            LOGGER.warning(
                'OSInstance.add_application - Problem while updating OS instance ' +
                self.name + '. Reason: application ' + application.name +
                ' id is None'
            )
def requires_user(fn):
    """
    Requires that the calling Subject be *either* authenticated *or* remembered
    via RememberMe services before allowing access.

    This method essentially ensures that subject.identifiers IS NOT None

    :raises UnauthenticatedException: indicating that the decorated method is
                                      not allowed to be executed because the
                                      Subject attempted to perform a user-only
                                      operation
    """
    @functools.wraps(fn)
    def wrap(*args, **kwargs):
        current_subject = Yosai.get_current_subject()

        # A Subject with identifiers has been authenticated or remembered.
        if current_subject.identifiers is not None:
            return fn(*args, **kwargs)

        raise UnauthenticatedException(
            "Attempting to perform a user-only operation.  The "
            "current Subject is NOT a user (they haven't been "
            "authenticated or remembered from a previous login). "
            "ACCESS DENIED.")
    return wrap
def create(cls, *props, **kwargs):
    """
    Call to CREATE with parameters map. A new instance will be created and saved.

    :param props: dict of properties to create the nodes.
    :type props: tuple
    :param lazy: False by default, specify True to get nodes with id only without the parameters.
    :type: bool
    :rtype: list
    """
    if 'streaming' in kwargs:
        warnings.warn('streaming is not supported by bolt, please remove the kwarg',
                      category=DeprecationWarning, stacklevel=1)

    lazy = kwargs.get('lazy', False)
    # create mapped query
    query = "CREATE (n:{0} {{create_params}})".format(':'.join(cls.inherited_labels()))

    # close query
    if lazy:
        # Only the node id is returned; properties are not inflated.
        query += " RETURN id(n)"
    else:
        query += " RETURN n"

    results = []
    # One CREATE round-trip per property dict; deflate validates/serializes
    # each dict against the class schema before sending.
    for item in [cls.deflate(p, obj=_UnsavedNode(), skip_empty=True) for p in props]:
        node, _ = db.cypher_query(query, {'create_params': item})
        results.extend(node[0])

    nodes = [cls.inflate(node) for node in results]

    # post_create hook is skipped in lazy mode (no full node to act on).
    if not lazy and hasattr(cls, 'post_create'):
        for node in nodes:
            node.post_create()

    return nodes
def IsPathSuffix(mod_path, path):
    """Checks whether path is a full path suffix of mod_path.

    Args:
      mod_path: Must be an absolute path to a source file. Must not have
          file extension.
      path: A relative path. Must not have file extension.

    Returns:
      True if path is a full path suffix of mod_path. False otherwise.
    """
    if not mod_path.endswith(path):
        return False
    # Exact match counts as a full suffix.
    if len(mod_path) == len(path):
        return True
    # Otherwise the suffix must start at a path-component boundary.
    return mod_path[:-len(path)].endswith(os.sep)
def print_tools(self, pattern=None, buf=sys.stdout):
    """Print a list of visible tools.

    Tools from the active context are listed first; tools provided by loaded
    suites follow, skipping names already shown for the context.

    Args:
        pattern (str): Only list tools that match this glob pattern.
        buf (file-like): Stream the table is written to (default stdout).

    Returns:
        bool: True if at least one tool was printed, False otherwise.
    """
    seen = set()
    rows = []

    context = self.context
    if context:
        data = context.get_tools()
        conflicts = set(context.get_conflicting_tools().keys())

        for _, (variant, tools) in sorted(data.items()):
            pkg_str = variant.qualified_package_name
            for tool in tools:
                if pattern and not fnmatch(tool, pattern):
                    continue

                if tool in conflicts:
                    label = "(in conflict)"
                    color = critical
                else:
                    label = ''
                    color = None

                rows.append([tool, '-', pkg_str, "active context", label, color])
                seen.add(tool)

    for suite in self.suites:
        # NOTE: dict.iteritems() implies this module targets Python 2.
        for tool, d in suite.get_tools().iteritems():
            if tool in seen:
                continue
            if pattern and not fnmatch(tool, pattern):
                continue

            label = []
            color = None
            path = which(tool)
            if path:
                # Another executable earlier on PATH shadows this suite tool.
                path_ = os.path.join(suite.tools_path, tool)
                if path != path_:
                    label.append("(hidden by unknown tool '%s')" % path)
                    color = warning

            variant = d["variant"]
            if isinstance(variant, set):
                # Multiple variants provide this tool name.
                pkg_str = ", ".join(variant)
                label.append("(in conflict)")
                color = critical
            else:
                pkg_str = variant.qualified_package_name

            orig_tool = d["tool_name"]
            if orig_tool == tool:
                orig_tool = '-'

            label = ' '.join(label)
            source = ("context '%s' in suite '%s'"
                      % (d["context_name"], suite.load_path))
            rows.append([tool, orig_tool, pkg_str, source, label, color])
            seen.add(tool)

    _pr = Printer(buf)

    if not rows:
        _pr("No matching tools.")
        return False

    headers = [["TOOL", "ALIASING", "PACKAGE", "SOURCE", "", None],
               ["----", "--------", "-------", "------", "", None]]
    rows = headers + sorted(rows, key=lambda x: x[0].lower())
    print_colored_columns(_pr, rows)
    return True
def transform_data_fasttext(data, vocab, idx_to_counts, cbow, ngram_buckets,
                            ngrams, batch_size, window_size,
                            frequent_token_subsampling=1E-4, dtype='float32',
                            index_dtype='int64'):
    """Transform a DataStream of coded DataSets to a DataStream of batches.

    Parameters
    ----------
    data : gluonnlp.data.DataStream
        DataStream where each sample is a valid input to
        gluonnlp.data.EmbeddingCenterContextBatchify.
    vocab : gluonnlp.Vocab
        Vocabulary containing all tokens whose indices occur in data. For each
        token, it's associated subwords will be computed and used for
        constructing the batches. No subwords are used if ngram_buckets is 0.
    idx_to_counts : list of int
        List of integers such that idx_to_counts[idx] represents the count of
        vocab.idx_to_token[idx] in the underlying dataset. The count
        information is used to subsample frequent words in the dataset.
        Each token is independently dropped with probability 1 - sqrt(t /
        (count / sum_counts)) where t is the hyperparameter
        frequent_token_subsampling.
    cbow : boolean
        If True, batches for CBOW are returned.
    ngram_buckets : int
        Number of hash buckets to consider for the fastText
        nlp.vocab.NGramHashes subword function.
    ngrams : list of int
        For each integer n in the list, all ngrams of length n will be
        considered by the nlp.vocab.NGramHashes subword function.
    batch_size : int
        The returned data stream iterates over batches of batch_size.
    window_size : int
        The context window size for
        gluonnlp.data.EmbeddingCenterContextBatchify.
    frequent_token_subsampling : float
        Hyperparameter for subsampling. See idx_to_counts above for more
        information.
    dtype : str or np.dtype, default 'float32'
        Data type of data array.
    index_dtype : str or np.dtype, default 'int64'
        Data type of index arrays.

    Returns
    -------
    gluonnlp.data.DataStream
        Stream over batches. Each returned element is a list corresponding to
        the arguments for the forward pass of model.SG or model.CBOW
        respectively based on if cbow is False or True. If ngarm_buckets > 0,
        the returned sample will contain ngrams. Both model.SG or model.CBOW
        will handle them correctly as long as they are initialized with the
        subword_function returned as second argument by this function (see
        below).
    gluonnlp.vocab.NGramHashes
        The subword_function used for obtaining the subwords in the returned
        batches.

    """
    if ngram_buckets <= 0:
        raise ValueError('Invalid ngram_buckets. Use Word2Vec training '
                         'pipeline if not interested in ngrams.')

    sum_counts = float(sum(idx_to_counts))
    # Per-token discard probability: 1 - sqrt(t / f(w)), the frequent-word
    # subsampling scheme from the word2vec/fastText papers.
    idx_to_pdiscard = [
        1 - math.sqrt(frequent_token_subsampling / (count / sum_counts))
        for count in idx_to_counts]

    def subsample(shard):
        # Independently drop each token with its discard probability.
        return [[
            t for t, r in zip(sentence,
                              np.random.uniform(0, 1, size=len(sentence)))
            if r > idx_to_pdiscard[t]] for sentence in shard]

    data = data.transform(subsample)

    batchify = nlp.data.batchify.EmbeddingCenterContextBatchify(
        batch_size=batch_size, window_size=window_size, cbow=cbow,
        weight_dtype=dtype, index_dtype=index_dtype)
    data = data.transform(batchify)

    with print_time('prepare subwords'):
        subword_function = nlp.vocab.create_subword_function(
            'NGramHashes', ngrams=ngrams, num_subwords=ngram_buckets)

        # Store subword indices for all words in vocabulary
        idx_to_subwordidxs = list(subword_function(vocab.idx_to_token))
        # Flatten into one array plus a CSR-style offsets array so lookups
        # can slice subwords for a word without Python-level lists.
        subwordidxs = np.concatenate(idx_to_subwordidxs)
        subwordidxsptr = np.cumsum([
            len(subwordidxs) for subwordidxs in idx_to_subwordidxs])
        subwordidxsptr = np.concatenate([
            np.zeros(1, dtype=np.int64), subwordidxsptr])
        if cbow:
            subword_lookup = functools.partial(
                cbow_lookup, subwordidxs=subwordidxs,
                subwordidxsptr=subwordidxsptr, offset=len(vocab))
        else:
            subword_lookup = functools.partial(
                skipgram_lookup, subwordidxs=subwordidxs,
                subwordidxsptr=subwordidxsptr, offset=len(vocab))
        max_subwordidxs_len = max(len(s) for s in idx_to_subwordidxs)
        if max_subwordidxs_len > 500:
            warnings.warn(
                'The word with largest number of subwords '
                'has {} subwords, suggesting there are '
                'some noisy words in your vocabulary. '
                'You should filter out very long words '
                'to avoid memory issues.'.format(max_subwordidxs_len))

    data = UnchainStream(data)

    if cbow:
        batchify_fn = cbow_fasttext_batch
    else:
        batchify_fn = skipgram_fasttext_batch
    batchify_fn = functools.partial(
        batchify_fn, num_tokens=len(vocab) + len(subword_function),
        subword_lookup=subword_lookup, dtype=dtype, index_dtype=index_dtype)

    return data, batchify_fn, subword_function
def create(self, target, configuration_url=values.unset,
           configuration_method=values.unset,
           configuration_filters=values.unset,
           configuration_triggers=values.unset,
           configuration_flow_sid=values.unset,
           configuration_retry_count=values.unset,
           configuration_replay_after=values.unset,
           configuration_buffer_messages=values.unset,
           configuration_buffer_window=values.unset):
    """
    Create a new WebhookInstance

    :param WebhookInstance.Target target: The target of this webhook.
    :param unicode configuration_url: The absolute url the webhook request should be sent to.
    :param WebhookInstance.Method configuration_method: The HTTP method to be used when sending a webhook request.
    :param unicode configuration_filters: The list of events, firing webhook event for this Session.
    :param unicode configuration_triggers: The list of keywords, firing webhook event for this Session.
    :param unicode configuration_flow_sid: The studio flow sid, where the webhook should be sent to.
    :param unicode configuration_retry_count: The number of retries in case of webhook request failures.
    :param unicode configuration_replay_after: The message index for which and it's successors the webhook will be replayed.
    :param bool configuration_buffer_messages: The flag whether buffering should be applied to messages.
    :param unicode configuration_buffer_window: The period of buffering messages.

    :returns: Newly created WebhookInstance
    :rtype: twilio.rest.messaging.v1.session.webhook.WebhookInstance
    """
    # values.of drops unset entries so only supplied options are posted.
    data = values.of({
        'Target': target,
        'Configuration.Url': configuration_url,
        'Configuration.Method': configuration_method,
        'Configuration.Filters': serialize.map(configuration_filters, lambda e: e),
        'Configuration.Triggers': serialize.map(configuration_triggers, lambda e: e),
        'Configuration.FlowSid': configuration_flow_sid,
        'Configuration.RetryCount': configuration_retry_count,
        'Configuration.ReplayAfter': configuration_replay_after,
        'Configuration.BufferMessages': configuration_buffer_messages,
        'Configuration.BufferWindow': configuration_buffer_window,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return WebhookInstance(self._version, payload, session_sid=self._solution['session_sid'], )
def taskrouter(self):
    """
    Access the Taskrouter Twilio Domain

    :returns: Taskrouter Twilio Domain
    :rtype: twilio.rest.taskrouter.Taskrouter
    """
    # Lazily build the domain client on first access, then reuse it.
    if self._taskrouter is not None:
        return self._taskrouter
    from twilio.rest.taskrouter import Taskrouter
    self._taskrouter = Taskrouter(self)
    return self._taskrouter
def create_page_move(self, page_move_parameters, project, wiki_identifier, comment=None):
    """CreatePageMove.
    Creates a page move operation that updates the path and order of the page as provided in the parameters.
    :param :class:`<WikiPageMoveParameters> <azure.devops.v5_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page move operation parameters.
    :param str project: Project ID or project name
    :param str wiki_identifier: Wiki Id or name.
    :param str comment: Comment that is to be associated with this page move.
    :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>`
    """
    # Serialize the request body; independent of route/query construction.
    content = self._serialize.body(page_move_parameters, 'WikiPageMoveParameters')
    # Route values are only included when the caller supplied them.
    routes = {}
    if project is not None:
        routes['project'] = self._serialize.url('project', project, 'str')
    if wiki_identifier is not None:
        routes['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
    query = {}
    if comment is not None:
        query['comment'] = self._serialize.query('comment', comment, 'str')
    response = self._send(http_method='POST',
                          location_id='e37bbe71-cbae-49e5-9a4e-949143b9d910',
                          version='5.0',
                          route_values=routes,
                          query_parameters=query,
                          content=content)
    # Wrap the deserialized payload together with the ETag header.
    result = models.WikiPageMoveResponse()
    result.page_move = self._deserialize('WikiPageMove', response)
    result.eTag = response.headers.get('ETag')
    return result
CreatePageMove. Creates a page move operation that updates the path and order of the page as provided in the parameters. :param :class:`<WikiPageMoveParameters> <azure.devops.v5_0.wiki.models.WikiPageMoveParameters>` page_move_parameters: Page move operation parameters. :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param str comment: Comment that is to be associated with this page move. :rtype: :class:`<WikiPageMoveResponse> <azure.devops.v5_0.wiki.models.WikiPageMoveResponse>`
def close(self):
    """Release every static mask object held by this instance.

    Each entry in ``self.masklist`` is first cleared to drop its
    reference, then the mapping itself is replaced with an empty dict.
    """
    for name in self.masklist:
        self.masklist[name] = None
    self.masklist = {}
Deletes all static mask objects.
def check_next_arg(self, atype, avalue, add=True, check_extension=True):
    """Argument validity checking

    This method is usually used by the parser to check if detected
    argument is allowed for this command.

    We make a distinction between required and optional arguments. Optional
    (or tagged) arguments can be provided unordered but not the required
    ones.

    A special handling is also done for arguments that require an
    argument (example: the :comparator argument expects a string argument).

    The "testlist" type is checked separately as we can't know in advance
    how many arguments will be provided.

    If the argument is incorrect, the method raises the appropriate
    exception, or return False to let the parser handle the exception.

    :param atype: the argument's type
    :param avalue: the argument's value
    :param add: indicates if this argument should be recorded on success
    :param check_extension: raise ExtensionNotLoaded if extension not loaded
    :return: True on success, False otherwise
    """
    if not self.has_arguments():
        return False
    if self.iscomplete(atype, avalue):
        return False

    # A previously matched argument declared that it expects an extra
    # argument (e.g. ":comparator" expects a string): validate and
    # consume that extra argument here.
    if self.curarg is not None and "extra_arg" in self.curarg:
        condition = (
            atype in self.curarg["extra_arg"]["type"] and
            ("values" not in self.curarg["extra_arg"] or
             avalue in self.curarg["extra_arg"]["values"])
        )
        if condition:
            if add:
                self.extra_arguments[self.curarg["name"]] = avalue
            self.curarg = None
            return True
        raise BadValue(self.curarg["name"], avalue)

    failed = False
    pos = self.nextargpos
    # Walk the remaining argument definitions, starting from the next
    # unfilled required-argument position.
    while pos < len(self.args_definition):
        curarg = self.args_definition[pos]
        if curarg.get("required", False):
            if curarg["type"] == ["testlist"]:
                # "testlist" accepts an open-ended sequence of "test"
                # arguments, accumulated into a list.
                if atype != "test":
                    failed = True
                elif add:
                    if not curarg["name"] in self.arguments:
                        self.arguments[curarg["name"]] = []
                    self.arguments[curarg["name"]] += [avalue]
            elif not self.__is_valid_type(atype, curarg["type"]) or \
                    not self.__is_valid_value_for_arg(
                        curarg, avalue, check_extension):
                failed = True
            else:
                # Required argument matched: remember it and advance the
                # required-argument cursor.
                self.curarg = curarg
                self.rargs_cnt += 1
                self.nextargpos = pos + 1
                if add:
                    self.arguments[curarg["name"]] = avalue
            # Required arguments must appear in order: stop at the first.
            break

        # Optional (tagged) argument: may appear in any order.
        condition = (
            atype in curarg["type"] and
            self.__is_valid_value_for_arg(curarg, avalue, check_extension)
        )
        if condition:
            ext = curarg.get("extension")
            condition = (
                check_extension and ext and
                ext not in RequireCommand.loaded_extensions)
            if condition:
                raise ExtensionNotLoaded(ext)
            # If this tagged argument itself expects an extra argument for
            # this particular value, remember it for the next call.
            condition = (
                "extra_arg" in curarg and
                ("valid_for" not in curarg["extra_arg"] or
                 avalue in curarg["extra_arg"]["valid_for"])
            )
            if condition:
                self.curarg = curarg
            if add:
                self.arguments[curarg["name"]] = avalue
            break
        pos += 1

    if failed:
        raise BadArgument(self.name, avalue,
                          self.args_definition[pos]["type"])
    return True
Argument validity checking This method is usually used by the parser to check if detected argument is allowed for this command. We make a distinction between required and optional arguments. Optional (or tagged) arguments can be provided unordered but not the required ones. A special handling is also done for arguments that require an argument (example: the :comparator argument expects a string argument). The "testlist" type is checked separately as we can't know in advance how many arguments will be provided. If the argument is incorrect, the method raises the appropriate exception, or return False to let the parser handle the exception. :param atype: the argument's type :param avalue: the argument's value :param add: indicates if this argument should be recorded on success :param check_extension: raise ExtensionNotLoaded if extension not loaded :return: True on success, False otherwise
def p_ArrayLiteralContentList(p):
    '''
    ArrayLiteralContentList : ArrayLiteralContent
                            | ArrayLiteralContentList COMMA ArrayLiteralContent
    '''
    # NOTE: the docstring above IS the PLY grammar rule; do not edit it.
    if len(p) == 2:
        # Single-element production: start a fresh list with no tail.
        p[0] = ArrayLiteralContentList(None, p[1])
    else:
        # Recursive production: extend the existing list with p[3].
        p[0] = ArrayLiteralContentList(p[1], p[3])
ArrayLiteralContentList : ArrayLiteralContent | ArrayLiteralContentList COMMA ArrayLiteralContent
def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
    """
    Run this parsing plan with the given parser arguments.

    * Any exception raised while parsing is caught and wrapped into a
      ParsingException.
    * If the produced object does not match the expected type (or is
      None), an error is raised.

    :param logger: the logger to use during parsing (optional: None is
        supported)
    :param options: a dictionary of option sets. Each option set is
        identified with an id in the dictionary.
    :return: the parsed object, guaranteed to be of the expected type
    """
    try:
        result = self._execute(logger, options)
    except Exception as caught:
        raise ParsingException.create_for_caught_error(self.parser, self.obj_type,
                                                       self.obj_on_fs_to_parse,
                                                       caught, options)

    # Accept only a non-None result of the declared type; anything else
    # is reported as a wrong-type error.
    if result is not None and robust_isinstance(result, self.obj_type):
        return result
    raise WrongTypeCreatedError.create_for_wrong_result_type(self.parser, self.obj_type,
                                                             self.obj_on_fs_to_parse,
                                                             result, options)
Called to parse the object as described in this parsing plan, using the provided arguments for the parser. * Exceptions are caught and wrapped into ParsingException * If result does not match expected type, an error is thrown :param logger: the logger to use during parsing (optional: None is supported) :param options: a dictionary of option sets. Each option set is identified with an id in the dictionary. :return:
def filter_noexpand_columns(columns):
    """Split *columns* into non-noexpand and noexpand groups.

    Parameters
    ----------
    columns: sequence of str
        A sequence of strings to be split

    Returns
    -------
    Two lists, the first containing strings without the noexpand prefix,
    the second containing those that do with the prefix filtered out.
    """
    plain = []
    stripped = []
    skip = len(NOEXPAND_PREFIX)
    # Single pass: route each column to its bucket, removing the prefix
    # from the noexpand ones.
    for col in columns:
        if col.startswith(NOEXPAND_PREFIX):
            stripped.append(col[skip:])
        else:
            plain.append(col)
    return plain, stripped
Return columns not containing and containing the noexpand prefix. Parameters ---------- columns: sequence of str A sequence of strings to be split Returns ------- Two lists, the first containing strings without the noexpand prefix, the second containing those that do with the prefix filtered out.
def _qualifiers_tomof(qualifiers, indent, maxline=MAX_MOF_LINE):
    """
    Return a MOF string with the qualifier values, including the surrounding
    square brackets. Return an empty string if there are no qualifiers.

    The iteration order of the input dictionary is preserved in the output
    (qualifiers are not re-sorted).

    Normally multiline output, and may fold qualifiers into multiple lines.

    Parameters:

      qualifiers (NocaseDict): Qualifiers to format.

      indent (:term:`integer`): Number of spaces to indent each line of
        the returned string, counted to the opening bracket in the
        first line.

      maxline (:term:`integer`): Maximum MOF line length used when folding.

    Returns:

      :term:`unicode string`: MOF string.
    """
    if not qualifiers:
        return u''
    mof = []
    mof.append(_indent_str(indent))
    mof.append(u'[')
    # Column position just after the opening bracket, passed to each
    # qualifier so it can fold relative to the current line.
    line_pos = indent + 1
    mof_quals = []
    for q in qualifiers.itervalues():
        mof_quals.append(q.tomof(indent + 1 + MOF_INDENT, maxline, line_pos))
    # Qualifiers are separated by a comma and continue on a new line,
    # indented to line up inside the bracket.
    delim = ',\n' + _indent_str(indent + 1)
    mof.append(delim.join(mof_quals))
    mof.append(u']\n')
    return u''.join(mof)
Return a MOF string with the qualifier values, including the surrounding square brackets. The qualifiers are ordered by their name. Return empty string if no qualifiers. Normally multiline output and may fold qualifiers into multiple lines. The order of qualifiers is preserved. Parameters: qualifiers (NocaseDict): Qualifiers to format. indent (:term:`integer`): Number of spaces to indent each line of the returned string, counted to the opening bracket in the first line. Returns: :term:`unicode string`: MOF string.
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Each entry of *params* is written as a ``(key value ...)`` line.
    Values may be bool, int, float, str, or a list/tuple of those.

    Returns the path of the written file (``params.txt`` in the temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # BUGFIX: bool must be tested with isinstance() *before* int.
        # The old test "val in [True, False]" also matched the numbers
        # 0 and 1 (bool is a subclass of int, and 1 == True), so an
        # integer parameter of 1 was written as "true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if '.' not in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        # Unsupported types fall through and yield None, which is then
        # rendered as the literal text "None" (kept for backward
        # compatibility with the original behavior).

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text; the with-statement guarantees the file is closed even
    # if the write raises.
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))

    # Done
    return path
Write the parameter file in the format that elastix likes.
def _get_rule_changes(rules, _rules):
    '''
    given a list of desired rules (rules) and existing rules (_rules) return
    a list of rules to delete (to_delete) and to create (to_create)

    Desired rules are validated (protocol, cidr_ip vs. source-group
    exclusivity) before being compared against the existing rules with
    _check_rule().
    '''
    to_delete = []
    to_create = []
    # for each rule in state file
    # 1. validate rule
    # 2. determine if rule exists in existing security group rules
    for rule in rules:
        try:
            ip_protocol = six.text_type(rule.get('ip_protocol'))
        except KeyError:
            # NOTE(review): dict.get() never raises KeyError, so this
            # handler is dead code and the error below can never fire from
            # here; a missing ip_protocol becomes the string 'None'
            # instead. Kept as-is — confirm intent before changing.
            raise SaltInvocationError('ip_protocol, to_port, and from_port are'
                                      ' required arguments for security group'
                                      ' rules.')
        supported_protocols = ['tcp', '6', 6, 'udp', '17', 17,
                               'icmp', '1', 1, 'all', '-1', -1]
        # A protocol is also accepted if it is a numeric string <= 255.
        if ip_protocol not in supported_protocols and (not
                '{0}'.format(ip_protocol).isdigit() or int(ip_protocol) > 255):
            raise SaltInvocationError(
                'Invalid ip_protocol {0} specified in security group rule.'.format(ip_protocol))
        # For the 'all' case, we need to change the protocol name to '-1'.
        if ip_protocol == 'all':
            rule['ip_protocol'] = '-1'
        cidr_ip = rule.get('cidr_ip', None)
        group_name = rule.get('source_group_name', None)
        group_id = rule.get('source_group_group_id', None)
        # Exactly one source must be given: a CIDR, or a source group
        # identified by either name or id (not both).
        if cidr_ip and (group_id or group_name):
            raise SaltInvocationError('cidr_ip and source groups can not both'
                                      ' be specified in security group rules.')
        if group_id and group_name:
            raise SaltInvocationError('Either source_group_group_id or'
                                      ' source_group_name can be specified in'
                                      ' security group rules, but not both.')
        if not (cidr_ip or group_id or group_name):
            raise SaltInvocationError('cidr_ip, source_group_group_id, or'
                                      ' source_group_name must be provided for'
                                      ' security group rules.')
        rule_found = False
        # for each rule in existing security group ruleset determine if
        # new rule exists
        for _rule in _rules:
            if _check_rule(rule, _rule):
                rule_found = True
                break
        if not rule_found:
            to_create.append(rule)
    # for each rule in existing security group configuration
    # 1. determine if rules needed to be deleted
    for _rule in _rules:
        rule_found = False
        for rule in rules:
            if _check_rule(rule, _rule):
                rule_found = True
                break
        if not rule_found:
            # Can only supply name or id, not both. Since we're deleting
            # entries, it doesn't matter which we pick.
            _rule.pop('source_group_name', None)
            to_delete.append(_rule)
    log.debug('Rules to be deleted: %s', to_delete)
    log.debug('Rules to be created: %s', to_create)
    return (to_delete, to_create)
given a list of desired rules (rules) and existing rules (_rules) return a list of rules to delete (to_delete) and to create (to_create)
def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None):
    """
    NAME:
         BPRECESS
    PURPOSE:
         Precess positions from J2000.0 (FK5) to B1950.0 (FK4)
    EXPLANATION:
         Calculates the mean place of a star at B1950.0 on the FK4 system from
         the mean place at J2000.0 on the FK5 system.
    CALLING SEQUENCE:
         bprecess, ra, dec, ra_1950, dec_1950, [ MU_RADEC = , PARALLAX =
                                         RAD_VEL =, EPOCH = ]
    INPUTS:
         RA,DEC - Input J2000 right ascension and declination in *degrees*.
                 Scalar or N element vector
    OUTPUTS:
         RA_1950, DEC_1950 - The corresponding B1950 right ascension and
                 declination in *degrees*. Same number of elements as
                 RA,DEC but always double precision.
    OPTIONAL INPUT-OUTPUT KEYWORDS
         MU_RADEC - 2xN element double precision vector containing the proper
                   motion in seconds of arc per tropical *century* in right
                   ascension and declination.
         PARALLAX - N_element vector giving stellar parallax (seconds of arc)
         RAD_VEL  - N_element vector giving radial velocity in km/s

         The values of MU_RADEC, PARALLAX, and RADVEL will all be modified
         upon output to contain the values of these quantities in the
         B1950 system. The parallax and radial velocity will have a very
         minor influence on the B1950 position.

         EPOCH - scalar giving epoch of original observations, default 2000.0d
             This keyword value is only used if the MU_RADEC keyword is not set.
    NOTES:
         The algorithm is taken from the Explanatory Supplement to the
         Astronomical Almanac 1992, page 186. Also see Aoki et al (1983),
         A&A, 128,263

         BPRECESS distinguishes between the following two cases:
         (1) The proper motion is known and non-zero
         (2) the proper motion is unknown or known to be exactly zero (i.e.
                 extragalactic radio sources). In this case, the reverse of the
                 algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure
                 that the output proper motion is exactly zero. Better precision
                 can be achieved in this case by inputting the EPOCH of the
                 original observations.

         The error in using the IDL procedure PRECESS for converting between
         B1950 and J1950 can be up to 12", mainly in right ascension. If
         better accuracy than this is needed then BPRECESS should be used.

         An unsystematic comparison of BPRECESS with the IPAC precession
         routine (http://nedwww.ipac.caltech.edu/forms/calculator.html)
         always gives differences less than 0.15".
    EXAMPLE:
         The SAO2000 catalogue gives the J2000 position and proper motion for
         the star HD 119288. Find the B1950 position.

         RA(2000) = 13h 42m 12.740s      Dec(2000) = 8d 23' 17.69''
         Mu(RA) = -.0257 s/yr      Mu(Dec) = -.090 ''/yr

         IDL> mu_radec = 100D* [ -15D*.0257, -0.090 ]
         IDL> ra = ten(13, 42, 12.740)*15.D
         IDL> dec = ten(8, 23, 17.69)
         IDL> bprecess, ra, dec, ra1950, dec1950, mu_radec = mu_radec
         IDL> print, adstring(ra1950, dec1950,2)
                 ===> 13h 39m 44.526s    +08d 38' 28.63"
    REVISION HISTORY:
         Written,    W. Landsman                October, 1992
         Vectorized, W. Landsman                February, 1994
         Treat case where proper motion not known or exactly zero November 1994
         Handling of arrays larger than 32767   Lars L. Christensen, march, 1995
         Converted to IDL V5.0   W. Landsman   September 1997
         Fixed bug where A term not initialized for vector input
            W. Landsman    February 2000
         Converted to python   Sergey Koposov  july 2010
    """
    # Normalize scalar input to 1-element arrays; remember to unwrap at
    # the end.
    scal = True
    if isinstance(ra0, ndarray):
        ra = ra0
        dec = dec0
        n = ra.size
        scal = False
    else:
        n = 1
        ra = array([ra0])
        dec = array([dec0])
    if rad_vel is None:
        rad_vel = zeros(n)
    else:
        if not isinstance(rad_vel, ndarray):
            rad_vel = array([rad_vel],dtype=float)
        if rad_vel.size != n:
            raise Exception('ERROR - RAD_VEL keyword vector must be of the same length as RA and DEC')
    if (mu_radec is not None):
        if (array(mu_radec).size != 2 * n):
            raise Exception('ERROR - MU_RADEC keyword (proper motion) be dimensioned (2,' + strtrim(n, 2) + ')')
        mu_radec = mu_radec * 1.
    if parallax is None:
        parallax = zeros(n)
    else:
        if not isinstance(parallax, ndarray):
            parallax = array([parallax],dtype=float)
    if epoch is None:
        epoch = 2000.0e0
    radeg = 180.e0 / pi
    sec_to_radian = lambda x : deg2rad(x/3600.)

    # 6x6 FK5->FK4 transformation matrix (position and velocity parts),
    # from the Explanatory Supplement (1992), p. 186.
    m = array([array([+0.9999256795e0, -0.0111814828e0, -0.0048590040e0, -0.000551e0, -0.238560e0, +0.435730e0]),
               array([+0.0111814828e0, +0.9999374849e0, -0.0000271557e0, +0.238509e0, -0.002667e0, -0.008541e0]),
               array([+0.0048590039e0, -0.0000271771e0, +0.9999881946e0, -0.435614e0, +0.012254e0, +0.002117e0]),
               array([-0.00000242389840e0, +0.00000002710544e0, +0.00000001177742e0, +0.99990432e0, -0.01118145e0, -0.00485852e0]),
               array([-0.00000002710544e0, -0.00000242392702e0, +0.00000000006585e0, +0.01118145e0, +0.99991613e0, -0.00002716e0]),
               array([-0.00000001177742e0, +0.00000000006585e0, -0.00000242404995e0, +0.00485852e0, -0.00002717e0, +0.99996684e0])])

    a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0])           #in arc seconds per century

    ra_rad = deg2rad(ra)
    dec_rad = deg2rad(dec)
    cosra = cos(ra_rad)
    sinra = sin(ra_rad)
    cosdec = cos(dec_rad)
    sindec = sin(dec_rad)
    dec_1950 = dec * 0.
    ra_1950 = ra * 0.

    for i in range(n):
        # Following statement moved inside loop in Feb 2000.
        a = 1e-6 * array([-1.62557e0, -0.31919e0, -0.13843e0])        #in radians
        r0 = array([cosra[i] * cosdec[i], sinra[i] * cosdec[i], sindec[i]])
        if (mu_radec is not None):
            mu_a = mu_radec[i,0]
            mu_d = mu_radec[i,1]
            # Space-motion vector: proper motion plus the radial-velocity
            # contribution scaled by parallax (21.095 converts km/s to
            # the units used here).
            r0_dot = array([-mu_a * sinra[i] * cosdec[i] - mu_d * cosra[i] * sindec[i],
                            mu_a * cosra[i] * cosdec[i] - mu_d * sinra[i] * sindec[i],
                            mu_d * cosdec[i]]) + 21.095e0 * rad_vel[i] * parallax[i] * r0
        else:
            r0_dot = array([0.0e0, 0.0e0, 0.0e0])
        r_0 = concatenate((r0, r0_dot))
        r_1 = transpose(dot(transpose(m), transpose(r_0)))
        # Include the effects of the E-terms of aberration to form r and r_dot.
        r1 = r_1[0:3]
        r1_dot = r_1[3:6]
        if mu_radec is None:
            r1 = r1 + sec_to_radian ( r1_dot * (epoch - 1950.0e0) / 100. )
            a = a + sec_to_radian ( a_dot * (epoch - 1950.0e0) / 100. )
        x1 = r_1[0]   ;   y1 = r_1[1]    ;   z1 = r_1[2]
        rmag = sqrt(x1 ** 2 + y1 ** 2 + z1 ** 2)
        s1 = r1 / rmag    ;  s1_dot = r1_dot / rmag
        s = s1
        # Iteratively remove the E-terms of aberration (3 iterations).
        for j in arange(0, 3):
            r = s1 + a - ((s * a).sum()) * s
            s = r / rmag
        x = r[0]          ;   y = r[1]    ;  z = r[2]
        r2 = x ** 2 + y ** 2 + z ** 2
        rmag = sqrt(r2)
        if mu_radec is not None:
            r_dot = s1_dot + a_dot - ((s * a_dot).sum()) * s
            x_dot = r_dot[0]  ; y_dot = r_dot[1]  ;  z_dot = r_dot[2]
            mu_radec[i,0] = (x * y_dot - y * x_dot) / (x ** 2 + y ** 2)
            mu_radec[i,1] = (z_dot * (x ** 2 + y ** 2) - z * (x * x_dot + y * y_dot)) / (r2 * sqrt(x ** 2 + y ** 2))
        dec_1950[i] = arcsin(z / rmag)
        ra_1950[i] = arctan2(y, x)
        # NOTE(review): x_dot/y_dot/z_dot are only bound when mu_radec is
        # given; a positive parallax with mu_radec=None would raise
        # NameError here. This mirrors the upstream astrolibpy code —
        # confirm before relying on that combination.
        if parallax[i] > 0.:
            rad_vel[i] = (x * x_dot + y * y_dot + z * z_dot) / (21.095 * parallax[i] * rmag)
            parallax[i] = parallax[i] / rmag

    # Wrap negative right ascensions into [0, 2*pi).
    neg = (ra_1950 < 0)
    # NOTE(review): .any() already returns a bool, so the "> 0" comparison
    # is redundant but harmless.
    if neg.any() > 0:
        ra_1950[neg] = ra_1950[neg] + 2.e0 * pi
    ra_1950 = rad2deg(ra_1950)
    dec_1950 = rad2deg(dec_1950)

    # Make output scalar if input was scalar
    if scal:
        return ra_1950[0],dec_1950[0]
    else:
        return ra_1950, dec_1950
NAME: BPRECESS PURPOSE: Precess positions from J2000.0 (FK5) to B1950.0 (FK4) EXPLANATION: Calculates the mean place of a star at B1950.0 on the FK4 system from the mean place at J2000.0 on the FK5 system. CALLING SEQUENCE: bprecess, ra, dec, ra_1950, dec_1950, [ MU_RADEC = , PARALLAX = RAD_VEL =, EPOCH = ] INPUTS: RA,DEC - Input J2000 right ascension and declination in *degrees*. Scalar or N element vector OUTPUTS: RA_1950, DEC_1950 - The corresponding B1950 right ascension and declination in *degrees*. Same number of elements as RA,DEC but always double precision. OPTIONAL INPUT-OUTPUT KEYWORDS MU_RADEC - 2xN element double precision vector containing the proper motion in seconds of arc per tropical *century* in right ascension and declination. PARALLAX - N_element vector giving stellar parallax (seconds of arc) RAD_VEL - N_element vector giving radial velocity in km/s The values of MU_RADEC, PARALLAX, and RADVEL will all be modified upon output to contain the values of these quantities in the B1950 system. The parallax and radial velocity will have a very minor influence on the B1950 position. EPOCH - scalar giving epoch of original observations, default 2000.0d This keyword value is only used if the MU_RADEC keyword is not set. NOTES: The algorithm is taken from the Explanatory Supplement to the Astronomical Almanac 1992, page 186. Also see Aoki et al (1983), A&A, 128,263 BPRECESS distinguishes between the following two cases: (1) The proper motion is known and non-zero (2) the proper motion is unknown or known to be exactly zero (i.e. extragalactic radio sources). In this case, the reverse of the algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure that the output proper motion is exactly zero. Better precision can be achieved in this case by inputting the EPOCH of the original observations. The error in using the IDL procedure PRECESS for converting between B1950 and J1950 can be up to 12", mainly in right ascension. 
If better accuracy than this is needed then BPRECESS should be used. An unsystematic comparison of BPRECESS with the IPAC precession routine (http://nedwww.ipac.caltech.edu/forms/calculator.html) always gives differences less than 0.15". EXAMPLE: The SAO2000 catalogue gives the J2000 position and proper motion for the star HD 119288. Find the B1950 position. RA(2000) = 13h 42m 12.740s Dec(2000) = 8d 23' 17.69'' Mu(RA) = -.0257 s/yr Mu(Dec) = -.090 ''/yr IDL> mu_radec = 100D* [ -15D*.0257, -0.090 ] IDL> ra = ten(13, 42, 12.740)*15.D IDL> dec = ten(8, 23, 17.69) IDL> bprecess, ra, dec, ra1950, dec1950, mu_radec = mu_radec IDL> print, adstring(ra1950, dec1950,2) ===> 13h 39m 44.526s +08d 38' 28.63" REVISION HISTORY: Written, W. Landsman October, 1992 Vectorized, W. Landsman February, 1994 Treat case where proper motion not known or exactly zero November 1994 Handling of arrays larger than 32767 Lars L. Christensen, march, 1995 Converted to IDL V5.0 W. Landsman September 1997 Fixed bug where A term not initialized for vector input W. Landsman February 2000 Converted to python Sergey Koposov july 2010
def remove(self, key):
    """Remove the mapping for *key* from this map, if one exists.

    After the call returns, the map holds no mapping for the given key.

    **Warning: This method uses __hash__ and __eq__ methods of binary
    form of the key, not the actual implementations of __hash__ and
    __eq__ defined in key's class.**

    :param key: (object), key of the mapping to be deleted.
    :return: (object), the previous value associated with key, or ``None``
        if there was no mapping for key.
    """
    check_not_none(key, "key can't be None")
    return self._remove_internal(self._to_data(key))
Removes the mapping for a key from this map if it is present. The map will not contain a mapping for the specified key once the call returns. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key of the mapping to be deleted. :return: (object), the previous value associated with key, or ``None`` if there was no mapping for key.
def create_dep(self, ):
    """Create a dep and store it in the self.dep

    Reads the widget values (name, short label, asset flag, order value,
    description), creates a new Department linked to ``self.projects``,
    stores it on ``self.dep`` and accepts the dialog. On failure the
    error is logged and the dialog stays open.

    :returns: None
    :rtype: None
    :raises: None
    """
    name = self.name_le.text()
    short = self.short_le.text()
    assetflag = self.asset_rb.isChecked()
    ordervalue = self.ordervalue_sb.value()
    desc = self.desc_pte.toPlainText()
    try:
        dep = djadapter.models.Department(name=name, short=short, assetflag=assetflag,
                                          ordervalue=ordervalue, description=desc)
        dep.save()
        for prj in self.projects:
            dep.projects.add(prj)
        self.dep = dep
        self.accept()
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt. Only real errors should be logged here.
        log.exception("Could not create new department.")
Create a dep and store it in the self.dep :returns: None :rtype: None :raises: None
def bbduk_trim(forward_in, forward_out, reverse_in='NA', reverse_out='NA', returncmd=False, **kwargs):
    """
    Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline,
    but these can be overwritten by using keyword parameters.
    :param forward_in: Forward reads you want to quality trim.
    :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
    :param forward_out: Output forward reads.
    :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used.
    :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used.
    :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list.
    :return: out and err: stdout string and stderr string from running bbduk.
    """
    options = kwargs_to_string(kwargs)
    # Fail early if bbduk.sh is not on the PATH.
    cmd = 'which bbduk.sh'
    try:
        subprocess.check_output(cmd.split()).decode('utf-8')
    except subprocess.CalledProcessError:
        # BUGFIX: corrected 'Plase' -> 'Please' in the user-facing message.
        print('ERROR: Could not find bbduk. Please check that the bbtools package is installed and on your $PATH.\n\n')
        raise FileNotFoundError
    # The paired-end command is identical in the auto-detected and
    # explicit branches below, so build it from one shared template.
    paired_template = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 ' \
                      'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}'
    if os.path.isfile(forward_in.replace('_R1', '_R2')) and reverse_in == 'NA' and '_R1' in forward_in:
        # Infer the reverse files from the _R1/_R2 naming convention.
        reverse_in = forward_in.replace('_R1', '_R2')
        if reverse_out == 'NA':
            if '_R1' in forward_out:
                reverse_out = forward_out.replace('_R1', '_R2')
            else:
                raise ValueError('If you do not specify reverse_out, forward_out must contain R1.\n\n')
        cmd = paired_template.format(f_in=forward_in, r_in=reverse_in, f_out=forward_out,
                                     r_out=reverse_out, optn=options)
    elif reverse_in == 'NA':
        # Single-ended input.
        cmd = 'bbduk.sh in={f_in} out={f_out} qtrim=w trimq=20 k=25 minlength=50 forcetrimleft=15' \
              ' ref=adapters overwrite hdist=1 tpe tbo{optn}'\
            .format(f_in=forward_in, f_out=forward_out, optn=options)
    else:
        # Explicit paired-end input: the reverse output must be given too.
        if reverse_out == 'NA':
            raise ValueError('Reverse output reads must be specified.')
        cmd = paired_template.format(f_in=forward_in, r_in=reverse_in, f_out=forward_out,
                                     r_out=reverse_out, optn=options)
    out, err = run_subprocess(cmd)
    if returncmd:
        return out, err, cmd
    else:
        return out, err
Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can be overwritten by using keyword parameters. :param forward_in: Forward reads you want to quality trim. :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value. :param forward_out: Output forward reads. :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used. :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used. :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list. :return: out and err: stdout string and stderr string from running bbduk.
def draw_chimera_embedding(G, *args, **kwargs):
    """Draws an embedding onto the chimera graph G, according to layout.

    If interaction_edges is not None, then only display the couplers in
    that list. If embedded_graph is not None, then only display the
    couplers between chains with intended couplings according to
    embedded_graph.

    Parameters
    ----------
    G : NetworkX graph
        Should be a Chimera graph or a subgraph of a Chimera graph.

    emb : dict
        A dict of chains associated with each node in G. Should be
        of the form {node: chain, ...}. Chains should be iterables
        of qubit labels (qubits are nodes in G).

    embedded_graph : NetworkX graph (optional, default None)
        A graph which contains all keys of emb as nodes. If specified,
        edges of G will be considered interactions if and only if they
        exist between two chains of emb if their keys are connected by
        an edge in embedded_graph

    interaction_edges : list (optional, default None)
        A list of edges which will be used as interactions.

    show_labels: boolean (optional, default False)
        If show_labels is True, then each chain in emb is labelled with
        its key.

    chain_color : dict (optional, default None)
        A dict of colors associated with each key in emb. Should be
        of the form {node: rgba_color, ...}. Colors should be length-4
        tuples of floats between 0 and 1 inclusive. If chain_color is
        None, each chain will be assigned a different color.

    unused_color : tuple (optional, default (0.9,0.9,0.9,1.0))
        The color to use for nodes and edges of G which are not involved
        in chains, and edges which are neither chain edges nor
        interactions. If unused_color is None, these nodes and edges
        will not be shown at all.

    kwargs : optional keywords
       See networkx.draw_networkx() for a description of optional
       keywords, with the exception of the `pos` parameter which is not
       used by this function. If `linear_biases` or `quadratic_biases`
       are provided, any provided `node_color` or `edge_color` arguments
       are ignored.
    """
    # Delegate to the generic embedding drawer, supplying the
    # Chimera-specific node positions computed from G.
    draw_embedding(G, chimera_layout(G), *args, **kwargs)
Draws an embedding onto the chimera graph G, according to layout. If interaction_edges is not None, then only display the couplers in that list. If embedded_graph is not None, then only display the couplers between chains with intended couplings according to embedded_graph. Parameters ---------- G : NetworkX graph Should be a Chimera graph or a subgraph of a Chimera graph. emb : dict A dict of chains associated with each node in G. Should be of the form {node: chain, ...}. Chains should be iterables of qubit labels (qubits are nodes in G). embedded_graph : NetworkX graph (optional, default None) A graph which contains all keys of emb as nodes. If specified, edges of G will be considered interactions if and only if they exist between two chains of emb if their keys are connected by an edge in embedded_graph interaction_edges : list (optional, default None) A list of edges which will be used as interactions. show_labels: boolean (optional, default False) If show_labels is True, then each chain in emb is labelled with its key. chain_color : dict (optional, default None) A dict of colors associated with each key in emb. Should be of the form {node: rgba_color, ...}. Colors should be length-4 tuples of floats between 0 and 1 inclusive. If chain_color is None, each chain will be assigned a different color. unused_color : tuple (optional, default (0.9,0.9,0.9,1.0)) The color to use for nodes and edges of G which are not involved in chains, and edges which are neither chain edges nor interactions. If unused_color is None, these nodes and edges will not be shown at all. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored.
def __add_token_annotation_tier(self, tier):
    """Add a tier to the document graph, in which each event annotates
    exactly one token.

    Parameters
    ----------
    tier : lxml/etree element, presumably an EXMARaLDA ``<tier>`` —
        TODO confirm against caller. Each ``<event>`` child carries a
        ``start`` attribute naming the token node it annotates.
    """
    # The annotation key only depends on the tier, not on the event,
    # so compute it once instead of once per event.
    anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
    for event in tier.iter('event'):
        # an <event> with no text yields an empty-string annotation
        anno_val = event.text if event.text else ''
        self.node[event.attrib['start']][anno_key] = anno_val
adds a tier to the document graph, in which each event annotates exactly one token.
def current_changed(self, i):
    """Slot for when the current index changes.

    Emits the :data:`AbstractLevel.new_root` signal with the model index
    for row ``i`` (column 0) under the current root model index.

    :param i: the row of the new current index
    :type i: int
    :returns: None
    :rtype: None
    :raises: None
    """
    m = self.model()
    ri = self.rootModelIndex()
    index = m.index(i, 0, ri)
    self.new_root.emit(index)
Slot for when the current index changes. Emits the :data:`AbstractLevel.new_root` signal. :param i: the row of the new current index :type i: int :returns: None :rtype: None :raises: None
def namePop(ctxt):
    """Pops the top element name from the name stack """
    # unwrap the Python wrapper object to the underlying C-level context
    ctxt__o = None if ctxt is None else ctxt._o
    return libxml2mod.namePop(ctxt__o)
Pops the top element name from the name stack
def get_string(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False,
               default=None, options=None):
    """
    Get the value corresponding to the key and convert it to `str`/`list(str)`.

    Args:
        key: the dict key.
        is_list: If this is one element or a list of elements.
        is_optional: To raise an error if key was not found.
        is_secret: If the key is a secret.
        is_local: If the key is a local to this service.
        default: default value if is_optional is True.
        options: list/tuple if provided, the value must be one of these values.

    Returns:
         `str`: value corresponding to the key.
    """
    # Select the list or scalar getter, then forward identical arguments.
    getter = self._get_typed_list_value if is_list else self._get_typed_value
    return getter(key=key,
                  target_type=str,
                  type_convert=str,
                  is_optional=is_optional,
                  is_secret=is_secret,
                  is_local=is_local,
                  default=default,
                  options=options)
Get the value corresponding to the key and convert it to `str`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key.
def get_map(name, map_type, number, reverse=False):
    """
    Return a `BrewerMap` representation of the specified color map.

    Parameters
    ----------
    name : str
        Name of color map. Use `print_maps` to see available color maps.
    map_type : {'Sequential', 'Diverging', 'Qualitative'}
        Select color map type.
    number : int
        Number of defined colors in color map.
    reverse : bool, optional
        Set to True to get the reversed color map.

    Returns
    -------
    BrewerMap

    Raises
    ------
    ValueError
        If `map_type`, `name`, or `number` does not identify a defined
        color map.

    """
    # COLOR_MAPS is keyed by strings, so normalize the requested number
    number = str(number)
    # stored types are capitalized ('Sequential', ...); accept any case
    map_type = map_type.lower().capitalize()

    # check for valid type
    if map_type not in MAP_TYPES:
        s = 'Invalid map type, must be one of {0}'.format(MAP_TYPES)
        raise ValueError(s)

    # make a dict of lower case map name to map name so this can be
    # insensitive to case.
    # (dict(genexp) instead of a dict comprehension to preserve
    # Python 2.6 compatibility.)
    map_names = dict((k.lower(), k) for k in COLOR_MAPS[map_type].keys())

    # check for valid name
    if name.lower() not in map_names:
        s = 'Invalid color map name {0!r} for type {1!r}.\n'
        s = s.format(name, map_type)
        valid_names = sorted(str(k) for k in COLOR_MAPS[map_type].keys())
        s += 'Valid names are: {0}'.format(valid_names)
        raise ValueError(s)
    name = map_names[name.lower()]

    # check for valid number
    if number not in COLOR_MAPS[map_type][name]:
        s = 'Invalid number for map type {0!r} and name {1!r}.\n'
        s = s.format(map_type, str(name))
        valid_numbers = sorted(int(k) for k in COLOR_MAPS[map_type][name].keys())
        s += 'Valid numbers are : {0}'.format(valid_numbers)
        raise ValueError(s)

    colors = COLOR_MAPS[map_type][name][number]['Colors']
    if reverse:
        # a reversed map is distinguished with an '_r' suffix
        # (matplotlib-style naming)
        name += '_r'
        colors = list(reversed(colors))

    return BrewerMap(name, map_type, colors)
Return a `BrewerMap` representation of the specified color map. Parameters ---------- name : str Name of color map. Use `print_maps` to see available color maps. map_type : {'Sequential', 'Diverging', 'Qualitative'} Select color map type. number : int Number of defined colors in color map. reverse : bool, optional Set to True to get the reversed color map.
def get_handler(self):
    """Create a fully configured CloudLoggingHandler.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
            CloudLoggingHandler`
    """
    # Labels identifying the monitored resource this process runs on.
    labels = {
        'resource_id': self.instance_id,
        'resource_project': self.project_id,
        'resource_zone': self.zone,
        'resource_host': self.hostname,
    }
    client = gcl_logging.Client(
        project=self.project_id,
        credentials=self.credentials)
    handler = gcl_handlers.CloudLoggingHandler(
        client,
        resource=self.resource,
        labels=labels)
    handler.setFormatter(self.get_formatter())
    self._set_worker_thread_level()
    return handler
Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler`
def h_all_pairs(gbm, array_or_frame, indices_or_columns = 'all'):
    """
    PURPOSE

    Compute Friedman and Popescu's two-variable H statistic, in order to
    look for an interaction in the passed gradient-boosting model between
    each pair of variables represented by the elements of the passed array
    or frame and specified by the passed indices or columns.

    See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive
    learning via rule ensembles", Ann. Appl. Stat. 2:916-954,
    http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046,
    s. 8.1.

    ARGUMENTS

    gbm should be a scikit-learn gradient-boosting model (instance of
    sklearn.ensemble.GradientBoostingClassifier or
    sklearn.ensemble.GradientBoostingRegressor) that has been fitted to
    array_or_frame (and a target, not used here).

    array_or_frame should be a two-dimensional NumPy array or a pandas
    data frame (instance of numpy.ndarray or pandas.DataFrame).

    indices_or_columns is optional, with default value 'all'. It should be
    'all' or a list of indices of columns of array_or_frame if
    array_or_frame is a NumPy array or a list of columns of array_or_frame
    if array_or_frame is a pandas data frame. If it is 'all', then all
    columns of array_or_frame are used.

    RETURNS

    A dict whose keys are pairs (2-tuples) of indices or columns and whose
    values are the H statistic of the pairs of variables or NaN if a
    computation is spoiled by weak main effects and rounding errors.

    H varies from 0 to 1. The larger H, the stronger the evidence for an
    interaction between a pair of variables.

    EXAMPLE

    Friedman and Popescu's (2008) formula (44) for every j and k
    corresponds to

        h_all_pairs(F, x)

    NOTES

    1. Per Friedman and Popescu, only variables with strong main effects
    should be examined for interactions. Strengths of main effects are
    available as gbm.feature_importances_ once gbm has been fitted.

    2. Per Friedman and Popescu, collinearity among variables can lead to
    interactions in gbm that are not present in the target function.
    To forestall such spurious interactions, check for strong correlations
    among variables before fitting gbm.
    """
    # Trees of depth 1 (stumps) cannot represent interactions at all.
    if gbm.max_depth < 2:
        raise Exception("gbm.max_depth must be at least 2.")
    check_args_contd(array_or_frame, indices_or_columns)

    arr, model_inds = get_arr_and_model_inds(array_or_frame,
                                             indices_or_columns)

    width = arr.shape[1]
    f_vals = {}
    # Partial-dependence values for all pairs AND all singletons: the
    # pairwise H computation needs the one-variable F values too.
    for n in [2, 1]:
        for inds in itertools.combinations(range(width), n):
            f_vals[inds] = compute_f_vals(gbm, model_inds, arr, inds)

    h_vals = {}
    for inds in itertools.combinations(range(width), 2):
        h_vals[inds] = compute_h_val(f_vals, arr, inds)
    # Re-key from positions within the selected subset back to positions
    # in the full array (numpy fancy-indexing with the pair of indices).
    if indices_or_columns != 'all':
        h_vals = {tuple(model_inds[(inds,)]): h_vals[inds]
                  for inds in h_vals.keys()}
    # For data frames, re-key once more from column positions to labels.
    if not isinstance(array_or_frame, np.ndarray):
        all_cols = array_or_frame.columns.values
        h_vals = {tuple(all_cols[(inds,)]): h_vals[inds]
                  for inds in h_vals.keys()}

    return h_vals
PURPOSE Compute Friedman and Popescu's two-variable H statistic, in order to look for an interaction in the passed gradient- boosting model between each pair of variables represented by the elements of the passed array or frame and specified by the passed indices or columns. See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles", Ann. Appl. Stat. 2:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1. ARGUMENTS gbm should be a scikit-learn gradient-boosting model (instance of sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.GradientBoostingRegressor) that has been fitted to array_or_frame (and a target, not used here). array_or_frame should be a two-dimensional NumPy array or a pandas data frame (instance of numpy.ndarray or pandas .DataFrame). indices_or_columns is optional, with default value 'all'. It should be 'all' or a list of indices of columns of array_or_frame if array_or_frame is a NumPy array or a list of columns of array_or_frame if array_or_frame is a pandas data frame. If it is 'all', then all columns of array_or_frame are used. RETURNS A dict whose keys are pairs (2-tuples) of indices or columns and whose values are the H statistic of the pairs of variables or NaN if a computation is spoiled by weak main effects and rounding errors. H varies from 0 to 1. The larger H, the stronger the evidence for an interaction between a pair of variables. EXAMPLE Friedman and Popescu's (2008) formula (44) for every j and k corresponds to h_all_pairs(F, x) NOTES 1. Per Friedman and Popescu, only variables with strong main effects should be examined for interactions. Strengths of main effects are available as gbm.feature_importances_ once gbm has been fitted. 2. Per Friedman and Popescu, collinearity among variables can lead to interactions in gbm that are not present in the target function. 
To forestall such spurious interactions, check for strong correlations among variables before fitting gbm.
def away(self, msg=''):
    """ Sets/unsets your away status.
    Optional arguments:
    * msg='' - Away reason; an empty message clears the away status.
    """
    with self.lock:
        self.send('AWAY :%s' % msg)
        if self.readable():
            # 306 = RPL_NOWAWAY, 305 = RPL_UNAWAY
            reply = self._recv(expected_replies=('306', '305'))
            if reply[0] == '306':
                self.is_away = True
            elif reply[0] == '305':
                self.is_away = False
Sets/unsets your away status. Optional arguments: * msg='' - Away reason.