def _check_and_send(self):
    """Check and send all pending/queued messages that are not waiting on retry timeout

    After composing the to-be-sent message, also clean the message queue of
    messages that are no longer present in the respective SendMessageEvent queue
    """
    if self.transport._stop_event.ready() or not self.transport.greenlet:
        self.log.error("Can't retry - stopped")
        return
    if self.transport._prioritize_global_messages:
        # During startup global messages have to be sent first
        self.transport._global_send_queue.join()
    self.log.debug('Retrying message', receiver=to_normalized_address(self.receiver))
    status = self.transport._address_mgr.get_address_reachability(self.receiver)
    if status is not AddressReachability.REACHABLE:
        # if partner is not reachable, return
        self.log.debug(
            'Partner not reachable. Skipping.',
            partner=pex(self.receiver),
            status=status,
        )
        return
    # sort output by channel_identifier (so global/unordered queue goes first)
    # inside queue, preserve order in which messages were enqueued
    ordered_queue = sorted(
        self._message_queue,
        key=lambda d: d.queue_identifier.channel_identifier,
    )
    message_texts = [
        data.text
        for data in ordered_queue
        # if expired_gen generator yields False, message was sent recently, so skip it
        if next(data.expiration_generator)
    ]

    def message_is_in_queue(data: _RetryQueue._MessageData) -> bool:
        return any(
            isinstance(data.message, RetrieableMessage) and
            send_event.message_identifier == data.message.message_identifier
            for send_event in self.transport._queueids_to_queues[data.queue_identifier]
        )

    # clean after composing, so any queued messages (e.g. Delivered) are sent at least once
    for msg_data in self._message_queue[:]:
        remove = False
        if isinstance(msg_data.message, (Delivered, Ping, Pong)):
            # e.g. Delivered, send only once and then clear
            # TODO: Is this correct? Will a missed Delivered be 'fixed' by the
            #       later `Processed` message?
            remove = True
        elif msg_data.queue_identifier not in self.transport._queueids_to_queues:
            remove = True
            self.log.debug(
                'Stopping message send retry',
                queue=msg_data.queue_identifier,
                message=msg_data.message,
                reason='Raiden queue is gone',
            )
        elif not message_is_in_queue(msg_data):
            remove = True
            self.log.debug(
                'Stopping message send retry',
                queue=msg_data.queue_identifier,
                message=msg_data.message,
                reason='Message was removed from queue',
            )

        if remove:
            self._message_queue.remove(msg_data)

    if message_texts:
        self.log.debug('Send', receiver=pex(self.receiver), messages=message_texts)
        self.transport._send_raw(self.receiver, '\n'.join(message_texts))
def labels(self, *labelvalues, **labelkwargs):
    """Return the child for the given labelset.

    All metrics can have labels, allowing grouping of related time series.
    Taking a counter as an example:

        from prometheus_client import Counter

        c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
        c.labels('get', '/').inc()
        c.labels('post', '/submit').inc()

    Labels can also be provided as keyword arguments:

        from prometheus_client import Counter

        c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
        c.labels(method='get', endpoint='/').inc()
        c.labels(method='post', endpoint='/submit').inc()

    See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
    and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
    """
    if not self._labelnames:
        raise ValueError('No label names were set when constructing %s' % self)

    if self._labelvalues:
        raise ValueError('%s already has labels set (%s); can not chain calls to .labels()' % (
            self,
            dict(zip(self._labelnames, self._labelvalues))
        ))

    if labelvalues and labelkwargs:
        raise ValueError("Can't pass both *args and **kwargs")

    if labelkwargs:
        if sorted(labelkwargs) != sorted(self._labelnames):
            raise ValueError('Incorrect label names')
        labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames)
    else:
        if len(labelvalues) != len(self._labelnames):
            raise ValueError('Incorrect label count')
        labelvalues = tuple(unicode(l) for l in labelvalues)
    with self._lock:
        if labelvalues not in self._metrics:
            self._metrics[labelvalues] = self.__class__(
                self._name,
                documentation=self._documentation,
                labelnames=self._labelnames,
                unit=self._unit,
                labelvalues=labelvalues,
                **self._kwargs
            )
        return self._metrics[labelvalues]
def get_self_uri(self, content_type):
    "return the first self uri with the content_type"
    try:
        return [self_uri for self_uri in self.self_uri_list
                if self_uri.content_type == content_type][0]
    except IndexError:
        return None
def infer_delimiter(filename, comment_char="#", n_lines=3):
    """
    Given a file which contains data separated by one of the following:
        - commas
        - tabs
        - spaces
    Return the most likely separator by sniffing the first few lines
    of the file's contents.
    """
    lines = []
    with open(filename, "r") as f:
        for line in f:
            if line.startswith(comment_char):
                continue
            if len(lines) < n_lines:
                lines.append(line)
            else:
                break

    if len(lines) < n_lines:
        raise ValueError(
            "Not enough lines in %s to infer delimiter" % filename)

    candidate_delimiters = ["\t", ",", r"\s+"]
    for candidate_delimiter in candidate_delimiters:
        counts = [len(re.split(candidate_delimiter, line)) for line in lines]
        first_line_count = counts[0]
        if all(c == first_line_count for c in counts) and first_line_count > 1:
            return candidate_delimiter

    raise ValueError("Could not determine delimiter for %s" % filename)
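A minimal usage sketch of the sniffer above, with hypothetical file contents (the function itself needs `re` in scope from its surrounding module):

import re
import tempfile

# write a small comma-separated file with a comment header
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write("# hypothetical header comment\n")
    f.write("a,b,c\n")
    f.write("1,2,3\n")
    f.write("4,5,6\n")
    path = f.name

print(infer_delimiter(path))  # ',' -- every sniffed line splits into three comma-separated fields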
def child_task(self):
    '''child process - this holds all the GUI elements'''
    from MAVProxy.modules.lib import mp_util
    import wx_processguard
    from wx_loader import wx
    from wxsettings_ui import SettingsDlg

    mp_util.child_close_fds()

    app = wx.App(False)
    dlg = SettingsDlg(self.settings)
    dlg.parent_pipe = self.parent_pipe
    dlg.ShowModal()
    dlg.Destroy()
def indirect(self, interface):
    """
    Indirect the implementation of L{IWebViewer} to L{_AnonymousWebViewer}.
    """
    if interface == IWebViewer:
        return _AnonymousWebViewer(self.store)
    return super(AnonymousSite, self).indirect(interface)
def p_func_args(self, p):
    'func_args : func_args COMMA expression'
    p[0] = p[1] + (p[3],)
    p.set_lineno(0, p.lineno(1))
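This PLY rule covers only the recursive case of a comma-separated argument list; a grammar using it would also need a base production. A hedged sketch of what that companion rule could look like (the rule name and the single-argument base case are assumptions; the tuple convention is taken from the rule above):

def p_func_args_single(self, p):
    'func_args : expression'
    # start the argument tuple with the first expression
    p[0] = (p[1],)
    p.set_lineno(0, p.lineno(1))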
def admin_view_reverse_fk_links(modeladmin: ModelAdmin,
                                obj,
                                reverse_fk_set_field: str,
                                missing: str = "(None)",
                                use_str: bool = True,
                                separator: str = "<br>",
                                view_type: str = "change",
                                current_app: str = None) -> str:
    """
    Get multiple Django admin site URLs for multiple objects linked to our
    object of interest (where the other objects have foreign keys to our
    object).
    """
    if not hasattr(obj, reverse_fk_set_field):
        return missing
    linked_objs = getattr(obj, reverse_fk_set_field).all()
    if not linked_objs:
        return missing
    first = linked_objs[0]
    app_name = first._meta.app_label.lower()
    model_name = first._meta.object_name.lower()
    viewname = "admin:{}_{}_{}".format(app_name, model_name, view_type)
    if current_app is None:
        current_app = modeladmin.admin_site.name
    links = []
    for linked_obj in linked_objs:
        # log.debug("linked_obj: {}", linked_obj)
        url = reverse(viewname, args=[linked_obj.pk], current_app=current_app)
        if use_str:
            label = escape(str(linked_obj))
        else:
            label = "{} {}".format(escape(linked_obj._meta.object_name),
                                   linked_obj.pk)
        links.append('<a href="{}">{}</a>'.format(url, label))
    # log.debug("links: {}", links)
    return separator.join(links)
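A hedged usage sketch inside a ModelAdmin; the `Author` model and its reverse FK accessor `book_set` are hypothetical names, not part of the helper above:

from django.contrib import admin
from django.utils.safestring import mark_safe

class AuthorAdmin(admin.ModelAdmin):
    list_display = ('name', 'books')

    def books(self, obj):
        # the helper returns raw '<a>' markup, so mark it safe for display
        html = admin_view_reverse_fk_links(self, obj, 'book_set')
        return mark_safe(html)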
def bugreport(dest_file="default.log"):
    """
    Writes dumpsys, dumpstate, and logcat data to a log file, for the
    purposes of bug reporting
    :return: result of _exec_command() execution
    """
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_BUGREPORT]
    try:
        dest_file_handler = open(dest_file, "w")
    except IOError:
        print("IOError: Failed to create a log file")
        return (0, "IOError: Failed to create a log file")

    # We have to check if device is available or not before executing this command
    # as adb bugreport will wait-for-device infinitely and does not come out of
    # loop
    # Execute only if device is available only
    if _isDeviceAvailable():
        result = _exec_command_to_file(adb_full_cmd, dest_file_handler)
        return (result, "Success: Bug report saved to: " + dest_file)
    else:
        return (0, "Device Not Found")
def pass_session_attributes(self):
    """Copies request attributes to response"""
    for key, value in six.iteritems(self.request.session.attributes):
        self.response.sessionAttributes[key] = value
def mimeData(self, items):
    """
    Returns the mime data for dragging for this instance.

    :param      items | [<QTreeWidgetItem>, ..]
    """
    func = self.dataCollector()
    if func:
        return func(self, items)

    # extract the records from the items
    record_items = []
    for item in self.selectedItems():
        if isinstance(item, XOrbRecordItem):
            record_items.append(item)

    # create the mime data
    data = QMimeData()
    self.dataStoreRecords(data, record_items)
    return data
def dlogprior(self, param):
    """Value of derivative of prior depends on value of `prior`."""
    assert param in self.freeparams, "Invalid param: {0}".format(param)
    return self._dlogprior[param]
def get_authinfo(request):
    """Get authentication info from the encrypted message."""
    if (("files_iv" not in request.session) or
            ("files_text" not in request.session) or
            ("files_key" not in request.COOKIES)):
        return False

    # Decrypt the password given the SERVER-side IV, SERVER-side ciphertext,
    # and CLIENT-side key. See note above on why this is done.
    iv = base64.b64decode(request.session["files_iv"])
    text = base64.b64decode(request.session["files_text"])
    key = base64.b64decode(request.COOKIES["files_key"])

    obj = AES.new(key, AES.MODE_CFB, iv)
    password = obj.decrypt(text)

    username = request.session["filecenter_username"] if "filecenter_username" in request.session else request.user.username

    return {"username": username, "password": password}
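For context, a hedged sketch of the complementary encryption step implied by the docstring, using the same AES-CFB API; the helper name and the session/cookie wiring are assumptions, not taken from the source:

import base64
import os
from Crypto.Cipher import AES

def store_authinfo(request, response, password):
    # hypothetical counterpart: the key goes to the client (cookie), while
    # the IV and ciphertext stay server-side (session), so neither side
    # alone can recover the password
    key = os.urandom(32)
    iv = os.urandom(16)
    obj = AES.new(key, AES.MODE_CFB, iv)
    request.session["files_iv"] = base64.b64encode(iv).decode()
    request.session["files_text"] = base64.b64encode(obj.encrypt(password.encode())).decode()
    response.set_cookie("files_key", base64.b64encode(key).decode())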
def upload_logs(self, release_singleton=True):
    '''
    uploads a log to a server using the method and gui specified in self.pcfg

    singleton mode can be disabled so a new version can be restarted while
    uploading goes on, typically in the case of uploading after a crash or
    sys exit

    set self.pcfg log_upload_interface to gui/cli/or background
    '''
    if release_singleton:
        self.release_singleton()

    def _upload():
        for log in self.get_logs():
            new_name = self._uniquename(log)
            self._upload(log, new_name)
            # Todo: keep log around for a few days
            self.delete_log(log)

    if self.pcfg['log_server_interface'] == 'gui':
        raise NotImplementedError
        # intended (currently unreachable) GUI path:
        # threading.Thread(target=_upload)
        # gui_uploader(_upload)
    elif self.pcfg['log_server_interface'] == 'cli':
        raise NotImplementedError
    elif self.pcfg['log_server_interface'] == 'background':
        _upload()
def txt(self, diff, f):
    """
    Generate a text report for a diff.
    """
    env = Environment(
        loader=PackageLoader('clan', 'templates'),
        trim_blocks=True,
        lstrip_blocks=True
    )

    template = env.get_template('diff.txt')

    def format_row(label, values):
        change = format_comma(values['change'])
        percent_change = '{:.1%}'.format(values['percent_change']) if values['percent_change'] is not None else '-'
        point_change = '{:.1f}'.format(values['point_change'] * 100) if values['point_change'] is not None else '-'

        if values['change'] > 0:
            change = '+%s' % change

        if values['percent_change'] is not None and values['percent_change'] > 0:
            percent_change = '+%s' % percent_change

        if values['point_change'] is not None and values['point_change'] > 0:
            point_change = '+%s' % point_change

        return '{:>15s} {:>8s} {:>8s} {:s}\n'.format(change, percent_change, point_change, label)

    context = {
        'diff': diff,
        'field_definitions': self.field_definitions,
        'GLOBAL_ARGUMENTS': GLOBAL_ARGUMENTS,
        'format_comma': format_comma,
        'format_duration': format_duration,
        'format_percent': format_percent,
        'format_row': format_row
    }

    f.write(template.render(**context).encode('utf-8'))
def check_token(func):
    """Check whether the access token is still valid."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        if response.status_code == 401:
            raise InvalidToken('Access token invalid or no longer valid')
        else:
            return response
    return wrapper
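A hedged usage sketch of the decorator; the wrapped function and the URL are placeholders, not from the source:

import requests

@check_token
def fetch_profile(session, url):
    # any callable returning a requests-style response can be wrapped
    return session.get(url)

# raises InvalidToken when the server answers 401, otherwise returns the response
resp = fetch_profile(requests.Session(), "https://api.example.com/profile")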
def checkInfo(email=None, username=None, api_key=None):
    '''
    Method that checks if the given email or username is stored in the
    pipl.com website.

    :param email: email to be looked up in pipl.com.
    :param username: username to be looked up in pipl.com.
    :param api_key: api_key to be used in pipl.com. If not provided, the API
        key will be searched in the config_api_keys.py file.
    :return: Python structure for the Json received. It has the following
        structure:
    '''
    # This is for i3visio
    if api_key is None:
        #api_key = raw_input("Insert the API KEY here:\t")
        allKeys = config_api_keys.returnListOfAPIKeys()
        try:
            api_key = allKeys["pipl_com"]
        except:
            # API_Key not found. The samplekey will be used but it has a
            # limit of 10 queries/day.
            api_key = "samplekey"

    results = {}
    results["person"] = []
    results["records"] = []

    if username is not None:
        request = SearchAPIRequest(username=username, api_key=api_key)
        person, records = launchRequest(request)
        # Appending the results
        results["person"].append(person)
        results["records"].append(records)

    if email is not None:
        request = SearchAPIRequest(email=email, api_key=api_key)
        person, records = launchRequest(request)
        # Appending the results
        results["person"].append(person)
        results["records"].append(records)

    return results
def values(self):
    """list of _ColumnPairwiseSignificance tests.

    Result has as many elements as there are columns in the slice. Each
    significance test contains `p_vals` and `t_stats` significance tests.
    """
    # TODO: Figure out how to intersperse pairwise objects for columns
    # that represent H&S
    return [
        _ColumnPairwiseSignificance(
            self._slice,
            col_idx,
            self._axis,
            self._weighted,
            self._alpha,
            self._only_larger,
            self._hs_dims,
        )
        for col_idx in range(self._slice.get_shape(hs_dims=self._hs_dims)[1])
    ]
def token_view(token):
    """Show token details."""
    if request.method == "POST" and 'delete' in request.form:
        db.session.delete(token)
        db.session.commit()
        return redirect(url_for('.index'))

    show_token = session.pop('show_personal_access_token', False)

    form = TokenForm(request.form, name=token.client.name, scopes=token.scopes)
    form.scopes.choices = current_oauth2server.scope_choices()

    if form.validate_on_submit():
        token.client.name = form.data['name']
        token.scopes = form.data['scopes']
        db.session.commit()

    if len(current_oauth2server.scope_choices()) == 0:
        del form.scopes

    return render_template(
        "invenio_oauth2server/settings/token_view.html",
        token=token,
        form=form,
        show_token=show_token,
    )
def community_post_comments(self, post_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/post_comments#list-comments"
    api_path = "/api/v2/community/posts/{post_id}/comments.json"
    api_path = api_path.format(post_id=post_id)
    return self.call(api_path, **kwargs)
def set_contrast(self, contrast):
    """
    Adjusts the image contrast.

    Contrast refers to the rate of change of color with color level.
    At low contrast, color changes gradually over many intensity levels,
    while at high contrast it can change rapidly within a few levels

    Args:
        contrast: float
            A number between 0 and 1. Note that upon initialization the
            colormap has a default contrast value of 0.5.

    Returns:
        void
    """
    self._contrast = contrast
    self.x_spread = 2 * (1.0 - contrast)
    self.y_spread = 2.0 - 2 * (1.0 - contrast)
    self._build_cdict()
def dispense(self):
    '''dispense a card if ready, otherwise throw an Exception'''
    self.sendcommand(Vendapin.DISPENSE)
    # wait for the reply
    time.sleep(1)
    # parse the reply
    response = self.receivepacket()
    print('Vendapin.dispense(): ' + str(response))
    if not self.was_packet_accepted(response):
        raise Exception('DISPENSE packet not accepted: ' + str(response))
    return self.parsedata(response)[0]
def normalizer(text, exclusion=OPERATIONS_EXCLUSION, lower=True,
               separate_char='-', **kwargs):
    """
    Clean a text string of symbols, keeping only alphanumeric chars.
    """
    clean_str = re.sub(r'[^\w{}]'.format(
        "".join(exclusion)), separate_char, text.strip()) or ''
    clean_lowerbar = clean_str_without_accents = strip_accents(clean_str)
    if '_' not in exclusion:
        clean_lowerbar = re.sub(r'\_', separate_char,
                                clean_str_without_accents.strip())
    limit_guion = re.sub(r'\-+', separate_char, clean_lowerbar.strip())
    # TODO: refactor with a regexp
    if limit_guion and separate_char and separate_char in limit_guion[0]:
        limit_guion = limit_guion[1:]
    if limit_guion and separate_char and separate_char in limit_guion[-1]:
        limit_guion = limit_guion[:-1]
    if lower:
        limit_guion = limit_guion.lower()
    return limit_guion
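A hedged usage sketch; it assumes `OPERATIONS_EXCLUSION` adds no extra kept characters and that `strip_accents` only removes diacritics:

print(normalizer("Hola, Señor!"))        # 'hola-senor'
print(normalizer("foo_bar baz"))         # 'foo-bar-baz' (underscores become separators too)
print(normalizer("--Already--Clean--"))  # 'already-clean' (runs collapse, edges trimmed)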
def unpack_rawr_zip_payload(table_sources, payload):
    """unpack a zipfile and turn it into a callable "tables" object."""
    # the io we get from S3 is streaming, so we can't seek on it, but zipfile
    # seems to require that. so we buffer it all in memory. RAWR tiles are
    # generally up to around 100MB in size, which should be safe to store in
    # RAM.
    from tilequeue.query.common import Table
    from io import BytesIO

    zfh = zipfile.ZipFile(BytesIO(payload), 'r')

    def get_table(table_name):
        # need to extract the whole compressed file from zip reader, as it
        # doesn't support .tell() on the filelike, which gzip requires.
        data = zfh.open(table_name, 'r').read()
        unpacker = Unpacker(file_like=BytesIO(data))
        source = table_sources[table_name]
        return Table(source, unpacker)

    return get_table
def init(envVarName, enableColorOutput=False):
    """
    Initialize the logging system and parse the environment variable
    of the given name.
    Needs to be called before starting the actual application.
    """
    global _initialized

    if _initialized:
        return

    global _ENV_VAR_NAME
    _ENV_VAR_NAME = envVarName

    if enableColorOutput:
        _preformatLevels(envVarName + "_NO_COLOR")
    else:
        _preformatLevels(None)

    if envVarName in os.environ:
        # install a log handler that uses the value of the environment var
        setDebug(os.environ[envVarName])
    addLimitedLogHandler(stderrHandler)

    _initialized = True
def create_app(app_id, app_name, source_id, region, app_data):
    """ insert app record when stack run as a app """
    try:
        create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        conn = get_conn()
        c = conn.cursor()
        #check old app
        c.execute("SELECT count(*) FROM app WHERE name='{0}' ".format(app_name))
        old_app = c.fetchone()
        if old_app[0] > 0:
            print 'app name (%s) already existed, clear old app and container info ...' % app_name
            c.execute("DELETE FROM container WHERE app_id='{0}'".format(app_id))
            c.execute("DELETE FROM app WHERE name='{0}'".format(app_name))
            conn.commit()
        #insert new app
        c.execute("INSERT INTO app (id,name,source_id,region,state,create_at,change_at,app_data) "
                  "VALUES ('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}')"
                  .format(app_id, app_name, source_id, region,
                          constant.STATE_APP_RUNNING, create_at, create_at,
                          app_data))
        conn.commit()
        conn.close()
        print 'create app %s succeed!' % app_id
    except Exception, e:
        raise RuntimeError('create app %s failed! %s' % (app_id, e))
def add_semantic_data(self, path_as_list, value, key):
    """ Adds a semantic data entry.

    :param list path_as_list: The path in the vividict to enter the value
    :param value: The value of the new entry.
    :param key: The key of the new entry.
    :return: The path to the newly added entry.
    """
    assert isinstance(key, string_types)
    target_dict = self.get_semantic_data(path_as_list)
    target_dict[key] = value
    return path_as_list + [key]
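A hedged usage sketch, assuming `state` is an object exposing the vividict-backed semantic-data API above (`add_semantic_data` / `get_semantic_data`):

# store meta['notes']['author'] = 'Ada' and get back the full path
path = state.add_semantic_data(['notes'], 'Ada', key='author')
print(path)                                          # ['notes', 'author']
print(state.get_semantic_data(['notes'])['author'])  # 'Ada'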
def evolve_genomes(rng, pop, params, recorder=None):
    """
    Evolve a population without tree sequence recordings.  In other words,
    complete genomes must be simulated and tracked.

    :param rng: random number generator
    :type rng: :class:`fwdpy11.GSLrng`
    :param pop: A population
    :type pop: :class:`fwdpy11.DiploidPopulation`
    :param params: simulation parameters
    :type params: :class:`fwdpy11.ModelParams`
    :param recorder: (None) A temporal sampler/data recorder.
    :type recorder: callable

    .. note::
        If recorder is None, then :class:`fwdpy11.RecordNothing` will be used.
    """
    import warnings

    # Test parameters while suppressing warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Will throw exception if anything is wrong:
        params.validate()

    from ._fwdpy11 import MutationRegions
    from ._fwdpy11 import evolve_without_tree_sequences
    from ._fwdpy11 import dispatch_create_GeneticMap

    pneutral = params.mutrate_n / (params.mutrate_n + params.mutrate_s)
    mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
    rm = dispatch_create_GeneticMap(params.recrate, params.recregions)

    if recorder is None:
        from ._fwdpy11 import RecordNothing
        recorder = RecordNothing()

    evolve_without_tree_sequences(rng, pop, params.demography,
                                  params.mutrate_n, params.mutrate_s,
                                  params.recrate, mm, rm, params.gvalue,
                                  recorder, params.pself,
                                  params.prune_selected)
def Parse(self):
    """Iterator yielding a list for each entry in history."""
    for data in self.Query(self.EVENTS_QUERY):
        (timestamp, agent_bundle_identifier, agent_name, url, sender,
         sender_address, type_number, title, referrer, referrer_alias) = data
        yield [
            timestamp, "OSX_QUARANTINE", url, referrer, title, agent_name,
            agent_bundle_identifier, sender, sender_address, type_number,
            referrer_alias
        ]
def tops(opts):
    '''
    Returns the tops modules
    '''
    if 'master_tops' not in opts:
        return {}
    whitelist = list(opts['master_tops'].keys())
    ret = LazyLoader(
        _module_dirs(opts, 'tops', 'top'),
        opts,
        tag='top',
        whitelist=whitelist,
    )
    return FilterDictWrapper(ret, '.top')
def _init_os_api(self):
    """
    Initialise client objects for talking to OpenStack API.

    This is in a separate function so to be called by ``__init__``
    and ``__setstate__``.
    """
    if not self.nova_client:
        log.debug("Initializing OpenStack API clients:"
                  " OS_AUTH_URL='%s'"
                  " OS_USERNAME='%s'"
                  " OS_USER_DOMAIN_NAME='%s'"
                  " OS_PROJECT_NAME='%s'"
                  " OS_PROJECT_DOMAIN_NAME='%s'"
                  " OS_REGION_NAME='%s'"
                  " OS_CACERT='%s'"
                  "",
                  self._os_auth_url,
                  self._os_username,
                  self._os_user_domain_name,
                  self._os_tenant_name,
                  self._os_project_domain_name,
                  self._os_region_name,
                  self._os_cacert)
        sess = self.__init_keystone_session()
        log.debug("Creating OpenStack Compute API (Nova) v%s client ...",
                  self._compute_api_version)
        self.nova_client = nova_client.Client(
            self._compute_api_version, session=sess,
            region_name=self._os_region_name,
            cacert=self._os_cacert)
        log.debug("Creating OpenStack Network API (Neutron) client ...")
        self.neutron_client = neutron_client.Client(
            #self._network_api_version,  ## doesn't work as of Neutron Client 2 :-(
            session=sess, region_name=self._os_region_name,
            ca_cert=self._os_cacert)
        # FIXME: Glance's `Client` class does not take an explicit
        # `cacert` parameter, instead it relies on the `session`
        # argument being "A keystoneauth1 session that should be
        # used for transport" -- I presume this means that
        # `cacert` only needs to be set there. Is this true of
        # other OpenStack client classes as well?
        log.debug("Creating OpenStack Image API (Glance) v%s client ...",
                  self._image_api_version)
        self.glance_client = glance_client.Client(
            self._image_api_version, session=sess,
            region_name=self._os_region_name)
        log.debug("Creating OpenStack Volume API (Cinder) v%s client ...",
                  self._volume_api_version)
        self.cinder_client = cinder_client.Client(
            self._volume_api_version, session=sess,
            region_name=self._os_region_name,
            cacert=self._os_cacert)
def ae_partial_waves(self):
    """Dictionary with the AE partial waves indexed by state."""
    ae_partial_waves = OrderedDict()
    for mesh, values, attrib in self._parse_all_radfuncs("ae_partial_wave"):
        state = attrib["state"]
        #val_state = self.valence_states[state]
        ae_partial_waves[state] = RadialFunction(mesh, values)
    return ae_partial_waves
def rotate(obj, axis, angle, origin=None):
    '''
    Rotation around unit vector following the right hand rule

    Parameters:
        obj : obj to be rotated (e.g. neurite, neuron).
            Must implement a transform method.
        axis : unit vector for the axis of rotation
        angle : rotation angle in rads

    Returns:
        A copy of the object with the applied rotation.
    '''
    R = _rodrigues_to_dcm(axis, angle)
    try:
        return obj.transform(PivotRotation(R, origin))
    except AttributeError:
        raise NotImplementedError
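A hedged usage sketch; `neuron` stands in for any object implementing the `transform` method mentioned in the docstring:

import numpy as np

z_axis = np.array([0.0, 0.0, 1.0])
rotated = rotate(neuron, z_axis, np.pi / 2)  # 90 degrees about z, right-hand rule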
def next(self):
    """
    Goes to the next page for this wizard.
    """
    curr_page = self.currentPage()
    if not curr_page:
        return
    elif not curr_page.validatePage():
        return

    pageId = curr_page.nextId()
    try:
        next_page = self._pages[pageId]
    except KeyError:
        return

    self._currentId = pageId
    self._navigation.append(pageId)

    y = curr_page.y()
    next_page.move(self.width(), y)

    # animate the current page out
    anim_in = QtCore.QPropertyAnimation(self)
    anim_in.setTargetObject(curr_page)
    anim_in.setPropertyName('pos')
    anim_in.setStartValue(curr_page.pos())
    anim_in.setEndValue(QtCore.QPoint(-curr_page.width(), y))
    anim_in.setDuration(self.animationSpeed())
    anim_in.setEasingCurve(QtCore.QEasingCurve.Linear)

    # animate the next page in
    anim_out = QtCore.QPropertyAnimation(self)
    anim_out.setTargetObject(next_page)
    anim_out.setPropertyName('pos')
    anim_out.setStartValue(next_page.pos())
    anim_out.setEndValue(curr_page.pos())
    anim_out.setDuration(self.animationSpeed())
    anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)

    # create the anim group
    anim_grp = QtCore.QParallelAnimationGroup(self)
    anim_grp.addAnimation(anim_in)
    anim_grp.addAnimation(anim_out)
    anim_grp.finished.connect(curr_page.hide)
    anim_grp.finished.connect(anim_grp.deleteLater)

    next_page.show()

    # update the button states
    self._buttons[self.WizardButton.BackButton].setVisible(True)
    self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
    self._buttons[self.WizardButton.RetryButton].setVisible(self.canRetry())
    self._buttons[self.WizardButton.CommitButton].setVisible(next_page.isCommitPage())
    self._buttons[self.WizardButton.FinishButton].setVisible(next_page.isFinalPage())

    self.adjustSize()

    # initialize the new page
    self.currentIdChanged.emit(pageId)
    next_page.initializePage()

    anim_grp.start()
def proxy_for(widget):
    """Create a proxy for a Widget

    :param widget: A gtk.Widget to proxy

    This will raise a KeyError if there is no proxy type
    registered for the widget type.
    """
    proxy_type = widget_proxies.get(widget.__class__)
    if proxy_type is None:
        raise KeyError('There is no proxy type registered for %r' % widget)
    return proxy_type(widget)
def set_aesthetic(palette="yellowbrick", font="sans-serif", font_scale=1,
                  color_codes=True, rc=None):
    """
    Set aesthetic parameters in one step.

    Each set of parameters can be set directly or temporarily, see the
    referenced functions below for more information.

    Parameters
    ----------
    palette : string or sequence
        Color palette, see :func:`color_palette`

    font : string
        Font family, see matplotlib font manager.

    font_scale : float, optional
        Separate scaling factor to independently scale the size of the
        font elements.

    color_codes : bool
        If ``True`` and ``palette`` is a yellowbrick palette, remap the
        shorthand color codes (e.g. "b", "g", "r", etc.) to the colors
        from this palette.

    rc : dict or None
        Dictionary of rc parameter mappings to override the above.
    """
    _set_context(font_scale)
    set_style(rc={"font.family": font})
    set_palette(palette, color_codes=color_codes)
    if rc is not None:
        mpl.rcParams.update(rc)
def run(self, fnames=None):
    """Run Python scripts"""
    if fnames is None:
        fnames = self.get_selected_filenames()
    for fname in fnames:
        self.sig_run.emit(fname)
def _fill_missing_values(df, range_values, fill_value=0, fill_method=None):
    """
    Will get the names of the index columns of df, obtain their ranges from
    range_values dict and return a reindexed version of df with the given
    range values.

    :param df: pandas DataFrame

    :param range_values: dict or array-like
        Must contain for each index column of df an entry with all the values
        within the range of the column.

    :param fill_value: scalar or 'nearest', default 0
        Value to use for missing values. Defaults to 0, but can be any
        "compatible" value, e.g., NaN.
        The 'nearest' mode will fill the missing value with the nearest value
        in the column.

    :param fill_method: {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed DataFrame
        'pad' / 'ffill': propagate last valid observation forward to next valid
        'backfill' / 'bfill': use NEXT valid observation to fill gap

    :return: pandas Dataframe and used column ranges
        reindexed DataFrame and dict with index column ranges
    """
    idx_colnames = df.index.names
    idx_colranges = [range_values[x] for x in idx_colnames]
    fullindex = pd.Index([p for p in product(*idx_colranges)],
                         name=tuple(idx_colnames))
    fulldf = df.reindex(index=fullindex, fill_value=fill_value,
                        method=fill_method)
    fulldf.index.names = idx_colnames
    return fulldf, idx_colranges
def delete_topic_rule(ruleName,
                      region=None, key=None, keyid=None, profile=None):
    '''
    Given a rule name, delete it.

    Returns {deleted: true} if the rule was deleted and returns
    {deleted: false} if the rule was not deleted.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.delete_topic_rule myrule

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_topic_rule(ruleName=ruleName)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def reindex(self, request):
    """
    Recreate the Search Index.
    """
    r = redis.StrictRedis.from_url(request.registry.settings["celery.scheduler_url"])
    try:
        with SearchLock(r, timeout=30 * 60, blocking_timeout=30):
            p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"])
            client = elasticsearch.Elasticsearch(
                [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
                verify_certs=True,
                ca_certs=certifi.where(),
                timeout=30,
                retry_on_timeout=True,
                serializer=serializer.serializer,
            )
            number_of_replicas = request.registry.get("elasticsearch.replicas", 0)
            refresh_interval = request.registry.get("elasticsearch.interval", "1s")

            # We use a randomly named index so that we can do a zero downtime reindex.
            # Essentially we'll use a randomly named index which we will use until all
            # of the data has been reindexed, at which point we'll point an alias at
            # our randomly named index, and then delete the old randomly named index.

            # Create the new index and associate all of our doc types with it.
            index_base = request.registry["elasticsearch.index"]
            random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
            new_index_name = "{}-{}".format(index_base, random_token)
            doc_types = request.registry.get("search.doc_types", set())
            shards = request.registry.get("elasticsearch.shards", 1)

            # Create the new index with zero replicas and index refreshes disabled
            # while we are bulk indexing.
            new_index = get_index(
                new_index_name,
                doc_types,
                using=client,
                shards=shards,
                replicas=0,
                interval="-1",
            )
            new_index.create(wait_for_active_shards=shards)

            # From this point on, if any error occurs, we want to be able to delete our
            # in progress index.
            try:
                request.db.execute("SET statement_timeout = '600s'")

                for _ in parallel_bulk(
                    client, _project_docs(request.db), index=new_index_name
                ):
                    pass
            except:  # noqa
                new_index.delete()
                raise
            finally:
                request.db.rollback()
                request.db.close()

            # Now that we've finished indexing all of our data we can update the
            # replicas and refresh intervals.
            client.indices.put_settings(
                index=new_index_name,
                body={
                    "index": {
                        "number_of_replicas": number_of_replicas,
                        "refresh_interval": refresh_interval,
                    }
                },
            )

            # Point the alias at our new randomly named index and delete the old index.
            if client.indices.exists_alias(name=index_base):
                to_delete = set()
                actions = []
                for name in client.indices.get_alias(name=index_base):
                    to_delete.add(name)
                    actions.append({"remove": {"index": name, "alias": index_base}})
                actions.append({"add": {"index": new_index_name, "alias": index_base}})
                client.indices.update_aliases({"actions": actions})
                client.indices.delete(",".join(to_delete))
            else:
                client.indices.put_alias(name=index_base, index=new_index_name)
    except redis.exceptions.LockError as exc:
        raise self.retry(countdown=60, exc=exc)
def parametrized_class(decorator):
    '''Decorator used to make simple class decorator with arguments.

    Doesn't really do anything, just here to have a central
    implementation of the simple class decorator.'''
    def decorator_builder(*args, **kwargs):
        def meta_decorator(cls):
            return decorator(cls, *args, **kwargs)
        return meta_decorator
    return decorator_builder
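A hedged usage sketch showing the three-layer closure in action; the `register` decorator and `registry` dict are illustrative, not from the source:

registry = {}

@parametrized_class
def register(cls, name, version=1):
    # runs once per decorated class, with the extra arguments bound
    registry[name] = (cls, version)
    return cls

@register('widget', version=2)
class Widget:
    pass

print(registry)  # {'widget': (<class ...Widget>, 2)}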
def nodes(self, tree):
    """
    Returns the relevant nodes for the spec's frequency
    """
    # Run the match against the tree
    if self.frequency == 'per_session':
        nodes = []
        for subject in tree.subjects:
            for sess in subject.sessions:
                nodes.append(sess)
    elif self.frequency == 'per_subject':
        nodes = tree.subjects
    elif self.frequency == 'per_visit':
        nodes = tree.visits
    elif self.frequency == 'per_study':
        nodes = [tree]
    else:
        assert False, "Unrecognised frequency '{}'".format(self.frequency)
    return nodes
def env(self, key, value=None, unset=False, asap=False):
    """Processes (sets/unsets) environment variable.

    If no value is given in `set` mode, the value will be taken from the
    current env.

    :param str|unicode key:

    :param value:

    :param bool unset: Whether to unset this variable.

    :param bool asap: If True env variable will be set as soon as possible.
    """
    if unset:
        self._set('unenv', key, multi=True)
    else:
        if value is None:
            value = os.environ.get(key)
        self._set('%senv' % ('i' if asap else ''), '%s=%s' % (key, value), multi=True)

    return self
def getLinkProperties(self, wanInterfaceId=1, timeout=1):
    """Execute GetCommonLinkProperties action to get WAN link properties.

    :param int wanInterfaceId: the id of the WAN device
    :param float timeout: the timeout to wait for the action to be executed
    :return: WAN link properties
    :rtype: WanLinkProperties
    """
    namespace = Wan.getServiceType("getLinkProperties") + str(wanInterfaceId)
    uri = self.getControlURL(namespace)
    results = self.execute(uri, namespace, "GetCommonLinkProperties",
                           timeout=timeout)
    return WanLinkProperties(results)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # already an instance
        return getattr(obj, '__call__', None) is not None

    if six.PY3:
        # *could* be broken by a class overriding __mro__ or __dict__ via
        # a metaclass
        for base in (obj,) + obj.__mro__:
            if base.__dict__.get('__call__') is not None:
                return True
    else:
        klass = obj
        # uses __bases__ instead of __mro__ so that we work with old style classes
        if klass.__dict__.get('__call__') is not None:
            return True

        for base in klass.__bases__:
            if _instance_callable(base):
                return True

    return False
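A hedged sketch of the distinction this helper draws, between plain classes and classes whose instances define `__call__`:

class Plain:
    pass

class Invocable:
    def __call__(self):
        return 42

print(_instance_callable(Plain))        # False - instances would not be callable
print(_instance_callable(Invocable))    # True  - instances would be callable
print(_instance_callable(Invocable()))  # True  - already a callable instance
print(_instance_callable(Plain()))      # False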
def register_as_guest(self):
    """ Register a guest account on this HS.

    Note: HS must have guest registration enabled.

    Returns:
        str: Access Token

    Raises:
        MatrixRequestError
    """
    response = self.api.register(auth_body=None, kind='guest')
    return self._post_registration(response)
def insert_record(self,
                  table: str,
                  fields: Sequence[str],
                  values: Sequence[Any],
                  update_on_duplicate_key: bool = False) -> int:
    """Inserts a record into database, table "table", using the list of
    fieldnames and the list of values. Returns the new PK (or None)."""
    self.ensure_db_open()
    if len(fields) != len(values):
        raise AssertionError("Field/value mismatch")
    if update_on_duplicate_key:
        sql = get_sql_insert_or_update(table, fields, self.get_delims())
    else:
        sql = get_sql_insert(table, fields, self.get_delims())
    sql = self.localize_sql(sql)
    log.debug("About to insert_record with SQL template: " + sql)
    try:
        cursor = self.db.cursor()
        debug_sql(sql, values)
        cursor.execute(sql, values)
        # ... binds the placeholders (?, %s) to values in the process
        new_pk = get_pk_of_last_insert(cursor)
        log.debug("Record inserted.")
        return new_pk
    except:  # nopep8
        log.exception("insert_record: Failed to insert record.")
        raise
def _get_inherited_field_types(class_to_field_type_overrides, schema_graph):
    """Return a dictionary describing the field type overrides in subclasses."""
    inherited_field_type_overrides = dict()
    for superclass_name, field_type_overrides in class_to_field_type_overrides.items():
        for subclass_name in schema_graph.get_subclass_set(superclass_name):
            inherited_field_type_overrides.setdefault(subclass_name, dict())
            inherited_field_type_overrides[subclass_name].update(field_type_overrides)
    return inherited_field_type_overrides
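A hedged illustration with a stub schema graph; the stub class is invented for the example, while the real `schema_graph` comes from the surrounding compiler package:

class StubSchemaGraph:
    def __init__(self, subclasses):
        self._subclasses = subclasses

    def get_subclass_set(self, name):
        return self._subclasses.get(name, set())

graph = StubSchemaGraph({'Animal': {'Dog', 'Cat'}})
overrides = {'Animal': {'name': 'String'}}
print(_get_inherited_field_types(overrides, graph))
# {'Dog': {'name': 'String'}, 'Cat': {'name': 'String'}}  (key order may vary)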
def place_objects(self):
    """Places objects randomly until no collisions or max iterations hit."""
    pos_arr, quat_arr = self.initializer.sample()
    for k, obj_name in enumerate(self.objects):
        self.objects[obj_name].set("pos", array_to_string(pos_arr[k]))
        self.objects[obj_name].set("quat", array_to_string(quat_arr[k]))
def visit_List(self, node):
    """ List construction depends on each element's type dependency. """
    if node.elts:
        return list(set(sum([self.visit(elt) for elt in node.elts], [])))
    else:
        return [frozenset()]
def show_batch_runner(self):
    """Show the batch runner dialog."""
    from safe.gui.tools.batch.batch_dialog import BatchDialog
    dialog = BatchDialog(
        parent=self.iface.mainWindow(),
        iface=self.iface,
        dock=self.dock_widget)
    dialog.exec_()
def check_no_signature(self, function, docstring):  # def context
    """D402: First line should not be function's or method's "signature".

    The one-line docstring should NOT be a "signature" reiterating the
    function/method parameters (which can be obtained by introspection).
    """
    if docstring:
        first_line = ast.literal_eval(docstring).strip().split('\n')[0]
        if function.name + '(' in first_line.replace(' ', ''):
            return violations.D402()
def init_progress_bar(self):
    """Initialize and return a progress bar."""
    # Forked worker processes can't show progress bars.
    disable = MapReduce._forked or not config.PROGRESS_BARS

    # Don't materialize iterable unless we have to: huge iterables
    # (e.g. of `KCuts`) eat memory.
    if disable:
        total = None
    else:
        self.iterable = list(self.iterable)
        total = len(self.iterable)

    return tqdm(total=total, disable=disable, leave=False,
                desc=self.description)
def spin(self):
    """:class:`.BinaryQuadraticModel`: An instance of the Ising model subclass
    of the :class:`.BinaryQuadraticModel` superclass, corresponding to a binary
    quadratic model with spins as its variables.

    Enables access to biases for the spin-valued binary quadratic model
    regardless of the :class:`vartype` set when the model was created.
    If the model was created with the :attr:`.binary` vartype,
    the Ising model subclass is instantiated upon the first use of the
    :attr:`.spin` property and used in any subsequent reads.

    Examples:
        This example creates a QUBO model and uses the :attr:`.spin` property
        to instantiate the corresponding Ising model.

        >>> import dimod
        ...
        >>> bqm_qubo = dimod.BinaryQuadraticModel({0: -1, 1: -1}, {(0, 1): 2}, 0.0, dimod.BINARY)
        >>> bqm_spin = bqm_qubo.spin
        >>> bqm_spin  # doctest: +SKIP
        BinaryQuadraticModel({0: 0.0, 1: 0.0}, {(0, 1): 0.5}, -0.5, Vartype.SPIN)
        >>> bqm_spin.spin is bqm_spin
        True

    Note:
        Methods like :meth:`.add_variable`, :meth:`.add_variables_from`,
        :meth:`.add_interaction`, etc. should only be used on the base model.
    """
    # NB: The existence of the _spin property implies that it is up to date,
    # methods that invalidate it will erase the property
    try:
        spin = self._spin
        if spin is not None:
            return spin
    except AttributeError:
        pass

    if self.vartype is Vartype.SPIN:
        self._spin = spin = self
    else:
        self._counterpart = self._spin = spin = self.change_vartype(Vartype.SPIN, inplace=False)

        # we also want to go ahead and set spin.binary to refer back to self
        spin._binary = self

    return spin
def to_file(file_):
    """Serializes file to id string

    :param file_: object to serialize
    :return: string id
    """
    from sevenbridges.models.file import File
    if not file_:
        raise SbgError('File is required!')
    elif isinstance(file_, File):
        return file_.id
    elif isinstance(file_, six.string_types):
        return file_
    else:
        raise SbgError('Invalid file parameter!')
def get_ilo_firmware_version_as_major_minor(self):
    """Gets the ilo firmware version for server capabilities

    Parses the output returned by get_host_health_data() to retrieve
    the firmware details.

    :returns: String with the format "<major>.<minor>" or None.
    """
    data = self.get_host_health_data()
    firmware_details = self._get_firmware_embedded_health(data)
    if firmware_details:
        ilo_version_str = firmware_details.get('iLO', None)
        return common.get_major_minor(ilo_version_str)
def point_in_triangle(p, v1, v2, v3):
    """Checks whether a point is within the given triangle

    The function checks, whether the given point p is within the triangle
    defined by the three corner points v1, v2 and v3. This is done by
    checking whether the point is on all three half-planes defined by the
    three edges of the triangle.

    :param p: The point to be checked (tuple with x and y coordinate)
    :param v1: First vertex of the triangle (tuple with x and y coordinate)
    :param v2: Second vertex of the triangle (tuple with x and y coordinate)
    :param v3: Third vertex of the triangle (tuple with x and y coordinate)
    :return: True if the point is within the triangle, False if not
    """
    def _test(p1, p2, p3):
        return (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])

    b1 = _test(p, v1, v2) < 0.0
    b2 = _test(p, v2, v3) < 0.0
    b3 = _test(p, v3, v1) < 0.0

    return (b1 == b2) and (b2 == b3)
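A small worked example with the unit right triangle (concrete values, checked by hand against the sign tests above):

v1, v2, v3 = (0.0, 0.0), (1.0, 0.0), (0.0, 1.0)

print(point_in_triangle((0.25, 0.25), v1, v2, v3))  # True  - all three sign tests agree
print(point_in_triangle((1.0, 1.0), v1, v2, v3))    # False - signs disagree across edges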
def query(path, method='GET', data=None, params=None, header_dict=None, decode=True):
    '''
    Perform a query directly against the Azure REST API
    '''
    certificate_path = config.get_cloud_config_value(
        'certificate_path',
        get_configured_provider(), __opts__, search_global=False
    )
    subscription_id = salt.utils.stringutils.to_str(
        config.get_cloud_config_value(
            'subscription_id',
            get_configured_provider(), __opts__, search_global=False
        )
    )
    management_host = config.get_cloud_config_value(
        'management_host',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default='management.core.windows.net'
    )
    backend = config.get_cloud_config_value(
        'backend',
        get_configured_provider(), __opts__, search_global=False
    )
    url = 'https://{management_host}/{subscription_id}/{path}'.format(
        management_host=management_host,
        subscription_id=subscription_id,
        path=path,
    )
    if header_dict is None:
        header_dict = {}
    header_dict['x-ms-version'] = '2014-06-01'
    result = salt.utils.http.query(
        url,
        method=method,
        params=params,
        data=data,
        header_dict=header_dict,
        port=443,
        text=True,
        cert=certificate_path,
        backend=backend,
        decode=decode,
        decode_type='xml',
    )
    if 'dict' in result:
        return result['dict']
    return
def get_index2data(model_description):
    """Get a dictionary that maps indices to a list of
    (1) the id in the hwrt symbol database
    (2) the latex command
    (3) the unicode code point
    (4) a font family and
    (5) a font style.

    Parameters
    ----------
    model_description : string
        A model description file that points to a feature folder where an
        ``index2formula_id.csv`` has to be.

    Returns
    -------
    dictionary that maps indices to lists of data

    Notes
    -----
    This command needs a database connection.
    """
    index2latex = {}
    translation_csv = os.path.join(get_project_root(),
                                   model_description["data-source"],
                                   "index2formula_id.csv")
    with open(translation_csv) as csvfile:
        csvreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
        for row in csvreader:
            database_id = int(row['formula_id'])
            online_data = get_online_symbol_data(database_id)
            latex = online_data['formula_in_latex']
            unicode_code_point = online_data['unicode_dec']
            font = online_data['font']
            font_style = online_data['font_style']
            index2latex[int(row['index'])] = [database_id,
                                              latex,
                                              unicode_code_point,
                                              font,
                                              font_style]
    return index2latex
def ancestors(self):
    """Returns a list of the ancestors of this node."""
    ancestors = set([])
    self._depth_ascend(self, ancestors)
    try:
        ancestors.remove(self)
    except KeyError:
        # we weren't ancestor of ourself, that's ok
        pass
    return list(ancestors)
def queue_emission(self, msg):
    """ queue an emission of a message for all output plugins """
    if not msg:
        return
    for _emitter in self._emit:
        if not hasattr(_emitter, 'emit'):
            continue

        def emit(emitter=_emitter):
            self.log.debug("emit to {}".format(emitter.name))
            emitter.emit(msg)

        self.log.debug("queue emission to {} ({})".format(
            _emitter.name, self._emit_queue.qsize()))
        self._emit_queue.put(emit)
async def save(proxies, filename):
    """Save proxies to a file."""
    with open(filename, 'w') as f:
        while True:
            proxy = await proxies.get()
            if proxy is None:
                break
            proto = 'https' if 'HTTPS' in proxy.types else 'http'
            row = '%s://%s:%d\n' % (proto, proxy.host, proxy.port)
            f.write(row)
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services`
        or a port number.
    :param action: `open` or `close`
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
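A hedged usage sketch; it assumes ufw is installed and the caller has root privileges, and that 'ssh' resolves via /etc/services:

service('ssh', 'open')    # runs: ufw allow ssh
service(8080, 'open')     # runs: ufw allow 8080
service('ssh', 'close')   # runs: ufw delete allow ssh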
def open_files(by_pid=False):
    '''
    Return a list of all physical open files on the system.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    '''
    # First we collect valid PIDs
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass

    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            continue

        # Collect the names of all of the file descriptors
        fd_ = []

        #try:
        #    fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
        #except Exception:
        #    pass

        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))

        for tid in tids:
            try:
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue

            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))

        fd_ = sorted(set(fd_))

        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue

            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))

            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))

    if by_pid:
        return pids
    return files
def normalize(alias): """ Normalizes an alias by removing adverbs defined in IGNORED_WORDS """ # Convert from CamelCase to snake_case alias = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias) # Ignore words words = alias.lower().split('_') words = filter(lambda w: w not in IGNORED_WORDS, words) return '_'.join(words)
Normalizes an alias by removing adverbs defined in IGNORED_WORDS
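To see the two passes (CamelCase split, then stop-word filtering), assume IGNORED_WORDS is a set such as the one below in the real module:

IGNORED_WORDS = {'just', 'simply'}  # assumed contents, for illustration

print(normalize('JustCreateUser'))  # -> 'create_user'
print(normalize('simply_delete'))   # -> 'delete'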
def ref_file(
        ticker: str, fld: str, has_date=False, cache=False, ext='parq', **kwargs
) -> str:
    """
    Data file location for Bloomberg reference data

    Args:
        ticker: ticker name
        fld: field
        has_date: whether add current date to data file
        cache: if has_date is True, whether to load file from latest cached
        ext: file extension
        **kwargs: other overrides passed to ref function

    Returns:
        file location

    Examples:
        >>> import shutil
        >>>
        >>> os.environ['BBG_ROOT'] = ''
        >>> ref_file('BLT LN Equity', fld='Crncy') == ''
        True
        >>> os.environ['BBG_ROOT'] = '/data/bbg'
        >>> ref_file('BLT LN Equity', fld='Crncy', cache=True)
        '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq'
        >>> ref_file('BLT LN Equity', fld='Crncy')
        ''
        >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ)
        >>> ref_file(
        ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True,
        ... ).replace(cur_dt, '[cur_date]')
        '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq'
        >>> ref_file(
        ...     'BLT LN Equity', fld='DVD_Hist_All', has_date=True,
        ...     cache=True, DVD_Start_Dt='20180101',
        ... ).replace(cur_dt, '[cur_date]')[:-5]
        '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101'
        >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
        >>> root_path = 'xbbg/tests/data'
        >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All'
        >>> os.environ['BBG_ROOT'] = root_path
        >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file)
        >>> files.create_folder(sub_path)
        >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path)
        True
        >>> new_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     has_date=True, cache=True, ext='pkl'
        ... )
        >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl'
        True
        >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl'
        >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file])
        >>> updated_file = old_full.replace('2018-11-02', cur_dt)
        >>> updated_file in shutil.copy(old_full, updated_file)
        True
        >>> exist_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     has_date=True, cache=True, ext='pkl'
        ... )
        >>> exist_file == updated_file
        False
        >>> exist_file = ref_file(
        ...     'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101',
        ...     DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl'
        ... )
        >>> exist_file == updated_file
        True
    """
    data_path = os.environ.get(assist.BBG_ROOT, '').replace('\\', '/')
    if (not data_path) or (not cache):
        return ''

    proper_ticker = ticker.replace('/', '_')
    cache_days = kwargs.pop('cache_days', 10)
    root = f'{data_path}/{ticker.split()[-1]}/{proper_ticker}/{fld}'

    if len(kwargs) > 0:
        info = utils.to_str(kwargs)[1:-1].replace('|', '_')
    else:
        info = 'ovrd=None'

    # Check date info
    if has_date:
        cur_dt = utils.cur_time()
        missing = f'{root}/asof={cur_dt}, {info}.{ext}'
        to_find = re.compile(rf'{root}/asof=(.*), {info}\.{ext}')
        cur_files = list(filter(to_find.match, sorted(
            files.all_files(path_name=root, keyword=info, ext=ext)
        )))
        if len(cur_files) > 0:
            upd_dt = to_find.match(cur_files[-1]).group(1)
            diff = pd.Timestamp('today') - pd.Timestamp(upd_dt)
            if diff >= pd.Timedelta(days=cache_days):
                return missing
            return sorted(cur_files)[-1]
        else:
            return missing
    else:
        return f'{root}/{info}.{ext}'
Data file location for Bloomberg reference data Args: ticker: ticker name fld: field has_date: whether add current date to data file cache: if has_date is True, whether to load file from latest cached ext: file extension **kwargs: other overrides passed to ref function Returns: file location Examples: >>> import shutil >>> >>> os.environ['BBG_ROOT'] = '' >>> ref_file('BLT LN Equity', fld='Crncy') == '' True >>> os.environ['BBG_ROOT'] = '/data/bbg' >>> ref_file('BLT LN Equity', fld='Crncy', cache=True) '/data/bbg/Equity/BLT LN Equity/Crncy/ovrd=None.parq' >>> ref_file('BLT LN Equity', fld='Crncy') '' >>> cur_dt = utils.cur_time(tz=utils.DEFAULT_TZ) >>> ref_file( ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True, cache=True, ... ).replace(cur_dt, '[cur_date]') '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], ovrd=None.parq' >>> ref_file( ... 'BLT LN Equity', fld='DVD_Hist_All', has_date=True, ... cache=True, DVD_Start_Dt='20180101', ... ).replace(cur_dt, '[cur_date]')[:-5] '/data/bbg/Equity/BLT LN Equity/DVD_Hist_All/asof=[cur_date], DVD_Start_Dt=20180101' >>> sample = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl' >>> root_path = 'xbbg/tests/data' >>> sub_path = f'{root_path}/Equity/AAPL US Equity/DVD_Hist_All' >>> os.environ['BBG_ROOT'] = root_path >>> for tmp_file in files.all_files(sub_path): os.remove(tmp_file) >>> files.create_folder(sub_path) >>> sample in shutil.copy(f'{root_path}/{sample}', sub_path) True >>> new_file = ref_file( ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101', ... has_date=True, cache=True, ext='pkl' ... ) >>> new_file.split('/')[-1] == f'asof={cur_dt}, DVD_Start_Dt=20180101.pkl' True >>> old_file = 'asof=2018-11-02, DVD_Start_Dt=20180101, DVD_End_Dt=20180501.pkl' >>> old_full = '/'.join(new_file.split('/')[:-1] + [old_file]) >>> updated_file = old_full.replace('2018-11-02', cur_dt) >>> updated_file in shutil.copy(old_full, updated_file) True >>> exist_file = ref_file( ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101', ... has_date=True, cache=True, ext='pkl' ... ) >>> exist_file == updated_file False >>> exist_file = ref_file( ... 'AAPL US Equity', 'DVD_Hist_All', DVD_Start_Dt='20180101', ... DVD_End_Dt='20180501', has_date=True, cache=True, ext='pkl' ... ) >>> exist_file == updated_file True
def _ExtractPathSpecsFromFile(self, file_entry): """Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file. """ produced_main_path_spec = False for data_stream in file_entry.data_streams: # Make a copy so we don't make the changes on a path specification # directly. Otherwise already produced path specifications can be # altered in the process. path_spec = copy.deepcopy(file_entry.path_spec) if data_stream.name: setattr(path_spec, 'data_stream', data_stream.name) yield path_spec if not data_stream.name: produced_main_path_spec = True if not produced_main_path_spec: yield file_entry.path_spec
Extracts path specification from a file. Args: file_entry (dfvfs.FileEntry): file entry that refers to the file. Yields: dfvfs.PathSpec: path specification of a file entry found in the file.
def remove_role(role):
    """Remove an action for a role."""
    def processor(action, argument):
        ActionRoles.query_by_action(action, argument=argument).filter(
            ActionRoles.role_id == role.id
        ).delete(synchronize_session=False)
    return processor
Remove an action for a role.
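The factory returns a processor closure rather than acting immediately, so the role is bound first and the action applied later; a rough usage sketch with illustrative names (the real action/argument values come from invenio-access):

processor = remove_role(role)             # bind the role once
processor('admin-access', argument=None)  # later, strip that action from it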
def set_time_zone(self, item):
    """
    Work out the time zone and create a shim tzinfo.

    We return True if all is good or False if there was an issue and we
    need to re-check the time zone. See issue #1375
    """
    # parse i3status date
    i3s_time = item["full_text"].encode("UTF-8", "replace")
    try:
        # python3 compatibility code
        i3s_time = i3s_time.decode()
    except:  # noqa e722
        pass

    # get datetime and time zone info
    parts = i3s_time.split()
    i3s_datetime = " ".join(parts[:2])
    # occasionally we do not get the timezone name
    if len(parts) < 3:
        return True
    else:
        i3s_time_tz = parts[2]

    date = datetime.strptime(i3s_datetime, TIME_FORMAT)
    # calculate the time delta
    utcnow = datetime.utcnow()
    delta = datetime(
        date.year, date.month, date.day, date.hour, date.minute
    ) - datetime(utcnow.year, utcnow.month, utcnow.day, utcnow.hour, utcnow.minute)
    # create our custom timezone
    try:
        self.tz = Tz(i3s_time_tz, delta)
    except ValueError:
        return False
    return True
Work out the time zone and create a shim tzinfo. We return True if all is good or False if there was an issue and we need to re-check the time zone. See issue #1375
def find_mof(self, classname):
    """
    Find the MOF file that defines a particular CIM class, in the search
    path of the MOF compiler.

    The MOF file is found based on its file name: It is assumed that the
    base part of the file name is the CIM class name.

    Example: The class "CIM_ComputerSystem" is expected to be in a file
    "CIM_ComputerSystem.mof".

    Parameters:

      classname (:term:`string`):
        The name of the CIM class to look up.

    Returns:

      :term:`string`: Path name of the MOF file defining the CIM class,
      if it was found. `None`, if it was not found.
    """
    classname = classname.lower()
    for search in self.parser.search_paths:
        for root, dummy_dirs, files in os.walk(search):
            for file_ in files:
                if file_.endswith('.mof') and \
                        file_[:-4].lower() == classname:
                    return os.path.join(root, file_)
    return None
Find the MOF file that defines a particular CIM class, in the search path of the MOF compiler. The MOF file is found based on its file name: It is assumed that the base part of the file name is the CIM class name. Example: The class "CIM_ComputerSystem" is expected to be in a file "CIM_ComputerSystem.mof". Parameters: classname (:term:`string`): The name of the CIM class to look up. Returns: :term:`string`: Path name of the MOF file defining the CIM class, if it was found. `None`, if it was not found.
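A usage sketch, assuming `mofcomp` is a compiler instance whose parser.search_paths already points at a directory tree of DMTF MOF files; the lookup is purely name-based and case-insensitive (CIM_ComputerSystem -> CIM_ComputerSystem.mof):

path = mofcomp.find_mof('CIM_ComputerSystem')
if path is None:
    print('class not found on the search path')
else:
    print('defined in', path)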
def to_native(self, obj, name, value): # pylint:disable=unused-argument """Transform the MongoDB value into a Marrow Mongo value.""" if self.mapping: for original, new in self.mapping.items(): value = value.replace(original, new) return load(value, self.namespace)
Transform the MongoDB value into a Marrow Mongo value.
def score_samples(self, X): """Return the log-likelihood of each sample. See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X : array, shape(n_samples, n_features) The data. Returns ------- ll : array, shape (n_samples,) Log-likelihood of each sample under the current model """ check_is_fitted(self, "mean_") # X = check_array(X) Xr = X - self.mean_ n_features = X.shape[1] precision = self.get_precision() # [n_features, n_features] log_like = -0.5 * (Xr * (da.dot(Xr, precision))).sum(axis=1) log_like -= 0.5 * (n_features * da.log(2.0 * np.pi) - fast_logdet(precision)) return log_like
Return the log-likelihood of each sample. See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X : array, shape(n_samples, n_features) The data. Returns ------- ll : array, shape (n_samples,) Log-likelihood of each sample under the current model
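For reference, the quantity being computed is the standard multivariate-Gaussian log-density under the fitted model: with x_r = x - mean_, precision matrix Lambda = get_precision() and d = n_features, each row's score is

\ln p(x) = -\tfrac{1}{2}\, x_r^{\top} \Lambda\, x_r
           - \tfrac{d}{2}\,\ln(2\pi)
           + \tfrac{1}{2}\,\ln\det\Lambda

which matches the code term by term: the quadratic form, the -0.5 * n_features * log(2*pi) constant, and fast_logdet(precision) supplying ln det Lambda.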
def flush_redis_unsafe(redis_client=None): """This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called. """ if redis_client is None: ray.worker.global_worker.check_connected() redis_client = ray.worker.global_worker.redis_client # Delete the log files from the primary Redis shard. keys = redis_client.keys("LOGFILE:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} log files from Redis.".format(num_deleted)) # Delete the event log from the primary Redis shard. keys = redis_client.keys("event_log:*") if len(keys) > 0: num_deleted = redis_client.delete(*keys) else: num_deleted = 0 print("Deleted {} event logs from Redis.".format(num_deleted))
This removes some non-critical state from the primary Redis shard. This removes the log files as well as the event log from Redis. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, it will only partially address the issue as much of the data is in the task table (and object table), which are not flushed. Args: redis_client: optional, if not provided then ray.init() must have been called.
def get_pk(self, field_val): """convenience method for running is_pk(_id).get_one() since this is so common""" field_name = self.schema.pk.name return self.is_field(field_name, field_val).get_one()
convenience method for running is_pk(_id).get_one() since this is so common
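In other words, the two calls below are equivalent, assuming a query object whose schema's primary key column is named `_id` (illustrative):

row = query.get_pk(42)
row = query.is_field('_id', 42).get_one()  # what get_pk expands to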
def create_object_id(collection, vault, name, version): """ :param collection: The resource collection type. :type collection: str :param vault: The vault URI. :type vault: str :param name: The resource name. :type name: str :param version: The resource version. :type version: str :rtype: KeyVaultId """ collection = _validate_string_argument(collection, 'collection') vault = _validate_string_argument(vault, 'vault') name = _validate_string_argument(name, 'name') version = _validate_string_argument(version, 'version', True) _parse_uri_argument(vault) # check that vault is a valid URI but don't change it return KeyVaultIdentifier(collection=collection, vault=vault, name=name, version=version)
:param collection: The resource collection type. :type collection: str :param vault: The vault URI. :type vault: str :param name: The resource name. :type name: str :param version: The resource version. :type version: str :rtype: KeyVaultId
def parse_options(): """ Commandline options arguments parsing. """ # build options and help version = "%%prog {version}".format(version=__version__) parser = OptionParser(version=version) parser.add_option( "-u", "--username", action="store", dest="username", type="string", default="", metavar="RECIPIENT", help="user" ) parser.add_option( "-C", "--calendar", metavar="CALENDAR", action="store", type="string", dest="calendar", default="", help="google calendar ID" ) parser.add_option( "-t", "--timezone", metavar="TIMEZONE", action="store", type="string", dest="timezone", default="", help="user timezone" ) parser.add_option( "-m", "--message", metavar="MESSAGE", action="store", type="string", dest="message", default="", help="message text" ) parser.add_option( "-c", "--config", metavar="CONFIG", action="store", type="string", dest="config", help="path to config file", default="/etc/nagios/notification_google_calendar.ini") parser.add_option( "-q", "--quiet", metavar="QUIET", action="store_true", default=False, dest="quiet", help="be quiet" ) parser.add_option( "-g", "--get-google-credentials", metavar="GET-GOOGLE-CREDENTIALS", action="store_true", default=False, dest="get_google_credentials", help="get google API credentials for user" ) options = parser.parse_args(sys.argv)[0] mandatories = ["username", ] # check mandatory command line options supplied if not options.get_google_credentials: mandatories.append("calendar") # set calendar option required when sending message mandatories.append("message") # set message option required when sending message mandatories.append("timezone") # set timezone option required when sending message if not all(options.__dict__[mandatory] for mandatory in mandatories): parser.error("Required command line option missing\n") return options
Commandline options arguments parsing.
def _load_hooks_settings(self): """load hooks settings""" log.debug("executing _load_hooks_settings") hook_show_widget = self.get_widget("hook_show") hook_show_setting = self.settings.hooks.get_string("show") if hook_show_widget is not None: if hook_show_setting is not None: hook_show_widget.set_text(hook_show_setting)
load hooks settings
def poll(self): """ Poll for coordinator events. Only applicable if group_id is set, and broker version supports GroupCoordinators. This ensures that the coordinator is known, and if using automatic partition assignment, ensures that the consumer has joined the group. This also handles periodic offset commits if they are enabled. """ if self.group_id is None or self.config['api_version'] < (0, 8, 2): return self._invoke_completed_offset_commit_callbacks() self.ensure_coordinator_ready() if self.config['api_version'] >= (0, 9) and self._subscription.partitions_auto_assigned(): if self.need_rejoin(): # due to a race condition between the initial metadata fetch and the # initial rebalance, we need to ensure that the metadata is fresh # before joining initially, and then request the metadata update. If # metadata update arrives while the rebalance is still pending (for # example, when the join group is still inflight), then we will lose # track of the fact that we need to rebalance again to reflect the # change to the topic subscription. Without ensuring that the # metadata is fresh, any metadata update that changes the topic # subscriptions and arrives while a rebalance is in progress will # essentially be ignored. See KAFKA-3949 for the complete # description of the problem. if self._subscription.subscribed_pattern: metadata_update = self._client.cluster.request_update() self._client.poll(future=metadata_update) self.ensure_active_group() self.poll_heartbeat() self._maybe_auto_commit_offsets_async()
Poll for coordinator events. Only applicable if group_id is set, and broker version supports GroupCoordinators. This ensures that the coordinator is known, and if using automatic partition assignment, ensures that the consumer has joined the group. This also handles periodic offset commits if they are enabled.
def set_terms(self, *terms, **kw_terms):
    """
    Create or set top level terms in the section. After python 3.6.0, the
    terms entries should maintain the same order as the argument list.

    The term arguments can have any of these forms:

    * For positional arguments, a Term object
    * For kw arguments:
        - 'TermName=TermValue'
        - 'TermName=(TermValue, PropertyDict)'

    Positional arguments are processed before keyword arguments, and are
    passed into .add_term()

    :param terms: Term arguments
    :return:
    """
    for t in terms:
        self.add_term(t)

    for k, v in kw_terms.items():
        try:
            value, props = v
        except (ValueError, TypeError):
            value, props = v, {}

        self.new_term(k, value, **props)
Create or set top level terms in the section. After python 3.6.0, the terms entries should maintain the same order as the argument list. The term arguments can have any of these forms: * For positional arguments, a Term object * For kw arguments: - 'TermName=TermValue' - 'TermName=(TermValue, PropertyDict)' Positional arguments are processed before keyword arguments, and are passed into .add_term() :param terms: Term arguments :return:
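A usage sketch showing both keyword forms side by side; `section` and the term names are illustrative (real metatab sections define their own vocabulary), and the tuple form carries a property dict that is splatted into new_term:

section.set_terms(
    Name='example-package',                          # TermName=TermValue
    Description=('A demo package', {'lang': 'en'}),  # TermName=(TermValue, PropertyDict)
)
# The second entry ends up as:
#   section.new_term('Description', 'A demo package', lang='en')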
def getSearchUrl(self, album, artist): """ See CoverSource.getSearchUrl. """ params = collections.OrderedDict() params["search-alias"] = "popular" params["field-artist"] = artist params["field-title"] = album params["sort"] = "relevancerank" return __class__.assembleUrl(self.base_url, params)
See CoverSource.getSearchUrl.
def get_certificates( self, vault_base_url, maxresults=None, include_pending=None, custom_headers=None, raw=False, **operation_config): """List certificates in a specified key vault. The GetCertificates operation returns the set of certificates resources in the specified key vault. This operation requires the certificates/list permission. :param vault_base_url: The vault name, for example https://myvault.vault.azure.net. :type vault_base_url: str :param maxresults: Maximum number of results to return in a page. If not specified the service will return up to 25 results. :type maxresults: int :param include_pending: Specifies whether to include certificates which are not completely provisioned. :type include_pending: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of CertificateItem :rtype: ~azure.keyvault.v7_0.models.CertificateItemPaged[~azure.keyvault.v7_0.models.CertificateItem] :raises: :class:`KeyVaultErrorException<azure.keyvault.v7_0.models.KeyVaultErrorException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.get_certificates.metadata['url'] path_format_arguments = { 'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1) if include_pending is not None: query_parameters['includePending'] = self._serialize.query("include_pending", include_pending, 'bool') query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.KeyVaultErrorException(self._deserialize, response) return response # Deserialize response deserialized = models.CertificateItemPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.CertificateItemPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
List certificates in a specified key vault. The GetCertificates operation returns the set of certificates resources in the specified key vault. This operation requires the certificates/list permission. :param vault_base_url: The vault name, for example https://myvault.vault.azure.net. :type vault_base_url: str :param maxresults: Maximum number of results to return in a page. If not specified the service will return up to 25 results. :type maxresults: int :param include_pending: Specifies whether to include certificates which are not completely provisioned. :type include_pending: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of CertificateItem :rtype: ~azure.keyvault.v7_0.models.CertificateItemPaged[~azure.keyvault.v7_0.models.CertificateItem] :raises: :class:`KeyVaultErrorException<azure.keyvault.v7_0.models.KeyVaultErrorException>`
def p_opt_order(self, p): '''opt_order : | ORDER LPAREN IDENTIFIER RPAREN''' if len(p) > 1: if p[3] not in 'CF': raise PythranSyntaxError("Invalid Pythran spec. " "Unknown order '{}'".format(p[3])) p[0] = p[3] else: p[0] = None
opt_order : | ORDER LPAREN IDENTIFIER RPAREN
def plot(self, x=None, y=None, z=None, what="count(*)", vwhat=None, reduce=["colormap"], f=None,
         normalize="normalize", normalize_axis="what",
         vmin=None, vmax=None,
         shape=256, vshape=32, limits=None, grid=None, colormap="afmhot",  # colors=["red", "green", "blue"],
         figsize=None, xlabel=None, ylabel=None, aspect="auto", tight_layout=True, interpolation="nearest", show=False,
         colorbar=True,
         colorbar_label=None,
         selection=None, selection_labels=None, title=None,
         background_color="white", pre_blend=False, background_alpha=1.,
         visual=dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what"),
         smooth_pre=None, smooth_post=None,
         wrap=True, wrap_columns=4,
         return_extra=False, hardcopy=None):
    """Viz data in a 2d histogram/heatmap.

    Declarative plotting of statistical plots using matplotlib, supports subplots, selections, layers.

    Instead of passing x and y, pass a list as x argument for multiple panels. Give what a list of options to have multiple
    panels. When both are present they will be organized in a column/row order.

    This method creates a 6 dimensional 'grid', where each dimension can map to a visual dimension.
    The grid dimensions are:

    * x: shape determined by shape, content by x argument or the first dimension of each space
    * y: ,,
    * z: related to the z argument
    * selection: shape equals length of selection argument
    * what: shape equals length of what argument
    * space: shape equals length of x argument if multiple values are given

    By default, its shape is (1, 1, 1, 1, shape, shape) (where x is the last dimension)

    The visual dimensions are

    * x: x coordinate on a plot / image (default maps to grid's x)
    * y: y ,, (default maps to grid's y)
    * layer: each image in this dimension is blended together into one image (default maps to z)
    * fade: each image is shown faded after the next image (default maps to selection)
    * row: rows of subplots (default maps to space)
    * columns: columns of subplot (default maps to what)

    All these mappings can be changed by the visual argument, some examples:

    >>> df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)'])

    Will plot each 'what' as a column.

    >>> df.plot('x', 'y', selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'], visual=dict(column='selection'))

    Will plot each selection as a column, instead of faded on top of each other.

    :param x: Expression to bin in the x direction (by default maps to x), or list of pairs, like [['x', 'y'], ['x', 'z']], if multiple pairs are given, this dimension maps to rows by default
    :param y: y (by default maps to y)
    :param z: Expression to bin in the z direction, followed by a :start,end,shape signature, like 'FeH:-3,1:5' will produce 5 layers between -3 and 1 (by default maps to layer)
    :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')], (by default maps to column)
    :param reduce:
    :param f: transform values by: 'identity' does nothing, 'log' or 'log10' will show the log of the value
    :param normalize: normalization function, currently only 'normalize' is supported
    :param normalize_axis: which axes to normalize on, None means normalize by the global maximum.
    :param vmin: instead of automatic normalization (using normalize and normalize_axis), scale the data between vmin and vmax to [0, 1]
    :param vmax: see vmin
    :param shape: shape/size of the n-D histogram grid
    :param limits: list of [[xmin, xmax], [ymin, ymax]], or a description such as 'minmax', '99%'
    :param grid: if the binning is done before by yourself, you can pass it
    :param colormap: matplotlib colormap to use
    :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
    :param xlabel:
    :param ylabel:
    :param aspect:
    :param tight_layout: call pylab.tight_layout or not
    :param colorbar: plot a colorbar or not
    :param interpolation: interpolation for imshow, possible options are: 'nearest', 'bilinear', 'bicubic', see matplotlib for more
    :param return_extra:
    :return:
    """
    import pylab
    import matplotlib
    n = _parse_n(normalize)
    if type(shape) == int:
        shape = (shape,) * 2
    binby = []
    x = _ensure_strings_from_expressions(x)
    y = _ensure_strings_from_expressions(y)
    for expression in [y, x]:
        if expression is not None:
            binby = [expression] + binby
    fig = pylab.gcf()
    if figsize is not None:
        fig.set_size_inches(*figsize)

    import re

    what_units = None
    whats = _ensure_list(what)
    selections = _ensure_list(selection)
    selections = _ensure_strings_from_expressions(selections)

    if y is None:
        waslist, [x, ] = vaex.utils.listify(x)
    else:
        waslist, [x, y] = vaex.utils.listify(x, y)
        x = list(zip(x, y))
        limits = [limits]

    # every plot has its own vwhat for now
    vwhats = _expand_limits(vwhat, len(x))  # TODO: we're abusing this function..
    logger.debug("x: %s", x)
    limits, shape = self.limits(x, limits, shape=shape)
    shape = shape[0]
    logger.debug("limits: %r", limits)

    # mapping of a grid axis to a label
    labels = {}
    shape = _expand_shape(shape, 2)
    vshape = _expand_shape(shape, 2)
    if z is not None:
        match = re.match("(.*):(.*),(.*),(.*)", z)
        if match:
            groups = match.groups()
            import ast
            z_expression = groups[0]
            logger.debug("found groups: %r", list(groups))
            z_limits = [ast.literal_eval(groups[1]), ast.literal_eval(groups[2])]
            z_shape = ast.literal_eval(groups[3])
            # for pair in x:
            x = [[z_expression] + list(k) for k in x]
            limits = np.array([[z_limits] + list(k) for k in limits])
            shape = (z_shape,) + shape
            vshape = (z_shape,) + vshape
            logger.debug("x = %r", x)
            values = np.linspace(z_limits[0], z_limits[1], num=z_shape + 1)
            labels["z"] = list(["%s <= %s < %s" % (v1, z_expression, v2) for v1, v2 in zip(values[:-1], values[1:])])
        else:
            raise ValueError("Could not understand 'z' argument %r, expected something in form: 'column:-1,10:5'" % z)
    else:
        z_shape = 1  # z == 1

    if z is None:
        total_grid = np.zeros((len(x), len(whats), len(selections), 1) + shape, dtype=float)
        total_vgrid = np.zeros((len(x), len(whats), len(selections), 1) + vshape, dtype=float)
    else:
        total_grid = np.zeros((len(x), len(whats), len(selections)) + shape, dtype=float)
        total_vgrid = np.zeros((len(x), len(whats), len(selections)) + vshape, dtype=float)
    logger.debug("shape of total grid: %r", total_grid.shape)
    axis = dict(plot=0, what=1, selection=2)
    xlimits = limits

    grid_axes = dict(x=-1, y=-2, z=-3, selection=-4, what=-5, subspace=-6)
    visual_axes = dict(x=-1, y=-2, layer=-3, fade=-4, column=-5, row=-6)
    # visual_default=dict(x="x", y="y", z="layer", selection="fade", subspace="row", what="column")
    # visual: mapping of a plot axis, to a grid axis
    visual_default = dict(x="x", y="y", layer="z", fade="selection", row="subspace", column="what")

    def invert(x): return dict((v, k) for k, v in x.items())
    # visual_default_reverse = invert(visual_default)
    #
visual_ = visual_default # visual = dict(visual) # copy for modification # add entries to avoid mapping multiple times to the same axis free_visual_axes = list(visual_default.keys()) # visual_reverse = invert(visual) logger.debug("1: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual.items(): if visual_name in free_visual_axes: free_visual_axes.remove(visual_name) else: raise ValueError("visual axes %s used multiple times" % visual_name) logger.debug("2: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name in free_visual_axes and grid_name not in visual.values(): free_visual_axes.remove(visual_name) visual[visual_name] = grid_name logger.debug("3: %r %r", visual, free_visual_axes) for visual_name, grid_name in visual_default.items(): if visual_name not in free_visual_axes and grid_name not in visual.values(): visual[free_visual_axes.pop(0)] = grid_name logger.debug("4: %r %r", visual, free_visual_axes) visual_reverse = invert(visual) # TODO: the meaning of visual and visual_reverse is changed below this line, super confusing visual, visual_reverse = visual_reverse, visual # so now, visual: mapping of a grid axis to plot axis # visual_reverse: mapping of a grid axis to plot axis move = {} for grid_name, visual_name in visual.items(): if visual_axes[visual_name] in visual.values(): index = visual.values().find(visual_name) key = visual.keys()[index] raise ValueError("trying to map %s to %s while, it is already mapped by %s" % (grid_name, visual_name, key)) move[grid_axes[grid_name]] = visual_axes[visual_name] # normalize_axis = _ensure_list(normalize_axis) fs = _expand(f, total_grid.shape[grid_axes[normalize_axis]]) # assert len(vwhat) # labels["y"] = ylabels what_labels = [] if grid is None: grid_of_grids = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): grid_of_grids.append([]) for j, what in enumerate(whats): if isinstance(what, vaex.stat.Expression): grid = what.calculate(self, binby=binby, shape=shape, limits=limits, selection=selections, delay=True) else: what = what.strip() index = what.index("(") import re groups = re.match("(.*)\((.*)\)", what).groups() if groups and len(groups) == 2: function = groups[0] arguments = groups[1].strip() if "," in arguments: arguments = arguments.split(",") functions = ["mean", "sum", "std", "var", "correlation", "covar", "min", "max", "median_approx"] unit_expression = None if function in ["mean", "sum", "std", "min", "max", "median"]: unit_expression = arguments if function in ["var"]: unit_expression = "(%s) * (%s)" % (arguments, arguments) if function in ["covar"]: unit_expression = "(%s) * (%s)" % arguments if unit_expression: unit = self.unit(unit_expression) if unit: what_units = unit.to_string('latex_inline') if function in functions: grid = getattr(self, function)(arguments, binby=binby, limits=limits, shape=shape, selection=selections, delay=True) elif function == "count": grid = self.count(arguments, binby, shape=shape, limits=limits, selection=selections, delay=True) else: raise ValueError("Could not understand method: %s, expected one of %r'" % (function, functions)) else: raise ValueError("Could not understand 'what' argument %r, expected something in form: 'count(*)', 'mean(x)'" % what) if i == 0: # and j == 0: what_label = str(whats[j]) if what_units: what_label += " (%s)" % what_units if fs[j]: what_label = fs[j] + " " + what_label what_labels.append(what_label) grid_of_grids[-1].append(grid) self.executor.execute() for i, (binby, limits) in enumerate(zip(x, 
xlimits)): for j, what in enumerate(whats): grid = grid_of_grids[i][j].get() total_grid[i, j, :, :] = grid[:, None, ...] labels["what"] = what_labels else: dims_left = 6 - len(grid.shape) total_grid = np.broadcast_to(grid, (1,) * dims_left + grid.shape) # visual=dict(x="x", y="y", selection="fade", subspace="facet1", what="facet2",) def _selection_name(name): if name in [None, False]: return "selection: all" elif name in ["default", True]: return "selection: default" else: return "selection: %s" % name if selection_labels is None: labels["selection"] = list([_selection_name(k) for k in selections]) else: labels["selection"] = selection_labels # visual_grid = np.moveaxis(total_grid, move.keys(), move.values()) # np.moveaxis is in np 1.11 only?, use transpose axes = [None] * len(move) for key, value in move.items(): axes[value] = key visual_grid = np.transpose(total_grid, axes) logger.debug("grid shape: %r", total_grid.shape) logger.debug("visual: %r", visual.items()) logger.debug("move: %r", move) logger.debug("visual grid shape: %r", visual_grid.shape) xexpressions = [] yexpressions = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): xexpressions.append(binby[0]) yexpressions.append(binby[1]) if xlabel is None: xlabels = [] ylabels = [] for i, (binby, limits) in enumerate(zip(x, xlimits)): if z is not None: xlabels.append(self.label(binby[1])) ylabels.append(self.label(binby[2])) else: xlabels.append(self.label(binby[0])) ylabels.append(self.label(binby[1])) else: Nl = visual_grid.shape[visual_axes['row']] xlabels = _expand(xlabel, Nl) ylabels = _expand(ylabel, Nl) #labels[visual["x"]] = (xlabels, ylabels) labels["x"] = xlabels labels["y"] = ylabels # grid = total_grid # print(grid.shape) # grid = self.reduce(grid, ) axes = [] # cax = pylab.subplot(1,1,1) background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color)) # if grid.shape[axis["selection"]] > 1:# and not facet: # rgrid = vaex.image.fade(rgrid) # finite_mask = np.any(finite_mask, axis=0) # do we really need this # print(rgrid.shape) # facet_row_axis = axis["what"] import math facet_columns = None facets = visual_grid.shape[visual_axes["row"]] * visual_grid.shape[visual_axes["column"]] if visual_grid.shape[visual_axes["column"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["row"]]) wrapped = True elif visual_grid.shape[visual_axes["row"]] == 1 and wrap: facet_columns = min(wrap_columns, visual_grid.shape[visual_axes["column"]]) wrapped = True else: wrapped = False facet_columns = visual_grid.shape[visual_axes["column"]] facet_rows = int(math.ceil(facets / facet_columns)) logger.debug("facet_rows: %r", facet_rows) logger.debug("facet_columns: %r", facet_columns) # if visual_grid.shape[visual_axes["row"]] > 1: # and not wrap: # #facet_row_axis = axis["what"] # facet_columns = visual_grid.shape[visual_axes["column"]] # else: # facet_columns = min(wrap_columns, facets) # if grid.shape[axis["plot"]] > 1:# and not facet: # this loop could be done using axis arguments everywhere # assert len(normalize_axis) == 1, "currently only 1 normalization axis supported" grid = visual_grid * 1. fgrid = visual_grid * 1. ngrid = visual_grid * 1. 
# colorgrid = np.zeros(ngrid.shape + (4,), float) # print "norma", normalize_axis, visual_grid.shape[visual_axes[visual[normalize_axis]]] vmins = _expand(vmin, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) vmaxs = _expand(vmax, visual_grid.shape[visual_axes[visual[normalize_axis]]], type=list) # for name in normalize_axis: visual_grid if smooth_pre: grid = vaex.grids.gf(grid, smooth_pre) if 1: axis = visual_axes[visual[normalize_axis]] for i in range(visual_grid.shape[axis]): item = [slice(None, None, None), ] * len(visual_grid.shape) item[axis] = i item = tuple(item) f = _parse_f(fs[i]) with np.errstate(divide='ignore', invalid='ignore'): # these are fine, we are ok with nan's in vaex fgrid.__setitem__(item, f(grid.__getitem__(item))) # print vmins[i], vmaxs[i] if vmins[i] is not None and vmaxs[i] is not None: nsubgrid = fgrid.__getitem__(item) * 1 nsubgrid -= vmins[i] nsubgrid /= (vmaxs[i] - vmins[i]) else: nsubgrid, vmin, vmax = n(fgrid.__getitem__(item)) vmins[i] = vmin vmaxs[i] = vmax # print " ", vmins[i], vmaxs[i] ngrid.__setitem__(item, nsubgrid) if 0: # TODO: above should be like the code below, with custom vmin and vmax grid = visual_grid[i] f = _parse_f(fs[i]) fgrid = f(grid) finite_mask = np.isfinite(grid) finite_mask = np.any(finite_mask, axis=0) if vmin is not None and vmax is not None: ngrid = fgrid * 1 ngrid -= vmin ngrid /= (vmax - vmin) ngrid = np.clip(ngrid, 0, 1) else: ngrid, vmin, vmax = n(fgrid) # vmin, vmax = np.nanmin(fgrid), np.nanmax(fgrid) # every 'what', should have its own colorbar, check if what corresponds to # rows or columns in facets, if so, do a colorbar per row or per column rows, columns = int(math.ceil(facets / float(facet_columns))), facet_columns colorbar_location = "individual" if visual["what"] == "row" and visual_grid.shape[1] == facet_columns: colorbar_location = "per_row" if visual["what"] == "column" and visual_grid.shape[0] == facet_rows: colorbar_location = "per_column" # values = np.linspace(facet_limits[0], facet_limits[1], facet_count+1) logger.debug("rows: %r, columns: %r", rows, columns) import matplotlib.gridspec as gridspec column_scale = 1 row_scale = 1 row_offset = 0 if facets > 1: if colorbar_location == "per_row": column_scale = 4 gs = gridspec.GridSpec(rows, columns * column_scale + 1) elif colorbar_location == "per_column": row_offset = 1 row_scale = 4 gs = gridspec.GridSpec(rows * row_scale + 1, columns) else: gs = gridspec.GridSpec(rows, columns) facet_index = 0 fs = _expand(f, len(whats)) colormaps = _expand(colormap, len(whats)) # row for i in range(visual_grid.shape[0]): # column for j in range(visual_grid.shape[1]): if colorbar and colorbar_location == "per_column" and i == 0: norm = matplotlib.colors.Normalize(vmins[j], vmaxs[j]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[j]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[0, j]) colorbar = fig.colorbar(sm, cax=ax, orientation="horizontal") else: colorbar = fig.colorbar(sm) if "what" in labels: label = labels["what"][j] if facets > 1: colorbar.ax.set_title(label) else: colorbar.ax.set_ylabel(colorbar_label or label) if colorbar and colorbar_location == "per_row" and j == 0: norm = matplotlib.colors.Normalize(vmins[i], vmaxs[i]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[i]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[i, -1]) colorbar = fig.colorbar(sm, cax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][i] 
colorbar.ax.set_ylabel(colorbar_label or label) rgrid = ngrid[i, j] * 1. # print rgrid.shape for k in range(rgrid.shape[0]): for l in range(rgrid.shape[0]): if smooth_post is not None: rgrid[k, l] = vaex.grids.gf(rgrid, smooth_post) if visual["what"] == "column": what_index = j elif visual["what"] == "row": what_index = i else: what_index = 0 if visual[normalize_axis] == "column": normalize_index = j elif visual[normalize_axis] == "row": normalize_index = i else: normalize_index = 0 for r in reduce: r = _parse_reduction(r, colormaps[what_index], []) rgrid = r(rgrid) row = facet_index // facet_columns column = facet_index % facet_columns if colorbar and colorbar_location == "individual": # visual_grid.shape[visual_axes[visual[normalize_axis]]] norm = matplotlib.colors.Normalize(vmins[normalize_index], vmaxs[normalize_index]) sm = matplotlib.cm.ScalarMappable(norm, colormaps[what_index]) sm.set_array(1) # make matplotlib happy (strange behavious) if facets > 1: ax = pylab.subplot(gs[row, column]) colorbar = fig.colorbar(sm, ax=ax) else: colorbar = fig.colorbar(sm) label = labels["what"][what_index] colorbar.ax.set_ylabel(colorbar_label or label) if facets > 1: ax = pylab.subplot(gs[row_offset + row * row_scale:row_offset + (row + 1) * row_scale, column * column_scale:(column + 1) * column_scale]) else: ax = pylab.gca() axes.append(ax) logger.debug("rgrid: %r", rgrid.shape) plot_rgrid = rgrid assert plot_rgrid.shape[1] == 1, "no layers supported yet" plot_rgrid = plot_rgrid[:, 0] if plot_rgrid.shape[0] > 1: plot_rgrid = vaex.image.fade(plot_rgrid[::-1]) else: plot_rgrid = plot_rgrid[0] extend = None if visual["subspace"] == "row": subplot_index = i elif visual["subspace"] == "column": subplot_index = j else: subplot_index = 0 extend = np.array(xlimits[subplot_index][-2:]).flatten() # extend = np.array(xlimits[i]).flatten() logger.debug("plot rgrid: %r", plot_rgrid.shape) plot_rgrid = np.transpose(plot_rgrid, (1, 0, 2)) im = ax.imshow(plot_rgrid, extent=extend.tolist(), origin="lower", aspect=aspect, interpolation=interpolation) # v1, v2 = values[i], values[i+1] def label(index, label, expression): if label and _issequence(label): return label[i] else: return self.label(expression) if visual_reverse["x"] =='x': labelsx = labels['x'] pylab.xlabel(labelsx[subplot_index]) if visual_reverse["x"] =='x': labelsy = labels['y'] pylab.ylabel(labelsy[subplot_index]) if visual["z"] in ['row']: labelsz = labels['z'] ax.set_title(labelsz[i]) if visual["z"] in ['column']: labelsz = labels['z'] ax.set_title(labelsz[j]) max_labels = 10 # xexpression = xexpressions[i] # if self.iscategory(xexpression): # labels = self.category_labels(xexpression) # step = len(labels) // max_labels # pylab.xticks(np.arange(len(labels))[::step], labels[::step], size='small') # yexpression = yexpressions[i] # if self.iscategory(yexpression): # labels = self.category_labels(yexpression) # step = len(labels) // max_labels # pylab.yticks(np.arange(len(labels))[::step], labels[::step], size='small') facet_index += 1 if title: fig.suptitle(title, fontsize="x-large") if tight_layout: if title: pylab.tight_layout(rect=[0, 0.03, 1, 0.95]) else: pylab.tight_layout() if hardcopy: pylab.savefig(hardcopy) if show: pylab.show() if return_extra: return im, grid, fgrid, ngrid, rgrid else: return im
Viz data in a 2d histogram/heatmap. Declarative plotting of statistical plots using matplotlib, supports subplots, selections, layers. Instead of passing x and y, pass a list as x argument for multiple panels. Give what a list of options to have multiple panels. When both are present they will be organized in a column/row order. This method creates a 6 dimensional 'grid', where each dimension can map to a visual dimension. The grid dimensions are: * x: shape determined by shape, content by x argument or the first dimension of each space * y: ,, * z: related to the z argument * selection: shape equals length of selection argument * what: shape equals length of what argument * space: shape equals length of x argument if multiple values are given By default, its shape is (1, 1, 1, 1, shape, shape) (where x is the last dimension) The visual dimensions are * x: x coordinate on a plot / image (default maps to grid's x) * y: y ,, (default maps to grid's y) * layer: each image in this dimension is blended together into one image (default maps to z) * fade: each image is shown faded after the next image (default maps to selection) * row: rows of subplots (default maps to space) * columns: columns of subplot (default maps to what) All these mappings can be changed by the visual argument, some examples: >>> df.plot('x', 'y', what=['mean(x)', 'correlation(vx, vy)']) Will plot each 'what' as a column. >>> df.plot('x', 'y', selection=['FeH < -3', '(FeH >= -3) & (FeH < -2)'], visual=dict(column='selection')) Will plot each selection as a column, instead of faded on top of each other. :param x: Expression to bin in the x direction (by default maps to x), or list of pairs, like [['x', 'y'], ['x', 'z']], if multiple pairs are given, this dimension maps to rows by default :param y: y (by default maps to y) :param z: Expression to bin in the z direction, followed by a :start,end,shape signature, like 'FeH:-3,1:5' will produce 5 layers between -3 and 1 (by default maps to layer) :param what: What to plot, count(*) will show a N-d histogram, mean('x'), the mean of the x column, sum('x') the sum, std('x') the standard deviation, correlation('vx', 'vy') the correlation coefficient. Can also be a list of values, like ['count(x)', std('vx')], (by default maps to column) :param reduce: :param f: transform values by: 'identity' does nothing, 'log' or 'log10' will show the log of the value :param normalize: normalization function, currently only 'normalize' is supported :param normalize_axis: which axes to normalize on, None means normalize by the global maximum. :param vmin: instead of automatic normalization (using normalize and normalize_axis), scale the data between vmin and vmax to [0, 1] :param vmax: see vmin :param shape: shape/size of the n-D histogram grid :param limits: list of [[xmin, xmax], [ymin, ymax]], or a description such as 'minmax', '99%' :param grid: if the binning is done before by yourself, you can pass it :param colormap: matplotlib colormap to use :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size :param xlabel: :param ylabel: :param aspect: :param tight_layout: call pylab.tight_layout or not :param colorbar: plot a colorbar or not :param interpolation: interpolation for imshow, possible options are: 'nearest', 'bilinear', 'bicubic', see matplotlib for more :param return_extra: :return:
def oauth2_token_setter(remote, resp, token_type='', extra_data=None): """Set an OAuth2 token. The refresh_token can be used to obtain a new access_token after the old one is expired. It is saved in the database for long term use. A refresh_token will be present only if `access_type=offline` is included in the authorization code request. :param remote: The remote application. :param resp: The response. :param token_type: The token type. (Default: ``''``) :param extra_data: Extra information. (Default: ``None``) :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance. """ return token_setter( remote, resp['access_token'], secret='', token_type=token_type, extra_data=extra_data, )
Set an OAuth2 token. The refresh_token can be used to obtain a new access_token after the old one is expired. It is saved in the database for long term use. A refresh_token will be present only if `access_type=offline` is included in the authorization code request. :param remote: The remote application. :param resp: The response. :param token_type: The token type. (Default: ``''``) :param extra_data: Extra information. (Default: ``None``) :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
def save(self, filename): """ Saves the xml data to the inputed filename. :param filename | <str> """ projex.text.xmlindent(self.xmlElement()) try: f = open(filename, 'w') except IOError: logger.exception('Could not save file: %s' % filename) return False f.write(self.toString()) f.close() return True
Saves the xml data to the inputed filename. :param filename | <str>
def addFASTACommandLineOptions(parser): """ Add standard command-line options to an argparse parser. @param parser: An C{argparse.ArgumentParser} instance. """ parser.add_argument( '--fastaFile', type=open, default=sys.stdin, metavar='FILENAME', help=('The name of the FASTA input file. Standard input will be read ' 'if no file name is given.')) parser.add_argument( '--readClass', default='DNARead', choices=readClassNameToClass, metavar='CLASSNAME', help=('If specified, give the type of the reads in the input. ' 'Possible choices: %s.' % ', '.join(readClassNameToClass))) # A mutually exclusive group for either --fasta, --fastq, or --fasta-ss group = parser.add_mutually_exclusive_group() group.add_argument( '--fasta', default=False, action='store_true', help=('If specified, input will be treated as FASTA. This is the ' 'default.')) group.add_argument( '--fastq', default=False, action='store_true', help='If specified, input will be treated as FASTQ.') group.add_argument( '--fasta-ss', dest='fasta_ss', default=False, action='store_true', help=('If specified, input will be treated as PDB FASTA ' '(i.e., regular FASTA with each sequence followed by its ' 'structure).'))
Add standard command-line options to an argparse parser. @param parser: An C{argparse.ArgumentParser} instance.
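A sketch of wiring this into a script; with no --fastaFile argument the reads come from standard input, and args.fastq / args.fasta / args.fasta_ss record which of the mutually exclusive formats was chosen:

import argparse

parser = argparse.ArgumentParser(description='Example FASTA/FASTQ reader.')
addFASTACommandLineOptions(parser)

args = parser.parse_args(['--fastq'])
print(args.readClass)          # 'DNARead' (the default)
print(args.fastq, args.fasta)  # True False
# args.fastaFile is sys.stdin here; passing --fastaFile reads.fastq would
# open that file instead (argparse calls open() on the value).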
def feather_links(self, factor=0.01, include_self=False): """ Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01 """ def feather_node(node): node_weight_sum = sum(l.weight for l in node.link_list) # Iterate over a copy of the original link list since we will # need to refer to this while modifying node.link_list for original_link in node.link_list[:]: neighbor_node = original_link.target neighbor_weight = original_link.weight feather_weight = neighbor_weight / node_weight_sum neighbor_node_weight_sum = sum(l.weight for l in neighbor_node.link_list) # Iterate over the links belonging to the neighbor_node, # copying its links to ``node`` with proportional weights for neighbor_link in neighbor_node.link_list: if (not include_self) and (neighbor_link.target == node): continue relative_link_weight = (neighbor_link.weight / neighbor_node_weight_sum) feathered_link_weight = round((relative_link_weight * feather_weight * factor), 2) node.add_link(neighbor_link.target, feathered_link_weight) for n in self.node_list: feather_node(n)
Feather the links of connected nodes. Go through every node in the network and make it inherit the links of the other nodes it is connected to. Because the link weight sum for any given node can be very different within a graph, the weights of inherited links are made proportional to the sum weight of the parent nodes. Args: factor (float): multiplier of neighbor links include_self (bool): whether nodes can inherit links pointing to themselves Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_1.add_link(node_2, 1) >>> node_2.add_link(node_1, 1) >>> graph = Graph([node_1, node_2]) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 >>> graph.feather_links(include_self=True) >>> for link in graph.node_list[0].link_list: ... print('{} {}'.format(link.target.value, link.weight)) Two 1 One 0.01
def result_report_class_wise(self): """Report class-wise results Returns ------- str result report in string format """ results = self.results_class_wise_metrics() output = self.ui.section_header('Class-wise metrics', indent=2) + '\n' output += self.ui.row( 'Scene label', 'Ncorr', 'Nref', 'Accuracy', widths=[20, 12, 12, 12], separators=[True, False, True, False], indent=4 ) + '\n' output += self.ui.row('-', '-', '-', '-') + '\n' for scene_label in self.scene_label_list: output += self.ui.row( scene_label, results[scene_label]['count']['Ncorr'], results[scene_label]['count']['Nref'], results[scene_label]['accuracy']['accuracy'] * 100, types=['str', 'int', 'int', 'float1_percentage'] ) + '\n' return output
Report class-wise results Returns ------- str result report in string format
def read_until(self, expected_commands, timeout):
    """Read AdbMessages from this transport until we get an expected command.

    The ADB protocol specifies that before a successful CNXN handshake, any
    other packets must be ignored, so this method provides the ability to
    ignore unwanted commands. It's primarily used during the initial
    connection to the device. See Read() for more details, including more
    exceptions that may be raised.

    Args:
      expected_commands: Iterable of expected command responses, like
        ('CNXN', 'AUTH').
      timeout: timeouts.PolledTimeout object to use for timeout.

    Returns:
      The ADB message received that matched one of expected_commands.

    Raises:
      AdbProtocolError: If timeout expires between reads, this can happen
        if we are getting spammed with unexpected commands.
    """
    msg = timeouts.loop_until_timeout_or_valid(
        timeout, lambda: self.read_message(timeout),
        lambda m: m.command in expected_commands, 0)
    if msg.command not in expected_commands:
        raise usb_exceptions.AdbTimeoutError(
            'Timed out establishing connection, waiting for: %s' %
            (expected_commands,))
    return msg
Read AdbMessages from this transport until we get an expected command. The ADB protocol specifies that before a successful CNXN handshake, any other packets must be ignored, so this method provides the ability to ignore unwanted commands. It's primarily used during the initial connection to the device. See Read() for more details, including more exceptions that may be raised. Args: expected_commands: Iterable of expected command responses, like ('CNXN', 'AUTH'). timeout: timeouts.PolledTimeout object to use for timeout. Returns: The ADB message received that matched one of expected_commands. Raises: AdbProtocolError: If timeout expires between reads, this can happen if we are getting spammed with unexpected commands.
def ValidOptions(cls): """Returns a list of valid option names.""" valid_options = [] for obj_name in dir(cls): obj = getattr(cls, obj_name) if inspect.isclass(obj) and issubclass(obj, cls.OptionBase): valid_options.append(obj_name) return valid_options
Returns a list of valid option names.
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
    """
    Adds an adapter NIO binding.

    :param adapter_number: adapter number
    :param port_number: port number
    :param nio: NIO instance to add to the adapter/port
    """
    try:
        adapter = self._adapters[adapter_number]
    except IndexError:
        raise IOUError('Adapter {adapter_number} does not exist for IOU "{name}"'.format(
            name=self._name, adapter_number=adapter_number))

    if not adapter.port_exists(port_number):
        raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(
            adapter=adapter, port_number=port_number))

    adapter.add_nio(port_number, nio)
    log.info('IOU "{name}" [{id}]: {nio} added to {adapter_number}/{port_number}'.format(
        name=self._name, id=self._id, nio=nio,
        adapter_number=adapter_number, port_number=port_number))

    if self.ubridge:
        bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
        yield from self._ubridge_send(
            "iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(
                name=bridge_name, iol_id=self.application_id,
                bay=adapter_number, unit=port_number,
                lport=nio.lport, rhost=nio.rhost, rport=nio.rport))
        yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters)
Adds an adapter NIO binding.

:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port
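The try/except IndexError block above follows a common pattern: index into an internal list and translate the low-level IndexError into a domain error that names the missing resource. A self-contained sketch, where IOUError is a stand-in and negative indices would still slip through:

class IOUError(Exception):
    pass

class Node:
    def __init__(self, name, adapter_count):
        self._name = name
        self._adapters = list(range(adapter_count))

    def get_adapter(self, adapter_number):
        # Translate IndexError into a domain-specific, user-readable error.
        try:
            return self._adapters[adapter_number]
        except IndexError:
            raise IOUError('Adapter {} does not exist for IOU "{}"'.format(
                adapter_number, self._name))

node = Node("router1", adapter_count=2)
node.get_adapter(1)       # ok
try:
    node.get_adapter(5)   # raises IOUError
except IOUError as e:
    print(e)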
def install_nginx(instance, dbhost, dbname, port, hostname=None): """Install nginx configuration""" _check_root() log("Installing nginx configuration") if hostname is None: try: configuration = _get_system_configuration(dbhost, dbname) hostname = configuration.hostname except Exception as e: log('Exception:', e, type(e), exc=True, lvl=error) log("""Could not determine public fully qualified hostname! Check systemconfig (see db view and db modify commands) or specify manually with --hostname host.domain.tld Using 'localhost' for now""", lvl=warn) hostname = 'localhost' definitions = { 'instance': instance, 'server_public_name': hostname, 'ssl_certificate': cert_file, 'ssl_key': key_file, 'host_url': 'http://127.0.0.1:%i/' % port } if distribution == 'DEBIAN': configuration_file = '/etc/nginx/sites-available/hfos.%s.conf' % instance configuration_link = '/etc/nginx/sites-enabled/hfos.%s.conf' % instance elif distribution == 'ARCH': configuration_file = '/etc/nginx/nginx.conf' configuration_link = None else: log('Unsure how to proceed, you may need to specify your ' 'distribution', lvl=error) return log('Writing nginx HFOS site definition') write_template_file(os.path.join('dev/templates', nginx_configuration), configuration_file, definitions) if configuration_link is not None: log('Enabling nginx HFOS site (symlink)') if not os.path.exists(configuration_link): os.symlink(configuration_file, configuration_link) log('Restarting nginx service') Popen([ 'systemctl', 'restart', 'nginx.service' ]) log("Done: Install nginx configuration")
Install nginx configuration
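A minimal sketch of the definitions-into-template rendering that write_template_file presumably performs; string.Template, the port, and the snippet of nginx config are illustrative assumptions, not the project's real template engine or site file.

from string import Template

nginx_template = Template(
    "server {\n"
    "    server_name $server_public_name;\n"
    "    location / { proxy_pass $host_url; }\n"
    "}\n"
)

definitions = {
    'server_public_name': 'localhost',
    'host_url': 'http://127.0.0.1:8055/',
}
print(nginx_template.substitute(definitions))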
def unserializers(self, value): """ Setter for **self.__unserializers** attribute. :param value: Attribute value. :type value: dict """ raise foundations.exceptions.ProgrammingError( "{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "unserializers"))
Setter for **self.__unserializers** attribute. :param value: Attribute value. :type value: dict
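The setter above implements a read-only attribute: reads succeed through the property getter (not shown), while any assignment raises. A self-contained sketch of the pattern, with ProgrammingError standing in for foundations.exceptions.ProgrammingError:

class ProgrammingError(Exception):
    pass

class Registry:
    def __init__(self):
        self.__unserializers = {}

    @property
    def unserializers(self):
        # Reading is allowed.
        return self.__unserializers

    @unserializers.setter
    def unserializers(self, value):
        # Writing always raises, making the attribute effectively read-only.
        raise ProgrammingError(
            "{0} | 'unserializers' attribute is read only!".format(
                self.__class__.__name__))

r = Registry()
print(r.unserializers)      # {} -- reading is fine
try:
    r.unserializers = {}    # raises ProgrammingError
except ProgrammingError as e:
    print(e)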
def validate_call(kwargs, returns, is_method=False):
    """
    Decorator which runs validation on a callable's arguments and its return
    value. Pass a schema for the kwargs and for the return value. Positional
    arguments are not supported.
    """
    def decorator(func):
        @wraps(func)
        def inner(*passed_args, **passed_kwargs):
            # Enforce no positional args.
            # The first argument of an instance method or a class method is
            # always positional, so we need to make an exception for them.
            # Static methods are still validated according to the standard
            # rules. This check happens before methods are bound, so an
            # instance method is still a regular function here.
            max_allowed_passed_args_len = 0
            if is_method and type(func) in (types.FunctionType, classmethod):
                max_allowed_passed_args_len = 1
            if len(passed_args) > max_allowed_passed_args_len:
                raise PositionalError('You cannot call this with positional arguments.')

            # Validate keyword arguments
            validate(kwargs, passed_kwargs, 'keyword arguments')

            # Call callable
            return_value = func(*passed_args, **passed_kwargs)

            # Validate return value
            validate(returns, return_value, 'return value')

            return return_value
        inner.__wrapped__ = func
        # caveat: checking for f.__validated__ will only work if @validate_call
        # is not masked by other decorators except for @classmethod or @staticmethod
        inner.__validated__ = True
        return inner
    return decorator
Decorator which runs validation on a callable's arguments and its return value. Pass a schema for the kwargs and for the return value. Positional arguments are not supported.
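A hedged usage sketch: the schema dictionaries below assume the library's validate helper understands JSON-Schema-style objects, which is an assumption, not confirmed by the snippet above.

@validate_call(
    kwargs={'type': 'object',
            'properties': {'a': {'type': 'integer'},
                           'b': {'type': 'integer'}},
            'required': ['a', 'b']},
    returns={'type': 'integer'},
)
def add(**kw):
    return kw['a'] + kw['b']

add(a=1, b=2)   # ok -> 3
add(1, 2)       # raises PositionalError: no positional arguments allowed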
def plot(darray, row=None, col=None, col_wrap=None, ax=None, hue=None, rtol=0.01, subplot_kws=None, **kwargs): """ Default plot of DataArray using matplotlib.pyplot. Calls xarray plotting function based on the dimensions of darray.squeeze() =============== =========================== Dimensions Plotting function --------------- --------------------------- 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Parameters ---------- darray : DataArray row : string, optional If passed, make row faceted plots on this dimension name col : string, optional If passed, make column faceted plots on this dimension name hue : string, optional If passed, make faceted line plots with hue on this dimension name col_wrap : integer, optional Use together with ``col`` to wrap faceted plots ax : matplotlib axes, optional If None, uses the current axis. Not applicable when using facets. rtol : number, optional Relative tolerance used to determine if the indexes are uniformly spaced. Usually a small positive number. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies to FacetGrid plotting. **kwargs : optional Additional keyword arguments to matplotlib """ darray = darray.squeeze() plot_dims = set(darray.dims) plot_dims.discard(row) plot_dims.discard(col) plot_dims.discard(hue) ndims = len(plot_dims) error_msg = ('Only 1d and 2d plots are supported for facets in xarray. ' 'See the package `Seaborn` for more options.') if ndims in [1, 2]: if row or col: kwargs['row'] = row kwargs['col'] = col kwargs['col_wrap'] = col_wrap kwargs['subplot_kws'] = subplot_kws if ndims == 1: plotfunc = line kwargs['hue'] = hue elif ndims == 2: if hue: plotfunc = line kwargs['hue'] = hue else: plotfunc = pcolormesh else: if row or col or hue: raise ValueError(error_msg) plotfunc = hist kwargs['ax'] = ax return plotfunc(darray, **kwargs)
Default plot of DataArray using matplotlib.pyplot. Calls xarray plotting function based on the dimensions of darray.squeeze() =============== =========================== Dimensions Plotting function --------------- --------------------------- 1 :py:func:`xarray.plot.line` 2 :py:func:`xarray.plot.pcolormesh` Anything else :py:func:`xarray.plot.hist` =============== =========================== Parameters ---------- darray : DataArray row : string, optional If passed, make row faceted plots on this dimension name col : string, optional If passed, make column faceted plots on this dimension name hue : string, optional If passed, make faceted line plots with hue on this dimension name col_wrap : integer, optional Use together with ``col`` to wrap faceted plots ax : matplotlib axes, optional If None, uses the current axis. Not applicable when using facets. rtol : number, optional Relative tolerance used to determine if the indexes are uniformly spaced. Usually a small positive number. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies to FacetGrid plotting. **kwargs : optional Additional keyword arguments to matplotlib
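A runnable illustration of the dimension-based dispatch; this is standard xarray behavior.

import numpy as np
import xarray as xr

da1 = xr.DataArray(np.sin(np.linspace(0, 6, 50)), dims='x')
da1.plot()    # 1d -> line

da2 = xr.DataArray(np.random.rand(10, 20), dims=('y', 'x'))
da2.plot()    # 2d -> pcolormesh

da3 = xr.DataArray(np.random.rand(4, 5, 6), dims=('a', 'b', 'c'))
da3.plot()    # >2d without facets -> histogram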
def _commonprefix(files):
    """Retrieve a common prefix for files without extra _R1 _I1 extensions.

    Allows alternative naming schemes (R1/R2/R3) or (R1/R2/I1).
    """
    out = os.path.commonprefix(files)
    # rstrip removes trailing characters drawn from the given *set*, so these
    # calls strip any combination of trailing "R", "I" and "_" characters left
    # over from read/index suffixes such as _R1 or _I1.
    out = out.rstrip("_R")
    out = out.rstrip("_I")
    out = out.rstrip("_")
    return out
Retrieve a common prefix for files without extra _R1 _I1 extensions.

Allows alternative naming schemes (R1/R2/R3) or (R1/R2/I1).
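Illustrative calls, given the function above; the last line shows the caveat that follows from rstrip's character-set semantics.

_commonprefix(["sample_R1.fastq", "sample_R2.fastq"])         # -> "sample"
_commonprefix(["lane1_R1.fq", "lane1_R2.fq", "lane1_I1.fq"])  # -> "lane1"
_commonprefix(["TR_R1.fq", "TR_R2.fq"])  # -> "T": the R in "TR" is stripped too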
def extern_store_utf8(self, context_handle, utf8_ptr, utf8_len): """Given a context and UTF8 bytes, return a new Handle to represent the content.""" c = self._ffi.from_handle(context_handle) return c.to_value(self._ffi.string(utf8_ptr, utf8_len).decode('utf-8'))
Given a context and UTF8 bytes, return a new Handle to represent the content.
def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
Custom version of splitext that doesn't perform splitext on directories
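Illustrative behavior, assuming '.' exists as a directory and 'archive.tar.gz' does not:

splitext_files_only('.')               # ('.', '') -- directories are not split
splitext_files_only('archive.tar.gz')  # ('archive.tar', '.gz')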
def get_proficiencies_by_search(self, proficiency_query, proficiency_search): """Pass through to provider ProficiencySearchSession.get_proficiencies_by_search""" # Implemented from azosid template for - # osid.resource.ResourceSearchSession.get_resources_by_search_template if not self._can('search'): raise PermissionDenied() return self._provider_session.get_proficiencies_by_search(proficiency_query, proficiency_search)
Pass through to provider ProficiencySearchSession.get_proficiencies_by_search
async def acquire(self, command=None, args=()): """Acquires a connection from free pool. Creates new connection if needed. """ if self.closed: raise PoolClosedError("Pool is closed") async with self._cond: if self.closed: raise PoolClosedError("Pool is closed") while True: await self._fill_free(override_min=True) if self.freesize: conn = self._pool.popleft() assert not conn.closed, conn assert conn not in self._used, (conn, self._used) self._used.add(conn) return conn else: await self._cond.wait()
Acquires a connection from free pool. Creates new connection if needed.
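A hedged sketch of the acquire/release discipline this method implies; the release() counterpart and conn.execute() are assumptions about the surrounding pool and driver, not confirmed above.

async def fetch_one(pool, query):
    # Take a connection out of the pool and guarantee it goes back,
    # even if the query raises.
    conn = await pool.acquire()
    try:
        return await conn.execute(query)  # illustrative; depends on the driver
    finally:
        pool.release(conn)                # assumed counterpart to acquire()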