positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
                     event_wall_time, num_expired_scalars, num_expired_histos,
                     num_expired_comp_histos, num_expired_images,
                     num_expired_audio):
    """Return the string message associated with TensorBoard purges."""
    template = ('Detected out of order event.step likely caused by '
                'a TensorFlow restart. Purging expired events from Tensorboard'
                ' display between the previous step: {} (timestamp: {}) and '
                'current step: {} (timestamp: {}). Removing {} scalars, {} '
                'histograms, {} compressed histograms, {} images, '
                'and {} audio.')
    return template.format(most_recent_step, most_recent_wall_time,
                           event_step, event_wall_time, num_expired_scalars,
                           num_expired_histos, num_expired_comp_histos,
                           num_expired_images, num_expired_audio)
Return the string message associated with TensorBoard purges.
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
    """
    Excludes subjects given a certain exclusion criteria.

    Parameters
    ----------
    confound : str or list
        string or list of confound name(s) from confound files
    exclusion_criteria : str or list
        for each confound, an exclusion_criteria should be expressed as a
        string. It starts with >,<,>= or <= then the numerical threshold.
        Ex. '>0.2' will entail every subject with the avg greater than 0.2
        of confound will be rejected.
    confound_stat : str or list
        Can be median, mean, std. How the confound data is aggregated (so
        if there is a measure per time-point, this is averaged over all
        time points. If multiple confounds specified, this has to be a
        list.).

    Returns
    -------
    calls TenetoBIDS.set_bad_files with the files meeting the exclusion
    criteria.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
    # Normalize scalar arguments to lists so they can be indexed together.
    if isinstance(confound, str):
        confound = [confound]
    if isinstance(exclusion_criteria, str):
        exclusion_criteria = [exclusion_criteria]
    if isinstance(confound_stat, str):
        confound_stat = [confound_stat]
    if len(exclusion_criteria) != len(confound):
        raise ValueError(
            'Same number of confound names and exclusion criteria must be given')
    if len(confound_stat) != len(confound):
        raise ValueError(
            'Same number of confound names and confound stats must be given')
    relex, crit = process_exclusion_criteria(exclusion_criteria)
    files = sorted(self.get_selected_files(quiet=1))
    confound_files = sorted(
        self.get_selected_files(quiet=1, pipeline='confound'))
    files, confound_files = confound_matching(files, confound_files)
    bad_files = []
    bs = 0
    foundconfound = []
    foundreason = []
    for s, cfile in enumerate(confound_files):
        df = load_tabular_file(cfile, index_col=None)
        found_bad_subject = False
        for i, _ in enumerate(confound):
            if confound_stat[i] == 'median':
                if relex[i](df[confound[i]].median(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'mean':
                if relex[i](df[confound[i]].mean(), crit[i]):
                    found_bad_subject = True
            elif confound_stat[i] == 'std':
                # BUGFIX: was `df[i][confound[i]].std()`, which indexes the
                # dataframe by the loop counter instead of selecting the
                # confound column as the median/mean branches do.
                if relex[i](df[confound[i]].std(), crit[i]):
                    found_bad_subject = True
            if found_bad_subject:
                foundconfound.append(confound[i])
                foundreason.append(exclusion_criteria[i])
                # BUGFIX: stop at the first offending confound so that
                # foundconfound/foundreason stay aligned one-to-one with
                # bad_files (previously every subsequent confound iteration
                # appended again once the flag was set).
                break
        if found_bad_subject:
            bad_files.append(files[s])
            bs += 1
    self.set_bad_files(
        bad_files, reason='excluded file (confound over specfied stat threshold)')
    # Write a JSON sidecar next to each excluded file recording why it was
    # excluded.
    for i, f in enumerate(bad_files):
        sidecar = get_sidecar(f)
        sidecar['file_exclusion'] = {}
        sidecar['confound'] = foundconfound[i]
        sidecar['threshold'] = foundreason[i]
        for af in ['.tsv', '.nii.gz']:
            f = f.split(af)[0]
        f += '.json'
        with open(f, 'w') as fs:
            json.dump(sidecar, fs)
    print('Removed ' + str(bs) + ' files from inclusion.')
Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expressed as a string. It starts with >,<,>= or <= then the numerical threshold. Ex. '>0.2' will entail every subject with the avg greater than 0.2 of confound will be rejected. confound_stat : str or list Can be median, mean, std. How the confound data is aggregated (so if there is a measure per time-point, this is averaged over all time points. If multiple confounds specified, this has to be a list.). Returns -------- calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
def deepcopy(original_obj):
    """Create a deep copy of an object with no cross-referenced lists or
    dicts.

    Useful when loading from yaml, as anchors generate cross-referenced
    dicts and lists.

    Args:
        original_obj(object): Object to deep copy
    Return:
        object: deep copy of the object
    """
    if isinstance(original_obj, dict):
        return {key: deepcopy(val) for key, val in original_obj.items()}
    if isinstance(original_obj, list):
        return [deepcopy(item) for item in original_obj]
    # Anything that is not a list or dict is returned as-is.
    return original_obj
Creates a deep copy of an object with no crossed referenced lists or dicts, useful when loading from yaml as anchors generate those cross-referenced dicts and lists Args: original_obj(object): Object to deep copy Return: object: deep copy of the object
def truncate(self, path, length, fh=None):
    """Download existing path, truncate and reupload.

    :param path: remote path to truncate
    :param length: new length in bytes
    :param fh: file handle (unused)
    :raises OSError: with ENOENT when the path is missing, deleted, or the
        re-upload fails
    """
    try:
        f = self._getpath(path)
    except JFS.JFSError:
        raise OSError(errno.ENOENT, '')
    if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted():
        raise OSError(errno.ENOENT)
    data = StringIO(f.read())
    data.truncate(length)
    try:
        self.client.up(path, data)  # replace file contents
        self._dirty(path)
        return ESUCCESS
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only operational failures should map to ENOENT.
    except Exception:
        raise OSError(errno.ENOENT, '')
Download existing path, truncate and reupload
def autoLayout(self):
    """Automatically lay out the contents for this widget."""
    try:
        direction = self.currentSlide().scene().direction()
    except AttributeError:
        # No current slide/scene yet; fall back to the default direction.
        direction = QtGui.QBoxLayout.TopToBottom

    size = self.size()
    self._slideshow.resize(size)

    # Renamed from prev/next: `next` shadowed the builtin.
    prev_btn = self._previousButton
    next_btn = self._nextButton

    if direction == QtGui.QBoxLayout.BottomToTop:
        y = 9
    else:
        y = size.height() - prev_btn.height() - 9

    prev_btn.move(9, y)
    next_btn.move(size.width() - next_btn.width() - 9, y)

    # Update the layout for the slides.
    for i in range(self._slideshow.count()):
        self._slideshow.widget(i).scene().autoLayout(size)
Automatically lays out the contents for this widget.
def silence_warnings(*warnings):
    """Context manager for silencing bokeh validation warnings."""
    for w in warnings:
        silence(w)
    try:
        yield
    finally:
        # Re-enable every warning that was silenced on entry.
        for w in warnings:
            silence(w, False)
Context manager for silencing bokeh validation warnings.
def list_deelgemeenten_by_gemeente(self, gemeente):
    '''
    List all `deelgemeenten` in a `gemeente`.

    :param gemeente: The :class:`Gemeente` for which the \
        `deelgemeenten` are wanted. Currently only Flanders is supported.
    :rtype: A :class:`list` of :class:`Deelgemeente`.
    '''
    # Accept either a Gemeente object or a bare niscode.
    niscode = getattr(gemeente, 'niscode', gemeente)

    def creator():
        return [
            Deelgemeente(dg['id'], dg['naam'], dg['gemeente_niscode'])
            for dg in self.deelgemeenten.values()
            if dg['gemeente_niscode'] == niscode
        ]

    cache = self.caches['permanent']
    if cache.is_configured:
        key = 'ListDeelgemeentenByGemeenteId#%s' % niscode
        deelgemeenten = cache.get_or_create(key, creator)
    else:
        deelgemeenten = creator()
    for dg in deelgemeenten:
        dg.set_gateway(self)
    return deelgemeenten
List all `deelgemeenten` in a `gemeente`. :param gemeente: The :class:`Gemeente` for which the \ `deelgemeenten` are wanted. Currently only Flanders is supported. :rtype: A :class:`list` of :class:`Deelgemeente`.
def bb_get_instr_max_width(basic_block):
    """Get maximum instruction mnemonic width.

    :param basic_block: iterable of instructions, each exposing a
        ``mnemonic`` string attribute
    :return: length of the longest mnemonic, or 0 for an empty block
    """
    # Idiomatic replacement for the hand-rolled max loop; works on py2 and
    # py3 (no `max(..., default=)` dependency).
    widths = [len(instr.mnemonic) for instr in basic_block]
    return max(widths) if widths else 0
Get maximum instruction mnemonic width
def wrongstatus(data, sb, msb, lsb):
    """Check whether the status bit and its field bits are consistent.

    This function is used for checking BDS code versions.

    :param data: bit string
    :param sb: 1-based position of the status bit
    :param msb: 1-based position of the field's most significant bit
    :param lsb: 1-based position of the field's least significant bit
    :return: True when the field is inconsistent with the status bit
    """
    status = int(data[sb - 1])
    field = bin2int(data[msb - 1:lsb])
    # A cleared status bit with a non-zero field value is inconsistent.
    return (not status) and field != 0
Check if the status bit and field bits are consistent. This function is used for checking BDS code versions.
def can_be_updated(cls, dist, latest_version):
    """Determine whether package can be updated or not.

    A package can be updated when no dependant pins it to a version range
    that excludes ``latest_version``.
    """
    scheme = get_scheme('default')
    name = dist.project_name
    latest = str(latest_version)
    for dependant in cls.get_dependants(name):
        requires = dependant.requires()
        for requirement in cls.get_requirement(name, requires):
            req = parse_requirement(requirement)
            try:
                matcher = scheme.matcher(req.requirement)
            except UnsupportedVersionError:
                # Ignore error if version in requirement spec can't be parsed.
                continue
            if not matcher.match(latest):
                return False
    return True
Determine whether package can be updated or not.
def create_method(self):
    """Build the estimator method or function.

    Returns
    -------
    :return : string
        The built method as string.
    """
    # Brace-delimited target languages get one extra indent level.
    indented_langs = ('java', 'js', 'php', 'ruby')
    n_indents = 1 if self.target_language in indented_langs else 0
    template = self.temp('separated.method', n_indents=n_indents,
                         skipping=True)
    return template.format(**self.__dict__)
Build the estimator method or function. Returns ------- :return : string The built method as string.
def get_current_selection(self, i=None):
    """Get the :class:`TaskFileInfo` for the file selected in the active tab

    :param i: If None, returns selection of active tab.
              If 0, assetselection. If 1, shotselection
    :type i: int | None
    :returns: The taskfile info in the currently active tab
    :rtype: :class:`TaskFileInfo` | None
    :raises: None
    """
    # The asset and shot branches were byte-identical except for the
    # browser widget; fold them into one code path.
    if i is None:
        i = self.selection_tabw.currentIndex()
    if i == 0:
        browser = self.assetverbrws
    elif i == 1:
        browser = self.shotverbrws
    else:
        return None
    indexes = browser.selected_indexes(0)
    if indexes and indexes[0].isValid():
        return indexes[0].internalPointer().internal_data()
    return None
Get the :class:`TaskFileInfo` for the file selected in the active tab :param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection :type i: :returns: The taskfile info in the currently active tab :rtype: :class:`TaskFileInfo` | None :raises: None
def __trim_outputspeech(self, speech_output=None):
    # type: (Union[str, None]) -> str
    """Trim the output speech if it is wrapped in a <speak></speak> tag.

    :param speech_output: the output speech sent back to user.
    :type speech_output: str
    :return: the trimmed output speech.
    :rtype: str
    """
    if speech_output is None:
        return ""
    speech = speech_output.strip()
    open_tag, close_tag = "<speak>", "</speak>"
    if speech.startswith(open_tag) and speech.endswith(close_tag):
        # Slice the tags off by their lengths instead of the magic
        # numbers 7 / -8.
        return speech[len(open_tag):-len(close_tag)].strip()
    return speech
Trims the output speech if it already has the <speak></speak> tag. :param speech_output: the output speech sent back to user. :type speech_output: str :return: the trimmed output speech. :rtype: str
def toggle_actions(actions, enable):
    """Enable or disable every action in *actions*.

    ``actions`` may be None (no-op); None entries within it are skipped.
    """
    if actions is None:
        return
    for action in actions:
        if action is not None:
            action.setEnabled(enable)
Enable/disable actions
def put_file(self, client, source_file):
    """Put file on instance in default SSH directory."""
    try:
        file_name = os.path.basename(source_file)
        ipa_utils.put_file(client, source_file, file_name)
    except Exception as error:
        # Wrap any failure in the cloud-specific exception type.
        raise IpaCloudException(
            'Failed copying file, "{0}"; {1}.'.format(source_file, error)
        )
    return file_name
Put file on instance in default SSH directory.
def wait_for_close(
    raiden: 'RaidenService',
    payment_network_id: PaymentNetworkID,
    token_address: TokenAddress,
    channel_ids: List[ChannelID],
    retry_timeout: float,
) -> None:
    """Wait until all channels are closed.

    Thin wrapper around ``wait_for_channel_in_states`` using
    ``CHANNEL_AFTER_CLOSE_STATES`` as the accepted target states.

    Note:
        This does not time out, use gevent.Timeout.
    """
    return wait_for_channel_in_states(
        raiden=raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        channel_ids=channel_ids,
        retry_timeout=retry_timeout,
        target_states=CHANNEL_AFTER_CLOSE_STATES,
    )
Wait until all channels are closed. Note: This does not time out, use gevent.Timeout.
def __getStationName(name, id):
    """Construct a station name of the form ``"<name> (<id>)"``."""
    cleaned = name.replace("Meetstation", "").strip()
    return "%s (%s)" % (cleaned, id)
Construct a station name.
def render_children(node: Node, **child_args):
    """Render every xml child of *node* and attach the results."""
    for child_xml in node.xml_node.children:
        node.add_child(render(child_xml, **child_args))
Render node children
def capture_widget(widget, path=None):
    """Grab an image of a Qt widget.

    Args:
        widget: The Qt Widget to capture
        path (optional): The path to save to. If not provided - will return
            image data.

    Returns:
        If a path is provided, the image will be saved to it.
        If not, the PNG buffer will be returned.
    """
    # Qt5 grabs directly from the widget; Qt4 uses the static QPixmap API.
    pixmap = widget.grab() if use_qt5 else QtGui.QPixmap.grabWidget(widget)

    if path:
        pixmap.save(path)
        return None

    image_buffer = QtCore.QBuffer()
    image_buffer.open(QtCore.QIODevice.ReadWrite)
    pixmap.save(image_buffer, "PNG")
    return image_buffer.data().data()
Grab an image of a Qt widget Args: widget: The Qt Widget to capture path (optional): The path to save to. If not provided - will return image data. Returns: If a path is provided, the image will be saved to it. If not, the PNG buffer will be returned.
def ovsdb_server_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name = ET.SubElement(ovsdb_server, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def get_vlan_brief_output_vlan_vlan_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_type = ET.SubElement(vlan, "vlan-type") vlan_type.text = kwargs.pop('vlan_type') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def convert_url_to_download_info(self, url, project_name):
    """
    See if a URL is a candidate for a download URL for a project (the URL
    has typically been scraped from an HTML page).

    If it is, a dictionary is returned with keys "name", "version",
    "filename" and "url"; otherwise, None is returned.
    """
    def same_project(name1, name2):
        # Compare project names after PEP 503-style normalisation.
        return normalize_name(name1) == normalize_name(name2)

    result = None
    scheme, netloc, path, params, query, frag = urlparse(url)
    if frag.lower().startswith('egg='):
        logger.debug('%s: version hint in fragment: %r',
                     project_name, frag)
    # The fragment may carry a checksum, e.g. "#sha256=abcd...".
    m = HASHER_HASH.match(frag)
    if m:
        algo, digest = m.groups()
    else:
        algo, digest = None, None
    # Keep the original path for building the returned URL; strip a
    # trailing slash for extension matching.
    origpath = path
    if path and path[-1] == '/':
        path = path[:-1]
    if path.endswith('.whl'):
        try:
            wheel = Wheel(path)
            if is_compatible(wheel, self.wheel_tags):
                if project_name is None:
                    include = True
                else:
                    include = same_project(wheel.name, project_name)
                if include:
                    result = {
                        'name': wheel.name,
                        'version': wheel.version,
                        'filename': wheel.filename,
                        'url': urlunparse((scheme, netloc, origpath,
                                           params, query, '')),
                        'python-version': ', '.join(
                            ['.'.join(list(v[2:])) for v in wheel.pyver]),
                    }
        except Exception as e:
            # Malformed wheel filename; not a candidate.
            logger.warning('invalid path for wheel: %s', path)
    elif path.endswith(self.downloadable_extensions):
        path = filename = posixpath.basename(path)
        for ext in self.downloadable_extensions:
            if path.endswith(ext):
                path = path[:-len(ext)]
                t = self.split_filename(path, project_name)
                if not t:
                    logger.debug('No match for project/version: %s', path)
                else:
                    name, version, pyver = t
                    if not project_name or same_project(project_name, name):
                        result = {
                            'name': name,
                            'version': version,
                            'filename': filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            #'packagetype': 'sdist',
                        }
                        if pyver:
                            result['python-version'] = pyver
                break
    # Attach the fragment checksum (if any) to the candidate.
    if result and algo:
        result['%s_digest' % algo] = digest
    return result
See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned.
def setup(self):
    """Create mask list.

    Consists of all tuples between which this filter accepts lines.
    """
    # get start and end of the mask and set a start_limit
    if not self.mask_source.start:
        raise SystemExit("Can't parse format of %s. Is this a log file or "
                         "system.profile collection?"
                         % self.mlogfilter.args['mask'])
    # Half the mask window is padded on each side of an event.
    self.mask_half_td = timedelta(seconds=self.mlogfilter.args
                                  ['mask_size'] / 2)
    # load filter mask file
    logevent_list = list(self.mask_source)
    # define start and end of total mask
    self.mask_start = self.mask_source.start - self.mask_half_td
    self.mask_end = self.mask_source.end + self.mask_half_td
    # consider --mask-center: shift the total mask bounds by the first/last
    # event durations when centering on operation start times
    if self.mlogfilter.args['mask_center'] in ['start', 'both']:
        if logevent_list[0].duration:
            self.mask_start -= timedelta(milliseconds=logevent_list[0]
                                         .duration)
    if self.mlogfilter.args['mask_center'] == 'start':
        if logevent_list[-1].duration:
            self.mask_end -= timedelta(milliseconds=logevent_list[-1]
                                       .duration)
    self.start_limit = self.mask_start
    # different center points: build the list of event instants (or
    # (start, end) tuples for 'both') that mask intervals are built around
    if 'mask_center' in self.mlogfilter.args:
        if self.mlogfilter.args['mask_center'] in ['start', 'both']:
            starts = ([(le.datetime - timedelta(milliseconds=le.duration))
                       if le.duration is not None
                       else le.datetime
                       for le in logevent_list
                       if le.datetime])
        if self.mlogfilter.args['mask_center'] in ['end', 'both']:
            ends = [le.datetime for le in logevent_list if le.datetime]
        if self.mlogfilter.args['mask_center'] == 'start':
            event_list = sorted(starts)
        elif self.mlogfilter.args['mask_center'] == 'end':
            event_list = sorted(ends)
        elif self.mlogfilter.args['mask_center'] == 'both':
            event_list = sorted(zip(starts, ends))
    mask_list = []
    if len(event_list) == 0:
        return
    # Merge overlapping padded intervals into a single mask tuple.
    start_point = end_point = None
    for e in event_list:
        if start_point is None:
            start_point, end_point = self._pad_event(e)
            continue
        next_start = (e[0] if type(e) == tuple else e) - self.mask_half_td
        if next_start <= end_point:
            # Overlaps the current interval: extend it.
            end_point = ((e[1] if type(e) == tuple else e)
                         + self.mask_half_td)
        else:
            mask_list.append((start_point, end_point))
            start_point, end_point = self._pad_event(e)
    # Flush the final open interval.
    if start_point:
        mask_list.append((start_point, end_point))
    self.mask_list = mask_list
Create mask list. Consists of all tuples between which this filter accepts lines.
def no_exception(on_exception, logger=None):
    """Decorator that swallows any exception raised by the wrapped function.

    :param on_exception: value returned by the wrapper when the wrapped
        function raises (required)
    :param logger: optional logger; when it has an ``exception`` method the
        error is logged there, otherwise the traceback is printed
    """
    def decorator(function):
        def wrapper(*args, **kwargs):
            try:
                result = function(*args, **kwargs)
            # Fixed py2-only `except Exception, e` syntax; `as` works on
            # py2.6+ and py3.
            except Exception as e:
                if hasattr(logger, 'exception'):
                    logger.exception(e)
                else:
                    # Single-argument print() is valid on py2 and py3.
                    print(traceback.format_exc())
                result = on_exception
            return result
        return wrapper
    return decorator
处理函数抛出异常的装饰器, ATT: on_exception必填 :param on_exception: 遇到异常时函数返回什么内容
def location(ip=None, key=None, field=None):
    '''
    Get geolocation data for a given IP address.

    If field is specified, get specific field as text.
    Else get complete location data as JSON.
    '''
    if field and (field not in field_list):
        return 'Invalid field'

    # Build the endpoint URL: text endpoint per-field, otherwise JSON.
    if field:
        url = ('https://ipapi.co/{}/{}/'.format(ip, field) if ip
               else 'https://ipapi.co/{}/'.format(field))
    else:
        url = ('https://ipapi.co/{}/json/'.format(ip) if ip
               else 'https://ipapi.co/json/')

    api_key = key or API_KEY
    if api_key:
        url = '{}?key={}'.format(url, api_key)

    response = get(url, headers=headers)
    return response.text if field else response.json()
Get geolocation data for a given IP address If field is specified, get specific field as text Else get complete location data as JSON
def path(self, source, target):
    """
    Find the path of id fields connecting two tables.

    This is just a basic breadth-first-search. The relations file
    should be small enough to not be a problem.

    Returns:
        list: (table, fieldname) pairs describing the path from
            the source to target tables
    Raises:
        :class:`delphin.exceptions.ItsdbError`: when no path is found
    Example:
        >>> relations.path('item', 'result')
        [('parse', 'i-id'), ('result', 'parse-id')]
        >>> relations.path('parse', 'item')
        [('item', 'i-id')]
        >>> relations.path('item', 'item')
        []
    """
    visited = set(source.split('+'))  # split on + for joins
    targets = set(target.split('+')) - visited
    # ensure sources and targets exists (lookup raises if unknown)
    for tablename in visited.union(targets):
        self[tablename]
    # base case; nothing to do
    if len(targets) == 0:
        return []
    # BFS frontier: each path starts at a source table with no pivot key.
    paths = [[(tablename, None)] for tablename in visited]
    while True:
        newpaths = []
        for path in paths:
            laststep, pivot = path[-1]
            if laststep in targets:
                # Drop the artificial (source, None) first step.
                return path[1:]
            else:
                # Expand via every key field of the current table to any
                # not-yet-visited table sharing that key.
                for key in self[laststep].keys():
                    for step in set(self.find(key)) - visited:
                        visited.add(step)
                        newpaths.append(path + [(step, key)])
        if newpaths:
            paths = newpaths
        else:
            # Frontier exhausted without reaching a target.
            break
    raise ItsdbError('no relation path found from {} to {}'
                     .format(source, target))
Find the path of id fields connecting two tables. This is just a basic breadth-first-search. The relations file should be small enough to not be a problem. Returns: list: (table, fieldname) pairs describing the path from the source to target tables Raises: :class:`delphin.exceptions.ItsdbError`: when no path is found Example: >>> relations.path('item', 'result') [('parse', 'i-id'), ('result', 'parse-id')] >>> relations.path('parse', 'item') [('item', 'i-id')] >>> relations.path('item', 'item') []
def ping(**kwargs):
    '''
    Checks connections with the kubernetes API server.
    Returns True if the connection can be established, False otherwise.

    CLI Example:
        salt '*' kubernetes.ping
    '''
    try:
        # Any successful node listing proves the API server is reachable.
        nodes(**kwargs)
    except CommandExecutionError:
        return False
    return True
Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping
def yearly_average(arr, dt):
    """Average a sub-yearly time-series over each year.

    Resulting timeseries comprises one value for each year in which the
    original array had valid data.  Accounts for (i.e. ignores) masked
    values in original data when computing the annual averages.

    Parameters
    ----------
    arr : xarray.DataArray
        The array to be averaged
    dt : xarray.DataArray
        Array of the duration of each timestep

    Returns
    -------
    xarray.DataArray
        Has the same shape and mask as the original ``arr``, except for the
        time dimension, which is truncated to one value for each year that
        ``arr`` spanned
    """
    assert_matching_time_coord(arr, dt)
    year_group = TIME_STR + '.year'
    # Zero out timestep weights wherever the data itself is invalid so
    # masked values don't contribute to the annual average.
    valid_dt = dt.where(np.isfinite(arr))
    weighted = (arr * valid_dt).groupby(year_group).sum(TIME_STR)
    total = valid_dt.groupby(year_group).sum(TIME_STR)
    return weighted / total
Average a sub-yearly time-series over each year. Resulting timeseries comprises one value for each year in which the original array had valid data. Accounts for (i.e. ignores) masked values in original data when computing the annual averages. Parameters ---------- arr : xarray.DataArray The array to be averaged dt : xarray.DataArray Array of the duration of each timestep Returns ------- xarray.DataArray Has the same shape and mask as the original ``arr``, except for the time dimension, which is truncated to one value for each year that ``arr`` spanned
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={}, empty_to_None=[]): """Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None """ import csv import time import datetime if out is None: out = os.path.splitext(csvfile)[0]+".pdl" if fieldnames is None: # read field names in the first line of CSV file reader = csv.reader(open(csvfile)) fieldnames = reader.next() reader = csv.DictReader(open(csvfile),fieldnames,fmtparams) reader.next() # skip first line db = PyDbLite.Base(out) conv_func.update({"__id__":int}) auto_id = not "__id__" in fieldnames fieldnames = [ f for f in fieldnames if not f in ("__id__") ] kw = {"mode":"override"} db.create(*fieldnames,**kw) print db.fields next_id = 0 records = {} while True: try: record = reader.next() except StopIteration: break if auto_id: record["__id__"] = next_id next_id += 1 # replace empty strings by None for field in empty_to_None: if not record[field]: record[field] = None # type conversion for field in conv_func: if not isinstance(conv_func[field],(tuple,list)): record[field] = conv_func[field](record[field]) else: # date or datetime date_class,date_fmt = conv_func[field] if not record[field]: record[field] = None else: time_tuple = time.strptime(record[field],date_fmt) if date_class is datetime.date: time_tuple = time_tuple[:3] record[field] 
= date_class(*time_tuple) records[record["__id__"]] = record db.records = records db.commit() print len(db) return db
Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None
def update(self, capacity=values.unset, available=values.unset):
    """
    Update the WorkerChannelInstance

    :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type.
    :param bool available: Toggle the availability of the WorkerChannel.

    :returns: Updated WorkerChannelInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
    """
    # Delegate to the context proxy; the values.unset sentinel marks
    # parameters that were not supplied by the caller.
    return self._proxy.update(capacity=capacity, available=available, )
Update the WorkerChannelInstance :param unicode capacity: The total number of Tasks worker should handle for this TaskChannel type. :param bool available: Toggle the availability of the WorkerChannel. :returns: Updated WorkerChannelInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
def a(value):
    """Eq(a) -> Parser(a, a)

    Returns a parser that parses a token that is equal to the value value.
    """
    display = getattr(value, 'name', value)
    parser = some(lambda tok: tok == value)
    return parser.named(u'(a "%s")' % (display,))
Eq(a) -> Parser(a, a) Returns a parser that parses a token that is equal to the value value.
def ci_data(namespace, name, branch='master'):
    '''Returns or starts the ci data collection process'''
    with repository(namespace, name, branch) as (path, latest, cache):
        if not (path and latest):
            return {'build_success': NOT_FOUND, 'status': NOT_FOUND}
        if latest in cache:
            # Cached result for the latest revision.
            return json.loads(cache[latest])
        # Record the 'starting' state before kicking off the background
        # worker so concurrent callers see it.
        starting = {'status': 'starting'}
        cache[latest] = json.dumps(starting)
        ci_worker(namespace, name, branch=branch, _bg=True)
        return starting
Returns or starts the ci data collection process
def strip_footer(ref_lines, section_title):
    """Remove footer title lines from references lines.

    A footer line is the section title optionally preceded by a bracketed
    or parenthesised number, e.g. ``[12] References``.

    :param ref_lines: list of reference line strings
    :param section_title: the section title text marking a footer line
    :return: ``ref_lines`` with footer lines removed
    """
    # Was `ur'...'`: the `ur` prefix is py2-only and a SyntaxError on py3.
    # A plain raw string is equivalent for this ASCII pattern on both.
    pattern = r'\(?\[?\d{0,4}\]?\)?\.?\s*%s\s*$' % re.escape(section_title)
    re_footer = re.compile(pattern, re.UNICODE)
    return [line for line in ref_lines if not re_footer.match(line)]
Remove footer title from references lines
def to_csv(self, filename, stimuli=None, inhibitors=None, prepend=""):
    """Writes the list of clampings to a CSV file

    Parameters
    ----------
    filename : str
        Absolute path where to write the CSV file

    stimuli : Optional[list[str]]
        List of stimuli names. If given, stimuli are converted to {0,1}
        instead of {-1,1}.

    inhibitors : Optional[list[str]]
        List of inhibitors names. If given, inhibitors are renamed and
        converted to {0,1} instead of {-1,1}.

    prepend : str
        Columns are renamed using the given string at the beginning
    """
    # Build the dataframe representation first, then delegate the writing.
    frame = self.to_dataframe(stimuli, inhibitors, prepend)
    frame.to_csv(filename, index=False)
Writes the list of clampings to a CSV file Parameters ---------- filename : str Absolute path where to write the CSV file stimuli : Optional[list[str]] List of stimuli names. If given, stimuli are converted to {0,1} instead of {-1,1}. inhibitors : Optional[list[str]] List of inhibitors names. If given, inhibitors are renamed and converted to {0,1} instead of {-1,1}. prepend : str Columns are renamed using the given string at the beginning
def _from_fields(self, fields):
    '''Parse the next raw field from the generator.

    Raise StopIteration if the property could not be read.
    '''
    raw = next(fields)
    # Convert using the numpy scalar type for this property's dtype.
    return _np.dtype(self.dtype()).type(raw)
Parse from generator. Raise StopIteration if the property could not be read.
def facetintervallookupone(table, key, start='start', stop='stop',
                           value=None, include_stop=False, strict=True):
    """
    Construct a faceted interval lookup for the given table, returning at
    most one result for each query.

    If ``strict=True``, queries returning more than one result will raise a
    `DuplicateKeyError`. If ``strict=False`` and there is more than one
    result, the first result is returned.
    """
    trees = facettupletrees(table, key, start=start, stop=stop, value=value)
    return {
        facet: IntervalTreeLookupOne(tree, include_stop=include_stop,
                                     strict=strict)
        for facet, tree in trees.items()
    }
Construct a faceted interval lookup for the given table, returning at most one result for each query. If ``strict=True``, queries returning more than one result will raise a `DuplicateKeyError`. If ``strict=False`` and there is more than one result, the first result is returned.
def _get_ami_dict(json_url):
    """Get ami from a web url.

    Args:
        json_url (str): URL of the JSON document containing AMI IDs.

    Returns:
        dict: Contents in dictionary format.
    """
    LOG.info("Getting AMI from %s", json_url)
    response = requests.get(json_url)
    # NOTE(review): `assert` is stripped under `python -O`; consider
    # `response.raise_for_status()` if callers don't rely on AssertionError.
    assert response.ok, "Error getting ami info from {}".format(json_url)
    ami_dict = response.json()
    LOG.debug('AMI json contents: %s', ami_dict)
    return ami_dict
Get ami from a web url. Args: region (str): AWS Region to find AMI ID. Returns: dict: Contents in dictionary format.
def entity_readme_content(self, entity_id, channel=None):
    '''Get the readme for an entity.

    @entity_id The id of the entity (i.e. charm, bundle).
    @param channel Optional channel name.
    '''
    url = self.entity_readme_url(entity_id, channel=channel)
    return self._get(url).text
Get the readme for an entity. @entity_id The id of the entity (i.e. charm, bundle). @param channel Optional channel name.
def _noise_model_program_header(noise_model):
    """
    Generate the header for a pyquil Program that uses ``noise_model`` to
    overload noisy gates. The header consists of three sections:

        - ``DEFGATE`` statements defining the newly introduced "noisy"
          gate names.
        - ``PRAGMA ADD-KRAUS`` statements overloading those gates on
          specific qubit targets with their noisy implementation.
        - ``PRAGMA READOUT-POVM`` statements defining the noisy readout
          per qubit.

    :param NoiseModel noise_model: The assumed noise model.
    :return: A quil Program with the noise pragmas.
    :rtype: pyquil.quil.Program
    """
    from pyquil.quil import Program
    program = Program()
    defined_names = set()
    for gate_spec in noise_model.gates:
        # Look up the ideal gate matrix and the new, noisy name.
        try:
            ideal_gate, noisy_name = get_noisy_gate(gate_spec.gate,
                                                    tuple(gate_spec.params))
            # DEFGATE the ideal version once per distinct noisy name.
            if noisy_name not in defined_names:
                program.defgate(noisy_name, ideal_gate)
                defined_names.add(noisy_name)
        except NoisyGateUndefined:
            print("WARNING: Could not find ideal gate definition for gate {}".format(gate_spec.gate),
                  file=sys.stderr)
            noisy_name = gate_spec.gate
        # Overload the noisy gate on its specific targets.
        program.define_noisy_gate(noisy_name, gate_spec.targets,
                                  gate_spec.kraus_ops)
    # Noisy readout per qubit.
    for qubit, assignment in noise_model.assignment_probs.items():
        program.define_noisy_readout(qubit, p00=assignment[0, 0],
                                     p11=assignment[1, 1])
    return program
Generate the header for a pyquil Program that uses ``noise_model`` to overload noisy gates. The program header consists of 3 sections: - The ``DEFGATE`` statements that define the meaning of the newly introduced "noisy" gate names. - The ``PRAGMA ADD-KRAUS`` statements to overload these noisy gates on specific qubit targets with their noisy implementation. - THe ``PRAGMA READOUT-POVM`` statements that define the noisy readout per qubit. :param NoiseModel noise_model: The assumed noise model. :return: A quil Program with the noise pragmas. :rtype: pyquil.quil.Program
def receipts():
    '''
    Return the results of a call to
    ``system_profiler -xml -detail full SPInstallHistoryDataType``
    as a dictionary. Top-level keys are the receipt names (there can be
    several receipts with the same name); each value is a list of dicts
    describing the individual installs.

    CLI Example:

    .. code-block:: bash

        salt '*' systemprofiler.receipts
    '''
    entries = _call_system_profiler('SPInstallHistoryDataType')
    grouped = {}
    for entry in entries:
        details = dict(entry)
        name = details.pop('_name')
        # Render timestamps as human-readable strings.
        if 'install_date' in details:
            details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S')
        if 'info' in details:
            try:
                details['info'] = '{0}: {1}'.format(
                    details['info'][0],
                    details['info'][1].strftime('%Y-%m-%d %H:%M:%S'))
            except (IndexError, AttributeError):
                # 'info' did not have the expected (text, datetime) shape;
                # keep it untouched, same as the original behaviour.
                pass
        grouped.setdefault(name, []).append(details)
    return grouped
Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts
def inv(self, q_data, max_iterations=100, tollerance=1e-5):
    """
    Inverse Rosenblatt transformation.

    If possible the transformation is done analytically; otherwise it is
    approximated numerically.

    Args:
        q_data (numpy.ndarray):
            Probabilities to invert. Every value must lie in ``[0, 1]``
            and ``q_data.shape`` must be compatible with the distribution
            shape.
        max_iterations (int):
            Maximum number of Newton-Raphson iterations when approximation
            is used.
        tollerance (float):
            Error tolerance defining convergence when approximation is
            used.

    Returns:
        (numpy.ndarray):
            Inverted probability values with ``out.shape == q_data.shape``.
    """
    # NOTE(review): max_iterations/tollerance are accepted for API
    # compatibility but are not referenced in this body.
    q_data = numpy.asfarray(q_data)
    assert numpy.all((q_data >= 0) & (q_data <= 1)), "sanitize your inputs!"
    original_shape = q_data.shape
    flattened = q_data.reshape(len(self), -1)
    x_data = evaluation.evaluate_inverse(self, flattened)
    # Clip the inverted samples into the distribution's support.
    lower, upper = evaluation.evaluate_bound(self, x_data)
    x_data = numpy.clip(x_data, a_min=lower, a_max=upper)
    return x_data.reshape(original_shape)
Inverse Rosenblatt transformation. If possible the transformation is done analytically. If not possible, transformation is approximated using an algorithm that alternates between Newton-Raphson and binary search. Args: q_data (numpy.ndarray): Probabilities to be inverse. If any values are outside ``[0, 1]``, error will be raised. ``q_data.shape`` must be compatible with distribution shape. max_iterations (int): If approximation is used, this sets the maximum number of allowed iterations in the Newton-Raphson algorithm. tollerance (float): If approximation is used, this set the error tolerance level required to define a sample as converged. Returns: (numpy.ndarray): Inverted probability values where ``out.shape == q_data.shape``.
def boxcox(X):
    """
    Gaussianize X using the Box-Cox transformation: [samples x phenotypes]

    - each phenotype is brought to a positive scale by first subtracting
      its minimum value and adding 1.
    - then each phenotype is transformed by the Box-Cox transformation
      (``scipy.stats.boxcox``).

    NaN entries are passed through unchanged; only the finite entries of
    each column are transformed.

    Returns:
        tuple: ``(X_transformed, maxlog)`` where ``X_transformed`` has the
        same shape as ``X`` and ``maxlog[i]`` is the lambda that maximizes
        the log-likelihood for column ``i``.
    """
    # BUGFIX: the numpy aliases on the scipy namespace (sp.zeros_like,
    # sp.isnan, ...) were deprecated and removed; use numpy directly.
    import numpy as _np
    X_transformed = _np.zeros_like(X)
    maxlog = _np.zeros(X.shape[1])
    for i in range(X.shape[1]):
        i_nan = _np.isnan(X[:, i])
        values = X[~i_nan, i]
        # Copy NaNs through untouched.
        X_transformed[i_nan, i] = X[i_nan, i]
        X_transformed[~i_nan, i], maxlog[i] = st.boxcox(values - values.min() + 1.0)
    return X_transformed, maxlog
Gaussianize X using the Box-Cox transformation: [samples x phenotypes] - each phenotype is brought to a positive scale, by first subtracting the minimum value and adding 1. - Then each phenotype is transformed by the boxcox transformation
def topN(self, user, n=10, exclude_seen=True, items_pool=None):
    """
    Recommend Top-N items for a user

    Outputs the Top-N items according to score predicted by the model.
    Can exclude the items for the user that were associated to her in the
    training set, and can also recommend from only a subset of
    user-provided items.

    Parameters
    ----------
    user : obj
        User for which to recommend.
    n : int
        Number of top items to recommend.
    exclude_seen: bool
        Whether to exclude items that were associated to the user in the
        training set.
    items_pool: None or array
        Items to consider for recommending to the user.

    Returns
    -------
    rec : array (n,)
        Top-N recommended items.
    """
    if isinstance(n, float):
        n = int(n)
    assert isinstance(n, int)
    if self.reindex:
        if self.produce_dicts:
            try:
                user = self.user_dict_[user]
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            except Exception:
                raise ValueError("Can only predict for users who were in the training set.")
        else:
            user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0]
            if user == -1:
                raise ValueError("Can only predict for users who were in the training set.")
    if exclude_seen and not self.keep_data:
        raise Exception("Can only exclude seen items when passing 'keep_data=True' to .fit")

    if items_pool is None:
        # Scores are negated so argpartition/argsort put best items first.
        allpreds = - (self.Theta[user].dot(self.Beta.T))
        if exclude_seen:
            # Fetch extra candidates so the top-n survives removing seen items.
            n_ext = np.min([n + self._n_seen_by_user[user], self.Beta.shape[0]])
            rec = np.argpartition(allpreds, n_ext - 1)[:n_ext]
            seen = self.seen[self._st_ix_user[user]: self._st_ix_user[user] + self._n_seen_by_user[user]]
            rec = np.setdiff1d(rec, seen)
            rec = rec[np.argsort(allpreds[rec])[:n]]
            if self.reindex:
                return self.item_mapping_[rec]
            else:
                return rec
        else:
            n = np.min([n, self.Beta.shape[0]])
            rec = np.argpartition(allpreds, n - 1)[:n]
            rec = rec[np.argsort(allpreds[rec])]
            if self.reindex:
                return self.item_mapping_[rec]
            else:
                return rec
    else:
        if isinstance(items_pool, list) or isinstance(items_pool, tuple):
            items_pool = np.array(items_pool)
        if items_pool.__class__.__name__ == 'Series':
            items_pool = items_pool.values
        if isinstance(items_pool, np.ndarray):
            if len(items_pool.shape) > 1:
                items_pool = items_pool.reshape(-1)
            if self.reindex:
                items_pool_reind = pd.Categorical(items_pool, self.item_mapping_).codes
                nan_ix = (items_pool_reind == -1)
                if nan_ix.sum() > 0:
                    items_pool_reind = items_pool_reind[~nan_ix]
                    # BUGFIX: the original warning lacked a separating space
                    # ("item_pool'that were") and misspelled "excluded".
                    msg = "There were " + ("%d" % int(nan_ix.sum())) + " entries from 'item_pool'"
                    msg += " that were not in the training data and will be excluded."
                    warnings.warn(msg)
                del nan_ix
                if items_pool_reind.shape[0] == 0:
                    raise ValueError("No items to recommend.")
                elif items_pool_reind.shape[0] == 1:
                    raise ValueError("Only 1 item to recommend.")
                else:
                    pass
        else:
            raise ValueError("'items_pool' must be an array.")

        if self.reindex:
            allpreds = - self.Theta[user].dot(self.Beta[items_pool_reind].T)
        else:
            allpreds = - self.Theta[user].dot(self.Beta[items_pool].T)
        n = np.min([n, items_pool.shape[0]])
        if exclude_seen:
            n_ext = np.min([n + self._n_seen_by_user[user], items_pool.shape[0]])
            rec = np.argpartition(allpreds, n_ext - 1)[:n_ext]
            seen = self.seen[self._st_ix_user[user]: self._st_ix_user[user] + self._n_seen_by_user[user]]
            if self.reindex:
                rec = np.setdiff1d(items_pool_reind[rec], seen)
                allpreds = - self.Theta[user].dot(self.Beta[rec].T)
                return self.item_mapping_[rec[np.argsort(allpreds)[:n]]]
            else:
                rec = np.setdiff1d(items_pool[rec], seen)
                allpreds = - self.Theta[user].dot(self.Beta[rec].T)
                return rec[np.argsort(allpreds)[:n]]
        else:
            rec = np.argpartition(allpreds, n - 1)[:n]
            return items_pool[rec[np.argsort(allpreds[rec])]]
Recommend Top-N items for a user Outputs the Top-N items according to score predicted by the model. Can exclude the items for the user that were associated to her in the training set, and can also recommend from only a subset of user-provided items. Parameters ---------- user : obj User for which to recommend. n : int Number of top items to recommend. exclude_seen: bool Whether to exclude items that were associated to the user in the training set. items_pool: None or array Items to consider for recommending to the user. Returns ------- rec : array (n,) Top-N recommended items.
def mark_all(request):
    """
    Mark all of the user's notifications as read or unread.

    Takes ``action`` as POST data; it can be either ``read`` or ``unread``.

    :param request: HTTP Request context.
    :return: Response to mark_all action.
    """
    action = request.POST.get('action', None)
    if action == 'read':
        request.user.notifications.read_all()
        msg, success = _("Marked all notifications as read"), True
    elif action == 'unread':
        request.user.notifications.unread_all()
        msg, success = _("Marked all notifications as unread"), True
    else:
        msg, success = _("Invalid mark action"), False
    context = {'msg': msg, 'success': success, 'action': action}
    return notification_redirect(request, context)
Marks notifications as either read or unread depending of POST parameters. Takes ``action`` as POST data, it can either be ``read`` or ``unread``. :param request: HTTP Request context. :return: Response to mark_all action.
def wrap(cls, meth):
    '''Wrap a connection-opening coroutine so that the socket it returns
    is wrapped in ``cls``.
    '''
    async def wrapper(*args, **kwargs):
        raw_sock = await meth(*args, **kwargs)
        return cls(raw_sock)
    return wrapper
Wraps a connection opening method in this class.
def bleu_tokenize(string):
    r"""Tokenize a string following the official BLEU implementation.

    See https://github.com/moses-smt/mosesdecoder/"
    "blob/master/scripts/generic/mteval-v14.pl#L954-L983

    The input is expected to be a single line with no HTML entities to
    de-escape, so we only tokenize on punctuation and symbols, except when
    a punctuation mark is both preceded and followed by a digit (e.g. a
    comma/dot used as a thousand/decimal separator).

    Note that a number (e.g. a year) followed by a dot at the end of a
    sentence is NOT tokenized, i.e. the dot stays with the number, because
    `s/(\p{P})(\P{N})/ $1 $2/g` does not match that case (unless a space is
    added after each sentence). This mirrors the original mteval-v14.pl
    behaviour, which we intentionally stay consistent with.

    Args:
      string: the input string

    Returns:
      a list of tokens
    """
    # Apply the three separation passes in the same order as mteval-v14.pl.
    for pattern, replacement in (
            (uregex.nondigit_punct_re, r"\1 \2 "),
            (uregex.punct_nondigit_re, r" \1 \2"),
            (uregex.symbol_re, r" \1 ")):
        string = pattern.sub(replacement, string)
    return string.split()
r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens
def azureTables(self, *args, **kwargs):
    """
    List Tables in an Account Managed by Auth

    Retrieve a list of all tables in an account.

    This method gives output: ``v1/azure-table-list-response.json#``

    This method is ``stable``
    """
    func_info = self.funcinfo["azureTables"]
    return self._makeApiCall(func_info, *args, **kwargs)
List Tables in an Account Managed by Auth Retrieve a list of all tables in an account. This method gives output: ``v1/azure-table-list-response.json#`` This method is ``stable``
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
      input_dir: input directory
      batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
      filenames: list file names without path of each image
        Length of this list could be less than batch_size, in this case only
        first few images of the result are elements of the minibatch.
      images: array with all images from this batch
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
        with tf.gfile.Open(filepath) as f:
            # BUGFIX: np.float was removed in NumPy 1.24; it was always an
            # alias for the builtin float, which is used here instead.
            images[idx, :, :, :] = imread(f, mode='RGB').astype(float) / 255.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    # Flush the final, possibly partial, batch (unused slots stay zero).
    if idx > 0:
        yield filenames, images
Read png images from input directory in batches. Args: input_dir: input directory batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3] Yields: filenames: list file names without path of each image Length of this list could be less than batch_size, in this case only first few images of the result are elements of the minibatch. images: array with all images from this batch
def _set_domain_name(self, v, load=False):
    """
    Setter method for domain_name, mapped from YANG variable /protocol/cfm/domain_name/domain_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_domain_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_domain_name() directly.
    """
    # Keys of an instantiated YANG list may only be set at load time
    # (load=True); reject direct assignment afterwards.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")

    # Normalise wrapped values coming from other pyangbind types.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value against the generated YANG type spec
        # (string restricted to length 1..21).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """domain_name must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..21']}), is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'String length 21 char', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='string', is_config=True)""",
        })

    self.__domain_name = t
    # Notify the containing object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
Setter method for domain_name, mapped from YANG variable /protocol/cfm/domain_name/domain_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_domain_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_domain_name() directly.
def sample_cluster(sources, srcfilter, num_ses, param):
    """
    Generates ruptures from a cluster of sources.

    :param sources:
        A sequence of sources of the same group
    :param srcfilter:
        A filter yielding (source, sites) pairs for the sources
    :param num_ses:
        Number of stochastic event sets
    :param param:
        a dictionary of additional parameters including
        ses_per_logic_tree_path
    :returns:
        a tuple (eb_ruptures, calc_times, eff_ruptures, grp_id)
    """
    eb_ruptures = []
    numpy.random.seed(sources[0].serial)
    [grp_id] = set(src.src_group_id for src in sources)
    # AccumDict of arrays with 3 elements weight, nsites, calc_time
    calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
    # Set the parameters required to compute the number of occurrences
    # of the group of sources
    #  assert param['oqparam'].number_of_logic_tree_samples > 0
    samples = getattr(sources[0], 'samples', 1)
    tom = sources.temporal_occurrence_model
    rate = tom.occurrence_rate
    time_span = tom.time_span
    # Note that using a single time interval corresponding to the product
    # of the investigation time and the number of realisations as we do
    # here is admitted only in the case of a time-independent model
    grp_num_occ = numpy.random.poisson(rate * time_span * samples * num_ses)
    # Now we process the sources included in the group. Possible cases:
    # * The group is a cluster. In this case we choose one rupture per each
    #   source; uncertainty in the ruptures can be handled in this case
    #   using mutually exclusive ruptures (note that this is admitted
    #   only for nons-parametric sources).
    # * The group contains mutually exclusive sources. In this case we
    #   choose one source and then one rupture from this source.
    rup_counter = {}
    rup_data = {}
    eff_ruptures = 0
    for rlz_num in range(grp_num_occ):
        if sources.cluster:
            for src, _sites in srcfilter(sources):
                # Sum Ruptures
                if rlz_num == 0:
                    eff_ruptures += src.num_ruptures
                # Track calculation time
                t0 = time.time()
                rup = src.get_one_rupture()
                # The problem here is that we do not know a-priori the
                # number of occurrences of a given rupture.
                if src.id not in rup_counter:
                    rup_counter[src.id] = {}
                    rup_data[src.id] = {}
                if rup.idx not in rup_counter[src.id]:
                    rup_counter[src.id][rup.idx] = 1
                    rup_data[src.id][rup.idx] = [rup, src.id, grp_id]
                else:
                    rup_counter[src.id][rup.idx] += 1
                # Store info
                dt = time.time() - t0
                calc_times[src.id] += numpy.array(
                    [len(rup_data[src.id]), src.nsites, dt])
        elif param['src_interdep'] == 'mutex':
            # BUGFIX: this branch previously printed a message and called
            # exit(0), silently terminating the whole process with a
            # success exit code from inside library code.
            raise NotImplementedError(
                "src_interdep == 'mutex' is not yet implemented")
    # Create event based ruptures
    for src_key in rup_data:
        for rup_key in rup_data[src_key]:
            dat = rup_data[src_key][rup_key]
            cnt = rup_counter[src_key][rup_key]
            ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples)
            eb_ruptures.append(ebr)

    return eb_ruptures, calc_times, eff_ruptures, grp_id
Generates ruptures from a cluster of sources. :param sources: A sequence of sources of the same group :param num_ses: Number of stochastic event sets :param param: a dictionary of additional parameters including ses_per_logic_tree_path :returns: a tuple (eb_ruptures, calc_times, eff_ruptures, grp_id)
def listen(manifest, config, model_mock=False):
    """
    Run the IRC listening process until interrupted.
    """
    config['manifest'] = manifest
    config['model_mock'] = model_mock
    bot = IrcBot(config)
    try:
        bot.start()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the listener; exit quietly.
        pass
IRC listening process.
def make_ref(self, branch):
    """ Make a branch on github

    :param branch: Name of the branch to create
    :return: Sha of the branch or self.ProxyError
    """
    master_sha = self.get_ref(self.master_upstream)
    if not isinstance(master_sha, str):
        return self.ProxyError(
            404,
            "The default branch from which to checkout is either not available or does not exist",
            step="make_ref"
        )
    payload = {
        "ref": "refs/heads/{branch}".format(branch=branch),
        "sha": master_sha
    }
    uri = "{api}/repos/{origin}/git/refs".format(
        api=self.github_api_url,
        origin=self.origin
    )
    response = self.request("POST", uri, data=payload)
    body = json.loads(response.content.decode("utf-8"))
    if response.status_code == 201:
        return body["object"]["sha"]
    return self.ProxyError(
        response.status_code,
        (body, "message"),
        step="make_ref",
        context={
            "uri": uri,
            "params": payload
        }
    )
Make a branch on github :param branch: Name of the branch to create :return: Sha of the branch or self.ProxyError
def load_data(self):
    """
    Loads data from `scipy_data_fitting.Data.path` using
    [`numpy.genfromtxt`][1] and returns a [`numpy.ndarray`][2].

    Data is scaled according to `scipy_data_fitting.Data.scale`:
    row ``n`` of the array is multiplied by ``self.scale[n]``.

    Arguments to [`numpy.genfromtxt`][1] are controlled
    by `scipy_data_fitting.Data.genfromtxt_args`.

    [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
    [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
    """
    array = numpy.genfromtxt(self.path, **self.genfromtxt_args)
    # Use the unpacked ``scale`` directly (the original re-indexed
    # ``self.scale[n]``, leaving the loop variable unused).
    for n, scale in enumerate(self.scale):
        array[n, :] *= scale
    return array
Loads data from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1] and returns a [`numpy.ndarray`][2]. Data is scaled according to `scipy_data_fitting.Data.scale`. Arguments to [`numpy.genfromtxt`][1] are controlled by `scipy_data_fitting.Data.genfromtxt_args`. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html [2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
def find_databases(databases):
    """Return the 16 ribosomal protein names (in their expected gene order)
    and a mapping from each name to its curated database path under
    *databases*.
    """
    # 16 ribosomal proteins in their expected order
    proteins = ['L15', 'L18', 'L6', 'S8', 'L5', 'L24', 'L14', 'S17',
                'L16', 'S3', 'L22', 'S19', 'L2', 'L4', 'L3', 'S10']
    # every curated database file follows the same naming scheme:
    # <databases>/rp<name>_JGI_MDM.filtered.faa
    protein_databases = {
        name: '%s/rp%s_JGI_MDM.filtered.faa' % (databases, name)
        for name in proteins}
    return proteins, protein_databases
define ribosomal proteins and location of curated databases
def store_object(self, obj_name, data, content_type=None, etag=None,
                 content_encoding=None, ttl=None, return_none=False,
                 headers=None, extra_info=None):
    """
    Create a new object in this container populated with the given data,
    by delegating to ``create``. Returns a StorageObject reference to the
    uploaded file unless 'return_none' is True.

    The 'extra_info' parameter is accepted for backwards compatibility
    only; it is ignored entirely (it used to carry swiftclient info, and
    swiftclient is no longer used).
    """
    return self.create(obj_name=obj_name, data=data,
                       content_type=content_type, etag=etag,
                       content_encoding=content_encoding, ttl=ttl,
                       return_none=return_none, headers=headers)
Creates a new object in this container, and populates it with the given data. A StorageObject reference to the uploaded file will be returned, unless 'return_none' is set to True. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
def print_help_page(bot, file=sys.stdout):
    """Print a reStructuredText help page listing every registered command,
    grouped by defining module, to *file*."""
    def p(text):
        # Shorthand writer bound to the output stream.
        print(text, file=file)
    plugin = bot.get_plugin(Commands)
    title = "Available Commands for {nick} at {host}".format(**bot.config)
    # Document title with rst over/underline.
    p("=" * len(title))
    p(title)
    p("=" * len(title))
    p('')
    p('.. contents::')
    p('')
    # Group commands by the module that defines their callback.
    modules = {}
    for name, (predicates, callback) in plugin.items():
        commands = modules.setdefault(callback.__module__, [])
        commands.append((name, callback, predicates))
    for module in sorted(modules):
        # One rst section per module.
        p(module)
        p('=' * len(module))
        p('')
        for name, callback, predicates in sorted(modules[module]):
            # One rst subsection per command, body taken from its docstring.
            p(name)
            p('-' * len(name))
            p('')
            doc = callback.__doc__
            # Substitute the configured command prefix into usage examples.
            doc = doc.replace('%%', bot.config.cmd)
            for line in doc.split('\n'):
                line = line.strip()
                if line.startswith(bot.config.cmd):
                    line = ' ``{}``'.format(line)
                p(line)
            if 'permission' in predicates:
                p('*Require {0[permission]} permission.*'.format(predicates))
            if predicates.get('public', True) is False:
                p('*Only available in private.*')
            p('')
print help page
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )

    url = timeout = None
    # Resolve the breaker URL/timeout with GSI config taking precedence
    # over table config, falling back to the global options.
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call: strip embedded credentials from the URL
    # and pass them via HTTP basic auth instead.
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request; any 2xx response means the circuit is closed.
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,  # configured in ms, requests wants s
            headers=headers)
        if int(response.status_code) >= 200 and int(response.status_code) < 300:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))
    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    # Any failure mode above falls through to "open" (fail safe).
    return True
Checks whether the circuit breaker is open :param table_name: Name of the table being checked :param table_key: Configuration key for table :param gsi_name: Name of the GSI being checked :param gsi_key: Configuration key for the GSI :returns: bool -- True if the circuit is open
def validate(token):
    """Validate token and return auth context."""
    headers = {
        'x-auth-token': token,
        'accept': 'application/json',
    }
    resp = requests.get(TOKEN_URL_FMT.format(token=token), headers=headers)
    if resp.status_code != 200:
        # Any non-200 answer means the token is not valid.
        raise HTTPError(status=401)
    return resp.json()
Validate token and return auth context.
def _set_catalog_view(self, session):
    """Sets the underlying catalog view to match current view"""
    if self._catalog_view == FEDERATED:
        switch_name = 'use_federated_catalog_view'
    else:
        switch_name = 'use_isolated_catalog_view'
    try:
        # Sessions without catalog-view support simply keep their default.
        getattr(session, switch_name)()
    except AttributeError:
        pass
Sets the underlying catalog view to match current view
def filter_out_none_valued_keys(self, d):
    # type: (typing.Dict[K, V]) -> typing.Dict[K, V]
    """Return a copy of *d* without the keys whose value is None.

    All other key/value pairs (including falsy values such as 0 or '')
    are kept unchanged.
    """
    return {key: value for key, value in d.items() if value is not None}
Given a dict, returns a new dict with all the same key/values except for keys that had values of None.
def flatten_hierarchical_dict(original_dict, separator='.', max_recursion_depth=None):
    """Flatten a dict.

    Inputs
    ------
    original_dict: dict
        the dictionary to flatten
    separator: string, optional
        the separator item in the keys of the flattened dictionary
    max_recursion_depth: positive integer, optional
        the number of recursions to be done. None is infinte.

    Output
    ------
    the flattened dictionary

    Notes
    -----
    Values that are not instances of `dict` (or a subclass) are kept as is.
    For a nested dict value under ``'key_dict'``, each entry
    ``'key_in_key_dict'`` becomes a key of the form
    ``key_dict<separator><key_in_key_dict>`` in the result, recursing until
    the maximum recursion depth is reached. Only string keys are supported.
    """
    if max_recursion_depth is not None and max_recursion_depth <= 0:
        # Maximum recursion depth reached: return the dict unflattened.
        return original_dict
    remaining_depth = (None if max_recursion_depth is None
                       else max_recursion_depth - 1)
    flattened = {}
    for key, value in original_dict.items():
        if isinstance(value, dict):
            nested = flatten_hierarchical_dict(value, separator,
                                               remaining_depth)
            flattened.update({key + separator + subkey: nested[subkey]
                              for subkey in nested})
        else:
            flattened[key] = value
    return flattened
Flatten a dict. Inputs ------ original_dict: dict the dictionary to flatten separator: string, optional the separator item in the keys of the flattened dictionary max_recursion_depth: positive integer, optional the number of recursions to be done. None is infinte. Output ------ the flattened dictionary Notes ----- Each element of `original_dict` which is not an instance of `dict` (or of a subclass of it) is kept as is. The others are treated as follows. If ``original_dict['key_dict']`` is an instance of `dict` (or of a subclass of `dict`), a corresponding key of the form ``key_dict<separator><key_in_key_dict>`` will be created in ``original_dict`` with the value of ``original_dict['key_dict']['key_in_key_dict']``. If that value is a subclass of `dict` as well, the same procedure is repeated until the maximum recursion depth is reached. Only string keys are supported.
def get_rule_id_from_name(self, name):
    """Find the ids of rules whose keyName matches ``name``.

    ``SoftLayer_Virtual_PlacementGroup_Rule.getAllObjects`` doesn't
    support objectFilters, so every rule is fetched and filtered here.
    """
    wanted = name.upper()
    all_rules = self.client.call('SoftLayer_Virtual_PlacementGroup_Rule', 'getAllObjects')
    matches = []
    for rule in all_rules:
        if rule['keyName'] == wanted:
            matches.append(rule['id'])
    return matches
Finds the rule that matches name. SoftLayer_Virtual_PlacementGroup_Rule.getAllObjects doesn't support objectFilters.
def decr(self, key, value, noreply=False):
    """The memcached "decr" command.

    Args:
      key: str, see class docs for details.
      value: int, the amount by which to decrement the value.
      noreply: optional bool, False to wait for the reply (the default).

    Returns:
      If noreply is True, always returns None.

      Otherwise returns the new value of the key, or None if the key
      wasn't found.
    """
    key = self.check_key(key)
    command = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
    if noreply:
        command += b' noreply'
    command += b'\r\n'
    replies = self._misc_cmd([command], b'decr', noreply)
    if noreply:
        return None
    # The server answers NOT_FOUND when the key does not exist.
    if replies[0] == b'NOT_FOUND':
        return None
    return int(replies[0])
The memcached "decr" command. Args: key: str, see class docs for details. value: int, the amount by which to decrement the value. noreply: optional bool, False to wait for the reply (the default). Returns: If noreply is True, always returns None. Otherwise returns the new value of the key, or None if the key wasn't found.
def setTreeDoc(self, tree):
    """Update all nodes under ``tree`` to point to the right document."""
    tree__o = None if tree is None else tree._o
    libxml2mod.xmlSetTreeDoc(tree__o, self._o)
update all nodes under the tree to point to the right document
def _rename_duplicate_tabs(self, current, name, path):
    """
    Rename tabs whose title is the same as the name.

    The first tab other than ``current`` whose ``_tab_name`` equals
    ``name`` is retitled as ``<parent_dir>/<name>`` so the two can be
    told apart; the disambiguated title to use for ``current`` is then
    returned.

    :param current: the tab widget being (re)named; it is skipped.
    :param name: base file name that collides with another tab's title.
    :param path: file path of ``current``; may be falsy.
    :return: ``parent_dir/name`` when ``path`` is given, otherwise
        ``name`` unchanged.
    """
    for i in range(self.count()):
        if self.widget(i)._tab_name == name and self.widget(i) != current:
            file_path = self.widget(i).file.path
            if file_path:
                # Use the name of the directory containing the file to
                # disambiguate the existing duplicate tab's title.
                parent_dir = os.path.split(os.path.abspath(
                    os.path.join(file_path, os.pardir)))[1]
                new_name = os.path.join(parent_dir, name)
                self.setTabText(i, new_name)
                self.widget(i)._tab_name = new_name
                # Only the first duplicate found is renamed.
                break
    if path:
        parent_dir = os.path.split(os.path.abspath(
            os.path.join(path, os.pardir)))[1]
        return os.path.join(parent_dir, name)
    else:
        return name
Rename tabs whose title is the same as the name
def chhome(name, home, persist=False):
    '''
    Set a new home directory for an existing user

    name
        Username to modify

    home
        New home directory to set

    persist : False
        Set to ``True`` to prevent configuration files in the new home
        directory from being overwritten by the files from the skeleton
        directory.

    CLI Example:

    .. code-block:: bash

        salt '*' user.chhome foo /home/users/foo True
    '''
    pre_info = info(name)
    if not pre_info:
        raise CommandExecutionError(
            'User \'{0}\' does not exist'.format(name)
        )
    if home == pre_info['home']:
        # Nothing to do: the requested home is already set.
        return True
    cmd = ['usermod', '-d', home]
    if persist:
        # usermod -m moves the contents of the old home to the new one.
        cmd.append('-m')
    cmd.append(name)
    __salt__['cmd.run'](cmd, python_shell=False)
    # Re-query the user to confirm the change took effect.
    return info(name).get('home') == home
Set a new home directory for an existing user name Username to modify home New home directory to set persist : False Set to ``True`` to prevent configuration files in the new home directory from being overwritten by the files from the skeleton directory. CLI Example: .. code-block:: bash salt '*' user.chhome foo /home/users/foo True
def load_fasta_file_as_dict_of_seqs(filename):
    """Load a FASTA file and return the sequences as a dict of {ID: sequence string}

    Args:
        filename (str): Path to the FASTA file to load

    Returns:
        dict: Dictionary of IDs to their sequence strings

    """
    return {record.id: str(record.seq) for record in load_fasta_file(filename)}
Load a FASTA file and return the sequences as a dict of {ID: sequence string} Args: filename (str): Path to the FASTA file to load Returns: dict: Dictionary of IDs to their sequence strings
def OnUpdate(self, event):
    """Updates the toolbar states.

    Reads the cell-attribute dict carried in ``event.attr`` and pushes
    each value into the matching toolbar control, then refreshes the
    toolbar and lets the event continue propagating.
    """
    attributes = event.attr

    self._update_font(attributes["textfont"])
    self._update_pointsize(attributes["pointsize"])
    self._update_font_weight(attributes["fontweight"])
    self._update_font_style(attributes["fontstyle"])
    self._update_frozencell(attributes["frozen"])
    self._update_lockedcell(attributes["locked"])
    self._update_markupcell(attributes["markup"])
    self._update_underline(attributes["underline"])
    self._update_strikethrough(attributes["strikethrough"])
    self._update_justification(attributes["justification"])
    self._update_alignment(attributes["vertical_align"])
    self._update_fontcolor(attributes["textcolor"])
    # A cell counts as merged exactly when it carries a merge area.
    self._update_merge(attributes["merge_area"] is not None)
    self._update_textrotation(attributes["angle"])
    self._update_bgbrush(attributes["bgcolor"])
    self._update_bordercolor(attributes["bordercolor_bottom"])
    self._update_borderwidth(attributes["borderwidth_bottom"])

    self.Refresh()

    event.Skip()
Updates the toolbar states
def adjoint(self):
    """Adjoint of this operator.

    Raises
    ------
    NotImplementedError
        If this operator is not linear — a nonlinear operator has no
        adjoint.
    """
    if not self.is_linear:
        raise NotImplementedError('this operator is not linear and '
                                  'thus has no adjoint')

    # Capture the outer operator so the nested class can hand it back
    # as the adjoint of the adjoint.
    forward_op = self

    class ResizingOperatorAdjoint(ResizingOperatorBase):

        """Adjoint of `ResizingOperator`.

        See `the online documentation
        <https://odlgroup.github.io/odl/math/resizing_ops.html>`_
        on resizing operators for mathematical details.
        """

        def _call(self, x, out):
            """Implement ``self(x, out)``."""
            with writable_array(out) as out_arr:
                resize_array(x.asarray(), self.range.shape,
                             offset=self.offset, pad_mode=self.pad_mode,
                             pad_const=0, direction='adjoint',
                             out=out_arr)

        @property
        def adjoint(self):
            """Adjoint of the adjoint, i.e. the original operator."""
            return forward_op

        @property
        def inverse(self):
            """(Pseudo-)Inverse of this operator.

            Note that in axes where ``self`` extends, the returned operator
            acts as a proper inverse, while in restriction axes, the
            operation is not invertible.
            """
            return ResizingOperatorAdjoint(
                domain=self.range, range=self.domain,
                pad_mode=self.pad_mode)

    return ResizingOperatorAdjoint(domain=self.range, range=self.domain,
                                   pad_mode=self.pad_mode)
Adjoint of this operator.
def try_int(s, default=None, minimum=None):
    """ Try parsing a string into an integer.
        If a falsy value (None, empty string, ...) is passed, default is
        returned. On failure, InvalidNumber is raised. The result is
        clamped up to `minimum` when one is given.
    """
    if not s:
        return default
    try:
        parsed = int(s)
    except (TypeError, ValueError):
        raise InvalidNumber(s)
    if minimum is not None and parsed < minimum:
        return minimum
    return parsed
Try parsing a string into an integer. If None is passed, default is returned. On failure, InvalidNumber is raised.
def setup_errors(app, error_template="error.html"):
    """Add a handler for each of the available HTTP error responses."""

    def error_handler(error):
        # HTTPExceptions carry their own code/name/description; anything
        # else is treated as a generic 500.
        if isinstance(error, HTTPException):
            code = error.code
            name = error.name
            description = error.get_description(request.environ)
        else:
            code = 500
            name = "Internal Server Error"
            description = error

        rendered = render_template(error_template,
                                   error=error,
                                   code=code,
                                   name=Markup(name),
                                   description=Markup(description))
        return rendered, code

    for exception in default_exceptions:
        app.register_error_handler(exception, error_handler)
Add a handler for each of the available HTTP error responses.
def item_wegobject_adapter(obj, request):
    """
    Adapter for rendering a list of
    :class:`crabpy.gateway.Wegobject` to json.
    """
    meta = obj.metadata
    bewerking = meta.begin_bewerking
    organisatie = meta.begin_organisatie
    return {
        'id': obj.id,
        'aard': {
            'id': obj.aard.id,
            'naam': obj.aard.naam,
            'definitie': obj.aard.definitie
        },
        'centroid': obj.centroid,
        'bounding_box': obj.bounding_box,
        'metadata': {
            'begin_tijd': meta.begin_tijd,
            'begin_datum': meta.begin_datum,
            'begin_bewerking': {
                'id': bewerking.id,
                'naam': bewerking.naam,
                'definitie': bewerking.definitie
            },
            'begin_organisatie': {
                'id': organisatie.id,
                'naam': organisatie.naam,
                'definitie': organisatie.definitie
            }
        }
    }
Adapter for rendering a list of :class:`crabpy.gateway.Wegobject` to json.
def default(session): """Default unit test session. This is intended to be run **without** an interpreter set, so that the current ``python`` (on the ``PATH``) or the version of Python corresponding to the ``nox`` binary the ``PATH`` can run the tests. """ # Install all test dependencies, then install local packages in-place. session.install("mock", "pytest", "pytest-cov") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) # Pyarrow does not support Python 3.7 dev_install = ".[all]" session.install("-e", dev_install) # IPython does not support Python 2 after version 5.x if session.python == "2.7": session.install("ipython==5.5") else: session.install("ipython") # Run py.test against the unit tests. session.run( "py.test", "--quiet", "--cov=google.cloud.bigquery", "--cov=tests.unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", "--cov-fail-under=97", os.path.join("tests", "unit"), *session.posargs )
Default unit test session. This is intended to be run **without** an interpreter set, so that the current ``python`` (on the ``PATH``) or the version of Python corresponding to the ``nox`` binary on the ``PATH`` can run the tests.
def get_labels(self, depth=None):
    """
    Returns a list with a copy of the labels in this cell.

    Parameters
    ----------
    depth : integer or ``None``
        If not ``None``, defines from how many reference levels to
        retrieve labels from.

    Returns
    -------
    out : list of ``Label``
        List containing the labels in this cell and its references.
    """
    labels = libCopy.deepcopy(self.labels)
    if depth is None or depth > 0:
        # CellReference and CellArray expose the same get_labels
        # interface, so the two formerly duplicated branches collapse
        # into a single isinstance check.
        for element in self.elements:
            if isinstance(element, (CellReference, CellArray)):
                labels.extend(
                    element.get_labels(None if depth is None else depth - 1))
    return labels
Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references.
def from_epsg(self, epsg_code):
    """
    Loads self.prj by epsg_code.
    If prjtext not found returns False.
    """
    self.epsg_code = epsg_code
    assert isinstance(self.epsg_code, int)
    cursor = self.conn.cursor()
    cursor.execute("SELECT prjtext FROM prj_epsg WHERE epsg_code = ?",
                   (self.epsg_code,))
    row = cursor.fetchone()
    if row is None:
        return False
    self.prj = row[0]
    return True
Loads self.prj by epsg_code. If prjtext not found returns False.
async def stop_bridges(self):
    """Stop all sleep tasks to allow bridges to end."""
    for pending_task in self.sleep_tasks:
        pending_task.cancel()
    for active_bridge in self.bridges:
        active_bridge.stop()
Stop all sleep tasks to allow bridges to end.
def get_strategy_types():
    """Get a list of all :class:`Strategy` subclasses.

    Subclasses are collected recursively, depth-first, each appearing
    exactly once. The previous implementation extended the list while
    iterating over it, which revisited appended entries and duplicated
    grandchildren (and deeper descendants).
    """
    def get_subtypes(type_):
        # Visit each direct subclass once and splice in its own subtree.
        subtypes = []
        for subtype in type_.__subclasses__():
            subtypes.append(subtype)
            subtypes.extend(get_subtypes(subtype))
        return subtypes

    return get_subtypes(Strategy)
Get a list of all :class:`Strategy` subclasses.
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given input text, returns the authors likelihood of being 16 different
    personality types in a dict.

    Example usage:

    .. code-block:: python

       >>> text = "I love going out with my friends"
       >>> entities = indicoio.personas(text)
       {'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
        'commander': 0.07654544115066528 ...}

    :param text: The text to be analyzed.
    :type text: str or unicode
    :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness',
        and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
    """
    # The personas endpoint is the personality API with the persona flag set.
    kwargs['persona'] = True
    return api_handler(
        text,
        cloud=cloud,
        api="personality",
        url_params={"batch": batch, "api_key": api_key, "version": version},
        **kwargs
    )
Given input text, returns the authors likelihood of being 16 different personality types in a dict. Example usage: .. code-block:: python >>> text = "I love going out with my friends" >>> entities = indicoio.personas(text) {'architect': 0.2191890478134155, 'logician': 0.0158474326133728, 'commander': 0.07654544115066528 ...} :param text: The text to be analyzed. :type text: str or unicode :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
def args_str(self):
    """ Return an args string for the repr. """
    split_at = self._position
    matched = ', '.join(str(m) for m in self._matchers[:split_at])
    unmatched = ', '.join(str(m) for m in self._matchers[split_at:])
    return 'matched=[{}], unmatched=[{}]'.format(matched, unmatched)
Return an args string for the repr.
def build(self, targets: Iterable[str]) -> Iterable[str]:
    """
    Shell out to buck to build the targets, then yield the paths to
    the link trees.
    """
    source_directories = generate_source_directories(
        targets,
        build=self._build,
        prompt=self._prompt,
    )
    return source_directories
Shell out to buck to build the targets, then yield the paths to the link trees.
def createList(self, args):
    """
    This is an internal method to create the list of input files (or
    directories) contained in the provided directory or directories.

    A single directory argument is expanded into its entries; otherwise
    the (already wildcard-expanded) paths are used as given. Duplicates
    are removed.
    """
    if len(args.path) == 1 and os.path.isdir(args.path[0]):
        directory = args.path[0]
        candidates = [os.path.join(directory, entry)
                      for entry in os.listdir(directory)]
    else:
        # Wildcard expansion has already produced the list of files.
        candidates = args.path
    return list(set(candidates))
This is an internal method to create the list of input files (or directories) contained in the provided directory or directories.
def set_rubric(self, assessment_id):
    """Sets the rubric expressed as another assessment.

    arg:    assessment_id (osid.id.Id): the assessment ``Id``
    raise:  InvalidArgument - ``assessment_id`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise:  NullArgument - ``assessment_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.resource.ResourceForm.set_avatar_template
    metadata = self.get_rubric_metadata()
    if metadata.is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(assessment_id):
        raise errors.InvalidArgument()
    self._my_map['rubricId'] = str(assessment_id)
Sets the rubric expressed as another assessment. arg: assessment_id (osid.id.Id): the assessment ``Id`` raise: InvalidArgument - ``assessment_id`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``assessment_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def get_undecorated_callback(self):
    """ Return the callback. If the callback is a decorated function, try to
        recover the original function. """
    func = self.callback
    # Unwrap a bound method to its underlying function (py2: im_func).
    func = getattr(func, '__func__' if py3k else 'im_func', func)
    closure_attr = '__closure__' if py3k else 'func_closure'
    # Walk down the decorator chain: a wrapping closure keeps the
    # wrapped callable in its closure cells.
    while hasattr(func, closure_attr) and getattr(func, closure_attr):
        attributes = getattr(func, closure_attr)
        func = attributes[0].cell_contents

        # in case of decorators with multiple arguments
        if not isinstance(func, FunctionType):
            # pick first FunctionType instance from multiple arguments
            func = filter(lambda x: isinstance(x, FunctionType),
                          map(lambda x: x.cell_contents, attributes))
            func = list(func)[0]  # py3 support
    return func
Return the callback. If the callback is a decorated function, try to recover the original function.
def xml(self, attribs = None,elements = None, skipchildren = False):
    """See :meth:`AbstractElement.xml`

    Serialises this span annotation to XML. Word/morpheme/phoneme
    children are emitted as ``wref`` reference elements (optionally
    carrying their text) instead of inline, so the span points at
    tokens defined elsewhere in the document.
    """
    if not attribs: attribs = {}
    E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
    # Serialise self without children first; children are handled below.
    e = super(AbstractSpanAnnotation,self).xml(attribs, elements, True)
    for child in self:
        if isinstance(child, (Word, Morpheme, Phoneme)):
            #Include REFERENCES to word items instead of word items themselves
            # NOTE(review): the shared `attribs` dict is reused across
            # iterations, so a 't' set for one wref can carry over to the
            # next child's attributes — left as-is.
            attribs['{' + NSFOLIA + '}id'] = child.id
            if child.PRINTABLE and child.hastext(self.textclass):
                attribs['{' + NSFOLIA + '}t'] = child.text(self.textclass)
            e.append( E.wref(**attribs) )
        elif not (isinstance(child, Feature) and child.SUBSET):
            #Don't add pre-defined features, they are already added as attributes
            e.append( child.xml() )
    return e
See :meth:`AbstractElement.xml`
def _step(self, theme, direction): """ Traverse the list in the given direction and return the next theme """ if not self.themes: self.reload() # Try to find the starting index key = (theme.source, theme.name) for i, val in enumerate(self.themes): if (val.source, val.name) == key: index = i break else: # If the theme was set from a custom source it might # not be a part of the list returned by list_themes(). self.themes.insert(0, theme) index = 0 index = (index + direction) % len(self.themes) new_theme = self.themes[index] return new_theme
Traverse the list in the given direction and return the next theme
def post(self, endpoint, json=None, params=None, **kwargs):
    """POST to DHIS2

    :param endpoint: DHIS2 API endpoint
    :param json: HTTP payload
    :param params: HTTP parameters
    :return: requests.Response object
    """
    # A 'data' keyword argument, when supplied, overrides `json`.
    payload = kwargs.get('data', json)
    return self._make_request('post', endpoint, data=payload, params=params)
POST to DHIS2 :param endpoint: DHIS2 API endpoint :param json: HTTP payload :param params: HTTP parameters :return: requests.Response object
def tz(self):
    """Time zone information."""
    if self.timezone is None:
        return None
    try:
        return pytz.timezone(self.timezone)
    except pytz.UnknownTimeZoneError:
        raise AstralError("Unknown timezone '%s'" % self.timezone)
Time zone information.
def compileGLShader(self, pchShaderName, pchVertexShader, pchFragmentShader):
    """
    Purpose: Compiles a GL shader program and returns the handle.
    Returns 0 if the shader couldn't be compiled for some reason.

    On any compile or link failure the partially-built program and the
    offending shader object are deleted before returning 0.
    """
    unProgramID = glCreateProgram()

    # --- vertex stage ---
    nSceneVertexShader = glCreateShader(GL_VERTEX_SHADER)
    glShaderSource( nSceneVertexShader, pchVertexShader)
    glCompileShader( nSceneVertexShader )

    vShaderCompiled = glGetShaderiv( nSceneVertexShader, GL_COMPILE_STATUS)
    if not vShaderCompiled:
        dprintf("%s - Unable to compile vertex shader %d!\n" % (pchShaderName, nSceneVertexShader) )
        glDeleteProgram( unProgramID )
        glDeleteShader( nSceneVertexShader )
        return 0
    glAttachShader( unProgramID, nSceneVertexShader)
    # the program hangs onto this once it's attached
    glDeleteShader( nSceneVertexShader )

    # --- fragment stage ---
    nSceneFragmentShader = glCreateShader(GL_FRAGMENT_SHADER)
    glShaderSource( nSceneFragmentShader, pchFragmentShader)
    glCompileShader( nSceneFragmentShader )

    fShaderCompiled = glGetShaderiv( nSceneFragmentShader, GL_COMPILE_STATUS)
    if not fShaderCompiled:
        dprintf("%s - Unable to compile fragment shader %d!\n" % ( pchShaderName, nSceneFragmentShader) )
        glDeleteProgram( unProgramID )
        glDeleteShader( nSceneFragmentShader )
        return 0

    glAttachShader( unProgramID, nSceneFragmentShader )
    # the program hangs onto this once it's attached
    glDeleteShader( nSceneFragmentShader )

    # --- link ---
    glLinkProgram( unProgramID )

    programSuccess = glGetProgramiv( unProgramID, GL_LINK_STATUS)
    if not programSuccess:
        dprintf("%s - Error linking program %d!\n" % (pchShaderName, unProgramID) )
        glDeleteProgram( unProgramID )
        return 0

    # Bind once, then unbind, leaving no program current.
    glUseProgram( unProgramID )
    glUseProgram( 0 )

    return unProgramID
Purpose: Compiles a GL shader program and returns the handle. Returns 0 if the shader couldn't be compiled for some reason.
def change (properties, feature, value = None):
    """ Returns a modified version of properties with all values of the
        given feature replaced by the given value.
        If 'value' is None the feature will be removed.
    """
    assert is_iterable_typed(properties, basestring)
    assert isinstance(feature, basestring)
    assert isinstance(value, (basestring, type(None)))
    result = []

    # Properties are matched against the gristed feature name,
    # e.g. '<toolset>'.
    feature = add_grist (feature)

    for p in properties:
        if get_grist (p) == feature:
            if value:
                result.append (replace_grist (value, feature))
            # else: the matching property is dropped (feature removal)
        else:
            # Properties of other features pass through unchanged.
            result.append (p)

    return result
Returns a modified version of properties with all values of the given feature replaced by the given value. If 'value' is None the feature will be removed.
async def on_raw_433(self, message):
    """ Nickname in use. """
    if self.registered:
        return
    self._registration_attempts += 1
    # Attempt to set new nickname.
    if self._attempt_nicknames:
        await self.set_nickname(self._attempt_nicknames.pop(0))
    else:
        # All alternatives exhausted: derive a fresh nick by appending
        # underscores to the primary one.
        overflow = self._registration_attempts - len(self._nicknames)
        await self.set_nickname(self._nicknames[0] + '_' * overflow)
Nickname in use.
def _get_table_as_string(self): """Get table as SOFT formated string.""" tablelist = [] tablelist.append("!%s_table_begin" % self.geotype.lower()) tablelist.append("\t".join(self.table.columns)) for idx, row in self.table.iterrows(): tablelist.append("\t".join(map(str, row))) tablelist.append("!%s_table_end" % self.geotype.lower()) return "\n".join(tablelist)
Get table as SOFT formatted string.
def visit(self, node):
    """Visit a node.

    This method is largely modelled after the ast.NodeTransformer class.

    Args:
      node: The node to visit.

    Returns:
      A tuple of the primal and adjoint, each of which is a node or a list
      of nodes.

    Raises:
      ValueError: If no visitor method exists for the node's type.
    """
    method = 'visit_' + node.__class__.__name__
    if not hasattr(self, method):
        raise ValueError('Unknown node type: %s' % node.__class__.__name__)
    visitor = getattr(self, method)

    # If this node is a statement, inform all child nodes what the active
    # variables in this statement are
    if anno.hasanno(node, 'active_in'):
        self.active_variables = anno.getanno(node, 'active_in')
    pri, adj = visitor(node)

    # Annotate primal and adjoint statements
    # NOTE(review): the loops below deliberately rebind the name `node`,
    # shadowing the parameter, which is no longer needed at this point.
    if isinstance(pri, gast.AST):
        anno.setdefaultanno(pri, 'adj', adj)
    else:
        for node in pri:
            anno.setdefaultanno(node, 'adj', adj)
    if isinstance(adj, gast.AST):
        anno.setdefaultanno(adj, 'pri', pri)
    else:
        for node in adj:
            anno.setdefaultanno(node, 'pri', pri)
    return pri, adj
Visit a node. This method is largely modelled after the ast.NodeTransformer class. Args: node: The node to visit. Returns: A tuple of the primal and adjoint, each of which is a node or a list of nodes.
def patch(self, operation, path, value, custom_headers=None, timeout=-1):
    """Uses the PATCH to update a resource.

    Only one operation can be performed in each PATCH call.

    Args:
        operation: Patch operation
        path: Path
        value: Value
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.
        custom_headers: Allows to add custom http headers.

    Returns:
        Updated resource.
    """
    request_body = [{'op': operation, 'path': path, 'value': value}]
    self.data = self.patch_request(self.data['uri'],
                                   body=request_body,
                                   custom_headers=custom_headers,
                                   timeout=timeout)
    return self
Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. custom_headers: Allows to add custom http headers. Returns: Updated resource.
def cmd(send, msg, args):
    """Corrects a previous message.

    Syntax: {command}/<msg>/<replacement>/<ig|nick>

    The first character after the command name is the delimiter (not
    necessarily '/'). The argument is split into pattern, replacement
    and modifiers; the most recent matching message in the channel log
    is then rewritten and sent back.
    """
    if not msg:
        send("Invalid Syntax.")
        return
    char = msg[0]
    # Split on the unescaped delimiter, then unescape '\/' in the parts.
    msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)]
    # fix for people who forget a trailing slash
    if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'):
        msg.append('')
    # not a valid sed statement.
    if not msg or len(msg) < 3:
        send("Invalid Syntax.")
        return
    if args['type'] == 'privmsg':
        send("Don't worry, %s is not a grammar Nazi." % args['botnick'])
        return
    string = msg[0]
    replacement = msg[1]
    modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex'])
    if modifiers is None:
        send("Invalid modifiers.")
        return
    try:
        regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string)
        log = get_log(args['db'], args['target'], modifiers['nick'])
        workers = args['handler'].workers
        # Run the replacement in a worker pool so a pathological regex
        # can be timed out instead of hanging the bot.
        result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement])
        try:
            msg = result.get(5)
        except multiprocessing.TimeoutError:
            workers.restart_pool()
            send("Sed regex timed out.")
            return
        if msg:
            send(msg)
        else:
            send("No match found.")
    except sre_constants.error as ex:
        # Invalid user-supplied regex.
        raise CommandFailedException(ex)
Corrects a previous message. Syntax: {command}/<msg>/<replacement>/<ig|nick>
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isllink_destdomain(self, **kwargs):
    """Auto Generated Code

    Builds the XML request tree for
    show-linkinfo/output/show-link-info/linkinfo-isl/
    linkinfo-isllink-destdomain from the keyword arguments and
    dispatches it through the callback.
    """
    config = ET.Element("config")
    show_linkinfo = ET.Element("show_linkinfo")
    # NOTE(review): this rebinds `config` to the bare show_linkinfo
    # element, discarding the "config" wrapper created above — typical
    # of this code generator's output; left untouched.
    config = show_linkinfo
    output = ET.SubElement(show_linkinfo, "output")
    show_link_info = ET.SubElement(output, "show-link-info")
    linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
    linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
    linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl")
    linkinfo_isl_linknumber_key = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber")
    linkinfo_isl_linknumber_key.text = kwargs.pop('linkinfo_isl_linknumber')
    linkinfo_isllink_destdomain = ET.SubElement(linkinfo_isl, "linkinfo-isllink-destdomain")
    linkinfo_isllink_destdomain.text = kwargs.pop('linkinfo_isllink_destdomain')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def popvalue(self, k, d=None):
    """D.popvalue(k[,d]) -> v, remove specified key and return the
    corresponding value. If key is not found, d (default None) is
    returned instead; no exception is raised.
    """
    try:
        value = self._col_dict.pop(k)
    except KeyError:
        return d
    self._col_list.remove(value)
    return value
D.popvalue(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d (default None) is returned instead; no exception is raised
def lastNode(class_, hot_map):
    """ Return the very last node (recursively) in the hot map. """
    last_entry = hot_map[-1]
    children = last_entry[2]
    if children:
        # Descend into the last entry's children.
        return class_.lastNode(children)
    return last_entry[1]
Return the very last node (recursively) in the hot map.
def make_interface_child(device_name, interface_name, parent_name):
    '''
    .. versionadded:: 2019.2.0

    Set an interface as part of a LAG.

    device_name
        The name of the device, e.g., ``edge_router``.
    interface_name
        The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``.
    parent_name
        The name of the LAG interface, e.g., ``ae13``.

    Returns ``False`` when the device or the parent interface cannot be
    found in NetBox.

    CLI Example:

    .. code-block:: bash

        salt myminion netbox.make_interface_child xe-1/0/2 ae13
    '''
    nb_device = get_('dcim', 'devices', name=device_name)
    if not nb_device:
        # Bail out early: previously a missing device crashed with a
        # TypeError on ``nb_device['id']`` instead of returning False.
        return False
    # Look the parent up on the same device so an identically named
    # interface on another device cannot match.
    nb_parent = get_('dcim', 'interfaces', device_id=nb_device['id'], name=parent_name)
    if nb_parent:
        return update_interface(device_name, interface_name, lag=nb_parent['id'])
    return False
.. versionadded:: 2019.2.0 Set an interface as part of a LAG. device_name The name of the device, e.g., ``edge_router``. interface_name The name of the interface to be attached to LAG, e.g., ``xe-1/0/2``. parent_name The name of the LAG interface, e.g., ``ae13``. CLI Example: .. code-block:: bash salt myminion netbox.make_interface_child xe-1/0/2 ae13
def tea_decipher(v, key):
    """Tiny Encryption Algorithm family decryption (block-TEA style;
    the exact round function is supplied by ``MX``).

    Decrypts the list of 32-bit words ``v`` in place using ``key`` and
    returns it.

    See https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
    """
    DELTA = 0x9e3779b9
    n = len(v)
    rounds = 6 + 52 // n
    # `total` replaces the original local named `sum`, which shadowed
    # the builtin.
    total = rounds * DELTA
    y = v[0]
    while total != 0:
        e = (total >> 2) & 3
        for p in range(n - 1, -1, -1):
            z = v[(n + p - 1) % n]
            # Keep every word within 32 bits.
            v[p] = (v[p] - MX(z, y, total, key, p, e)) & 0xffffffff
            y = v[p]
        total -= DELTA
    return v
Tiny Encryption Algorithm (TEA) decryption. See https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
def labels():
    """ Path to labels file """
    base = path.join(path.dirname(path.realpath(__file__)), path.pardir)
    csv_path = path.join(base, '../gzoo_data', 'train_solution.csv')
    return path.normpath(csv_path)
Path to labels file