positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def ingest(self, co, classname=None, code_objects=None, show_asm=None):
    """
    Pick out tokens from an uncompyle6 code object, and transform them,
    returning a list of uncompyle6 Tokens.

    The transformations are made to assist the deparsing grammar.

    :param co: the code object to scan.
    :param classname: optional enclosing class name.
    :param code_objects: optional mapping of nested code objects.
    :param show_asm: when set, assembly listing is shown by the base scanner.
    :return: (tokens, customize) as produced by the base scanner, with
        UNPACK_LIST tokens specialized by their unpack count.
    """
    # BUGFIX: avoid the mutable-default-argument pitfall; a shared dict
    # default would persist across calls.
    if code_objects is None:
        code_objects = {}
    tokens, customize = scan.Scanner21.ingest(
        self, co, classname, code_objects, show_asm)
    for t in tokens:
        if t.op == self.opc.UNPACK_LIST:
            # Fold the unpack count into the token kind so the grammar
            # can match on a specific arity.
            t.kind = 'UNPACK_LIST_%d' % t.attr
    return tokens, customize
Pick out tokens from an uncompyle6 code object, and transform them, returning a list of uncompyle6 Token's. The transformations are made to assist the deparsing grammar.
def autoscale_eydata(self):
    """
    Rescale the error bars so the next fit yields reduced chi-squareds of 1.

    Each data set is scaled independently; you may wish to run this a few
    times until it converges.  Returns self for chaining, or None when no
    fit has been completed yet.
    """
    if not self.results:
        self._error("You must complete a fit first.")
        return

    # Multiply each data set's error scale by sqrt(reduced chi^2).
    for index, chi2 in enumerate(self.reduced_chi_squareds()):
        self["scale_eydata"][index] *= _n.sqrt(chi2)

    # Rescaled errors invalidate the previous fit results.
    self.clear_results()

    if self['autoplot']:
        self.plot()

    return self
Rescales the error so the next fit will give reduced chi squareds of 1. Each data set will be scaled independently, and you may wish to run this a few times until it converges.
def engagement_context(self):
    """
    Access the engagement_context.

    :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
    :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
    """
    # Lazily construct and memoize the sub-resource list on first access.
    if self._engagement_context is None:
        flow_sid = self._solution['flow_sid']
        engagement_sid = self._solution['sid']
        self._engagement_context = EngagementContextList(
            self._version,
            flow_sid=flow_sid,
            engagement_sid=engagement_sid,
        )
    return self._engagement_context
Access the engagement_context :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
def _set_neighbor_route_map_name_direction_out(self, v, load=False):
    """
    Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64)
    If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly.
    """
    # NOTE(review): auto-generated pybind setter; the YANGDynClass call below
    # mirrors the YANG model and should not be hand-edited.
    # `load` is accepted for pybind setter API compatibility; it is not used
    # in this body.
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value against the name-string64 restriction
        # (one letter followed by up to 63 characters from the allowed set).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected YANG type.
        raise ValueError({
            'error-string': """neighbor_route_map_name_direction_out must be of a type compatible with common-def:name-string64""",
            'defined-type': "common-def:name-string64",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,63})'}), is_leaf=True, yang_name="neighbor-route-map-name-direction-out", rest_name="neighbor-route-map-name-direction-out", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply route map to neighbor', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='common-def:name-string64', is_config=True)""",
        })
    self.__neighbor_route_map_name_direction_out = t
    # Some generated classes register a hook to run after any set operation.
    if hasattr(self, '_set'):
        self._set()
Setter method for neighbor_route_map_name_direction_out, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/af_ipv6_vrf/neighbor/af_ipv6_vrf_neighbor_address_holder/af_ipv6_neighbor_addr/neighbor_route_map/neighbor_route_map_direction_out/neighbor_route_map_name_direction_out (common-def:name-string64) If this variable is read-only (config: false) in the source YANG file, then _set_neighbor_route_map_name_direction_out is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbor_route_map_name_direction_out() directly.
def build(self, _resource, _cache=True, updatecontent=True, **kwargs):
    """Build a schema class from input _resource.

    :param _resource: object from where get the right schema.
    :param bool _cache: use _cache system.
    :param bool updatecontent: if True (default) update result.
    :raises ValueError: if no registered builder can handle ``_resource``.
    :rtype: Schema.
    """
    result = None

    if _cache and _resource in self._schemasbyresource:
        result = self._schemasbyresource[_resource]
    else:
        for builder in self._builders.values():
            try:
                result = builder.build(_resource=_resource, **kwargs)
            except Exception:
                # This builder cannot handle the resource; try the next one.
                pass
            else:
                break

        if result is None:
            raise ValueError('No builder found for {0}'.format(_resource))

    if _cache:
        self._schemasbyresource[_resource] = result

    if updatecontent:
        # BUGFIX: alias the import so it no longer shadows the boolean
        # ``updatecontent`` parameter (the original rebinding only worked
        # because the truthiness test happened first).
        from ..utils import updatecontent as _updatecontent
        _updatecontent(result, updateparents=False)

    return result
Build a schema class from input _resource. :param _resource: object from where get the right schema. :param bool _cache: use _cache system. :param bool updatecontent: if True (default) update result. :rtype: Schema.
def get_trail_ids(cls, event, mode):
    """Extract resource ids from a CloudTrail event."""
    detail = event['detail']
    event_name = detail['eventName']
    event_source = detail['eventSource']

    resource_ids = ()
    for entry in mode.get('events', []):
        if not isinstance(entry, dict):
            # Shortcut/alias form: delegate to the registry of known events.
            info = CloudWatchEvents.match(event)
            if info:
                return info['ids'].search(event)
            continue
        if event_name != entry.get('event'):
            continue
        if event_source != entry.get('source'):
            continue
        id_query = entry.get('ids')
        if not id_query:
            raise ValueError("No id query configured")
        # Be forgiving about queries written with or without a leading
        # 'detail.' prefix.
        target = event if id_query.startswith('detail.') else event.get('detail', {})
        resource_ids = jmespath.search(id_query, target)
        if resource_ids:
            break
    return resource_ids
extract resources ids from a cloud trail event.
def copy(tree, source_filename):
    """ Copy file in tree, show a progress bar during operations, and return
        the sha1 sum of copied file.
    """
    movies_dir = os.path.join(tree, '.kolekto', 'movies')
    digest = sha1()
    with printer.progress(os.path.getsize(source_filename)) as update:
        with open(source_filename, 'rb') as fsource:
            with NamedTemporaryFile(dir=movies_dir, delete=False) as fdestination:
                # Stream the source into the temporary destination while
                # hashing it and advancing the progress bar.
                for buf in iter(lambda: fsource.read(10 * 1024), b''):
                    digest.update(buf)
                    fdestination.write(buf)
                    update(len(buf))
                # Rename the file to its final (content-addressed) name, or
                # raise an error if it already exists in the tree:
                dest = os.path.join(movies_dir, digest.hexdigest())
                if os.path.exists(dest):
                    raise IOError('This file already exists in tree (%s)' % digest.hexdigest())
                else:
                    os.rename(fdestination.name, dest)
    return digest.hexdigest()
Copy file in tree, show a progress bar during operations, and return the sha1 sum of copied file.
def select_station(
    candidates,
    coverage_range=None,
    min_fraction_coverage=0.9,
    distance_warnings=(50000, 200000),
    rank=1,
):
    """ Select a station from a list of candidates that meets given data
    quality criteria.

    Parameters
    ----------
    candidates : :any:`pandas.DataFrame`
        A dataframe of the form given by :any:`eeweather.rank_stations` or
        :any:`eeweather.combine_ranked_stations`, specifically having at
        least an index with ``usaf_id`` values and the column
        ``distance_meters``.
    coverage_range : tuple of (start_date, end_date), optional
        When given, stations must have sufficient hourly temperature
        coverage over this period.
    min_fraction_coverage : float, default 0.9
        Minimum fraction of non-null hourly temperatures required.
    distance_warnings : tuple of int, default (50000, 200000)
        Distance thresholds (meters) beyond which a warning is attached.
    rank : int, default 1
        Return the ``rank``-th station that passes the quality test.

    Returns
    -------
    isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str)
        A qualified weather station. ``None`` if no station meets criteria.
    """

    def _test_station(station):
        # Without a coverage requirement, every candidate qualifies.
        if coverage_range is None:
            return True, []
        start_date, end_date = coverage_range
        try:
            tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data(
                station, start_date, end_date
            )
        except ISDDataNotAvailableError:
            return False, []  # reject
        # TODO(philngo): also need to incorporate within-day limits
        if len(tempC) > 0:
            fraction_coverage = tempC.notnull().sum() / float(len(tempC))
            return (fraction_coverage > min_fraction_coverage), warnings
        return False, []  # reject

    def _station_warnings(station, distance_meters):
        # One warning per exceeded distance threshold.
        return [
            EEWeatherWarning(
                qualified_name="eeweather.exceeds_maximum_distance",
                description=(
                    # BUGFIX: the adjacent literals previously concatenated
                    # to "greaterthan" (missing space).
                    "Distance from target to weather station is greater"
                    " than the specified km."
                ),
                data={
                    "distance_meters": distance_meters,
                    "max_distance_meters": d,
                    "rank": rank,
                },
            )
            for d in distance_warnings
            if distance_meters > d
        ]

    n_stations_passed = 0
    for usaf_id, row in candidates.iterrows():
        station = ISDStation(usaf_id)
        test_result, warnings = _test_station(station)
        if test_result:
            n_stations_passed += 1
            if n_stations_passed == rank:
                if not warnings:
                    warnings = []
                warnings.extend(_station_warnings(station, row.distance_meters))
                return station, warnings

    no_station_warning = EEWeatherWarning(
        qualified_name="eeweather.no_weather_station_selected",
        description=(
            # BUGFIX: typo "fracitional" -> "fractional".
            "No weather station found with the specified rank and"
            " minimum fractional coverage."
        ),
        data={"rank": rank, "min_fraction_coverage": min_fraction_coverage},
    )
    return None, [no_station_warning]
Select a station from a list of candidates that meets given data quality criteria. Parameters ---------- candidates : :any:`pandas.DataFrame` A dataframe of the form given by :any:`eeweather.rank_stations` or :any:`eeweather.combine_ranked_stations`, specifically having at least an index with ``usaf_id`` values and the column ``distance_meters``. Returns ------- isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str) A qualified weather station. ``None`` if no station meets criteria.
def get_conda_path():
    """
    Return the active anaconda or miniconda environment directory.

    Runs ``conda info -e`` and parses the line marked with ``*`` (the
    currently active environment).

    :return: path of the active conda environment directory
    """
    import subprocess
    import re
    # 'conda info --root' works only for the root environment, so list all
    # environments and pick the starred (active) one instead.
    p = subprocess.Popen(['conda', 'info', '-e'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # BUGFIX: on Python 3, communicate() returns bytes; decode before
    # applying str regexes (the original crashed with a TypeError).
    if isinstance(out, bytes):
        out = out.decode('utf-8', errors='replace')
    dstdir = out.strip()
    # BUGFIX: raw string for the regex (avoids an invalid-escape warning).
    dstdir = re.search(r"\*(.*)\n", dstdir).group(1).strip()
    return dstdir
Return anaconda or miniconda directory :return: anaconda directory
def populate_from_path(bucket, source, checksum=True, key_prefix='', chunk_size=None):
    """Populate a ``bucket`` from all files in path.

    :param bucket: The bucket (instance or id) to create the object in.
    :param source: The file or directory path.
    :param checksum: If ``True`` then a MD5 checksum will be computed for each
        file. (Default: ``True``)
    :param key_prefix: The key prefix for the bucket.
    :param chunk_size: Chunk size to read from file.
    :returns: A iterator for all
        :class:`invenio_files_rest.models.ObjectVersion` instances.
    """
    from .models import FileInstance, ObjectVersion

    def create_file(key, path):
        """Create new ``ObjectVersion`` from path or existing ``FileInstance``.

        It checks MD5 checksum and size of existing ``FileInstance``s.
        """
        key = key_prefix + key

        if checksum:
            # BUGFIX: close the handle used for checksumming (previously it
            # was opened and never closed).
            with open(path, 'rb') as fp:
                file_checksum = compute_md5_checksum(fp, chunk_size=chunk_size)
            file_instance = FileInstance.query.filter_by(
                checksum=file_checksum, size=os.path.getsize(path)
            ).first()
            if file_instance:
                # Re-use the already-stored file instead of copying bytes.
                return ObjectVersion.create(
                    bucket, key, _file_id=file_instance.id
                )
        # BUGFIX: ensure the stream is closed once the object is created.
        with open(path, 'rb') as fp:
            return ObjectVersion.create(bucket, key, stream=fp)

    if os.path.isfile(source):
        yield create_file(os.path.basename(source), source)
    else:
        for root, dirs, files in os.walk(source, topdown=False):
            for name in files:
                filename = os.path.join(root, name)
                assert filename.startswith(source)
                # Build a bucket key from the path relative to `source`,
                # always using '/' as the separator.
                parts = [p for p in filename[len(source):].split(os.sep) if p]
                yield create_file('/'.join(parts), os.path.join(root, name))
Populate a ``bucket`` from all files in path. :param bucket: The bucket (instance or id) to create the object in. :param source: The file or directory path. :param checksum: If ``True`` then a MD5 checksum will be computed for each file. (Default: ``True``) :param key_prefix: The key prefix for the bucket. :param chunk_size: Chunk size to read from file. :returns: A iterator for all :class:`invenio_files_rest.models.ObjectVersion` instances.
def to_html(self, buf=None, columns=None, col_space=None, header=True,
            index=True, na_rep='NaN', formatters=None, float_format=None,
            sparsify=None, index_names=True, justify=None, max_rows=None,
            max_cols=None, show_dimensions=False, decimal='.',
            bold_rows=True, classes=None, escape=True, notebook=False,
            border=None, table_id=None, render_links=False):
    """
    Render a DataFrame as an HTML table.
    %(shared_params)s
    bold_rows : bool, default True
        Make the row labels bold in the output.
    classes : str or list or tuple, default None
        CSS class(es) to apply to the resulting html table.
    escape : bool, default True
        Convert the characters <, >, and & to HTML-safe sequences.
    notebook : {True, False}, default False
        Whether the generated HTML is for IPython Notebook.
    border : int
        A ``border=border`` attribute is included in the opening
        `<table>` tag. Default ``pd.options.html.border``.

        .. versionadded:: 0.19.0

    table_id : str, optional
        A css id is included in the opening `<table>` tag if specified.

        .. versionadded:: 0.23.0

    render_links : bool, default False
        Convert URLs to HTML links.

        .. versionadded:: 0.24.0
    %(returns)s
    See Also
    --------
    to_string : Convert DataFrame to a string.
    """
    # Validate `justify` up front so an invalid value fails fast rather
    # than deep inside the formatter.
    if (justify is not None and
            justify not in fmt._VALID_JUSTIFY_PARAMETERS):
        raise ValueError("Invalid value for justify parameter")

    # Most options are handled by the shared DataFrameFormatter; only the
    # HTML-specific ones (classes/notebook/border) are passed to to_html.
    formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
                                       col_space=col_space, na_rep=na_rep,
                                       formatters=formatters,
                                       float_format=float_format,
                                       sparsify=sparsify, justify=justify,
                                       index_names=index_names,
                                       header=header, index=index,
                                       bold_rows=bold_rows, escape=escape,
                                       max_rows=max_rows,
                                       max_cols=max_cols,
                                       show_dimensions=show_dimensions,
                                       decimal=decimal, table_id=table_id,
                                       render_links=render_links)
    # TODO: a generic formatter would be preferable in DataFrameFormatter
    formatter.to_html(classes=classes, notebook=notebook, border=border)

    # Mirror to_string semantics: return the rendered HTML only when no
    # buffer was supplied.
    if buf is None:
        return formatter.buf.getvalue()
Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. .. versionadded:: 0.23.0 render_links : bool, default False Convert URLs to HTML links. .. versionadded:: 0.24.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string.
def create_blazar_client(config, session):
    """Create a blazar (reservation service) client.

    :param config: not used in this body; presumably kept for signature
        compatibility with sibling ``create_*_client`` factories — TODO
        confirm against callers.
    :param session: authenticated session passed to the client.
    :return: a ``blazar_client.Client`` for the ``reservation`` service type,
        bound to the region named by the ``OS_REGION_NAME`` environment
        variable (raises ``KeyError`` if unset).
    """
    return blazar_client.Client(session=session,
                                service_type="reservation",
                                region_name=os.environ["OS_REGION_NAME"])
Check the reservation, creates a new one if necessary.
def shapes_match(a, b):
    """Recursively check if shapes of object `a` and `b` match.

    Will walk lists, tuples and dicts.

    Args:
      a: object of type (numpy.ndarray, tf.Tensor, list, tuple, dict) to
        check for matching shapes against `b`.
      b: object to check for matching shape against `a`.

    Returns:
      A boolean indicating whether the shapes of `a` and `b` match.
    """
    if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
        if len(a) != len(b):
            return False
        # Generator keeps this short-circuiting instead of building a list.
        return all(shapes_match(ia, ib) for ia, ib in zip(a, b))
    elif isinstance(a, dict) and isinstance(b, dict):
        # BUGFIX: compare by key instead of zipping .items(), which silently
        # required both dicts to share the same insertion order.
        if set(a) != set(b):
            return False
        return all(shapes_match(a[k], b[k]) for k in a)
    else:
        # Leaf values: dispatch on the pair of concrete types.
        shape_checker = shape_checkers[(type(a), type(b))]
        return shape_checker(a, b)
Recursively check if shapes of object `a` and `b` match. Will walk lists, tuples and dicts. Args: a: object of type (numpy.ndarray,tf.Tensor,list,tuple,dict) to check for matching shapes against `b`. b: object to check for matching shape against `a`. Returns: A boolean indicating whether the shapes of `a` and `b` match.
def set_existing_extra_keywords(self):
    """Set extra keywords from the value from metadata.

    For each known extra keyword, ticks its checkbox when a stored value
    exists and restores that value into the matching editor widget
    (line edit, combo box, spin box, or date-time edit).
    """
    extra_keywords = self.parent.get_existing_keyword('extra_keywords')
    for key, widgets in list(self.widgets_dict.items()):
        value = extra_keywords.get(key)
        if value is None:
            # No stored value: leave the keyword unchecked.
            widgets[0].setChecked(False)
        else:
            widgets[0].setChecked(True)
            # widgets[1] is the value editor; restore according to its type.
            if isinstance(widgets[1], QLineEdit):
                widgets[1].setText(value)
            elif isinstance(widgets[1], QComboBox):
                # Look up the item whose data matches the stored value.
                value_index = widgets[1].findData(value)
                widgets[1].setCurrentIndex(value_index)
            elif isinstance(widgets[1], QDoubleSpinBox):
                try:
                    value = float(value)
                    widgets[1].setValue(value)
                except ValueError:
                    LOGGER.warning('Failed to convert %s to float' % value)
            elif isinstance(widgets[1], QDateTimeEdit):
                # Try the fractional-seconds ISO format first, then fall
                # back to the whole-seconds format.
                try:
                    value_datetime = datetime.strptime(
                        value, "%Y-%m-%dT%H:%M:%S.%f")
                    widgets[1].setDateTime(value_datetime)
                except ValueError:
                    try:
                        value_datetime = datetime.strptime(
                            value, "%Y-%m-%dT%H:%M:%S")
                        widgets[1].setDateTime(value_datetime)
                    except ValueError:
                        LOGGER.info(
                            'Failed to convert %s to datetime' % value)
Set extra keywords from the value from metadata.
def item_selection_changed(self):
    """Enable or disable the selection actions when the selection changes."""
    has_selection = bool(self.selectedItems())
    self.expand_selection_action.setEnabled(has_selection)
    self.collapse_selection_action.setEnabled(has_selection)
Item selection has changed
def zoom_out_pixel(self, curr_pixel):
    """Return the current pixel expressed at the next-lower resolution level."""
    low_frag, high_frag, level = curr_pixel
    # Already at the coarsest level: nothing to zoom out to.
    if level >= self.n_level - 1:
        return curr_pixel

    fragments = self.spec_level[str(level)]["fragments_dict"]
    low_super = fragments[low_frag]["super_index"]
    high_super = fragments[high_frag]["sub_index"]
    # Order the two parent indices so the pixel stays [low, high].
    return [min(low_super, high_super), max(low_super, high_super), level + 1]
return the curr_frag at a lower resolution
def pylxd_save_object(obj):
    '''
    Save an object (profile/image/container), translating a pylxd API
    exception into a ``CommandExecutionError`` on failure.

    obj :
        The object to save

    This is an internal method, no CLI Example.
    '''
    try:
        obj.save()
    except pylxd.exceptions.LXDAPIException as err:
        raise CommandExecutionError(six.text_type(err))
    return True
Saves an object (profile/image/container) and translate its exception on failure obj : The object to save This is an internal method, no CLI Example.
def check_dtype(array, allowed):
    """Raises TypeError if the array is not of an allowed dtype.

    :param array: array whose dtype is to be checked
    :param allowed: instance or list of allowed dtypes
    :raises TypeError: if ``array.dtype`` is not among ``allowed``
    """
    # Treat a single dtype as a one-element list.  BUGFIX: a dtype given as a
    # string is iterable, so the plain __iter__ check previously left it
    # unwrapped and the `in` test below misbehaved.
    if isinstance(allowed, str) or not hasattr(allowed, "__iter__"):
        allowed = [allowed]
    if array.dtype not in allowed:
        raise TypeError(
            "Invalid dtype {}. Allowed dtype(s): {}".format(array.dtype, allowed))
Raises TypeError if the array is not of an allowed dtype. :param array: array whose dtype is to be checked :param allowed: instance or list of allowed dtypes
def list_config_variables(self, offset):
    """List defined config variables up to 9 at a time."""
    page = sorted(self._config_variables)[offset:offset + 9]
    count = len(page)
    # Pad with zeros so the reply always carries exactly 9 name slots.
    return [count] + page + [0] * (9 - count)
List defined config variables up to 9 at a time.
def get_sub_rectangles(self, ims):
    """ get_sub_rectangles(ims)

    Calculate the minimal rectangles that need updating each frame.
    Returns a two-element tuple containing the cropped images and a
    list of x-y positions.

    Calculating the subrectangles takes extra time, obviously. However,
    if the image sizes were reduced, the actual writing of the GIF
    goes faster. In some cases applying this method produces a GIF faster.
    """
    # Nothing to diff with fewer than two frames.
    if len(ims) < 2:
        return ims, [(0, 0) for i in ims]

    if np is None:
        raise RuntimeError("Need Numpy to calculate sub-rectangles. ")

    cropped = [ims[0]]
    positions = [(0, 0)]

    prev = ims[0]
    for im in ims[1:]:
        # Absolute difference, summed over color channels when present.
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Indices of the columns/rows that changed at all.
        x = np.argwhere(diff.sum(0))
        y = np.argwhere(diff.sum(1))
        if x.size and y.size:
            x0, x1 = x[0], x[-1] + 1
            y0, y1 = y[0], y[-1] + 1
        else:
            # No change at all; keep a minimal 2x2 rectangle.
            x0, x1 = 0, 2
            y0, y1 = 0, 2
        cropped.append(im[y0:y1, x0:x1])
        positions.append((x0, y0))
        prev = im

    return cropped, positions
get_sub_rectangles(ims) Calculate the minimal rectangles that need updating each frame. Returns a two-element tuple containing the cropped images and a list of x-y positions. Calculating the subrectangles takes extra time, obviously. However, if the image sizes were reduced, the actual writing of the GIF goes faster. In some cases applying this method produces a GIF faster.
def write(self, text):
    """Write text.

    An additional attribute ``terminator`` with a value of None is added to
    the logging record to indicate that StreamHandler should not add a
    newline.
    """
    record_extras = {'terminator': None}
    self.logger.log(self.loglevel, text, extra=record_extras)
Write text. An additional attribute terminator with a value of None is added to the logging record to indicate that StreamHandler should not add a newline.
def _enumload(l: Loader, value, type_) -> Enum: """ This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised. """ try: # Try naïve conversion return type_(value) except: pass # Try with the typing hints for _, t in get_type_hints(type_).items(): try: return type_(l.load(value, t)) except: pass raise TypedloadValueError( 'Value could not be loaded into %s' % type_, value=value, type_=type_ )
This loads something into an Enum. It tries with basic types first. If that fails, it tries to look for type annotations inside the Enum, and tries to use those to load the value into something that is compatible with the Enum. Of course if that fails too, a ValueError is raised.
def p_OptionalOrRequiredArgument_optional(p):
    # NOTE: the docstring below is a PLY grammar production rule; its exact
    # text is consumed by the parser generator and must not be edited.
    """OptionalOrRequiredArgument : optional Type IDENTIFIER Default"""
    # Build the AST node for an optional operation argument from the
    # matched type, name and default-value symbols.
    p[0] = model.OperationArgument(
        type=p[2], name=p[3], optional=True, default=p[4])
OptionalOrRequiredArgument : optional Type IDENTIFIER Default
def backup(schema, uuid, export_filter, export_format, filename, pretty, export_all, omit):
    """Exports all collections to (JSON-) files.

    :param schema: single schema name to export, or None (requires
        export_all to dump every schema).
    :param uuid: when set, export only the object with this uuid.
    :param export_filter: literal dict string used as a find() filter.
    :param export_format: output format name; only 'JSON' produces data.
    :param filename: output file path; printed to stdout when falsy.
    :param pretty: indent the JSON output with 4 spaces.
    :param export_all: allow exporting every schema when none is given.
    :param omit: iterable of field names stripped from each object.
    """
    export_format = export_format.upper()

    if pretty:
        indent = 4
    else:
        indent = 0

    f = None

    if filename:
        try:
            f = open(filename, 'w')
        except (IOError, PermissionError) as e:
            backup_log('Could not open output file for writing:',
                       exc=True, lvl=error)
            return

    def output(what, convert=False):
        """Output the backup in a specified format.

        Serializes `what` to JSON when convert is True (other formats
        produce an empty string), then writes to the open file or stdout.
        """
        if convert:
            if export_format == 'JSON':
                data = json.dumps(what, indent=indent)
            else:
                # Non-JSON formats are not implemented; emit nothing.
                data = ""
        else:
            data = what

        if not filename:
            print(data)
        else:
            f.write(data)

    # Resolve which schemata to export.
    if schema is None:
        if export_all is False:
            backup_log('No schema given.', lvl=warn)
            return
        else:
            schemata = objectmodels.keys()
    else:
        schemata = [schema]

    all_items = {}

    for schema_item in schemata:
        model = objectmodels[schema_item]

        # Narrow the query by uuid or a user-supplied filter when given.
        if uuid:
            obj = model.find({'uuid': uuid})
        elif export_filter:
            obj = model.find(literal_eval(export_filter))
        else:
            obj = model.find()

        items = []
        for item in obj:
            fields = item.serializablefields()
            # Strip omitted fields, ignoring ones that are absent.
            for field in omit:
                try:
                    fields.pop(field)
                except KeyError:
                    pass
            items.append(fields)
        all_items[schema_item] = items

    # if pretty is True:
    #    output('\n// Objectmodel: ' + schema_item + '\n\n')
    # output(schema_item + ' = [\n')

    output(all_items, convert=True)

    if f is not None:
        f.flush()
        f.close()
Exports all collections to (JSON-) files.
def ifelse(arg, true_expr, false_expr):
    """
    Shorthand for implementing ternary expressions

    bool_expr.ifelse(0, 1)
    e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
    """
    # Result will be the result of promotion of true/false exprs. These
    # might be conflicting types; same type resolution as case expressions
    # must be used.
    builder = ops.SearchedCaseBuilder()
    return builder.when(arg, true_expr).else_(false_expr).end()
Shorthand for implementing ternary expressions bool_expr.ifelse(0, 1) e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
def _separate_exclude_cases(name, exclude_prefix): """ Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries """ excluder = re.compile('|'.join(exclude_prefix)) split_entries = excluder.split(name) return {'clean_name': split_entries[0], 'excluded_countries': split_entries[1:]}
Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries
def _format_feature(feature, weight, hl_spaces):
    # type: (...) -> str
    """ Format any feature. """
    if isinstance(feature, FormattedFeatureName):
        return feature.format()
    # A list of {'name': ..., 'sign': ...} dicts is an unhashed feature group.
    is_unhashed = (
        isinstance(feature, list) and
        all('name' in x and 'sign' in x for x in feature))
    if is_unhashed:
        return _format_unhashed_feature(feature, weight, hl_spaces=hl_spaces)
    return _format_single_feature(feature, weight, hl_spaces=hl_spaces)
Format any feature.
def full_name(self):
    """Return full name of member"""
    if self.prefix is None:
        return self.member
    return '{}.{}'.format(self.prefix, self.member)
Return full name of member
def omerc2cf(area):
    """Return the cf grid mapping for the omerc projection."""
    proj = area.proj_dict
    args = {
        'azimuth_of_central_line': proj.get('alpha'),
        'latitude_of_projection_origin': proj.get('lat_0'),
        'longitude_of_projection_origin': proj.get('lonc'),
        'grid_mapping_name': 'oblique_mercator',
        'reference_ellipsoid_name': proj.get('ellps', 'WGS84'),
        'false_easting': 0.,
        'false_northing': 0.,
    }
    # Optional parameters present only for some omerc definitions.
    if 'no_rot' in proj:
        args['no_rotation'] = 1
    if 'gamma' in proj:
        args['gamma'] = proj['gamma']
    return args
Return the cf grid mapping for the omerc projection.
def _R2deriv(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _R2deriv
    PURPOSE:
       evaluate the second radial derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth (not used in this body)
       t - time (not used in this body)
    OUTPUT:
       the second radial derivative
    HISTORY:
       2011-10-09 - Written - Bovy (IAS)
    """
    # r^2 = R^2 + z^2 (squared spherical radius) and its square root.
    Rz= R**2.+z**2.
    sqrtRz= numpy.sqrt(Rz)
    # Closed-form expression; self.a is the potential's scale length.
    return (3.*R**4.+2.*R**2.*(z**2.+self.a*sqrtRz)\
                -z**2.*(z**2.+self.a*sqrtRz)\
                -(2.*R**2.-z**2.)*(self.a**2.+R**2.+z**2.+2.*self.a*sqrtRz)\
                *numpy.log(1.+sqrtRz/self.a))\
                /Rz**2.5/(self.a+sqrtRz)**2.
NAME: _R2deriv PURPOSE: evaluate the second radial derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the second radial derivative HISTORY: 2011-10-09 - Written - Bovy (IAS)
def statistical_axes(fit, **kw):
    """
    Hyperbolic error using a statistical process (either sampling
    or noise errors)

    Integrates covariance with error level
    and degrees of freedom for plotting
    confidence intervals.

    Degrees of freedom is set to 2, which is the
    relevant number of independent dimensions
    to planar fitting of *a priori* centered data.

    Keyword options:
      method : 'noise' (default) or 'sampling' — covariance estimator.
      confidence_level : float, default 0.95.
      dof : degrees of freedom, default 2 (see above).
      chisq : if True, use the Chi2 statistic instead of Fisher's F
        (models the historically incorrect behaviour).
    """
    method = kw.pop('method', 'noise')
    confidence_level = kw.pop('confidence_level', 0.95)
    dof = kw.pop('dof',2)
    nominal = fit.eigenvalues

    # Pick the covariance estimator for the requested method.
    if method == 'sampling':
        cov = sampling_covariance(fit,**kw)
    elif method == 'noise':
        cov = noise_covariance(fit,**kw)

    if kw.pop('chisq', False):
        # Model the incorrect behaviour of using the
        # Chi2 distribution instead of the Fisher
        # distribution (which is a measure of the
        # ratio between the two).
        z = chi2.ppf(confidence_level,dof)
    else:
        z = fisher_statistic(fit.n,confidence_level,dof=dof)

    # Apply two fisher F parameters (one along each axis)
    # Since we apply to each axis without division,
    # it is as if we are applying N.sqrt(2*F) to the entire
    # distribution, aligning us with (Francq, 2014)
    err = z*N.sqrt(cov)

    return apply_error_scaling(nominal, err, n=fit.n, **kw)
Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data.
def get_url_map():
    """
    Loads custom/pypi/map.txt and builds a dict where
    map[package_name] = url

    :return: dict, urls
    """
    url_map = {}  # renamed from `map` to stop shadowing the builtin
    path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),  # current working dir ../
        "custom",  # ../custom/
        "pypi",  # ../custom/pypi/
        "map.txt"  # ../custom/pypi/map.txt
    )
    with open(path) as f:
        for line in f:  # iterate lazily instead of readlines()
            line = line.strip()
            if not line:
                # BUGFIX: skip blank lines instead of crashing on unpacking
                continue
            # BUGFIX: split only on the first ': ' so URLs containing
            # ': ' survive intact
            package, url = line.split(": ", 1)
            url_map[package] = url
    return url_map
Loads custom/pypi/map.txt and builds a dict where map[package_name] = url :return: dict, urls
def get_d1str(self, goobj, reverse=False):
    """Get D1-string representing all parent terms which are depth-01 GO terms."""
    letters = sorted(self.get_parents_letters(goobj), reverse=reverse)
    return "".join(letters)
Get D1-string representing all parent terms which are depth-01 GO terms.
def close(self, for_shutdown=False, **_kwargs):
    """
    Only call super().close() if the server is shutting down (not between
    requests).

    :param for_shutdown: If ``False`` (the default), this is a
        between-requests call and nothing happens; if ``True``, the
        parent class's close() is actually invoked.
    """
    if for_shutdown:
        super(PySOAPyLibMCCache, self).close()
Only call super().close() if the server is shutting down (not between requests). :param for_shutdown: If `False` (the default)
def visit_spires_keyword_query(self, node):
    """Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`.

    Notes:
        When the visited value is a :class:`SimpleValueBooleanQuery`, the
        subtree is converted into chained :class:`AndOp` queries of
        :class:`KeywordOp` nodes — each carrying this node's keyword and one
        of the boolean query's values (:class:`SimpleValues` or
        :class:`SimpleValueNegation`).
    """
    keyword = node.left.accept(self)
    value = node.right.accept(self)

    if not isinstance(value, SimpleValueBooleanQuery):
        return KeywordOp(keyword, value)

    return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
Transform a :class:`SpiresKeywordQuery` into a :class:`KeywordOp`. Notes: In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained :class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or :class:`SimpleValueNegation`.)
def session_scope(session_cls=None):
    """Yield a session wrapped in a transactional scope.

    Commits after the body finishes cleanly, rolls back and re-raises on any
    exception, and always closes the session.  Uses the module-level
    ``Session`` factory when ``session_cls`` is not supplied.
    """
    factory = session_cls or Session
    session = factory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
Provide a transactional scope around a series of operations.
def isset(alias_name):
    """Return a boolean if the docker link is set or not and is a valid
    looking docker link value.

    Args:
        alias_name: The link alias name
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)

    raw_value = read(alias_name, allow_none=True)
    if not raw_value:
        return False

    # A docker link value looks like "<scheme>://<host>:<port>".
    if re.compile(r'.+://.+:\d+').match(raw_value):
        return True

    warnings.warn('"{0}_PORT={1}" does not look like a docker link.'.format(alias_name, raw_value), stacklevel=2)
    return False
Return a boolean if the docker link is set or not and is a valid looking docker link value. Args: alias_name: The link alias name
def get_cluster_graph(self, engine="fdp", graph_attr=None, node_attr=None, edge_attr=None):
    """
    Generate directory graph in the DOT language. Directories are shown as clusters

    .. warning::

        This function scans the entire directory tree starting from top so the resulting
        graph can be really big.

    Args:
        engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
        graph_attr: Mapping of (attribute, value) pairs for the graph.
        node_attr: Mapping of (attribute, value) pairs set for all nodes.
        edge_attr: Mapping of (attribute, value) pairs set for all edges.

    Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
    """
    # https://www.graphviz.org/doc/info/
    from graphviz import Digraph
    g = Digraph("directory", engine=engine)
    g.attr(label=self.top)
    g.node_attr.update(color='lightblue2', style='filled')

    # Add input attributes.
    # BUGFIX: these previously updated an undefined name `fg`, raising
    # NameError whenever any of the attr mappings was passed.
    if graph_attr is not None:
        g.graph_attr.update(**graph_attr)
    if node_attr is not None:
        g.node_attr.update(**node_attr)
    if edge_attr is not None:
        g.edge_attr.update(**edge_attr)

    def node_kwargs(path):
        # Common node attributes: record shape, small font, basename label.
        return dict(
            shape="record",
            fontsize="8.0",
            label=os.path.basename(path),
        )

    edge_kwargs = dict(arrowType="vee", style="solid", minlen="1")
    cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")

    # TODO: Write other method without clusters if not walk.
    exclude_top_node = False
    for root, dirs, files in os.walk(self.top):
        if exclude_top_node and root == self.top:
            continue
        # Each directory becomes a graphviz cluster named after its path.
        cluster_name = "cluster_%s" % root
        with g.subgraph(name=cluster_name) as d:
            d.attr(**cluster_kwargs)
            d.attr(rank="source" if (files or dirs) else "sink")
            d.attr(label=os.path.basename(root))
            for f in files:
                filepath = os.path.join(root, f)
                d.node(filepath, **node_kwargs(filepath))
                if os.path.islink(filepath):
                    # Follow the link and use the relpath wrt link as label.
                    realp = os.path.realpath(filepath)
                    realp = os.path.relpath(realp, filepath)
                    g.edge(filepath, realp, **edge_kwargs)

            for dirname in dirs:
                # Connect this cluster to each subdirectory's cluster.
                dirpath = os.path.join(root, dirname)
                new_cluster_name = "cluster_%s" % dirpath
                d.edge(cluster_name, new_cluster_name, **edge_kwargs)

    return g
Generate directory graph in the DOT language. Directories are shown as clusters .. warning:: This function scans the entire directory tree starting from top so the resulting graph can be really big. Args: engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage'] graph_attr: Mapping of (attribute, value) pairs for the graph. node_attr: Mapping of (attribute, value) pairs set for all nodes. edge_attr: Mapping of (attribute, value) pairs set for all edges. Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
def visual(title, X, activation):
    '''create a grid of images and save it as a final image

    title : grid image name -- output is written to "<title>.jpg"
    X : array of images; assumes (batch, channel, height, width) layout,
        since it is transposed to channels-last before writing -- TODO confirm
    activation : 'sigmoid' or 'tanh'; selects how network outputs are mapped
        to [0, 255] pixel values ('sigmoid' assumes values in [0, 1],
        'tanh' assumes values in [-1, 1])
    '''
    assert len(X.shape) == 4
    # move channels to the last axis for OpenCV
    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X)*(255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
    # square grid large enough to hold all images
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff)
create a grid of images and save it as a final image title : grid image name X : array of images
def show_window_option(self, option, g=False):
    """
    Return a list of options for the window.

    todo: test and return True/False for on/off string

    Parameters
    ----------
    option : str
        name of the tmux window option to query
    g : bool, optional
        Pass ``-g`` flag, global. Default False.

    Returns
    -------
    str, int, or None
        the option's value (coerced to int when purely numeric), or None
        when tmux produced no output for the option

    Raises
    ------
    :exc:`exc.OptionError`, :exc:`exc.UnknownOption`,
    :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
    """
    tmux_args = tuple()

    if g:
        tmux_args += ('-g',)

    tmux_args += (option,)

    # `show-window-options <option>` prints "<name> <value>" lines
    cmd = self.cmd('show-window-options', *tmux_args)

    # map any tmux stderr output onto the specific option exception
    if len(cmd.stderr):
        handle_option_error(cmd.stderr[0])

    if not len(cmd.stdout):
        return None

    # shlex-split the first stdout line into [name, value]
    option = [shlex.split(item) for item in cmd.stdout][0]

    # coerce numeric values to int
    if option[1].isdigit():
        option = (option[0], int(option[1]))

    return option[1]
Return a list of options for the window. todo: test and return True/False for on/off string Parameters ---------- option : str g : bool, optional Pass ``-g`` flag, global. Default False. Returns ------- str, int Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
def column_definition(table_name, col_name):
    """
    Return the source of a column function as JSON.

    For a registered Series (non-function) column the payload is just
    ``{'type': <col_type>}``.  For a registered function the payload carries
    "type", "filename", "lineno", "text" (raw source) and "html" (the source
    marked up by Pygments).
    """
    col_type = orca.get_table(table_name).column_type(col_name)

    if col_type != 'function':
        return jsonify(type=col_type)

    raw_column = orca.get_raw_column(table_name, col_name)
    filename, lineno, source = raw_column.func_source_data()
    html = highlight(source, PythonLexer(), HtmlFormatter())

    return jsonify(
        type='function', filename=filename, lineno=lineno, text=source,
        html=html)
Get the source of a column function. If a column is a registered Series and not a function then all that is returned is {'type': 'series'}. If the column is a registered function then the JSON returned has keys "type", "filename", "lineno", "text", and "html". "text" is the raw text of the function, "html" has been marked up by Pygments.
def make_insert(cls, table, insert_tuple):
    """
    [Deprecated] Build an ``INSERT INTO`` statement with ``?`` placeholders.

    :param str table: Table name of executing the query.
    :param list/tuple insert_tuple: Insertion data.
    :return: Query of SQLite.
    :rtype: str
    :raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
    :raises simplesqlite.NameValidationError:
        |raises_validate_table_name|
    """
    validate_table_name(table)
    table = Table(table)

    if typepy.is_empty_sequence(insert_tuple):
        raise ValueError("empty insert list/tuple")

    # one "?" placeholder per value to insert
    placeholders = ",".join("?" for _ in insert_tuple)
    return "INSERT INTO {:s} VALUES ({:s})".format(table, placeholders)
[Deprecated] Make INSERT query. :param str table: Table name of executing the query. :param list/tuple insert_tuple: Insertion data. :return: Query of SQLite. :rtype: str :raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|. :raises simplesqlite.NameValidationError: |raises_validate_table_name|
def from_string(string_data, file_format="xyz"):
    """
    Build a BabelMolAdaptor from molecule data held in a string, using
    OpenBabel to parse any of its supported formats.

    Args:
        string_data: String containing molecule data.
        file_format: String specifying any OpenBabel supported formats.

    Returns:
        BabelMolAdaptor object
    """
    molecule = pb.readstring(str(file_format), str(string_data))
    return BabelMolAdaptor(molecule.OBMol)
Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Run *query* through :func:`raw_query` and return only the ``_source``
    document of each hit, discarding the search metadata."""
    hits = raw_query(query, index=index, doc_type=doc_type)
    return [hit['_source'] for hit in hits]
Thin wrapper of the main query function to provide just the resulting objects
def where_entry_tag(query, tag):
    """ Generate a where clause for entries with the given tag

    Tag matching is case-insensitive.  ``tag`` may be a single string or a
    list/set/tuple of strings (matching any of them).

    NOTE: the generator expressions below are decompiled by Pony ORM's
    ``orm.select`` into SQL, so their exact shape is significant.
    """
    if isinstance(tag, (list, set, tuple)):
        # any-of match against the lowercased tag keys
        tags = [t.lower() for t in tag]
        return orm.select(e for e in query for t in e.tags if t.key in tags)

    return orm.select(e for e in query for t in e.tags if t.key == tag.lower())
Generate a where clause for entries with the given tag
def uniprot_ec(uniprot_id):
    """Retrieve the EC number annotation for a UniProt ID.

    Args:
        uniprot_id: Valid UniProt ID

    Returns:
        The EC number string, or None when the service returns no
        annotation (or no data row at all).
    """
    r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
    lines = r.content.decode('utf-8').splitlines()

    # The first line is the column header; guard against an empty or
    # header-only response so we do not raise IndexError.
    if len(lines) < 2 or len(lines[1]) == 0:
        return None
    return lines[1]
Retrieve the EC number annotation for a UniProt ID. Args: uniprot_id: Valid UniProt ID Returns: The EC number annotation string, or None if no annotation exists.
def get_page_full(self, page_id):
    """
    Get full page info and full html code

    :param page_id: identifier of the Tilda page to fetch
    :return: :class:`TildaPage` built from the API response on success; an
        empty list on :class:`NetworkError`.
        NOTE(review): the failure value's type ([]) differs from the success
        type — callers must handle both; consider returning None instead.
    """
    try:
        result = self._request('/getpagefull/',
                               {'pageid': page_id})
        return TildaPage(**result)
    except NetworkError:
        return []
Get full page info and full html code
def _calculate(self, startingPercentage, endPercentage, startDate, endDate):
    """This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`.

    Both parameters will be correct at this time.

    :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0].
        It represents the value, where the error calculation should be started.
        25.0 for example means that the first 25% of all calculated errors will be ignored.
    :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0].
        It represents the value, after which all error values will be ignored. 90.0 for example means that
        the last 10% of all local errors will be ignored.
    :param float startDate: Epoch representing the start date used for error calculation.
    :param float endDate: Epoch representing the end date used in the error calculation.

    :return: Returns a float representing the error.
    :rtype: float
    """
    # get the defined subset of error values
    errorValues = self._get_error_values(startingPercentage, endPercentage, startDate, endDate)

    # Materialize as a list: on Python 3, filter() returns an iterator,
    # which has no len() and would be exhausted by sorted() below.
    errorValues = [value for value in errorValues if value is not None]

    # upper median of the sorted local errors
    return sorted(errorValues)[len(errorValues) // 2]
This is the error calculation function that gets called by :py:meth:`BaseErrorMeasure.get_error`. Both parameters will be correct at this time. :param float startingPercentage: Defines the start of the interval. This has to be a value in [0.0, 100.0]. It represents the value, where the error calculation should be started. 25.0 for example means that the first 25% of all calculated errors will be ignored. :param float endPercentage: Defines the end of the interval. This has to be a value in [0.0, 100.0]. It represents the value, after which all error values will be ignored. 90.0 for example means that the last 10% of all local errors will be ignored. :param float startDate: Epoch representing the start date used for error calculation. :param float endDate: Epoch representing the end date used in the error calculation. :return: Returns a float representing the error. :rtype: float
def ensure_hexadecimal_string(self, value, command=None):
    """
    Make sure the given value is a hexadecimal string.

    :param value: The value to check (a string).
    :param command: The command that produced the value (a string or :data:`None`).
    :returns: The validated hexadecimal string.
    :raises: :exc:`~exceptions.ValueError` when `value` is not a hexadecimal string.
    """
    # Fast path: valid input is returned untouched.
    if HEX_PATTERN.match(value):
        return value

    msg = "Expected a hexadecimal string, got '%s' instead!"
    if command:
        # Mention the originating command when we know it.
        msg += " ('%s' gave unexpected output)"
        msg %= (value, command)
    else:
        msg %= value
    raise ValueError(msg)
Make sure the given value is a hexadecimal string. :param value: The value to check (a string). :param command: The command that produced the value (a string or :data:`None`). :returns: The validated hexadecimal string. :raises: :exc:`~exceptions.ValueError` when `value` is not a hexadecimal string.
def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs):
    """Prompt the user to enter an integer.

    Parameters
    ----------
    prompt: string
        Prompt to display to the user.
    message: string, optional
        Message to display under the entry line.
    min, max: integer, optional
        Minimum and maximum values to allow. If None, no limit is
        imposed.  (Note: these parameter names shadow the builtins; they
        are part of the public interface and cannot be renamed.)

    Returns
    -------
    integer, or None if the dialog is cancelled.
    """
    # Sanity check.
    if (min is not None) and (max is not None) and not (max > min):
        raise ValueError("Maximum limit has to be more than the minimum limit.")

    def integer_validator(text):
        # Attempt to convert to integer.
        try:
            value = int(text)
        except ValueError:
            return None, "Please enter an integer value."

        # Check its within limits.
        if (min is not None) and (value < min):
            return None, "The minimum allowable value is {0:d}.".format(min)
        if (max is not None) and (value > max):
            return None, "The maximum allowable value is {0:d}.".format(max)

        return value, None

    return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs)
Prompt the user to enter an integer. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: integer, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- integer, or None if the dialog is cancelled.
def powmod(a, b, c):
    """
    Uses GMP, if available, to do a^b mod c where a, b, c
    are integers.

    :return int: (a ** b) % c
    """
    if a == 1:
        # 1 ** b is always 1, but the result must still be reduced
        # modulo c (e.g. c == 1 must yield 0), matching pow(a, b, c).
        return 1 % c
    if not HAVE_GMP or max(a, b, c) < _USE_MOD_FROM_GMP_SIZE:
        return pow(a, b, c)
    else:
        return int(gmpy2.powmod(a, b, c))
Uses GMP, if available, to do a^b mod c where a, b, c are integers. :return int: (a ** b) % c
def getClsNames(item):
    '''
    Return a list of "fully qualified" class names for an instance.

    Example:

        for name in getClsNames(foo):
            print(name)

    '''
    hierarchy = [klass for klass in inspect.getmro(item.__class__)
                 if klass not in clsskip]
    return ['%s.%s' % (klass.__module__, klass.__name__) for klass in hierarchy]
Return a list of "fully qualified" class names for an instance. Example: for name in getClsNames(foo): print(name)
def chunk(self, count):
    """
    Lazily yield the results of the query, ``count`` rows at a time.

    :param count: The chunk size
    :type count: int

    :return: The current chunk
    :rtype: list
    """
    batches = self._connection.select_many(
        count,
        self.to_sql(),
        self.get_bindings(),
        not self._use_write_connection,
    )
    for batch in batches:
        yield batch
Chunk the results of the query :param count: The chunk size :type count: int :return: The current chunk :rtype: list
def ShouldRetry(self, exception):
    """Returns true if should retry based on the passed-in exception.

    Side effects: increments the attempt counter and accumulates the wait
    time (fixed interval if configured, otherwise the server-provided
    Retry-After header) into the cumulative total.

    :param (errors.HTTPFailure instance) exception:
    :rtype: boolean
    """
    if self.current_retry_attempt_count < self._max_retry_attempt_count:
        self.current_retry_attempt_count += 1
        self.retry_after_in_milliseconds = 0

        # A configured fixed interval takes precedence over the header.
        if self._fixed_retry_interval_in_milliseconds:
            self.retry_after_in_milliseconds = self._fixed_retry_interval_in_milliseconds
        elif http_constants.HttpHeaders.RetryAfterInMilliseconds in exception.headers:
            self.retry_after_in_milliseconds = int(exception.headers[http_constants.HttpHeaders.RetryAfterInMilliseconds])

        # Retry only while the accumulated wait stays under the cap.
        if self.cummulative_wait_time_in_milliseconds < self._max_wait_time_in_milliseconds:
            self.cummulative_wait_time_in_milliseconds += self.retry_after_in_milliseconds
            return True

    return False
Returns true if should retry based on the passed-in exception. :param (errors.HTTPFailure instance) exception: :rtype: boolean
def get_balance(self):
    """Check the balance for this account.

    Returns a dictionary containing:
    account_type: The account type
    balance: The balance remaining on the account
    currency: The currency used for the account balance. Assume GBP if not set
    """
    xml_root = self.__init_xml('Balance')

    response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8'))
    data_etree = etree.fromstring(response['data'])

    # A present <ErrDesc> element signals an API-level failure.
    err_desc = data_etree.find('ErrDesc')
    if err_desc is not None:
        raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)

    result = {}
    result['account_type'] = data_etree.find('AccountType').text
    result['balance'] = data_etree.find('Balance').text
    result['currency'] = data_etree.find('Currency').text

    return result
Check the balance for this account. Returns a dictionary containing: account_type: The account type balance: The balance remaining on the account currency: The currency used for the account balance. Assume GBP if not set
def get_template_path(filename):
    """Find raw template in working directory or in sys.path.

    template_path from config may refer to templates colocated with the
    Stacker config, or files in remote package_sources. Here, we emulate
    python module loading to find the path to the template.

    Args:
        filename (str): Template filename.

    Returns:
        Optional[str]: Path to file, or None if no file found
    """
    if os.path.isfile(filename):
        return os.path.abspath(filename)
    for entry in sys.path:
        # build the candidate path once instead of twice per iteration
        candidate = os.path.join(entry, filename)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return None
Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found
def import_data(args):
    '''import additional data to the experiment'''
    validate_file(args.filename)
    validate_dispatcher(args)
    content = load_search_space(args.filename)

    args.port = get_experiment_port(args)
    if args.port is None:
        # no resolvable experiment port -> nothing to do
        return
    if not import_data_to_restful_server(args, content):
        print_error('Import data failed!')
import additional data to the experiment
def cli(ctx, feature_id, symbol, organism="", sequence=""):
    """Set a feature's symbol

    Output:

    A standard apollo feature dictionary ({"features": [{...}]})
    """
    return ctx.gi.annotations.set_symbol(feature_id, symbol, organism=organism, sequence=sequence)
Set a feature's symbol Output: A standard apollo feature dictionary ({"features": [{...}]})
def order(self):
    """Return the order for this payment, or None when the payment carries
    no 'order' link."""
    from ..resources.orders import Order
    url = self._get_link('order')
    if not url:
        return None
    resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
    return Order(resp, self.client)
Return the order for this payment.
def _create_filter_by(self):
    """Transform the json-server filter arguments to model-resource ones.

    json-server operator suffixes (``_ne``, ``_lte``, ``_gte``, ``_like``)
    are rewritten into model-resource operator syntax; everything, plus the
    fulltext query, is joined into one comma-separated filter string.
    """
    filter_by = []
    for name, values in request.args.copy().lists():  # copy.lists works in py2 and py3
        if name not in _SKIPPED_ARGUMENTS:
            # strip any operator suffix to recover the bare column name
            column = _re_column_name.search(name).group(1)
            if column not in self._model_columns:
                # silently skip filters on unknown columns
                continue
            for value in values:
                if name.endswith('_ne'):
                    filter_by.append(name[:-3] + '!=' + value)
                elif name.endswith('_lte'):
                    filter_by.append(name[:-4] + '<=' + value)
                elif name.endswith('_gte'):
                    filter_by.append(name[:-4] + '>=' + value)
                elif name.endswith('_like'):
                    filter_by.append(name[:-5] + '::like::%' + value + '%')
                else:
                    # double underscore denotes relation traversal -> dotted path
                    filter_by.append(name.replace('__', '.') + '==' + value)

    filter_by += self._create_fulltext_query()
    return ','.join(filter_by)
Transform the json-server filter arguments to model-resource ones.
def collides(self, position, size):
    '''Returns True if the word collides with another plotted word.'''
    bounding = pygame.Rect(position, self.word_size)
    # collidelistall returns the (possibly empty) list of colliding rects
    return bounding.collidelistall(self.used_pos) != []
Returns True if the word collides with another plotted word.
def post(self, uri=''): "For sql queries that start with 'INSERT ...'" # get node... (node, rule_kw) = node_from_uri(uri, method=request.method) rule_kw.update( node ) values = rule_kw xhr_data = request.get_json() if xhr_data: values.update( xhr_data ) values.update( request.form.to_dict(flat=True) ) values.update( request.args.to_dict(flat=True) ) values['method'] = request.method # Execute the sql query with the data _query(node['id'], **values) response = make_response('ok', 201) return response
For sql queries that start with 'INSERT ...
def get_values(self):
    """Return a copy of field values on the current object.

    Nearly equivalent to ``vars(self).copy()``, except that the private
    bookkeeping attributes (server config, field registry, metadata and,
    when present, path fields) are stripped so only user-provided field
    values remain.

    :return: A dict mapping field names to user-provided values.
    """
    values = dict(vars(self))
    for bookkeeping in ('_server_config', '_fields', '_meta'):
        del values[bookkeeping]
    # '_path_fields' is optional; pop with a default instead of checking
    values.pop('_path_fields', None)
    return values
Return a copy of field values on the current object. This method is almost identical to ``vars(self).copy()``. However, only instance attributes that correspond to a field are included in the returned dict. :return: A dict mapping field names to user-provided values.
def list_pod_disruption_budget_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind PodDisruptionBudget

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: Pagination token from a previous list call; must be
        combined with otherwise-identical query parameters. A stale token
        yields a 410 ResourceExpired with a fresh continue token.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses for a list call; when more
        items exist the server sets `continue` on the list metadata so the
        next chunk can be fetched with the same query. Not supported when
        watch is true.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows
        changes after that version; for list calls it controls result
        freshness relative to the server's storage.
    :param int timeout_seconds: Timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1beta1PodDisruptionBudgetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the payload is wanted, not the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs)
    return self.list_pod_disruption_budget_for_all_namespaces_with_http_info(**kwargs)
list or watch objects of kind PodDisruptionBudget This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_pod_disruption_budget_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1PodDisruptionBudgetList If the method is called asynchronously, returns the request thread.
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True):
    """Apply a transformation function to per-sample total abundances.

    :type biomf: A BIOM file.
    :param biomf: OTU table format.

    :param fn: Callable applied to each total abundance value. Defaults to
               base-10 logarithm.

    :param sampleIDs: Optional subset of sample IDs, passed through to
                      raw_abundance.

    :param sample_abd: Passed through to raw_abundance; compute abundance
                       per sample when True.

    :rtype: dict
    :return: Mapping of sample ID to the transformed abundance value.
    """
    abundance_totals = raw_abundance(biomf, sampleIDs, sample_abd)
    transformed = {}
    for sample_id, total in abundance_totals.items():
        transformed[sample_id] = fn(total)
    return transformed
Function to transform the total abundance calculation for each sample ID to another format based on user given transformation function. :type biomf: A BIOM file. :param biomf: OTU table format. :param fn: Mathematical function which is used to transform smax to another format. By default, the function has been given as base 10 logarithm. :rtype: dict :return: Returns a dictionary similar to output of raw_abundance function but with the abundance values modified by the mathematical operation. By default, the operation performed on the abundances is base 10 logarithm.
def get_share_info(self, grantee_type=None, grantee_id=None, grantee_name=None, owner=None, owner_type='name'):
    """Fetch share information, optionally filtered by grantee and/or owner.

    :param grantee_type: type of the grantee to filter on
    :param grantee_id: id of the grantee to filter on
    :param grantee_name: name of the grantee to filter on
    :param owner: owner account selector value
    :param owner_type: how ``owner`` is interpreted ('name' by default)
    :returns: list of dict representing shares informations
    """
    params = {}
    # Build the 'grantee' sub-dict lazily; setdefault replaces the
    # repeated "if 'grantee' not in params.keys()" guards.
    if grantee_type:
        params.setdefault('grantee', {})['type'] = grantee_type
    if grantee_id:
        params.setdefault('grantee', {})['id'] = grantee_id
    if grantee_name:
        params.setdefault('grantee', {})['name'] = grantee_name
    if owner:
        params['owner'] = {'by': owner_type, '_content': owner}

    try:
        resp = self.request('GetShareInfo', params)
    # if user never logged in, no mailbox was created
    except ZimbraSoapServerError as e:
        if 'mailbox not found for account' in str(e):
            return []
        # bare raise preserves the original traceback (was `raise e`)
        raise

    # Normalize the response: the server returns a list when there are
    # several shares and a bare dict when there is exactly one.
    if resp and isinstance(resp['share'], list):
        return resp['share']
    elif resp and isinstance(resp['share'], dict):
        return [resp['share']]
    return []
:returns: list of dict representing shares informations
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:
    """Gets a query from the data source.

    1) Extracts the query from the data source.
    2) Inserts the result into any data sinks.
    3) Transforms the result into the requested type if it wasn't already.
    4) Inserts the transformed result into any data sinks.

    Args:
        query: The query being requested.
        context: The context for the extraction (mutable).

    Returns:
        The requested object.
    """
    response = self._source.get(self._source_type, deepcopy(query), context)
    LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=response, query=query, source=self._source))

    LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=response))
    for data_sink in self._before_transform:
        data_sink.put(response, context)

    LOGGER.info("Converting result \"{result}\" to request type".format(result=response))
    response = self._transform(data=response, context=context)

    LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=response))
    for data_sink in self._after_transform:
        data_sink.put(response, context)
    return response
Gets a query from the data source. 1) Extracts the query from the data source. 2) Inserts the result into any data sinks. 3) Transforms the result into the requested type if it wasn't already. 4) Inserts the transformed result into any data sinks. Args: query: The query being requested. context: The context for the extraction (mutable). Returns: The requested object.
def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):
    """Extract a rectangular region of the data given corner coordinates.

    The region covers rows ``y1:y2`` and columns ``x1:x2``, optionally
    subsampled by ``ystep``/``xstep``, and optionally cast to ``astype``.
    """
    region = np.s_[y1:y2:ystep, x1:x2:xstep]
    cutout = self._slice(region)
    if astype:
        cutout = cutout.astype(astype, copy=False)
    return cutout
cut out data area based on coords.
def init_app(self, app):
    """Flask application initialization.

    Loads the extension configuration, registers this extension instance
    on the application, and defers signal wiring until the first request
    is handled.
    """
    self.init_config(app)
    # Register the extension instance so other code can look it up via
    # app.extensions['invenio-github'].
    app.extensions['invenio-github'] = self

    @app.before_first_request
    def connect_signals():
        """Connect OAuthClient signals."""
        # Imported lazily (inside the hook) to avoid import cycles at
        # extension-initialization time.
        from invenio_oauthclient.models import RemoteAccount
        from invenio_oauthclient.signals import account_setup_committed
        from .api import GitHubAPI
        from .handlers import account_post_init

        # Run post-init handling whenever an OAuth account setup commits.
        account_setup_committed.connect(
            account_post_init,
            sender=GitHubAPI.remote._get_current_object()
        )

        @event.listens_for(RemoteAccount, 'before_delete')
        def receive_before_delete(mapper, connection, target):
            """Listen for the 'before_delete' event."""
Flask application initialization.
def collection(name=None):
    """Render the collection page.

    It renders it either with a collection specific template (aka
    collection_{collection_name}.html) or with the default collection
    template (collection.html).

    :param name: Collection name; the root collection (id 1) is used
        when omitted.
    """
    if name is None:
        collection = Collection.query.get_or_404(1)
    else:
        collection = Collection.query.filter(
            Collection.name == name).first_or_404()

    # TODO add breadcrumbs
    # breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]

    # Use the resolved collection's name rather than the raw ``name``
    # argument: for the root collection (``name is None``) the previous
    # code passed None to slugify, which failed before the template
    # fallback chain could be tried. When ``name`` is given the two are
    # identical because of the equality filter above.
    return render_template([
        'invenio_collections/collection_{0}.html'.format(collection.id),
        'invenio_collections/collection_{0}.html'.format(
            slugify(collection.name, '_')),
        current_app.config['COLLECTIONS_DEFAULT_TEMPLATE']
    ], collection=collection)
Render the collection page. It renders it either with a collection specific template (aka collection_{collection_name}.html) or with the default collection template (collection.html).
def _combine_files(tsv_files, work_dir, data):
    """Combine multiple priority tsv files into a final sorted output.

    :param tsv_files: per-caller prioritized TSV files to merge
    :param work_dir: directory in which the combined output is written
    :param data: bcbio sample data dictionary (used for sample naming)
    :return: path to the combined, sorted prioritization TSV
    """
    header = "\t".join(["caller", "sample", "chrom", "start", "end", "svtype",
                        "lof", "annotation", "split_read_support",
                        "paired_support_PE", "paired_support_PR"])
    sample = dd.get_sample_name(data)
    out_file = os.path.join(work_dir, "%s-prioritize.tsv" % (sample))
    # Only build the output once; file_transaction gives an atomic
    # temporary path that is moved into place on success.
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            tmpdir = os.path.dirname(tx_out_file)
            input_files = " ".join(tsv_files)
            sort_cmd = bedutils.get_sort_cmd(tmpdir)
            # Doubled braces become literal shell grouping braces after
            # .format(); sort by chromosome (-k3) then start position (-k4n).
            cmd = "{{ echo '{header}'; cat {input_files} | {sort_cmd} -k3,3 -k4,4n; }} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Combine prioritized from multiple callers")
    return out_file
Combine multiple priority tsv files into a final sorted output.
def parse_options():
    """
    Parse command-line options via getopt and return the settings tuple.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ac:e:hilms:t:vx',
                                   ['adapt', 'comp=', 'enum=', 'exhaust',
                                    'help', 'incr', 'blo', 'minimize',
                                    'solver=', 'trim=', 'verbose'])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err).capitalize())
        usage()
        sys.exit(1)

    # defaults
    adapt = exhaust = incr = blo = minz = False
    cmode = None
    to_enum = 1
    solver = 'g3'
    trim = 0
    verbose = 1

    for option, value in opts:
        if option in ('-a', '--adapt'):
            adapt = True
        elif option in ('-c', '--comp'):
            cmode = str(value)
        elif option in ('-e', '--enum'):
            to_enum = str(value)
            # 'all' means unbounded enumeration, encoded as 0
            to_enum = 0 if to_enum == 'all' else int(to_enum)
        elif option in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif option in ('-i', '--incr'):
            incr = True
        elif option in ('-l', '--blo'):
            blo = True
        elif option in ('-m', '--minimize'):
            minz = True
        elif option in ('-s', '--solver'):
            solver = str(value)
        elif option in ('-t', '--trim'):
            trim = int(value)
        elif option in ('-v', '--verbose'):
            verbose += 1
        elif option in ('-x', '--exhaust'):
            exhaust = True
        else:
            assert False, 'Unhandled option: {0} {1}'.format(option, value)

    return (adapt, blo, cmode, to_enum, exhaust, incr, minz, solver, trim,
            verbose, args)
Parses command-line option
def append(self, val):
    """Connect any new results to the resultset.

    This is where all the heavy lifting is done for creating results:
    - We add a datatype here, so that each result can handle
      validation etc independently. This is so that scraper authors
      don't need to worry about creating and passing around datatype objects.
    - As the scraper author yields result objects, we append them to
      a resultset.
    - This is also where we normalize dialects.
    """
    # NOTE: uses `unicode`, so this code targets Python 2.
    val.resultset = self
    val.dataset = self.dataset

    # Check result dimensions against available dimensions for this dataset
    if val.dataset:
        dataset_dimensions = self.dataset.dimensions
        for k, v in val.raw_dimensions.items():
            # Unknown keys get a fresh Dimension with no datatype/dialect.
            if k not in dataset_dimensions:
                d = Dimension(k)
            else:
                d = dataset_dimensions[k]

            # Normalize if we have a datatype and a foreign dialect
            normalized_value = unicode(v)
            if d.dialect and d.datatype:
                if d.dialect in d.datatype.dialects:
                    for av in d.allowed_values:
                        # Not all allowed_value have all dialects
                        if unicode(v) in av.dialects.get(d.dialect, []):
                            normalized_value = av.value

                            # Use first match
                            # We do not support multiple matches
                            # This is by design.
                            break

            # Create DimensionValue object
            if isinstance(v, DimensionValue):
                # Already a DimensionValue: update its value in place.
                dim = v
                v.value = normalized_value
            else:
                if k in dataset_dimensions:
                    dim = DimensionValue(normalized_value, d)
                else:
                    dim = DimensionValue(normalized_value, Dimension())

            val.dimensionvalues.append(dim)

        # Add last list of dimension values to the ResultSet
        # They will usually be the same for each result
        self.dimensionvalues = val.dimensionvalues

    super(ResultSet, self).append(val)
Connect any new results to the resultset. This is where all the heavy lifting is done for creating results: - We add a datatype here, so that each result can handle validation etc independently. This is so that scraper authors don't need to worry about creating and passing around datatype objects. - As the scraper author yields result objects, we append them to a resultset. - This is also where we normalize dialects.
def set_file_logger(self, log_level, path, logger_name='internetarchive'):
    """Convenience function to quickly configure any level of logging to a file.

    :type log_level: str
    :param log_level: A log level as specified in the `logging` module
                      (e.g. 'DEBUG', 'INFO').

    :type path: string
    :param path: Path to the log file. The file will be created if it
                 doesn't already exist.

    :type logger_name: str
    :param logger_name: (optional) The name of the logger.
    """
    named_levels = {
        'NOTSET': 0,
        'DEBUG': 10,
        'INFO': 20,
        'WARNING': 30,
        'ERROR': 40,
        'CRITICAL': 50,
    }
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

    # Logger itself stays at DEBUG; filtering happens on the handler.
    target_logger = logging.getLogger(logger_name)
    target_logger.setLevel(logging.DEBUG)

    file_handler = logging.FileHandler(path, encoding='utf-8')
    file_handler.setLevel(named_levels[log_level])
    file_handler.setFormatter(logging.Formatter(log_format))
    target_logger.addHandler(file_handler)
Convenience function to quickly configure any level of logging to a file. :type log_level: str :param log_level: A log level as specified in the `logging` module. :type path: string :param path: Path to the log file. The file will be created if it doesn't already exist. :type logger_name: str :param logger_name: (optional) The name of the logger.
def print_commands(self, out=sys.stdout):
    '''
    utility method to print commands and descriptions for @BotFather
    '''
    for command in self.list_commands():
        # skip commands flagged as non-printable
        if not command.printable:
            continue
        out.write('%s\n' % command)
utility method to print commands and descriptions for @BotFather
def import_links(self, links):
    """
    Import links, returns import result
    (http://confluence.jetbrains.net/display/YTD2/Import+Links)

    Accepts result of getLinks().

    Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'},
                          {'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}])
    """
    pieces = ['<list>\n']
    for link in links:
        # ignore typeOutward and typeInward returned by getLinks()
        attr_text = "".join(
            key + '=' + quoteattr(link[key]) + ' '
            for key in link if key not in ['typeInward', 'typeOutward'])
        pieces.append(' <link ' + attr_text + '/>\n')
    pieces.append('</list>')
    xml = ''.join(pieces)
    # TODO: convert response xml into python objects
    res = self._req_xml('PUT', '/import/links', xml, 400)
    return res.toxml() if hasattr(res, "toxml") else res
Import links, returns import result (http://confluence.jetbrains.net/display/YTD2/Import+Links) Accepts result of getLinks() Example: importLinks([{'login':'vadim', 'fullName':'vadim', 'email':'eee@ss.com', 'jabber':'fff@fff.com'}, {'login':'maxim', 'fullName':'maxim', 'email':'aaa@ss.com', 'jabber':'www@fff.com'}])
def add_command(self, handler, name=None):
    """Add a subcommand `name` which invokes `handler`.

    When `name` is omitted it is derived from the handler's docstring.
    """
    subcommand = name if name is not None else docstring_to_subcommand(handler.__doc__)
    # TODO: Prevent overwriting 'help'?
    self._commands[subcommand] = handler
Add a subcommand `name` which invokes `handler`.
def get_item(self, item_id, expand=False):
    """Returns Hacker News `Item` object.

    Fetches the data from url:
    https://hacker-news.firebaseio.com/v0/item/<item_id>.json
    e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json

    Args:
        item_id (int or string): Unique item id of Hacker News story,
            comment etc.
        expand (bool): Flag to indicate whether to transform all IDs
            into objects.

    Returns:
        `Item` object representing Hacker News item.

    Raises:
        InvalidItemID: If corresponding Hacker News story does not exist.
    """
    response = self._get_sync(urljoin(self.item_url, F"{item_id}.json"))
    if not response:
        raise InvalidItemID

    item = Item(response)
    if not expand:
        return item

    # Recursively resolve referenced IDs into full objects.
    item.by = self.get_user(item.by)
    item.kids = self.get_items_by_ids(item.kids) if item.kids else None
    item.parent = self.get_item(item.parent) if item.parent else None
    item.poll = self.get_item(item.poll) if item.poll else None
    item.parts = self.get_items_by_ids(item.parts) if item.parts else None
    return item
Returns Hacker News `Item` object. Fetches the data from url: https://hacker-news.firebaseio.com/v0/item/<item_id>.json e.g. https://hacker-news.firebaseio.com/v0/item/69696969.json Args: item_id (int or string): Unique item id of Hacker News story, comment etc. expand (bool): expand (bool): Flag to indicate whether to transform all IDs into objects. Returns: `Item` object representing Hacker News item. Raises: InvalidItemID: If corresponding Hacker News story does not exist.
def allocate(self, amount, update=True):
    """
    This allocates capital to the Security. This is the method used to
    buy/sell the security.

    A given amount of shares will be determined on the current price, a
    commission will be calculated based on the parent's commission fn, and
    any remaining capital will be passed back up to parent as an
    adjustment.

    Args:
        * amount (float): Amount of adjustment.
        * update (bool): Force update?
    """
    # will need to update if this has been idle for a while...
    # update if needupdate or if now is stale
    # fetch parent's now since our now is stale
    if self._needupdate or self.now != self.parent.now:
        self.update(self.parent.now)

    # ignore 0 alloc
    # Note that if the price of security has dropped to zero, then it
    # should never be selected by SelectAll, SelectN etc. I.e. we should
    # not open the position at zero price. At the same time, we are able
    # to close it at zero price, because at that point amount=0.
    # Note also that we don't erase the position in an asset which price
    # has dropped to zero (though the weight will indeed be = 0)
    if amount == 0:
        return

    if self.parent is self or self.parent is None:
        raise Exception(
            'Cannot allocate capital to a parentless security')

    if self._price == 0 or np.isnan(self._price):
        raise Exception(
            'Cannot allocate capital to '
            '%s because price is %s as of %s'
            % (self.name, self._price, self.parent.now))

    # buy/sell
    # determine quantity - must also factor in commission
    # closing out?
    if amount == -self._value:
        # closing the whole position: sell exactly what we hold
        q = -self._position
    else:
        q = amount / (self._price * self.multiplier)
        if self.integer_positions:
            if (self._position > 0) or ((self._position == 0) and (
                    amount > 0)):
                # if we're going long or changing long position
                q = math.floor(q)
            else:
                # if we're going short or changing short position
                q = math.ceil(q)

    # if q is 0 nothing to do
    if q == 0 or np.isnan(q):
        return

    # unless we are closing out a position (q == -position)
    # we want to ensure that
    #
    # - In the event of a positive amount, this indicates the maximum
    # amount a given security can use up for a purchase. Therefore, if
    # commissions push us above this amount, we cannot buy `q`, and must
    # decrease its value
    #
    # - In the event of a negative amount, we want to 'raise' at least the
    # amount indicated, no less. Therefore, if we have commission, we must
    # sell additional units to fund this requirement. As such, q must once
    # again decrease.
    #
    if not q == -self._position:
        full_outlay, _, _ = self.outlay(q)

        # if full outlay > amount, we must decrease the magnitude of `q`
        # this can potentially lead to an infinite loop if the commission
        # per share > price per share. However, we cannot really detect
        # that in advance since the function can be non-linear (say a fn
        # like max(1, abs(q) * 0.01). Nevertheless, we want to avoid these
        # situations.
        # cap the maximum number of iterations to 1e4 and raise exception
        # if we get there
        # if integer positions then we know we are stuck if q doesn't change
        # if integer positions is false then we want full_outlay == amount
        # if integer positions is true then we want to be at the q where
        #   if we bought 1 more then we wouldn't have enough cash
        i = 0
        last_q = q
        last_amount_short = full_outlay - amount
        while not np.isclose(full_outlay, amount, rtol=0.) and q != 0:

            # Newton-like step: shrink q by the cash shortfall expressed
            # in shares (ignoring transaction costs for the step size).
            dq_wout_considering_tx_costs = (full_outlay - amount) / (self._price * self.multiplier)
            q = q - dq_wout_considering_tx_costs

            if self.integer_positions:
                q = math.floor(q)

            full_outlay, _, _ = self.outlay(q)

            # if our q is too low and we have integer positions
            # then we know that the correct quantity is the one where
            # the outlay of q + 1 < amount. i.e. if we bought one more
            # position then we wouldn't have enough cash
            if self.integer_positions:
                full_outlay_of_1_more, _, _ = self.outlay(q + 1)

                if full_outlay < amount and full_outlay_of_1_more > amount:
                    break

            # if not integer positions then we should keep going until
            # full_outlay == amount or is close enough

            i = i + 1
            if i > 1e4:
                raise Exception(
                    'Potentially infinite loop detected. This occurred '
                    'while trying to reduce the amount of shares purchased'
                    ' to respect the outlay <= amount rule. This is most '
                    'likely due to a commission function that outputs a '
                    'commission that is greater than the amount of cash '
                    'a short sale can raise.')

            if self.integer_positions and last_q == q:
                raise Exception(
                    'Newton Method like root search for quantity is stuck!'
                    ' q did not change in iterations so it is probably a bug'
                    ' but we are not entirely sure it is wrong! Consider '
                    ' changing to warning.'
                )
            last_q = q

            if np.abs(full_outlay - amount) > np.abs(last_amount_short):
                raise Exception(
                    'The difference between what we have raised with q and'
                    ' the amount we are trying to raise has gotten bigger since'
                    ' last iteration! full_outlay should always be approaching'
                    ' amount! There may be a case where the commission fn is'
                    ' not smooth'
                )
            last_amount_short = full_outlay - amount

    # if last step led to q == 0, then we can return just like above
    if q == 0:
        return

    # this security will need an update, even if pos is 0 (for example if
    # we close the positions, value and pos is 0, but still need to do that
    # last update)
    self._needupdate = True

    # adjust position & value
    self._position += q

    # calculate proper adjustment for parent
    # parent passed down amount so we want to pass
    # -outlay back up to parent to adjust for capital
    # used
    full_outlay, outlay, fee = self.outlay(q)

    # store outlay for future reference
    self._outlay += outlay

    # call parent
    self.parent.adjust(-full_outlay, update=update, flow=False, fee=fee)
This allocates capital to the Security. This is the method used to buy/sell the security. A given amount of shares will be determined on the current price, a commission will be calculated based on the parent's commission fn, and any remaining capital will be passed back up to parent as an adjustment. Args: * amount (float): Amount of adjustment. * update (bool): Force update?
def get_project_by_network_id(network_id, **kwargs):
    """Get a project complexmodel by a network_id.

    :param network_id: id of the network whose owning project is wanted
    :param kwargs: must carry ``user_id`` of the requesting user
    :return: the last readable matching project, or None when the user
             cannot read any of them
    """
    user_id = kwargs.get('user_id')
    projects_i = db.DBSession.query(Project).join(ProjectOwner).join(
        Network, Project.id == Network.project_id).filter(
        Network.id == network_id,
        ProjectOwner.user_id == user_id).order_by('name').all()

    ret_project = None
    for project_i in projects_i:
        try:
            project_i.check_read_permission(user_id)
            # NOTE(review): if several projects match, the *last* readable
            # one wins here — confirm this is intended rather than breaking
            # after the first readable project.
            ret_project = project_i
        # narrowed from a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt
        except Exception:
            log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id)

    return ret_project
get a project complexmodel by a network_id
def new_title_bar(self, title, color=None):
    """Return an array of Pdf Objects which constitute a Header"""
    # Build a title bar for top of page
    bar_width = '100%'
    bar_thickness = 2
    bar_color = color or HexColor('#404040')
    title = '<b>{0}</b>'.format(title)

    # Register the TitleBar paragraph style once per stylesheet.
    if 'TitleBar' not in self.stylesheet:
        self.stylesheet.add(ParagraphStyle('TitleBar',
                                           parent=self.stylesheet['Normal'],
                                           fontName='Helvetica-Bold',
                                           fontSize=10, leading=10,
                                           alignment=TA_CENTER))

    return [HRFlowable(width=bar_width, thickness=bar_thickness,
                       color=bar_color, spaceAfter=2, vAlign='MIDDLE',
                       lineCap='square'),
            self.new_paragraph(title, 'TitleBar'),
            HRFlowable(width=bar_width, thickness=bar_thickness,
                       color=bar_color, spaceBefore=2, vAlign='MIDDLE',
                       lineCap='square')]
Return an array of Pdf Objects which constitute a Header
def json(self):
    """Custom JSON encoder"""
    output = {
        'type': self.type,
        'filename': self.filename,
        'line_number': self.lineno,
        'hashed_secret': self.secret_hash,
    }
    # is_secret is tri-state; only serialize it once it has been decided.
    if self.is_secret is not None:
        output['is_secret'] = self.is_secret
    return output
Custom JSON encoder
def read_surfrad(filename, map_variables=True):
    """Read in a daily NOAA SURFRAD[1] file.

    Parameters
    ----------
    filename: str
        Filepath or url.
    map_variables: bool
        When true, renames columns of the Dataframe to pvlib variable names
        where applicable. See variable SURFRAD_COLUMNS.

    Returns
    -------
    Tuple of the form (data, metadata).

    data: Dataframe
        Dataframe with the fields found below.
    metadata: dict
        Site metadata included in the file.

    Notes
    -----
    Metadata dictionary includes the following fields:

    ===============  ======  ===============
    Key              Format  Description
    ===============  ======  ===============
    station          String  site name
    latitude         Float   site latitude
    longitude        Float   site longitude
    elevation        Int     site elevation
    surfrad_version  Int     surfrad version
    tz               String  Timezone (UTC)
    ===============  ======  ===============

    Dataframe includes the following fields:

    =======================  ======  ==========================================
    raw, mapped              Format  Description
    =======================  ======  ==========================================
    **Mapped field names are returned when the map_variables argument is True**
    ---------------------------------------------------------------------------
    year                     int     year as 4 digit int
    jday                     int     day of year 1-365(or 366)
    month                    int     month (1-12)
    day                      int     day of month(1-31)
    hour                     int     hour (0-23)
    minute                   int     minute (0-59)
    dt                       float   decimal time i.e. 23.5 = 2330
    zen, solar_zenith        float   solar zenith angle (deg)
    **Fields below have associated qc flags labeled <field>_flag.**
    ---------------------------------------------------------------------------
    dw_solar, ghi            float   downwelling global solar(W/m^2)
    uw_solar                 float   upwelling global solar(W/m^2)
    direct_n, dni            float   direct normal solar (W/m^2)
    diffuse, dhi             float   downwelling diffuse solar (W/m^2)
    dw_ir                    float   downwelling thermal infrared (W/m^2)
    dw_casetemp              float   downwelling IR case temp (K)
    dw_dometemp              float   downwelling IR dome temp (K)
    uw_ir                    float   upwelling thermal infrared (W/m^2)
    uw_casetemp              float   upwelling IR case temp (K)
    uw_dometemp              float   upwelling IR case temp (K)
    uvb                      float   global uvb (miliWatts/m^2)
    par                      float   photosynthetically active radiation(W/m^2)
    netsolar                 float   net solar (dw_solar - uw_solar) (W/m^2)
    netir                    float   net infrared (dw_ir - uw_ir) (W/m^2)
    totalnet                 float   net radiation (netsolar+netir) (W/m^2)
    temp, temp_air           float   10-meter air temperature (?C)
    rh, relative_humidity    float   relative humidity (%)
    windspd, wind_speed      float   wind speed (m/s)
    winddir, wind_direction  float   wind direction (deg, clockwise from north)
    pressure                 float   station pressure (mb)
    =======================  ======  ==========================================

    See README files located in the station directories in the SURFRAD
    data archives[2] for details on SURFRAD daily data files.

    References
    ----------
    [1] NOAA Earth System Research Laboratory Surface Radiation Budget Network
        `SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_
    [2] NOAA SURFRAD Data Archive
        `SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_
    """
    if filename.startswith('ftp'):
        # Remote file: fetch and wrap in an in-memory text buffer so the
        # parsing below is identical for both code paths.
        req = Request(filename)
        response = urlopen(req)
        file_buffer = io.StringIO(response.read().decode(errors='ignore'))
    else:
        file_buffer = open(filename, 'r')

    # Read and parse the first two lines to build the metadata dict.
    station = file_buffer.readline()
    file_metadata = file_buffer.readline()

    metadata_list = file_metadata.split()
    metadata = {}
    metadata['name'] = station.strip()
    metadata['latitude'] = float(metadata_list[0])
    metadata['longitude'] = float(metadata_list[1])
    metadata['elevation'] = float(metadata_list[2])
    metadata['surfrad_version'] = int(metadata_list[-1])
    metadata['tz'] = 'UTC'

    data = pd.read_csv(file_buffer, delim_whitespace=True,
                       header=None, names=SURFRAD_COLUMNS)
    file_buffer.close()

    data = format_index(data)
    # -9999.9 is the SURFRAD missing-value sentinel; replace with NaN.
    missing = data == -9999.9
    data = data.where(~missing, np.NaN)

    if map_variables:
        data.rename(columns=VARIABLE_MAP, inplace=True)
    return data, metadata
Read in a daily NOAA SURFRAD[1] file. Parameters ---------- filename: str Filepath or url. map_variables: bool When true, renames columns of the Dataframe to pvlib variable names where applicable. See variable SURFRAD_COLUMNS. Returns ------- Tuple of the form (data, metadata). data: Dataframe Dataframe with the fields found below. metadata: dict Site metadata included in the file. Notes ----- Metadata dictionary includes the following fields: =============== ====== =============== Key Format Description =============== ====== =============== station String site name latitude Float site latitude longitude Float site longitude elevation Int site elevation surfrad_version Int surfrad version tz String Timezone (UTC) =============== ====== =============== Dataframe includes the following fields: ======================= ====== ========================================== raw, mapped Format Description ======================= ====== ========================================== **Mapped field names are returned when the map_variables argument is True** --------------------------------------------------------------------------- year int year as 4 digit int jday int day of year 1-365(or 366) month int month (1-12) day int day of month(1-31) hour int hour (0-23) minute int minute (0-59) dt float decimal time i.e. 
23.5 = 2330 zen, solar_zenith float solar zenith angle (deg) **Fields below have associated qc flags labeled <field>_flag.** --------------------------------------------------------------------------- dw_solar, ghi float downwelling global solar(W/m^2) uw_solar float updownwelling global solar(W/m^2) direct_n, dni float direct normal solar (W/m^2) diffuse, dhi float downwelling diffuse solar (W/m^2) dw_ir float downwelling thermal infrared (W/m^2) dw_casetemp float downwelling IR case temp (K) dw_dometemp float downwelling IR dome temp (K) uw_ir float upwelling thermal infrared (W/m^2) uw_casetemp float upwelling IR case temp (K) uw_dometemp float upwelling IR case temp (K) uvb float global uvb (miliWatts/m^2) par float photosynthetically active radiation(W/m^2) netsolar float net solar (dw_solar - uw_solar) (W/m^2) netir float net infrared (dw_ir - uw_ir) (W/m^2) totalnet float net radiation (netsolar+netir) (W/m^2) temp, temp_air float 10-meter air temperature (?C) rh, relative_humidity float relative humidity (%) windspd, wind_speed float wind speed (m/s) winddir, wind_direction float wind direction (deg, clockwise from north) pressure float station pressure (mb) ======================= ====== ========================================== See README files located in the station directories in the SURFRAD data archives[2] for details on SURFRAD daily data files. References ---------- [1] NOAA Earth System Research Laboratory Surface Radiation Budget Network `SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_ [2] NOAA SURFRAD Data Archive `SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_
def _set_metric(self, metric_name, metric_type, value, tags=None): """ Set a metric """ if metric_type == self.GAUGE: self.gauge(metric_name, value, tags=tags) else: self.log.error('Metric type "{}" unknown'.format(metric_type))
Set a metric
def parameter_to_field(self, name):
    """
    Promotes a parameter to a field by creating a new array of same size
    as the other existing fields, filling it with the current value of
    the parameter, and then removing that parameter.

    :param name: name of an existing parameter; must not collide with an
        existing field name.
    :raises ValueError: if the parameter is missing or a field with the
        same name already exists.
    """
    if name not in self._parameters:
        raise ValueError("no '%s' parameter found" % (name))
    # membership test instead of count() > 0: same result, stops at the
    # first match
    if name in self._fields:
        raise ValueError("field with name '%s' already exists" % (name))

    # Broadcast the scalar parameter value into a per-fix array.
    data = np.array([self._parameters[name]] * self._num_fix)
    self.rm_parameter(name)
    self.add_field(name, data)
Promotes a parameter to a field by creating a new array of same size as the other existing fields, filling it with the current value of the parameter, and then removing that parameter.
def bytes(self):
    """ Returns a t-uple with instruction bytes (integers) """
    result = []
    op = self.opcode.split(' ')
    argi = 0  # index of the next instruction argument to emit

    while op:
        q = op.pop(0)

        if q == 'XX':
            # 'XX' marks an argument placeholder: consume the remaining
            # placeholder tokens belonging to this argument, then emit
            # the argument's value via num2bytes.
            for k in range(self.argbytes[argi] - 1):
                op.pop(0)

            result.extend(num2bytes(self.argval()[argi], self.argbytes[argi]))
            argi += 1
        else:
            result.append(int(q, 16))  # Add opcode

    # Sanity check: emitted byte count must match the declared size.
    if len(result) != self.size:
        raise InternalMismatchSizeError(len(result), self)

    return result
Returns a t-uple with instruction bytes (integers)
def attach_lb_to_subnets(self, name, subnets):
    """
    Attaches load balancer to one or more subnets.
    Attaching subnets that are already registered with the
    Load Balancer has no effect.

    :type name: string
    :param name: The name of the Load Balancer

    :type subnets: List of strings
    :param subnets: The name of the subnet(s) to add.

    :rtype: List of strings
    :return: An updated list of subnets for this Load Balancer.
    """
    query_params = {'LoadBalancerName': name}
    self.build_list_params(query_params, subnets, 'Subnets.member.%d')
    return self.get_list('AttachLoadBalancerToSubnets', query_params, None)
Attaches load balancer to one or more subnets. Attaching subnets that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to add. :rtype: List of strings :return: An updated list of subnets for this Load Balancer.
def flatten_dict(self, obj):
    """Return an OrderedDict dict preserving order of keys in fieldnames"""
    pairs = zip(self.fieldnames, self.flatten(obj))
    return OrderedDict(pairs)
Return an OrderedDict dict preserving order of keys in fieldnames
def make_index(css_class, entities):
    """
    Generate the HTML index (a short description and a link to the full
    documentation) for a list of FunctionDocs or ClassDocs.
    """
    def make_entry(entity):
        fields = {
            'name': entity.name,
            'url': entity.url,
            'doc': first_sentence(entity.doc)
        }
        return ('<dt><a href = "%(url)s">%(name)s</a></dt>\n' +
                '<dd>%(doc)s</dd>') % fields

    entry_text = '\n'.join(make_entry(val) for val in entities)
    if not entry_text:
        return ''
    return '<dl class = "%s">\n%s\n</dl>' % (css_class, entry_text)
Generate the HTML index (a short description and a link to the full documentation) for a list of FunctionDocs or ClassDocs.
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
    """Remove overlapping contours in every glyph of the given UFOs.

    Glyphs for which *glyph_filter* returns a falsy value are skipped
    (by default, glyphs without contours).
    """
    from booleanOperations import union, BooleanOperationsError

    for ufo in ufos:
        font_name = self._font_name(ufo)
        logger.info("Removing overlaps for " + font_name)
        for glyph in filter(glyph_filter, ufo):
            # Snapshot the contours, clear the glyph, then draw the
            # boolean union of the snapshot back into it.
            snapshot = list(glyph)
            glyph.clearContours()
            try:
                union(snapshot, glyph.getPointPen())
            except BooleanOperationsError:
                logger.error(
                    "Failed to remove overlaps for %s: %r", font_name, glyph.name
                )
                raise
Remove overlaps in UFOs' glyphs' contours.
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    """Record new frame parameters for a settings profile and notify listeners.

    Fires ``frame_parameters_changed_event`` with the profile index and the
    new parameters. Listeners are expected to forward to the current or
    record frame-parameter setters when the index matches the current or
    record settings index, respectively.
    """
    event = self.frame_parameters_changed_event
    event.fire(profile_index, frame_parameters)
Set the frame parameters with the settings index and fire the frame parameters changed event. If the settings index matches the current settings index, call set current frame parameters. If the settings index matches the record settings index, call set record frame parameters.
def _ReadStructure( self, file_object, file_offset, data_size, data_type_map, description): """Reads a structure. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_size (int): data size of the structure. data_type_map (dtfabric.DataTypeMap): data type map of the structure. description (str): description of the structure. Returns: object: structure values object. Raises: FileFormatError: if the structure cannot be read. ValueError: if file-like object or date type map are invalid. """ data = self._ReadData(file_object, file_offset, data_size, description) return self._ReadStructureFromByteStream( data, file_offset, data_type_map, description)
Reads a structure. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_size (int): data size of the structure. data_type_map (dtfabric.DataTypeMap): data type map of the structure. description (str): description of the structure. Returns: object: structure values object. Raises: FileFormatError: if the structure cannot be read. ValueError: if file-like object or date type map are invalid.
def createBatchSystem(config):
    """
    Creates an instance of the batch system specified in the given config.

    :param toil.common.Config config: the current configuration

    :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem

    :return: an instance of a concrete subclass of AbstractBatchSystem

    :raises RuntimeError: if the configured batch system is unknown, or if it
        does not support worker cleanup while caching is enabled.
    """
    kwargs = dict(config=config,
                  maxCores=config.maxCores,
                  maxMemory=config.maxMemory,
                  maxDisk=config.maxDisk)

    from toil.batchSystems.registry import batchSystemFactoryFor

    try:
        factory = batchSystemFactoryFor(config.batchSystem)
        batchSystemClass = factory()
    except Exception as e:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; chained so the original cause is preserved.
        raise RuntimeError('Unrecognised batch system: %s' % config.batchSystem) from e

    if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
        raise RuntimeError('%s currently does not support shared caching.  Set the '
                           '--disableCaching flag if you want to '
                           'use this batch system.' % config.batchSystem)

    # Raw strings for the regex and its replacement: "\g<1>" in a non-raw
    # string is an invalid escape sequence (DeprecationWarning). Lazy %-args
    # so the name is only formatted when debug logging is enabled.
    logger.debug('Using the %s',
                 re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", batchSystemClass.__name__).lower())
    return batchSystemClass(**kwargs)
Creates an instance of the batch system specified in the given config. :param toil.common.Config config: the current configuration :rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem :return: an instance of a concrete subclass of AbstractBatchSystem
def get_plugin_by_model(self, model_class):
    """
    Return the corresponding plugin for a given model.

    You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly.
    This is the low-level function that supports that feature.

    :param model_class: a concrete :class:`ContentItem` subclass (the class
        itself, not an instance).
    :raises TypeError: if ``model_class`` is not a :class:`ContentItem` subclass.
    :raises PluginNotFound: if no plugin is registered for ``model_class``.
    """
    self._import_plugins()  # could happen during rendering that no plugin scan happened yet.

    # Guard against passing a model instance (or an unrelated class) instead
    # of a ContentItem subclass. An explicit raise replaces the former
    # `assert`, which would be silently skipped under `python -O`.
    if not (isinstance(model_class, type) and issubclass(model_class, ContentItem)):
        raise TypeError(
            "get_plugin_by_model() expects a ContentItem subclass, got %r." % (model_class,))

    try:
        name = self._name_for_model[model_class]
    except KeyError:
        raise PluginNotFound("No plugin found for model '{0}'.".format(model_class.__name__))
    return self.plugins[name]
Return the corresponding plugin for a given model. You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly. This is the low-level function that supports that feature.
def error_response(message, status=400, code=None):
    """Return a JSON error response.

    :param message: human-readable error message, placed under the
        ``message`` key of the payload.
    :param status: HTTP status code of the response (default 400).
    :param code: optional application-specific error code, included under
        the ``code`` key only when truthy.
    :return: a ``django.http.JsonResponse`` carrying the error payload.
    """
    from django.http import JsonResponse
    data = {'message': message}
    if code:
        data['code'] = code
    # Lazy %-style args: the message is only formatted if the logger
    # actually emits it (was eager str.format).
    LOG.error("Error response, status code is : %s | %s", status, data)
    return JsonResponse(data=data, status=status)
Return error message (in dict).
def run(host=DEFAULT_HOST, port=DEFAULT_PORT, path='.'):
    """Run the development server, serving the site rooted at *path*."""
    root = abspath(path)
    app = Clay(root)
    app.run(host=host, port=port)
Run the development server
def close(self):
    """Shut down the AVR device connection without attempting a reconnect."""
    self.log.warning('Closing connection to AVR')
    # Mark as intentionally closing so reconnect logic elsewhere can skip it.
    self._closing = True
    transport = self.protocol.transport
    if transport:
        transport.close()
Close the AVR device connection and don't try to reconnect.
def has_preview_permission(self, request, obj=None):
    """
    Return ``True`` if the user has permissions to preview a publishable item.

    NOTE: this method does not actually change who can or cannot preview any
    particular item, just whether to show the preview link. The real decision
    is made by a combination of:

     - `PublishingMiddleware` which chooses who can view draft content
     - the view code for a particular item, which may or may not render
       draft content for a specific user.

    :param request: Django request object.
    :param obj: The object the user would preview, if permitted.
    :return: Boolean.
    """
    # Anyone who can publish can always preview.
    if self.has_publish_permission(request, obj=obj):
        return True
    # Otherwise previewing is restricted to active staff members.
    user = request.user
    return bool(user.is_active and user.is_staff)
Return `True` if the user has permissions to preview a publishable item. NOTE: this method does not actually change who can or cannot preview any particular item, just whether to show the preview link. The real decision is made by a combination of: - `PublishingMiddleware` which chooses who can view draft content - the view code for a particular item, which may or may not render draft content for a specific user. :param request: Django request object. :param obj: The object the user would preview, if permitted. :return: Boolean.
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
    """
    Convert valid colormap specifications to a list of colors.

    Accepts a ``Cycle``, an explicit list of colors, a named colormap
    string (resolved against matplotlib, bokeh and colorcet, or only the
    named *provider* when given), or a matplotlib colormap object.

    :param cmap: Colormap specification (Cycle, list, string, or cmap object).
    :param ncolors: Optional palette length; the palette is cycled to this
        length when it differs.
    :param provider: Optional provider name to restrict the lookup to
        ('matplotlib', 'bokeh' or 'colorcet').
    :param categorical: Whether a categorical palette is requested.
    :raises ValueError: If a named colormap is not found among the
        checked providers.
    :raises TypeError: If *cmap* is not a supported specification.
    """
    providers_checked = "matplotlib, bokeh, or colorcet" if provider is None else provider

    if isinstance(cmap, Cycle):
        # A Cycle may mix RGB tuples and hex strings; normalize to hex.
        palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
    elif isinstance(cmap, list):
        palette = cmap
    elif isinstance(cmap, basestring):
        mpl_cmaps = _list_cmaps('matplotlib')
        bk_cmaps = _list_cmaps('bokeh')
        cet_cmaps = _list_cmaps('colorcet')
        if provider == 'matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
            palette = mplcmap_to_palette(cmap, ncolors, categorical)
        elif provider == 'bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
            palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
        elif provider == 'colorcet' or (provider is None and cmap in cet_cmaps):
            from colorcet import palette
            # A trailing '_r' requests the reversed version of the colormap.
            if cmap.endswith('_r'):
                palette = list(reversed(palette[cmap[:-2]]))
            else:
                palette = palette[cmap]
        else:
            raise ValueError("Supplied cmap %s not found among %s colormaps." %
                             (cmap, providers_checked))
    else:
        try:
            # Try processing as matplotlib colormap
            palette = mplcmap_to_palette(cmap, ncolors)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; any other failure falls through to the
            # TypeError below.
            palette = None
    if not isinstance(palette, list):
        raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette." %
                        (cmap, providers_checked))
    # Cycle the palette to the requested length if it does not match.
    if ncolors and len(palette) != ncolors:
        return [palette[i % len(palette)] for i in range(ncolors)]
    return palette
Convert valid colormap specifications to a list of colors.
def from_json(cls, json):
    """Create new DatastoreInputReader from the json, encoded by to_json.

    Args:
      json: json map representation of DatastoreInputReader.

    Returns:
      an instance of DatastoreInputReader with all data deserialized from json.
    """
    ns_range = namespace_range.NamespaceRange.from_json_object(
        json[cls.NAMESPACE_RANGE_PARAM])
    batch_size = json[cls.BATCH_SIZE_PARAM]
    return cls(ns_range, batch_size)
Create new DatastoreInputReader from the json, encoded by to_json. Args: json: json map representation of DatastoreInputReader. Returns: an instance of DatastoreInputReader with all data deserialized from json.