code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def view(self, channel_names='auto', gates=None, diag_kw={}, offdiag_kw={}, gate_colors=None, **kwargs):
    """Plot a matrix of subplots examining the sample across channel pairs.

    Diagonal cells get a 1D histogram of the channel; off-diagonal cells pair
    two channels.

    :param channel_names: list of channels to plot, or 'auto' for all.
    :param gates: forwarded to self.plot.
    :param gate_colors: forwarded to self.plot.
    :return: whatever plot_ndpanel returns (axes references).

    NOTE(review): diag_kw/offdiag_kw are mutable default arguments and are
    never used in the body -- confirm against original intent.
    """
    if channel_names == 'auto':
        channel_names = list(self.channel_names)

    def plot_region(channels, **kwargs):
        # Diagonal cell: both channels equal -> collapse to a single channel
        # and draw a histogram.
        if channels[0] == channels[1]:
            channels = channels[0]
            kind = 'histogram'
        # NOTE(review): `kind` is only bound on the diagonal branch above; as
        # written, off-diagonal cells would raise NameError -- the one-line
        # source may have lost an `else` branch. Confirm.
        self.plot(channels, kind=kind, gates=gates, gate_colors=gate_colors, autolabel=False)

    channel_list = np.array(list(channel_names), dtype=object)
    # Build the full grid of (x, y) channel pairs, indexed both ways.
    channel_mat = [[(x, y) for x in channel_list] for y in channel_list]
    channel_mat = DataFrame(channel_mat, columns=channel_list, index=channel_list)
    kwargs.setdefault('wspace', 0.1)
    kwargs.setdefault('hspace', 0.1)
    return plot_ndpanel(channel_mat, plot_region, **kwargs)
Generates a matrix of subplots allowing for a quick way to examine how the sample looks in different channels. Parameters ---------- channel_names : [list | 'auto'] List of channel names to plot. offdiag_plot : ['histogram' | 'scatter'] Specifies the type of plot for the off-diagonal elements. diag_kw : dict Not implemented Returns ------------ axes references
def serialize_basic(self, data, data_type, **kwargs):
    """Serialize a basic built-in data type (str, int, float or bool).

    :param data: Object to be serialized.
    :param str data_type: Name of the target basic type.
    :param kwargs: Forwarded to the custom-serializer lookup (e.g. is_xml,
        basic_types_serializers).
    :return: The serialized value.
    :raises ValueError: if data_type is not one of the supported basic types.
    """
    custom_serializer = self._get_custom_serializers(data_type, **kwargs)
    if custom_serializer:
        return custom_serializer(data)
    if data_type == 'str':
        return self.serialize_unicode(data)
    # Whitelist the documented basic types instead of eval(): evaluating a
    # caller-supplied string is a code-injection risk and was only ever used
    # to look up int/float/bool here.
    basic_types = {'int': int, 'float': float, 'bool': bool}
    try:
        return basic_types[data_type](data)
    except KeyError:
        raise ValueError("Unknown basic type: {!r}".format(data_type))
Serialize basic built-in data type. Serializes objects to str, int, float or bool. Possible kwargs: - is_xml bool : If set, adapt basic serializers without the need for basic_types_serializers - basic_types_serializers dict[str, callable] : If set, use the callable as serializer :param data: Object to be serialized. :param str data_type: Type of object in the iterable.
def read_host_file(path):
    """Read a host file and return its non-empty hostnames, one per line.

    :param path: Path to the host file.
    :return: list of hostname strings with surrounding whitespace stripped.
    """
    # `file()` and `.xreadlines()` are Python-2-only; iterate the file object
    # directly and close it deterministically with a context manager.
    res = []
    with open(path) as f:
        for line in f:
            hostname = line.strip()
            if hostname:
                res.append(hostname)
    return res
Read the host file. Return a list of hostnames.
def download(self, replace=False):
    """Fetch this dataset's archive from the remote data store.

    The archive is saved under ``self.data_home``, verified against the
    expected SHA-256 signature, and extracted in place.

    Parameters
    ----------
    replace : bool, default: False
        Re-download and overwrite an archive that already exists.
    """
    download_data(
        self.url,
        self.signature,
        data_home=self.data_home,
        extract=True,
        replace=replace,
    )
Download the dataset from the hosted Yellowbrick data store and save it to the location specified by ``get_data_home``. The downloader verifies the download completed successfully and safely by comparing the expected signature with the SHA 256 signature of the downloaded archive file. Parameters ---------- replace : bool, default: False If the data archive already exists, replace the dataset. If this is False and the dataset exists, an exception is raised.
def delete_from_environment(self, environment, synchronous=True):
    """Delete this content view version from the given environment.

    :param environment: An ``Environment`` entity, or a bare environment ID
        (accepted as a convenience).
    :param synchronous: Forwarded to ``_handle_response``; presumably controls
        waiting on a spawned task -- confirm against that helper.
    :return: Whatever ``_handle_response`` returns for the DELETE call.
    """
    # Accept either an entity object or a raw ID.
    if isinstance(environment, Environment):
        environment_id = environment.id
    else:
        environment_id = environment
    response = client.delete(
        '{0}/environments/{1}'.format(self.path(), environment_id),
        **self._server_config.get_client_kwargs()
    )
    return _handle_response(response, self._server_config, synchronous)
Delete this content view version from an environment. This method acts much like :meth:`nailgun.entity_mixins.EntityDeleteMixin.delete`. The documentation on that method describes how the deletion procedure works in general. This method differs only in accepting an ``environment`` parameter. :param environment: A :class:`nailgun.entities.Environment` object. The environment's ``id`` parameter *must* be specified. As a convenience, an environment ID may be passed in instead of an ``Environment`` object.
def add_dependency(self, from_task_name, to_task_name):
    """Add a dependency edge between two tasks and persist the job.

    :param from_task_name: Name of the upstream task.
    :param to_task_name: Name of the downstream task.
    :raises DagobahError: If the job's current state forbids graph changes.
    """
    logger.debug('Adding dependency from {0} to {1}'.format(from_task_name, to_task_name))
    # Graph edits are only legal in certain job states (e.g. not mid-run).
    if not self.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current state: %s" % self.state.status)
    self.add_edge(from_task_name, to_task_name)
    self.commit()
Add a dependency between two tasks.
def register_for_app(
    self, app_label=None, exclude_models=None, exclude_model_classes=None
):
    """Register every model of ``app_label`` except the excluded ones.

    A model is skipped when its ``label_lower`` is listed in
    ``exclude_models`` or its class subclasses any of
    ``exclude_model_classes``.
    """
    exclude_models = exclude_models or []
    app_config = django_apps.get_app_config(app_label)
    models = []
    for model in app_config.get_models():
        label = model._meta.label_lower
        excluded = label in exclude_models or (
            exclude_model_classes and issubclass(model, exclude_model_classes)
        )
        if not excluded:
            models.append(label)
    self.register(models)
Registers all models for this app_label.
def make_urls_hyperlinks(text: str) -> str:
    """Convert plain-text URLs and e-mail addresses in *text* to HTML links.

    :param text: Input text.
    :return: Text with ``<a href=...>`` anchors substituted in.

    NOTE: as in the original, a URL/address occurring twice may be replaced
    inside an already-generated anchor (see the StackOverflow discussion
    referenced in the module docs).
    """
    # The URL pattern was truncated to a bare `r` in the source; restored from
    # the StackOverflow answer the module cites (question 1071191).
    find_url = r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)'
    replace_url = r'<a href="\1">\1</a>'
    find_email = re.compile(r'([.\w\-]+@(\w[\w\-]+\.)+[\w\-]+)')
    replace_email = r'<a href="mailto:\1">\1</a>'
    text = re.sub(find_url, replace_url, text)
    text = re.sub(find_email, replace_email, text)
    return text
Adds hyperlinks to text that appears to contain URLs. See - http://stackoverflow.com/questions/1071191 - ... except that double-replaces everything; e.g. try with ``text = "me@somewhere.com me@somewhere.com"`` - http://stackp.online.fr/?p=19
def generate_exports():
    """Build shell ``export`` statements for every installed genome.

    Each genome yields a variable named after it (runs of non-word characters
    replaced by ``_``, upper-cased) pointing at the genome's filename.

    :return: list of ``export NAME=value`` strings.
    """
    env = []
    for name in list_installed_genomes():
        try:
            g = Genome(name)
            env_name = re.sub(r'[^\w]+', "_", name).upper()
            env.append("export {}={}".format(env_name, g.filename))
        except Exception:
            # Best-effort: skip genomes that fail to load. Narrowed from a
            # bare except, which would also swallow KeyboardInterrupt and
            # SystemExit.
            pass
    return env
Generate and return export commands for setting environment variables.
def unstaged():
    """Return a list of unstaged files in the project repository.

    Returns:
        list[str]: Paths whose worktree copy differs from the index
        (porcelain lines whose first status column is blank).
    """
    with conf.within_proj_dir():
        status = shell.run(
            'git status --porcelain',
            capture=True,
            never_pretend=True
        ).stdout
    results = []
    # git always emits '\n' line endings regardless of platform, so split on
    # universal newlines rather than os.linesep (which is '\r\n' on Windows
    # and would leave the whole output as a single "line" there).
    for file_status in status.splitlines():
        if file_status.strip() and file_status[0] == ' ':
            results.append(file_status[3:].strip())
    return results
Return a list of unstaged files in the project repository. Returns: list[str]: The list of files not tracked by project git repo.
def _saveState(self, path):
    """Snapshot the current state as a new session and persist it.

    :param path: Destination path handed to ``self._save``.
    """
    # Register the current state as a session, then save it keyed by the
    # (stringified) session count.
    self.addSession()
    self._save(str(self.n_sessions), path)
save current state and add a new state
def del_kwnkb(mapper, connection, target):
    """Delete the taxonomy file backing a knowledge base, if one exists.

    Only knowledge bases of type 'taxonomy' keep an on-disk file, so other
    types are ignored.
    """
    if target.kbtype != KnwKB.KNWKB_TYPES['taxonomy']:
        return
    filename = target.get_filename()
    if os.path.isfile(filename):
        os.remove(filename)
Remove taxonomy file.
def worker_stop(obj, worker_ids):
    """Interactively stop running workers after a confirmation prompt.

    :param obj: Context object; ``obj['config']`` is forwarded to
        ``stop_worker``.
    :param worker_ids: IDs of workers to stop; an empty sequence means
        "stop all workers" (None is passed downstream).
    """
    if len(worker_ids) == 0:
        msg = 'Would you like to stop all workers?'
    else:
        msg = '\n{}\n\n{}'.format('\n'.join(worker_ids),
                                  'Would you like to stop these workers?')
    # abort=True makes click raise Abort (and exit) when the user declines.
    if click.confirm(msg, default=True, abort=True):
        stop_worker(obj['config'],
                    worker_ids=list(worker_ids) if len(worker_ids) > 0 else None)
Stop running workers. \b WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all.
def _iter_grouped(self):
    """Yield each group of the wrapped object as a subset selected along the
    group dimension (one yield per entry of ``self._group_indices``)."""
    for indices in self._group_indices:
        yield self._obj.isel(**{self._group_dim: indices})
Iterate over each element in this group
def compute_groups_matrix(groups):
    """Generate a factor-to-group membership matrix.

    Computes a k-by-g 0/1 matrix (k variables, g unique groups) where entry
    (i, j) is 1 iff variable i belongs to group j, plus the list of unique
    group names in first-appearance order.

    Arguments
    ---------
    groups : list
        Group name for each variable.

    Returns
    -------
    tuple (ndarray, list)
        The membership matrix and the unique group names, or None when
        ``groups`` is empty/falsy.
    """
    if not groups:
        return None

    num_vars = len(groups)
    # OrderedDict.fromkeys preserves first-appearance order of group names.
    unique_group_names = list(OrderedDict.fromkeys(groups))
    number_of_groups = len(unique_group_names)
    indices = {name: i for i, name in enumerate(unique_group_names)}

    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement (it is what np.int aliased).
    output = np.zeros((num_vars, number_of_groups), dtype=int)
    for parameter_row, group_membership in enumerate(groups):
        output[parameter_row, indices[group_membership]] = 1
    return output, unique_group_names
Generate matrix which notes factor membership of groups Computes a k-by-g matrix which notes factor membership of groups where: k is the number of variables (factors) g is the number of groups Also returns a g-length list of unique group_names whose positions correspond to the order of groups in the k-by-g matrix Arguments --------- groups : list Group names corresponding to each variable Returns ------- tuple containing group matrix assigning parameters to groups and a list of unique group names
def _calculate_expires(self):
    """Recompute the session expiry as now (UTC) plus the configured timeout."""
    # Cleared first -- presumably so a failure below leaves no stale expiry
    # behind; confirm against the backend client's semantics.
    self._backend_client.expires = None
    now = datetime.utcnow()
    self._backend_client.expires = now + timedelta(seconds=self._config.timeout)
Calculates the session expiry using the timeout
def getShocks(self):
    """Determine which employed agents become unemployed this period.

    Unemployment is absorbing here: only agents with eStateNow == 1.0 can
    change state, via one Bernoulli draw each with probability
    ``self.UnempPrb``.
    """
    employed = self.eStateNow == 1.0
    N = int(np.sum(employed))
    # Seeded from the model RNG so the simulation stays reproducible.
    newly_unemployed = drawBernoulli(N, p=self.UnempPrb, seed=self.RNG.randint(0, 2**31 - 1))
    # 1 - draw: a "success" in the draw flips the agent to unemployed (0.0).
    self.eStateNow[employed] = 1.0 - newly_unemployed
Determine which agents switch from employment to unemployment. All unemployed agents remain unemployed until death. Parameters ---------- None Returns ------- None
def remove_prefix(self, prefix):
    """Remove *prefix* from this set. No-op when the prefix is absent.

    If the removed prefix was the namespace's preferred one, promote an
    arbitrary remaining prefix (or None) in its place.
    """
    if prefix not in self.__prefix_map:
        return
    info = self.__lookup_prefix(prefix)
    info.prefixes.discard(prefix)
    del self.__prefix_map[prefix]
    # Re-elect a preferred prefix if the one just removed held that role.
    if info.preferred_prefix == prefix:
        info.preferred_prefix = next(iter(info.prefixes), None)
Removes prefix from this set. This is a no-op if the prefix doesn't exist in it.
def create_alert_policy(self, policy_name):
    """Create a NewRelic alert policy with PER_POLICY incident preference.

    :param policy_name: Name for the new policy.
    :return: The numeric ID of the created policy.
    :raises requests.HTTPError: If the API call fails (via raise_for_status).
    """
    policy_data = {
        'policy': {
            'incident_preference': 'PER_POLICY',
            'name': policy_name
        }
    }
    create_policy = requests.post(
        'https://api.newrelic.com/v2/alerts_policies.json',
        headers=self.auth_header,
        data=json.dumps(policy_data))
    create_policy.raise_for_status()
    policy_id = create_policy.json()['policy']['id']
    # Keep the local alert cache in sync with the server-side change.
    self.refresh_all_alerts()
    return policy_id
Creates an alert policy in NewRelic
def units_convertible(units1, units2, reftimeistime=True):
    """Return True if the unit string *units1* can be converted to *units2*.

    :param str units1: A string representing the units.
    :param str units2: A string representing the units.
    :param reftimeistime: Unused in this body; presumably kept for backward
        compatibility with existing callers -- confirm before removing.
    """
    try:
        u1 = Unit(units1)
        u2 = Unit(units2)
    except ValueError:
        # Either string failed to parse as a Unit at all.
        return False
    return u1.is_convertible(u2)
Return True if a Unit representing the string units1 can be converted to a Unit representing the string units2, else False. :param str units1: A string representing the units :param str units2: A string representing the units
def rows(self, *args) -> List[List[Well]]:
    """Navigate this labware by row.

    With no arguments, returns every row ordered by row name. Integer args
    select rows by position; string args select rows by name. Args are
    deprecated and must all be of one type, otherwise TypeError is raised.

    :return: A list of row lists.
    """
    row_dict = self._create_indexed_dictionary(group=1)
    ordered_keys = sorted(row_dict)
    if not args:
        return [row_dict[key] for key in ordered_keys]
    first = args[0]
    if isinstance(first, int):
        return [row_dict[ordered_keys[idx]] for idx in args]
    if isinstance(first, str):
        return [row_dict[name] for name in args]
    raise TypeError
Accessor function used to navigate through a labware by row. With indexing one can treat it as a typical python nested list. To access row A for example, simply write: labware.rows()[0]. This will output ['A1', 'A2', 'A3', 'A4'...] Note that this method takes args for backward-compatibility, but use of args is deprecated and will be removed in future versions. Args can be either strings or integers, but must all be the same type (e.g.: `self.rows(1, 4, 8)` or `self.rows('A', 'B')`, but `self.rows('A', 4)` is invalid. :return: A list of row lists
def ssh(ctx, cluster_id, key_file):
    """SSH into the master node of an EMR cluster.

    :param ctx: Click context carrying AWS_PROFILE_NAME in ``ctx.obj``.
    :param cluster_id: EMR cluster ID to look up.
    :param key_file: Path to the private key for the ``hadoop`` user.
    """
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    client = session.client('emr')
    result = client.describe_cluster(ClusterId=cluster_id)
    target_dns = result['Cluster']['MasterPublicDnsName']
    # Skip host-key prompts and keep the session alive while idle.
    ssh_options = '-o StrictHostKeyChecking=no -o ServerAliveInterval=10'
    # NOTE(review): shell=True with interpolated values; key_file comes from
    # the CLI user so risk is limited, but a list argv would be safer.
    cmd = 'ssh {ssh_options} -i {key_file} hadoop@{target_dns}'.format(
        ssh_options=ssh_options, key_file=key_file, target_dns=target_dns)
    subprocess.call(cmd, shell=True)
SSH login to EMR master node
def trimsquants(self, col: str, sup: float):
    """Remove rows above the superior quantile of *col* from the dataframe.

    :param col: column name
    :param sup: superior quantile (e.g. 0.99)
    """
    try:
        # Delegate to the shared trimmer; None means "no inferior bound".
        self.df = self._trimquants(col, None, sup)
    except Exception as e:
        self.err(e, self.trimsquants, "Can not trim superior quantiles")
Remove superior quantiles from the dataframe :param col: column name :type col: str :param sup: superior quantile :type sup: float :example: ``ds.trimsquants("Col 1", 0.99)``
def plot_estimates(positions, estimates):
    """Plot density and probability estimates against paragraph position.

    Parameters
    ----------
    positions : iterable of float
        Positions at which the estimates were computed (shared x axis).
    estimates : sequence of (sequence of float)
        One curve per entry, paired positionally with ESTIMATE_TITLES.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure, one subplot per estimate.
    """
    x = list(positions)
    # Width grows with the number of side-by-side subplots.
    fig = plt.figure(figsize=(SUBPLOT_WIDTH * len(estimates), FIGURE_HEIGHT))
    for i, (title, y) in enumerate(zip(ESTIMATE_TITLES, estimates)):
        ax = fig.add_subplot(1, len(estimates), i + 1)
        ax.plot(x, y, linewidth=LINE_WIDTH, c=LINE_COLOR)
        ax.title.set_text(title)
        # Positions are fractions of a document, hence the fixed [0, 1] range.
        ax.set_xlim(0, 1)
        ax.set_xlabel("position")
        ax.set_ylabel("$\\hat P$")
        ax.grid()
    return fig
Plots density, and probability estimates. Parameters ---------- positions : iterable of float Paragraph positions for which densities, and probabilities were estimated. estimates : six-tuple of (sequence of float) Estimates of P(relevant), p(position), p(position | relevant), P(position, relevant), and P(relevant | position). Returns ------- matplotlib.figure.Figure The plotted figure.
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
    r"""Generate a natural-neighbor interpolation of points onto a regular grid.

    Parameters
    ----------
    xp, yp : (N,) ndarray
        Coordinates of the observations.
    variable : (N,) ndarray
        Observation values at each (xp[i], yp[i]).
    grid_x, grid_y : ndarray
        Meshgrid arrays describing the target grid.

    Returns
    -------
    ndarray
        Interpolated values reshaped to ``grid_x.shape``.

    See Also
    --------
    natural_neighbor_to_points
    """
    # The original body began with a stray bare `r` expression (residue of a
    # detached raw-string docstring); it would raise NameError at call time
    # and is removed here.
    points_obs = list(zip(xp, yp))
    points_grid = generate_grid_coords(grid_x, grid_y)
    img = natural_neighbor_to_points(points_obs, variable, points_grid)
    return img.reshape(grid_x.shape)
r"""Generate a natural neighbor interpolation of the given points to a regular grid. This assigns values to the given grid using the Liang and Hale [Liang2010]_. approach. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations yp: (N, ) ndarray y-coordinates of observations variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]) grid_x: (M, 2) ndarray Meshgrid associated with x dimension grid_y: (M, 2) ndarray Meshgrid associated with y dimension Returns ------- img: (M, N) ndarray Interpolated values on a 2-dimensional grid See Also -------- natural_neighbor_to_points
def get_analysis_data_by_title(self, ar_data, title):
    """Template helper: pick the analysis whose title matches.

    :param ar_data: Dictionary structure as returned by ``_ws_data``.
    :param title: Analysis Service title to look for.
    :return: The first matching analysis dict, or None when absent.
    """
    return next(
        (analysis for analysis in ar_data.get("analyses", [])
         if analysis.get("title") == title),
        None,
    )
A template helper to pick an Analysis identified by the name of the current Analysis Service. ar_data is the dictionary structure which is returned by _ws_data
def first_location_of_minimum(x):
    """Return the first location of the minimum of x, relative to len(x).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: position of the first minimum in [0, 1), or NaN for empty input
    :rtype: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    return np.argmin(x) / len(x) if len(x) > 0 else np.nan
Returns the first location of the minimal value of x. The position is calculated relatively to the length of x. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
def _apply_replacement(error, found_file, file_lines):
    """Apply a single lint replacement and rewrite ``found_file`` in place.

    ``error[1]`` carries ``line`` (1-based) and ``replacement``. Note that
    ``file_lines`` is mutated: the replaced line is updated in the caller's
    list as well, matching the original behavior.
    """
    detail = error[1]
    file_lines[detail.line - 1] = detail.replacement
    found_file.seek(0)
    found_file.write("".join(file_lines))
    found_file.truncate()
Apply a single replacement.
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
    """Create (or fetch a cached) `Elasticsearch` client.

    Clients are cached per (urls, timeout, settings) key; the same arguments
    return the same object. ``force_new=True`` bypasses the cache in both
    directions (no lookup, no store).

    :arg urls: list of Elasticsearch hosts; defaults to DEFAULT_URLS.
    :arg timeout: timeout in seconds.
    :arg force_new: always build a fresh, uncached client.
    :arg settings: extra kwargs for the Elasticsearch constructor.
    :raises DeprecationWarning: if the removed ``hosts`` kwarg is passed.
    """
    urls = urls or DEFAULT_URLS
    # NOTE(review): *raising* DeprecationWarning (rather than warnings.warn)
    # makes `hosts` a hard failure -- confirm callers expect that.
    if 'hosts' in settings:
        raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')
    if not force_new:
        key = _build_key(urls, timeout, **settings)
        if key in _cached_elasticsearch:
            return _cached_elasticsearch[key]
    es = Elasticsearch(urls, timeout=timeout, **settings)
    if not force_new:
        # `key` was computed above; force_new skips both branches together.
        _cached_elasticsearch[key] = es
    return es
Create an elasticsearch `Elasticsearch` object and return it. This will aggressively re-use `Elasticsearch` objects with the following rules: 1. if you pass the same argument values to `get_es()`, then it will return the same `Elasticsearch` object 2. if you pass different argument values to `get_es()`, then it will return different `Elasticsearch` object 3. it caches each `Elasticsearch` object that gets created 4. if you pass in `force_new=True`, then you are guaranteed to get a fresh `Elasticsearch` object AND that object will not be cached :arg urls: list of uris; Elasticsearch hosts to connect to, defaults to ``['http://localhost:9200']`` :arg timeout: int; the timeout in seconds, defaults to 5 :arg force_new: Forces get_es() to generate a new Elasticsearch object rather than pulling it from cache. :arg settings: other settings to pass into Elasticsearch constructor; See `<http://elasticsearch-py.readthedocs.org/>`_ for more details. Examples:: # Returns cached Elasticsearch object es = get_es() # Returns a new Elasticsearch object es = get_es(force_new=True) es = get_es(urls=['localhost']) es = get_es(urls=['localhost:9200'], timeout=10, max_retries=3)
def update_video_image(edx_video_id, course_id, image_data, file_name):
    """Update the image for an existing course video.

    Arguments:
        edx_video_id: Video identifier.
        course_id: Course identifier.
        image_data: Image data to save; may be None (course re-run/export),
            in which case ``file_name`` is used as-is downstream.
        file_name: Image file name.

    Returns:
        The course video image URL.

    Raises:
        ValVideoNotFoundError: If no CourseVideo matches the given IDs.
    """
    try:
        course_video = CourseVideo.objects.select_related('video').get(
            course_id=course_id, video__edx_video_id=edx_video_id
        )
    except ObjectDoesNotExist:
        error_message = u'VAL: CourseVideo not found for edx_video_id: {0} and course_id: {1}'.format(
            edx_video_id, course_id
        )
        raise ValVideoNotFoundError(error_message)
    video_image, _ = VideoImage.create_or_update(course_video, file_name, image_data)
    return video_image.image_url()
Update video image for an existing video. NOTE: If `image_data` is None then `file_name` value will be used as it is, otherwise a new file name is constructed based on uuid and extension from `file_name` value. `image_data` will be None in case of course re-run and export. Arguments: image_data (InMemoryUploadedFile): Image data to be saved for a course video. Returns: course video image url Raises: Raises ValVideoNotFoundError if the CourseVideo cannot be retrieved.
def deactivate_(self):
    """Run the pre-deactivation hook, then reset shmem state to None."""
    self.preDeactivate_()
    self.active = False
    # Clear shared-memory related references back to their unset state.
    for attr in ("image_dimensions", "client"):
        setattr(self, attr, None)
Init shmem variables to None
def item_frequency(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
    """Plot an item-frequency chart of a string SArray and return the Plot.

    :param sa: SArray of dtype str to chart.
    :param xlabel: X-axis label; LABEL_DEFAULT picks a sensible default.
    :param ylabel: Y-axis label; LABEL_DEFAULT picks a sensible default.
    :param title: Plot title; LABEL_DEFAULT derives one, None omits it.
    :return: a Plot object wrapping the native plot handle.
    :raises ValueError: if ``sa`` is not an SArray of dtype str.
    """
    if (not isinstance(sa, tc.data_structures.sarray.SArray) or
            sa.dtype != str):
        raise ValueError("turicreate.visualization.item_frequency supports " +
                         "SArrays of dtype str")
    title = _get_title(title)
    plt_ref = tc.extensions.plot_item_frequency(sa, xlabel, ylabel, title)
    return Plot(plt_ref)
Plots an item frequency of the sarray provided as input, and returns the resulting Plot object. The function supports SArrays with dtype str. Parameters ---------- sa : SArray The data to get an item frequency for. Must have dtype str xlabel : str (optional) The text label for the X axis. Defaults to "Values". ylabel : str (optional) The text label for the Y axis. Defaults to "Count". title : str (optional) The title of the plot. Defaults to LABEL_DEFAULT. If the value is LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value is None, the title will be omitted. Otherwise, the string passed in as the title will be used as the plot title. Returns ------- out : Plot A :class: Plot object that is the item frequency plot. Examples -------- Make an item frequency of an SArray. >>> x = turicreate.SArray(['a','ab','acd','ab','a','a','a','ab','cd']) >>> ifplt = turicreate.visualization.item_frequency(x)
def __callback(self, data):
    """Safely invoke the registered message callback with *data*.

    Exceptions raised by the callback are logged, never propagated.

    :param data: Associated stanza.
    """
    method = self.__cb_message
    if method is not None:
        try:
            method(data)
        except Exception as ex:
            _logger.exception("Error calling method: %s", ex)
Safely calls back a method :param data: Associated stanza
def set_value(self, dry_wet: LeakSensorState):
    """Set the state to 1 when *dry_wet* matches this state's type, else 0."""
    # Binary state: "on" only when the reported condition is the condition
    # this state object represents.
    new_value = 1 if dry_wet == self._dry_wet_type else 0
    self._update_subscribers(new_value)
Set the value of the state to dry or wet.
def tt_avg(self, print_output=True, output_file="tt.csv"):
    """Average the term-topic matrix over its third axis.

    :param print_output: when True, also write the result as CSV.
    :param output_file: CSV destination used when print_output is True.
    :return: 2-D array of means.
    """
    averaged = self.tt.mean(axis=2)
    if print_output:
        np.savetxt(output_file, averaged, delimiter=",")
    return averaged
Compute average term-topic matrix, and print to file if print_output=True.
def dilate(self, size):
    """Dilate this region by ``size`` pixels using morphological operators.

    Parameters
    ----------
    size : int
        Size of dilation in pixels; non-positive sizes return self unchanged.
    """
    if size > 0:
        from scipy.ndimage.morphology import binary_dilation
        # Structuring element side length: covers `size` pixels either way.
        size = (size * 2) + 1
        coords = self.coordinates
        # Work in a padded local array so the dilation can grow past the
        # region's bounding box.
        tmp = zeros(self.extent + size * 2)
        coords = (coords - self.bbox[0:len(self.center)] + size)
        tmp[coords.T.tolist()] = 1
        tmp = binary_dilation(tmp, ones((size, size)))
        # Shift back to absolute coordinates; drop anything that fell off the
        # negative edge of the image.
        new = asarray(where(tmp)).T + self.bbox[0:len(self.center)] - size
        new = [c for c in new if all(c >= 0)]
    else:
        return self
    return one(new)
Dilate a region using morphological operators. Parameters ---------- size : int Size of dilation in pixels
def find_module(self, fullname, path=None):
    """Search for a Coconut source file matching *fullname* and compile it.

    Always returns None: this finder never supplies a loader itself, it only
    triggers compilation so the regular import machinery can pick up the
    compiled output.
    """
    search_roots = [""] + list(sys.path)
    if fullname.startswith("."):
        # Relative imports need an explicit package path to anchor on.
        if path is None:
            return None
        fullname = fullname[1:]
        search_roots.insert(0, path)
    relative = os.path.join(*fullname.split("."))
    for root in search_roots:
        candidate = os.path.join(root, relative)
        source_file = candidate + self.ext
        package_init = os.path.join(candidate, "__init__" + self.ext)
        if os.path.exists(source_file):
            self.run_compiler(source_file)
            return None
        if os.path.exists(package_init):
            # A package directory: compile the whole directory.
            self.run_compiler(candidate)
            return None
    return None
Searches for a Coconut file of the given name and compiles it.
def do_static_merge(cls, c_source, c_target):
    """Fold cluster *c_source* into *c_target* during the static-merge phase.

    By this point per-instance bookkeeping (self.INSTANCES / self.clusters)
    is no longer maintained, so only the class-level registries are updated.
    """
    c_target.extend(c_source)
    c_source.parent = c_target.parent
    cls.CLUSTERS.remove(c_source)
    # Re-point every mention of the absorbed cluster at the surviving one.
    for m in c_source.mentions:
        cls.MENTION_TO_CLUSTER[m] = c_target
By the time we're just folding in clusters, there's no need to maintain self.INSTANCES and self.clusters, so we just call this method
def prepare(self, context):
    """Populate the context with `request`, `response`, and `path`.

    :param context: The per-request context being initialized.
    """
    if __debug__:
        log.debug("Preparing request context.", extra=dict(request=id(context)))
    context.request = Request(context.environ)
    context.response = Response(request=context.request)
    context.environ['web.base'] = context.request.script_name
    context.request.remainder = context.request.path_info.split('/')
    # A leading '/' yields an empty first segment; drop it so dispatch only
    # sees real path components.
    if context.request.remainder and not context.request.remainder[0]:
        del context.request.remainder[0]
    context.path = Bread()
Add the usual suspects to the context. This adds `request`, `response`, and `path` to the `RequestContext` instance.
def css(app, env):
    """Copy the bolditalic CSS into the build's _static dir and register it.

    :param app: Sphinx application context.
    :param env: Sphinx environment context (unused in this body).
    """
    srcdir = os.path.abspath(os.path.dirname(__file__))
    cssfile = 'bolditalic.css'
    csspath = os.path.join(srcdir, cssfile)
    buildpath = os.path.join(app.outdir, '_static')
    try:
        os.makedirs(buildpath)
    except OSError:
        # An already-existing directory is fine; anything else is real.
        if not os.path.isdir(buildpath):
            raise
    copy(csspath, buildpath)
    # NOTE(review): add_stylesheet was renamed add_css_file in Sphinx 1.8 and
    # removed in 4.0 -- confirm the supported Sphinx version range.
    app.add_stylesheet(cssfile)
    return
Add bolditalic CSS. :param app: Sphinx application context. :param env: Sphinx environment context.
def submit_sms_conversion(self, message_id, delivered=True, timestamp=None):
    """Notify Nexmo whether an SMS was successfully delivered.

    :param message_id: The `message-id` str returned by send_message.
    :param delivered: Whether the message was delivered.
    :param timestamp: datetime of arrival; defaults to now in UTC.
    :return: The parsed server response (b'OK' on success).
    """
    params = {
        "message-id": message_id,
        "delivered": delivered,
        "timestamp": timestamp or datetime.now(pytz.utc),
    }
    # The API expects the timestamp as a formatted string, not a datetime.
    _format_date_param(params, "timestamp")
    return self.post(self.api_host, "/conversions/sms", params)
Notify Nexmo that an SMS was successfully received. :param message_id: The `message-id` str returned by the send_message call. :param delivered: A `bool` indicating that the message was or was not successfully delivered. :param timestamp: A `datetime` object containing the time the SMS arrived. :return: The parsed response from the server. On success, the bytestring b'OK'
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
    """Return a dictionary of requested quantiles from an array of samples.

    :Arguments:
        x : Numpy array
            An array containing MCMC samples (2-D input is sorted column-wise
            via a transposed sort).
        qlist : tuple or list
            Desired quantiles, as percentages.

    Returns None (after printing a message) when x has too few elements.
    """
    # Copy so the caller's array is not reordered by the sort below.
    x = x.copy()
    if x.ndim > 1:
        sx = sort(x.T).T
    else:
        sx = sort(x)
    try:
        # Empirical quantile: element at floor(len * q/100) of the sorted data.
        quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
        return dict(zip(qlist, quants))
    except IndexError:
        print_("Too few elements for quantile calculation")
Returns a dictionary of requested quantiles from array :Arguments: x : Numpy array An array containing MCMC samples qlist : tuple or list A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
def build_job(name=None, parameters=None):
    """Initiate a build for the provided Jenkins job.

    :param name: The name of the job to build (required).
    :param parameters: Parameters to send to the job.
    :return: True if successful.
    :raises SaltInvocationError: if ``name`` is missing.
    :raises CommandExecutionError: if the job does not exist or the build
        request fails.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.build_job jobname
    """
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')
    server = _connect()
    if not job_exists(name):
        raise CommandExecutionError('Job \'{0}\' does not exist.'.format(name))
    try:
        server.build_job(name, parameters)
    except jenkins.JenkinsException as err:
        raise CommandExecutionError(
            'Encountered error building job \'{0}\': {1}'.format(name, err)
        )
    return True
Initiate a build for the provided job. :param name: The name of the job is check if it exists. :param parameters: Parameters to send to the job. :return: True is successful, otherwise raise an exception. CLI Example: .. code-block:: bash salt '*' jenkins.build_job jobname
def get_generation_code(self, **gencode):
    """Generate python code that can re-create this gate.

    Keyword overrides in ``gencode`` take precedence over the gate's own
    attributes (name, region, gate_type, verts, channels).

    :return: A one-line python statement as a string.
    """
    channels, verts = self.coordinates
    channels = ', '.join(["'{}'".format(ch) for ch in channels])
    verts = list(verts)
    # Unwrap single-vertex (and then single-coordinate) nesting so the
    # generated literal stays readable.
    if len(verts) == 1:
        verts = verts[0]
        if len(verts) == 1:
            verts = verts[0]
    verts = apply_format(verts, '{:.3e}')
    gencode.setdefault('name', self.name)
    gencode.setdefault('region', self.region)
    gencode.setdefault('gate_type', self._gencode_gate_class)
    gencode.setdefault('verts', verts)
    gencode.setdefault('channels', channels)
    format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')"
    return format_string.format(**gencode)
Generates python code that can create the gate.
def is_ip(string):
    """Return True if *string* is a dotted-quad IPv4 address, else False.

    :type string: str
    :param string: Any string.
    :rtype: bool
    :return: True if the string is an IPv4 address, False otherwise.
    """
    # fullmatch (not match) so trailing garbage like '1.2.3.4.5' or
    # '1.2.3.4abc' is rejected instead of matching on the prefix alone.
    mo = re.fullmatch(r'(\d+)\.(\d+)\.(\d+)\.(\d+)', string)
    if mo is None:
        return False
    # Each octet must be in [0, 255]; a range comparison beats building a
    # list(range(256)) per octet.
    return all(0 <= int(group) <= 255 for group in mo.groups())
Returns True if the given string is an IPv4 address, False otherwise. :type string: string :param string: Any string. :rtype: bool :return: True if the string is an IP address, False otherwise.
def _desy_bookkeeping2marc(self, key, value):
    """Populate the ``595_D`` MARC field from DESY bookkeeping data.

    Entries carrying an ``identifier`` contribute nothing to ``595_D``;
    instead they are appended to the ``035`` field as a side effect on self.
    """
    if 'identifier' not in value:
        return {
            'a': value.get('expert'),
            'd': value.get('date'),
            's': value.get('status'),
        }
    self.setdefault('035', []).append({
        '9': 'DESY',
        'z': value['identifier']
    })
Populate the ``595_D`` MARC field. Also populates the ``035`` MARC field through side effects.
def canonicalize_half_turns(
    half_turns: Union[sympy.Basic, float]
) -> Union[sympy.Basic, float]:
    """Wrap a half-turn count into the canonical range (-1, +1].

    Symbolic (sympy) values are returned untouched.
    """
    if isinstance(half_turns, sympy.Basic):
        return half_turns
    wrapped = half_turns % 2
    return wrapped - 2 if wrapped > 1 else wrapped
Wraps the input into the range (-1, +1].
def sar(patch, cols, splits, divs, ear=False):
    """Calculate an empirical species-area (or endemics-area) relationship.

    :param patch: Patch object holding the census data.
    :param cols: Column-mapping string (must include x_col and y_col).
    :param splits: Split specification string.
    :param divs: Division specification, e.g. "1,1; 2,2".
    :param ear: If True, compute an endemics-area relationship instead.
    :return: Whatever ``_sar_ear_inner`` returns for the selected y-function.
    """
    def sar_y_func(spatial_table, all_spp):
        # SAR: mean species richness over the cells of a division.
        return np.mean(spatial_table['n_spp'])

    def ear_y_func(spatial_table, all_spp):
        # EAR: fraction of species found in exactly one cell (endemics).
        endemic_counter = 0
        for spp in all_spp:
            spp_in_cell = [spp in x for x in spatial_table['spp_set']]
            spp_n_cells = np.sum(spp_in_cell)
            if spp_n_cells == 1:
                endemic_counter += 1
        n_cells = len(spatial_table)
        return endemic_counter / n_cells

    if ear:
        y_func = ear_y_func
    else:
        y_func = sar_y_func

    return _sar_ear_inner(patch, cols, splits, divs, y_func)
Calculates an empirical species area or endemics area relationship Parameters ---------- {0} divs : str Description of how to divide x_col and y_col. See notes. ear : bool If True, calculates an endemics area relationship Returns ------- {1} Result has 5 columns; div, x, and y; that give the ID for the division given as an argument, fractional area, and the mean species richness at that division. Notes ----- {2} For the SAR and EAR, cols must also contain x_col and y_col, giving the x and y dimensions along which to grid the patch. {3} {4} Examples -------- {5} >>> # Get the SAR at the full area (1,1), 1 x 2 division, >>> # 2 x 1 division, 2 x 2 division, 2 x 4 division, 4 x 2 division, and >>> # 4 x 4 division >>> sar = meco.empirical.sar(pat, cols='spp_col:spp; count_col:count; x_col:row; y_col:column', splits="", divs="1,1; 1,2; 2,1; 2,2; 2,4; 4,2; 4,4") >>> sar[0][1] div n_individs n_spp x y 0 1,1 2445.0000 24.0000 16 24.0000 1 1,2 1222.5000 18.5000 8 18.5000 2 2,1 1222.5000 17.0000 8 17.0000 3 2,2 611.2500 13.5000 4 13.5000 4 2,4 305.6250 10.1250 2 10.1250 5 4,2 305.6250 10.5000 2 10.5000 6 4,4 152.8125 7.5625 1 7.5625 The column div gives the divisions specified in the function call. The column n_individs specifies the average number of individuals across the cells made from the given division. n_spp gives the average species across the cells made from the given division. x gives the absolute area of a cell for the given division. y gives the same information as n_spp and is included for easy plotting. See http://www.macroeco.org/tutorial_macroeco.html for additional examples and explanation
def bundle_view(parser, token):
    """Template tag: render a string version of a bundle view.

    Requires two arguments, bundle and view_name, and also accepts the
    trailing ``as varname`` syntax.

    :raises TemplateSyntaxError: when fewer than two arguments are given.
    :return: A ViewNode that renders via the bundle's get_string_from_view.
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments"
                                  " bundle and view_name" % bits[0])
    bundle = parser.compile_filter(bits[1])
    viewname = parser.compile_filter(bits[2])
    asvar = None
    bits = bits[2:]
    # Handle the optional trailing "... as varname".
    if len(bits) >= 2 and bits[-2] == 'as':
        asvar = bits[-1]
        bits = bits[:-2]
    return ViewNode(bundle, viewname, asvar)
Returns an string version of a bundle view. This is done by calling the `get_string_from_view` method of the provided bundle. This tag expects that the request object as well as the the original url_params are available in the context. Requires two arguments bundle and the name of the view you want to render. In addition, this tag also accepts the 'as xxx' syntax. Example: {% bundle_url bundle main_list as html %}
def __unpack_tgz(self, filename):
    """Extract a `.tar.gz`/`.tgz` archive into the plugin directory.

    :param filename: Path to the compressed plugin file.
    :raises TarError: if the file is not a valid gzipped tarball.
    """
    if isinstance(filename, string_types) and self.__isValidTGZ(filename) and tarfile.is_tarfile(filename):
        with tarfile.open(filename, mode='r:gz') as t:
            # NOTE(review): extracting member names without sanitising them is
            # vulnerable to path traversal if archives can be untrusted.
            for name in t.getnames():
                t.extract(name, self.plugin_abspath)
    else:
        raise TarError("Invalid Plugin Compressed File")
Unpack the `tar.gz`, `tgz` compressed file format
def _create_cipher(self, password, salt, IV):
    """Build the AES-CFB cipher used to encrypt or decrypt a payload.

    The key is derived from *password* and *salt* via PBKDF2 and truncated to
    ``self.block_size`` bytes.
    """
    # Imported lazily so pycrypto is only required when ciphers are used.
    from Crypto.Protocol.KDF import PBKDF2
    from Crypto.Cipher import AES
    pw = PBKDF2(password, salt, dkLen=self.block_size)
    return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
Create the cipher object to encrypt or decrypt a payload.
def reMutualReceptions(self):
    """Return ruler/exaltation mutual receptions with all other planets.

    :return: dict mapping planet ID -> reception info, containing only the
        planets that actually have a mutual reception with this object.
    """
    planets = copy(const.LIST_SEVEN_PLANETS)
    planets.remove(self.obj.id)  # don't test the object against itself
    mrs = {}
    for ID in planets:
        mr = self.dyn.reMutualReceptions(self.obj.id, ID)
        if mr:
            mrs[ID] = mr
    return mrs
Returns all mutual receptions with the object and other planets, indexed by planet ID. It only includes ruler and exaltation receptions.
def is_twss(self, phrase):
    """Classify *phrase*: True when it reads as a double entendre."""
    return self.classifier.classify(self.extract_features(phrase))
The magic function- this accepts a phrase and tells you if it classifies as an entendre
def _set_bounds(self, bounds):
    """Set this parameter's boundaries from a (min_value, max_value) pair."""
    low, high = bounds
    # Reset both bounds before assigning -- presumably so a validating setter
    # never compares against a stale opposite bound; confirm against the
    # property implementations.
    self.min_value = None
    self.max_value = None
    self.min_value = low
    self.max_value = high
Sets the boundaries for this parameter to min_value and max_value
def find(self, location):
    """Find the specified location in the store.

    @param location: The I{location} part of a URL.
    @type location: str
    @return: An input stream over the stored document.
    @rtype: StringIO
    @raise Exception: when the location is not in the document store.
    """
    try:
        content = self.store[location]
    except KeyError:
        # Narrowed from a bare except, and replaces the Python-2-only
        # `raise Exception, reason` syntax (a SyntaxError on Python 3).
        raise Exception('location "%s" not in document store' % location)
    return StringIO(content)
Find the specified location in the store. @param location: The I{location} part of a URL. @type location: str @return: An input stream to the document. @rtype: StringIO
def lock(self):
    """Mark the current job as "already running" via a cache flag.

    :return: True if the lock was acquired, False if another run holds it.
    """
    if self.cache.get(self.lock_name):
        # Another run already holds the flag.
        return False
    else:
        # NOTE(review): get-then-set is not atomic; two workers could both
        # acquire under race -- confirm whether cache.add is needed. The
        # timeout makes the lock self-expiring.
        self.cache.set(self.lock_name, timezone.now(), self.timeout)
        return True
This method sets a cache variable to mark current job as "already running".
def abort_submission(namespace, workspace, submission_id):
    """Abort a running submission in a FireCloud workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): workspace name
        submission_id (str): submission's unique identifier

    Swagger:
        https://api.firecloud.org/#!/Submissions/deleteSubmission
    """
    uri = "workspaces/{0}/{1}/submissions/{2}".format(namespace, workspace, submission_id)
    return __delete(uri)
Abort running job in a workspace. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name submission_id (str): Submission's unique identifier Swagger: https://api.firecloud.org/#!/Submissions/deleteSubmission
def deep_merge(base, extra):
    """Recursively merge *extra* into *base*, mutating *base* in place.

    A value of None in *extra* deletes the corresponding key from *base*;
    nested dicts are merged rather than replaced; any other value overrides.

    :param base: Dictionary merged into (modified in place).
    :param extra: Dictionary whose keys take precedence; None is a no-op.
    """
    if extra is None:
        return
    for key, new_value in extra.items():
        current = base.get(key)
        if new_value is None:
            # Explicit None means "remove this key".
            base.pop(key, None)
        elif isinstance(current, dict) and isinstance(new_value, dict):
            deep_merge(current, new_value)
        else:
            base[key] = new_value
Deeply merge two dictionaries, overriding existing keys in the base. :param base: The base dictionary which will be merged into. :param extra: The dictionary to merge into the base. Keys from this dictionary will take precedence.
def create(self):
    """Deploy an EKS cluster configured for JupyterHub deployments.

    Runs the provisioning steps in order with a tqdm progress bar:
    IAM role, VPC, cluster, node group, spot nodes, utilities.
    """
    # (method, args, kwargs) triples so the loop can report uniform progress.
    steps = [
        (self.create_role, (), {}),
        (self.create_vpc, (), {}),
        (self.create_cluster, (), {}),
        (self.create_node_group, (), {}),
        (self.create_spot_nodes, (), {}),
        (self.create_utilities, (), {}),
    ]
    for step in tqdm.tqdm(steps, ncols=70):
        method, args, kwargs = step
        method(*args, **kwargs)
Deploy a cluster on Amazon's EKS Service configured for Jupyterhub Deployments.
def get_tree(self, process_name):
    """Return the tree managing time-periods for *process_name*.

    Returns None implicitly when no registered tree contains the process.
    """
    # Only the tree objects matter here, so iterate values() directly.
    for tree in self.trees.values():
        if process_name in tree:
            return tree
return tree that is managing time-periods for given process
def get_fw_policy(self, policy_id):
    """Return the firewall policy for *policy_id*, or None on failure.

    Errors from neutron are logged rather than propagated.
    """
    policy = None
    try:
        policy = self.neutronclient.show_firewall_policy(policy_id)
    except Exception as exc:
        # NOTE(review): "plcy" typo lives in the log message itself (runtime
        # string, left untouched here).
        LOG.error("Failed to get firewall plcy for id %(id)s "
                  "Exc %(exc)s", {'id': policy_id, 'exc': str(exc)})
    return policy
Return the firewall policy, given its ID.
def in_file(self, fn: str) -> Iterator[Statement]:
    """Yield all statements belonging to file *fn* (nothing for unknown files)."""
    yield from self.__file_to_statements.get(fn, [])
Returns an iterator over all of the statements belonging to a file.
def send_response(self, code, message=None):
    """Log the request, queue the status line, and add standard headers.

    In addition to the response line, the Server and Date headers are sent.
    Note this override (unlike the stdlib base) does not end the headers.
    """
    self.log_request(code)
    self.send_response_only(code, message)
    self.send_header('Server', self.version_string())
    self.send_header('Date', self.date_time_string())
Add the response header to the headers buffer and log the response code. Also send two standard headers with the server software version and the current date.
def create_driver(self):
    """Create a selenium driver using specified config properties.

    :returns: a new selenium driver
    :rtype: selenium.webdriver.remote.webdriver.WebDriver
    :raises: whatever the underlying driver factory raised (re-raised
        after logging).
    """
    driver_type = self.config.get('Driver', 'type')
    try:
        if self.config.getboolean_optional('Server', 'enabled'):
            self.logger.info("Creating remote driver (type = %s)", driver_type)
            driver = self._create_remote_driver()
        else:
            self.logger.info("Creating local driver (type = %s)", driver_type)
            driver = self._create_local_driver()
    except Exception as exc:
        # Log the failure with a human-readable message, then re-raise.
        self.logger.error("%s driver can not be launched: %s",
                          driver_type.capitalize(),
                          get_error_message_from_exception(exc))
        raise
    return driver
Create a selenium driver using specified config properties :returns: a new selenium driver :rtype: selenium.webdriver.remote.webdriver.WebDriver
def _tp__get_typed_properties(self): try: return tuple(getattr(self, p) for p in self._tp__typed_properties) except AttributeError: raise NotImplementedError
Return a tuple of typed attrs that can be used for comparisons. Raises: NotImplementedError: Raised if this class was mixed into a class that was not created by _AnnotatedObjectMeta.
def _traverse_list(self, input_list, resolution_data, resolver_method): for index, value in enumerate(input_list): input_list[index] = self._traverse(value, resolution_data, resolver_method) return input_list
Traverse a list to resolve intrinsic functions on every element :param input_list: List of input :param resolution_data: Data that the `resolver_method` needs to operate :param resolver_method: Method that can actually resolve an intrinsic function, if it detects one :return: Modified list with intrinsic functions resolved
def lex(filename):
    """Generate (token, line, quoted) tuples from an nginx config file."""
    with io.open(filename, mode='r', encoding='utf-8') as handle:
        balanced = _balance_braces(_lex_file_object(handle), filename)
        for token, line, quoted in balanced:
            yield token, line, quoted
Generates tokens from an nginx config file
def set_double_stack(socket_obj, double_stack=True):
    """Set up the IPv6 double stack according to the operating system.

    :param socket_obj: A socket object
    :param double_stack: If True, use the double stack, else IPv6 only
    :raise AttributeError: Python or system doesn't support V6
    :raise socket.error: Error setting the double-stack option
    """
    try:
        opt_ipv6_only = socket.IPV6_V6ONLY
    except AttributeError:
        # Fall back to the platform's raw option number when Python
        # doesn't expose the constant.
        if os.name == "nt":
            opt_ipv6_only = 27
        elif platform.system() == "Linux":
            opt_ipv6_only = 26
        else:
            raise
    flag = 0 if double_stack else 1
    socket_obj.setsockopt(ipproto_ipv6(), opt_ipv6_only, flag)
Sets up the IPv6 double stack according to the operating system :param socket_obj: A socket object :param double_stack: If True, use the double stack, else only support IPv6 :raise AttributeError: Python or system doesn't support V6 :raise socket.error: Error setting up the double stack value
def _parse_scram_response(response): return dict(item.split(b"=", 1) for item in response.split(b","))
Split a scram response into key, value pairs.
def deprecation_warning(func, name):
    """Add a deprecation warning to a distribution constructor.

    Wraps *func* so each call logs that the old name *name* has been
    renamed to the class name of the constructed instance.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        instance = func(*args, **kwargs)
        message = (
            "Distribution `chaospy.{}` has been renamed to "
            "`chaospy.{}` and will be deprecated next release."
        ).format(name, instance.__class__.__name__)
        logging.getLogger(__name__).warning(message)
        return instance
    return wrapper
Add a deprecation warning do each distribution.
def _put_overlay(self, overlay_name, overlay): if not isinstance(overlay, dict): raise TypeError("Overlay must be dict") if set(self._identifiers()) != set(overlay.keys()): raise ValueError("Overlay keys must be dataset identifiers") self._storage_broker.put_overlay(overlay_name, overlay)
Store overlay so that it is accessible by the given name. :param overlay_name: name of the overlay :param overlay: overlay must be a dictionary where the keys are identifiers in the dataset :raises: TypeError if the overlay is not a dictionary, ValueError if identifiers in overlay and dataset do not match
def enqueue(self, item_type, item):
    """Append a new data item to the bucket for *item_type*.

    Thread-safe: the enqueue lock is held for the append.
    """
    with self.enlock:
        bucket = self.queue[item_type]
        bucket.append(item)
Append a new data item to the queue bucket for the given item type (thread-safe: the enqueue lock is held for the append).
def db(self, connection_string=None):
    """Return (and cache) the SQLAlchemy session for this request.

    :param connection_string: optional override; defaults to the
        ``db`` entry of the request settings.
    :return: a cached SQLAlchemy session for the connection string.
    """
    connection_string = connection_string or self.settings["db"]
    if not hasattr(self, "_db_conns"):
        self._db_conns = {}
    # PEP 8: `x not in y`, not `not x in y`.
    if connection_string not in self._db_conns:
        self._db_conns[connection_string] = oz.sqlalchemy.session(
            connection_string=connection_string)
    return self._db_conns[connection_string]
Gets the SQLALchemy session for this request
def print_statistics(self):
    """Print the Q1, Q2, and cR statistics for the variogram fit.

    NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is
    as small as possible.
    """
    for label, value in (("Q1", self.Q1), ("Q2", self.Q2), ("cR", self.cR)):
        print(label, "=", value)
Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible.
def determine_struct_tree_subtype(self, data_type, obj):
    """Determine which enumerated subtype of a struct tree `obj` is.

    Searches the JSON-object-compatible dict `obj` using the data type
    definition's tag-to-subtype mapping.

    :param data_type: validator whose ``definition`` carries
        ``_tag_to_subtype_``, ``_is_catch_all_`` and ``__name__``.
    :param obj: dict decoded from JSON; must carry a string '.tag' key.
    :return: the matching leaf subtype validator, or `data_type` itself
        when the tag is unknown, strict mode is off, and the definition
        is a catch-all.
    :raises bv.ValidationError: missing or non-string '.tag', tag mapped
        to a non-leaf subtype, or unknown tag (in strict mode or when
        the definition is not a catch-all).
    """
    if '.tag' not in obj:
        raise bv.ValidationError("missing '.tag' key")
    if not isinstance(obj['.tag'], six.string_types):
        raise bv.ValidationError('expected string, got %s' % bv.generic_type_name(obj['.tag']), parent='.tag')
    # The mapping is keyed on tuples of tags; only single-tag lookups
    # are built here.
    full_tags_tuple = (obj['.tag'],)
    if full_tags_tuple in data_type.definition._tag_to_subtype_:
        subtype = data_type.definition._tag_to_subtype_[full_tags_tuple]
        # A StructTree match is an interior node, not a concrete leaf.
        if isinstance(subtype, bv.StructTree):
            raise bv.ValidationError("tag '%s' refers to non-leaf subtype" % ('.'.join(full_tags_tuple)))
        return subtype
    else:
        if self.strict:
            raise bv.ValidationError("unknown subtype '%s'" % '.'.join(full_tags_tuple))
        else:
            # Non-strict: unknown tags fall back to the catch-all type
            # when the definition allows it.
            if data_type.definition._is_catch_all_:
                return data_type
            else:
                raise bv.ValidationError(
                    "unknown subtype '%s' and '%s' is not a catch-all" % ('.'.join(full_tags_tuple), data_type.definition.__name__))
Searches through the JSON-object-compatible dict using the data type definition to determine which of the enumerated subtypes `obj` is.
def l2_regression_loss(y, target, name=None):
    """Calculate the square root of the SSE between y and target.

    Args:
      y: the calculated values.
      target: the desired values.
      name: the name for this op, defaults to l2_regression.
    Returns:
      A tensorflow op.
    """
    with tf.name_scope(name, 'l2_regression', [y, target]) as scope:
        y_tensor = tf.convert_to_tensor(y, name='y')
        target_tensor = tf.convert_to_tensor(target, name='target')
        squared = l2_regression_sq_loss(y_tensor, target_tensor, name=scope)
        return tf.sqrt(squared)
Calculates the square root of the SSE between y and target. Args: y: the calculated values. target: the desired values. name: the name for this op, defaults to l2_regression Returns: A tensorflow op.
def _readuint(self, length, start):
    """Read `length` bits starting at bit `start` and interpret them as
    an unsigned int.

    :param length: number of bits to read; must be non-zero.
    :param start: bit offset (relative to this bitstring's own offset).
    :raises InterpretError: if length is zero.
    """
    if not length:
        raise InterpretError("Cannot interpret a zero length bitstring "
                             "as an integer.")
    offset = self._offset
    # Byte range covering the requested bit span.
    startbyte = (start + offset) // 8
    endbyte = (start + offset + length - 1) // 8
    # Hex-encode the covered bytes and parse them as one big integer.
    b = binascii.hexlify(bytes(self._datastore.getbyteslice(startbyte, endbyte + 1)))
    assert b
    i = int(b, 16)
    # Drop trailing bits of the last byte that fall outside the span.
    final_bits = 8 - ((start + offset + length) % 8)
    if final_bits != 8:
        i >>= final_bits
    # Mask off any leading bits of the first byte.
    i &= (1 << length) - 1
    return i
Read bits and interpret as an unsigned int.
def stream(self):
    """Return (lazily creating) the stream holding the log content.

    On first access a named temporary file is created and the remote
    file content is fetched into it; fetch errors are deliberately
    ignored (best effort) so an empty stream may be returned.

    @return: a NamedTemporaryFile object (delete=False).
    """
    if self._stream is None:
        self._stream = tempfile.NamedTemporaryFile(delete=False)
        try:
            self._stream.write(self.client.open(self.filename, view='data').read())
        except Exception:
            # Was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; catching Exception keeps the best-effort
            # behavior without hiding interpreter-exit signals.
            pass
    return self._stream
The stream to write the log content to. @return: a named temporary file stream holding the fetched log content.
def delete_host_from_segment(ipaddress, networkaddress, auth, url):
    """Look up the host record for *ipaddress* on *networkaddress* and
    remove it from its scope."""
    remove_scope_ip(get_host_id(ipaddress, networkaddress, auth, url),
                    auth.creds, auth.url)
Look up the host record for the given IP address on the network segment and remove it from its scope.
def intersect_regions(flist):
    """Construct the intersection of all regions named in *flist*.

    Parameters
    ----------
    flist : list
        A list of region filenames (at least two).

    Returns
    -------
    region : :class:`AegeanTools.regions.Region`
        The intersection of all regions, possibly empty.

    Raises
    ------
    Exception
        If fewer than two filenames are supplied.
    """
    if len(flist) < 2:
        raise Exception("Require at least two regions to perform intersection")
    result = Region.load(flist[0])
    # Load all remaining regions up front, then fold them in.
    others = [Region.load(fname) for fname in flist[1:]]
    for other in others:
        result.intersect(other)
    return result
Construct a region which is the intersection of all regions described in the given list of file names. Parameters ---------- flist : list A list of region filenames. Returns ------- region : :class:`AegeanTools.regions.Region` The intersection of all regions, possibly empty.
def set_property(self, name, value):
    """Set property *name* to *value*, translating to the dbus type of
    the property's current value.

    :param str name: property name in the object's dictionary.
    :param value: new value to assign.
    :raises KeyError: if the property key is not found.
    :raises dbus.Exception: org.bluez.Error.DoesNotExist /
        org.bluez.Error.InvalidArguments
    """
    current_type = type(self.get_property(name))
    dbus_value = translate_to_dbus_type(current_type, value)
    self._interface.SetProperty(name, dbus_value)
Helper to set a property value by name, translating to correct dbus type See also :py:meth:`get_property` :param str name: The property name in the object's dictionary whose value shall be set. :param value: Properties new value to be assigned. :return: :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments
def process_like(proc):
    """Make an exact clone of a process, including state and all
    subprocesses, with an updated creation date.

    :param proc: process to clone
    :return: new process identical to the given one, except for
        ``creation_date`` which is set to the current local time.
    """
    clone = copy.deepcopy(proc)
    now = time.localtime()
    clone.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z", now)
    return clone
Make an exact clone of a process, including state and all subprocesses. The creation date is updated. :param proc: process :type proc: :class:`~climlab.process.process.Process` :return: new process identical to the given process :rtype: :class:`~climlab.process.process.Process` :Example: :: >>> import climlab >>> from climlab.process.process import process_like >>> model = climlab.EBM() >>> model.subprocess.keys() ['diffusion', 'LW', 'albedo', 'insolation'] >>> albedo = model.subprocess['albedo'] >>> albedo_copy = process_like(albedo) >>> albedo.creation_date 'Thu, 24 Mar 2016 01:32:25 +0000' >>> albedo_copy.creation_date 'Thu, 24 Mar 2016 01:33:29 +0000'
def add_string_pairs_from_button_element(xib_file, results, button, special_ui_components_prefix):
    """Add (key, comment) string pairs for each state of a button xib element.

    Args:
        xib_file (str): Path to the xib file (not used in this function's body).
        results (list): The list the (key, comment) tuples are appended to.
        button (element): The button element from the xib to extract pairs from.
        special_ui_components_prefix (str): Custom prefix for internationalized
            components to allow.
    """
    button_entry_comment = extract_element_internationalized_comment(button)
    if button_entry_comment is None:
        # Button is not marked for internationalization; nothing to add.
        return
    for state in button.getElementsByTagName('state'):
        state_name = state.attributes['key'].value
        state_entry_comment = button_entry_comment + " - " + state_name + " state of button"
        # Try the attributed-string path first; fall back to plain titles.
        if not add_string_pairs_from_attributed_ui_element(results, state, state_entry_comment):
            try:
                # Plain title stored as an XML attribute.
                button_entry_key = state.attributes['title'].value
            except KeyError:
                try:
                    # Or as a nested <string> child node.
                    button_entry_key = state.getElementsByTagName('string')[0].firstChild.nodeValue
                except Exception:
                    # This state carries no title at all; skip it.
                    continue
            results.append((button_entry_key, state_entry_comment))
    warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix)
Adds strings pairs from a button xib element. Args: xib_file (str): Path to the xib file. results (list): The list to add the results to. button(element): The button element from the xib, to extract the string pairs from. special_ui_components_prefix(str): A custom prefix for internationalize component to allow (default is only JT)
def set_role_config_groups(self, role_config_group_refs):
    """Update the role config groups in a host template.

    @param role_config_group_refs: List of role config group refs.
    @return: An ApiHostTemplate object.
    """
    template = copy.copy(self)
    template.roleConfigGroupRefs = role_config_group_refs
    return self._do_update(template)
Updates the role config groups in a host template. @param role_config_group_refs: List of role config group refs. @return: An ApiHostTemplate object.
def load_global_conf(cls, global_configuration):
    """Apply global Alignak configuration to a class.

    For every global property flagged as managed and carrying a
    `class_inherit` list, copy its value onto `cls` — either under the
    same name (when the mapped name is None) or under the mapped alias.

    :param cls: parent object (class receiving the inherited attributes)
    :param global_configuration: current configuration object providing
        the values
    :return: None
    """
    logger.debug("Propagate global parameter for %s:", cls)
    for prop, entry in global_configuration.properties.items():
        # Skip properties that are unmanaged or not marked for inheritance.
        if not entry.managed or not getattr(entry, 'class_inherit'):
            continue
        for (cls_dest, change_name) in entry.class_inherit:
            if cls_dest == cls:
                value = getattr(global_configuration, prop)
                logger.debug("- global parameter %s=%s -> %s=%s", prop, getattr(global_configuration, prop), change_name, value)
                if change_name is None:
                    # Keep the original property name on the class.
                    setattr(cls, prop, value)
                else:
                    # Store under the class-specific alias.
                    setattr(cls, change_name, value)
Apply global Alignak configuration. Some objects inherit some properties from the global configuration if they do not define their own value. E.g. the global 'accept_passive_service_checks' is inherited by the services as 'accept_passive_checks' :param cls: parent object :type cls: object :param global_configuration: current object (child) :type global_configuration: object :return: None
def inter(a, b):
    """Intersect two sets of any data type to form a third set.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html

    :param a: First input set.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param b: Second input set.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: Intersection of a and b.
    :rtype: spiceypy.utils.support_types.SpiceCell
    :raises NotImplementedError: for unsupported cell data types.
    """
    assert isinstance(a, stypes.SpiceCell)
    assert isinstance(b, stypes.SpiceCell)
    assert a.dtype == b.dtype
    # Fixed: compare dtype with == (value equality) instead of `is`
    # (identity), which only worked by accident of CPython's small-int
    # caching and is implementation-defined.
    if a.dtype == 0:
        c = stypes.SPICECHAR_CELL(max(a.size, b.size), max(a.length, b.length))
    elif a.dtype == 1:
        c = stypes.SPICEDOUBLE_CELL(max(a.size, b.size))
    elif a.dtype == 2:
        c = stypes.SPICEINT_CELL(max(a.size, b.size))
    else:
        raise NotImplementedError
    libspice.inter_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
    return c
Intersect two sets of any data type to form a third set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html :param a: First input set. :type a: spiceypy.utils.support_types.SpiceCell :param b: Second input set. :type b: spiceypy.utils.support_types.SpiceCell :return: Intersection of a and b. :rtype: spiceypy.utils.support_types.SpiceCell
def dummy_func(arg1, arg2, arg3=None, arg4=None, arg5=None, **kwargs):
    """Test function for kwargs parsing.

    The mutable default arguments (``[1, 2, 3]`` and ``{}``) were
    replaced with ``None`` sentinels to avoid the shared-mutable-default
    pitfall; the effective defaults are unchanged.

    :param kwargs: may contain 'foo' (default None), 'bar' (default 4,
        popped) and must contain 'foo2'.
    :return: concatenation of str(foo), str(bar) and str(foo2).
    :raises KeyError: if 'foo2' is not supplied in kwargs.
    """
    if arg4 is None:
        arg4 = [1, 2, 3]
    if arg5 is None:
        arg5 = {}
    foo = kwargs.get('foo', None)
    bar = kwargs.pop('bar', 4)
    foo2 = kwargs['foo2']
    foobar = str(foo) + str(bar) + str(foo2)
    return foobar
Test function for kwargs parsing.
def is_contextfree(self):
    """Return True iff the grammar is context-free.

    Context-free here means every rule's left-hand side is a single
    symbol, and that symbol is a nonterminal.
    """
    return all(len(lhs) == 1 and lhs[0] in self.nonterminals
               for lhs, _ in self.rules)
Returns True iff the grammar is context-free.
def resend_presence(self):
    """Re-send the currently configured presence.

    :return: Stanza token of the presence stanza, or None if the stream
        is not established.
    """
    if not self.client.established:
        return None
    return self.client.enqueue(self.make_stanza())
Re-send the currently configured presence. :return: Stanza token of the presence stanza or :data:`None` if the stream is not established. :rtype: :class:`~.stream.StanzaToken` .. note:: :meth:`set_presence` automatically broadcasts the new presence if any of the parameters changed.
def get_productivity_stats(self):
    """Return the user's productivity stats.

    :return: A JSON-decoded representation of the user's productivity
        stats, as returned by the API.
    """
    stats_response = API.get_productivity_stats(self.api_token)
    _fail_if_contains_errors(stats_response)
    return stats_response.json()
Return the user's productivity stats. :return: A JSON-encoded representation of the user's productivity stats. :rtype: A JSON-encoded object. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> stats = user.get_productivity_stats() >>> print(stats) {"karma_last_update": 50.0, "karma_trend": "up", ... }
def advance(self):
    """Advance the base iterator and publish the element to every
    constituent deque."""
    element = next(self._iterable)
    for consumer in self._deques:
        consumer.append(element)
Advance the base iterator, publish to constituent iterators.
def _get_licences():
    """Print every known license name and code to stdout.

    Renamed the loop variable: the previous ``license`` shadowed the
    ``license`` builtin.
    """
    for license_code, license_name in _LICENSES.items():
        print("{license_name} [{license_code}]".format(
            license_name=license_name, license_code=license_code))
Lists all the licenses on command line
def create_static_profile_path(client_id):
    """Ensure the profile path folder for a client exists.

    @param client_id: ID of client user
    @return: string profile path
    """
    profile_path = os.path.join(STATIC_FILES_PATH, str(client_id))
    # exist_ok avoids the check-then-create race (TOCTOU) of the previous
    # os.path.exists() guard when two requests create the same profile.
    os.makedirs(profile_path, exist_ok=True)
    return profile_path
Create a profile path folder if not exist @param client_id: ID of client user @return string profile path
def is_connected(self):
    """Check whether the USB device is still connected.

    Uses a zero-length non-blocking read when the device is open,
    otherwise re-enumerates by vendor/product id and path.
    """
    if self._is_open:
        # hid_read_timeout returns -1 once the device has gone away.
        err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0)
        return err != -1
    matches = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path)
    return len(matches) != 0
Checks if the USB device is still connected
def pixels(self, value: int) -> 'Gap':
    """Set the gap size in pixels and return self for chaining."""
    raise_not_number(value)
    self.gap = f'{value}px'
    return self
Set the gap size in pixels.
def _read_as_int(self, addr, numBytes): buf = self.read_register(addr, numBytes) if len(buf) >= 4: return struct.unpack_from("<i", buf)[0] else: rtn = 0 for i, byte in enumerate(buf): rtn |= byte << 8 * i return rtn
Convenience method. Oftentimes we need to read a range of registers to represent an int. This method will automatically read @numBytes registers starting at @addr and convert the array into an int.
def _get_key_value_config(self):
    """Retrieve the first remote configuration and cache it in
    ``self._kvstore``.

    :raises errors.RemoteConfigError: when there are no remote providers.

    NOTE(review): only the FIRST provider is ever consulted — the loop
    returns unconditionally after one iteration, and an exception from
    ``_get_remote_config`` propagates rather than falling through to the
    next provider. If the intent was "try each until one succeeds", a
    try/except around the fetch is missing — TODO confirm.
    """
    for rp in self._remote_providers:
        val = self._get_remote_config(rp)
        self._kvstore = val
        return None
    raise errors.RemoteConfigError("No Files Found")
Retrieves the first found remote configuration.
def _load_resource(self):
    """Load resource data from the server and populate this object's
    fields.

    Builds the URL from ``self._url`` plus url-encoded ``self._params``,
    issues the request using the HTTP method named by the resource meta,
    and deserializes the body into field values.

    :raises NotFoundException: on a 404 response.
    :raises HTTPException: on any other non-2xx/3xx status code.
    """
    url = self._url
    if self._params:
        url += '?{0}'.format(six.moves.urllib_parse.urlencode(self._params))
    # Dispatch on the configured HTTP verb (e.g. 'GET' -> session.get).
    r = getattr(self._session, self._meta.get_method.lower())(url)
    if r.status_code == 404:
        raise NotFoundException('Server returned 404 Not Found for the URL {0}'.format(self._url))
    elif not 200 <= r.status_code < 400:
        # Anything outside 2xx/3xx is treated as a hard failure.
        raise HTTPException('Server returned {0} ({1})'.format(r.status_code, r.reason), r)
    data = self._meta.deserializer.to_dict(r.text)
    self.populate_field_values(data)
Load resource data from server
def graphs(self):
    """Return a dummy container whose attributes are all the graphs
    found in graphs.py, each initialized with this instance as its only
    argument."""
    container = Dummy()
    for graph_name in graphs.__all__:
        graph_cls = getattr(graphs, graph_name)
        setattr(container, graph_cls.short_name, graph_cls(self))
    return container
Sorry for the black magic. The result is an object whose attributes are all the graphs found in graphs.py initialized with this instance as only argument.
async def remove_key(request: web.Request) -> web.Response:
    """Remove a stored wifi key file.

    ```
    DELETE /wifi/keys/:id
    -> 200 OK {message: 'Key file keyfile.pem deleted'}
    -> 404    {message: 'No such key file ...'}
    ```
    """
    keys_dir = CONFIG['wifi_keys_dir']
    available_keys = os.listdir(keys_dir)
    requested_hash = request.match_info['key_uuid']
    if requested_hash not in available_keys:
        message = 'No such key file {}'.format(requested_hash)
        return web.json_response({'message': message}, status=404)
    key_path = os.path.join(keys_dir, requested_hash)
    # Each key directory holds exactly the stored key file; report its name.
    name = os.listdir(key_path)[0]
    shutil.rmtree(key_path)
    return web.json_response(
        {'message': 'Key file {} deleted'.format(name)}, status=200)
Remove a key. ``` DELETE /wifi/keys/:id -> 200 OK {message: 'Removed key keyfile.pem'} ```