code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def fetch_blob(cls, username, password, multifactor_password=None, client_id=None):
    """Fetch the vault blob, e.g. so it can be stored locally.

    :param username: account username
    :param password: account master password
    :param multifactor_password: optional one-time/MFA code
    :param client_id: optional trusted-client identifier
    :return: the raw blob returned by the server
    """
    session = fetcher.login(username, password, multifactor_password, client_id)
    try:
        blob = fetcher.fetch(session)
    finally:
        # Always release the server-side session, even when the fetch
        # fails, so errors do not leak logged-in sessions.
        fetcher.logout(session)
    return blob
Just fetches the blob, could be used to store it locally
def create_gre_tunnel_no_encryption(cls, name, local_endpoint, remote_endpoint, mtu=0, pmtu_discovery=True, ttl=0, enabled=True, comment=None):
    """Create an unencrypted GRE tunnel.

    Delegates to `create_gre_tunnel_mode` with no policy VPN attached;
    see that method for descriptions of the parameters.
    """
    tunnel_options = dict(
        policy_vpn=None,  # no policy VPN -> no encryption
        mtu=mtu,
        pmtu_discovery=pmtu_discovery,
        ttl=ttl,
        enabled=enabled,
        comment=comment,
    )
    return cls.create_gre_tunnel_mode(
        name, local_endpoint, remote_endpoint, **tunnel_options)
Create a GRE Tunnel with no encryption. See `create_gre_tunnel_mode` for constructor descriptions.
def _prune_penalty_box(self): """Restores clients that have reconnected. This function should be called first for every public method. """ added = False for client in self.penalty_box.get(): log.info("Client %r is back up.", client) self.active_clients.append(client) added = True if added: self._sort_clients()
Restores clients that have reconnected. This function should be called first for every public method.
def get_signature_candidate(lines):
    """Return the trailing lines that could hold a signature.

    Candidate lines:

    * are among the last SIGNATURE_MAX_LINES non-empty lines,
    * never include the first line,
    * are shorter than TOO_LONG_SIGNATURE_LINE,
    * include at most one line starting with dashes.
    """
    non_empty = [idx for idx, ln in enumerate(lines) if ln.strip()]

    # An empty or single-line message has no room for a signature.
    if len(non_empty) < 2:
        return []

    # Skip the first line, then keep only the last SIGNATURE_MAX_LINES.
    candidate = non_empty[1:][-SIGNATURE_MAX_LINES:]

    markers = _mark_candidate_indexes(lines, candidate)
    candidate = _process_marked_candidate_indexes(candidate, markers)

    # Translate the surviving indexes back into actual lines.
    return lines[candidate[0]:] if candidate else []
Return lines that could hold signature The lines should: * be among last SIGNATURE_MAX_LINES non-empty lines. * not include first line * be shorter than TOO_LONG_SIGNATURE_LINE * not include more than one line that starts with dashes
def check_node_parent(
    self, resource_id, new_parent_id, db_session=None, *args, **kwargs
):
    """Check whether ``new_parent_id`` is a valid destination for the node.

    :param resource_id:
    :param new_parent_id:
    :param db_session:
    :return: whatever the underlying service reports
    """
    return self.service.check_node_parent(
        *args,
        resource_id=resource_id,
        new_parent_id=new_parent_id,
        db_session=db_session,
        **kwargs
    )
Checks if parent destination is valid for node :param resource_id: :param new_parent_id: :param db_session: :return:
def get_subclass_tree(cls, ensure_unique=True):
    """Return every direct and indirect subclass of ``cls``.

    :param cls: class whose descendants are collected
    :param ensure_unique: when True, de-duplicate the result
    """
    # type.__subclasses__ is called unbound because cls.__subclasses__()
    # fails on classes that inherit from ``type`` itself.
    found = []
    for child in type.__subclasses__(cls):
        found.append(child)
        found.extend(get_subclass_tree(child, ensure_unique))
    if ensure_unique:
        return list(set(found))
    return found
Returns all subclasses (direct and recursive) of cls.
def get_json_response_object(self, datatable):
    """Build the JSON-serializable dict for a dataTables.js AJAX response.

    The value names follow the old C-style jargon used in dataTables.js:
    "s~" for strings, "i~" for integers, "a~" for arrays, "aa~" for
    array-of-arrays and, in places, "ao~" for array-of-objects (an object
    being a javascript dictionary).
    """
    # Populate explicitly (get_records() would do it implicitly) so that
    # 'total_initial_record_count' and 'unpaged_record_count' are both
    # available alongside the records.
    datatable.populate_records()

    draw = getattr(self.request, self.request.method).get('draw', None)
    if draw is not None:
        draw = escape_uri_path(draw)

    data = []
    for record in datatable.get_records():
        # pop() deliberately strips the bookkeeping keys out of the record
        # before the remainder is copied into the row dict.
        extras = {
            'DT_RowId': record.pop('pk'),
            'DT_RowData': record.pop('_extra_data'),
        }
        data.append(dict(record, **extras))

    return {
        'draw': draw,
        'recordsFiltered': datatable.unpaged_record_count,
        'recordsTotal': datatable.total_initial_record_count,
        'data': data,
    }
Returns the JSON-compatible dictionary that will be serialized for an AJAX response. The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means "array of arrays". In some instances, the author uses "ao~" for "array of objects", an object being a javascript dictionary.
def get_callback_function(setting_name, default=None):
    """Resolve a callback function from a setting name.

    The setting may be unset (``default`` is returned), already callable
    (returned as-is), or a dotted import string (imported). Anything else
    fails the import, causing ImportError to be raised.

    :param setting_name: The name of the setting to resolve a callback from.
    :type setting_name: string (``str``/``unicode``)
    :param default: The default to return if setting isn't populated.
    :type default: ``bool``
    :returns: The resolved callback function (if any).
    :type: ``callable``
    """
    candidate = getattr(settings, setting_name, None)
    if not candidate:
        return default
    if callable(candidate):
        return candidate
    if isinstance(candidate, str):
        candidate = import_string(candidate)
    if not callable(candidate):
        raise ImproperlyConfigured("{name} must be callable.".format(name=setting_name))
    return candidate
Resolve a callback function based on a setting name. If the setting value isn't set, default is returned. If the setting value is already a callable function, that value is used - If the setting value is a string, an attempt is made to import it. Anything else will result in a failed import causing ImportError to be raised. :param setting_name: The name of the setting to resolve a callback from. :type setting_name: string (``str``/``unicode``) :param default: The default to return if setting isn't populated. :type default: ``bool`` :returns: The resolved callback function (if any). :type: ``callable``
def add_load(self, lv_load):
    """Add an LV load to ``_loads`` and the grid graph if not already present.

    Parameters
    ----------
    lv_load :
        Load to register; only accepted when it is an ``LVLoadDing0``
        instance (per the type check below).
    """
    already_known = lv_load in self._loads
    if not already_known and isinstance(lv_load, LVLoadDing0):
        self._loads.append(lv_load)
        self.graph_add_node(lv_load)
Adds a LV load to _loads and grid graph if not already existing Parameters ---------- lv_load : Description #TODO
def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison:
    """Compare two paths.

    :param left: The left side or "before" entity.
    :param right: The right side or "after" entity.
    :return: A comparison detailing what changed from left to right.
    """

    def to_entity(value: Union[str, pathlib.Path, _Entity]) -> _Entity:
        """Coerce a path string or Path into an entity; entities pass through."""
        if isinstance(value, str):
            value = pathlib.Path(value)
        if isinstance(value, pathlib.Path):
            return _Entity.from_path(value)
        return value

    return Comparison.compare(to_entity(left), to_entity(right))
Compare two paths. :param left: The left side or "before" entity. :param right: The right side or "after" entity. :return: A comparison details what has changed from the left side to the right side.
def _split_input_slice(batch_size, work_load_list): """Get input slice from the input shape. Parameters ---------- batch_size : int The number of samples in a mini-batch. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as `ctx`. Returns ------- slices : list of slice The split slices to get a specific slice. Raises ------ ValueError In case of too many splits, leading to some empty slices. """ total_work_load = sum(work_load_list) batch_num_list = [round(work_load * batch_size / total_work_load) for work_load in work_load_list] batch_num_sum = sum(batch_num_list) if batch_num_sum < batch_size: batch_num_list[-1] += batch_size - batch_num_sum slices = [] end = 0 for batch_num in batch_num_list: begin = int(min((end, batch_size))) end = int(min((begin + batch_num, batch_size))) if begin >= end: raise ValueError('Too many slices. Some splits are empty.') slices.append(slice(begin, end)) return slices
Get input slice from the input shape. Parameters ---------- batch_size : int The number of samples in a mini-batch. work_load_list : list of float or int, optional The list of work load for different devices, in the same order as `ctx`. Returns ------- slices : list of slice The split slices to get a specific slice. Raises ------ ValueError In case of too many splits, leading to some empty slices.
def get_column_at_index(self, index):
    """Return the table column at the given zero-based position.

    :param int index: the zero-indexed position of the column in the table
    :return: the column object, or None when the index is missing or the
        request fails
    """
    if index is None:
        return None
    endpoint = self.build_url(self._endpoints.get('get_column_index'))
    response = self.session.post(endpoint, data={'index': index})
    if not response:
        return None
    payload = {self._cloud_data_key: response.json()}
    return self.column_constructor(parent=self, **payload)
Returns a table column by it's index :param int index: the zero-indexed position of the column in the table
def upload_file(request):
    '''Upload a Zip File Containing a single file containing media.'''
    if request.method != 'POST':
        # Initial page load: show an empty upload form.
        return render(request, 'django_admin/upload_media.html',
                      {'form': MediaForm()})

    form = MediaForm(request.POST, request.FILES)
    if not form.is_valid():
        # Redisplay the bound form with its validation errors.
        return render(request, 'django_admin/upload_media.html',
                      {'form': form})

    context_dict = {}
    try:
        context_dict['copied_files'] = update_media_file(
            request.FILES['zip_file'])
    except Exception as e:
        context_dict['error_message'] = e.message
    return render(request, 'django_admin/transfer_media_message.html',
                  context_dict)
Upload a Zip File Containing a single file containing media.
def standard_block(self, bytes_):
    """Emit a standard block of bytes.

    For TAP files this is just the 16-bit (low + high byte) length
    followed by the content: the data bytes plus one XOR checksum byte.
    """
    checksum = 0
    for byte_ in bytes_:
        checksum ^= (int(byte_) & 0xFF)
    # Length word counts the payload plus the trailing checksum byte.
    self.out(self.LH(len(bytes_) + 1))
    for byte_ in bytes_:
        self.out(byte_)
    self.out(checksum)
Adds a standard block of bytes. For TAP files, it's just the Low + Hi byte plus the content (here, the bytes plus the checksum)
def apply_plugin_settings(self, options):
    """Apply plugin settings read from the configuration file."""
    scheme_opt = 'color_scheme_name'
    font_opt = 'plugin_font'
    wrap_opt = 'wrap'
    lines_opt = 'line_numbers'

    color_scheme = self.get_color_scheme()
    font = self.get_plugin_font()
    wrap = self.get_option(wrap_opt)
    self.wrap_action.setChecked(wrap)
    line_numbers = self.get_option(lines_opt)

    for editor in self.editors:
        if font_opt in options:
            # A font change may carry a color-scheme change along with it.
            scheme = color_scheme if scheme_opt in options else None
            editor.set_font(font, scheme)
        elif scheme_opt in options:
            editor.set_color_scheme(color_scheme)
        if wrap_opt in options:
            editor.toggle_wrap_mode(wrap)
        if lines_opt in options:
            editor.toggle_line_numbers(linenumbers=line_numbers,
                                       markers=False)
Apply configuration file's plugin settings
def _allowAnotherAt(cls, parent): """You can only create one of these pages per site.""" site = parent.get_site() if site is None: return False return not cls.peers().descendant_of(site.root_page).exists()
You can only create one of these pages per site.
def wheel_dist_name(self):
    """Return the distribution's full name with ``-`` replaced by ``_``."""
    parts = [
        safer_name(self.distribution.get_name()),
        safer_version(self.distribution.get_version()),
    ]
    if self.build_number:
        parts.append(self.build_number)
    return '-'.join(parts)
Return distribution full name with - replaced with _
def cPrint(self, level, message, *args, **kw):
    """Print a message to the console.

    Prints only if level <= self.consolePrinterVerbosity. Printing with
    level 0 is equivalent to using a print statement and should normally
    be avoided.

    :param level: (int) urgency of the message, lower means more urgent
        (level 0 messages are the most urgent and are always printed)
    :param message: (string) possibly with format specifiers
    :param args: values for any format specifiers in message
    :param kw: ``newline`` is the only keyword argument. True (default)
        if a newline should be printed
    :raises KeyError: if any keyword other than ``newline`` is passed
    """
    if level > self.consolePrinterVerbosity:
        return

    if len(kw) > 1:
        raise KeyError("Invalid keywords for cPrint: %s" % str(kw.keys()))

    newline = kw.get("newline", True)
    if len(kw) == 1 and 'newline' not in kw:
        raise KeyError("Invalid keyword for cPrint: %s" % list(kw)[0])

    # Ported from Python 2 print statements: a trailing comma suppressed
    # the newline, which print(..., end='') reproduces.
    end = "\n" if newline else ""
    if args:
        print(message % args, end=end)
    else:
        print(message, end=end)
Print a message to the console. Prints only if level <= self.consolePrinterVerbosity Printing with level 0 is equivalent to using a print statement, and should normally be avoided. :param level: (int) indicating the urgency of the message with lower values meaning more urgent (messages at level 0 are the most urgent and are always printed) :param message: (string) possibly with format specifiers :param args: specifies the values for any format specifiers in message :param kw: newline is the only keyword argument. True (default) if a newline should be printed
def get_filtered_register_graph(register_uri, g):
    """Append a filtered version (label, comment, contained item classes &
    subregisters only) of each register to the Register of Registers graph.

    :param register_uri: the public URI of the register
    :type register_uri: string
    :param g: the rdf graph to append registers to
    :type g: Graph
    :return: True if ok, else False
    :rtype: boolean
    """
    import requests
    from pyldapi.exceptions import ViewsFormatsException
    assert isinstance(g, Graph)
    plain_uri = register_uri.replace('?_view=reg&_format=text/turtle', '')
    logging.debug('assessing register candidate ' + plain_uri)
    try:
        r = requests.get(register_uri)
        # Was a stray debug print(); routed through logging instead so
        # library code does not write to stdout.
        logging.debug('getting ' + register_uri)
    except ViewsFormatsException:
        # Ignore these exceptions as they are just a result of requesting
        # a view/format combo of something like a page.
        return False
    if r.status_code == 200:
        return _filter_register_graph(plain_uri, r, g)
    logging.debug('{} returns no HTTP 200'.format(register_uri))
    return False
Gets a filtered version (label, comment, contained item classes & subregisters only) of each register for the Register of Registers :param register_uri: the public URI of the register :type register_uri: string :param g: the rdf graph to append registers to :type g: Graph :return: True if ok, else False :rtype: boolean
def _combine(self, applied, shortcut=False):
    """Recombine the applied objects like the original.

    ``applied`` is an iterable of per-group results; they are concatenated
    back along the group dimension, the group coordinate is reattached,
    and empty-group/unstack bookkeeping is undone.
    """
    # Peek at one element (without consuming the iterator) to infer how
    # to concatenate the results.
    applied_example, applied = peek_at(applied)
    coord, dim, positions = self._infer_concat_args(applied_example)
    if shortcut:
        combined = self._concat_shortcut(applied, dim, positions)
    else:
        combined = concat(applied, dim)
        combined = _maybe_reorder(combined, dim, positions)

    if isinstance(combined, type(self._obj)):
        # only restore dimension order for arrays
        combined = self._restore_dim_order(combined)
    if coord is not None:
        # Reattach the group coordinate; the shortcut path writes to the
        # private ``_coords`` mapping directly.
        if shortcut:
            combined._coords[coord.name] = as_variable(coord)
        else:
            combined.coords[coord.name] = coord
    combined = self._maybe_restore_empty_groups(combined)
    combined = self._maybe_unstack(combined)
    return combined
Recombine the applied objects like the original.
def state_province_region(self, value=None):
    """Corresponds to IDD Field `state_province_region`.

    Args:
        value (str): value for IDD Field `state_province_region`.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    # A missing value is stored as-is, unchecked.
    if value is None:
        self._state_province_region = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type str '
            'for field `state_province_region`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma '
                         'for field `state_province_region`')
    self._state_province_region = value
Corresponds to IDD Field `state_province_region` Args: value (str): value for IDD Field `state_province_region` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def reboot_node(node_id, profile, **libcloud_kwargs):
    '''
    Reboot a node in the cloud

    :param node_id: Unique ID of the node to reboot
    :type node_id: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's reboot_node method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_compute.reboot_node as-2346 profile1
    '''
    driver = _get_driver(profile=profile)
    target = _get_by_id(driver.list_nodes(**libcloud_kwargs), node_id)
    return driver.reboot_node(target, **libcloud_kwargs)
Reboot a node in the cloud :param node_id: Unique ID of the node to reboot :type node_id: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's reboot_node method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_compute.reboot_node as-2346 profile1
def _request(self, method, uri, headers=None, body='', stream=False):
    """Perform an HTTP request and return a 3-tuple containing the
    response status, response headers (as httplib.HTTPMessage), and
    response body.

    :param method: HTTP method name (``GET``, ``POST``, ...)
    :param uri: request target
    :param headers: optional extra request headers. Defaults to None
        instead of ``{}`` so the default is not a mutable dict shared
        (and mutated via setdefault/auth below) across calls.
    :param body: request body, defaults to the empty string
    :param stream: when True the raw response object is returned as the
        body and the caller must fully read and close it
    """
    if headers is None:
        headers = {}
    response = None
    headers.setdefault('Accept', 'multipart/mixed, application/json, */*;q=0.5')
    if self._client._credentials:
        self._security_auth_headers(self._client._credentials.username,
                                    self._client._credentials.password,
                                    headers)
    try:
        self._connection.request(method, uri, body, headers)
        try:
            # Some httplib versions accept buffering=True; fall back when
            # the signature does not support it.
            response = self._connection.getresponse(buffering=True)
        except TypeError:
            response = self._connection.getresponse()
        if stream:
            # The caller is responsible for fully reading the
            # response and closing it when streaming.
            response_body = response
        else:
            response_body = response.read()
    finally:
        if response and not stream:
            response.close()
    return response.status, response.msg, response_body
Given a Method, URL, Headers, and Body, perform and HTTP request, and return a 3-tuple containing the response status, response headers (as httplib.HTTPMessage), and response body.
def create_endpoint_folder(self, endpoint_id, folder):
    '''Create an endpoint folder, catching the error if it exists.

    Parameters
    ==========
    endpoint_id: the endpoint id parameters
    folder: the relative path of the folder to create
    '''
    try:
        result = self.transfer_client.operation_mkdir(endpoint_id, folder)
    except TransferAPIError:
        # Already present: report and carry on.
        bot.info('%s already exists at endpoint' %folder)
    else:
        bot.info("%s --> %s" %(result['message'], folder))
create an endpoint folder, catching the error if it exists. Parameters ========== endpoint_id: the endpoint id parameters folder: the relative path of the folder to create
def p_union_patch(self, p):
    """union_patch : PATCH uniont ID NL INDENT field_list examples DEDENT"""
    # NOTE: the docstring above is the PLY grammar production for this
    # parser rule -- it drives parsing and must not be edited as prose.
    # p[2] is the 'uniont' production's value; its elements are used below
    # as (closed-marker, lineno, lexpos).
    p[0] = AstUnionPatch(
        path=self.path,
        lineno=p[2][1],
        lexpos=p[2][2],
        name=p[3],       # ID token: the union's name
        fields=p[6],     # parsed field_list
        examples=p[7],
        closed=p[2][0] == 'union_closed')
union_patch : PATCH uniont ID NL INDENT field_list examples DEDENT
def refresh(self, data):
    """Refresh the module(s) named in ``data``."""
    update_i3status = False
    for module_name in self.find_modules(data.get("module")):
        module = self.py3_wrapper.output_modules[module_name]
        if self.debug:
            self.py3_wrapper.log("refresh %s" % module)
        if module["type"] == "py3status":
            module["module"].force_update()
        else:
            # i3status modules cannot be refreshed one by one; the whole
            # i3status output is refreshed once at the end.
            update_i3status = True
    if update_i3status:
        self.py3_wrapper.i3status_thread.refresh_i3status()
refresh the module(s)
def getCachedOrUpdatedValue(self, key):
    """Return the device's value for ``key``, preferring the local cache.

    When the key is not in the cache, the value is queried from the host.
    """
    if key in self._VALUES:
        return self._VALUES[key]
    return self.getValue(key)
Gets the device's value with the given key. If the key is not found in the cache, the value is queried from the host.
def add_attribute(self, tag, name, value):
    """Attach a (name, value) attribute pair to the named tag,
    creating the tag first if necessary.
    """
    self.add_tag(tag)
    self._tags[tag][name] = value
add an attribute (name, value) pair to the named tag
def plot_world(*args, **kwargs):
    """Generate a plot from a received instance of World and show it.

    See also plot_world_with_elegans and plot_world_with_matplotlib.

    Parameters
    ----------
    world : World or str
        World or a HDF5 filename to render.
    interactive : bool, default True
        Choose a visualizer. If False, show the plot with matplotlib.
        If True (only available on IPython Notebook), show it with elegans.

    Examples
    --------
    >>> plot_world(w)
    >>> plot_world(w, interactive=False)
    """
    renderer = (plot_world_with_elegans
                if kwargs.pop('interactive', True)
                else plot_world_with_matplotlib)
    renderer(*args, **kwargs)
Generate a plot from received instance of World and show it. See also plot_world_with_elegans and plot_world_with_matplotlib. Parameters ---------- world : World or str World or a HDF5 filename to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_world(w) >>> plot_world(w, interactive=False)
def _get_tick_frac_labels(self):
    """Get the major ticks, minor ticks, and major labels.

    Returns tick positions as fractions of the axis length (0..1) plus
    the formatted major-tick labels. Only the 'linear' scale type is
    implemented.
    """
    minor_num = 4  # number of minor ticks per major division
    if (self.axis.scale_type == 'linear'):
        domain = self.axis.domain
        # Work on an ascending domain; remember to flip back at the end.
        if domain[1] < domain[0]:
            flip = True
            domain = domain[::-1]
        else:
            flip = False
        offset = domain[0]
        scale = domain[1] - domain[0]

        transforms = self.axis.transforms
        length = self.axis.pos[1] - self.axis.pos[0]  # in logical coords
        n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi

        # major = np.linspace(domain[0], domain[1], num=11)
        # major = MaxNLocator(10).tick_values(*domain)
        major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)

        labels = ['%g' % x for x in major]
        majstep = major[1] - major[0]
        minor = []
        # Evenly space minor_num minor ticks inside each major interval.
        minstep = majstep / (minor_num + 1)
        # _stop_at_major controls whether minor ticks extend one extra
        # major interval beyond each end of the axis.
        minstart = 0 if self.axis._stop_at_major[0] else -1
        minstop = -1 if self.axis._stop_at_major[1] else 0
        for i in range(minstart, len(major) + minstop):
            maj = major[0] + i * majstep
            minor.extend(np.linspace(maj + minstep,
                                     maj + majstep - minstep,
                                     minor_num))
        # Normalize tick positions to fractions of the axis length.
        major_frac = (major - offset) / scale
        minor_frac = (np.array(minor) - offset) / scale
        major_frac = major_frac[::-1] if flip else major_frac
        # Keep only ticks that fall (within tolerance) inside the axis.
        use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
        major_frac = major_frac[use_mask]
        labels = [l for li, l in enumerate(labels) if use_mask[li]]
        minor_frac = minor_frac[(minor_frac > -0.0001) &
                                (minor_frac < 1.0001)]
    elif self.axis.scale_type == 'logarithmic':
        # NOTE(review): this *returns* the NotImplementedError class
        # instead of raising it -- likely a bug; confirm intent.
        return NotImplementedError
    elif self.axis.scale_type == 'power':
        # NOTE(review): same as above -- returned, not raised.
        return NotImplementedError
    return major_frac, minor_frac, labels
Get the major ticks, minor ticks, and major labels
def make_ns(self, ns):
    '''Return the `lazily` created template namespace.

    When the plugin defines its own ``namespace``, the result is a fresh
    dict of that namespace overlaid with ``ns``; otherwise ``ns`` is
    returned unchanged.
    '''
    if not self.namespace:
        return ns
    merged = {}
    merged.update(self.namespace)
    merged.update(ns)
    return merged
Returns the `lazily` created template namespace.
def user(self):
    """Create (once) and return the User object for this session."""
    if not hasattr(self, '_user'):
        # Lazily construct and cache on first access.
        self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid))
    return self._user
Creates a User object when requested.
def smoother_step(F, filt, next_pred, next_smth):
    """Smoothing step of Kalman filter/smoother.

    Parameters
    ----------
    F: (dx, dx) numpy array
        Mean of X_t | X_{t-1} is F * X_{t-1}
    filt: MeanAndCov object
        filtering distribution at time t
    next_pred: MeanAndCov object
        predictive distribution at time t+1
    next_smth: MeanAndCov object
        smoothing distribution at time t+1

    Returns
    -------
    smth: MeanAndCov object
        smoothing distribution at time t
    """
    # Smoothing gain: filt.cov @ F' @ inv(next_pred.cov)
    gain = dotdot(filt.cov, F.T, inv(next_pred.cov))
    cov = filt.cov + dotdot(gain, next_smth.cov - next_pred.cov, gain.T)
    mean = filt.mean + np.matmul(next_smth.mean - next_pred.mean, gain.T)
    return MeanAndCov(mean=mean, cov=cov)
Smoothing step of Kalman filter/smoother. Parameters ---------- F: (dx, dx) numpy array Mean of X_t | X_{t-1} is F * X_{t-1} filt: MeanAndCov object filtering distribution at time t next_pred: MeanAndCov object predictive distribution at time t+1 next_smth: MeanAndCov object smoothing distribution at time t+1 Returns ------- smth: MeanAndCov object smoothing distribution at time t
def execute(self):
    """
    Invoke the redispy pipeline.execute() method and take all the values
    returned in sequential order of commands and map them to the Future
    objects we returned when each command was queued inside the pipeline.

    Also invoke all the callback functions queued up.

    :return: None
    """
    stack = self._stack
    callbacks = self._callbacks
    promises = []
    if stack:
        def process():
            """
            take all the commands and pass them to redis.
            this closure has the context of the stack
            :return: None
            """
            # get the connection to redis
            pipe = ConnectionManager.get(self.connection_name)
            # keep track of all the commands
            call_stack = []
            # build a corresponding list of the futures
            futures = []
            # we need to do this because we need to make sure
            # all of these are callable.
            # there shouldn't be any non-callables.
            for item, args, kwargs, future in stack:
                f = getattr(pipe, item)
                if callable(f):
                    futures.append(future)
                    call_stack.append((f, args, kwargs))
            # here's where we actually pass the commands to the
            # underlying redis-py pipeline() object.
            for f, args, kwargs in call_stack:
                f(*args, **kwargs)
            # execute the redis-py pipeline.
            # map all of the results into the futures.
            # futures and call_stack are index-aligned, so result i
            # belongs to future i.
            for i, v in enumerate(pipe.execute()):
                futures[i].set(v)
        promises.append(process)
    # collect all the other pipelines for other named connections attached.
    promises += [p.execute for p in self._pipelines.values()]
    # Single promise: call it directly and avoid the task-manager
    # overhead; otherwise fan out and wait for all of them.
    if len(promises) == 1:
        promises[0]()
    else:
        # if there are no promises, this is basically a no-op.
        TaskManager.wait(*[TaskManager.promise(p) for p in promises])
    # Callbacks run only after every pipeline has completed.
    for cb in callbacks:
        cb()
Invoke the redispy pipeline.execute() method and take all the values returned in sequential order of commands and map them to the Future objects we returned when each command was queued inside the pipeline. Also invoke all the callback functions queued up. :param raise_on_error: boolean :return: None
def api_request(
    self,
    method,
    path,
    query_params=None,
    data=None,
    content_type=None,
    headers=None,
    api_base_url=None,
    api_version=None,
    expect_json=True,
    _target_object=None,
):
    """Make a request over the HTTP transport to the API.

    You shouldn't need to use this method, but if you plan to
    interact with the API using these primitives, this is the
    correct one to use.

    :type method: str
    :param method: The HTTP method name (ie, ``GET``, ``POST``, etc).
                   Required.

    :type path: str
    :param path: The path to the resource (ie, ``'/b/bucket-name'``).
                 Required.

    :type query_params: dict or list
    :param query_params: A dictionary of keys and values (or list of
                         key-value pairs) to insert into the query
                         string of the URL.

    :type data: str
    :param data: The data to send as the body of the request. Default
                 is the empty string.

    :type content_type: str
    :param content_type: The proper MIME type of the data provided.
                         Default is None.

    :type headers: dict
    :param headers: extra HTTP headers to be sent with the request.

    :type api_base_url: str
    :param api_base_url: The base URL for the API endpoint. Typically
                         you won't have to provide this. Default is the
                         standard API base URL.

    :type api_version: str
    :param api_version: The version of the API to call. Typically you
                        shouldn't provide this and instead use the
                        default for the library. Default is the latest
                        API version supported by google-cloud-python.

    :type expect_json: bool
    :param expect_json: If True, this method will try to parse the
                        response as JSON and raise an exception if that
                        cannot be done. Default is True.

    :type _target_object: :class:`object`
    :param _target_object: (Optional) Protected argument to be used by
                           library callers. This can allow custom
                           behavior, for example, to defer an HTTP
                           request and complete initialization of the
                           object at a later time.

    :raises ~google.cloud.exceptions.GoogleCloudError: if the response
        code is not 200 OK.
    :raises ValueError: if the response content type is not JSON.
    :rtype: dict or str
    :returns: The API response payload, either as a raw string or a
              dictionary if the response is valid JSON.
    """
    url = self.build_api_url(
        path=path,
        query_params=query_params,
        api_base_url=api_base_url,
        api_version=api_version,
    )
    # Making the executive decision that any dictionary
    # data will be sent properly as JSON.
    if data and isinstance(data, dict):
        data = json.dumps(data)
        content_type = "application/json"
    response = self._make_request(
        method=method,
        url=url,
        data=data,
        content_type=content_type,
        headers=headers,
        target_object=_target_object,
    )
    # Any non-2xx status is surfaced as a GoogleCloudError subclass.
    if not 200 <= response.status_code < 300:
        raise exceptions.from_http_response(response)
    if expect_json and response.content:
        return response.json()
    else:
        return response.content
Make a request over the HTTP transport to the API. You shouldn't need to use this method, but if you plan to interact with the API using these primitives, this is the correct one to use. :type method: str :param method: The HTTP method name (ie, ``GET``, ``POST``, etc). Required. :type path: str :param path: The path to the resource (ie, ``'/b/bucket-name'``). Required. :type query_params: dict or list :param query_params: A dictionary of keys and values (or list of key-value pairs) to insert into the query string of the URL. :type data: str :param data: The data to send as the body of the request. Default is the empty string. :type content_type: str :param content_type: The proper MIME type of the data provided. Default is None. :type headers: dict :param headers: extra HTTP headers to be sent with the request. :type api_base_url: str :param api_base_url: The base URL for the API endpoint. Typically you won't have to provide this. Default is the standard API base URL. :type api_version: str :param api_version: The version of the API to call. Typically you shouldn't provide this and instead use the default for the library. Default is the latest API version supported by google-cloud-python. :type expect_json: bool :param expect_json: If True, this method will try to parse the response as JSON and raise an exception if that cannot be done. Default is True. :type _target_object: :class:`object` :param _target_object: (Optional) Protected argument to be used by library callers. This can allow custom behavior, for example, to defer an HTTP request and complete initialization of the object at a later time. :raises ~google.cloud.exceptions.GoogleCloudError: if the response code is not 200 OK. :raises ValueError: if the response content type is not JSON. :rtype: dict or str :returns: The API response payload, either as a raw string or a dictionary if the response is valid JSON.
def create_constants(self, rdbms):
    """
    Factory for creating a Constants object (i.e. an object for creating
    constants based on column widths, and auto increment columns and labels).

    :param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).
    :rtype: pystratum.Constants.Constants
    """
    # Note: We load modules and classes dynamically such that on the end user's system only the required modules
    #       and other dependencies for the targeted RDBMS must be installed (and required modules and other
    #       dependencies for the other RDBMSs are not required).
    factories = {'mysql': ('pystratum_mysql.MySqlConstants', 'MySqlConstants'),
                 'mssql': ('pystratum_mssql.MsSqlConstants', 'MsSqlConstants'),
                 'pgsql': ('pystratum_pgsql.PgSqlConstants', 'PgSqlConstants')}

    try:
        module_path, class_name = factories[rdbms]
    except KeyError:
        raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms))

    module = locate(module_path)
    return getattr(module, class_name)(self.output)
Factory for creating a Constants object (i.e. an object for creating constants based on column widths, and auto increment columns and labels). :param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql). :rtype: pystratum.Constants.Constants
def reload_cache_config(self, call_params):
    """REST Reload Plivo Cache Config helper """
    # Build the versioned endpoint and POST the caller-supplied parameters.
    endpoint = '/{0}/ReloadCacheConfig/'.format(self.api_version)
    return self.request(endpoint, 'POST', call_params)
REST Reload Plivo Cache Config helper
def get_window_settings(self):
    """Return current window settings.

    Symmetric to the 'set_window_settings' setter.

    :returns: tuple of ``(hexstate, window_size, prefs_dialog_size, pos,
        is_maximized, is_fullscreen)``.
    """
    window_size = (self.window_size.width(), self.window_size.height())
    is_fullscreen = self.isFullScreen()
    if is_fullscreen:
        # NOTE(review): presumably ``maximized_flag`` records the maximized
        # state before entering fullscreen, since isMaximized() cannot be
        # trusted while fullscreen -- confirm where the flag is set.
        is_maximized = self.maximized_flag
    else:
        is_maximized = self.isMaximized()
    pos = (self.window_position.x(), self.window_position.y())
    prefs_dialog_size = (self.prefs_dialog_size.width(),
                         self.prefs_dialog_size.height())
    # Serialize the Qt widget state (toolbars/docks layout) to a string.
    hexstate = qbytearray_to_str(self.saveState())
    return (hexstate, window_size, prefs_dialog_size, pos,
            is_maximized, is_fullscreen)
Return current window settings. Symmetric to the 'set_window_settings' setter.
def recv(sock, size):
    """Receives exactly `size` bytes. This function blocks the thread."""
    # MSG_WAITALL asks the kernel to fill the buffer completely; a short
    # read therefore means the peer closed the connection.
    chunk = sock.recv(size, socket.MSG_WAITALL)
    if len(chunk) == size:
        return chunk
    raise socket.error(ECONNRESET, 'Connection closed')
Receives exactly `size` bytes. This function blocks the thread.
def fork(self, server_address: str = None, *, namespace: str = None) -> "State":
    r"""
    "Forks" this State object.

    Takes the same args as the :py:class:`State` constructor; each argument
    defaults to the value this State object was created with, so calling
    ``fork()`` with no arguments yields a State with identical semantics.

    This is preferred over ``copy()``\ -ing a :py:class:`State` object, and
    is useful when one needs to access 2 or more namespaces from the same
    code.
    """
    addr = self.server_address if server_address is None else server_address
    ns = self.namespace if namespace is None else namespace
    return self.__class__(addr, namespace=ns)
r""" "Forks" this State object. Takes the same args as the :py:class:`State` constructor, except that they automatically default to the values provided during the creation of this State object. If no args are provided to this function, then it shall create a new :py:class:`State` object that follows the exact same semantics as this one. This is preferred over ``copy()``\ -ing a :py:class:`State` object. Useful when one needs to access 2 or more namespaces from the same code.
def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
    """Add a timeout parameter to a function and return it.

    :param seconds: optional time limit in seconds or fractions of a second.
        If None is passed, no timeout is applied. This adds some flexibility
        to the usage: you can disable timing out depending on the settings.
    :type seconds: float
    :param use_signals: flag indicating whether signals should be used for
        timing function out or the multiprocessing. When using
        multiprocessing, timeout granularity is limited to 10ths of a second.
    :type use_signals: bool
    :param timeout_exception: exception type raised when the limit is hit.
    :param exception_message: optional message for the raised exception.
    :raises: TimeoutError if time limit is reached

    It is illegal to pass anything other than a function as the first
    parameter. The function is wrapped and returned to the caller.
    """
    def decorate(function):
        # A falsy ``seconds`` (None or 0) disables the timeout entirely and
        # returns the function unwrapped.
        if not seconds:
            return function

        if use_signals:
            def handler(signum, frame):
                # SIGALRM fired: translate the signal into the configured
                # timeout exception.
                _raise_exception(timeout_exception, exception_message)

            @wraps(function)
            def new_function(*args, **kwargs):
                # A per-call ``timeout=`` keyword overrides the decorator
                # default; it is popped so the wrapped function never sees it.
                new_seconds = kwargs.pop('timeout', seconds)
                if new_seconds:
                    old = signal.signal(signal.SIGALRM, handler)
                    signal.setitimer(signal.ITIMER_REAL, new_seconds)
                try:
                    return function(*args, **kwargs)
                finally:
                    if new_seconds:
                        # Cancel the pending alarm and restore the previous
                        # SIGALRM handler, even if the call raised.
                        signal.setitimer(signal.ITIMER_REAL, 0)
                        signal.signal(signal.SIGALRM, old)
            return new_function
        else:
            @wraps(function)
            def new_function(*args, **kwargs):
                # Signal-free path: delegate to _Timeout -- presumably it
                # runs the call in a separate process (see _Timeout) --
                # TODO confirm.
                timeout_wrapper = _Timeout(function, timeout_exception,
                                           exception_message, seconds)
                return timeout_wrapper(*args, **kwargs)
            return new_function
    return decorate
Add a timeout parameter to a function and return it. :param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied. This adds some flexibility to the usage: you can disable timing out depending on the settings. :type seconds: float :param use_signals: flag indicating whether signals should be used for timing function out or the multiprocessing When using multiprocessing, timeout granularity is limited to 10ths of a second. :type use_signals: bool :raises: TimeoutError if time limit is reached It is illegal to pass anything other than a function as the first parameter. The function is wrapped and returned to the caller.
def process_rst_and_summaries(content_generators):
    """
    Ensure mathjax script is applied to RST and summaries are corrected
    if specified in user settings.

    Handles content attached to ArticleGenerator and PageGenerator objects,
    since the plugin doesn't know how to handle other Generator types.

    For reStructuredText content, examine both articles and pages.
    If article or page is reStructuredText and there is math present,
    append the mathjax script.

    Also process summaries if present (only applies to articles)
    and user wants summaries processed (via user settings)
    """
    for generator in content_generators:
        if isinstance(generator, generators.ArticlesGenerator):
            # Articles, their translations and drafts all get the same
            # treatment.
            for article in (
                    generator.articles +
                    generator.translations +
                    generator.drafts):
                rst_add_mathjax(article)
                # optionally fix truncated formulae in summaries.
                # NOTE(review): ``mathjax_script`` looks like an attribute
                # stashed on the process_summary function elsewhere in the
                # plugin -- confirm it is set before this runs.
                if process_summary.mathjax_script is not None:
                    process_summary(article)
        elif isinstance(generator, generators.PagesGenerator):
            for page in generator.pages:
                rst_add_mathjax(page)
            for page in generator.hidden_pages:
                rst_add_mathjax(page)
Ensure mathjax script is applied to RST and summaries are corrected if specified in user settings. Handles content attached to ArticleGenerator and PageGenerator objects, since the plugin doesn't know how to handle other Generator types. For reStructuredText content, examine both articles and pages. If article or page is reStructuredText and there is math present, append the mathjax script. Also process summaries if present (only applies to articles) and user wants summaries processed (via user settings)
def parse_epsv_response(s):
    """
    Parsing `EPSV` (`message (|||port|)`) response.

    :param s: response line
    :type s: :py:class:`str`

    :return: (ip, port)
    :rtype: (:py:class:`None`, :py:class:`int`)
    """
    # The delimiter may be any single character; the pattern requires the
    # same captured character three times before the digits and once after,
    # e.g. "(|||21521|)". The last such group on the line wins.
    occurrences = [match.group()
                   for match in re.finditer(r"\((.)\1\1\d+\1\)", s)]
    chunk = occurrences[-1]
    # Strip "(" + 3 delimiters in front and delimiter + ")" behind.
    return None, int(chunk[4:-2])
Parsing `EPSV` (`message (|||port|)`) response. :param s: response line :type s: :py:class:`str` :return: (ip, port) :rtype: (:py:class:`None`, :py:class:`int`)
def predict(self, X):
    """Predict the class for X.

    The predicted class for each sample in X is returned.

    Parameters
    ----------
    X : List of ndarrays, one for each training example.
        Each training example's shape is (string1_len, string2_len,
        n_features), where string1_len and string2_len are the length of the
        two training strings and n_features the number of features.

    Returns
    -------
    y : iterable of shape = [n_samples]
        The predicted classes.
    """
    # Pick, per sample, the class whose probability is highest.
    labels = []
    for proba in self.predict_proba(X):
        labels.append(self.classes[proba.argmax()])
    return labels
Predict the class for X. The predicted class for each sample in X is returned. Parameters ---------- X : List of ndarrays, one for each training example. Each training example's shape is (string1_len, string2_len, n_features), where string1_len and string2_len are the length of the two training strings and n_features the number of features. Returns ------- y : iterable of shape = [n_samples] The predicted classes.
def regions(self):
    """gets the regions value"""
    # Simple JSON GET against the /regions endpoint, honoring any proxy.
    return self._get(url="{0}/regions".format(self.root),
                     param_dict={"f": "json"},
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
gets the regions value
def output_to_json(sources):
    """Print statistics to the terminal in Json format"""
    summaries = OrderedDict()
    for source in sources:
        if not source.get_is_available():
            continue
        source.update()
        summaries[source.get_source_name()] = source.get_sensors_summary()
    print(json.dumps(summaries, indent=4))
    # Terminate the process after a one-shot dump.
    sys.exit()
Print statistics to the terminal in Json format
def minimumBelow(requestContext, seriesList, n):
    """
    Takes one metric or a wildcard seriesList followed by a constant n.
    Draws only the metrics with a minimum value below n.

    Example::

        &target=minimumBelow(system.interface.eth*.packetsSent,1000)

    This would only display interfaces which sent at one point less than
    1000 packets/min.
    """
    def _keep(series):
        lowest = safeMin(series)
        # Series with no computable minimum are kept as well.
        return lowest is None or lowest <= n

    return list(filter(_keep, seriesList))
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value below n. Example:: &target=minimumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent at one point less than 1000 packets/min.
def fromlineno(self):
    """The first line that this node appears on in the source code.

    :type: int or None
    """
    # The superclass may report a line before the enclosing node's own line;
    # clamp to the parent's fromlineno (treated as 0 when falsy/None).
    lineno = super(Arguments, self).fromlineno
    return max(lineno, self.parent.fromlineno or 0)
The first line that this node appears on in the source code. :type: int or None
def permission_required(perm, *lookup_variables, **kwargs):
    """
    Decorator for views that checks whether a user has a particular permission
    enabled, redirecting to the log-in page if necessary.

    :param perm: name of the permission to check.
    :param lookup_variables: either plain strings naming view kwargs whose
        values are passed straight to the permission check, or
        ``(model, lookup, varname)`` triples resolved to a model instance
        via ``get_object_or_404``.
    :param kwargs: ``login_url``, ``redirect_field_name`` and
        ``redirect_to_login`` tweak the failure behaviour.
    """
    login_url = kwargs.pop('login_url', settings.LOGIN_URL)
    redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
    redirect_to_login = kwargs.pop('redirect_to_login', True)

    def decorate(view_func):
        def decorated(request, *args, **kwargs):
            if request.user.is_authenticated():
                params = []
                for lookup_variable in lookup_variables:
                    if isinstance(lookup_variable, string_types):
                        # Plain string: forward the view kwarg value as-is.
                        value = kwargs.get(lookup_variable, None)
                        if value is None:
                            continue
                        params.append(value)
                    elif isinstance(lookup_variable, (tuple, list)):
                        # (model, lookup, varname): resolve to an object.
                        model, lookup, varname = lookup_variable
                        value = kwargs.get(varname, None)
                        if value is None:
                            continue
                        if isinstance(model, string_types):
                            # "app_label.ModelName" dotted path.
                            model_class = apps.get_model(*model.split("."))
                        else:
                            model_class = model
                        if model_class is None:
                            raise ValueError(
                                "The given argument '%s' is not a valid model." % model)
                        if (inspect.isclass(model_class) and
                                not issubclass(model_class, Model)):
                            raise ValueError(
                                'The argument %s needs to be a model.' % model)
                        obj = get_object_or_404(model_class, **{lookup: value})
                        params.append(obj)
                # First consult the object-level check registered for this
                # permission, then fall back to the standard has_perm test.
                check = get_check(request.user, perm)
                granted = False
                if check is not None:
                    granted = check(*params)
                if granted or request.user.has_perm(perm):
                    return view_func(request, *args, **kwargs)
            # Not authenticated or permission denied: redirect to the login
            # page, or render the 403 response directly.
            if redirect_to_login:
                path = urlquote(request.get_full_path())
                tup = login_url, redirect_field_name, path
                return HttpResponseRedirect('%s?%s=%s' % tup)
            return permission_denied(request)
        return wraps(view_func)(decorated)
    return decorate
Decorator for views that checks whether a user has a particular permission enabled, redirecting to the log-in page if necessary.
def update(self, item, dry_run=None):
    """Updates item info in file.

    :param item: mapping persisted as-is to the backing table.
    :param dry_run: when truthy, the write is skipped.
    :returns: the item when written; None on a dry run (see note below).
    """
    logger.debug('Updating item. Item: {item} Table: {namespace}'.format(
        item=item,
        namespace=self.namespace
    ))
    if not dry_run:
        self.table.put_item(Item=item)
        # NOTE(review): the return sits inside the ``not dry_run`` branch,
        # so a dry run falls through and returns None -- confirm intended.
        return item
Updates item info in file.
def create_session(self, session_id, register=True, session_factory=None):
    """
    Creates new session object and returns it.

    @param session_id: Session id. If not provided, will generate a
        new session id.
    @param register: Should be the session registered in a storage.
        Websockets don't need it.
    @param session_factory: Use the given (class, args, kwargs) tuple to
        create the session. Class should derive from `BaseSession`.
        Normally not needed.
    """
    if session_factory is not None:
        # use custom class to create session
        sess_factory, sess_args, sess_kwargs = session_factory
        s = sess_factory(*sess_args, **sess_kwargs)
    else:
        # use default session and arguments if not using a custom session
        # factory
        s = session.Session(self._connection, self, session_id,
                            self.settings.get('disconnect_delay'))
    if register:
        self._sessions.add(s)
    return s
Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed.
def is_data_diverging(data_container):
    """
    We want to use this to check whether the data are diverging or not.

    This is a simple check, can be made much more sophisticated.

    :param data_container: A generic container of data points.
    :type data_container: `iterable`
    :returns: True if the data contain both negative and positive values,
        False otherwise.
    """
    assert infer_data_type(data_container) in [
        "ordinal",
        "continuous",
    ], "Data type should be ordinal or continuous"

    # Check whether the data contains negative and positive values, bailing
    # out as soon as both signs have been seen (zeros count as neither).
    has_negative = False
    has_positive = False
    for value in data_container:
        if value < 0:
            has_negative = True
        elif value > 0:
            has_positive = True
        if has_negative and has_positive:
            return True
    return False
We want to use this to check whether the data are diverging or not. This is a simple check, can be made much more sophisticated. :param data_container: A generic container of data points. :type data_container: `iterable`
def _fix_up_properties(cls): """Fix up the properties by calling their _fix_up() method. Note: This is called by MetaModel, but may also be called manually after dynamically updating a model class. """ # Verify that _get_kind() returns an 8-bit string. kind = cls._get_kind() if not isinstance(kind, basestring): raise KindError('Class %s defines a _get_kind() method that returns ' 'a non-string (%r)' % (cls.__name__, kind)) if not isinstance(kind, str): try: kind = kind.encode('ascii') # ASCII contents is okay. except UnicodeEncodeError: raise KindError('Class %s defines a _get_kind() method that returns ' 'a Unicode string (%r); please encode using utf-8' % (cls.__name__, kind)) cls._properties = {} # Map of {name: Property} if cls.__module__ == __name__: # Skip the classes in *this* file. return for name in set(dir(cls)): attr = getattr(cls, name, None) if isinstance(attr, ModelAttribute) and not isinstance(attr, ModelKey): if name.startswith('_'): raise TypeError('ModelAttribute %s cannot begin with an underscore ' 'character. _ prefixed attributes are reserved for ' 'temporary Model instance values.' % name) attr._fix_up(cls, name) if isinstance(attr, Property): if (attr._repeated or (isinstance(attr, StructuredProperty) and attr._modelclass._has_repeated)): cls._has_repeated = True cls._properties[attr._name] = attr cls._update_kind_map()
Fix up the properties by calling their _fix_up() method. Note: This is called by MetaModel, but may also be called manually after dynamically updating a model class.
def analyze(data, normalize=None, reduce=None, ndims=None, align=None, internal=False):
    """
    Wrapper function for normalize -> reduce -> align transformations.

    Parameters
    ----------
    data : numpy array, pandas df, or list of arrays/dfs
        The data to analyze

    normalize : str or False or None
        If set to 'across', the columns of the input data will be z-scored
        across lists (default). If set to 'within', the columns will be
        z-scored within each list that is passed. If set to 'row', each row of
        the input data will be z-scored. If set to False, the input data will
        be returned with no z-scoring.

    reduce : str or dict
        Decomposition/manifold learning model to use; see scikit-learn model
        docs for supported models and parameters. Pass a dict, e.g.
        reduce={'model' : 'PCA', 'params' : {'whiten' : True}}, for finer
        control.

    ndims : int
        Number of dimensions to reduce

    align : str or dict
        'hyper' (hyperalignment) or 'SRM' (shared response model), or a dict
        with 'model' and 'params' keys for finer control (default : 'hyper').

    Returns
    ----------
    analyzed_data : list of numpy arrays
        The processed data
    """
    # Apply the three stages one after another instead of nesting the calls.
    normalized = normalizer(data, normalize=normalize, internal=internal)
    reduced = reducer(normalized, reduce=reduce, ndims=ndims, internal=internal)
    return aligner(reduced, align=align)
Wrapper function for normalize -> reduce -> align transformations. Parameters ---------- data : numpy array, pandas df, or list of arrays/dfs The data to analyze normalize : str or False or None If set to 'across', the columns of the input data will be z-scored across lists (default). That is, the z-scores will be computed with with respect to column n across all arrays passed in the list. If set to 'within', the columns will be z-scored within each list that is passed. If set to 'row', each row of the input data will be z-scored. If set to False, the input data will be returned with no z-scoring. reduce : str or dict Decomposition/manifold learning model to use. Models supported: PCA, IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}. See scikit-learn specific model docs for details on parameters supported for each model. ndims : int Number of dimensions to reduce align : str or dict If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be hyperalignment. If 'SRM', alignment algorithm will be shared response model. You can also pass a dictionary for finer control, where the 'model' key is a string that specifies the model and the params key is a dictionary of parameter values (default : 'hyper'). Returns ---------- analyzed_data : list of numpy arrays The processed data
def lset(self, key, index, value):
    """Emulate lset."""
    target = self._get_list(key, 'LSET')
    if target is None:
        raise ResponseError("no such key")
    # Rely on Python list indexing (negative indices included) and translate
    # an out-of-bounds index into the redis-style error.
    try:
        target[index] = self._encode(value)
    except IndexError:
        raise ResponseError("index out of range")
Emulate lset.
def create(self, **kwargs):
    """
    Create a resource on the server

    :params kwargs: Attributes (field names and values) of the new resource
    """
    new_resource = self.resource_class(self.client)
    new_resource.update_from_dict(kwargs)
    # force_create makes the save an unconditional create on the server.
    new_resource.save(force_create=True)
    return new_resource
Create a resource on the server :params kwargs: Attributes (field names and values) of the new resource
def get_stored_content_length(headers):
    """Return the content length (in bytes) of the object as stored in GCS.

    x-goog-stored-content-length should always be present except when called
    via the local dev_appserver. Therefore if it is not present we default to
    the standard content-length header.

    Args:
      headers: a dict of headers from the http response.

    Returns:
      the stored content length.
    """
    stored = headers.get('x-goog-stored-content-length')
    if stored is not None:
        return stored
    # Fallback for the local dev_appserver case.
    return headers.get('content-length')
Return the content length (in bytes) of the object as stored in GCS. x-goog-stored-content-length should always be present except when called via the local dev_appserver. Therefore if it is not present we default to the standard content-length header. Args: headers: a dict of headers from the http response. Returns: the stored content length.
def make_key(table_name, objid):
    """Create an object key for storage."""
    key = datastore.Key()
    # A single path element: the table (kind) plus the stringified object id.
    element = key.path_element.add()
    element.kind = table_name
    element.name = str(objid)
    return key
Create an object key for storage.
def main():
    """Main function.

    Exercises the HTTP API with a test client, builds a context dict of
    pretty-printed sample payloads (index, transactions, block), and renders
    every template in TPLS into ``source/http-samples/<name>.http``.
    """
    ctx = {}

    def pretty_json(data):
        return json.dumps(data, indent=2, sort_keys=True)

    client = server.create_app().test_client()

    host = 'example.com:9984'

    # HTTP Index
    res = client.get('/', environ_overrides={'HTTP_HOST': host})
    res_data = json.loads(res.data.decode())
    ctx['index'] = pretty_json(res_data)

    # API index
    res = client.get('/api/v1/', environ_overrides={'HTTP_HOST': host})
    ctx['api_index'] = pretty_json(json.loads(res.data.decode()))

    # tx create
    privkey = 'CfdqtD7sS7FgkMoGPXw55MVGGFwQLAoHYTcBhZDtF99Z'
    pubkey = '4K9sWUMFwTgaDGPfdynrbxWqWS6sWmKbZoTjxLtVUibD'
    asset = {'msg': 'Hello BigchainDB!'}
    tx = Transaction.create([pubkey], [([pubkey], 1)], asset=asset, metadata={'sequence': 0})
    tx = tx.sign([privkey])
    ctx['tx'] = pretty_json(tx.to_dict())
    ctx['public_keys'] = tx.outputs[0].public_keys[0]
    ctx['txid'] = tx.id

    # tx transfer
    privkey_transfer = '3AeWpPdhEZzWLYfkfYHBfMFC2r1f8HEaGS9NtbbKssya'
    pubkey_transfer = '3yfQPHeWAa1MxTX9Zf9176QqcpcnWcanVZZbaHb8B3h9'

    cid = 0
    input_ = Input(fulfillment=tx.outputs[cid].fulfillment,
                   fulfills=TransactionLink(txid=tx.id, output=cid),
                   owners_before=tx.outputs[cid].public_keys)
    tx_transfer = Transaction.transfer([input_], [([pubkey_transfer], 1)], asset_id=tx.id, metadata={'sequence': 1})
    tx_transfer = tx_transfer.sign([privkey])
    ctx['tx_transfer'] = pretty_json(tx_transfer.to_dict())
    ctx['public_keys_transfer'] = tx_transfer.outputs[0].public_keys[0]
    ctx['tx_transfer_id'] = tx_transfer.id

    # second transfer: spend the first transfer's output
    # privkey_transfer_last = 'sG3jWDtdTXUidBJK53ucSTrosktG616U3tQHBk81eQe'
    pubkey_transfer_last = '3Af3fhhjU6d9WecEM9Uw5hfom9kNEwE7YuDWdqAUssqm'

    cid = 0
    input_ = Input(fulfillment=tx_transfer.outputs[cid].fulfillment,
                   fulfills=TransactionLink(txid=tx_transfer.id, output=cid),
                   owners_before=tx_transfer.outputs[cid].public_keys)
    tx_transfer_last = Transaction.transfer([input_], [([pubkey_transfer_last], 1)],
                                            asset_id=tx.id, metadata={'sequence': 2})
    tx_transfer_last = tx_transfer_last.sign([privkey_transfer])
    ctx['tx_transfer_last'] = pretty_json(tx_transfer_last.to_dict())
    ctx['tx_transfer_last_id'] = tx_transfer_last.id
    ctx['public_keys_transfer_last'] = tx_transfer_last.outputs[0].public_keys[0]

    # block
    node_private = "5G2kE1zJAgTajkVSbPAQWo4c2izvtwqaNHYsaNpbbvxX"
    node_public = "DngBurxfeNVKZWCEcDnLj1eMPAS7focUZTE5FndFGuHT"
    signature = "53wxrEQDYk1dXzmvNSytbCfmNVnPqPkDQaTnAe8Jf43s6ssejPxezkCvUnGTnduNUmaLjhaan1iRLi3peu6s5DzA"

    app_hash = 'f6e0c49c6d94d6924351f25bb334cf2a99af4206339bf784e741d1a5ab599056'
    block = lib.Block(height=1, transactions=[tx.to_dict()], app_hash=app_hash)
    block_dict = block._asdict()
    # app_hash is dropped from the rendered sample payload.
    block_dict.pop('app_hash')
    ctx['block'] = pretty_json(block_dict)
    ctx['blockid'] = block.height

    # block status
    block_list = [
        block.height
    ]
    ctx['block_list'] = pretty_json(block_list)

    base_path = os.path.join(os.path.dirname(__file__),
                             'source/http-samples')
    if not os.path.exists(base_path):
        os.makedirs(base_path)

    # Render every template with the collected context into .http files.
    for name, tpl in TPLS.items():
        path = os.path.join(base_path, name + '.http')
        code = tpl % ctx

        with open(path, 'w') as handle:
            handle.write(code)
Main function
def sort(self, values):
    """Sort the values in-place based on the connectors in the network."""
    # Walk every level of the network; each comparator (a, b) swaps the two
    # wires' values whenever they are out of order.
    for comparators in self:
        for a, b in comparators:
            if values[a] > values[b]:
                values[a], values[b] = values[b], values[a]
Sort the values in-place based on the connectors in the network.
def plistfilename(self):
    '''
    This is a lazily detected absolute filename of the
    corresponding property list file (*.plist). None if it
    doesn't exist.
    '''
    cached = self._plist_fname
    if cached is None:
        # First access: resolve from the label and memoize.
        cached = discover_filename(self.label)
        self._plist_fname = cached
    return cached
This is a lazily detected absolute filename of the corresponding property list file (*.plist). None if it doesn't exist.
def _error_if_word_invalid(word,
                           valid_words_dictionary,
                           technical_words_dictionary,
                           line_offset,
                           col_offset):
    """Return SpellcheckError if this non-technical word is invalid."""
    valid_result = valid_words_dictionary.corrections(word.lower())
    if technical_words_dictionary:
        tech_result = technical_words_dictionary.corrections(word)
    else:
        # No technical words available to make an otherwise invalid
        # result value.
        tech_result = Dictionary.Result(False, list())
    if valid_result.valid or tech_result.valid:
        return None
    return SpellcheckError(word,
                           line_offset,
                           col_offset,
                           valid_result.suggestions,
                           SpellcheckError.InvalidWord)
Return SpellcheckError if this non-technical word is invalid.
def _assert_command_dict(self, struct, name, path=None, extra_info=None):
    """Checks whether struct is a command dict (e.g. it's a dict and has
    exactly 1 key-value pair)."""
    self._assert_dict(struct, name, path, extra_info)
    if len(struct) == 1:
        return
    problem = [self._format_error_path(path + [name]),
               'Commands of run, dependencies, and argument sections must be mapping with '
               'exactly 1 key-value pair, got {0}: {1}'.format(len(struct), struct)]
    if extra_info:
        problem.append(extra_info)
    raise exceptions.YamlSyntaxError('\n'.join(problem))
Checks whether struct is a command dict (e.g. it's a dict and has exactly 1 key-value pair).
def create_api(name, description, cloneFrom=None,
               region=None, key=None, keyid=None, profile=None):
    '''
    Create a new REST API Service with the given name

    Returns {created: True} if the rest api was created and returns
    {created: False} if the rest api was not created.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.create_api myapi_name api_description

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # cloneFrom must be omitted entirely when not given; boto3 rejects
        # a None value for it.
        if cloneFrom:
            api = conn.create_rest_api(name=name, description=description, cloneFrom=cloneFrom)
        else:
            api = conn.create_rest_api(name=name, description=description)
        # Datetime fields are not JSON-serializable; stringify them.
        api = _convert_datetime_str(api)
        return {'created': True, 'restapi': api} if api else {'created': False}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Create a new REST API Service with the given name Returns {created: True} if the rest api was created and returns {created: False} if the rest api was not created. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api myapi_name api_description
def cur_time(typ='date', tz=DEFAULT_TZ) -> (datetime.date, str):
    """
    Current time

    Args:
        typ: one of ['date', 'time', 'time_path', 'raw', '']
        tz: timezone

    Returns:
        relevant current time or date

    Examples:
        >>> cur_dt = pd.Timestamp('now')
        >>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d')
        True
        >>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
        True
        >>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
        True
        >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
        True
        >>> cur_time(typ='') == cur_dt.date()
        True
    """
    now = pd.Timestamp('now', tz=tz)

    # Known string formats are dispatched via a table; 'raw' returns the
    # timestamp itself; anything else falls back to the plain date.
    fmt_by_typ = {
        'date': '%Y-%m-%d',
        'time': '%Y-%m-%d %H:%M:%S',
        'time_path': '%Y-%m-%d/%H-%M-%S',
    }
    if typ in fmt_by_typ:
        return now.strftime(fmt_by_typ[typ])
    if typ == 'raw':
        return now
    return now.date()
Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> cur_time(typ='') == cur_dt.date() True
def do_refresh(self,args): """Refresh the view of the log group""" # prints all the groups: pprint(AwsConnectionFactory.getLogClient().describe_log_groups()) response = AwsConnectionFactory.getLogClient().describe_log_groups(logGroupNamePrefix=self.stackResource.physical_resource_id) if not 'logGroups' in response: raise Exception("Expected log group description to have logGroups entry. Got {}".format(response)) # pprint(response) descriptions = [x for x in response['logGroups'] if x['logGroupName'] == self.stackResource.physical_resource_id] if not descriptions: raise Exception("Could not find log group {} in list {}".format(self.stackResource.physical_resource_id,response['logGroups'])) self.description = descriptions[0] self.logStreams = self.loadLogStreams() print "== logStream" maxIndex = "{}".format(len(self.logStreams)+1) print "maxIndex:{}".format(maxIndex) frm = " {{0:{}d}}: {{1}}".format(len(maxIndex)) print frm index = 0 for logStream in self.logStreams: print frm.format(index,logStream['logStreamName']) index += 1
Refresh the view of the log group
def patch_sys(self, inherit_path):
    """Patch sys with all site scrubbed."""
    def patch_dict(old_value, new_value):
        # Mutate in place so existing references to the dict remain valid.
        old_value.clear()
        old_value.update(new_value)

    def patch_all(path, path_importer_cache, modules):
        # Slice-assign sys.path for the same keep-the-object-alive reason.
        sys.path[:] = path
        patch_dict(sys.path_importer_cache, path_importer_cache)
        patch_dict(sys.modules, modules)

    new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)

    # Append the merged PEX_PATH entries (pex-info plus environment).
    new_sys_path.extend(merge_split(self._pex_info.pex_path, self._vars.PEX_PATH))

    patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
Patch sys with all site scrubbed.
def _attach_record_as_json(mfg_event, record):
    """Attach a copy of the record as JSON so we have an un-mangled copy."""
    json_attachment = mfg_event.attachment.add()
    json_attachment.name = TEST_RECORD_ATTACHMENT_NAME
    # Normalize the record to plain base types before serializing.
    record_as_dict = htf_data.convert_to_base_types(record)
    json_attachment.value_binary = _convert_object_to_json(record_as_dict)
    json_attachment.type = test_runs_pb2.TEXT_UTF8
Attach a copy of the record as JSON so we have an un-mangled copy.
def filesfile_string(self):
    """String with the list of files and prefixes needed to execute ABINIT."""
    #optic.in     ! Name of input file
    #optic.out    ! Unused
    #optic        ! Root name for all files that will be produced
    return "\n".join([
        self.input_file.path,                           # Path to the input file
        os.path.join(self.workdir, "unused"),           # Path to the output file
        os.path.join(self.workdir, self.prefix.odata),  # Prefix for output data
    ])
String with the list of files and prefixes needed to execute ABINIT.
def update_stats(stats, start_time, data): ''' Calculate the master stats and return the updated stat info ''' end_time = time.time() cmd = data['cmd'] # the jid is used as the create time try: jid = data['jid'] except KeyError: try: jid = data['data']['__pub_jid'] except KeyError: log.info('jid not found in data, stats not updated') return stats create_time = int(time.mktime(time.strptime(jid, '%Y%m%d%H%M%S%f'))) latency = start_time - create_time duration = end_time - start_time stats[cmd]['runs'] += 1 stats[cmd]['latency'] = (stats[cmd]['latency'] * (stats[cmd]['runs'] - 1) + latency) / stats[cmd]['runs'] stats[cmd]['mean'] = (stats[cmd]['mean'] * (stats[cmd]['runs'] - 1) + duration) / stats[cmd]['runs'] return stats
Calculate the master stats and return the updated stat info
def slaveraise(self, type, error, traceback):
    """ slave only """
    # Forward an exception from a slave process to the master over the pipe.
    # Wire format: a one-character 'E' opcode followed by a pickled
    # (exception_type, formatted_traceback_string) pair.
    # NOTE(review): 'E' * 1 + pickle.dumps(...) concatenates str and bytes,
    # which only works on Python 2 — presumably this module targets py2;
    # confirm before porting.
    message = 'E' * 1 + pickle.dumps((type, ''.join(tb.format_exception(type, error, traceback))))
    if self.pipe is not None:
        self.pipe.put(message)
slave only
def perform_iteration(self):
    """Collect the latest log-file stats and publish them to Redis."""
    snapshot = jsonify_asdict(self.get_all_stats())
    self.redis_client.publish(self.redis_key, snapshot)
Get any changes to the log files and push updates to Redis.
def laplacian(script, iterations=1, boundary=True, cotangent_weight=True,
              selected=False):
    """ Laplacian smooth of the mesh: for each vertex it calculates the average
    position with nearest vertex

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        iterations (int): The number of times that the whole algorithm (normal
            smoothing + vertex fitting) is iterated.
        boundary (bool): If true the boundary edges are smoothed only by
            themselves (e.g. the polyline forming the boundary of the mesh is
            independently smoothed). Can reduce the shrinking on the border but
            can have strange effects on very small boundaries.
        cotangent_weight (bool): If True the cotangent weighting scheme is
            computed for the averaging of the position. Otherwise (False) the
            simpler umbrella scheme (1 if the edge is present) is used.
        selected (bool): If selected the filter is performed only on the
            selected faces

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # Assemble the MeshLab filter-script XML entry verbatim; booleans are
    # serialized lowercase ("true"/"false") as MeshLab expects.
    filter_xml = ''.join([
        '  <filter name="Laplacian Smooth">\n',
        '    <Param name="stepSmoothNum" ',
        'value="{:d}" '.format(iterations),
        'description="Smoothing steps" ',
        'type="RichInt" ',
        '/>\n',
        '    <Param name="Boundary" ',
        'value="{}" '.format(str(boundary).lower()),
        'description="1D Boundary Smoothing" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="cotangentWeight" ',
        'value="{}" '.format(str(cotangent_weight).lower()),
        'description="Cotangent weighting" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="Selected" ',
        'value="{}" '.format(str(selected).lower()),
        'description="Affect only selected faces" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
Laplacian smooth of the mesh: for each vertex it calculates the average position with nearest vertex Args: script: the FilterScript object or script filename to write the filter to. iterations (int): The number of times that the whole algorithm (normal smoothing + vertex fitting) is iterated. boundary (bool): If true the boundary edges are smoothed only by themselves (e.g. the polyline forming the boundary of the mesh is independently smoothed). Can reduce the shrinking on the border but can have strange effects on very small boundaries. cotangent_weight (bool): If True the cotangent weighting scheme is computed for the averaging of the position. Otherwise (False) the simpler umbrella scheme (1 if the edge is present) is used. selected (bool): If selected the filter is performed only on the selected faces Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
def one_line(self):
    """Return True if the AMP should be displayed on one line (one_line=true|false).

    A missing key defaults to False (multi-line display).
    """
    ret = self.get('one_line')
    if ret is None:
        return False
    # Any value starting with "true" (case-insensitive) enables one-line mode.
    return ret.lower().startswith('true')
Return True|False if the AMP should be displayed on one line (one_line=true|false).
def speed(self):
    '''Return the current transfer speed.

    Returns:
        int: The speed in bytes per second (0 while stalled or with
        no elapsed time recorded).
    '''
    if self._stalled:
        return 0

    elapsed = sum(interval for interval, _ in self._samples)
    transferred = sum(size for _, size in self._samples)

    if not elapsed:
        return 0
    return transferred / elapsed
Return the current transfer speed. Returns: int: The speed in bytes per second.
def add_to_manifest(self, manifest):
    """
    Add useful details to the manifest about this service
    so that it can be used in an application.

    :param manifest: An predix.admin.app.Manifest object
        instance that manages reading/writing manifest config
        for a cloud foundry app.
    """
    # Register the service itself with the application manifest.
    manifest.add_service(self.service.name)

    # Expose the service URI through an environment variable.
    varname = predix.config.set_env_value(self.use_class, 'uri',
                                          self._get_uri())
    manifest.add_env_var(varname, self._get_uri())

    manifest.write_manifest()
Add useful details to the manifest about this service so that it can be used in an application. :param manifest: An predix.admin.app.Manifest object instance that manages reading/writing manifest config for a cloud foundry app.
def chunks(iterable, size=1):
    """Splits iterator in chunks.

    Yields lazy sub-iterators of up to *size* elements each; chunks must
    be consumed in order since they share the underlying iterator.
    """
    source = iter(iterable)
    for head in source:
        # Re-attach the element the for-loop consumed, then take the rest.
        yield chain((head,), islice(source, size - 1))
Splits iterator in chunks.
def nvmlDeviceGetPcieReplayCounter(handle):
    """Return the PCIe replay counter for the device *handle*.

    Wraps the NVML C call ``nvmlDeviceGetPcieReplayCounter`` (Kepler or
    newer devices); ``_nvmlCheckReturn`` raises on any non-success NVML
    status (uninitialized library, invalid argument, unsupported device,
    lost GPU, or unknown error).
    """
    counter = c_uint()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieReplayCounter")
    _nvmlCheckReturn(fn(handle, byref(counter)))
    return bytes_to_str(counter.value)
r""" /** * Retrieve the PCIe replay counter. * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param value Reference in which to return the counter's value * * @return * - \ref NVML_SUCCESS if \a value has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter
def parse_san(self, san: str) -> Move: """ Uses the current position as the context to parse a move in standard algebraic notation and returns the corresponding move object. The returned move is guaranteed to be either legal or a null move. :raises: :exc:`ValueError` if the SAN is invalid or ambiguous. """ # Castling. try: if san in ["O-O", "O-O+", "O-O#"]: return next(move for move in self.generate_castling_moves() if self.is_kingside_castling(move)) elif san in ["O-O-O", "O-O-O+", "O-O-O#"]: return next(move for move in self.generate_castling_moves() if self.is_queenside_castling(move)) except StopIteration: raise ValueError("illegal san: {!r} in {}".format(san, self.fen())) # Match normal moves. match = SAN_REGEX.match(san) if not match: # Null moves. if san in ["--", "Z0"]: return Move.null() raise ValueError("invalid san: {!r}".format(san)) # Get target square. to_square = SQUARE_NAMES.index(match.group(4)) to_mask = BB_SQUARES[to_square] & ~self.occupied_co[self.turn] # Get the promotion type. p = match.group(5) promotion = p and PIECE_SYMBOLS.index(p[-1].lower()) # Filter by piece type. if match.group(1): piece_type = PIECE_SYMBOLS.index(match.group(1).lower()) from_mask = self.pieces_mask(piece_type, self.turn) else: from_mask = self.pawns # Filter by source file. if match.group(2): from_mask &= BB_FILES[FILE_NAMES.index(match.group(2))] # Filter by source rank. if match.group(3): from_mask &= BB_RANKS[int(match.group(3)) - 1] # Match legal moves. matched_move = None for move in self.generate_legal_moves(from_mask, to_mask): if move.promotion != promotion: continue if matched_move: raise ValueError("ambiguous san: {!r} in {}".format(san, self.fen())) matched_move = move if not matched_move: raise ValueError("illegal san: {!r} in {}".format(san, self.fen())) return matched_move
Uses the current position as the context to parse a move in standard algebraic notation and returns the corresponding move object. The returned move is guaranteed to be either legal or a null move. :raises: :exc:`ValueError` if the SAN is invalid or ambiguous.
def get_separator_words(toks1):
    """
    Finds the words that separate a list of tokens from a background corpus
    Basically this generates a list of informative/interesting words in a set
    toks1 is a list of words
    Returns a list of separator words
    """
    tab_toks1 = nltk.FreqDist(word.lower() for word in toks1)
    if os.path.isfile(ESSAY_COR_TOKENS_PATH):
        # Use the cached background-corpus distribution when available.
        with open(ESSAY_COR_TOKENS_PATH, 'rb') as token_cache:
            toks2 = pickle.load(token_cache)
    else:
        with open(ESSAY_CORPUS_PATH) as corpus_file:
            essay_corpus = corpus_file.read()
        essay_corpus = sub_chars(essay_corpus)
        toks2 = nltk.FreqDist(word.lower() for word in nltk.word_tokenize(essay_corpus))
        with open(ESSAY_COR_TOKENS_PATH, 'wb') as token_cache:
            pickle.dump(toks2, token_cache)
    sep_words = []
    for word in tab_toks1.keys():
        tok1_present = tab_toks1[word]
        if tok1_present > 2:
            tok1_total = tab_toks1._N
            tok2_present = toks2[word]
            tok2_total = toks2._N
            # Fisher's exact test: keep words significantly over-represented
            # (at least 2x the background rate) in toks1.
            fish_val = pvalue(tok1_present, tok2_present, tok1_total, tok2_total).two_tail
            if (fish_val < .001 and
                    tok1_present / float(tok1_total) > (tok2_present / float(tok2_total)) * 2):
                sep_words.append(word)
    # Build the stopword set once instead of reloading the corpus list for
    # every candidate word.
    stopwords = set(nltk.corpus.stopwords.words("english"))
    sep_words = [w for w in sep_words if w not in stopwords and len(w) > 5]
    return sep_words
Finds the words that separate a list of tokens from a background corpus Basically this generates a list of informative/interesting words in a set toks1 is a list of words Returns a list of separator words
def _soap_client_call(method_name, *args):
    """Wrapper to call SoapClient method"""
    # A new client instance is built per call for threading issues.
    client = _build_soap_client()
    soap_args = _convert_soap_method_args(*args)
    method = getattr(client, method_name)
    # If pysimplesoap version requires it, apply a workaround for
    # https://github.com/pysimplesoap/pysimplesoap/issues/31
    if PYSIMPLESOAP_1_16_2:
        return method(*soap_args)
    return method(client, *soap_args)
Wrapper to call SoapClient method
def _configure_registry(self, include_process_stats: bool = False):
    """Configure the MetricRegistry."""
    if not include_process_stats:
        return
    # Expose process-level metrics alongside the application metrics.
    self.registry.register_additional_collector(
        ProcessCollector(registry=None))
Configure the MetricRegistry.
def configure(config=None, datastore=None, nested=False):
    """
    Useful for when you need to control Switchboard's setup.

    :param config: mapping of settings applied via ``Settings.init``;
        defaults to an empty dict.
    :param datastore: optional datastore assigned to ``Switch.ds``.
    :param nested: when True, ``config`` uses namespaced keys and is
        unwrapped through ``nested_config`` first.
    """
    # Avoid a mutable default argument ({} is shared across calls).
    if config is None:
        config = {}

    if nested:
        config = nested_config(config)
    # Re-read settings to make sure we have everything.
    # XXX It would be really nice if we didn't need to do this.
    Settings.init(**config)
    if datastore:
        Switch.ds = datastore
    # Register the builtins.
    __import__('switchboard.builtins')
Useful for when you need to control Switchboard's setup
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
    """
    Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
    must be specified. Encodings that are not ASCII based (such as UCS-2)
    are not allowed and should be decoded to ``unicode`` first.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``); its return value is
    used in place of that ``dict``. This can be used to implement custom
    decoders (e.g. JSON-RPC class hinting).

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg.
    """
    decoder_cls = JSONDecoder if cls is None else cls
    if object_hook is not None:
        kw['object_hook'] = object_hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg.
def emit(
    self, tup, stream=None, anchors=None, direct_task=None, need_task_ids=False
):
    """Emit a new Tuple to a stream.

    :param tup: payload to send to Storm; must contain only
        JSON-serializable data.
    :type tup: :class:`list` or :class:`pystorm.component.Tuple`
    :param stream: ID of the stream to emit to; ``None`` emits to the
        default stream.
    :type stream: str
    :param anchors: IDs of Tuples (or :class:`pystorm.component.Tuple`
        instances) the emitted Tuples should be anchored to.  When
        ``auto_anchor`` is enabled and this is omitted, the
        incoming/most recent Tuple ID(s) are used.
    :type anchors: list
    :param direct_task: task to send the Tuple to.
    :type direct_task: int
    :param need_task_ids: whether to return the task IDs the Tuple was
        emitted to (default ``False``).
    :type need_task_ids: bool
    :returns: ``None``, unless ``need_task_ids=True``, in which case a
        ``list`` of receiving task IDs (equal to ``[direct_task]`` when
        ``direct_task`` was specified).
    """
    if anchors is None:
        anchors = self._current_tups if self.auto_anchor else []
    # Normalize: callers may pass whole Tuples or raw IDs.
    anchor_ids = [
        anchor.id if isinstance(anchor, Tuple) else anchor for anchor in anchors
    ]

    return super(Bolt, self).emit(
        tup,
        stream=stream,
        anchors=anchor_ids,
        direct_task=direct_task,
        need_task_ids=need_task_ids,
    )
Emit a new Tuple to a stream. :param tup: the Tuple payload to send to Storm, should contain only JSON-serializable data. :type tup: :class:`list` or :class:`pystorm.component.Tuple` :param stream: the ID of the stream to emit this Tuple to. Specify ``None`` to emit to default stream. :type stream: str :param anchors: IDs the Tuples (or :class:`pystorm.component.Tuple` instances) which the emitted Tuples should be anchored to. If ``auto_anchor`` is set to ``True`` and you have not specified ``anchors``, ``anchors`` will be set to the incoming/most recent Tuple ID(s). :type anchors: list :param direct_task: the task to send the Tuple to. :type direct_task: int :param need_task_ids: indicate whether or not you'd like the task IDs the Tuple was emitted (default: ``False``). :type need_task_ids: bool :returns: ``None``, unless ``need_task_ids=True``, in which case it will be a ``list`` of task IDs that the Tuple was sent to if. Note that when specifying direct_task, this will be equal to ``[direct_task]``.
def _restore_file_lmt(self):
    # type: (Descriptor) -> None
    """Restore the last-modified time on the downloaded file.

    :param Descriptor self: this
    """
    if not self._restore_file_properties.lmt or self._ase.lmt is None:
        return
    # datetime.timestamp() is unavailable on py27, so go through mktime.
    modified = time.mktime(self._ase.lmt.timetuple())
    os.utime(str(self.final_path), (modified, modified))
Restore file lmt for file :param Descriptor self: this
def compare_mim_panels(self, existing_panel, new_panel):
    """Check if the latest version of OMIM differs from the most recent in database.

    Return all genes that were not in the previous version.

    Args:
        existing_panel(dict)
        new_panel(dict)

    Returns:
        new_genes(set(str))
    """
    previous = {gene['hgnc_id'] for gene in existing_panel['genes']}
    current = {gene['hgnc_id'] for gene in new_panel['genes']}
    return current - previous
Check if the latest version of OMIM differs from the most recent in database
        Return all genes that were not in the previous version.

        Args:
            existing_panel(dict)
            new_panel(dict)

        Returns:
            new_genes(set(str))
def cd(path):
    '''Change into *path*, creating it if it doesn't exist; restore the
    previous working directory on exit.'''
    previous = os.getcwd()
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (or cannot be created); the chdir below
        # will surface a genuinely missing path.
        pass
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
Creates the path if it doesn't exist
def service_define(self, service, ty):
    """
    Add a service variable of type ``ty`` to this model

    :param str service: variable name
    :param type ty: variable type
    :return: None
    """
    # A service name must not collide with data, algebraic or state variables.
    taken = self._algebs + self._states
    assert service not in self._data
    assert service not in taken

    self._service.append(service)
    self._service_ty.append(ty)
Add a service variable of type ``ty`` to this model :param str service: variable name :param type ty: variable type :return: None
def get_my_ip():
    """Returns this computer's IP address as a string."""
    raw = subprocess.check_output(GET_IP_CMD, shell=True)
    # Drop the trailing newline, then trim any remaining whitespace.
    return raw.decode('utf-8')[:-1].strip()
Returns this computer's IP address as a string.
def cast_item(cls, item):
    """Cast list item to the appropriate tag type.

    Returns *item* unchanged when it is already an instance of the list's
    subtype; otherwise attempts to construct the subtype from it.

    :raises IncompatibleItemType: *item* is a different tag type.
    :raises ValueError: the list has no explicit subtype to infer from.
    :raises CastError: the subtype constructor failed for any other reason.
    """
    if not isinstance(item, cls.subtype):
        # Only another Base tag of a non-matching type is "incompatible";
        # plain Python values fall through to the conversion attempt below.
        incompatible = isinstance(item, Base) and not any(
            issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
            for tag_type in cls.all_tags.values()
        )
        if incompatible:
            raise IncompatibleItemType(item, cls.subtype)
        try:
            return cls.subtype(item)
        except EndInstantiation:
            # The subtype is the End sentinel: only empty untyped lists allowed.
            raise ValueError('List tags without an explicit subtype must '
                             'either be empty or instantiated with '
                             'elements from which a subtype can be '
                             'inferred') from None
        except (IncompatibleItemType, CastError):
            # Already-meaningful errors from a nested cast; re-raise untouched.
            raise
        except Exception as exc:
            # Wrap anything else, chaining the original cause.
            raise CastError(item, cls.subtype) from exc
    return item
Cast list item to the appropriate tag type.
def capture_working_directory(self):
    """
    Returns a working directory where to temporary store packet capture files.

    :returns: path to the directory
    """
    capture_dir = os.path.join(self._path, "tmp", "captures")
    # Skip creation once the project has been deleted.
    if not self._deleted:
        try:
            os.makedirs(capture_dir, exist_ok=True)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not create the capture working directory: {}".format(e))
    return capture_dir
Returns a working directory where to temporary store packet capture files. :returns: path to the directory
def _histogram_fixed_binsize(a, start, width, n):
    """histogram_even(a, start, width, n) -> histogram

    Return an histogram where the first bin counts the number of lower
    outliers and the last bin the number of upper outliers. Works only with
    fixed width bins.

    :Stochastics:
      a : array
        Array of samples.
      start : float
        Left-most bin edge.
      width : float
        Width of the bins. All bins are considered to have the same width.
      n : int
        Number of bins.

    :Return:
      H : array
        Array containing the number of elements in each bin. H[0] is the number
        of samples smaller than start and H[-1] the number of samples
        greater than start + n*width.
    """
    # Thin wrapper: all binning happens in the compiled flib backend
    # (presumably Fortran — confirm against the build).
    return flib.fixed_binsize(a, start, width, n)
histogram_even(a, start, width, n) -> histogram Return an histogram where the first bin counts the number of lower outliers and the last bin the number of upper outliers. Works only with fixed width bins. :Stochastics: a : array Array of samples. start : float Left-most bin edge. width : float Width of the bins. All bins are considered to have the same width. n : int Number of bins. :Return: H : array Array containing the number of elements in each bin. H[0] is the number of samples smaller than start and H[-1] the number of samples greater than start + n*width.
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
    """Stacks sequence of models.

    Parameters
    ----------
    k : int, default 5
        Number of folds.
    stratify : bool, default False
    shuffle : bool, default True
    seed : int, default 100
    full_test : bool, default True
        If True then evaluate test dataset on the full data otherwise take
        the mean of every fold.
    add_diff : bool, default False

    Returns
    -------
    `DataFrame`

    Examples
    --------
    >>> pipeline = ModelsPipeline(model_rf,model_lr)
    >>> stack_ds = pipeline.stack(k=10, seed=111)
    """
    result_train = []
    result_test = []
    y = None

    # Stack each model independently; its out-of-fold train predictions and
    # test predictions become named columns of the combined dataset.
    for model in self.models:
        result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test)
        train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
        test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))

        result_train.append(train_df)
        result_test.append(test_df)

        # Every model stacks against the same target, so capture it once.
        if y is None:
            y = result.y_train

    result_train = pd.concat(result_train, axis=1)
    result_test = pd.concat(result_test, axis=1)

    if add_diff:
        # Optionally augment with pairwise feature differences.
        result_train = feature_combiner(result_train)
        result_test = feature_combiner(result_test)

    ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
    return ds
Stacks sequence of models. Parameters ---------- k : int, default 5 Number of folds. stratify : bool, default False shuffle : bool, default True seed : int, default 100 full_test : bool, default True If True then evaluate test dataset on the full data otherwise take the mean of every fold. add_diff : bool, default False Returns ------- `DataFrame` Examples -------- >>> pipeline = ModelsPipeline(model_rf,model_lr) >>> stack_ds = pipeline.stack(k=10, seed=111)
def filter_data(data, filter_dict):
    """
    filter a data dictionary for values only matching the filter

    Mutates *data* in place: list values keep only matching items, dict
    values keep only entries whose key matches.
    """
    for key, pattern in filter_dict.items():
        if key not in data:
            logger.warning("{0} doesn't match a top level key".format(key))
            continue

        matcher = re.compile(pattern)
        values = data[key]

        if isinstance(values, list):
            data[key] = [item for item in values if matcher.search(item)]
        elif isinstance(values, dict):
            data[key] = {k: v for k, v in values.items() if matcher.search(k)}
        else:
            raise MiuraException("cannot filter a {0}".format(type(values)))
filter a data dictionary for values only matching the filter
def threadpooled(
    func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]],
    *,
    loop_getter: typing.Union[typing.Callable[..., asyncio.AbstractEventLoop], asyncio.AbstractEventLoop],
    loop_getter_need_context: bool = False,
) -> typing.Callable[..., "asyncio.Task[typing.Any]"]:
    """Overload: function callable, loop getter available."""
    # typing overload stub (the @typing.overload decorator is applied at the
    # definition site outside this snippet): declares that decorating a
    # function while supplying a loop getter yields a wrapper returning
    # asyncio Tasks. The docstring-only body is intentional.
Overload: function callable, loop getter available.
def get_events(self, service_location_id, appliance_id, start, end, max_number=None):
    """
    Request events for a given appliance

    Parameters
    ----------
    service_location_id : int
    appliance_id : int
    start : int | dt.datetime | pd.Timestamp
    end : int | dt.datetime | pd.Timestamp
        start and end support epoch (in milliseconds), datetime and
        Pandas Timestamp; timezone-naive datetimes are assumed to be in UTC
    max_number : int, optional
        Maximum number of events to return; by default all events in the
        selected period are returned

    Returns
    -------
    dict
    """
    window_start = self._to_milliseconds(start)
    window_end = self._to_milliseconds(end)

    endpoint = urljoin(URLS['servicelocation'], service_location_id, "events")
    auth_header = {"Authorization": "Bearer {}".format(self.access_token)}
    query = {
        "from": window_start,
        "to": window_end,
        "applianceId": appliance_id,
        "maxNumber": max_number,
    }

    response = requests.get(endpoint, headers=auth_header, params=query)
    response.raise_for_status()
    return response.json()
Request events for a given appliance Parameters ---------- service_location_id : int appliance_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp timezone-naive datetimes are assumed to be in UTC max_number : int, optional The maximum number of events that should be returned by this query Default returns all events in the selected period Returns ------- dict
def fmt(self):
    """Make printable representation out of this instance.

    Substitutes ``self.kw`` into ``self.template``; the literal 'phrase'
    entry is used as-is while every other value is recursively formatted.
    """
    rendered = {
        key: value if key == 'phrase' else value.fmt()
        for key, value in self.kw.items()
    }
    return string.Template(self.template).substitute(rendered)
Make printable representation out of this instance.
def find_by_id(self, section, params=None, **options):
    """Returns the complete record for a single section.

    Parameters
    ----------
    section : {Id} The section to get.
    [params] : {Object} Parameters for the request; defaults to no
        parameters.
    """
    # Avoid a mutable default argument ({} is shared across calls).
    if params is None:
        params = {}
    path = "/sections/%s" % (section)
    return self.client.get(path, params, **options)
Returns the complete record for a single section. Parameters ---------- section : {Id} The section to get. [params] : {Object} Parameters for the request
def get_header(self, hdrclass, returnval=None):
    '''
    Return the first header object that is of
    class hdrclass, or returnval (default None) if no header of that
    class is found.  A string argument is resolved by name instead.
    '''
    if isinstance(hdrclass, str):
        return self.get_header_by_name(hdrclass)
    return next(
        (header for header in self._headers if isinstance(header, hdrclass)),
        returnval,
    )
Return the first header object that is of class hdrclass, or None if the header class isn't found.