[Dataset columns: code (string, lengths 51 to 2.38k) and docstring (string, lengths 4 to 15.2k)]
def interactive(outdir):
    print("Building your Blended files into a website!")
    global outdir_type
    outdir_type = outdir
    reload(sys)
    sys.setdefaultencoding('utf8')
    build_files(outdir)
    print("Watching the content and templates directories for changes, press CTRL+C to stop...\n")
    w = Watcher()
    w.run()
Blends the generated files and outputs an HTML website on file change
def _compile_and_collapse(self):
    self._real_regex = self._real_re_compile(*self._regex_args, **self._regex_kwargs)
    for attr in self._regex_attributes_to_copy:
        setattr(self, attr, getattr(self._real_regex, attr))
Actually compile the requested regex
def silent_execute(self, code):
    try:
        self.kernel_client.execute(to_text_string(code), silent=True)
    except AttributeError:
        pass
Execute code in the kernel without increasing the prompt
def stop(self):
    distributed_logger.info('Stopping metrics aggregator')
    self.process.terminate()
    self.process.join()
    distributed_logger.info('Stopped metrics aggregator')
Terminates the forked process. Only valid if started as a fork, because... well you wouldn't get here otherwise. :return: None
def impute_knn(df, k=3):
    imputed_matrix = KNN(k=k).complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
Nearest neighbour imputation which weights samples using the mean squared difference on features for which two rows both have observed data.

:param df: The input dataframe that contains missing values
:param k: The number of neighbours
:return: the imputed dataframe
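A minimal usage sketch (assumes the fancyimpute-style KNN used in the snippet is importable, and pandas/numpy are available as pd/np):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [4.0, 5.0, np.nan]})
    filled = impute_knn(df, k=2)  # same index/columns, NaNs replaced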
def vectorize(fn):
    @functools.wraps(fn)
    def vectorized_function(values, *vargs, **kwargs):
        return [fn(value, *vargs, **kwargs) for value in values]
    return vectorized_function
Allows a method to accept a list argument while internally dealing only with a single item of that list.
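A minimal sketch of the decorator in use (assumes functools is imported, as the snippet requires):

    @vectorize
    def double(value):
        return value * 2

    double([1, 2, 3])  # -> [2, 4, 6]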
def list_nodes(**kwargs):
    ret = {}
    nodes = list_nodes_full()
    for node in nodes:
        ret[node] = {}
        for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
            ret[node][prop] = nodes[node][prop]
    return ret
Return basic data on nodes
def registration_function_for_optionable(self, optionable_class):
    self._assert_not_frozen()

    def register(*args, **kwargs):
        kwargs['registering_class'] = optionable_class
        self.register(optionable_class.options_scope, *args, **kwargs)

    register.bootstrap = self.bootstrap_option_values()
    register.scope = optionable_class.options_scope
    return register
Returns a function for registering options on the given scope.
def clear_cache_delete_selected(modeladmin, request, queryset):
    result = delete_selected(modeladmin, request, queryset)
    if not result and hasattr(modeladmin, 'invalidate_cache'):
        modeladmin.invalidate_cache(queryset=queryset)
    return result
A delete action that will invalidate cache after being called.
def _validate_all_tags_are_used(metadata):
    tag_names = set([tag_name for tag_name, _ in metadata.tags])
    filter_arg_names = set()
    for location, _ in metadata.registered_locations:
        for filter_info in metadata.get_filter_infos(location):
            for filter_arg in filter_info.args:
                if is_tag_argument(filter_arg):
                    filter_arg_names.add(get_directive_argument_name(filter_arg))

    unused_tags = tag_names - filter_arg_names
    if unused_tags:
        raise GraphQLCompilationError(
            u'This GraphQL query contains @tag directives whose values '
            u'are not used: {}. This is not allowed. Please either use '
            u'them in a filter or remove them entirely.'.format(unused_tags))
Ensure all tags are used in some filter.
def sample(self, data, interval):
    data_slice = dict()
    for key in data:
        if '_valid' in key:
            continue
        index = [slice(None)] * data[key].ndim
        index[0] = self.rng.randint(0, data[key].shape[0])
        index[0] = slice(index[0], index[0] + 1)
        for tdim in self._time[key]:
            index[tdim] = interval
        data_slice[key] = data[key][tuple(index)]
    return data_slice
Sample a patch from the data object

Parameters
----------
data : dict
    A data dict as produced by pumpp.Pump.transform
interval : slice
    The time interval to sample

Returns
-------
data_slice : dict
    `data` restricted to `interval`.
def ilsr_pairwise_dense(
        comp_mat, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
    fun = functools.partial(
        lsr_pairwise_dense, comp_mat=comp_mat, alpha=alpha)
    return _ilsr(fun, initial_params, max_iter, tol)
Compute the ML estimate of model parameters given dense data.

This function computes the maximum-likelihood (ML) estimate of model
parameters given dense pairwise-comparison data.

The data is described by a pairwise-comparison matrix ``comp_mat`` such
that ``comp_mat[i,j]`` contains the number of times that item ``i`` wins
against item ``j``.

In comparison to :func:`~choix.ilsr_pairwise`, this function is
particularly efficient for dense pairwise-comparison datasets (i.e.,
containing many comparisons for a large fraction of item pairs).

The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of
regularization (see :ref:`regularization` for details).

Parameters
----------
comp_mat : np.array
    2D square matrix describing the pairwise-comparison outcomes.
alpha : float, optional
    Regularization parameter.
initial_params : array_like, optional
    Parameters used to initialize the iterative procedure.
max_iter : int, optional
    Maximum number of iterations allowed.
tol : float, optional
    Maximum L1-norm of the difference between successive iterates to
    declare convergence.

Returns
-------
params : numpy.ndarray
    The ML estimate of model parameters.
def sam_readline(sock, partial=None):
    response = b''
    exception = None
    while True:
        try:
            c = sock.recv(1)
            if not c:
                raise EOFError('SAM connection died. Partial response %r %r' % (partial, response))
            elif c == b'\n':
                break
            else:
                response += c
        except (BlockingIOError, pysocket.timeout) as e:
            if partial is None:
                raise e
            else:
                exception = e
                break
    if partial is None:
        return response.decode('ascii')
    else:
        return (partial + response.decode('ascii'), exception)
read a line from a sam control socket
def _get_token_from_headers(self, request, refresh_token):
    header = request.headers.get(self.config.authorization_header(), None)
    if header is None:
        return None
    else:
        header_prefix_key = "authorization_header_prefix"
        header_prefix = getattr(self.config, header_prefix_key)
        if header_prefix():
            try:
                prefix, token = header.split(" ")
                if prefix != header_prefix():
                    raise Exception
            except Exception:
                raise exceptions.InvalidAuthorizationHeader()
        else:
            token = header
        if refresh_token:
            token = request.json.get(self.config.refresh_token_name())
        return token
Extract the token if present inside the headers of a request.
def content_types(self):
    return EnvironmentContentTypesProxy(self._client, self.space.id, self.id)
Provides access to content type management methods for content types of an environment.

API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/content-types

:return: :class:`EnvironmentContentTypesProxy <contentful_management.space_content_types_proxy.EnvironmentContentTypesProxy>` object.
:rtype: contentful.space_content_types_proxy.EnvironmentContentTypesProxy

Usage:

    >>> space_content_types_proxy = environment.content_types()
    <EnvironmentContentTypesProxy space_id="cfexampleapi" environment_id="master">
def tag(self, text):
    matches = self._match(text.text)
    matches = self._resolve_conflicts(matches)
    if self.return_layer:
        return matches
    else:
        text[self.layer_name] = matches
Retrieves list of regex_matches in text.

Parameters
----------
text: Text
    The estnltk text object to search for events.

Returns
-------
list of matches
def register_error(self, code=1, errmsg=None):
    if errmsg is not None:
        if self.errmsg is None:
            self.errmsg = errmsg
        elif errmsg not in self.errmsg:
            self.errmsg += ", " + errmsg
    if code is not None:
        self.exit_code = max(self.exit_code, code)
Update the exit code, accumulating any supplied error message.
def update_default_iou_values(self):
    try:
        output = yield from gns3server.utils.asyncio.subprocess_check_output(
            self._path, "-h", cwd=self.working_dir, stderr=True)
        # raw-string prefixes added so the regex escapes are not treated
        # as (invalid) string escapes
        match = re.search(r"-n <n>\s+Size of nvram in Kb \(default ([0-9]+)KB\)", output)
        if match:
            self.nvram = int(match.group(1))
        match = re.search(r"-m <n>\s+Megabytes of router memory \(default ([0-9]+)MB\)", output)
        if match:
            self.ram = int(match.group(1))
    except (ValueError, OSError, subprocess.SubprocessError) as e:
        log.warning("could not find default RAM and NVRAM values for {}: {}".format(
            os.path.basename(self._path), e))
Finds the default RAM and NVRAM values for the IOU image.
def reflect_well(value, bounds):
    while value not in bounds:
        value = bounds._max.reflect_left(value)
        value = bounds._min.reflect_right(value)
    return value
Given some boundaries, reflects the value until it falls within both boundaries. This is done iteratively, reflecting left off of the `boundaries.max`, then right off of the `boundaries.min`, etc.

Parameters
----------
value : float
    The value to apply the reflected boundaries to.
bounds : Bounds instance
    Boundaries to reflect between. Both `bounds.min` and `bounds.max` must
    be instances of `ReflectedBound`, otherwise an AttributeError is raised.

Returns
-------
float
    The value after being reflected between the two bounds.
def get_queryset(self):
    queryset = super(IndexView, self).get_queryset()
    search_form = self.get_search_form()
    if search_form.is_valid():
        query_str = search_form.cleaned_data.get('q', '').strip()
        queryset = self.model.objects.search(query_str)
    return queryset
Returns queryset instance. :rtype: django.db.models.query.QuerySet.
def get_tag_context(name, state):
    new_contexts = 0
    ctm = None
    while True:
        try:
            ctx_key, name = name.split('.', 1)
            ctm = state.context.get(ctx_key)
        except ValueError:
            break
        if not ctm:
            break
        else:
            state.context.push(ctm)
            new_contexts += 1
    ctm = state.context.get(name)
    return new_contexts, ctm
Given a tag name, return its associated value as defined in the current context stack.
def sendACK(self, blocknumber=None):
    log.debug("In sendACK, passed blocknumber is %s", blocknumber)
    if blocknumber is None:
        blocknumber = self.context.next_block
    log.info("Sending ack to block %d" % blocknumber)
    ackpkt = TftpPacketACK()
    ackpkt.blocknumber = blocknumber
    self.context.sock.sendto(ackpkt.encode().buffer,
                             (self.context.host, self.context.tidport))
    self.context.last_pkt = ackpkt
This method sends an ack packet to the block number specified. If none is specified, it defaults to the next_block property in the parent context.
def apply(self, im):
    from scipy.ndimage.interpolation import shift
    im = rollaxis(im, self.axis)
    im.setflags(write=True)
    for ind in range(0, im.shape[0]):
        # use a list comprehension (not a lazy map object) so scipy's
        # shift receives a proper sequence under Python 3
        im[ind] = shift(im[ind], [-x for x in self.delta[ind]], mode='nearest')
    im = rollaxis(im, 0, self.axis + 1)
    return im
Apply axis-localized displacements.

Parameters
----------
im : ndarray
    The image or volume to shift
def render_css(self, fn=None, text=None, margin='', indent='\t'):
    fn = fn or os.path.splitext(self.fn)[0] + '.css'
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))
    curdir = os.path.abspath(os.curdir)
    os.chdir(os.path.dirname(fn))
    text = text or self.render_styles()
    if text != '':
        text = sass.compile(string=text)
    os.chdir(curdir)
    return CSS(fn=fn, text=text)
output css using the Sass processor
def load_spectrum(filename):
    import f311
    f = load_with_classes(filename, f311.classes_sp())
    if f:
        return f.spectrum
    return None
Attempts to load spectrum as one of the supported types.

Returns:
    a Spectrum, or None
def extract_bus_routine(page):
    if not isinstance(page, pq):
        page = pq(page)
    stations = extract_stations(page)
    return {
        'name': extract_routine_name(page),
        'stations': stations,
        'current': extract_current_routine(page, stations)
    }
Extract bus routine information from page. :param page: crawled page.
def load(name, **kwargs):
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    ret['changes'] = __salt__['junos.load'](name, **kwargs)
    return ret
Loads the configuration provided onto the junos device.

.. code-block:: yaml

    Install the mentioned config:
      junos:
        - load
        - path: salt://configs/interface.set

.. code-block:: yaml

    Install the mentioned config:
      junos:
        - load
        - template_path: salt://configs/interface.set
        - template_vars:
            interface_name: lo0
            description: Creating interface via SaltStack.

name
    Path where the configuration/template file is present. If the file has
    a ``*.conf`` extension, the content is treated as text format. If the
    file has a ``*.xml`` extension, the content is treated as XML format.
    If the file has a ``*.set`` extension, the content is treated as Junos
    OS ``set`` commands.

overwrite : False
    Set to ``True`` if you want this file to completely replace the
    configuration file.

replace : False
    Specify whether the configuration file uses "replace:" statements.
    Only those statements under the 'replace' tag will be changed.

format
    Determines the format of the contents.

update : False
    Compare a complete loaded configuration against the candidate
    configuration. For each hierarchy level or configuration object that
    is different in the two configurations, the version in the loaded
    configuration replaces the version in the candidate configuration.
    When the configuration is later committed, only system processes that
    are affected by the changed configuration elements parse the new
    configuration. This action is supported from PyEZ 2.1
    (default = False)

template_vars
    Variables to be passed into the template processing engine in addition
    to those present in __pillar__, __opts__, __grains__, etc. You may
    reference these variables in your template like so:
    {{ template_vars["var_name"] }}
def validate(self, val):
    if val in self.values:
        return True, None
    else:
        return False, "'%s' is not in enum: %s" % (val, str(self.values))
Validates that the val is in the list of values for this Enum.

Returns two element tuple: (bool, string)

- `bool` - True if valid, False if not
- `string` - Description of validation error, or None if valid

:Parameters:
    val
        Value to validate. Should be a string.
def fermi_dist(energy, beta):
    exponent = np.asarray(beta * energy).clip(-600, 600)
    return 1. / (np.exp(exponent) + 1)
Fermi-Dirac distribution
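A quick usage sketch (assumes numpy is imported as np, as the snippet requires); at large beta the distribution approaches a step function at zero energy:

    import numpy as np

    energies = np.linspace(-2.0, 2.0, 5)
    fermi_dist(energies, beta=10.0)
    # ~1 for E < 0, exactly 0.5 at E = 0, ~0 for E > 0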
def __parse_drac(output):
    drac = {}
    section = ''
    for i in output.splitlines():
        if i.strip().endswith(':') and '=' not in i:
            section = i[0:-1]
            drac[section] = {}
        if i.rstrip() and '=' in i:
            if section in drac:
                drac[section].update(dict(
                    [[prop.strip() for prop in i.split('=')]]
                ))
            else:
                section = i.strip()
                if section not in drac and section:
                    drac[section] = {}
    return drac
Parse Dell DRAC output
def transform(self, context, handler, result):
    handler = handler.__func__ if hasattr(handler, '__func__') else handler
    annotation = getattr(handler, '__annotations__', {}).get('return', None)
    if annotation:
        return (annotation, result)
    return result
Transform the value returned by the controller endpoint. This extension transforms returned values if the endpoint has a return type annotation.
def to_node(value):
    if isinstance(value, Node):
        return value
    elif isinstance(value, str):
        return Node('string', value=value, pseudo_type='String')
    # check bool before int: bool is a subclass of int and would otherwise
    # be swallowed by the int branch
    elif isinstance(value, bool):
        return Node('boolean', value=str(value).lower(), pseudo_type='Boolean')
    elif isinstance(value, int):
        return Node('int', value=value, pseudo_type='Int')
    elif isinstance(value, float):
        return Node('float', value=value, pseudo_type='Float')
    elif value is None:
        return Node('null', pseudo_type='Void')
    else:
        # the original ended with a bare `1/0` crash; raise explicitly
        raise TypeError('cannot convert %r to a literal node' % (value,))
Expand a basic type to a literal node; otherwise just return the node
def extract_fieldnames(config):
    fields = []
    for x in get_fields(config):
        if x in fields:
            fields.append(x + '_' + str(fields.count(x) + 1))
        else:
            fields.append(x)
    return fields
Function to return a list of unique field names from the config file

:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
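A standalone sketch of the deduplication rule above (the input list is hypothetical; get_fields would normally supply the raw names):

    names = ['title', 'date', 'title']
    fields = []
    for x in names:
        fields.append(x + '_' + str(fields.count(x) + 1) if x in fields else x)
    # fields == ['title', 'date', 'title_2']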
def write_config(self):
    json.dump(
        self.config,
        open(CONFIG_FILE, 'w'),
        indent=4,
        separators=(',', ': ')
    )
    return True
Write the configuration to a local file. :return: Boolean if successful
def list_records(self, file_const=None):
    for r in self._dataset.files:
        if file_const and r.minor_type != file_const:
            continue
        yield self.instance_from_name(r.path)
Iterate through the file records
def _validate_install(self):
    self.printer('Checking heroku installation ... ', flush=True)
    from os import devnull
    from subprocess import call, check_output
    sys_command = 'heroku --version'
    try:
        call(sys_command, shell=True, stdout=open(devnull, 'wb'))
    except Exception as err:
        self.printer('ERROR')
        raise Exception('"heroku cli" not installed. GoTo: https://devcenter.heroku.com/articles/heroku-cli')
    self.printer('done.')
    return True
a method to validate heroku is installed
def set_status(self, enabled):
    self.__manual_update_time = time.time()
    if enabled:
        data = self._controller.command(self._id, 'auto_conditioning_start',
                                        wake_if_asleep=True)
        if data['response']['result']:
            self.__is_auto_conditioning_on = True
            self.__is_climate_on = True
    else:
        data = self._controller.command(self._id, 'auto_conditioning_stop',
                                        wake_if_asleep=True)
        if data['response']['result']:
            self.__is_auto_conditioning_on = False
            self.__is_climate_on = False
    self.update()
Enable or disable the HVAC.
def load(self, config):
    password_dict = {}
    if config is None:
        logger.warning("No configuration file available. Cannot load password list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load password list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        password_dict = dict(config.items(self._section))
        logger.info("%s password(s) loaded from the configuration file" % len(password_dict))
        logger.debug("Password dictionary: %s" % password_dict)
    return password_dict
Load the password from the configuration file.
def kernels_push_cli(self, folder):
    folder = folder or os.getcwd()
    result = self.kernels_push(folder)
    if result is None:
        print('Kernel push error: see previous output')
    elif not result.error:
        if result.invalidTags:
            print('The following are not valid tags and could not be added '
                  'to the kernel: ' + str(result.invalidTags))
        if result.invalidDatasetSources:
            print('The following are not valid dataset sources and could not '
                  'be added to the kernel: ' + str(result.invalidDatasetSources))
        if result.invalidCompetitionSources:
            print('The following are not valid competition sources and could '
                  'not be added to the kernel: ' + str(result.invalidCompetitionSources))
        if result.invalidKernelSources:
            print('The following are not valid kernel sources and could not '
                  'be added to the kernel: ' + str(result.invalidKernelSources))
        if result.versionNumber:
            print('Kernel version %s successfully pushed. Please check '
                  'progress at %s' % (result.versionNumber, result.url))
        else:
            print('Kernel version successfully pushed. Please check '
                  'progress at %s' % result.url)
    else:
        print('Kernel push error: ' + result.error)
client wrapper for kernels_push, with same arguments.
def startAll(self):
    self.logger.info("Starting all workers...")
    for worker in self.getWorkers():
        process = self.getWorker(worker)
        self.logger.debug("Starting {0}".format(process.name))
        process.start()
    self.logger.info("Started all workers")
Start all registered Workers.
def _read_channel(channel, stream, start, duration):
    channel_type = lalframe.FrStreamGetTimeSeriesType(channel, stream)
    read_func = _fr_type_map[channel_type][0]
    d_type = _fr_type_map[channel_type][1]
    data = read_func(stream, channel, start, duration, 0)
    return TimeSeries(data.data.data, delta_t=data.deltaT,
                      epoch=start, dtype=d_type)
Get channel using lalframe
def any_validator(obj, validators, **kwargs):
    if not len(validators) > 1:
        raise ValueError(
            "any_validator requires at least 2 validators. Only got "
            "{0}".format(len(validators))
        )
    errors = ErrorDict()
    for key, validator in validators.items():
        try:
            validator(obj, **kwargs)
        except ValidationError as err:
            errors[key] = err.detail
        else:
            break
    else:
        if len(errors) == 1:
            # list() so the values view is indexable under Python 3
            error = list(errors.values())[0]
            raise ValidationError(error)
        else:
            errors.raise_()
Attempt multiple validators on an object.

- If any pass, then all validation passes.
- Otherwise, raise all of the errors.
def rotate(self):
    self._index -= 1
    if self._index >= 0:
        return self._ring[self._index]
    return None
Rotate the kill ring, then yank back the new top.

Returns
-------
A text string or None.
def export_gpx_file(self):
    gpx = create_elem('gpx', GPX_ELEM_ATTRIB)
    if not self.metadata.bounds:
        self.metadata.bounds = [j for i in self for j in i]
    gpx.append(self.metadata.togpx())
    track = create_elem('trk')
    gpx.append(track)
    for segment in self:
        chunk = create_elem('trkseg')
        track.append(chunk)
        for place in segment:
            chunk.append(place.togpx())
    return etree.ElementTree(gpx)
Generate GPX element tree from ``Trackpoints``.

Returns:
    etree.ElementTree: GPX element tree depicting ``Trackpoints`` objects
def privacy_options_view(request):
    if "user" in request.GET:
        user = User.objects.user_with_ion_id(request.GET.get("user"))
    elif "student_id" in request.GET:
        user = User.objects.user_with_student_id(request.GET.get("student_id"))
    else:
        user = request.user

    if not user:
        messages.error(request, "Invalid user.")
        user = request.user
    if user.is_eighthoffice:
        user = None

    if user:
        if request.method == "POST":
            privacy_options_form = save_privacy_options(request, user)
        else:
            privacy_options = get_privacy_options(user)
            privacy_options_form = PrivacyOptionsForm(user, initial=privacy_options)
        context = {"privacy_options_form": privacy_options_form, "profile_user": user}
    else:
        context = {"profile_user": user}
    return render(request, "preferences/privacy_options.html", context)
View and edit privacy options for a user.
def protect_pip_from_modification_on_windows(modifying_pip):
    pip_names = [
        "pip.exe",
        "pip{}.exe".format(sys.version_info[0]),
        "pip{}.{}.exe".format(*sys.version_info[:2])
    ]
    should_show_use_python_msg = (
        modifying_pip and WINDOWS and
        os.path.basename(sys.argv[0]) in pip_names
    )
    if should_show_use_python_msg:
        new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
        raise CommandError(
            'To modify pip, please run the following command:\n{}'
            .format(" ".join(new_command))
        )
Protection of pip.exe from modification on Windows.

On Windows, any operation modifying pip should be run as:

    python -m pip ...
def close(self):
    if self._connection:
        self._connection_file.close()
        self._connection_file = None
        self._connection.close()
        self._connection = None
Closes connection with the q service.
def knapsack_iterative(items, maxweight):
    weights = [t[1] for t in items]
    max_exp = max([number_of_decimals(w_) for w_ in weights])
    coeff = 10 ** max_exp
    int_maxweight = int(maxweight * coeff)
    int_items = [(v, int(w * coeff), idx) for v, w, idx in items]
    return knapsack_iterative_int(int_items, int_maxweight)
Scales the fractional weights to integers (items = int_items, maxweight = int_maxweight) and delegates to knapsack_iterative_int; see the sketch below.
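A sketch of the scaling step this function performs before delegating to the integer solver (toy numbers):

    items = [(10, 0.5, 0), (7, 1.25, 1)]  # (value, weight, index)
    coeff = 10 ** 2                        # 2 = most decimals in any weight
    int_items = [(v, int(w * coeff), idx) for v, w, idx in items]
    # int_items == [(10, 50, 0), (7, 125, 1)]; maxweight is scaled the same way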
def get_stream_formats(self, media_item):
    scraper = ScraperApi(self._ajax_api._connector)
    formats = scraper.get_media_formats(media_item.media_id)
    return formats
Get the available media formats for a given media item

@param crunchyroll.models.Media
@return dict
def run(self):
    qry = os.path.abspath(self.qry)
    ref = os.path.abspath(self.ref)
    outfile = os.path.abspath(self.outfile)
    tmpdir = tempfile.mkdtemp(prefix='tmp.run_nucmer.', dir=os.getcwd())
    original_dir = os.getcwd()
    os.chdir(tmpdir)
    script = 'run_nucmer.sh'
    self._write_script(script, ref, qry, outfile)
    syscall.run('bash ' + script, verbose=self.verbose)
    os.chdir(original_dir)
    shutil.rmtree(tmpdir)
Change to a temp directory.
Run bash script containing commands.
Place results in specified output file.
Clean up temp directory.
def create_intent(self,
                  parent,
                  intent,
                  language_code=None,
                  intent_view=None,
                  retry=google.api_core.gapic_v1.method.DEFAULT,
                  timeout=google.api_core.gapic_v1.method.DEFAULT,
                  metadata=None):
    if 'create_intent' not in self._inner_api_calls:
        self._inner_api_calls[
            'create_intent'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.create_intent,
                default_retry=self._method_configs['CreateIntent'].retry,
                default_timeout=self._method_configs['CreateIntent'].timeout,
                client_info=self._client_info,
            )

    request = intent_pb2.CreateIntentRequest(
        parent=parent,
        intent=intent,
        language_code=language_code,
        intent_view=intent_view,
    )
    return self._inner_api_calls['create_intent'](
        request, retry=retry, timeout=timeout, metadata=metadata)
Creates an intent in the specified agent.

Example:
    >>> import dialogflow_v2
    >>>
    >>> client = dialogflow_v2.IntentsClient()
    >>>
    >>> parent = client.project_agent_path('[PROJECT]')
    >>>
    >>> # TODO: Initialize ``intent``:
    >>> intent = {}
    >>>
    >>> response = client.create_intent(parent, intent)

Args:
    parent (str): Required. The agent to create a intent for. Format:
        ``projects/<Project ID>/agent``.
    intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]):
        Required. The intent to create. If a dict is provided, it must be
        of the same form as the protobuf message
        :class:`~google.cloud.dialogflow_v2.types.Intent`
    language_code (str): Optional. The language of training phrases,
        parameters and rich messages defined in ``intent``. If not
        specified, the agent's default language is used. [More than a
        dozen languages](https://dialogflow.com/docs/reference/language)
        are supported. Note: languages must be enabled in the agent,
        before they can be used.
    intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional.
        The resource view to apply to the returned intent.
    retry (Optional[google.api_core.retry.Retry]): A retry object used to
        retry requests. If ``None`` is specified, requests will not be
        retried.
    timeout (Optional[float]): The amount of time, in seconds, to wait for
        the request to complete. Note that if ``retry`` is specified, the
        timeout applies to each individual attempt.
    metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
        that is provided to the method.

Returns:
    A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.

Raises:
    google.api_core.exceptions.GoogleAPICallError: If the request failed
        for any reason.
    google.api_core.exceptions.RetryError: If the request failed due to a
        retryable error and retry attempts failed.
    ValueError: If the parameters are invalid.
def preprocess_cell(
    self, cell: "NotebookNode", resources: dict, index: int
) -> Tuple["NotebookNode", dict]:
    if cell.cell_type == "markdown":
        variables = cell["metadata"].get("variables", {})
        if len(variables) > 0:
            cell.source = self.replace_variables(cell.source, variables)
        if resources.get("delete_pymarkdown", False):
            del cell.metadata["variables"]
    return cell, resources
Preprocess cell.

Parameters
----------
cell : NotebookNode
    Notebook cell being processed
resources : dictionary
    Additional resources used in the conversion process. Allows
    preprocessors to pass variables into the Jinja engine.
index : int
    Index of the cell being processed (see base.py)
def _main(self):
    self.set_proctitle(self.name)
    self.set_signal_handler()
    logger.info("process for module %s is now running (pid=%d)", self.name, os.getpid())
    try:
        self.main()
    except (IOError, EOFError):
        pass
    except Exception as exp:
        logger.exception('main function exception: %s', exp)
    self.do_stop()
    logger.info("process for module %s is now exiting (pid=%d)", self.name, os.getpid())
    exit()
module "main" method. Only used by external modules. :return: None
def import_mapping(connection_id, mapping):
    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
                       'connections', connection_id, 'actions', 'import')
    response = requests.post(
        url=url,
        json=mapping,
        headers=_get_authorization_headers()
    )
    response.raise_for_status()
Import Heroku Connection mapping for given connection.

Args:
    connection_id (str): Heroku Connection connection ID.
    mapping (dict): Heroku Connect mapping.

Raises:
    requests.HTTPError: If an error occurs uploading the mapping.
    ValueError: If the mapping is not JSON serializable.
def parse(
        files,
        config=None,
        compilation_mode=COMPILATION_MODE.FILE_BY_FILE,
        cache=None):
    if not config:
        config = xml_generator_configuration_t()
    parser = project_reader_t(config=config, cache=cache)
    declarations = parser.read_files(files, compilation_mode)
    config.xml_generator_from_xml_file = parser.xml_generator_from_xml_file
    return declarations
Parse header files.

:param files: The header files that should be parsed
:type files: list of str
:param config: Configuration object or None
:type config: :class:`parser.xml_generator_configuration_t`
:param compilation_mode: Determines whether the files are parsed
    individually or as one single chunk
:type compilation_mode: :class:`parser.COMPILATION_MODE`
:param cache: Declaration cache (None=no cache)
:type cache: :class:`parser.cache_base_t` or str
:rtype: list of :class:`declarations.declaration_t`
def create_organization(self, name):
    log.warning('Creating organization...')
    url = 'rest/servicedeskapi/organization'
    data = {'name': name}
    return self.post(url, headers=self.experimental_headers, data=data)
To create an organization, Jira administrator global permission or agent permission is required, depending on the settings.

:param name: str
:return: Organization data
def show_settings(self):
    self.notes.config.put_values()
    self.overview.config.put_values()
    self.settings.config.put_values()
    self.spectrum.config.put_values()
    self.traces.config.put_values()
    self.video.config.put_values()
    self.settings.show()
Open the Settings window, after updating the values in the GUI.
def decode(vol, filename, content):
    bbox = Bbox.from_filename(filename)
    content_len = len(content) if content is not None else 0

    if not content:
        if vol.fill_missing:
            content = ''
        else:
            raise EmptyVolumeException(filename)

    shape = list(bbox.size3()) + [vol.num_channels]
    try:
        return chunks.decode(
            content,
            encoding=vol.encoding,
            shape=shape,
            dtype=vol.dtype,
            block_size=vol.compressed_segmentation_block_size,
        )
    except Exception as error:
        print(red('File Read Error: {} bytes, {}, {}, errors: {}'.format(
            content_len, bbox, filename, error)))
        raise
Decode content according to settings in a cloudvolume instance.
def select_fields(self, *fields):
    if fields:
        if not isinstance(fields[0], basestring):
            fields = list(fields[0]) + list(fields)[1:]
        for field_name in fields:
            field_name = self._normalize_field_name(field_name)
            self.select_field(field_name)
    return self
set multiple fields to be selected
def clean_value(self):
    result = []
    for mdl in self:
        result.append(super(ListNode, mdl).clean_value())
    return result
Populates json serialization ready data.
This is the method used to serialize and store the object data in to DB

Returns:
    List of dicts.
def matches(self, *specs):
    for spec in specs:
        if ':' in spec:
            app_name, endpoint_name = spec.split(':')
        else:
            app_name, endpoint_name = spec, None
        for endpoint in self.endpoints:
            if app_name == endpoint.application.name and \
                    endpoint_name in (endpoint.name, None):
                break
        else:
            return False
    return True
Check if this relation matches relationship specs.

Relation specs are strings that would be given to Juju to establish a
relation, and should be in the form ``<application>[:<endpoint_name>]``
where the ``:<endpoint_name>`` suffix is optional. If the suffix is
omitted, this relation will match on any endpoint as long as the given
application is involved.

In other words, this relation will match a spec if that spec could have
created this relation.

:return: True if all specs match.
def encode_numeric(self):
    with io.StringIO() as buf:
        for triplet in self.grouper(3, self.data):
            number = ''
            for digit in triplet:
                if isinstance(digit, int):
                    digit = chr(digit)
                if digit:
                    number = ''.join([number, digit])
                else:
                    break
            if len(number) == 1:
                bin = self.binary_string(number, 4)
            elif len(number) == 2:
                bin = self.binary_string(number, 7)
            else:
                bin = self.binary_string(number, 10)
            buf.write(bin)
        return buf.getvalue()
This method encodes the QR code's data if its mode is numeric. It returns the data encoded as a binary string.
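An illustration of the bit widths the code above assigns (toy data): triples of digits take 10 bits, a trailing pair takes 7, and a lone digit takes 4.

    data = '01234567'
    groups = [data[i:i + 3] for i in range(0, len(data), 3)]  # ['012', '345', '67']
    widths = [{1: 4, 2: 7}.get(len(g), 10) for g in groups]   # [10, 10, 7]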
def get_chunk(self, chunk_id):
    if chunk_id in self.idx:
        return Cchunk(self.idx[chunk_id], self.type)
    else:
        return None
Returns the chunk object for the supplied identifier

@type chunk_id: string
@param chunk_id: chunk identifier
def delete_table(self, table_name):
    data = {'TableName': table_name}
    json_input = json.dumps(data)
    return self.make_request('DeleteTable', json_input)
Deletes the table and all of its data. After this request the table will
be in the DELETING state until DynamoDB completes the delete operation.

:type table_name: str
:param table_name: The name of the table to delete.
def colored(text, color=None, on_color=None, attrs=None, ansi_code=None):
    if os.getenv('ANSI_COLORS_DISABLED') is None:
        if ansi_code is not None:
            return "\033[38;5;{}m{}\033[0m".format(ansi_code, text)
        fmt_str = '\033[%dm%s'
        if color is not None:
            text = re.sub(COLORS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (COLORS[color], text)
        if on_color is not None:
            text = re.sub(HIGHLIGHTS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (HIGHLIGHTS[on_color], text)
        if attrs is not None:
            text = re.sub(ATTRIBUTES_RE + '(.*?)' + RESET_RE, r'\1', text)
            for attr in attrs:
                text = fmt_str % (ATTRIBUTES[attr], text)
        return text + RESET
    else:
        return text
Colorize text, while stripping nested ANSI color sequences.

Author: Konstantin Lepa <konstantin.lepa@gmail.com> / termcolor

Available text colors:
    red, green, yellow, blue, magenta, cyan, white.

Available text highlights:
    on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.

Available attributes:
    bold, dark, underline, blink, reverse, concealed.

Example:
    colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
    colored('Hello, World!', 'green')
def list_passwords(kwargs=None, call=None):
    response = _query('support', 'password/list')
    ret = {}
    for item in response['list']:
        if 'server' in item:
            server = item['server']['name']
            if server not in ret:
                ret[server] = []
            ret[server].append(item)
    return ret
List all passwords on the account

.. versionadded:: 2015.8.0
def init_extension(self, app):
    app.config.setdefault('CACHE_VERSION', '0')
    app.config.setdefault('CACHE_PREFIX', 'r')
    app.config.setdefault('CACHE_BACKEND', 'rio.exts.flask_cache.NullBackend')
    app.config.setdefault('CACHE_BACKEND_OPTIONS', {})
Initialize cache instance.
def randomEarlyShared(store, role):
    for r in role.allRoles():
        share = store.findFirst(Share, Share.sharedTo == r,
                                sort=Share.storeID.ascending)
        if share is not None:
            return share.sharedItem
    raise NoSuchShare("Why, that user hasn't shared anything at all!")
If there are no explicitly-published public index pages to display, find a shared item to present to the user first.
def TokenClient(
    domain,
    token,
    user_agent=None,
    request_encoder=default_request_encoder,
    response_decoder=default_response_decoder,
):
    return AuthorizingClient(
        domain,
        transport.TokenAuthorization(token),
        request_encoder,
        response_decoder,
        user_agent=user_agent
    )
Creates a Freshbooks client for a freshbooks domain, using token-based auth.

The optional request_encoder and response_decoder parameters can be passed
the logging_request_encoder and logging_response_decoder objects from this
module, or custom encoders, to aid debugging or change the behaviour of
refreshbooks' request-to-XML-to-response mapping.

The optional user_agent keyword parameter can be used to specify the user
agent string passed to FreshBooks. If unset, a default user agent string
is used.
def _removeTags(tags, objects):
    for t in tags:
        for o in objects:
            o.tags.remove(t)
    return True
Removes tags from objects
def open_inbox_page(self, content_type):
    from .inbox_page import InboxPage

    with self.term.loader('Loading inbox'):
        page = InboxPage(self.reddit, self.term, self.config,
                         self.oauth, content_type=content_type)
    if not self.term.loader.exception:
        return page
Open an instance of the inbox page for the logged in user.
def aggregate(self, search):
    for f, facet in iteritems(self.facets):
        agg = facet.get_aggregation()
        agg_filter = MatchAll()
        for field, filter in iteritems(self._filters):
            if f == field:
                continue
            agg_filter &= filter
        search.aggs.bucket(
            '_filter_' + f, 'filter', filter=agg_filter
        ).bucket(f, agg)
Add aggregations representing the facets selected, including potential filters.
def is_address_guard(self, address):
    try:
        mbi = self.mquery(address)
    except WindowsError:
        e = sys.exc_info()[1]
        if e.winerror == win32.ERROR_INVALID_PARAMETER:
            return False
        raise
    return mbi.is_guard()
Determines if an address belongs to a guard page.

@note: Returns always C{False} for kernel mode addresses.

@type address: int
@param address: Memory address to query.

@rtype: bool
@return: C{True} if the address belongs to a guard page.

@raise WindowsError: An exception is raised on error.
def spheres_intersect(ar, aR, br, bR):
    return vector.vector_mag_sq(ar - br) < (aR + bR) ** 2
Return whether or not two spheres intersect each other.

Parameters
----------
ar, br : array-like, shape (n,) in n dimensions
    Coordinates of the centres of the spheres `a` and `b`.
aR, bR : float
    Radii of the spheres `a` and `b`.

Returns
-------
intersecting : boolean
    True if the spheres intersect.
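A quick usage sketch (assumes numpy as np and the vector helper module from the snippet; works in any dimension):

    import numpy as np

    a, b = np.array([0.0, 0.0]), np.array([3.0, 0.0])
    spheres_intersect(a, 2.0, b, 2.0)  # True: centres 3 apart, radii sum to 4
    spheres_intersect(a, 1.0, b, 1.0)  # False: radii sum to 2 < 3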
def check_internet_on(secrets_file_path):
    while True:
        if internet_on() is True and not os.path.exists(secrets_file_path):
            break
        else:
            print("Turn on your internet and unplug your USB to continue...")
            time.sleep(10)
    return True
If internet on and USB unplugged, returns true. Else, continues to wait...
def load_modules(self, modules, config):
    for pluginclass in get_plugin_classes(modules):
        name = pluginclass.__name__
        if name in config["enabledplugins"]:
            if issubclass(pluginclass, _ConnectionPlugin):
                log.debug(LOG_PLUGIN, "Enable connection plugin %s", name)
                self.connection_plugins.append(pluginclass(config[name]))
            elif issubclass(pluginclass, _ContentPlugin):
                log.debug(LOG_PLUGIN, "Enable content plugin %s", name)
                self.content_plugins.append(pluginclass(config[name]))
            elif issubclass(pluginclass, _ParserPlugin):
                log.debug(LOG_PLUGIN, "Enable parser plugin %s", name)
                self.parser_plugins.append(pluginclass(config[name]))
            else:
                raise ValueError("Invalid plugin class %s" % pluginclass)
Load plugin modules.
def get_pkg_version():
    try:
        with open("PKG-INFO", "r") as fp:
            rgx = re.compile(r"Version: (\d+)")
            for line in fp.readlines():
                match = rgx.match(line)
                if match:
                    return match.group(1)
    except IOError:
        return None
Get version string by parsing PKG-INFO.
def ip_to_int(ip):
    ret = 0
    for octet in ip.split('.'):
        ret = ret * 256 + int(octet)
    return ret
Converts an IP address to an integer
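For example, ip_to_int('10.0.0.1') returns 167772161. The inverse, shown here purely for illustration:

    def int_to_ip(n):
        return '.'.join(str((n >> s) & 0xFF) for s in (24, 16, 8, 0))

    int_to_ip(167772161)  # '10.0.0.1'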
def run_query_series(queries, conn):
    results = []
    for item in queries:
        qry = item
        kwargs = {}
        if isinstance(item, tuple):
            qry = item[0]
            kwargs = item[1]
        result = conn.update_query(qry, **kwargs)
        results.append(result)
    return results
Iterates through a list of queries and runs them through the connection

Args:
-----
    queries: list of strings or tuples containing (query_string, kwargs)
    conn: the triplestore connection to use
def changes_found(self):
    if self.dest is None:
        warnings.warn("dest directory not found!")
    if self.src is None:
        warnings.warn("src directory not found!")
    if self.src is None or self.dest is None:
        return False

    dest_mtime = -1
    src_mtime = os.path.getmtime(self.src)
    if os.path.exists(self.dest):
        dest_mtime = os.path.getmtime(self.dest)
    if src_mtime >= dest_mtime:
        return True

    for folder, _, files in os.walk(self.src):
        for filename in fnmatch.filter(files, '*.scss'):
            src_path = os.path.join(folder, filename)
            if os.path.getmtime(src_path) >= dest_mtime:
                return True
    return False
Returns True if the target folder is older than the source folder.
def _merge_colormaps(kwargs):
    from trollimage.colormap import Colormap
    full_cmap = None

    palette = kwargs['palettes']
    if isinstance(palette, Colormap):
        full_cmap = palette
    else:
        for itm in palette:
            cmap = create_colormap(itm)
            cmap.set_range(itm["min_value"], itm["max_value"])
            if full_cmap is None:
                full_cmap = cmap
            else:
                full_cmap = full_cmap + cmap
    return full_cmap
Merge colormaps listed in kwargs.
def visit_const(self, node, parent):
    return nodes.Const(
        node.value,
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent,
    )
visit a Const node by returning a fresh instance of it
def _process_article_phene_row(self, row):
    phenotype_id = self.id_hash['phene'].get(row['phene_id'])
    article_id = self.id_hash['article'].get(row['article_id'])

    omia_id = self._get_omia_id_from_phene_id(phenotype_id)
    if self.test_mode or omia_id not in self.test_ids['disease'] \
            or phenotype_id is None or article_id is None:
        return

    self.graph.addTriple(
        article_id, self.globaltt['is_about'], phenotype_id)
    return
Linking articles to species-specific phenes.

:param row:
:return:
def _extract_tls_session_ticket(ssl_session: nassl._nassl.SSL_SESSION) -> str:
    session_string = ((ssl_session.as_text()).split('TLS session ticket:'))[1]
    session_tls_ticket = (session_string.split('Compression:'))[0]
    return session_tls_ticket
Extract the TLS session ticket from an SSL session object, or raise IndexError if the ticket was not set.
def crop_resize_image(image: Image.Image, size) -> Image.Image:
    # annotations corrected to PIL images: the original np.ndarray hints
    # contradicted the .size/.crop/.resize usage and the docstring below
    width, height = image.size
    if width > height:
        left = (width - height) / 2
        right = width - left
        top = 0
        bottom = height
    else:
        top = (height - width) / 2
        bottom = height - top
        left = 0
        right = width
    image = image.crop((left, top, right, bottom))
    image = image.resize(size, Image.ANTIALIAS)
    return image
Resize the input image.

:param image: Original image which is a PIL object.
:param size: Tuple of height and width to resize the image to.
:return: Resized image which is a PIL object
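A minimal usage sketch (the file path is hypothetical; requires Pillow, and Image.ANTIALIAS as used in the snippet is only available in older Pillow releases):

    from PIL import Image

    img = Image.open('photo.jpg')
    thumb = crop_resize_image(img, (224, 224))  # square centre crop, then resize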
def attach_remote_media(self, url, username=None, password=None):
    self.oem_init()
    return self._oem.attach_remote_media(url, username, password)
Attach remote media by url

Given a url, attach remote media (cd/usb image) to the target system.

:param url: URL to indicate where to find image (protocol support varies
    by BMC)
:param username: Username for endpoint to use when accessing the URL. If
    applicable, 'domain' would be indicated by '@' or '\' syntax.
:param password: Password for endpoint to use when accessing the URL.
def _cost_method(self, *args, **kwargs):
    cost_val = self.thresh * nuclear_norm(cube2matrix(args[0]))

    if 'verbose' in kwargs and kwargs['verbose']:
        print(' - NUCLEAR NORM (X):', cost_val)

    return cost_val
Calculate low-rank component of the cost.

This method returns the nuclear norm error of the deconvolved data in
matrix form.

Returns
-------
float
    low-rank cost component
def timex_starts(self):
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.starts(TIMEXES)
The list of start positions of ``timexes`` layer elements.
def invert_projection(self, X, identities):
    distances = self.transform(X)

    if len(distances) != len(identities):
        raise ValueError("X and identities are not the same length: "
                         "{0} and {1}".format(len(X), len(identities)))

    node_match = []
    for d in distances.__getattribute__(self.argfunc)(0):
        node_match.append(identities[d])

    return np.array(node_match)
Calculate the inverted projection.

The inverted projection of a SOM is created by associating each weight
with the input which matches it the most, thus giving a good
approximation of the "influence" of each input item.

Works best for symbolic (instead of continuous) input data.

Parameters
----------
X : numpy array
    Input data
identities : list
    A list of names for each of the input data. Must be the same length
    as X.

Returns
-------
m : numpy array
    An array with the same shape as the map
def visit_augassign(self, node, parent):
    newnode = nodes.AugAssign(
        self._bin_op_classes[type(node.op)] + "=",
        node.lineno,
        node.col_offset,
        parent,
    )
    newnode.postinit(
        self.visit(node.target, newnode), self.visit(node.value, newnode)
    )
    return newnode
visit an AugAssign node by returning a fresh instance of it
def get_dict_for_mongodb_queries(self):
    d = {}
    return d
    # NOTE: the statements below are unreachable (dead code kept from the
    # original source)
    all_structures = [task.input.structure for task in self.iflat_tasks()]
    all_pseudos = [task.input.pseudos for task in self.iflat_tasks()]
This function returns a dictionary with the attributes that will be put in the mongodb document to facilitate the query. Subclasses may want to replace or extend the default behaviour.
def _validate_planar_fault_geometry(self, node, _float_re):
    valid_spacing = node["spacing"]
    for key in ["topLeft", "topRight", "bottomLeft", "bottomRight"]:
        lon = getattr(node, key)["lon"]
        lat = getattr(node, key)["lat"]
        depth = getattr(node, key)["depth"]
        valid_lon = (lon >= -180.0) and (lon <= 180.0)
        valid_lat = (lat >= -90.0) and (lat <= 90.0)
        valid_depth = (depth >= 0.0)
        is_valid = valid_lon and valid_lat and valid_depth
        if not is_valid or not valid_spacing:
            raise LogicTreeError(
                node, self.filename,
                "'planarFaultGeometry' node is not valid")
Validates a node representation of a planar fault geometry
def backward_delete_char(event):
    " Delete the character behind the cursor. "
    if event.arg < 0:
        deleted = event.current_buffer.delete(count=-event.arg)
    else:
        deleted = event.current_buffer.delete_before_cursor(count=event.arg)
    if not deleted:
        event.cli.output.bell()
Delete the character behind the cursor.
def setduration(self, **duration):
    if len(duration) == 1:
        arg = [x[0] for x in duration.items()]
        if not arg[0] in self.units:
            raise Exception('must be: %s' % str(self.units))
        self.duration = arg
    return self
Set the caching duration which defines how long the file will be cached.

@param duration: The cached file duration which defines how long the
    file will be cached. A duration=0 means forever. The duration may
    be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
def structure(cls):
    downstream = cls.cutter.elucidate()
    upstream = str(Seq(downstream).reverse_complement())
    return "".join(
        [
            upstream.replace("^", ")(").replace("_", "("),
            "N*",
            downstream.replace("^", ")(").replace("_", ")"),
        ]
    )
Get the vector structure, as a DNA regex pattern.

Warning:
    If overloading this method, the returned pattern must include 3
    capture groups to capture the following features:

    1. The downstream (3') overhang sequence
    2. The vector placeholder sequence
    3. The upstream (5') overhang sequence
def wait_for_save(filename, timeout=5):
    modification_time = os.path.getmtime(filename)
    start_time = time.time()
    while time.time() < start_time + timeout:
        if (os.path.getmtime(filename) > modification_time and
                os.path.getsize(filename) > 0):
            return True
        time.sleep(0.2)
    return False
Waits for FILENAME to update, waiting up to TIMEOUT seconds. Returns True if a save was detected, and False otherwise.
def geolocate(client, home_mobile_country_code=None,
              home_mobile_network_code=None, radio_type=None, carrier=None,
              consider_ip=None, cell_towers=None, wifi_access_points=None):
    params = {}
    if home_mobile_country_code is not None:
        params["homeMobileCountryCode"] = home_mobile_country_code
    if home_mobile_network_code is not None:
        params["homeMobileNetworkCode"] = home_mobile_network_code
    if radio_type is not None:
        params["radioType"] = radio_type
    if carrier is not None:
        params["carrier"] = carrier
    if consider_ip is not None:
        params["considerIp"] = consider_ip
    if cell_towers is not None:
        params["cellTowers"] = cell_towers
    if wifi_access_points is not None:
        params["wifiAccessPoints"] = wifi_access_points

    return client._request("/geolocation/v1/geolocate", {},
                           base_url=_GEOLOCATION_BASE_URL,
                           extract_body=_geolocation_extract,
                           post_json=params)
The Google Maps Geolocation API returns a location and accuracy radius
based on information about cell towers and WiFi nodes given.

See https://developers.google.com/maps/documentation/geolocation/intro
for more info, including more detail for each parameter below.

:param home_mobile_country_code: The mobile country code (MCC) for the
    device's home network.
:type home_mobile_country_code: string

:param home_mobile_network_code: The mobile network code (MNC) for the
    device's home network.
:type home_mobile_network_code: string

:param radio_type: The mobile radio type. Supported values are lte, gsm,
    cdma, and wcdma. While this field is optional, it should be included
    if a value is available, for more accurate results.
:type radio_type: string

:param carrier: The carrier name.
:type carrier: string

:param consider_ip: Specifies whether to fall back to IP geolocation if
    wifi and cell tower signals are not available. Note that the IP
    address in the request header may not be the IP of the device.
:type consider_ip: bool

:param cell_towers: A list of cell tower dicts. See
    https://developers.google.com/maps/documentation/geolocation/intro#cell_tower_object
    for more detail.
:type cell_towers: list of dicts

:param wifi_access_points: A list of WiFi access point dicts. See
    https://developers.google.com/maps/documentation/geolocation/intro#wifi_access_point_object
    for more detail.
:type wifi_access_points: list of dicts
def bitswap_unwant(self, key, **kwargs):
    args = (key,)
    return self._client.request('/bitswap/unwant', args, **kwargs)
Remove a given block from wantlist.

Parameters
----------
key : str
    Key to remove from wantlist.
def accepts_kwarg(func, kwarg):
    signature = inspect.signature(func)
    try:
        signature.bind_partial(**{kwarg: None})
        return True
    except TypeError:
        return False
Determine whether the callable `func` has a signature that accepts the keyword argument `kwarg`
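A quick usage sketch, runnable as-is with the standard library's inspect module:

    def f(a, b=1):
        pass

    accepts_kwarg(f, 'b')  # True
    accepts_kwarg(f, 'c')  # False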
def delete(stack_ref: List[str], region: str, dry_run: bool, force: bool, remote: str):
    lizzy = setup_lizzy_client(remote)
    stack_refs = get_stack_refs(stack_ref)
    all_with_version = all(stack.version is not None for stack in stack_refs)

    if not all_with_version and not dry_run and not force:
        fatal_error(
            'Error: {} matching stacks found. '.format(len(stack_refs)) +
            'Please use the "--force" flag if you really want to delete '
            'multiple stacks.')

    output = ''
    for stack in stack_refs:
        if stack.version is not None:
            stack_id = '{stack.name}-{stack.version}'.format(stack=stack)
        else:
            stack_id = stack.name
        with Action("Requesting stack '{stack_id}' deletion..", stack_id=stack_id):
            output = lizzy.delete(stack_id, region=region, dry_run=dry_run)
    print(output)
Delete Cloud Formation stacks