code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _generate_examples(self, archive, directory): reg = re.compile(os.path.join("^%s" % directory, "(?P<label>neg|pos)", "")) for path, imdb_f in archive: res = reg.match(path) if not res: continue text = imdb_f.read().strip() yield { "text": text, "label": res.groupdict()["label"], }
Generate IMDB examples.
async def start(self):
    """Start all adapters managed by this device adapter.

    Adapters are started in order. If any ``start()`` raises, every
    adapter that had already started is stopped again (rollback) and the
    original exception is re-raised.
    """
    successful = 0
    try:
        for adapter in self.adapters:
            await adapter.start()
            successful += 1
        self._started = True
    except BaseException:
        # Was a bare `except:` — same semantics (catch everything,
        # including KeyboardInterrupt, since we re-raise), but explicit.
        for adapter in self.adapters[:successful]:
            await adapter.stop()
        raise
Start all adapters managed by this device adapter. If there is an error starting one or more adapters, this method will stop any adapters that we successfully started and raise an exception.
def function_dependency_graph(self, func):
    """Return the data-dependency graph for *func*.

    :param func: The Function object in CFG.function_manager.
    :returns: A networkx.DiGraph instance, or None when *func* is unknown.
    """
    # Build all per-function graphs lazily on first access.
    if self._function_data_dependencies is None:
        self._build_function_dependency_graphs()
    graphs = self._function_data_dependencies
    if func not in graphs:
        return None
    return graphs[func]
Get a dependency graph for the function `func`. :param func: The Function object in CFG.function_manager. :returns: A networkx.DiGraph instance.
def download_cart(cart_name, env):
    """Fetch a cart spec from MongoDB.

    :param cart_name: cart identifier (stored as the Mongo ``_id``)
    :param env: environment name selecting the collection
    :returns: the stored cart document, or None when not found
    """
    collection = cart_db()[env]
    return collection.find_one({'_id': cart_name})
Accesses MongoDB and returns the cart spec stored there.
def WebLookup(url, urlQuery=None, utf8=True):
    """Look up the web page at *url* via HTTP GET.

    :param url: web url
    :param urlQuery: optional dict passed as the GET query string
    :param utf8: force the response encoding to utf-8 when True
    :returns: the GET response text
    :raises: ``requests`` HTTP error for non-OK status codes
    """
    verbosity = goodlogging.Verbosity.MINIMAL
    goodlogging.Log.Info("UTIL", "Looking up info from URL:{0} with QUERY:{1})".format(url, urlQuery), verbosity=verbosity)
    response = requests.get(url, params=urlQuery)
    goodlogging.Log.Info("UTIL", "Full url: {0}".format(response.url), verbosity=verbosity)
    if utf8 is True:
        response.encoding = 'utf-8'
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
        return None
    return response.text
Look up webpage at given url with optional query string Parameters ---------- url : string Web url. urlQuery : dictionary [optional: default = None] Parameter to be passed to GET method of requests module utf8 : boolean [optional: default = True] Set response encoding Returns ---------- string GET response text
def print_locals(*args, **kwargs):
    """Print local variables of the calling function.

    With no arguments, every local of the caller is printed. Variables
    may be passed directly as varargs (their names are recovered from
    the caller's frame) or indirectly by name via ``keys=[...]``.
    """
    from utool import util_str
    from utool import util_dbg
    from utool import util_dict
    # Grab the caller's frame locals by introspection.
    locals_ = util_dbg.get_parent_frame().f_locals
    # Default: print everything when called bare; print nothing extra
    # (empty key list) when values were passed positionally.
    keys = kwargs.get('keys', None if len(args) == 0 else [])
    to_print = {}
    for arg in args:
        # Recover the caller's variable name for each passed value.
        varname = util_dbg.get_varname_from_locals(arg, locals_)
        to_print[varname] = arg
    if keys is not None:
        to_print.update(util_dict.dict_take(locals_, keys))
    if not to_print:
        # Nothing selected: fall back to printing all caller locals.
        to_print = locals_
    locals_str = util_str.repr4(to_print)
    print(locals_str)
Prints local variables in function. If no arguments all locals are printed. Variables can be specified directly (variable values passed in) as varargs or indirectly (variable names passed in) in kwargs by using keys and a list of strings.
def defaulted_property(self, target, option_name):
    """Compute a language property setting for the given JvmTarget.

    Picks the Java or Scala platform subsystem based on the target's
    sources and mirrors the option from it; targets with neither kind of
    source fall back to their own attribute value.
    """
    if target.has_sources('.java'):
        subsystem = Java.global_instance()
    elif target.has_sources('.scala'):
        subsystem = ScalaPlatform.global_instance()
    else:
        # No JVM-language sources: read the attribute off the target.
        return getattr(target, option_name)
    return subsystem.get_scalar_mirrored_target_option(option_name, target)
Computes a language property setting for the given JvmTarget. :param selector A function that takes a target or platform and returns the boolean value of the property for that target or platform, or None if that target or platform does not directly define the property. If the target does not override the language property, returns true iff the property is true for any of the matched languages for the target.
def entrypoints(section):
    """Return ``{name: loaded_object}`` for every entry point in *section*.

    :param str section: the section name in the entry point collection
    :returns: dict mapping entry-point names to their loaded objects
    """
    discovered = pkg_resources.iter_entry_points(section)
    return {entry.name: entry.load() for entry in discovered}
Returns the Entry Point for a given Entry Point section. :param str section: The section name in the entry point collection :returns: A dictionary of (Name, Class) pairs stored in the entry point collection.
def make_coord_dict(coord):
    """Build a loggable ``z``/``x``/``y`` dict from a tile coordinate."""
    return {
        'z': int_if_exact(coord.zoom),
        'x': int_if_exact(coord.column),
        'y': int_if_exact(coord.row),
    }
helper function to make a dict from a coordinate for logging
def set_level(self, level, realms):
    """Set this realm's level in the realms hierarchy and collect members.

    Recursively assigns ``level + 1`` to each child realm and flattens
    all (grand)children into ``all_sub_members`` (uuids) and
    ``all_sub_members_names`` (names).

    :param level: depth of this realm in the hierarchy (0 = top)
    :param realms: realm collection used to resolve names to objects
    :return: the collected list of sub-member names
    """
    self.level = level
    if not self.level:
        logger.info("- %s", self.get_name())
    else:
        # Indent the log line with one '+' per hierarchy level.
        logger.info(" %s %s", '+' * self.level, self.get_name())
    self.all_sub_members = []
    self.all_sub_members_names = []
    for child in sorted(self.realm_members):
        # Members are stored by name; resolve to the realm object.
        child = realms.find_by_name(child)
        if not child:
            continue
        self.all_sub_members.append(child.uuid)
        self.all_sub_members_names.append(child.get_name())
        # Recurse: the child returns its own flattened member names.
        grand_children = child.set_level(self.level + 1, realms)
        for grand_child in grand_children:
            if grand_child in self.all_sub_members_names:
                # Already collected via another branch; avoid duplicates.
                continue
            grand_child = realms.find_by_name(grand_child)
            if grand_child:
                self.all_sub_members_names.append(grand_child.get_name())
                self.all_sub_members.append(grand_child.uuid)
    return self.all_sub_members_names
Set the realm level in the realms hierarchy :return: None
def parse_env(s):
    """Parse the environment portion of *s* into a dict.

    Returns an empty dict when the environment pattern is not found.
    """
    match = ENV_RE.search(s)
    if match is None:
        return {}
    return dict(ENV_SPLIT_RE.findall(match.group(1)))
Parses the environment portion of string into a dict.
def minimize(self, minimize):
    """Configure whether the ABC minimizes the fitness function's return
    value (True) or the derived score (False).

    :param minimize: bool flag stored on the instance and logged
    """
    message = 'Minimize set to {}'.format(minimize)
    self._minimize = minimize
    self._logger.log('debug', message)
Configures the ABC to minimize fitness function return value or derived score Args: minimize (bool): if True, minimizes fitness function return value; if False, minimizes derived score
def scan_for_devices(timeout: float) -> List[Tuple[str, str]]:
    """Scan for Bluetooth Low Energy devices.

    Note this must be run as root!

    :returns: list of ``(address, name)`` pairs; GAP data type 9 is the
        Complete Local Name advertisement field.
    """
    from bluepy.btle import Scanner
    return [(dev.addr, dev.getValueText(9))
            for dev in Scanner().scan(timeout)]
Scan for bluetooth low energy devices. Note this must be run as root!
def node_contained_in_layer_area_validation(self):
    """If the layer defines a polygonal area, ensure the node geometry
    lies inside it; otherwise do nothing."""
    layer = self.layer
    if not layer:
        return
    if not isinstance(layer.area, Polygon):
        return
    if not layer.area.contains(self.geometry):
        raise ValidationError(_('Node must be inside layer area'))
if layer defines an area, ensure node coordinates are contained in the area
def socket_recvall(socket, length, bufsize=4096):
    """Read up to *length* bytes from *socket*.

    Fixes two defects in the original implementation:

    * it looped forever if the peer closed the connection before
      *length* bytes arrived (``recv`` returning ``b""`` was ignored);
    * it could return MORE than *length* bytes, because it always asked
      for a full *bufsize* chunk regardless of how much was still needed.

    :param socket: a connected socket-like object with ``recv``
    :param length: exact number of bytes desired
    :param bufsize: maximum chunk size per ``recv`` call
    :returns: the received bytes; shorter than *length* only when the
        connection was closed early
    """
    data = b""
    while len(data) < length:
        chunk = socket.recv(min(bufsize, length - len(data)))
        if not chunk:
            # Peer closed the connection; return what we have instead of
            # spinning forever on empty reads.
            break
        data += chunk
    return data
A helper method to read bytes from a socket up to a maximum length
def get_interface_by_instance_name(self, name):
    """Return the interface of this port whose ``instance_name`` equals
    *name*, or None when no such interface exists."""
    with self._mutex:
        matches = (i for i in self.interfaces if i.instance_name == name)
        return next(matches, None)
Get an interface of this port by instance name.
def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays, crinfo, orig_shape):
    """Save overlays into a copy of a DICOM series.

    :param input_dcmfilelist: list of source DICOM file paths
    :param output_dicom_dir: output directory (created when missing)
    :param overlays: dict of overlay arrays; each is un-cropped back to
        ``orig_shape`` before writing. NOTE: the dict is mutated in place.
    :param crinfo: crop info consumed by ``uncrop``
    :param orig_shape: shape of the original (un-cropped) volume
    """
    from . import datawriter as dwriter
    if not os.path.exists(output_dicom_dir):
        os.makedirs(output_dicom_dir)
    import imtools.image_manipulation
    # Restore every overlay to the original volume shape before copying.
    for key in overlays:
        overlays[key] = imtools.image_manipulation.uncrop(overlays[key], crinfo, orig_shape)
    dw = dwriter.DataWriter()
    dw.DataCopyWithOverlay(input_dcmfilelist, output_dicom_dir, overlays)
Save overlay to dicom.
def timeout(self, duration=3600):
    """Timeout the uploader of this file for *duration* seconds.

    Only the room owner may do this; ``check_owner`` raises otherwise.
    """
    self.room.check_owner()
    call_args = ("timeoutFile", self.fid, duration)
    self.conn.make_call(*call_args)
Timeout the uploader of this file
def remove_scene(self, scene_id):
    """Remove a scene by scene ID.

    The active scene and unknown scene IDs cannot be removed.

    :returns: ``(success, sequence_number, message)`` tuple
    """
    if self.state.activeSceneId == scene_id:
        err_msg = ("Requested to delete scene {sceneNum}, which is currently "
                   "active. Cannot delete active scene.").format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    if scene_id not in self.state.scenes:
        err_msg = "Requested to delete scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    del self.state.scenes[scene_id]
    logging.debug("Deleted scene {sceneNum}".format(sceneNum=scene_id))
    sequence_number = self.zmq_publisher.publish_scene_remove(scene_id)
    logging.debug("Removed scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
remove a scene by Scene ID
def add_dep(self, ):
    """Add the dep currently selected in the dep table view.

    Registers the project on the dep, stores the dep in ``self.deps``
    and detaches the tree item. No-op when nothing is selected.

    :returns: None
    """
    index = self.dep_tablev.currentIndex()
    item = index.internalPointer()
    if not item:
        return
    dep = item.internal_data()
    dep.projects.add(self._project)
    self.deps.append(dep)
    item.set_parent(None)
Add a dep and store it in the self.deps :returns: None :rtype: None :raises: None
def load(self, loc):
    """Load a pickled model from *loc*.

    Populates ``self.model.weights``, ``self.tagdict`` and
    ``self.classes`` from the pickled 3-tuple.

    :raises MissingCorpusError: when the pickle file cannot be read
    """
    try:
        # Context manager closes the handle even on error; the original
        # leaked the file object opened inline in pickle.load(...).
        with open(loc, 'rb') as model_file:
            w_td_c = pickle.load(model_file)
    except IOError:
        msg = ("Missing trontagger.pickle file.")
        raise MissingCorpusError(msg)
    self.model.weights, self.tagdict, self.classes = w_td_c
    self.model.classes = self.classes
    return None
Load a pickled model.
def fasta(self):
    """Generate a FASTA string for the `Assembly`.

    One record per molecule that has a ``sequence`` attribute; sequences
    are wrapped at 79 characters and headers use the
    ``PDBID|CHAIN|SEQUENCE`` convention with an upper-cased assembly id.

    :returns: the fasta-formatted string
    """
    width = 79
    lines = []
    for molecule in self._molecules:
        if not hasattr(molecule, 'sequence'):
            continue
        lines.append('>{0}:{1}|PDBID|CHAIN|SEQUENCE'.format(
            self.id.upper(), molecule.id))
        seq = molecule.sequence
        for start in range(0, len(seq), width):
            lines.append(seq[start:start + width])
    return ''.join(line + '\n' for line in lines)
Generates a FASTA string for the `Assembly`. Notes ----- Explanation of FASTA format: https://en.wikipedia.org/wiki/FASTA_format Recommendation that all lines of text be shorter than 80 characters is adhered to. Format of PDBID|CHAIN|SEQUENCE is consistent with files downloaded from the PDB. Uppercase PDBID used for consistency with files downloaded from the PDB. Useful for feeding into cdhit and then running sequence clustering. Returns ------- fasta_str : str String of the fasta file for the `Assembly`.
def resolve(self, key, keylist):
    """Hook to resolve ambiguities in selected keys.

    This base implementation refuses to choose and always raises.
    """
    candidates = str(sorted(keylist))
    message = "Ambiguous key " + repr(key) + ", could be any of " + candidates
    raise AmbiguousKeyError(message)
Hook to resolve ambiguities in selected keys
def monitor(name, callback):
    """Monitor actions on the named container; *callback* is invoked for
    each event.

    Lazily starts the shared ``_LXCMonitor`` on first use and installs
    SIGTERM/SIGINT handlers that shut it down.

    :raises ContainerNotExists: when the container does not exist
    :raises Exception: when the container is already monitored
    """
    global _monitor
    if not exists(name):
        raise ContainerNotExists("The container (%s) does not exist!" % name)
    if _monitor:
        if _monitor.is_monitored(name):
            raise Exception("You are already monitoring this container (%s)" % name)
    else:
        # First monitored container: spin up the monitor thread and make
        # sure it is stopped cleanly on process termination.
        _monitor = _LXCMonitor()
        logging.info("Starting LXC Monitor")
        _monitor.start()
        def kill_handler(sg, fr):
            stop_monitor()
        signal.signal(signal.SIGTERM, kill_handler)
        signal.signal(signal.SIGINT, kill_handler)
    _monitor.add_monitor(name, callback)
monitors actions on the specified container; callback is a function to be called for each container event
def hide(cls):
    """Hide the log interface and its overlay, then re-bind handlers."""
    # Hide the log element itself, then its backing overlay.
    cls.el.style.display = "none"
    cls.overlay.hide()
    # Re-attach event handlers after the visibility change.
    cls.bind()
Hide the log interface.
def get_form(self, form_class=None):
    """Return the form for this view, ensuring a crispy-forms helper.

    ``form_tag`` is always disabled so the surrounding template controls
    the ``<form>`` element. (The original duplicated the
    ``form_tag = False`` assignment in both branches.)
    """
    form = super().get_form(form_class)
    if not getattr(form, 'helper', None):
        form.helper = FormHelper()
    form.helper.form_tag = False
    return form
Get form for model
def normalize(self, address, **kwargs):
    """Make the address more comparable (delegates to the parent class)."""
    return super(AddressType, self).normalize(address, **kwargs)
Make the address more comparable.
def get_ndv_b(b):
    """Get the NoData value for a GDAL raster band.

    When the band has no NoDataValue set, sample the upper-left and
    lower-right pixels: if the UL pixel is NaN or equals the LR pixel it
    is taken as the NoData value, otherwise 0 is assumed. A stored NaN
    NoDataValue is kept only for float bands; integer bands fall back
    to 0.

    :param b: GDALRasterBand object
    :returns: the NoData value (float)
    """
    b_ndv = b.GetNoDataValue()
    if b_ndv is None:
        # No stored value: probe the band corners as a heuristic.
        ns = b.XSize
        nl = b.YSize
        ul = float(b.ReadAsArray(0, 0, 1, 1))
        lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1))
        if np.isnan(ul) or ul == lr:
            b_ndv = ul
        else:
            b_ndv = 0
    elif np.isnan(b_ndv):
        # NaN only makes sense as NoData for floating-point bands.
        b_dt = gdal.GetDataTypeName(b.DataType)
        if 'Float' in b_dt:
            b_ndv = np.nan
        else:
            b_ndv = 0
    return b_ndv
Get NoData value for GDAL band. If NoDataValue is not set in the band, extract upper left and lower right pixel values. Otherwise assume NoDataValue is 0. Parameters ---------- b : GDALRasterBand object This is the input band. Returns ------- b_ndv : float NoData value
def gettext(self, string, domain=None, **variables):
    """Translate *string* with the current locale, then interpolate
    *variables* using %-style named formatting."""
    translations = self.get_translations(domain)
    translated = translations.ugettext(string)
    return translated % variables
Translate a string with the current locale.
def parse(self, rrstr):
    """Parse a Rock Ridge Platform Dependent record out of a string.

    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('PD record already initialized!')
    # Bytes 2-3 hold su_len / su_entry_version; unpacking also validates
    # that the header is at least 4 bytes long.
    (_su_len, _su_entry_version) = struct.unpack_from('=BB', rrstr[:4], 2)
    # Everything after the 4-byte header is opaque padding.
    self.padding = rrstr[4:]
    self._initialized = True
Parse a Rock Ridge Platform Dependent record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing.
def listRuns(self, run_num=-1, logical_file_name="", block_name="", dataset=""):
    """List runs known to DBS, optionally filtered by run number,
    logical file name, block or dataset.

    Wildcards ('%') are rejected in all string filters. The connection
    is always closed, even on error.

    :returns: a list containing one ``{'run_num': [...]}`` dict
    """
    if( '%' in logical_file_name or '%' in block_name or '%' in dataset ):
        dbsExceptionHandler('dbsException-invalid-input', " DBSDatasetRun/listRuns. No wildcards are allowed in logical_file_name, block_name or dataset.\n.")
    conn = self.dbi.connection()
    tran = False
    try:
        ret = self.runlist.execute(conn, run_num, logical_file_name, block_name, dataset, tran)
        result = []
        rnum = []
        # Collect all run numbers, then wrap them in a single dict.
        # NOTE(review): reconstructed from collapsed source — confirm the
        # append below really sits outside the loop in the original.
        for i in ret:
            rnum.append(i['run_num'])
        result.append({'run_num' : rnum})
        return result
    finally:
        if conn:
            conn.close()
List run known to DBS.
def _where(filename, dirs=[], env="PATH"): if not isinstance(dirs, list): dirs = [dirs] if glob(filename): return filename paths = [os.curdir] + os.environ[env].split(os.path.pathsep) + dirs for path in paths: for match in glob(os.path.join(path, filename)): if match: return os.path.normpath(match) raise IOError("File not found: %s" % filename)
Find file in current dir or system path
def reduced_chi_squareds(self, p=None):
    """Return the reduced chi-squared for each massaged data set.

    :param p: fit parameters; ``None`` means use ``self.results[0]``
    :returns: list with one reduced chi-squared per data set, or None
        when no data is set (or no residuals are available)
    """
    if len(self._set_xdata)==0 or len(self._set_ydata)==0:
        return None
    if p is None:
        p = self.results[0]
    r = self.studentized_residuals(p)
    if r is None:
        return
    # Total number of points across all data sets.
    N = 0
    for i in range(len(r)):
        N += len(r[i])
    # Degrees of freedom shared proportionally across points.
    dof_per_point = self.degrees_of_freedom()/N
    for n in range(len(r)):
        # Sum of squared residuals normalized per-set; mutates r in place.
        r[n] = sum(r[n]**2)/(len(r[n])*dof_per_point)
    return r
Returns the reduced chi squared for each massaged data set. p=None means use the fit results.
def cric(display=False):
    """Return the CRIC dataset with progression to ESRD within 4 years
    as the label.

    :param display: when True, return a display-oriented copy of X
        instead of X itself (currently an identical copy)
    :returns: ``(X, y)`` — feature DataFrame and label array
    """
    X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv"))
    y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv"))
    if display:
        # NOTE(review): the display variant is just X.copy() today —
        # presumably a hook for prettified columns later; confirm.
        X_display = X.copy()
        return X_display, y
    else:
        return X, y
A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
def get_immediate_children_ownership(self):
    """Return all direct subsidiaries of this company AS OWNERSHIP
    OBJECTS (one level deep only; subsidiaries of subsidiaries are
    excluded)."""
    queryset = Ownership.objects.filter(parent=self)
    return queryset.select_related('child', 'child__country')
Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS. Excludes subsidiaries of subsidiaries.
def create_view(self, state_root_hash=None):
    """Create a StateView locked to *state_root_hash*.

    Args:
        state_root_hash (str): root hash for the view; ``None`` selects
            the initial root key.
    Returns:
        StateView: view locked to the given root hash.
    """
    root = INIT_ROOT_KEY if state_root_hash is None else state_root_hash
    return StateView(MerkleDatabase(self._database, merkle_root=root))
Creates a StateView for the given state root hash. Args: state_root_hash (str): The state root hash of the state view to return. If None, returns the state view for the initial root key. Returns: StateView: state view locked to the given root hash.
def get_all_tags_of_reminder(self, reminder_id):
    """Get all tags of a reminder, iterating over every result page.

    Raises when the rate limit is exceeded mid-iteration, in which case
    nothing is returned.

    :param reminder_id: the reminder id
    :return: list of tags
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_reminder_per_page,
        resource=REMINDER_TAGS,
        reminder_id=reminder_id,
    )
Get all tags of reminder This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param reminder_id: the reminder id :return: list
def _vertex_different_colors_qubo(G, x_vars): Q = {} for u, v in G.edges: if u not in x_vars or v not in x_vars: continue for color in x_vars[u]: if color in x_vars[v]: Q[(x_vars[u][color], x_vars[v][color])] = 1. return Q
For each vertex, it should not have the same color as any of its neighbors. Generates the QUBO to enforce this constraint. Notes ----- Does not enforce each node having a single color. Ground energy is 0, infeasible gap is 1.
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest, iA=None, i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
    """Issue a claim for the given user and schema.

    :param schemaId: reference to the claim definition schema
    :param claimRequest: claim request with prover ID and prover values
    :param iA: accumulator ID
    :param i: claim's sequence number within the accumulator
    :return: (signature, claim) — the combined primary/non-revocation
        claims and the attribute values
    """
    schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
    attributes = self._attrRepo.getAttributes(schemaKey, claimRequest.userId)
    await self._genContxt(schemaId, iA, claimRequest.userId)
    (c1, claim) = await self._issuePrimaryClaim(schemaId, attributes, claimRequest.U)
    # Non-revocation part only when the prover supplied Ur.
    c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur, iA, i) if claimRequest.Ur else None
    signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
    return (signature, claim)
Issue a claim for the given user and schema. :param schemaId: The schema ID (reference to claim definition schema) :param claimRequest: A claim request containing prover ID and prover-generated values :param iA: accumulator ID :param i: claim's sequence number within accumulator :return: The claim (both primary and non-revocation)
def log_level_from_vebosity(verbosity):
    """Map a ``-v`` count to a `logging` level.

    0 -> WARNING, 1 -> INFO, anything else -> DEBUG.

    NOTE(review): the name keeps the original "vebosity" typo to avoid
    breaking callers.
    """
    levels = {0: logging.WARNING, 1: logging.INFO}
    return levels.get(verbosity, logging.DEBUG)
Get the `logging` module log level from a verbosity. :param verbosity: The number of times the `-v` option was specified. :return: The corresponding log level.
def set_executing(on: bool):
    """Mark whether the current thread is executing a step file.

    Only applies to CauldronThreads; a no-op on any other thread.
    """
    current = threading.current_thread()
    if isinstance(current, threads.CauldronThread):
        current.is_executing = on
Toggle whether or not the current thread is executing a step file. This will only apply when the current thread is a CauldronThread. This function has no effect when run on a Main thread. :param on: Whether or not the thread should be annotated as executing a step file.
def notify_observers(self, which=None, min_priority=None):
    """Notify all observers (unless updates are switched off).

    :param which: the element that kicked off this notification loop;
        defaults to ``self``. Each callback is invoked as
        ``callble(self, which=which)``.
    :param min_priority: when given, only observers with priority
        strictly greater than this are notified. The observer list is
        assumed sorted by descending priority, so iteration stops at the
        first entry at or below the threshold.
    """
    if not self._update_on:
        return
    if which is None:
        which = self
    if min_priority is None:
        # The original used a list comprehension purely for its side
        # effects; a plain loop states the intent.
        for _, _, callble in self.observers:
            callble(self, which=which)
    else:
        for priority, _, callble in self.observers:
            if priority <= min_priority:
                break
            callble(self, which=which)
Notifies all observers. Which is the element, which kicked off this notification loop. The first argument will be self, the second `which`. .. note:: notifies only observers with priority p > min_priority! :param min_priority: only notify observers with priority > min_priority if min_priority is None, notify all observers in order
def _wait_for_function(self, function_descriptor, driver_id, timeout=10):
    """Wait until the function to be executed is registered on this worker.

    Loops until the import thread has imported the relevant function
    (or, for actor workers, until the actor is defined). After *timeout*
    seconds a warning is pushed to the driver once.

    Args:
        function_descriptor: FunctionDescriptor of the function to run.
        driver_id (str): driver to push the timeout warning to.
        timeout: seconds before the warning is emitted.
    """
    start_time = time.time()
    warning_sent = False
    while True:
        with self.lock:
            # Non-actor worker: wait for the function to appear in the
            # per-driver execution info table.
            if (self._worker.actor_id.is_nil() and (function_descriptor.function_id in self._function_execution_info[driver_id])):
                break
            # Actor worker: wait for the actor definition instead.
            elif not self._worker.actor_id.is_nil() and ( self._worker.actor_id in self._worker.actors):
                break
        if time.time() - start_time > timeout:
            warning_message = ("This worker was asked to execute a "
                               "function that it does not have "
                               "registered. You may have to restart "
                               "Ray.")
            if not warning_sent:
                # Push only one warning per wait, then keep looping.
                ray.utils.push_error_to_driver(
                    self._worker,
                    ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
                    warning_message,
                    driver_id=driver_id)
                warning_sent = True
        time.sleep(0.001)
Wait until the function to be executed is present on this worker. This method will simply loop until the import thread has imported the relevant function. If we spend too long in this loop, that may indicate a problem somewhere and we will push an error message to the user. If this worker is an actor, then this will wait until the actor has been defined. Args: function_descriptor : The FunctionDescriptor of the function that we want to execute. driver_id (str): The ID of the driver to push the error message to if this times out.
def calc_nfalse(d):
    """Calculate the number of thermal-noise false positives per segment.

    Multiplies the number of independent trials (time samples across dt
    scales, DM grid, image pixels) by the one-sided Gaussian tail
    probability beyond the detection threshold.
    """
    # Effective number of integrations summed over all dt downsamplings.
    dtfactor = n.sum([1./i for i in d['dtarr']])
    ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
    # P(x > sigma) for a unit Gaussian.
    qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
    return int(qfrac*ntrials)
Calculate the number of thermal-noise false positives per segment.
def index(*args, **kwargs):
    """Search a list for an exact element, or one satisfying a predicate.

    Usage::

        index(element, list_)
        index(of=element, in_=list_)
        index(where=predicate, in_=list_)

    :return: Index of first matching element, or -1 when none is found
    """
    result = _index(*args, start=0, step=1, **kwargs)
    return result[1]
Search a list for an exact element, or element satisfying a predicate. Usage:: index(element, list_) index(of=element, in_=list_) index(where=predicate, in_=list_) :param element, of: Element to search for (by equality comparison) :param where: Predicate defining an element to search for. This should be a callable taking a single argument and returning a boolean result. :param list_, in_: List to search in :return: Index of first matching element, or -1 if none was found .. versionadded:: 0.0.3
def _check_warn_threshold(self, time_to, event_dict): if time_to["total_minutes"] <= self.warn_threshold: warn_message = self.py3.safe_format(self.format_notification, event_dict) self.py3.notify_user(warn_message, "warning", self.warn_timeout)
Checks if the time until an event starts is less than or equal to the warn_threshold. If True, issue a warning with self.py3.notify_user.
def recent_submissions(self):
    """Yield each group's most recent submission.

    Groups that have not submitted anything are skipped.
    """
    for group in self.groups:
        latest = Submission.most_recent_submission(self, group)
        if latest:
            yield latest
Generate a list of the most recent submissions for each user. Only yields a submission for a user if they've made one.
def makedirs_safe(fulldir):
    """Create *fulldir* if it does not exist (like shell ``mkdir -p``),
    tolerating concurrent creation by other processes.

    The original checked ``os.path.exists`` before creating, leaving a
    window where another process could create the directory in between
    (TOCTOU). Attempting the creation and swallowing EEXIST is race-free
    and yields the same observable behavior.
    """
    import errno
    try:
        os.makedirs(fulldir)
    except OSError as exc:
        # Path already exists (possibly created concurrently): fine.
        if exc.errno != errno.EEXIST:
            raise
Creates a directory if it does not exists. Takes into consideration concurrent access support. Works like the shell's 'mkdir -p'.
def _compute_value(power, wg): if power not in wg: p1, p2 = power if p1 == 0: yy = wg[(0, -1)] wg[power] = numpy.power(yy, p2 / 2).sum() / len(yy) else: xx = wg[(-1, 0)] wg[power] = numpy.power(xx, p1 / 2).sum() / len(xx) return wg[power]
Return the weight corresponding to single power.
def is_template(self, filename):
    """Check if a file is a template.

    A file is considered a template when it is neither a partial, an
    ignored file, nor a static asset.

    :param filename: the name of the file to check
    """
    excluded = (
        self.is_partial(filename)
        or self.is_ignored(filename)
        or self.is_static(filename)
    )
    return not excluded
Check if a file is a template. A file is a considered a template if it is neither a partial nor ignored. :param filename: the name of the file to check
def name(self):
    """Return (and cache) the long name of the digest algorithm."""
    if not hasattr(self, 'digest_name'):
        # Resolve the OpenSSL message-digest type to its long name once.
        md_type = libcrypto.EVP_MD_type(self.digest)
        self.digest_name = Oid(md_type).longname()
    return self.digest_name
Returns name of the digest
def deriv(self, mu):
    """Derivative of the negative binomial variance function:
    ``1 + 2 * alpha * mu`` evaluated on the cleaned mean."""
    cleaned = self._clean(mu)
    return 2 * self.alpha * cleaned + 1
Derivative of the negative binomial variance function.
def fit(self, inputs=None, job_name=None, include_cls_metadata=False, **kwargs):
    """Start a hyperparameter tuning job.

    Args:
        inputs: training-data specification (S3 string/dict/s3_input,
            RecordSet or list of RecordSets) forwarded to the estimator.
        job_name (str): tuning job name; generated when not given.
        include_cls_metadata (bool): include estimator-class metadata as
            a hyperparameter.
        **kwargs: extra arguments for the estimator's training prep.
    """
    if isinstance(inputs, list) or isinstance(inputs, RecordSet):
        self.estimator._prepare_for_training(inputs, **kwargs)
    else:
        # NOTE(review): job_name is passed positionally here where the
        # branch above passes `inputs` — confirm the estimator accepts
        # this; the asymmetry looks suspicious.
        self.estimator._prepare_for_training(job_name)
    self._prepare_for_training(job_name=job_name, include_cls_metadata=include_cls_metadata)
    self.latest_tuning_job = _TuningJob.start_new(self, inputs)
Start a hyperparameter tuning job. Args: inputs: Information about the training data. Please refer to the ``fit()`` method of the associated estimator, as this can take any of the following forms: * (str) - The S3 location where training data is saved. * (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for training data, you can specify a dict mapping channel names to strings or :func:`~sagemaker.session.s3_input` objects. * (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide additional information about the training dataset. See :func:`sagemaker.session.s3_input` for full details. * (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of Amazon :class:~`Record` objects serialized and stored in S3. For use with an estimator for an Amazon algorithm. * (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of :class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is a different channel of training data. job_name (str): Tuning job name. If not specified, the tuner generates a default job name, based on the training image name and current timestamp. include_cls_metadata (bool): Whether or not the hyperparameter tuning job should include information about the estimator class (default: False). This information is passed as a hyperparameter, so if the algorithm you are using cannot handle unknown hyperparameters (e.g. an Amazon SageMaker built-in algorithm that does not have a custom estimator in the Python SDK), then set ``include_cls_metadata`` to ``False``. **kwargs: Other arguments needed for training. Please refer to the ``fit()`` method of the associated estimator to see what other arguments are needed.
def delete(self):
    """Delete this record, preferring the registered on-delete hook
    over the raw query delete."""
    hook = self._on_delete
    if hook is not None:
        return hook(self)
    return self._query.delete()
Delete a record from the database.
def _post(url, data, content_type, params=None):
    """HTTP POST *data* to *url* and return the decoded JSON response.

    Tries the `requests` library first; when `requests` was never
    imported (NameError at call time) it falls back to urllib2 — a
    Python-2-era compatibility path.
    """
    try:
        response = requests.post(url, params=params, data=data, headers={
            'Content-Type': content_type,
        })
        response.raise_for_status()
        return response.json()
    except NameError:
        # `requests` unavailable: rebuild the query string manually and
        # POST via urllib2 instead.
        url = '{0}?{1}'.format(url, urllib.urlencode(params))
        req = urllib2.Request(url, data.encode(ENCODING), {
            'Content-Type': content_type,
        })
        return json.loads(urllib2.urlopen(req).read().decode(ENCODING))
HTTP POST request.
def _is_bhyve_hyper():
    """Return True when this node is a bhyve hypervisor.

    Probes ``sysctl hw.vmm.create``; the presence of a non-empty quoted
    value in the output is taken as the signal that the vmm kernel
    module is loaded.
    """
    sysctl_cmd = 'sysctl hw.vmm.create'
    vmm_enabled = False
    try:
        stdout = subprocess.Popen(sysctl_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
        # Missing vmm module -> no quoted section -> split('"')[1]
        # raises IndexError, leaving vmm_enabled False.
        vmm_enabled = len(salt.utils.stringutils.to_str(stdout).split('"')[1]) != 0
    except IndexError:
        pass
    return vmm_enabled
Returns a bool whether or not this node is a bhyve hypervisor
def repeatingfieldsnames(fields):
    """Get the names of the repeating fields.

    Legalizes each field name, keeps the ones containing an integer
    token, replaces the integer, and truncates to the count of unique
    replaced names.
    """
    names = [field['field'][0] for field in fields]
    names = [bunchhelpers.onlylegalchar(name) for name in names]
    names = [name for name in names if bunchhelpers.intinlist(name.split())]
    pairs = [(bunchhelpers.replaceint(name), None) for name in names]
    unique = dict(pairs)
    # Keep only as many pairs as there are unique replaced names.
    return pairs[:len(unique)]
get the names of the repeating fields
def _create_reference_value_options(self, keys, finished_keys):
    """Create fully-qualified options for every 'reference_value_from' link.

    Steps through the option definitions looking for options that
    reference a value elsewhere; for each one, a copy of the option is
    registered under its fully qualified reference name, and the
    namespaces along that path are marked as reference-value namespaces.

    :param keys: option keys to examine
    :param finished_keys: keys to skip (already processed)
    :returns: the set of fully qualified reference option names created
    """
    set_of_reference_value_option_names = set()
    for key in keys:
        if key in finished_keys:
            continue
        an_option = self.option_definitions[key]
        if an_option.reference_value_from:
            fully_qualified_reference_name = '.'.join((
                an_option.reference_value_from,
                an_option.name
            ))
            if fully_qualified_reference_name in keys:
                # Already defined explicitly; nothing to synthesize.
                continue
            # Register a copy under the fully qualified name, with the
            # reference link cleared so it resolves directly.
            reference_option = an_option.copy()
            reference_option.reference_value_from = None
            reference_option.name = fully_qualified_reference_name
            set_of_reference_value_option_names.add(
                fully_qualified_reference_name
            )
            self.option_definitions.add_option(reference_option)
    for a_reference_value_option_name in set_of_reference_value_option_names:
        # Mark every namespace prefix of the qualified name.
        for x in range(a_reference_value_option_name.count('.')):
            namespace_path = \
                a_reference_value_option_name.rsplit('.', x + 1)[0]
            self.option_definitions[namespace_path].ref_value_namespace()
    return set_of_reference_value_option_names
this method steps through the option definitions looking for alt paths. On finding one, it creates the 'reference_value_from' links within the option definitions and populates it with copied options.
def gateway():
    """CAS gateway decorator factory.

    Authenticates the single-sign-on session when a ticket is available
    but does NOT redirect to the sign-in url otherwise. Requires
    ``CAS_GATEWAY`` to be enabled in settings.
    """
    if settings.CAS_GATEWAY == False:
        raise ImproperlyConfigured('CAS_GATEWAY must be set to True')
    def wrap(func):
        def wrapped_f(*args):
            from cas.views import login
            request = args[0]
            # Django changed is_authenticated from a method to a
            # property; support both forms.
            try:
                is_authenticated = request.user.is_authenticated()
            except TypeError:
                is_authenticated = request.user.is_authenticated
            if is_authenticated:
                pass
            else:
                path_with_params = request.path + '?' + urlencode(request.GET.copy())
                if request.GET.get('ticket'):
                    # A CAS ticket is present: try to log in; redirects
                    # are returned straight to the client.
                    response = login(request, path_with_params, False, True)
                    if isinstance(response, HttpResponseRedirect):
                        return response
                else:
                    # No ticket: only attempt gateway login once
                    # ('gatewayed=true' marks a returned gateway trip).
                    gatewayed = request.GET.get('gatewayed')
                    if gatewayed == 'true':
                        pass
                    else:
                        response = login(request, path_with_params, False, True)
                        if isinstance(response, HttpResponseRedirect):
                            return response
            return func(*args)
        return wrapped_f
    return wrap
Authenticates single sign on session if ticket is available, but doesn't redirect to sign in url otherwise.
def complete_run(self, text, line, b, e):
    """Autocomplete file names for the run command.

    Prefers ``.fs`` Forth sources matching the last word of *line*;
    falls back to any matching file when none are found.
    """
    prefix = line.split()[-1]
    forth_matches = glob.glob(prefix + '*.fs')
    if not forth_matches:
        # No Forth sources: complete against any matching file name.
        return [p.split(os.path.sep)[-1] for p in glob.glob(prefix + '*')]
    return [p.split(os.path.sep)[-1] for p in forth_matches]
Autocomplete file names with .fs ending (Forth sources).
def ensure_dict(param, default_value, default_key=None):
    """Coerce *param* into a dict, promoting non-dict params.

    A falsy *param* is replaced by *default_value*. A non-dict result is
    wrapped under *default_key*, and — when truthy — also promoted to be
    the new default value.

    :return: ``(dict, default_value)`` tuple
    """
    if not param:
        param = default_value
    if isinstance(param, dict):
        return param, default_value
    # Non-dict: a truthy param becomes the new default value.
    if param:
        default_value = param
    return {default_key: param}, default_value
Retrieves a dict and a default value from given parameter. if parameter is not a dict, it will be promoted as the default value. :param param: :type param: :param default_value: :type default_value: :param default_key: :type default_key: :return: :rtype:
def set_root(self, depth, index):
    """Set the root of the level at *depth* to *index*.

    Depths at or beyond the number of levels are silently ignored.

    :param depth: the depth level (int)
    :param index: the new root index (QtCore.QModelIndex)
    :returns: None
    """
    if depth >= len(self._levels):
        return
    self._levels[depth].set_root(index)
Set the level\'s root of the given depth to index :param depth: the depth level :type depth: int :param index: the new root index :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
def add_rule(name, localport, protocol='tcp', action='allow', dir='in', remoteip='any'):
    """Add a new inbound or outbound rule to the firewall policy.

    Salt state function: returns the usual state dict with ``name``,
    ``result``, ``changes`` and ``comment``. A rule with an existing
    name is left untouched; test mode reports the pending change.

    :param name: unique rule name (cannot be "all")
    :param localport: port (or range/comma list) the rule applies to
    :param protocol: protocol number/name ('tcp', 'udp', 'icmpv4', ...)
    :param action: 'allow', 'block' or 'bypass'
    :param dir: direction, 'in' or 'out'
    :param remoteip: remote IP specification
    """
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    if not __salt__['firewall.rule_exists'](name):
        ret['changes'] = {'new rule': name}
    else:
        ret['comment'] = 'A rule with that name already exists'
        return ret
    if __opts__['test']:
        # Test mode: report would-be changes without applying them.
        ret['result'] = not ret['changes'] or None
        ret['comment'] = ret['changes']
        ret['changes'] = {}
        return ret
    try:
        __salt__['firewall.add_rule'](
            name, localport, protocol, action, dir, remoteip)
    except CommandExecutionError:
        ret['comment'] = 'Could not add rule'
    # NOTE(review): reconstructed from collapsed source — the final
    # `return ret` is assumed to sit at function level (success path
    # returns the dict too), per Salt state conventions; confirm.
    return ret
Add a new inbound or outbound rule to the firewall policy Args: name (str): The name of the rule. Must be unique and cannot be "all". Required. localport (int): The port the rule applies to. Must be a number between 0 and 65535. Can be a range. Can specify multiple ports separated by commas. Required. protocol (Optional[str]): The protocol. Can be any of the following: - A number between 0 and 255 - icmpv4 - icmpv6 - tcp - udp - any action (Optional[str]): The action the rule performs. Can be any of the following: - allow - block - bypass dir (Optional[str]): The direction. Can be ``in`` or ``out``. remoteip (Optional [str]): The remote IP. Can be any of the following: - any - localsubnet - dns - dhcp - wins - defaultgateway - Any valid IPv4 address (192.168.0.12) - Any valid IPv6 address (2002:9b3b:1a31:4:208:74ff:fe39:6c43) - Any valid subnet (192.168.1.0/24) - Any valid range of IP addresses (192.168.0.1-192.168.0.12) - A list of valid IP addresses Can be combinations of the above separated by commas. .. versionadded:: 2016.11.6 Example: .. code-block:: yaml open_smb_port: win_firewall.add_rule: - name: SMB (445) - localport: 445 - protocol: tcp - action: allow
def keep_session_alive(self):
    """Ping the server; when the session has expired (XML-RPC fault
    code 5), log back in. Any other fault is re-raised."""
    try:
        self.resources()
    except xmlrpclib.Fault as fault:
        if fault.faultCode != 5:
            raise
        self.login()
If the session expired, logs back in.
def make_html_page(self, valumap):
    """Build the report as an HTML page from the template file.

    Renders each sub-report's HTML items under an ``<h2>`` heading,
    stores the joined markup under ``valumap['subreports']`` and
    substitutes the whole map into the template.

    :param valumap: substitution mapping for the template (mutated:
        'subreports' is added)
    :returns: a TextPart wrapping the rendered html page
    """
    logger.info('Making an html report using template %r.', self.html_template)
    # Context manager closes the template handle even on a read error;
    # the original open/read/close sequence leaked it on failure.
    with open(self.html_template) as fh:
        template = fh.read()
    parts = []
    for sr in self.subreports:
        report_data = [item.html for item in sr.report_data if item.html]
        if report_data:
            # NOTE(review): the format string only uses {1} (reptext);
            # sr.title ({0}) is passed but unused — confirm intended.
            parts.append('\n<h2>{1}</h2>\n'.format(sr.title, sr.reptext))
            parts.extend(report_data)
            parts.append('\n<hr/>')
    valumap['subreports'] = '\n'.join(parts)
    html_page = Template(template).safe_substitute(valumap)
    return TextPart(fmt='html', text=html_page, ext='html')
Builds the report as html page, using the template page from file.
def _get_goroot(self, goids_all, namespace): root_goid = self.consts.NAMESPACE2GO[namespace] if root_goid in goids_all: return root_goid root_goids = set() for goid in goids_all: goterm = self.gosubdag.go2obj[goid] if goterm.depth == 0: root_goids.add(goterm.id) if len(root_goids) == 1: return next(iter(root_goids)) raise RuntimeError("UNEXPECTED NUMBER OF ROOTS: {R}".format(R=root_goids))
Get the top GO for the set of goids_all.
def format_exception(self):
    """Return the same data as from ``traceback.format_exception``.

    Builds the frame summaries from our cached traceback frames and
    appends the formatted exception line(s).

    :returns: list of strings, each ending in a newline
    """
    import traceback
    frames = self.get_traceback_frames()
    tb = [(f['filename'], f['lineno'], f['function'], f['context_line'])
          for f in frames]
    # Renamed from `list` — the original shadowed the builtin.
    lines = ['Traceback (most recent call last):\n']
    lines += traceback.format_list(tb)
    lines += traceback.format_exception_only(self.exc_type, self.exc_value)
    return lines
Return the same data as from traceback.format_exception.
def get_summary(self, squeeze=True, parameters=None, chains=None):
    """Get a summary of the marginalised parameter distributions.

    :param squeeze: when True and only one chain is summarised, return
        its dict directly instead of a length-one list
    :param parameters: optional list of parameter names to summarise;
        defaults to each chain's own parameters
    :param chains: optional list of chain indices/names; defaults to all
        parent chains
    :returns: one dict per chain mapping parameter name -> summary
        (or a single dict when squeezed)
    """
    results = []
    if chains is None:
        chains = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        # Resolve each identifier to its chain object(s) via the parent.
        chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]
    for chain in chains:
        res = {}
        params_to_find = parameters if parameters is not None else chain.parameters
        for p in params_to_find:
            if p not in chain.parameters:
                # Requested parameter absent from this chain: skip it.
                continue
            summary = self.get_parameter_summary(chain, p)
            res[p] = summary
        results.append(res)
    if squeeze and len(results) == 1:
        return results[0]
    return results
Gets a summary of the marginalised parameter distributions. Parameters ---------- squeeze : bool, optional Squeeze the summaries. If you only have one chain, squeeze will not return a length one list, just the single summary. If this is false, you will get a length one list. parameters : list[str], optional A list of parameters which to generate summaries for. chains : list[int|str], optional A list of the chains to get a summary of. Returns ------- list of dictionaries One entry per chain, parameter bounds stored in dictionary with parameter as key
def experiments_fmri_create(self, experiment_id, filename):
    """Create an fMRI data object from a file and attach it to an experiment.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    filename : File-type object
        Functional data file

    Returns
    -------
    FMRIDataHandle
        Handle for the created fMRI object, or None if the identified
        experiment is unknown.
    """
    # Abort early if the experiment does not exist.
    experiment = self.experiments_get(experiment_id)
    if experiment is None:
        return None
    fmri = self.funcdata.create_object(filename)
    experiment = self.experiments.update_fmri_data(experiment_id, fmri.identifier)
    if experiment is None:
        # Experiment vanished between lookup and update: roll back the
        # object we just created so no orphaned data is left behind.
        shutil.rmtree(fmri.directory)
        self.funcdata.delete_object(fmri.identifier, erase=True)
        return None
    else:
        return funcdata.FMRIDataHandle(fmri, experiment_id)
Create functional data object from given file and associate the object with the specified experiment. Parameters ---------- experiment_id : string Unique experiment identifier filename : File-type object Functional data file Returns ------- FMRIDataHandle Handle for created fMRI object or None if identified experiment is unknown
def vividict_to_dict(vividict):
    """Recursively convert a Vividict (and any numpy values) to a plain dict.

    :param Vividict vividict: A Vividict to be converted
    :return: A Python dict
    :rtype: dict
    """
    try:
        from numpy import ndarray
    except ImportError:
        # Without numpy, alias ndarray to dict: dicts are handled by the
        # first branch anyway, so the ndarray branch becomes dead code.
        ndarray = dict

    def _to_native(value):
        # Dicts are converted in place, key by key.
        if isinstance(value, dict):
            for k, v in value.items():
                value[k] = _to_native(v)
        elif isinstance(value, ndarray):
            value = value.tolist()
        if isinstance(value, (list, tuple)):
            converted = [_to_native(item) for item in value]
            return tuple(converted) if isinstance(value, tuple) else converted
        # numpy scalars expose `dtype`; unwrap them to native Python values.
        return value.item() if hasattr(value, 'dtype') else value

    result = {}
    for key, value in vividict.items():
        value = _to_native(value)
        if isinstance(value, Vividict):
            value = Vividict.vividict_to_dict(value)
        result[key] = value
    return result
Helper method to create Python dicts from arbitrary Vividict objects :param Vividict vividict: A Vividict to be converted :return: A Python dict :rtype: dict
def _separate_exclude_cases(name, exclude_prefix): excluder = re.compile('|'.join(exclude_prefix)) split_entries = excluder.split(name) return {'clean_name': split_entries[0], 'excluded_countries': split_entries[1:]}
Splits the name into the clean part and any excluded regions. Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries
def play_tour(self, name=None, interval=0):
    """Plays a tour on the current website.

    @Params
    name - If creating multiple tours at the same time,
        use this to select the tour you wish to play.
    interval - The delay time between autoplaying tour steps.
        If set to 0 (default), the tour is manually controlled.
    """
    # Tours need a visible browser window.
    if self.headless:
        return
    if not name:
        name = "default"
    if name not in self._tour_steps:
        raise Exception("Tour {%s} does not exist!" % name)
    # The first recorded step identifies which JS tour library built the
    # tour; dispatch to the matching helper (Shepherd is the fallback).
    if "Bootstrap" in self._tour_steps[name][0]:
        tour_helper.play_bootstrap_tour(
            self.driver, self._tour_steps, self.browser,
            self.message_duration, name=name, interval=interval)
    elif "Hopscotch" in self._tour_steps[name][0]:
        tour_helper.play_hopscotch_tour(
            self.driver, self._tour_steps, self.browser,
            self.message_duration, name=name, interval=interval)
    elif "IntroJS" in self._tour_steps[name][0]:
        tour_helper.play_introjs_tour(
            self.driver, self._tour_steps, self.browser,
            self.message_duration, name=name, interval=interval)
    else:
        tour_helper.play_shepherd_tour(
            self.driver, self._tour_steps,
            self.message_duration, name=name, interval=interval)
Plays a tour on the current website. @Params name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. interval - The delay time between autoplaying tour steps. If set to 0 (default), the tour is fully manually controlled.
def strel_octagon(radius):
    """Create an octagonal structuring element for morphological operations.

    radius - the distance from the origin to each edge of the octagon
    """
    r = int(radius)
    i, j = np.mgrid[-r:(r + 1), -r:(r + 1)]
    diag = float(r) * np.sqrt(2)
    # Intersect the bounding square (implicit in the grid extent) with
    # the diamond |i + j| <= diag, |i - j| <= diag to form the octagon.
    return (np.abs(i + j) <= diag) & (np.abs(i - j) <= diag)
Create an octagonal structuring element for morphological operations radius - the distance from the origin to each edge of the octagon
def need_to_create_symlink(directory, checksums, filetype, symlink_path):
    """Check if we need to create a symlink for an existing file.

    Returns False when symlinking is disabled (``symlink_path`` is None)
    or when the link already exists and points at the expected file;
    True otherwise.
    """
    if symlink_path is None:
        return False
    pattern = NgdConfig.get_fileending(filetype)
    filename, _ = get_name_and_checksum(checksums, pattern)
    target = os.path.join(directory, filename)
    link = os.path.join(symlink_path, filename)
    # An existing link only counts if it already points at the right file.
    if os.path.islink(link) and os.readlink(link) == target:
        return False
    return True
Check if we need to create a symlink for an existing file.
def unique_to_each(*iterables):
    """Return, per input iterable, the elements found in no other input.

    Duplicates within a single iterable are kept, input order is
    preserved, and elements must be hashable.

        >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
        [['A'], ['C'], ['D']]
    """
    pools = [list(iterable) for iterable in iterables]
    # Count each element once per iterable it appears in.
    occurrence = Counter(chain.from_iterable(set(pool) for pool in pools))
    singletons = {item for item, count in occurrence.items() if count == 1}
    return [[item for item in pool if item in singletons] for pool in pools]
Return the elements from each of the input iterables that aren't in the other input iterables. For example, suppose you have a set of packages, each with a set of dependencies:: {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} If you remove one package, which dependencies can also be removed? If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) [['A'], ['C'], ['D']] If there are duplicates in one input iterable that aren't in the others they will be duplicated in the output. Input order is preserved:: >>> unique_to_each("mississippi", "missouri") [['p', 'p'], ['o', 'u', 'r']] It is assumed that the elements of each iterable are hashable.
def get(self):
    """Return the normalized primary key cached on the bound instance.

    We do not call the default getter as we have the value cached in the
    instance in its _pk attribute.

    Raises:
        ImplementationError: if the field is not bound to an instance.
        DoesNotExist: if the bound object has been deleted.
    """
    if not hasattr(self, '_instance'):
        raise ImplementationError("Impossible to get the PK of an unbound field")
    if not hasattr(self._instance, '_pk'):
        raise DoesNotExist("The current object doesn't exists anymore")
    if not self._instance._pk:
        # A falsy cached PK triggers (re)generation through the setter.
        self.set(value=None)
    return self.normalize(self._instance._pk)
We do not call the default getter as we have the value cached in the instance in its _pk attribute
def remove_file_from_s3(awsclient, bucket, key):
    """Remove a file from an AWS S3 bucket.

    :param awsclient: client factory exposing ``get_client('s3')``
    :param bucket: bucket name
    :param key: object key to delete
    :return: the S3 ``delete_object`` response dict
    """
    client_s3 = awsclient.get_client('s3')
    # BUGFIX: the response was assigned but never returned, despite the
    # docstring advertising a return value; return it so callers can
    # inspect e.g. the DeleteMarker.
    return client_s3.delete_object(Bucket=bucket, Key=key)
Remove a file from an AWS S3 bucket. :param awsclient: :param bucket: :param key: :return:
def commit_events(self):
    """Apply all buffered `Event`s (in sorted order) to the internal state,
    then clear the buffer."""
    for pending in sorted(self._event_buf):
        self.store.record_event(pending)
        self._snapshot.process_event(pending)
    self._event_buf = []
Applies all outstanding `Event`s to the internal state
async def pause_writing(self):
    """Pause writing by cancelling the background writer task."""
    self._restart_writer = False
    if self._writer_task:
        # Drop the auto-restart hook first so the cancelled writer is not
        # immediately restarted by its done-callback.
        self._writer_task.remove_done_callback(self.restart_writing)
        self._writer_task.cancel()
        # NOTE(review): awaiting a cancelled task normally raises
        # CancelledError here — presumably the writer swallows it; confirm.
        await self._writer_task
    # Yield to the event loop so the cancellation is fully processed.
    # NOTE(review): the `loop` argument to asyncio.sleep is deprecated
    # since Python 3.8 and removed in 3.10.
    await asyncio.sleep(0, loop=self._loop)
Pause writing.
def submit_and_verify(
    xml_str=None, xml_file=None, xml_root=None, config=None, session=None, dry_run=None, **kwargs
):
    """Submits data to the Polarion Importer and checks that it was imported.

    The payload comes from one of xml_str / xml_file / xml_root.
    Returns the importer response (verified when possible), or None when
    configuration or submission fails.
    """
    try:
        config = config or configuration.get_config()
        xml_root = _get_xml_root(xml_root, xml_str, xml_file)
        submit_config = SubmitConfig(xml_root, config, **kwargs)
        session = session or utils.get_session(submit_config.credentials, config)
        submit_response = submit(xml_root, submit_config, session, dry_run=dry_run, **kwargs)
    except Dump2PolarionException as err:
        # Submission problems are logged, not raised, to the caller.
        logger.error(err)
        return None
    valid_response = submit_response.validate_response()
    # Skip verification when the response is invalid or verification
    # was explicitly disabled via the no_verify flag.
    if not valid_response or kwargs.get("no_verify"):
        return submit_response.response
    response = verify_submit(
        session,
        submit_config.queue_url,
        submit_config.log_url,
        submit_response.job_ids,
        timeout=kwargs.get("verify_timeout"),
        log_file=kwargs.get("log_file"),
    )
    return response
Submits data to the Polarion Importer and checks that it was imported.
def _tot_services_by_state(self, state=None, state_type=None): if state is None and state_type is None: return len(self.services) if state_type: return sum(1 for s in self.services if s.state == state and s.state_type == state_type) return sum(1 for s in self.services if s.state == state)
Generic function to get the number of services in the specified state :param state: state to filter on :type state: str :param state_type: state type to filter on (HARD, SOFT) :type state_type: str :return: number of services in state *state* :rtype: int TODO: Should be moved
def _MessageToJsonObject(self, message):
    """Converts message to an object according to Proto3 JSON Specification."""
    message_descriptor = message.DESCRIPTOR
    full_name = message_descriptor.full_name
    # Wrapper types (e.g. Int32Value) serialize as their bare value.
    if _IsWrapperMessage(message_descriptor):
        return self._WrapperMessageToJsonObject(message)
    # Well-known types each have a dedicated conversion method, looked
    # up by name in _WKTJSONMETHODS and invoked on self.
    if full_name in _WKTJSONMETHODS:
        return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
    js = {}
    return self._RegularMessageToJsonObject(message, js)
Converts message to an object according to Proto3 JSON Specification.
def _prepare_to_send_ack(self, path, ack_id): 'Return function that acknowledges the server' return lambda *args: self._ack(path, ack_id, *args)
Return function that acknowledges the server
def addService(self, service, name=None, description=None,
               authenticator=None, expose_request=None,
               preprocessor=None):
    """Adds a service to the gateway.

    @param service: The service to add to the gateway.
    @type service: C{callable}, class instance, or a module
    @param name: The name of the service.
    @type name: C{str}
    @raise pyamf.remoting.RemotingError: Service already exists.
    @raise TypeError: C{service} cannot be a scalar value.
    @raise TypeError: C{service} must be C{callable} or a module.
    """
    # NOTE(review): Python 2 only code (long, basestring, types.ClassType,
    # func_name) — will not run under Python 3.
    if isinstance(service, (int, long, float, basestring)):
        raise TypeError("Service cannot be a scalar value")
    allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
                     types.MethodType, types.InstanceType, types.ObjectType)
    if not python.callable(service) and not isinstance(service, allowed_types):
        raise TypeError("Service must be a callable, module, or an object")
    if name is None:
        # Derive a default service name from the service object itself.
        if isinstance(service, (type, types.ClassType)):
            name = service.__name__
        elif isinstance(service, types.FunctionType):
            name = service.func_name
        elif isinstance(service, types.ModuleType):
            name = service.__name__
        else:
            name = str(service)
    if name in self.services:
        raise remoting.RemotingError("Service %s already exists" % name)
    self.services[name] = ServiceWrapper(service, description, authenticator,
                                         expose_request, preprocessor)
Adds a service to the gateway. @param service: The service to add to the gateway. @type service: C{callable}, class instance, or a module @param name: The name of the service. @type name: C{str} @raise pyamf.remoting.RemotingError: Service already exists. @raise TypeError: C{service} cannot be a scalar value. @raise TypeError: C{service} must be C{callable} or a module.
def get_overall_state(self, services):
    """Return the host overall state id (0..5), folding in service states.

    Priority order for the host part: not monitored (5), acknowledged (1),
    downtimed (2), HARD UNREACHABLE (3), HARD DOWN (4), otherwise up (0).
    When the host part is <= 2, the maximum of the known services'
    overall states (ignoring unmonitored services, id 5) is folded in.

    :param services: known services, indexed by identifier
    :return: the host overall state
    :rtype: int
    """
    if not self.monitored:
        state_id = 5
    elif self.acknowledged:
        state_id = 1
    elif self.downtimed:
        state_id = 2
    elif self.state_type == 'HARD' and self.state == 'UNREACHABLE':
        state_id = 3
    elif self.state_type == 'HARD' and self.state == 'DOWN':
        state_id = 4
    else:
        state_id = 0
    if state_id <= 2:
        # Fold in the worst service state, skipping unmonitored (5) ones.
        for service_id in self.services:
            if service_id not in services:
                continue
            service_state = services[service_id].overall_state_id
            if service_state < 5:
                state_id = max(state_id, service_state)
    return state_id
Get the host overall state including the host self status and the status of its services Compute the host overall state identifier, including: - the acknowledged state - the downtime state The host overall state is (prioritized): - an host not monitored (5) - an host down (4) - an host unreachable (3) - an host downtimed (2) - an host acknowledged (1) - an host up (0) If the host overall state is <= 2, then the host overall state is the maximum value of the host overall state and all the host services overall states. The overall state of an host is: - 0 if the host is UP and all its services are OK - 1 if the host is DOWN or UNREACHABLE and acknowledged or at least one of its services is acknowledged and no other services are WARNING or CRITICAL - 2 if the host is DOWN or UNREACHABLE and in a scheduled downtime or at least one of its services is in a scheduled downtime and no other services are WARNING or CRITICAL - 3 if the host is UNREACHABLE or at least one of its services is WARNING - 4 if the host is DOWN or at least one of its services is CRITICAL - 5 if the host is not monitored :param services: a list of known services :type services: alignak.objects.service.Services :return: the host overall state :rtype: int
def check_cursor_location(self):
    """Check whether the data location of the cursor has changed.

    If so, log the transition, update the cached position and issue a
    'cursor-changed' callback.

    Returns:
        tuple: the (data_x, data_y) location of the cursor.
    """
    data_x, data_y = self.get_data_xy(self.last_win_x, self.last_win_y)
    if (data_x != self.last_data_x or data_y != self.last_data_y):
        # BUGFIX: log BEFORE overwriting the cached position, otherwise
        # the "from => to" message shows the new values twice.
        self.logger.debug("cursor location changed %.4f,%.4f => %.4f,%.4f" % (
            self.last_data_x, self.last_data_y, data_x, data_y))
        self.last_data_x, self.last_data_y = data_x, data_y
        button = 0
        self.make_ui_callback('cursor-changed', button, data_x, data_y)
    return data_x, data_y
Check whether the data location of the last known position of the cursor has changed. If so, issue a callback.
def traverse_depth_first_pre_order(self, callback):
    """Visits the internal "nodes" of the enhanced suffix array in
    depth-first pre-order, calling ``callback`` on each interval.

    Based on Abouelhoda et al. (2004). Intervals are sequences whose
    elements 1 and 2 are the left/right bounds and element 3 the edge
    label; children are visited in label order.
    """
    n = len(self.suftab)
    root = [0, 0, n - 1, ""]
    def _traverse_top_down(interval):
        callback(interval)
        i, j = interval[1], interval[2]
        # Singleton intervals (i == j) are leaves with no children.
        if i != j:
            children = self._get_child_intervals(i, j)
            # Sort by edge label for a deterministic visit order.
            children.sort(key=lambda child: child[3])
            for child in children:
                _traverse_top_down(child)
    _traverse_top_down(root)
Visits the internal "nodes" of the enhanced suffix array in depth-first pre-order. Based on Abouelhoda et al. (2004).
def get_failed_job(self, id):
    """Get failed job error details.

    Args:
        id (str): The id of the job.

    See: https://auth0.com/docs/api/management/v2#!/Jobs/get_errors
    """
    errors_url = self._url('{}/errors'.format(id))
    return self.client.get(errors_url)
Get failed job error details Args: id (str): The id of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_errors
def run(self):
    """Run the service in infinitive loop processing requests.

    Each received message is handed to ``on_message``; truthy results
    are sent back on the same connection. When the websocket closes,
    ``on_closed`` is notified and a fresh closed exception is raised,
    chained to the original one.
    """
    try:
        while True:
            message = self.connection.recv()
            result = self.on_message(message)
            if result:
                self.connection.send(result)
    except SelenolWebSocketClosedException as ex:
        self.on_closed(0, '')
        raise SelenolWebSocketClosedException() from ex
Run the service in infinitive loop processing requests.
def check_user(user_id, u_pass):
    """Checking the password by user's ID.

    Returns 1 on success, 0 on wrong password, -1 for an unknown user.
    """
    # NOTE(review): two queries (count + get) where one get-or-None would do.
    user_count = TabMember.select().where(TabMember.uid == user_id).count()
    if user_count == 0:
        return -1
    the_user = TabMember.get(uid=user_id)
    # Stored passwords are MD5 digests.
    if the_user.user_pass == tools.md5(u_pass):
        return 1
    return 0
Checking the password by user's ID.
def qual(args):
    """%prog qual fastafile

    Generate dummy .qual file based on FASTA file.
    """
    from jcvi.formats.sizes import Sizes
    # OptionParser shows this function's docstring as the usage text.
    p = OptionParser(qual.__doc__)
    p.add_option("--qv", default=31, type="int",
                 help="Dummy qv score for extended bases")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    fastafile, = args
    sizes = Sizes(fastafile)
    qvchar = str(opts.qv)
    fw = must_open(opts.outfile, "w")
    total = 0
    # One qual record per FASTA sequence: the dummy qv repeated per base.
    for s, slen in sizes.iter_sizes():
        print(">" + s, file=fw)
        print(" ".join([qvchar] * slen), file=fw)
        total += 1
    fw.close()
    logging.debug("Written {0} records in `{1}`.".format(total, opts.outfile))
%prog qual fastafile Generate dummy .qual file based on FASTA file.
def shutdown(self, message=None):
    """Disconnect all servers with a message.

    Args:
        message (str): Quit message to use on each connection.
    """
    for server in self.servers.values():
        server.quit(message)
Disconnect all servers with a message. Args: message (str): Quit message to use on each connection.
def create_question_dialog(self, text, second_text):
    """Function creates a question dialog with title text and second_text.

    Runs the dialog modally and returns the Gtk response id chosen by
    the user (Yes/No buttons).
    """
    dialog = self.create_message_dialog(
        text, buttons=Gtk.ButtonsType.YES_NO, icon=Gtk.MessageType.QUESTION
    )
    dialog.format_secondary_text(second_text)
    # Blocks until the user answers, then tear the dialog down.
    response = dialog.run()
    dialog.destroy()
    return response
Function creates a question dialog with title text and second_text
def print_cyjs_graph(self):
    """Return the assembled Cytoscape JS network as a json string.

    Returns
    -------
    cyjs_str : str
        A json string representation of the Cytoscape JS network.
    """
    network = {'edges': self._edges, 'nodes': self._nodes}
    return json.dumps(network, indent=1, sort_keys=True)
Return the assembled Cytoscape JS network as a json string. Returns ------- cyjs_str : str A json string representation of the Cytoscape JS network.
def _extract_file(self, zf, info, extract_dir): out_path = os.path.join(extract_dir, info.filename) out_path = os.path.abspath(out_path) if not out_path.startswith(extract_dir): raise ValueError( "malicious zipfile, %s outside of extract_dir %s" % (info.filename, extract_dir)) zf.extract(info.filename, path=extract_dir) perm = info.external_attr >> 16 os.chmod(out_path, perm)
the zipfile module does not restore file permissions so we'll do it manually
def write(self, endpoints, filename):
    """Writes the given endpoint descriptions to the given file.

    :param endpoints: A list of EndpointDescription beans
    :param filename: Name of the file where to write the XML
    :raise IOError: Error writing the file
    """
    with open(filename, "w") as out_file:
        out_file.write(self.to_string(endpoints))
Writes the given endpoint descriptions to the given file :param endpoints: A list of EndpointDescription beans :param filename: Name of the file where to write the XML :raise IOError: Error writing the file
def cc(self, chan, ctrl, val):
    """Send control change value.

    The controls that are recognized are dependent on the SoundFont.
    Values are always 0 to 127. Typical controls include:
      1: vibrato, 7: volume, 10: pan (left to right),
      11: expression (soft to loud), 64: sustain,
      91: reverb, 93: chorus
    """
    return fluid_synth_cc(self.synth, chan, ctrl, val)
Send control change value. The controls that are recognized are dependent on the SoundFont. Values are always 0 to 127. Typical controls include: 1: vibrato 7: volume 10: pan (left to right) 11: expression (soft to loud) 64: sustain 91: reverb 93: chorus
def simple_balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
    """Get the balance for this account, ignoring all child accounts.

    Args:
        as_of (Date): Only include transactions on or before this date.
        raw (bool): If true the returned balance should not have its
            sign adjusted for display purposes.
        leg_query (models.Q): Django Q-expression used to filter the
            transaction legs; allows more complex filtering than kwargs.
        kwargs (dict): Will be used to filter the transaction legs.

    Returns:
        Balance
    """
    legs = self.legs
    if as_of:
        legs = legs.filter(transaction__date__lte=as_of)
    if leg_query or kwargs:
        leg_query = leg_query or models.Q()
        legs = legs.filter(leg_query, **kwargs)
    # Apply the account sign unless a raw balance was requested, then
    # add the zero balance so every expected currency is represented.
    return legs.sum_to_balance() * (1 if raw else self.sign) + self._zero_balance()
Get the balance for this account, ignoring all child accounts Args: as_of (Date): Only include transactions on or before this date raw (bool): If true the returned balance should not have its sign adjusted for display purposes. leg_query (models.Q): Django Q-expression, will be used to filter the transaction legs. allows for more complex filtering than that provided by **kwargs. kwargs (dict): Will be used to filter the transaction legs Returns: Balance
def is_pythonw(filename):
    """Check that the python interpreter is 'pythonw'.

    Matches e.g. "pythonw", "python3.7w.exe" or a full path ending in
    such a name (case-insensitive).
    """
    # BUGFIX: escape the dot in ".exe" (it previously matched any
    # character, e.g. "pythonwXexe"); return the boolean directly.
    pattern = r'.*python(\d\.?\d*)?w(\.exe)?$'
    return re.match(pattern, filename, flags=re.I) is not None
Check that the python interpreter has 'pythonw'.
def _fun_names_iter(self, functyp, val):
    """Iterate over the names of the functions in ``val``, adding them to
    the engine's function store of type ``functyp`` if they are missing;
    or if the items in ``val`` are already the names of functions in the
    store, iterate over those.

    Raises:
        KeyError: if a name in ``val`` is not present in the store.
    """
    funcstore = getattr(self.engine, functyp)
    for v in val:
        if callable(v):
            # Register the callable under its own name, then yield it.
            setattr(funcstore, v.__name__, v)
            yield v.__name__
        elif v not in funcstore:
            raise KeyError("Function {} not present in {}".format(
                v, funcstore._tab))
        else:
            yield v
Iterate over the names of the functions in ``val``, adding them to ``funcstore`` if they are missing; or if the items in ``val`` are already the names of functions in ``funcstore``, iterate over those.