code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def evaluate_at(self, eval_at, testcases, mode=None):
    """Set the evaluation iteration indices and build the evaluator.

    :param list eval_at: iteration indices where an evaluation should be
        performed
    :param numpy.array testcases: testcases used for evaluation
    :param str mode: evaluation mode; when None it is derived from
        ``self.context_mode``
    """
    self.eval_at = eval_at
    self.log.eval_at = eval_at
    if mode is None:
        # BUGFIX: dict.has_key() was removed in Python 3; the `in`
        # operator is the equivalent membership test.
        if self.context_mode is None or ('choose_m' in self.context_mode and self.context_mode['choose_m']):
            mode = 'inverse'
        else:
            mode = self.context_mode["mode"]
    self.evaluation = Evaluation(self.ag, self.env, testcases, mode=mode)
    for test in testcases:
        self.log.add('testcases', test)
Sets the evaluation iteration indices. :param list eval_at: iteration indices where an evaluation should be performed :param numpy.array testcases: testcases used for evaluation
def relpath(self, path, start=None):
    """Return a relative version of `path`, mostly delegating to the
    native implementation while adapting the fake filesystem's path
    separators.

    :param path: path to make relative
    :param start: optional start directory; defaults to the fake cwd
    :raises ValueError: if `path` is empty
    """
    if not path:
        raise ValueError("no path specified")
    path = make_string_path(path)
    if start is not None:
        start = make_string_path(start)
    else:
        start = self.filesystem.cwd
    if self.filesystem.alternative_path_separator is not None:
        # Collapse the alternative separator to the native one first.
        path = path.replace(self.filesystem.alternative_path_separator,
                            self._os_path.sep)
        start = start.replace(self.filesystem.alternative_path_separator,
                              self._os_path.sep)
    path = path.replace(self.filesystem.path_separator, self._os_path.sep)
    start = start.replace(
        self.filesystem.path_separator, self._os_path.sep)
    path = self._os_path.relpath(path, start)
    # Convert back to the fake filesystem's separator.
    return path.replace(self._os_path.sep, self.filesystem.path_separator)
We mostly rely on the native implementation and adapt the path separator.
def _parse_mods(mods):
    """Parse modules: a comma-separated string becomes a list of
    stripped, non-empty names; other values pass through unchanged."""
    if isinstance(mods, six.string_types):
        mods = [item.strip() for item in mods.split(',') if item.strip()]
    return mods
Parse modules.
def _handle_requests_params(self, kwargs):
    """Validate parameters destined for the requests verbs.

    Ensures no requests_params name collides with another kwarg, and
    injects the required 'ver' param when an icontrol_version is set.

    :param kwargs: keyword arguments, mutated (requests_params popped)
    :raises RequestParamKwargCollision: on a name collision
    :returns: the validated requests_params dict
    """
    requests_params = kwargs.pop('requests_params', {})
    for param in requests_params:
        if param in kwargs:
            error_message = 'Requests Parameter %r collides with a load'\
                ' parameter of the same name.' % param
            raise RequestParamKwargCollision(error_message)
    if self._meta_data['icontrol_version']:
        # Pin the API version on every request.
        params = requests_params.pop('params', {})
        params.update({'ver': self._meta_data['icontrol_version']})
        requests_params.update({'params': params})
    return requests_params
Validate parameters that will be passed to the requests verbs. This method validates that there is no conflict in the names of the requests_params passed to the function and the other kwargs. It also ensures that the required request parameters for the object are added to the request params that are passed into the verbs. An example of the latter is ensuring that a certain version of the API is always called to add 'ver=11.6.0' to the url.
def record(self):
    """Generate the string representing this UDF Timestamp.

    :returns: the packed on-disk representation of this timestamp
    :raises pycdlibexception.PyCdlibInternalError: if not initialized
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Timestamp not initialized')
    # Mask tz to 16 bits, then split it across the tz byte and the low
    # nibble of the type byte (type occupies the high nibble).
    tmp = ((1 << 16) - 1) & self.tz
    newtz = tmp & 0xff
    newtimetype = ((tmp >> 8) & 0x0f) | (self.timetype << 4)
    return struct.pack(self.FMT, newtz, newtimetype, self.year, self.month,
                       self.day, self.hour, self.minute, self.second,
                       self.centiseconds, self.hundreds_microseconds,
                       self.microseconds)
A method to generate the string representing this UDF Timestamp. Parameters: None. Returns: A string representing this UDF Timestamp.
def _remove_overlaps(in_file, out_dir, data):
    """Remove regions that overlap with the next region; these cause
    issues with PureCN.

    :param in_file: input BED-like file (assumes sorted by chromosome
        and start — TODO confirm against callers)
    :param out_dir: directory for the filtered output file
    :param data: sample data used for the file transaction
    :returns: path to the overlap-free output file
    """
    out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    prev_line = None
                    for line in in_handle:
                        if prev_line:
                            pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
                            cchrom, cstart, cend = line.split("\t", 4)[:3]
                            # Drop the previous region when it runs past
                            # the start of the current one on the same
                            # chromosome.
                            if pchrom == cchrom and int(pend) > int(cstart):
                                pass
                            else:
                                out_handle.write(prev_line)
                        prev_line = line
                    # The final region has nothing after it to overlap.
                    out_handle.write(prev_line)
    return out_file
Remove regions that overlap with next region, these result in issues with PureCN.
def on_epoch_end(self, epoch, **kwargs:Any)->None:
    "Compare the value monitored to its best and maybe reduce lr."
    current = self.get_monitor_value()
    if current is None:
        return
    if self.operator(current - self.min_delta, self.best):
        # Improved by at least min_delta: record it, reset patience.
        self.best,self.wait = current,0
    else:
        self.wait += 1
        if self.wait > self.patience:
            # Patience exhausted: scale the learning rate down.
            self.opt.lr *= self.factor
            self.wait = 0
            print(f'Epoch {epoch}: reducing lr to {self.opt.lr}')
Compare the value monitored to its best and maybe reduce lr.
def parse_dsn(dsn):
    """Parse a DSN string into its component parts.

    :param dsn: DSN URL string
    :returns: dict with scheme, sender, token, domain, port (default
        80), version and project
    """
    parsed_dsn = urlparse(dsn)
    parsed_path = parse_path(parsed_dsn.path)
    return {
        'scheme': parsed_dsn.scheme,
        'sender': parsed_dsn.username,
        'token': parsed_dsn.password,
        'domain': parsed_dsn.hostname,
        'port': parsed_dsn.port or 80,
        'version': parsed_path.get('version'),
        'project': parsed_path.get('project'),
    }
Parse dsn string.
def _key_to_address(key):
    """Create the state address for a given setting key."""
    # Split into at most _MAX_KEY_PARTS dotted parts and pad with empty
    # strings so every address has the same number of hashed segments.
    key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
    key_parts.extend([''] * (_MAX_KEY_PARTS - len(key_parts)))
    return SETTINGS_NAMESPACE + ''.join(_short_hash(x) for x in key_parts)
Creates the state address for a given setting key.
def flightmode_colours():
    """Return a mapping of flight mode name to colour, cycling through
    the available colours when there are more modes than colours."""
    from MAVProxy.modules.lib.grapher import flightmode_colours
    mapping = {}
    idx = 0
    for (mode,t0,t1) in flightmodes:
        if not mode in mapping:
            mapping[mode] = flightmode_colours[idx]
            idx += 1
            # Wrap around so extra modes reuse colours.
            if idx >= len(flightmode_colours):
                idx = 0
    return mapping
return mapping of flight mode to colours
def import_task(self, img, cont, img_format=None, img_name=None):
    """Create a task to import `img` from the swift container `cont`.

    The new image keeps the container object's name unless `img_name`
    is given; `img_format` defaults to the service's assumed format
    when None.
    """
    return self._tasks_manager.create("import", img=img, cont=cont,
                                      img_format=img_format,
                                      img_name=img_name)
Creates a task to import the specified image from the swift container named in the 'cont' parameter. The new image will be named the same as the object in the container unless you specify a value for the 'img_name' parameter. By default it is assumed that the image is in 'vhd' format; if it is another format, you must specify that in the 'img_format' parameter.
def find_user(self, username=None, email=None):
    """Find a user by username (case-insensitive) or email.

    Returns None implicitly when neither argument is supplied or no
    user matches.
    """
    if username:
        return (
            self.get_session.query(self.user_model)
            .filter(func.lower(self.user_model.username) == func.lower(username))
            .first()
        )
    elif email:
        return (
            self.get_session.query(self.user_model).filter_by(email=email).first()
        )
Finds user by username or email
def get(self, section, option, default = None):
    """Return the option's value from `section`.

    :param section: section name
    :param option: option name
    :param default: value to return when the option is missing; when
        None, a missing option raises instead
    :raises NoOptionError: if the option is missing and no default given
    :raises NoSectionError: if the section does not exist
    """
    if self.has_section(section):
        try:
            return self.config[section][option].get('value', None)
        except KeyError:
            # BUGFIX: compare to None with identity (`is`), not
            # equality — `==` can be overridden and is unidiomatic.
            if default is None:
                raise NoOptionError(option)
            else:
                return default
    else:
        raise NoSectionError(section)
Returns the option's value converted into its intended type. If default is specified, return that on failure, else raise NoOptionError.
def adapt_files(solver):
    """Extract archives, then rename and remove files for `solver`
    whenever necessary."""
    print("adapting {0}'s files".format(solver))
    root = os.path.join('solvers', solver)
    for arch in to_extract[solver]:
        arch = os.path.join(root, arch)
        extract_archive(arch, solver, put_inside=True)
    for fnames in to_move[solver]:
        # Each entry is an (old_name, new_name) pair.
        old = os.path.join(root, fnames[0])
        new = os.path.join(root, fnames[1])
        os.rename(old, new)
    for f in to_remove[solver]:
        f = os.path.join(root, f)
        if os.path.isdir(f):
            shutil.rmtree(f)
        else:
            os.remove(f)
Rename and remove files whenever necessary.
def fillna(self, value=None, method=None, limit=None):
    """Fill missing values with `value` or by `method`.

    Exactly one of `value` and `method` must be given. Using `method`
    densifies to an ndarray (high memory). When `self.fill_value` is
    NA, filling with `value` also changes the result's fill value.

    :returns: SparseArray
    :raises ValueError: if both or neither of value/method are given
    """
    if ((method is None and value is None) or
            (method is not None and value is not None)):
        raise ValueError("Must specify one of 'method' or 'value'.")
    elif method is not None:
        msg = "fillna with 'method' requires high memory usage."
        warnings.warn(msg, PerformanceWarning)
        filled = interpolate_2d(np.asarray(self), method=method,
                                limit=limit)
        return type(self)(filled, fill_value=self.fill_value)
    else:
        new_values = np.where(isna(self.sp_values), value, self.sp_values)
        if self._null_fill_value:
            # NA fill_value: the fill value itself becomes `value`.
            new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
        else:
            new_dtype = self.dtype
        return self._simple_new(new_values, self._sparse_index, new_dtype)
Fill missing values with `value`. Parameters ---------- value : scalar, optional method : str, optional .. warning:: Using 'method' will result in high memory use, as all `fill_value` methods will be converted to an in-memory ndarray limit : int, optional Returns ------- SparseArray Notes ----- When `value` is specified, the result's ``fill_value`` depends on ``self.fill_value``. The goal is to maintain low-memory use. If ``self.fill_value`` is NA, the result dtype will be ``SparseDtype(self.dtype, fill_value=value)``. This will preserve amount of memory used before and after filling. When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used.
def _get_lrs(self, indices):
    """Get the learning rates for the given weight indices.

    :param indices: list of int indices corresponding to weights
    :returns: list of float learning rates, one per index
    """
    if self.lr_scheduler is not None:
        lr = self.lr_scheduler(self.num_update)
    else:
        lr = self.lr
    lrs = [lr for _ in indices]
    for i, index in enumerate(indices):
        # Per-parameter multiplier precedence: param attrs, then
        # explicit lr_mult entries, then name-based lookup.
        if index in self.param_dict:
            lrs[i] *= self.param_dict[index].lr_mult
        elif index in self.lr_mult:
            lrs[i] *= self.lr_mult[index]
        elif index in self.idx2name:
            lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
    return lrs
Gets the learning rates given the indices of the weights. Parameters ---------- indices : list of int Indices corresponding to weights. Returns ------- lrs : list of float Learning rates for those indices.
def huffman_encode(cls, s):
    """Return the bitstring (as an int) and its bitlength encoding `s`.

    @param str s: the string to encode
    @return (int, int): the bitstring of s and its bitlength
    @raise AssertionError
    """
    i = 0
    ibl = 0
    for c in s:
        val, bl = cls._huffman_encode_char(c)
        i = (i << bl) + val
        ibl += bl
    # Pad to a byte boundary using the most-significant bits of the
    # EOS symbol, per the HPACK Huffman coding rules.
    padlen = 8 - (ibl % 8)
    if padlen != 8:
        val, bl = cls._huffman_encode_char(EOS())
        i = (i << padlen) + (val >> (bl - padlen))
        ibl += padlen
    ret = i, ibl
    assert(ret[0] >= 0)
    assert (ret[1] >= 0)
    return ret
huffman_encode returns the bitstring and the bitlength of the bitstring representing the string provided as a parameter @param str s: the string to encode @return (int, int): the bitstring of s and its bitlength @raise AssertionError: if the computed bitstring or bitlength is negative
def _call(self, x, out=None):
    """Project ``x`` onto the subspace, writing into ``out`` if given."""
    if out is None:
        out = x[self.index].copy()
    else:
        out.assign(x[self.index])
    return out
Project ``x`` onto the subspace.
def setup_prj_page(self, ):
    """Set up the project page: make all table views resize their
    columns to contents.

    :returns: None
    """
    self.prj_seq_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
    self.prj_atype_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
    self.prj_dep_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
    self.prj_user_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
Create and set the model on the project page :returns: None :rtype: None :raises: None
def to_dict(self):
    """Return a dict representation of KnwKB."""
    mydict = {'id': self.id, 'name': self.name,
              'description': self.description, 'kbtype': self.kbtype}
    if self.kbtype == 'd':
        # Dynamic KBs also merge in their definition dict, if any.
        mydict.update((self.kbdefs.to_dict() if self.kbdefs else {}) or {})
    return mydict
Return a dict representation of KnwKB.
def delete_insight(self, project_key, insight_id):
    """Delete an existing insight.

    :param project_key: project identifier (projectOwner/projectId)
    :param insight_id: insight unique id
    :raises RestApiError: if a server error occurs
    """
    projectOwner, projectId = parse_dataset_key(project_key)
    try:
        self._insights_api.delete_insight(projectOwner, projectId,
                                          insight_id)
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
Delete an existing insight. :param project_key: Project identifier, in the form of projectOwner/projectId :type project_key: str :param insight_id: Insight unique id :type insight_id: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> del_insight = api_client.delete_insight( ... 'username/project', 'insightid') # doctest: +SKIP
def dump(obj, fp):
    """Serialize an ARFF document object to a file-like object.

    :param obj: a dictionary describing the ARFF document
    :param fp: a file-like object
    :returns: `fp`
    """
    encoder = ArffEncoder()
    generator = encoder.iter_encode(obj)
    # Write a newline after every row except the last one.
    last_row = next(generator)
    for row in generator:
        fp.write(last_row + u'\n')
        last_row = row
    fp.write(last_row)
    return fp
Serialize an object representing the ARFF document to a given file-like object. :param obj: a dictionary. :param fp: a file-like object.
def power(base, exp):
    """Return first array elements raised to powers from the second
    array, element-wise with broadcasting.

    Equivalent to ``base ** exp`` / ``mx.nd.broadcast_power``; scalar
    operands dispatch to the scalar variants.
    """
    return _ufunc_helper(
        base,
        exp,
        op.broadcast_power,
        operator.pow,
        _internal._power_scalar,
        _internal._rpower_scalar)
Returns result of first array elements raised to powers from second array, element-wise with broadcasting. Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``. .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- base : scalar or NDArray The base array exp : scalar or NDArray The exponent array. If ``base.shape != exp.shape``, they must be broadcastable to a common shape. Returns -------- NDArray The bases in x raised to the exponents in y. Examples -------- >>> x = mx.nd.ones((2,3))*2 >>> y = mx.nd.arange(1,3).reshape((2,1)) >>> z = mx.nd.arange(1,3).reshape((2,1)) >>> x.asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> y.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> z.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> (x**2).asnumpy() array([[ 4., 4., 4.], [ 4., 4., 4.]], dtype=float32) >>> (x**y).asnumpy() array([[ 2., 2., 2.], [ 4., 4., 4.]], dtype=float32) >>> mx.nd.power(x,y).asnumpy() array([[ 2., 2., 2.], [ 4., 4., 4.]], dtype=float32) >>> (z**y).asnumpy() array([[ 1.], [ 4.]], dtype=float32)
def gradient(poly):
    """Return the gradient of a polynomial.

    Args:
        poly (Poly) : polynomial to take gradient of.

    Returns:
        (Poly) : the resulting gradient.
    """
    return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))
Gradient of a polynomial. Args: poly (Poly) : polynomial to take gradient of. Returns: (Poly) : The resulting gradient. Examples: >>> q0, q1, q2 = chaospy.variable(3) >>> poly = 2*q0 + q1*q2 >>> print(chaospy.gradient(poly)) [2, q2, q1]
async def get_alarms():
    """Fetch alarms and timers from the Google Home and print them."""
    async with aiohttp.ClientSession() as session:
        ghlocalapi = Alarms(LOOP, session, IPADDRESS)
        await ghlocalapi.get_alarms()
        print("Alarms:", ghlocalapi.alarms)
Get alarms and timers from GH.
def winddir_text(pts):
    "Convert wind direction from 0..15 to compass point text"
    global _winddir_text_array
    if pts is None:
        return None
    if not isinstance(pts, int):
        # Round to the nearest compass point, wrapping at 16.
        pts = int(pts + 0.5) % 16
    if not _winddir_text_array:
        # Build the translated compass-point table lazily, once.
        _ = pywws.localisation.translation.ugettext
        _winddir_text_array = (
            _(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'),
            _(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'),
            _(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'),
            _(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'),
            )
    return _winddir_text_array[pts]
Convert wind direction from 0..15 to compass point text
def _format_metric_name(self, m_name, cfunc):
    """Format a cacti metric name into a Datadog-friendly name."""
    try:
        aggr = CFUNC_TO_AGGR[cfunc]
    except KeyError:
        aggr = cfunc.lower()
    try:
        m_name = CACTI_TO_DD[m_name]
        # 'avg' is the implicit default, so only other aggregations
        # get a suffix.
        if aggr != 'avg':
            m_name += '.{}'.format(aggr)
        return m_name
    except KeyError:
        # Unknown metric: fall back to a generic cacti.* name.
        return "cacti.{}.{}".format(m_name.lower(), aggr)
Format a cacti metric name into a Datadog-friendly name
def fit_gaussian(x, y, yerr, p0):
    """Fit a Gaussian to the data.

    :param x: independent values
    :param y: dependent values
    :param yerr: uncertainties on y, used as sigma
    :param p0: initial parameter guess
    :returns: (popt, pcov) from the fit, or the sentinel ([0], [0])
        when the fit fails to converge
    """
    try:
        popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0,
                               absolute_sigma=True)
    except RuntimeError:
        return [0],[0]
    return popt, pcov
Fit a Gaussian to the data
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
    """Greedily find a 1-1 alignment between gold and predicted answer
    sets and return the maximum F1 per gold item.

    NOTE(review): mutates `gold` and `predicted` in place — matched
    items are emptied so they cannot be matched again.
    """
    f1_scores = []
    for gold_index, gold_item in enumerate(gold):
        max_f1 = 0.0
        max_index = None
        best_alignment: Tuple[Set[str], Set[str]] = (set(), set())
        if predicted:
            for pred_index, pred_item in enumerate(predicted):
                current_f1 = _compute_f1(pred_item, gold_item)
                if current_f1 >= max_f1:
                    best_alignment = (gold_item, pred_item)
                    max_f1 = current_f1
                    max_index = pred_index
            # Only count the score if the numbers in the aligned pair
            # agree.
            match_flag = _match_numbers_if_present(*best_alignment)
            gold[gold_index] = set()
            predicted[max_index] = set()
        else:
            match_flag = False
        if match_flag:
            f1_scores.append(max_f1)
        else:
            f1_scores.append(0.0)
    return f1_scores
Takes gold and predicted answer sets and first finds a greedy 1-1 alignment between them and gets maximum metric values over all the answers
def _on_report(_loop, adapter, conn_id, report):
    """Callback when a report is received; route it as an event."""
    conn_string = None
    if conn_id is not None:
        conn_string = adapter._get_property(conn_id, 'connection_string')
    if isinstance(report, BroadcastReport):
        # Broadcasts are forwarded even without a connection string.
        adapter.notify_event_nowait(conn_string, 'broadcast', report)
    elif conn_string is not None:
        adapter.notify_event_nowait(conn_string, 'report', report)
    else:
        adapter._logger.debug("Dropping report with unknown conn_id=%s", conn_id)
Callback when a report is received.
def calc_progress(self, completed_count, total_count):
    """Calculate percentage progress and estimated remaining time.

    :param completed_count: number of items completed so far
    :param total_count: total number of items
    :returns: tuple ``(percentage_complete, seconds_remaining)``;
        ``(100, 1)`` when everything (or nothing at all) is to be done,
        ``(0, 0)`` when no items have completed yet (remaining time is
        not yet estimable)
    """
    self.logger.debug(
        "calc_progress(%s, %s)",
        completed_count,
        total_count,
    )
    current_time = time.time()
    time_spent = current_time - self.start_time
    self.logger.debug("Progress time spent: %s", time_spent)
    if total_count == 0:
        # Nothing to do counts as fully complete.
        return 100, 1
    completion_fraction = completed_count / total_count
    if completion_fraction == 0:
        # BUGFIX: previously a zero fraction was coerced to 1, making
        # zero progress report as 100% complete. Report 0% instead;
        # remaining time cannot be estimated without any completions.
        return 0, 0
    total_time = time_spent / completion_fraction
    time_remaining = total_time - time_spent
    completion_display = completion_fraction * 100
    if completion_display == 100:
        return 100, 1
    return completion_display, time_remaining
Calculate the percentage progress and estimated remaining time based on the current number of items completed of the total. Returns a tuple of ``(percentage_complete, seconds_remaining)``.
def createService(self, createServiceParameter,
                  description=None,
                  tags="Feature Service",
                  snippet=None):
    """Create an empty hosted feature service from service metadata.

    :param createServiceParameter: create service object
    :param description: optional item description
    :param tags: item tags
    :param snippet: optional item snippet
    :returns: a UserItem for the created service, or the raw response
        dict on failure
    """
    url = "%s/createService" % self.location
    val = createServiceParameter.value
    params = {
        "f" : "json",
        "outputType" : "featureService",
        "createParameters" : json.dumps(val),
        "tags" : tags
    }
    if snippet is not None:
        params['snippet'] = snippet
    if description is not None:
        params['description'] = description
    res = self._post(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    # The item id key differs between API versions.
    if 'id' in res or \
       'serviceItemId' in res:
        if 'id' in res:
            url = "%s/items/%s" % (self.location, res['id'])
        else:
            url = "%s/items/%s" % (self.location, res['serviceItemId'])
        return UserItem(url=url,
                        securityHandler=self._securityHandler,
                        proxy_url=self._proxy_url,
                        proxy_port=self._proxy_port)
    return res
The Create Service operation allows users to create a hosted feature service. You can use the API to create an empty hosted feature service from feature service metadata JSON. Inputs: createServiceParameter - create service object
def _get_best_prediction(self, record, train=True):
    """Get the prediction from the tree with the lowest mean absolute
    error; returns None when there are no trees."""
    if not self.trees:
        return
    # +1e999999 overflows to +inf, so any real MAE compares lower.
    best = (+1e999999, None)
    for tree in self.trees:
        best = min(best, (tree.mae.mean, tree))
    _, best_tree = best
    prediction, tree_mae = best_tree.predict(record, train=train)
    return prediction.mean
Gets the prediction from the tree with the lowest mean absolute error.
def check_in(choices, **params):
    """Check that every named parameter value is one of the allowed
    choices.

    Parameters
    ----------
    choices : array-like, accepted values
    params : object
        Named arguments, parameters to be checked

    Raises
    ------
    ValueError : unacceptable choice of parameters
    """
    for name, value in params.items():
        if value not in choices:
            raise ValueError(
                "{} value {} not recognized. Choose from {}".format(
                    name, value, choices))
Checks parameters are in a list of allowed parameters Parameters ---------- choices : array-like, accepted values params : object Named arguments, parameters to be checked Raises ------ ValueError : unacceptable choice of parameters
def write_frames(self, frames_out):
    """Write multiple pamqp frames on the current channel.

    :param list frames_out: a list of pamqp frames
    :return:
    """
    self.check_for_errors()
    self._connection.write_frames(self.channel_id, frames_out)
Write multiple pamqp frames from the current channel. :param list frames_out: A list of pamqp frames. :return:
def connections(self):
    """Return the names of all loaded connections as a list.

    Section names are reported with any ``connection:`` prefix removed.
    """
    names = []
    for section in self.sections():
        names.append(str(section).replace('connection:', ''))
    return names
Returns all of the loaded connections names as a list
def circuit_to_pyquil(circuit: Circuit) -> pyquil.Program:
    """Convert a QuantumFlow circuit to a pyQuil program.

    :raises ValueError: for operations with no pyQuil equivalent
    """
    prog = pyquil.Program()
    for elem in circuit.elements:
        if isinstance(elem, Gate) and elem.name in QUIL_GATES:
            params = list(elem.params.values()) if elem.params else []
            prog.gate(elem.name, params, elem.qubits)
        elif isinstance(elem, Measure):
            prog.measure(elem.qubit, elem.cbit)
        else:
            raise ValueError('Cannot convert operation to pyquil')
    return prog
Convert a QuantumFlow circuit to a pyQuil program
def getBounds(self, tzinfo=None):
    """Return a (min, max) pair of Time instances bounding self.

    For resolutions of one day or more, timezones are insignificant,
    so the bounds cover both the naive and tz-adjusted interpretations
    of the instant (see the class documentation for the rationale).
    """
    if self.resolution >= datetime.timedelta(days=1) \
       and tzinfo is not None:
        time = self._time.replace(tzinfo=tzinfo)
    else:
        time = self._time
    return (
        min(self.fromDatetime(time), self.fromDatetime(self._time)),
        max(self.fromDatetime(time + self.resolution),
            self.fromDatetime(self._time + self.resolution))
    )
Return a pair describing the bounds of self. This returns a pair (min, max) of Time instances. It is not quite the same as (self, self + self.resolution). This is because timezones are insignificant for instances with a resolution greater or equal to 1 day. To illustrate the problem, consider a Time instance:: T = Time.fromHumanly('today', tzinfo=anything) This will return an equivalent instance independent of the tzinfo used. The hour, minute, and second of this instance are 0, and its resolution is one day. Now say we have a sorted list of times, and we want to get all times for 'today', where whoever said 'today' is in a timezone that's 5 hours ahead of UTC. The start of 'today' in this timezone is UTC 05:00. The example instance T above is before this, but obviously it is today. The min and max times this returns are such that all potentially matching instances are within this range. However, this range might contain unmatching instances. As an example of this, if 'today' is April first 2005, then Time.fromISO8601TimeAndDate('2005-04-01T00:00:00') sorts in the same place as T from above, but is not in the UTC+5 'today'. TIME IS FUN!
def Analyze(self, hashes):
    """Look up hashes in Viper using the Viper HTTP API.

    Args:
        hashes (list[str]): hashes to look up.

    Returns:
        list[HashAnalysis]: one analysis per input hash.
    """
    hash_analyses = []
    for digest in hashes:
        json_response = self._QueryHash(digest)
        hash_analysis = interface.HashAnalysis(digest, json_response)
        hash_analyses.append(hash_analysis)
    return hash_analyses
Looks up hashes in Viper using the Viper HTTP API. Args: hashes (list[str]): hashes to look up. Returns: list[HashAnalysis]: hash analysis. Raises: RuntimeError: If no host has been set for Viper.
def check_url (aggregate):
    """Wait for the URL queue to drain, pruning stopped checker threads
    while waiting."""
    while True:
        try:
            aggregate.urlqueue.join(timeout=30)
            break
        except urlqueue.Timeout:
            # Periodically clean up, and stop waiting once no checker
            # threads remain.
            aggregate.remove_stopped_threads()
            if not any(aggregate.get_check_threads()):
                break
Helper function waiting for URL queue.
def save(self, t, base=0, heap=False):
    """Save this typedef plus its class typedef.

    :raises KeyError: if the typedef cannot be registered
    """
    c, k = _keytuple(t)
    if k and k not in _typedefs:
        _typedefs[k] = self
        if c and c not in _typedefs:
            # Types from built-in modules get the "ignored" kind.
            if t.__module__ in _builtin_modules:
                k = _kind_ignored
            else:
                k = self.kind
            _typedefs[c] = _Typedef(base=_basicsize(type(t), base=base,
                                                    heap=heap),
                                    refs=_type_refs,
                                    both=False, kind=k, type=t)
    elif isbuiltin(t) and t not in _typedefs:
        _typedefs[t] = _Typedef(base=_basicsize(t, base=base),
                                both=False, kind=_kind_ignored, type=t)
    else:
        raise KeyError('asizeof typedef %r bad: %r %r' % (self, (c, k),
                                                          self.both))
Save this typedef plus its class typedef.
def cleanup_virtualenv(bare=True):
    """Remove the project's virtualenv directory from the system.

    :param bare: when False, print an abort message first
    """
    if not bare:
        click.echo(crayons.red("Environment creation aborted."))
    try:
        vistir.path.rmtree(project.virtualenv_location)
    except OSError as e:
        click.echo(
            "{0} An error occurred while removing {1}!".format(
                crayons.red("Error: ", bold=True),
                crayons.green(project.virtualenv_location),
            ),
            err=True,
        )
        click.echo(crayons.blue(e), err=True)
Removes the virtualenv directory from the system.
def calendar_dates(self, val):
    """Setter: keep ``self._calendar_dates_g`` (a groupby on
    service_id/date) in sync whenever ``self.calendar_dates`` changes."""
    self._calendar_dates = val
    if val is not None and not val.empty:
        self._calendar_dates_g = self._calendar_dates.groupby(
            ["service_id", "date"]
        )
    else:
        self._calendar_dates_g = None
Update ``self._calendar_dates_g`` if ``self.calendar_dates`` changes.
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """Configure multiple package sources from charm configuration.

    The sources and keys are yaml fragments (string or list) stored in
    the config options named by `sources_var` and `keys_var`; see
    add_source() for the supported source/key formats.

    :raises SourceConfigError: if sources and keys differ in length
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None
    if isinstance(sources, six.string_types):
        sources = [sources]
    if keys is None:
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, six.string_types):
            keys = [keys]
        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')
        for source, key in zip(sources, keys):
            add_source(source, key)
    if update:
        _fetch_update(fatal=True)
Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). Example config: install_sources: | - "ppa:foo" - "http://example.com/repo precise main" install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted.
def _check_certificate(self):
    """Append wget no-check-certificate / Oracle-cookie options when
    downloading a JDK from the sbo repo, after prompting the user."""
    if (self.file_name.startswith("jdk-") and self.repo == "sbo"
            and self.downder == "wget"):
        certificate = (' --no-check-certificate --header="Cookie: '
                       'oraclelicense=accept-securebackup-cookie"')
        self.msg.template(78)
        print("| '{0}' need to go ahead downloading".format(
            certificate[:23].strip()))
        self.msg.template(78)
        print("")
        self.downder_options += certificate
        # Abort entirely unless the user confirms.
        if not self.msg.answer() in ["y", "Y"]:
            raise SystemExit()
Check certificate-related options for wget
def _filter_data(self, pattern): removed = [] filtered = [] for param in self.data: if not param[0].startswith(pattern): filtered.append(param) else: removed.append(param) self.data = filtered return removed
Removes parameters which match the pattern from the config data
def context(self, name):
    """Get a context by name, loading it from disk on first access.

    Args:
        name (str): Name the context is stored under.

    Returns:
        `ResolvedContext` object.
    """
    data = self._context(name)
    context = data.get("context")
    if context:
        return context
    assert self.load_path
    context_path = os.path.join(self.load_path, "contexts", "%s.rxt" % name)
    context = ResolvedContext.load(context_path)
    # Cache the loaded context for subsequent calls.
    data["context"] = context
    data["loaded"] = True
    return context
Get a context. Args: name (str): Name to store the context under. Returns: `ResolvedContext` object.
def get_following(self, auth_secret):
    """Get the following list of a logged-in user.

    :param auth_secret: authentication secret of the logged-in user
    :returns: (bool, dict) — success flag, plus a dict holding either
        the following list (FOLLOWING_LIST_KEY) on success or an error
        string (ERROR_KEY, e.g. ERROR_NOT_LOGGED_IN) on failure
    """
    result = {pytwis_constants.ERROR_KEY: None}
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
    following_userids = self._rc.zrange(following_zset_key, 0, -1)
    if following_userids is None or not following_userids:
        result[pytwis_constants.FOLLOWING_LIST_KEY] = []
        return (True, result)
    # Batch all the username lookups into one pipeline round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for following_userid in following_userids:
            following_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
            pipe.hget(following_userid_profile_key,
                      pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
    return (True, result)
Get the following list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the following list is successfully obtained, False otherwise. result A dict containing the following list with the key FOLLOWING_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN
def get_copy_folder_location():
    """Try to locate the Copy folder via the Copy Agent's config
    database.

    Returns:
        (str) full path to the current Copy folder
    """
    copy_settings_path = 'Library/Application Support/Copy Agent/config.db'
    copy_home = None
    copy_settings = os.path.join(os.environ['HOME'], copy_settings_path)
    if os.path.isfile(copy_settings):
        database = sqlite3.connect(copy_settings)
        if database:
            cur = database.cursor()
            query = ("SELECT value "
                     "FROM config2 "
                     "WHERE option = 'csmRootPath';")
            cur.execute(query)
            data = cur.fetchone()
            copy_home = str(data[0])
            cur.close()
    if not copy_home:
        # error() reports the failure; presumably it aborts — TODO
        # confirm, since copy_home would be returned as None otherwise.
        error("Unable to find your Copy install =(")
    return copy_home
Try to locate the Copy folder. Returns: (str) Full path to the current Copy folder
def iq_handler(type_, payload_cls, *, with_send_reply=False):
    """Register the decorated (coroutine) function as IQ request
    handler.

    :param type_: IQ type to listen for
    :param payload_cls: payload XSO class to listen for
    :param with_send_reply: whether to pass a reply-sending function to
        the decorated callable as second argument
    :raises ValueError: if `payload_cls` is not a registered IQ payload
    """
    if (not hasattr(payload_cls, "TAG") or
            (aioxmpp.IQ.CHILD_MAP.get(payload_cls.TAG) is not
             aioxmpp.IQ.payload.xq_descriptor) or
            payload_cls not in aioxmpp.IQ.payload._classes):
        raise ValueError(
            "{!r} is not a valid IQ payload "
            "(use IQ.as_payload_class decorator)".format(
                payload_cls,
            )
        )

    def decorator(f):
        # Attach the handler spec; the stanza stream picks it up later.
        add_handler_spec(
            f,
            HandlerSpec(
                (_apply_iq_handler, (type_, payload_cls)),
                require_deps=(),
            ),
            kwargs=dict(with_send_reply=with_send_reply),
        )
        return f
    return decorator
Register the decorated function or coroutine function as IQ request handler. :param type_: IQ type to listen for :type type_: :class:`~.IQType` :param payload_cls: Payload XSO class to listen for :type payload_cls: :class:`~.XSO` subclass :param with_send_reply: Whether to pass a function to send a reply to the decorated callable as second argument. :type with_send_reply: :class:`bool` :raises ValueError: if `payload_cls` is not a registered IQ payload If the decorated function is not a coroutine function, it must return an awaitable instead. .. seealso:: :meth:`~.StanzaStream.register_iq_request_handler` for more details on the `type_`, `payload_cls` and `with_send_reply` arguments, as well as behaviour expected from the decorated function. :meth:`aioxmpp.IQ.as_payload_class` for a way to register a XSO as IQ payload .. versionadded:: 0.11 The `with_send_reply` argument. .. versionchanged:: 0.10 The decorator now checks if `payload_cls` is a valid, registered IQ payload and raises :class:`ValueError` if not.
def get_splitext_basename(path):
    """Get the basename of a path without its extension.

    :param path: path to extract the basename without extension from
    :returns: splitext basename
    """
    basename = foundations.common.get_first_item(os.path.splitext(os.path.basename(os.path.normpath(path))))
    LOGGER.debug("> Splitext basename: '{0}'.".format(basename))
    return basename
Gets the basename of a path without its extension. Usage:: >>> get_splitext_basename("/Users/JohnDoe/Documents/Test.txt") u'Test' :param path: Path to extract the basename without extension. :type path: unicode :return: Splitext basename. :rtype: unicode
def as_statements(lines: Iterator[str]) -> Iterator[str]:
    """Create an iterator that transforms lines into sql statements.

    Statements within the lines must end with ";". The last statement
    is included even if it does not end in ';'. Lines starting with
    '--' (comments) and blank lines are skipped.

    >>> list(as_statements(['select * from', '-- filtered', 't;']))
    ['select * from t']
    >>> list(as_statements(['a;', 'b', 'c;', 'd', ' ']))
    ['a', 'b c', 'd']
    """
    stripped = (line.strip() for line in lines if line)
    meaningful = (line for line in stripped
                  if line and not line.startswith('--'))
    buffered = []
    for line in meaningful:
        buffered.append(line.rstrip(';'))
        if line.endswith(';'):
            yield ' '.join(buffered)
            buffered.clear()
    if buffered:
        yield ' '.join(buffered)
Create an iterator that transforms lines into sql statements. Statements within the lines must end with ";" The last statement will be included even if it does not end in ';' >>> list(as_statements(['select * from', '-- comments are filtered', 't;'])) ['select * from t'] >>> list(as_statements(['a;', 'b', 'c;', 'd', ' '])) ['a', 'b c', 'd']
def retrieve(customer_id):
    """Retrieve a customer from its id.

    :param customer_id: the customer id
    :returns: the customer resource (resources.Customer)
    """
    http_client = HttpClient()
    response, __ = http_client.get(routes.url(routes.CUSTOMER_RESOURCE,
                                              resource_id=customer_id))
    return resources.Customer(**response)
Retrieve a customer from its id. :param customer_id: The customer id :type customer_id: string :return: The customer resource :rtype: resources.Customer
def read(self, size):
    """Read at least `size` raw bytes from the instrument.

    :param size: minimum number of bytes to read
    :returns: received bytes; may exceed `size` because data arrives
        in RECV_CHUNK-sized chunks
    """
    raw_read = super(USBRawDevice, self).read
    received = bytearray()
    while not len(received) >= size:
        resp = raw_read(self.RECV_CHUNK)
        received.extend(resp)
    return bytes(received)
Read raw bytes from the instrument. :param size: amount of bytes to be sent to the instrument :type size: integer :return: received bytes :return type: bytes
def _start_thread(self):
    """Start the batch-enqueueing thread with a fresh stop event."""
    self._stopping_event = Event()
    self._enqueueing_thread = Thread(target=self._enqueue_batches,
                                     args=(self._stopping_event,))
    self._enqueueing_thread.start()
Start an enqueueing thread.
def _createEmptyJobGraphForJob(self, jobStore, command=None, predecessorNumber=0):
    """Create an empty job graph node for this job in the job store."""
    self._config = jobStore.config
    return jobStore.create(JobNode.fromJob(self, command=command,
                                           predecessorNumber=predecessorNumber))
Create an empty job for the job.
def docsfor(self, rel):
    """Obtain the documentation for a link relation and open it in a
    web browser window."""
    prefix, _rel = rel.split(':')
    if prefix in self.curies:
        doc_url = uritemplate.expand(self.curies[prefix], {'rel': _rel})
    else:
        # No registered CURIE: treat the relation itself as the URL.
        doc_url = rel
    print('opening', doc_url)
    webbrowser.open(doc_url)
Obtains the documentation for a link relation. Opens in a webbrowser window
def search_suggestion(self, query):
    """Get search query suggestions for `query`.

    Parameters:
        query (str): Search text.

    Returns:
        list: Suggested query strings.
    """
    response = self._call(
        mc_calls.QuerySuggestion,
        query
    )
    suggested_queries = response.body.get('suggested_queries', [])
    return [
        suggested_query['suggestion_string']
        for suggested_query in suggested_queries
    ]
Get search query suggestions for query. Parameters: query (str): Search text. Returns: list: Suggested query strings.
def remove(self, oid):
    """Remove a faked resource from this manager and from the HMC's
    global resource registry.

    Parameters:
        oid (string): The object ID of the resource.
    """
    uri = self._resources[oid].uri
    del self._resources[oid]
    del self._hmc.all_resources[uri]
Remove a faked resource from this manager. Parameters: oid (string): The object ID of the resource (e.g. value of the 'object-uri' property).
def _handle_error(self, data, params):
    """Raise a SabnzbdApiException for an error response from the
    SABnzbd API."""
    error = data.get('error', 'API call failed')
    mode = params.get('mode')
    raise SabnzbdApiException(error, mode=mode)
Handle an error response from the SABnzbd API
def _setup_xauth(self):
    """Set up a temporary Xauthority file and the XAUTHORITY environment
    variable, generating a new auth cookie for the virtual display.

    The previous AUTHFILE/XAUTHORITY values are recorded so they can be
    restored on teardown.
    """
    handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
                                        suffix='.Xauthority')
    self._xauth_filename = filename
    # mkstemp opens the file; only the path is needed, so close the fd.
    os.close(handle)
    # Remember the previous environment for later restoration.
    self._old_xauth = {}
    self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
    self._old_xauth['XAUTHORITY'] = os.getenv('XAUTHORITY')
    os.environ['AUTHFILE'] = os.environ['XAUTHORITY'] = filename
    cookie = xauth.generate_mcookie()
    xauth.call('add', self.new_display_var, '.', cookie)
Set up the Xauthority file and the XAUTHORITY environment variable.
def get_qemu_info(path, backing_chain=False, fail_on_error=True):
    """Get info on a given qemu disk image via ``qemu-img info``.

    Args:
        path (str): Path to the required disk.
        backing_chain (bool): If True, also include info about the
            image's predecessors.
        fail_on_error (bool): Whether a failing command should raise.

    Returns:
        A list of dicts if backing_chain is True, otherwise a dict.
    """
    cmd = ['qemu-img', 'info', '--output=json']
    if backing_chain:
        cmd.append('--backing-chain')
    cmd.append(path)
    result = run_command_with_validation(
        cmd,
        fail_on_error,
        msg='Failed to get info for {}'.format(path),
    )
    return json.loads(result.out)
Get info on a given qemu disk Args: path(str): Path to the required disk backing_chain(bool): if true, also include info about the image's predecessors. Return: object: if backing_chain == True then a list of dicts else a dict
def copy(self):
    """Return a copy of the state.

    :raises SimStateError: if a global condition is still set, since it
        would otherwise be silently shared between the two states.
    """
    if self._global_condition is not None:
        raise SimStateError("global condition was not cleared before state.copy().")
    c_plugins = self._copy_plugins()
    state = SimState(project=self.project, arch=self.arch, plugins=c_plugins,
                     options=self.options.copy(), mode=self.mode,
                     os_name=self.os_name)
    # Soot-specific field only exists for Java/JNI projects.
    if self._is_java_jni_project:
        state.ip_is_soot_addr = self.ip_is_soot_addr
    state.uninitialized_access_handler = self.uninitialized_access_handler
    state._special_memory_filler = self._special_memory_filler
    state.ip_constraints = self.ip_constraints
    return state
Returns a copy of the state.
def _match_exists(self, searchable): position_searchable = self.get_position_searchable() for pos,val in position_searchable.iteritems(): if val == searchable: return pos return False
Make sure the searchable description doesn't already exist
def extract_objects(self, fname, type_filter=None):
    """Extract objects from a source file, caching parse results per file.

    Args:
        fname (str): Name of file to read from.
        type_filter (class, optional): When given, only instances of this
            class are returned.

    Returns:
        List of objects extracted from the file.
    """
    try:
        objects = self.object_cache[fname]
    except KeyError:
        # Cache miss: parse the file and remember the result.
        with io.open(fname, 'rt', encoding='utf-8') as fh:
            objects = parse_verilog(fh.read())
        self.object_cache[fname] = objects
    if type_filter is None:
        return objects
    return [obj for obj in objects if isinstance(obj, type_filter)]
Extract objects from a source file Args: fname(str): Name of file to read from type_filter (class, optional): Object class to filter results Returns: List of objects extracted from the file.
def get_synth_input_fn(height, width, num_channels, num_classes):
    """Return an input function that yields an endless dataset of zeros.

    Useful for debugging input-pipeline performance, since it removes all
    file reading and image preprocessing.

    Args:
        height: Height of the fake image tensor.
        width: Width of the fake image tensor.
        num_channels: Depth of the fake image tensor.
        num_classes: Number of classes represented in the fake labels.

    Returns:
        An input_fn usable in place of a real one; it returns a dataset
        that repeats a single all-zero (images, labels) pair.
    """
    def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
        image_shape = (batch_size, height, width, num_channels)
        label_shape = (batch_size, num_classes)
        fake_images = tf.zeros(image_shape, tf.float32)
        fake_labels = tf.zeros(label_shape, tf.int32)
        dataset = tf.data.Dataset.from_tensors((fake_images, fake_labels))
        return dataset.repeat()

    return input_fn
Returns an input function that returns a dataset with zeroes. This is useful in debugging input pipeline performance, as it removes all elements of file reading and image preprocessing. Args: height: Integer height that will be used to create a fake image tensor. width: Integer width that will be used to create a fake image tensor. num_channels: Integer depth that will be used to create a fake image tensor. num_classes: Number of classes that should be represented in the fake labels tensor Returns: An input_fn that can be used in place of a real one to return a dataset that can be used for iteration.
def bootstrap(score_objs, n_boot=1000):
    """Bootstrap-resample score objects and aggregate each sample.

    Args:
        score_objs: Sequence of objects supporting ``__add__`` (e.g.
            DistributedROC or DistributedReliability instances).
        n_boot (int): Number of bootstrap samples to draw.

    Returns:
        Array of n_boot aggregated objects, one per bootstrap sample.
    """
    sample_shape = (n_boot, len(score_objs))
    resampled = np.random.choice(score_objs, size=sample_shape, replace=True)
    # Summing along axis 1 aggregates each bootstrap replicate.
    return resampled.sum(axis=1)
Given a set of DistributedROC or DistributedReliability objects, this function performs a bootstrap resampling of the objects and returns n_boot aggregations of them. Args: score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method n_boot (int): Number of bootstrap samples Returns: An array of DistributedROC or DistributedReliability
def _get_calling_module(self):
    """Return the first module on the call stack that is not this module,
    or ``None`` if the call originated from this module.
    """
    for frame in inspect.stack():
        mod = inspect.getmodule(frame[0])
        logger.debug(f'calling module: {mod}')
        # getmodule can return None (e.g. for frames from exec'd code).
        if mod is not None:
            mod_name = mod.__name__
            if mod_name != __name__:
                return mod
Get the last module in the call stack that is not this module or ``None`` if the call originated from this module.
def lookup(self, req, parent, name):
    """Look up a directory entry by name and get its attributes.

    This default implementation always reports that no entry exists.

    Valid replies: reply_entry, reply_err.
    """
    self.reply_err(req, errno.ENOENT)
Look up a directory entry by name and get its attributes. Valid replies: reply_entry reply_err
def get_comment_create_data(self):
    """Return the dict of data used to create a comment.

    Subclasses in custom comment apps that override get_comment_model can
    override this method to add extra fields onto a custom comment model.
    """
    user_model = get_user_model()
    return dict(
        content_type=ContentType.objects.get_for_model(self.target_object),
        object_pk=force_text(self.target_object._get_pk_val()),
        text=self.cleaned_data["text"],
        # NOTE(review): this attributes the comment to the most recently
        # created user rather than the requesting user -- confirm this is
        # intentional.
        user=user_model.objects.latest('id'),
        post_date=timezone.now(),
        site_id=settings.SITE_ID,
        is_public=True,
        is_removed=False,
    )
Returns the dict of data to be used to create a comment. Subclasses in custom comment apps that override get_comment_model can override this method to add extra fields onto a custom comment model.
def _process_state_change_events():
    """Process events relating to the overall state of SDP.

    Runs an endless event loop that continually checks for and responds
    to SDP state change events. While the state is not 'off', all newly
    published events are processed each tick; when it is 'off', events
    are pulled from the queue one at a time.
    """
    sdp_state = SDPState()
    service_states = get_service_state_list()
    state_events = sdp_state.get_event_queue(subscriber=__service_name__)
    state_is_off = sdp_state.current_state == 'off'
    counter = 0
    while True:
        time.sleep(0.1)
        if not state_is_off:
            # Log roughly every 100 s (1000 iterations at 0.1 s each).
            if counter % 1000 == 0:
                LOG.debug('Checking published events ... %d', counter / 1000)
            _published_events = state_events.get_published_events(
                process=True)
            for _state_event in _published_events:
                _process_event(_state_event, sdp_state, service_states)
        else:
            _state_event = state_events.get()
            if _state_event:
                _process_event(_state_event, sdp_state, service_states)
        # Re-evaluate the state for the next iteration.
        state_is_off = sdp_state.current_state == 'off'
        counter += 1
Process events relating to the overall state of SDP. This function starts an event loop which continually checks for and responds to SDP state change events.
def get_object(self, identifier, include_inactive=False):
    """Retrieve the object with the given identifier from the database.

    Parameters
    ----------
    identifier : string
        Unique object identifier.
    include_inactive : bool
        If True, also match inactive (i.e., deleted) objects.

    Returns
    -------
    (Sub-class of)ObjectHandle or None
        The database object with the given identifier, or None if no
        matching object exists.
    """
    query = {'_id': identifier}
    if not include_inactive:
        # By default only active (not deleted) objects are considered.
        query['active'] = True
    cursor = self.collection.find(query)
    # NOTE(review): cursor.count() and cursor.next() are deprecated or
    # removed in recent pymongo versions -- consider find_one() instead.
    if cursor.count() > 0:
        return self.from_dict(cursor.next())
    else:
        return None
Retrieve object with given identifier from the database. Parameters ---------- identifier : string Unique object identifier include_inactive : Boolean Flag indicating whether inactive (i.e., deleted) object should be included in the search (i.e., return an object with given identifier if it has been deleted or return None) Returns ------- (Sub-class of)ObjectHandle The database object with given identifier or None if no object with identifier exists.
def require(self, lock, guard_func, *guard_args, **guard_kw):
    """Decorate a function so it only runs while `lock` is held.

    If the lock is already granted, the decorated function runs
    immediately. Otherwise the lock is requested -- but only when
    `guard_func` (called with `guard_args`/`guard_kw`) returns True.
    When neither path succeeds, the wrapped call returns None.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kw):
            if self.granted(lock):
                self.msg('Granted {}'.format(lock))
                return func(*args, **kw)
            # Only attempt acquisition when the guard allows it.
            if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
                return func(*args, **kw)
            return None
        return wrapper
    return decorator
Decorate a function to be run only when a lock is acquired. The lock is requested if the guard function returns True. The decorated function is called if the lock has been granted.
def get_resourcegroupitems(group_id, scenario_id, **kwargs):
    """Get all the items in a group, in a scenario.

    If group_id is None, return all items across all groups in the
    scenario.
    """
    rgi_qry = db.DBSession.query(ResourceGroupItem).\
        filter(ResourceGroupItem.scenario_id == scenario_id)
    if group_id is not None:
        rgi_qry = rgi_qry.filter(ResourceGroupItem.group_id == group_id)
    rgi = rgi_qry.all()
    return rgi
Get all the items in a group, in a scenario. If group_id is None, return all items across all groups in the scenario.
def publish_topology_opened(self, topology_id):
    """Publish a TopologyOpenedEvent to all topology listeners.

    :Parameters:
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyOpenedEvent(topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.opened(event)
        except Exception:
            # A misbehaving listener must not prevent delivery to the
            # remaining listeners.
            _handle_exception()
Publish a TopologyOpenedEvent to all topology listeners. :Parameters: - `topology_id`: A unique identifier for the topology this server is a part of.
def uncompress_file(inputfile, filename):
    """Uncompress this file using gzip and derive its uncompressed name.

    :param inputfile: File to uncompress
    :type inputfile: ``file`` like object
    :param filename: File's name
    :type filename: ``str``
    :returns: Tuple with the uncompressed file and its new base name
    :rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
    """
    zipfile = gzip.GzipFile(fileobj=inputfile, mode="rb")
    try:
        outputfile = create_spooled_temporary_file(fileobj=zipfile)
    finally:
        zipfile.close()
    new_basename = os.path.basename(filename)
    # Only strip a trailing '.gz'; str.replace removed the substring
    # anywhere in the name (e.g. 'a.gz.bak' -> 'a.bak').
    if new_basename.endswith('.gz'):
        new_basename = new_basename[:-3]
    return outputfile, new_basename
Uncompress this file using gzip and change its name. :param inputfile: File to compress :type inputfile: ``file`` like object :param filename: File's name :type filename: ``str`` :returns: Tuple with file and new file's name :rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
def load_api_folder(api_folder_path):
    """Load api definitions from an api folder.

    Args:
        api_folder_path (str): folder containing api definition files.
            Each file's content is either a list of single-key
            ``{"api": {...}}`` items, or a plain dict treated as a single
            definition keyed by the file path.

    Returns:
        dict: mapping from api id (taken from the definition's "id",
        "def" or "name" field, in that order) to the api definition dict.

    Raises:
        exceptions.ParamsError: if an item is malformed or the same api
        id is defined more than once.
    """
    api_definition_mapping = {}
    api_items_mapping = load_folder_content(api_folder_path)
    for api_file_path, api_items in api_items_mapping.items():
        if isinstance(api_items, list):
            for api_item in api_items:
                # Each list item is expected to be a one-key {"api": {...}}.
                key, api_dict = api_item.popitem()
                api_id = api_dict.get("id") or api_dict.get("def") or api_dict.get("name")
                if key != "api" or not api_id:
                    raise exceptions.ParamsError(
                        "Invalid API defined in {}".format(api_file_path))
                if api_id in api_definition_mapping:
                    raise exceptions.ParamsError(
                        "Duplicated API ({}) defined in {}".format(api_id, api_file_path))
                else:
                    api_definition_mapping[api_id] = api_dict
        elif isinstance(api_items, dict):
            if api_file_path in api_definition_mapping:
                raise exceptions.ParamsError(
                    "Duplicated API defined: {}".format(api_file_path))
            else:
                api_definition_mapping[api_file_path] = api_items
    return api_definition_mapping
load api definitions from api folder. Args: api_folder_path (str): api files folder. api file should be in the following format: [ { "api": { "def": "api_login", "request": {}, "validate": [] } }, { "api": { "def": "api_logout", "request": {}, "validate": [] } } ] Returns: dict: api definition mapping. { "api_login": { "function_meta": {"func_name": "api_login", "args": [], "kwargs": {}} "request": {} }, "api_logout": { "function_meta": {"func_name": "api_logout", "args": [], "kwargs": {}} "request": {} } }
def to_html(self, codebase):
    """Convert this `FunctionDoc` to an HTML fragment.

    Builds a definition list for each non-empty section (params, options,
    exceptions), appends "see also" links, and wraps everything in a
    named anchor plus a function div.
    """
    body = ''
    for section in ('params', 'options', 'exceptions'):
        val = getattr(self, section)
        if val:
            body += '<h5>%s</h5>\n<dl class = "%s">%s</dl>' % (
                printable(section), section,
                '\n'.join(param.to_html() for param in val))
    body += codebase.build_see_html(self.see, 'h5', self)
    return ('<a name = "%s" />\n<div class = "function">\n' +
            '<h4>%s</h4>\n%s\n%s\n</div>\n') % (
        self.name, self.name,
        htmlize_paragraphs(codebase.translate_links(self.doc, self)),
        body)
Convert this `FunctionDoc` to HTML.
def get_relations_cnt(self):
    """Count the occurrences of every relation across all extractions.

    :return: a Counter mapping relation -> number of occurrences.
    """
    relations = (
        extraction.relation
        for group in self.exts
        for extraction in group
    )
    return cx.Counter(relations)
Get a Counter of the relations across all extractions.
def timdef(action, item, lenout, value=None):
    """Set and retrieve the defaults associated with calendar input strings.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/timdef_c.html

    :param action: the kind of action to take, "SET" or "GET".
    :type action: str
    :param item: the default item of interest.
    :type item: str
    :param lenout: the length of list for output.
    :type lenout: int
    :param value: the optional string used if action is "SET".
    :type value: str
    :return: the value associated with the default item.
    :rtype: str
    """
    action = stypes.stringToCharP(action)
    item = stypes.stringToCharP(item)
    lenout = ctypes.c_int(lenout)
    if value is None:
        # For "GET": allocate an output buffer sized by lenout.
        value = stypes.stringToCharP(lenout)
    else:
        value = stypes.stringToCharP(value)
    libspice.timdef_c(action, item, lenout, value)
    return stypes.toPythonString(value)
Set and retrieve the defaults associated with calendar input strings. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/timdef_c.html :param action: the kind of action to take "SET" or "GET". :type action: str :param item: the default item of interest. :type item: str :param lenout: the length of list for output. :type lenout: int :param value: the optional string used if action is "SET" :type value: str :return: the value associated with the default item. :rtype: str
def is_obsoleted_by_pid(pid):
    """Return True if ``pid`` is referenced in the obsoletedBy field of any
    object.

    This returns True even if the PID is referenced by an object that
    does not exist on the local MN, such as a replica that is in an
    incomplete chain.
    """
    return d1_gmn.app.models.ScienceObject.objects.filter(
        obsoleted_by__did=pid
    ).exists()
Return True if ``pid`` is referenced in the obsoletedBy field of any object. This will return True even if the PID is in the obsoletes field of an object that does not exist on the local MN, such as replica that is in an incomplete chain.
def remove_prefix(self, prefix):
    """Remove a network prefix and re-register network data.

    :param prefix: the network prefix to remove.
    """
    self._req('prefix remove %s' % prefix)
    # Give the device a moment to apply the change before re-registering.
    time.sleep(1)
    self._req('netdataregister')
Remove network prefix.
def register_computer_view(request):
    """Register a computer.

    On POST, validates the ComputerRegistrationForm, saves the computer,
    attaches the requesting user, and redirects to the item-registration
    page; otherwise renders an empty registration form.
    """
    if request.method == "POST":
        form = ComputerRegistrationForm(request.POST)
        logger.debug(form)
        if form.is_valid():
            # Save to obtain an instance, then attach the user and save
            # again.
            obj = form.save()
            obj.user = request.user
            obj.save()
            messages.success(request, "Successfully added computer.")
            return redirect("itemreg")
        else:
            messages.error(request, "Error adding computer.")
    else:
        form = ComputerRegistrationForm()
    return render(request, "itemreg/register_form.html",
                  {"form": form, "action": "add", "type": "computer",
                   "form_route": "itemreg_computer"})
Register a computer.
def random_possible_hands(self):
    """Return random possible hands for all players, consistent with the
    information known by the player whose turn it is.

    That information includes the current player's hand, the sizes of the
    other players' hands, and the moves played by every player, including
    the passes.

    :return: a list of possible Hand objects, one per player.
    """
    missing = self.missing_values()
    # All dominoes held by players other than the one whose turn it is.
    other_dominoes = [d for p, h in enumerate(self.hands)
                      for d in h if p != self.turn]
    while True:
        # Rejection sampling: shuffle the unknown dominoes, deal them
        # out, and retry until the deal is consistent with the known
        # missing values.
        shuffled_dominoes = (d for d in random.sample(other_dominoes,
                                                      len(other_dominoes)))
        hands = []
        for player, hand in enumerate(self.hands):
            if player != self.turn:
                hand = [next(shuffled_dominoes) for _ in hand]
            hands.append(dominoes.Hand(hand))
        if _validate_hands(hands, missing):
            return hands
Returns random possible hands for all players, given the information known by the player whose turn it is. This information includes the current player's hand, the sizes of the other players' hands, and the moves played by every player, including the passes. :return: a list of possible Hand objects, corresponding to each player
def number_of_interactions(self, u=None, v=None, t=None):
    """Return the number of interactions between two nodes at time t.

    Parameters
    ----------
    u, v : nodes, optional (default=all interactions)
        If both u and v are specified, return the number of interactions
        between u and v; otherwise return the total number.
    t : snapshot id (default=None)
        If None, the count is taken on the flattened graph.

    Returns
    -------
    int or None
        NOTE(review): several argument combinations fall through without
        an explicit return and yield None -- e.g. u given with v None,
        or (with t given) u/v given where v is not a successor of u.
    """
    if t is None:
        if u is None:
            return int(self.size())
        elif u is not None and v is not None:
            if v in self._succ[u]:
                return 1
            else:
                return 0
    else:
        if u is None:
            return int(self.size(t))
        elif u is not None and v is not None:
            if v in self._succ[u]:
                if self.__presence_test(u, v, t):
                    return 1
                else:
                    return 0
Return the number of interactions between two nodes at time t. Parameters ---------- u, v : nodes, optional (default=all interactions) If u and v are specified, return the number of interactions between u and v. Otherwise return the total number of all interactions. t : snapshot id (default=None) If None the number of edges on the flattened graph will be returned. Returns ------- nedges : int The number of interactions in the graph. If nodes u and v are specified return the number of interactions between those nodes. If a single node is specified return None. See Also -------- size Examples -------- >>> G = dn.DynDiGraph() >>> G.add_path([0,1,2,3], t=0) >>> G.number_of_interactions() 3 >>> G.number_of_interactions(0,1, t=0) 1 >>> G.add_edge(3, 4, t=1) >>> G.number_of_interactions() 4
def prepend_status(func):
    """Decorator that prefixes `func`'s string result with the step status.

    The prefix is only added once the status has been set, i.e. when it
    is no longer StepResult.UNSET.
    """
    @ft.wraps(func)
    def wrapper(self, *args, **kwargs):
        text = func(self, *args, **kwargs)
        if self.status is StepResult.UNSET:
            return text
        return "[{status}]".format(status=self.status.name) + text
    return wrapper
Prepends the output of `func` with the status.
def get_nodes():
    """Return all configured nodes as a dict of token -> Node.

    Reads /etc/nago/nago.ini; every section except 'main' is treated as a
    node token, and the section's options become the node's attributes.
    """
    cfg_file = "/etc/nago/nago.ini"
    config = ConfigParser.ConfigParser()
    config.read(cfg_file)
    result = {}
    for section in config.sections():
        # The 'main' section holds global settings, not a node.
        if section in ['main']:
            continue
        token = section
        node = Node(token)
        for key, value in config.items(token):
            node[key] = value
        result[token] = node
    return result
Returns all nodes in a list of dicts format
def get_time_remaining_estimate(self):
    """Return the estimated battery time remaining, in minutes.

    On Mac OS X 10.7+, IOPSGetTimeRemainingEstimate is available and used
    directly. On 10.6 it is not; in that case, if the providing power
    source is AC, TIME_REMAINING_UNLIMITED is returned; otherwise the
    time-to-empty values of all present power sources are summed.
    """
    if IOPSGetTimeRemainingEstimate is not None:  # Mac OS X 10.7+
        estimate = float(IOPSGetTimeRemainingEstimate())
        # The IOKit call uses -1.0 / -2.0 as sentinel values.
        if estimate == -1.0:
            return common.TIME_REMAINING_UNKNOWN
        elif estimate == -2.0:
            return common.TIME_REMAINING_UNLIMITED
        else:
            # Converted to minutes (assumes the raw value is in
            # seconds -- TODO confirm against IOKit docs).
            return estimate / 60.0
    else:  # Mac OS X 10.6 fallback
        warnings.warn("IOPSGetTimeRemainingEstimate is not preset", RuntimeWarning)
        blob = IOPSCopyPowerSourcesInfo()
        type = IOPSGetProvidingPowerSourceType(blob)
        if type == common.POWER_TYPE_AC:
            return common.TIME_REMAINING_UNLIMITED
        else:
            estimate = 0.0
            for source in IOPSCopyPowerSourcesList(blob):
                description = IOPSGetPowerSourceDescription(blob, source)
                # Only count sources that are present and report a
                # positive time-to-empty.
                if kIOPSIsPresentKey in description and description[kIOPSIsPresentKey] and kIOPSTimeToEmptyKey in description and description[kIOPSTimeToEmptyKey] > 0.0:
                    estimate += float(description[kIOPSTimeToEmptyKey])
            if estimate > 0.0:
                return float(estimate)
            else:
                return common.TIME_REMAINING_UNKNOWN
In Mac OS X 10.7+, uses IOPSGetTimeRemainingEstimate to get the time remaining estimate. In Mac OS X 10.6, IOPSGetTimeRemainingEstimate is not available. If the providing power source type is AC, returns TIME_REMAINING_UNLIMITED. Otherwise sums the time-to-empty estimates of all power sources returned by IOPSCopyPowerSourcesList and returns the total.
def get_buffer(self):
    """Return a byte string containing the target as currently written.

    Any partially-filled byte (fewer than 8 bits remaining) is appended
    as a final byte; the whole buffer is reversed when `bytes_reverse`
    is set.
    """
    data = bytearray(self.output)
    if self.bits_remaining < 8:
        # Flush the partially-written byte.
        data.append(self.current_bits)
    if self.bytes_reverse:
        data.reverse()
    return bytes(data)
Return a byte string containing the target as currently written.
def _init_count_terms(self, annots):
    """Fill in GO term counts (self.gocnts), propagating each annotated
    term's count to all of its ancestors in the DAG.

    :param annots: mapping of annotation subject -> iterable of GO ids.
    """
    gonotindag = set()
    gocnts = self.gocnts
    go2obj = self.go2obj
    for terms in annots.values():
        # Collect the subject's terms plus all their ancestors into a
        # set, so each term is counted at most once per subject.
        allterms = set()
        for go_id in terms:
            goobj = go2obj.get(go_id, None)
            if goobj is not None:
                allterms.add(go_id)
                allterms |= goobj.get_all_parents()
            else:
                gonotindag.add(go_id)
        for parent in allterms:
            gocnts[parent] += 1
    if gonotindag:
        print("{N} Assc. GO IDs not found in the GODag\n".format(N=len(gonotindag)))
Fills in the counts and overall aspect counts.
def set(self, name, value=True):
    """Set a feature value.

    :param name: feature name; stored lower-cased as an attribute.
    :param value: value to store (defaults to True).
    """
    attribute = name.lower()
    setattr(self, attribute, value)
set a feature value
def posargs_limiter(func, *args):
    """Call `func` with only as many positional arguments as it accepts.

    :param func: function or bound method to call.
    :param args: candidate positional arguments; extras are dropped.
    :return: the result of calling `func` with the truncated arguments.
    """
    # getfullargspec replaces the Python 2-era getargspec, which was
    # removed in Python 3.11.
    posargs = inspect.getfullargspec(func).args
    length = len(posargs)
    if inspect.ismethod(func):
        # A bound method's spec still lists 'self'; it is supplied
        # implicitly, so don't count it.
        length -= 1
    if length == 0:
        return func()
    return func(*args[0:length])
takes a function a positional arguments and sends only the number of positional arguments the function is expecting
def ssh_known_hosts_lines(application_name, user=None):
    """Return the non-empty lines of the known_hosts file for an application.

    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh asserts are for.
    :type user: str
    """
    with open(known_hosts(application_name, user)) as hosts_file:
        # Keep each line, right-stripped, skipping blank lines.
        return [line.rstrip() for line in hosts_file if line.rstrip()]
Return contents of known_hosts file for given application. :param application_name: Name of application eg nova-compute-something :type application_name: str :param user: The user that the ssh asserts are for. :type user: str
def multi_split(text, regexes):
    r"""Split `text` by the given regexes, in priority order.

    Each regex only operates on pieces that were not already matched by
    an earlier (higher-priority) regex. Strings may be given in place of
    compiled patterns.

    Splitting on a single regex works like normal split.

    >>> '|'.join(multi_split('one two three', [r'\w+']))
    'one| |two| |three'

    Splitting on digits first separates the digits from their word.

    >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
    'one|234|five| |678'
    """
    def make_regex(s):
        # `str` replaces the Python 2-only `basestring`, which raises
        # NameError on Python 3.
        return re.compile(s) if isinstance(s, str) else s

    regexes = [make_regex(r) for r in regexes]

    piece_list = [text]
    finished_pieces = set()

    def apply_re(regex, piece_list):
        for piece in piece_list:
            # Pieces claimed by a higher-priority regex pass through.
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s

    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))

    # Invariant: splitting never loses or reorders characters.
    assert ''.join(piece_list) == text
    return piece_list
Split the text by the given regexes, in priority order. Make sure that the regex is parenthesized so that matches are returned in re.split(). Splitting on a single regex works like normal split. >>> '|'.join(multi_split('one two three', [r'\w+'])) 'one| |two| |three' Splitting on digits first separates the digits from their word >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+'])) 'one|234|five| |678' Splitting on words first keeps the word with digits intact. >>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+'])) 'one234five| |678'
def normalize(path_name, override=None):
    """Prepare a path name to be worked with.

    Returns the expanded, normalized path together with its identity as
    determined by `identify`.

    :param path_name: path to normalize; must not be empty.
    :param override: optional identity override. Only provide this when
        working with a folder whose name contains an extension, or when
        making a file with no extension.
    :return: tuple of (normalized path, identity).
    """
    identity = identify(path_name, override=override)
    expanded = os.path.expanduser(path_name)
    return os.path.normpath(expanded), identity
Prepares a path name to be worked with. Path name must not be empty. This function will return the 'normpath'ed path and the identity of the path. This function takes an optional overriding argument for the identity. ONLY PROVIDE OVERRIDE IF: 1) YOU ARE WORKING WITH A FOLDER THAT HAS AN EXTENSION IN THE NAME 2) YOU ARE MAKING A FILE WITH NO EXTENSION
def insert(self, resourcetype, source, insert_date=None):
    """Insert a record into the repository.

    When called directly from a function named 'transaction', the record
    is treated as a Layer and `resourcetype` is expected to be an object
    carrying a `csw_schema`; otherwise it is treated as a Service and
    `resourcetype` must be a supported service type.

    :raises RuntimeError: for an unsupported service type.
    """
    # Dispatch on the *caller's* function name via stack inspection.
    caller = inspect.stack()[1][3]
    if caller == 'transaction':
        hhclass = 'Layer'
        source = resourcetype
        resourcetype = resourcetype.csw_schema
    else:
        hhclass = 'Service'
        if resourcetype not in HYPERMAP_SERVICE_TYPES.keys():
            raise RuntimeError('Unsupported Service Type')
    return self._insert_or_update(resourcetype, source, mode='insert', hhclass=hhclass)
Insert a record into the repository
def fetch(self):
    """Return this artifact's content.

    Only applicable to file artifacts.

    :return: content of the artifact.
    :raises YagocdException: if the artifact is a folder.
    """
    if self.data.type == self._manager.FOLDER_TYPE:
        raise YagocdException("Can't fetch folder <{}>, only file!".format(self._path))
    response = self._session.get(self.data.url)
    return response.content
Method for getting artifact's content. Could only be applicable for file type. :return: content of the artifact.
def get_table(ports):
    """Build a pretty table used to display the port results.

    :param ports: iterable of (name, port, protocol, description) rows
    :return: the populated table to display
    """
    table = PrettyTable(["Name", "Port", "Protocol", "Description"])
    # Left-align the free-text columns.
    for column in ("Name", "Description"):
        table.align[column] = "l"
    table.padding_width = 1
    for row in ports:
        table.add_row(row)
    return table
This function returns a pretty table used to display the port results. :param ports: list of found ports :return: the table to display
def login():
    """Log a user in via CAS.

    This route has two purposes: the user hits it to start a login, and
    the CAS server redirects back to it with a `ticket` query argument
    after a successful login. The ticket is stored in the session and
    validated; on success the user is redirected to the originally
    requested page (or the configured after-login endpoint). On failure
    the stale ticket is discarded and the user is sent back to the CAS
    login page.
    """
    cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY']
    # Default destination: back to the CAS server's login page.
    redirect_url = create_cas_login_url(
        current_app.config['CAS_SERVER'],
        current_app.config['CAS_LOGIN_ROUTE'],
        flask.url_for('.login',
                      origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'),
                      _external=True))
    if 'ticket' in flask.request.args:
        # CAS redirected back to us with a ticket -- remember it.
        flask.session[cas_token_session_key] = flask.request.args['ticket']
    if cas_token_session_key in flask.session:
        if validate(flask.session[cas_token_session_key]):
            if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session:
                redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL')
            elif flask.request.args.get('origin'):
                redirect_url = flask.request.args['origin']
            else:
                redirect_url = flask.url_for(
                    current_app.config['CAS_AFTER_LOGIN'])
        else:
            # Invalid/expired ticket: drop it and fall back to CAS login.
            del flask.session[cas_token_session_key]
    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
    return flask.redirect(redirect_url)
This route has two purposes. First, it is used by the user to login. Second, it is used by the CAS to respond with the `ticket` after the user logs in successfully. When the user accesses this url, they are redirected to the CAS to login. If the login was successful, the CAS will respond to this route with the ticket in the url. The ticket is then validated. If validation was successful the logged in username is saved in the user's session under the key `CAS_USERNAME_SESSION_KEY` and the user's attributes are saved under the key 'CAS_USERNAME_ATTRIBUTE_KEY'
def change_default_radii(def_map):
    """Change the default radii of the current representation.

    :param def_map: mapping from atom type to radius; must cover every
        type present in the current system's type_array.
    """
    s = current_system()
    rep = current_representation()
    rep.radii_state.default = [def_map[t] for t in s.type_array]
    rep.radii_state.reset()
Change the default radii