code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def get_program_uuid(self, obj): <NEW_LINE> <INDENT> return retrieve_offer(obj).condition.program_uuid
Get the Program UUID attached to the coupon.
625941c2d53ae8145f87a218
def verify_state(self, resp, state_data, state): <NEW_LINE> <INDENT> if not ("state" in resp and "state" in state_data and resp["state"] == state_data["state"]): <NEW_LINE> <INDENT> tmp_state = "" <NEW_LINE> if "state" in resp: <NEW_LINE> <INDENT> tmp_state = resp["state"] <NEW_LINE> <DEDENT> satosa_logging(LOGGER, logging.DEBUG, "Missing or invalid state [%s] in response!" % tmp_state, state, exc_info=True) <NEW_LINE> raise SATOSAAuthenticationError(state, "Missing or invalid state [%s] in response!" % tmp_state)
Will verify the state and throw and error if the state is invalid. :type resp: AuthorizationResponse :type state_data: dict[str, str] :type state: satosa.state.State :param resp: The authorization response from the OP, created by pyoidc. :param state_data: The state data for this backend. :param state: The current state for the proxy and this backend. Only used for raising errors.
625941c2f548e778e58cd522
def notify_trommons(task_dir_name, project_backlink, json_filename, trommons_dir): <NEW_LINE> <INDENT> temp_dir = mkdtemp() <NEW_LINE> temp_proj_dir = os.path.join(temp_dir, task_dir_name) <NEW_LINE> os.mkdir(temp_proj_dir) <NEW_LINE> temp_file_name = os.path.join(temp_proj_dir, json_filename) <NEW_LINE> output_json_file = open(temp_file_name, "w") <NEW_LINE> response_data = { 'created': True, 'backlink': project_backlink, 'completed': False, } <NEW_LINE> json.dump(response_data, output_json_file, indent=4, separators=(',', ': ')) <NEW_LINE> output_json_file.close() <NEW_LINE> shutil.move(temp_proj_dir, trommons_dir) <NEW_LINE> shutil.rmtree(temp_dir) <NEW_LINE> logging.info("Succesfully notified Trommons the success in importing.")
Write the 'meta.json' and translated file so Trommons can get it.
625941c24f88993c3716c00e
def train_model_oe_pe(): <NEW_LINE> <INDENT> print('load data') <NEW_LINE> import data_utils, training_utils <NEW_LINE> conf = data_utils.TrainConfigure() <NEW_LINE> data_dict = data_utils.pickle_load(conf.char_file) <NEW_LINE> print('loading embed ...') <NEW_LINE> vocab_dict = data_utils.pickle_load(conf.char_dict) <NEW_LINE> embed_matrix = data_utils.load_our_embedding(vocab_dict) <NEW_LINE> print('load embed done.') <NEW_LINE> y = to_categorical(data_dict['y']) <NEW_LINE> xe = [[i for i in range(600)] for _ in range(y.shape[0])] <NEW_LINE> xe = np.array(xe) <NEW_LINE> x_tn, y_tn, x_ts, y_ts = training_utils.split([data_dict['x'], xe] , y, shuffle=False) <NEW_LINE> x_tn, y_tn, x_val, y_val = training_utils.split(x_tn, y_tn, shuffle=False) <NEW_LINE> print('train') <NEW_LINE> print('define model') <NEW_LINE> model = CharModel(embed_matrix=embed_matrix, name='charmodel_PE_OE.h5', PE=True) <NEW_LINE> model.train(x_tn, y_tn, x_val, y_val, x_ts, y_ts)
使用自己训练的Embedding,使用Position embedding :return:
625941c244b2445a3393203c
def normalizeEmail(emailaddr, domainmap, domainrules, ignorePlus=True): <NEW_LINE> <INDENT> emailaddr = emailaddr.lower() <NEW_LINE> localpart, domain = extractEmailAddress(emailaddr) <NEW_LINE> canonical = canonicalizeEmailDomain(domain, domainmap) <NEW_LINE> if ignorePlus: <NEW_LINE> <INDENT> idx = localpart.find('+') <NEW_LINE> if idx >= 0: <NEW_LINE> <INDENT> localpart = localpart[:idx] <NEW_LINE> <DEDENT> <DEDENT> rules = domainrules.get(canonical, []) <NEW_LINE> if 'ignore_dots' in rules: <NEW_LINE> <INDENT> localpart = localpart.replace(".", "") <NEW_LINE> <DEDENT> normalized = "%s@%s" % (localpart, domain) <NEW_LINE> return normalized
Normalise an email address according to the processing rules for its canonical originating domain. The email address, **emailaddr**, will be parsed and validated, and then checked that it originated from one of the domains allowed to email requests for bridges to the :class:`~bridgedb.distributors.email.distributor.EmailDistributor` via the :func:`canonicaliseEmailDomain` function. :param str emailaddr: An email address to normalise. :param dict domainmap: A map of permitted alternate domains (in lowercase) to their canonical domain names (in lowercase). This can be configured with the ``EMAIL_DOMAIN_MAP`` option in ``bridgedb.conf``, for example:: EMAIL_DOMAIN_MAP = {'mail.google.com': 'gmail.com', 'googlemail.com': 'gmail.com'} :param dict domainrules: A mapping of canonical permitted domain names to a list of rules which should be applied to processing them, for example:: EMAIL_DOMAIN_RULES = {'gmail.com': ["ignore_dots", "dkim"] Currently, ``"ignore_dots"`` means that all ``"."`` characters will be removed from the local part of the validated email address. :param bool ignorePlus: If ``True``, assume that ``blackhole+kerr@torproject.org`` is an alias for ``blackhole@torproject.org``, and remove everything after the first ``'+'`` character. :raises UnsupportedDomain: if the email address originated from a domain that we do not explicitly support. :raises BadEmail: if the email address could not be parsed or validated. :rtype: str :returns: The validated, normalised email address, if it was from a permitted domain. Otherwise, returns an empty string.
625941c2711fe17d82542315
def _check_pillar_exact_minions(self, expr, delimiter, greedy): <NEW_LINE> <INDENT> return self._check_cache_minions(expr, greedy, delimiter, 'pillar', exact_match=True)
Return the minions found by looking via pillar
625941c27cff6e4e8111792b
def divisible_by_three(number_list): <NEW_LINE> <INDENT> return [x for x in number_list if not x % 3]
Given a list of numbers, return a list of the numbers which are divisible by three.
625941c2be8e80087fb20beb
def is_developer_certificate_getter(self): <NEW_LINE> <INDENT> return self._device_execution_mode.value == 1
Return whether this certificate is a developer certificate or not. :param self: Instance of the entity for which this is a custom method. :type self: mbed_cloud.foundation.TrustedCertificate :return: True if a developer certificate, False otherwise. :rtype: bool
625941c2167d2b6e31218b3c
def set_tf_cuda_version(environ_cp): <NEW_LINE> <INDENT> ask_cuda_version = ( 'Please specify the CUDA SDK version you want to use. ' '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION <NEW_LINE> for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS): <NEW_LINE> <INDENT> tf_cuda_version = get_from_env_or_user_or_default( environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION) <NEW_LINE> tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2) <NEW_LINE> default_cuda_path = _DEFAULT_CUDA_PATH <NEW_LINE> if is_windows() or is_cygwin(): <NEW_LINE> <INDENT> default_cuda_path = cygpath( environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN)) <NEW_LINE> <DEDENT> elif is_linux(): <NEW_LINE> <INDENT> if (not os.path.exists(default_cuda_path) ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX): <NEW_LINE> <INDENT> default_cuda_path = _DEFAULT_CUDA_PATH_LINUX <NEW_LINE> <DEDENT> <DEDENT> ask_cuda_path = ('Please specify the location where CUDA %s toolkit is' ' installed. Refer to README.md for more details. 
' '[Default is %s]: ') % (tf_cuda_version, default_cuda_path) <NEW_LINE> cuda_toolkit_path = get_from_env_or_user_or_default( environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path) <NEW_LINE> if is_windows() or is_cygwin(): <NEW_LINE> <INDENT> cuda_toolkit_path = cygpath(cuda_toolkit_path) <NEW_LINE> <DEDENT> if is_windows(): <NEW_LINE> <INDENT> cuda_rt_lib_paths = ['lib/x64/cudart.lib'] <NEW_LINE> <DEDENT> elif is_linux(): <NEW_LINE> <INDENT> cuda_rt_lib_paths = [ '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [ 'lib64', 'lib/powerpc64le-linux-gnu', 'lib/x86_64-linux-gnu', ] ] <NEW_LINE> <DEDENT> elif is_macos(): <NEW_LINE> <INDENT> cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version] <NEW_LINE> <DEDENT> cuda_toolkit_paths_full = [ os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths ] <NEW_LINE> if any(os.path.exists(x) for x in cuda_toolkit_paths_full): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> print('Invalid path to CUDA %s toolkit. %s cannot be found' % (tf_cuda_version, cuda_toolkit_paths_full)) <NEW_LINE> environ_cp['TF_CUDA_VERSION'] = '' <NEW_LINE> environ_cp['CUDA_TOOLKIT_PATH'] = '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d ' 'times in a row. Assuming to be a scripting mistake.' % _DEFAULT_PROMPT_ASK_ATTEMPTS) <NEW_LINE> <DEDENT> environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path <NEW_LINE> write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path) <NEW_LINE> environ_cp['TF_CUDA_VERSION'] = tf_cuda_version <NEW_LINE> write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION.
625941c2d486a94d0b98e0eb
def pop(self, value): <NEW_LINE> <INDENT> return self.delete(value)
Remove value in the set if it is present.
625941c28e71fb1e9831d750
def _comparison(self, op, value): <NEW_LINE> <INDENT> value = self._validate(value) <NEW_LINE> return query.FilterNode(self._code_name, op, value)
get called when standard Python binary operator is used on a property to return a query.FilterNode.
625941c26fb2d068a760f041
def clear_db(func): <NEW_LINE> <INDENT> def test_wrapper(*args): <NEW_LINE> <INDENT> global DB <NEW_LINE> DB = {} <NEW_LINE> return func(*args) <NEW_LINE> <DEDENT> return test_wrapper
A decorator to clear out the in-memory database prior to a test.
625941c27c178a314d6ef402
def auth_password_change(request): <NEW_LINE> <INDENT> data = parse_auth_password_change_data(request.data) <NEW_LINE> return data
params: request return: data
625941c27047854f462a13b2
def adx_pos(high, low, close, n=14, fillna=False): <NEW_LINE> <INDENT> cs = close.shift(1) <NEW_LINE> tr = high.combine(cs, max) - low.combine(cs, min) <NEW_LINE> trs = tr.rolling(n).sum() <NEW_LINE> up = high - high.shift(1) <NEW_LINE> dn = low.shift(1) - low <NEW_LINE> pos = ((up > dn) & (up > 0)) * up <NEW_LINE> neg = ((dn > up) & (dn > 0)) * dn <NEW_LINE> dip = 100 * pos.rolling(n).sum() / trs <NEW_LINE> if fillna: <NEW_LINE> <INDENT> dip = dip.replace([np.inf, -np.inf], np.nan).fillna(20) <NEW_LINE> <DEDENT> return pd.Series(dip, name='adx_pos')
Average Directional Movement Index Positive (ADX) The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI) are derived from smoothed averages of these differences, and measure trend direction over time. These two indicators are often referred to collectively as the Directional Movement Indicator (DMI). The Average Directional Index (ADX) is in turn derived from the smoothed averages of the difference between +DI and -DI, and measures the strength of the trend (regardless of direction) over time. Using these three indicators together, chartists can determine both the direction and strength of the trend. http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx Args: high(pandas.Series): dataset 'High' column. low(pandas.Series): dataset 'Low' column. close(pandas.Series): dataset 'Close' column. n(int): n period. fillna(bool): if True, fill nan values. Returns: pandas.Series: New feature generated.
625941c2bde94217f3682d98
def _decode_preferred_encoding(s): <NEW_LINE> <INDENT> if six.PY3 and isinstance(s, bytes): <NEW_LINE> <INDENT> enc = locale.getpreferredencoding() <NEW_LINE> try: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return s.decode(enc) <NEW_LINE> <DEDENT> except LookupError: <NEW_LINE> <INDENT> enc = 'utf-8' <NEW_LINE> <DEDENT> return s.decode(enc) <NEW_LINE> <DEDENT> except UnicodeDecodeError: <NEW_LINE> <INDENT> return s.decode('latin-1') <NEW_LINE> <DEDENT> <DEDENT> return s
Decode the supplied byte string using the preferred encoding for the locale (`locale.getpreferredencoding`) or, if the default encoding is invalid, fall back first on utf-8, then on latin-1 if the message cannot be decoded with utf-8.
625941c25f7d997b87174a3c
def send_invitation_email(from_addr, to_addr, signed_invitation): <NEW_LINE> <INDENT> subject = "Invitation by %s to %s" % (from_addr, settings.HOST) <NEW_LINE> text_content = loader.get_template('emails/profiles/invite.txt') <NEW_LINE> html_content = loader.get_template('emails/profiles/invite.html') <NEW_LINE> log.debug('The hash is: %s', signed_invitation) <NEW_LINE> payload = { 'link_to_signup': 'https://%s/%s/%s/' % (settings.HOST, settings.SIGNUP_PAGE, signed_invitation) } <NEW_LINE> c = Context(payload) <NEW_LINE> msg = EmailMultiAlternatives(subject, text_content.render(c), "tracker@%s" % settings.HOST, [to_addr]) <NEW_LINE> msg.attach_alternative(html_content.render(c), "text/html") <NEW_LINE> msg.send() <NEW_LINE> log.debug('The invitation email from %s to %s has been sent' % (from_addr, to_addr))
Send the invitation to join
625941c2097d151d1a222e01
def minWindow(self, S, T): <NEW_LINE> <INDENT> lookup = [[None for _ in xrange(26)] for _ in xrange(len(S)+1)] <NEW_LINE> find_char_next_pos = [None]*26 <NEW_LINE> for i in reversed(xrange(len(S))): <NEW_LINE> <INDENT> find_char_next_pos[ord(S[i])-ord('a')] = i+1 <NEW_LINE> lookup[i] = list(find_char_next_pos) <NEW_LINE> <DEDENT> min_i, min_len = None, float("inf") <NEW_LINE> for i in xrange(len(S)): <NEW_LINE> <INDENT> if S[i] != T[0]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> start = i <NEW_LINE> for c in T: <NEW_LINE> <INDENT> start = lookup[start][ord(c)-ord('a')] <NEW_LINE> if start == None: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if start-i < min_len: <NEW_LINE> <INDENT> min_i, min_len = i, start-i <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return S[min_i:min_i+min_len] if min_i is not None else ""
:type S: str :type T: str :rtype: str
625941c230c21e258bdfa442
def test_bad_R_spec(self): <NEW_LINE> <INDENT> from plugins.R import RCheckInstallSection <NEW_LINE> class ChecksMockup(object): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> class ApplicableRCheckInstallSection(RCheckInstallSection): <NEW_LINE> <INDENT> def is_applicable(self): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> self.init_test('test-R', argv=['-rpn', 'R-Rdummypkg', '--no-build']) <NEW_LINE> spec = SpecFile(os.path.join(os.getcwd(), 'R-Rdummypkg-bad.spec')) <NEW_LINE> check = ApplicableRCheckInstallSection(ChecksMockup()) <NEW_LINE> check.checks.spec = spec <NEW_LINE> check.run() <NEW_LINE> note = check.result.output_extra <NEW_LINE> self.assertTrue(check.is_failed) <NEW_LINE> self.assertTrue('directory creation' in note) <NEW_LINE> self.assertTrue('removal of *.o and *.so' in note) <NEW_LINE> self.assertTrue('removal of the R.css file' in note) <NEW_LINE> self.assertTrue('R CMD INSTALL function' in note)
test R spec, expected to fail.
625941c2d7e4931a7ee9dec3
def ex8_cpv_hist(bot_year=2008, top_year=2020, country_list=countries, cpv='50'): <NEW_LINE> <INDENT> pipeline = [] <NEW_LINE> list_documents = [] <NEW_LINE> return list_documents
Produce an histogram where each bucket has the contract counts of a particular cpv in a given range of values (bucket) according to 'VALUE_EURO' Choose 10 buckets of any partition Buckets Example: 0 to 100000 100000 to 200000 200000 to 300000 300000 to 400000 400000 to 500000 500000 to 600000 600000 to 700000 700000 to 800000 800000 to 900000 900000 to 1000000 So given a CPV Division code (two digit string) return a list of documents where each document as the bucket _id, and respective bucket count. Result filterable by floor year, roof year and country_list Expected Output (list of documents): [{bucket: value_1, count: value_2}, ....] Where: value_1 = lower limit of respective bucket (if bucket position 0 of example then bucket:0 ) value_2 = contract count for thar particular bucket, (int)
625941c2283ffb24f3c558a9
def setConfig(self, config): <NEW_LINE> <INDENT> self._previousIdentifier = config['identifier'] <NEW_LINE> self._previousGFLoc = config['GF Filename'] <NEW_LINE> self._previousEnsLoc = config['ensemble filename'] <NEW_LINE> self._previousMeshLoc = config['mesh filename'] <NEW_LINE> self._previousPathLoc = config['path'] <NEW_LINE> self._ui.idLineEdit.setText(config['identifier']) <NEW_LINE> self._ui.gfLocLineEdit.setText(config['GF Filename']) <NEW_LINE> self._ui.ensLocLineEdit.setText(config['ensemble filename']) <NEW_LINE> self._ui.meshLocLineEdit.setText(config['mesh filename']) <NEW_LINE> self._ui.pathLocLineEdit.setText(config['path'])
Set the current value of the configuration for the dialog. Also set the _previousIdentifier value so that we can check uniqueness of the identifier over the whole of the workflow.
625941c24f6381625f1149e1
def _dump_image_info(self, local_images, pvc_images): <NEW_LINE> <INDENT> LOG.debug(_('Local hosting OS image dict: %s'), str(local_images)) <NEW_LINE> LOG.debug(_('PowerVC image dict: %s'), str(pvc_images)) <NEW_LINE> LOG.debug(_('Image ids dict: %s'), str(self.ids_dict)) <NEW_LINE> LOG.debug(_('Local hosting OS updated_at dict: %s'), str(self.local_updated_at)) <NEW_LINE> LOG.debug(_('PowerVC updated_at dict: %s'), str(self.pvc_updated_at))
Dump out the current image information :param: local_images A dict of the local hostingOS images :param: pvc_images A dict of the PowerVC images
625941c2ec188e330fd5a749
def behind(self, point): <NEW_LINE> <INDENT> if np.dot(self.direction, np.array(point)) - np.dot(self.direction, self.position) < 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Returns True is if 'point' is behind the ray location. Used in Scene.sort() :param point: A cartesian point :return: boolean Discussion: The equation of a plane is ax + by + cz + d = 0. Let point O, lie on the plane. The normal of the plane is, n = [a, b, c], thus d = -n dot O If n.p + d < 0, the point is behind the plane
625941c291f36d47f21ac496
def _place_node(self, node_id, x, min_y): <NEW_LINE> <INDENT> self.processed.append(node_id) <NEW_LINE> try: <NEW_LINE> <INDENT> y_occupied = self.occupied[x] <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> self.occupied.append(-1) <NEW_LINE> y_occupied = -1 <NEW_LINE> <DEDENT> y = max(min_y, y_occupied + 1) <NEW_LINE> try: <NEW_LINE> <INDENT> first_child = self.children[node_id][0] <NEW_LINE> y += self._place_node(first_child, x + 1, y) <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> self.occupied[x] = y <NEW_LINE> self.positions[node_id] = (x, y) <NEW_LINE> for child in self.children[node_id][1:]: <NEW_LINE> <INDENT> self._place_node(child, x + 1, y) <NEW_LINE> <DEDENT> return y - min_y
Determine x, y position for a node. node_id: id of the node to be positioned x: x position (depth) of the node min_y: minimal y position of the node (can't be above parent nodes) returns: y offset relative to min_y
625941c2d164cc6175782cf4
def select_task(conn, sql,params): <NEW_LINE> <INDENT> cur = conn.cursor() <NEW_LINE> cur.execute(sql, params) <NEW_LINE> rows = cur.fetchall() <NEW_LINE> return rows
Query tasks by priority :param conn: the Connection object :param priority: :return:
625941c291f36d47f21ac497
def depthFirstSearch(problem): <NEW_LINE> <INDENT> return GeneralSearch(problem, util.Stack())
Search the deepest nodes in the search tree first Your search algorithm needs to return a list of actions that reaches the goal. Make sure to implement a graph search algorithm To get started, you might want to try some of these simple commands to understand the search problem that is being passed in: print "Start:", problem.getStartState() print "Is the start a goal?", problem.isGoalState(problem.getStartState()) print "Start's successors:", problem.getSuccessors(problem.getStartState()) fringe keeps track of tuple to record state and path
625941c27cff6e4e8111792c
def make_256color(self, colortype, val): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> ival = int(val) <NEW_LINE> <DEDENT> except (TypeError, ValueError) as ex: <NEW_LINE> <INDENT> raise self.make_256error(colortype, val) from ex <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if (ival < 0) or (ival > 255): <NEW_LINE> <INDENT> raise self.make_256error(colortype, val) <NEW_LINE> <DEDENT> <DEDENT> if colortype == 'fore': <NEW_LINE> <INDENT> return self.extforefmt(str(ival)) <NEW_LINE> <DEDENT> elif colortype == 'back': <NEW_LINE> <INDENT> return self.extbackfmt(str(ival)) <NEW_LINE> <DEDENT> errmsg = 'Invalid colortype: {}'.format(colortype) <NEW_LINE> raise ColorCodes.Invalid256Color(errmsg)
Create a 256 color code based on type ('fore' or 'back') out of a number (can be string). Raises ColorCodes.Invalid256Color() on error. Returns the raw color code on success.
625941c24527f215b584c3ff
def read_installation_log(fname): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open(fname) as f: <NEW_LINE> <INDENT> file_contents = list(f.readlines()) <NEW_LINE> <DEDENT> <DEDENT> except IOError: <NEW_LINE> <INDENT> log.d('Log not found: ' + fname) <NEW_LINE> return [] <NEW_LINE> <DEDENT> mods_list = [] <NEW_LINE> for line in file_contents: <NEW_LINE> <INDENT> if line.startswith('mods/'): <NEW_LINE> <INDENT> mods_list.append(line.strip().replace('mods/', '')) <NEW_LINE> <DEDENT> <DEDENT> return mods_list
Read an 'installed_raws.txt' and return the mods.
625941c21f5feb6acb0c4af9
def echo_add_to_file(echo_string, out_file): <NEW_LINE> <INDENT> check_cmd = 'echo "%s" >> %s' % (echo_string, out_file) <NEW_LINE> output = subprocess.getoutput(check_cmd) <NEW_LINE> error = False <NEW_LINE> if output: <NEW_LINE> <INDENT> error = True <NEW_LINE> <DEDENT> assert not error, "echo is complaining:\n%s\n%s" % (check_cmd, output)
Add a string to file, using echo command.
625941c23eb6a72ae02ec47e
def read_nodes(path): <NEW_LINE> <INDENT> arr = np.genfromtxt(path, delimiter = "'", dtype = np.int32) <NEW_LINE> return arr
Read data example and save it >>> read_nodes("examples/nodes1.csv") array([1, 2, 3, 1, 2, 3, 1, 2])
625941c273bcbd0ca4b2c01d
def save_revision(self, user=None, submitted_for_moderation=False, approved_go_live_at=None, changed=True, log_action=False, previous_revision=None, clean=True): <NEW_LINE> <INDENT> if self.alias_of_id: <NEW_LINE> <INDENT> raise RuntimeError( "save_revision() was called on an alias page. " "Revisions are not required for alias pages as they are an exact copy of another page." ) <NEW_LINE> <DEDENT> if clean: <NEW_LINE> <INDENT> self.full_clean() <NEW_LINE> <DEDENT> revision = self.revisions.create( content_json=self.to_json(), user=user, submitted_for_moderation=submitted_for_moderation, approved_go_live_at=approved_go_live_at, ) <NEW_LINE> update_fields = [] <NEW_LINE> self.latest_revision_created_at = revision.created_at <NEW_LINE> update_fields.append('latest_revision_created_at') <NEW_LINE> self.draft_title = self.title <NEW_LINE> update_fields.append('draft_title') <NEW_LINE> if changed: <NEW_LINE> <INDENT> self.has_unpublished_changes = True <NEW_LINE> update_fields.append('has_unpublished_changes') <NEW_LINE> <DEDENT> if update_fields: <NEW_LINE> <INDENT> self.save(update_fields=update_fields, clean=False) <NEW_LINE> <DEDENT> logger.info("Page edited: \"%s\" id=%d revision_id=%d", self.title, self.id, revision.id) <NEW_LINE> if log_action: <NEW_LINE> <INDENT> if not previous_revision: <NEW_LINE> <INDENT> PageLogEntry.objects.log_action( instance=self, action=log_action if isinstance(log_action, str) else 'wagtail.edit', user=user, revision=revision, content_changed=changed, ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> PageLogEntry.objects.log_action( instance=self, action=log_action if isinstance(log_action, str) else 'wagtail.revert', user=user, data={ 'revision': { 'id': previous_revision.id, 'created': previous_revision.created_at.strftime("%d %b %Y %H:%M") } }, revision=revision, content_changed=changed, ) <NEW_LINE> <DEDENT> <DEDENT> if submitted_for_moderation: <NEW_LINE> <INDENT> logger.info("Page submitted for moderation: \"%s\" id=%d 
revision_id=%d", self.title, self.id, revision.id) <NEW_LINE> <DEDENT> return revision
Creates and saves a page revision. :param user: the user performing the action :param submitted_for_moderation: indicates whether the page was submitted for moderation :param approved_go_live_at: the date and time the revision is approved to go live :param changed: indicates whether there were any content changes :param log_action: flag for logging the action. Pass False to skip logging. Can be passed an action string. Defaults to 'wagtail.edit' when no 'previous_revision' param is passed, otherwise 'wagtail.revert' :param previous_revision: indicates a revision reversal. Should be set to the previous revision instance :param clean: Set this to False to skip cleaning page content before saving this revision :return: the newly created revision
625941c2090684286d50ec8a
def runfilter(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.rtn_filename() <NEW_LINE> <DEDENT> except getopt.GetoptError as err: <NEW_LINE> <INDENT> print(err) <NEW_LINE> self.usage() <NEW_LINE> sys.exit(2) <NEW_LINE> <DEDENT> if (not self.attr['Interactive']): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.processfile(self.filename) <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> sys.stderr.write("script file '" + self.filename + "' cannot be ") <NEW_LINE> sys.stderr.write("loaded - No such file or directory\n\n") <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> except getopt.GetoptError: <NEW_LINE> <INDENT> print(err) <NEW_LINE> self.usage() <NEW_LINE> sys.exit(2) <NEW_LINE> <DEDENT> self.chk_commands() <NEW_LINE> <DEDENT> self.chk_attr() <NEW_LINE> chk_rtn = self.chk_memory()
Run configured job submission filter. After queue classes, illegal command and attribute rules have been specified, runfilter method will ensure submitted jobs do not violate any set rules.
625941c221a7993f00bc7c93
def dbsApiImplUpdateProcDSDesc(self, dataset, desc): <NEW_LINE> <INDENT> funcInfo = inspect.getframeinfo(inspect.currentframe()) <NEW_LINE> data = self._server._call ({ 'api' : 'updateProcDSDesc', 'path' : get_path(dataset), 'desc' : str(desc), }, 'POST')
Updates the description of a processed dataset dataset: Dataset to be updated desc : the description
625941c22eb69b55b151c853
def create_user(self, email, password=None, **extra_fields): <NEW_LINE> <INDENT> if not email: <NEW_LINE> <INDENT> raise ValueError('Users must have a valid email address') <NEW_LINE> <DEDENT> user = self.model(email=self.normalize_email(email), **extra_fields) <NEW_LINE> user.set_password(password) <NEW_LINE> user.save(using=self._db) <NEW_LINE> return user
Create and save a new user
625941c24a966d76dd550fb4
def formatDict(self, results): <NEW_LINE> <INDENT> formattedResults = DBFormatter.formatDict(self, results) <NEW_LINE> for formattedResult in formattedResults: <NEW_LINE> <INDENT> formattedResult["id"] = int(formattedResult["id"]) <NEW_LINE> <DEDENT> return formattedResults
_formatDict_ Cast the id column to an integer since formatDict() turns everything into strings.
625941c2cdde0d52a9e52fd7
def create_html_table_from_data(table_headings, table_data, table_name): <NEW_LINE> <INDENT> table_head = '<tr>' + '<th>%s</th>' * len(table_headings) + '</tr>\n' <NEW_LINE> table_body = '' <NEW_LINE> for item in table_data: <NEW_LINE> <INDENT> table_line = '<tr>' + '<td>%s</td>' * len(item) + '</tr>\n' <NEW_LINE> table_body += table_line % tuple(item) <NEW_LINE> <DEDENT> table_content = table_head % tuple(table_headings) + table_body <NEW_LINE> table_content = TABLE_TEMPLATE.replace('TABLE_CONTENT', table_content) <NEW_LINE> table_content = table_content.replace('TABLE_NAME', table_name) <NEW_LINE> return table_content
Takes in a list of tuples as data for the table, returns the data as an html table
625941c230dc7b766590190f
def _locality(self, executors): <NEW_LINE> <INDENT> if self.src: <NEW_LINE> <INDENT> if isinstance(self.src, Iterable): <NEW_LINE> <INDENT> localities = defaultdict(int) <NEW_LINE> forbidden = set() <NEW_LINE> for src in self.src: <NEW_LINE> <INDENT> if src.dset.sync_required: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> for executor, locality in src.locality(executors) or (): <NEW_LINE> <INDENT> if locality == FORBIDDEN: <NEW_LINE> <INDENT> forbidden.append(executor) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> localities[executor] += locality <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for executor in forbidden: <NEW_LINE> <INDENT> yield executor, FORBIDDEN <NEW_LINE> try: <NEW_LINE> <INDENT> del localities[executor] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> src_count = len(self.src) <NEW_LINE> for executor, locality in localities.items(): <NEW_LINE> <INDENT> yield executor, locality / src_count <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> src = self.src <NEW_LINE> if not src.dset.sync_required: <NEW_LINE> <INDENT> yield from src.locality(executors)
Determine locality of computing this partition. Cache locality is dealt with when this method is invoked by Partition.locality(executors). Typically source partition implementations which inherit from Partition indicate here whether computing on a executor shows locality. This allows dealing with caching and preference/requirements set at the data set separately from locality in the 'normal' case. :param executors: An iterable of executors. :returns: An iterable of (executor, locality:int) tuples indicating the executor locality; locality 0 can be omitted as this is the default locality.
625941c2b830903b967e98b3
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, PagedResourceListOfCutLabelDefinition): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.to_dict() == other.to_dict()
Returns true if both objects are equal
625941c20383005118ecf58a
def get_advise_python_with_http_info(self, analysis_id, **kwargs): <NEW_LINE> <INDENT> all_params = ['analysis_id'] <NEW_LINE> all_params.append('async_req') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in six.iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_advise_python" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('analysis_id' not in params or params['analysis_id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `analysis_id` when calling `get_advise_python`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> path_params = {} <NEW_LINE> if 'analysis_id' in params: <NEW_LINE> <INDENT> path_params['analysis_id'] = params['analysis_id'] <NEW_LINE> <DEDENT> query_params = [] <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) <NEW_LINE> auth_settings = [] <NEW_LINE> return self.api_client.call_api( '/advise/python/{analysis_id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdviserResultResponse', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Get computed adviser result based on its identifier # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_advise_python_with_http_info(analysis_id, async_req=True) >>> result = thread.get() :param async_req bool :param str analysis_id: An identifier of the requested analysis (required) :return: AdviserResultResponse If the method is called asynchronously, returns the request thread.
625941c2f548e778e58cd523
def is_live(): <NEW_LINE> <INDENT> return self.stem.find('Live')!=-1
True if the file belongs to a Live image, False otherwise
625941c2627d3e7fe0d68df5
def normalize_cols(pair_count): <NEW_LINE> <INDENT> max_margin_error = 0.0 <NEW_LINE> for letter in alphabet: <NEW_LINE> <INDENT> total = 0.0 <NEW_LINE> for encrypted_letter in alphabet: <NEW_LINE> <INDENT> pair_key = (encrypted_letter, letter) <NEW_LINE> count = pair_count[pair_key] <NEW_LINE> total += count <NEW_LINE> <DEDENT> margin_error = abs(total - 1.0) <NEW_LINE> max_margin_error = max(max_margin_error, margin_error) <NEW_LINE> for encrypted_letter in alphabet: <NEW_LINE> <INDENT> pair_key = (encrypted_letter, letter) <NEW_LINE> pair_count[pair_key] /= total <NEW_LINE> <DEDENT> <DEDENT> logger.debug("Max row margin error: %s" % max_margin_error)
:param pair_count: a pair count object, a Counter with tuples for keys like ('a','b') for each pair of letters :return: normalized along the col direction
625941c2d8ef3951e32434e4
def get(self, request, format=None): <NEW_LINE> <INDENT> parameters = { 'query': request.REQUEST.get('query', ''), 'started': int(request.REQUEST.get('started', 0)), 'hasTA': int(request.REQUEST.get('hasTA', 0)), } <NEW_LINE> if not parameters['started']: <NEW_LINE> <INDENT> parameters['started'] = 'false' <NEW_LINE> <DEDENT> course_list, total_length = new_search(parameters['query'], started=parameters['started'], hasTA=parameters['hasTA']) <NEW_LINE> response = fill_in_courses(course_list, request.user) <NEW_LINE> return Response(response)
Search
625941c20fa83653e4656f62
def readGenos(self,genofile,referencefile=None): <NEW_LINE> <INDENT> if referencefile: <NEW_LINE> <INDENT> self.gen = np.zeros((len(self.ped),len(self.mark))) <NEW_LINE> self.gen1 = np.zeros((len(self.ped),len(self.mark))) <NEW_LINE> self.gen1[:] = np.nan <NEW_LINE> <DEDENT> else: self.gen = np.zeros((len(self.ped),len(self.mark))) <NEW_LINE> self.gen[:] = np.nan <NEW_LINE> if genofile.rsplit('.',1)[1] == 'gz': op = gzip.open <NEW_LINE> else: op = open <NEW_LINE> with op(genofile,'r') as fin: <NEW_LINE> <INDENT> for line in fin: <NEW_LINE> <INDENT> if line.startswith('#'): <NEW_LINE> <INDENT> mlist = line.strip('#').strip().split() <NEW_LINE> continue <NEW_LINE> <DEDENT> l = line.strip().split() <NEW_LINE> if len(l) < 1: continue <NEW_LINE> try: irow = self.ped[l[self.nc]]['rank'] <NEW_LINE> except KeyError: continue <NEW_LINE> for i,mark in enumerate(mlist): <NEW_LINE> <INDENT> if mark not in self.mark: continue <NEW_LINE> icol = self.mark[mark]['rank'] <NEW_LINE> if self.ia == 1: <NEW_LINE> <INDENT> a = l[i+self.ic] <NEW_LINE> <DEDENT> elif self.ia == 2: <NEW_LINE> <INDENT> a = self.tbase012(l[i+self.ic],mark) <NEW_LINE> <DEDENT> elif self.ia == 3: <NEW_LINE> <INDENT> a = self.tbase012(l[i*2+self.ic]+l[i*2+1+self.ic],mark) <NEW_LINE> <DEDENT> if a not in ['0','1','2']: a = np.nan <NEW_LINE> else: a = int(a) <NEW_LINE> self.gen[irow,icol] = a-1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if not referencefile: return <NEW_LINE> if referencefile.rsplit('.',1)[1] == 'gz': op = gzip.open <NEW_LINE> else: op = open <NEW_LINE> with op(referencefile,'r') as fin: <NEW_LINE> <INDENT> for line in fin: <NEW_LINE> <INDENT> if line.startswith('#'): <NEW_LINE> <INDENT> mlist = line.strip('#').strip().split() <NEW_LINE> continue <NEW_LINE> <DEDENT> l = line.strip().split() <NEW_LINE> if len(l) < 1: continue <NEW_LINE> try: irow = self.ped[l[self.nc]]['rank'] <NEW_LINE> except KeyError: continue <NEW_LINE> for i,mark in enumerate(mlist): <NEW_LINE> <INDENT> if mark not in self.mark: 
continue <NEW_LINE> icol = self.mark[mark]['rank'] <NEW_LINE> if self.ia == 1: <NEW_LINE> <INDENT> a = l[i+self.ic] <NEW_LINE> <DEDENT> elif self.ia == 2: <NEW_LINE> <INDENT> a = self.tbase012(l[i+self.ic],mark) <NEW_LINE> <DEDENT> elif self.ia == 3: <NEW_LINE> <INDENT> a = self.tbase012(l[i*2+self.ic]+l[i*2+1+self.ic],mark) <NEW_LINE> <DEDENT> if a not in ['0','1','2']: a = np.nan <NEW_LINE> else: a = int(a) <NEW_LINE> self.gen1[irow,icol] = a-1
Reads the genotype file and converts the genotypes into a numpy array as 0/1/2 The -a parameter will specify if the alleles are given as: 0/1/2 (-a 1), 11/13/33 (-a 2), 1 1/1 3/3 3 (-a 3)
625941c2a934411ee375163a
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999): <NEW_LINE> <INDENT> updates = [] <NEW_LINE> if not isinstance(cost_or_grads, list): <NEW_LINE> <INDENT> grads = tf.gradients(cost_or_grads, params) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> grads = cost_or_grads <NEW_LINE> <DEDENT> t = tf.Variable(1., 'adam_t') <NEW_LINE> for p, g in zip(params, grads): <NEW_LINE> <INDENT> mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg') <NEW_LINE> if mom1 > 0: <NEW_LINE> <INDENT> v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v') <NEW_LINE> v_t = mom1 * v + (1. - mom1) * g <NEW_LINE> v_hat = v_t / (1. - tf.pow(mom1, t)) <NEW_LINE> updates.append(v.assign(v_t)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> v_hat = g <NEW_LINE> <DEDENT> mg_t = mom2 * mg + (1. - mom2) * tf.square(g) <NEW_LINE> mg_hat = mg_t / (1. - tf.pow(mom2, t)) <NEW_LINE> g_t = v_hat / tf.sqrt(mg_hat + 1e-8) <NEW_LINE> p_t = p - lr * g_t <NEW_LINE> updates.append(mg.assign(mg_t)) <NEW_LINE> updates.append(p.assign(p_t)) <NEW_LINE> <DEDENT> updates.append(t.assign_add(1)) <NEW_LINE> return tf.group(*updates)
Builds an adam optimizer.
625941c2e64d504609d747e6
def GetGenerateScalesOutput(self): <NEW_LINE> <INDENT> return _itkMultiScaleHessianBasedMeasureImageFilterPython.itkMultiScaleHessianBasedMeasureImageFilterID3ISSRTD33ID3_GetGenerateScalesOutput(self)
GetGenerateScalesOutput(self) -> bool
625941c29b70327d1c4e0d7b
def unmute(self): <NEW_LINE> <INDENT> ret = lib.dc_set_chat_mute_duration(self.account._dc_context, self.id, 0) <NEW_LINE> if not bool(ret): <NEW_LINE> <INDENT> raise ValueError("Failed to unmute chat")
unmutes the chat :returns: None
625941c23d592f4c4ed1d019
def test_ignore_empty(self): <NEW_LINE> <INDENT> doc_1 = get_xml_declaration('1.0', 'utf-8') + get_xml_doctype('foo', 'SYSTEM', 'test.dtd') + '<foo></foo>' <NEW_LINE> doc_2 = get_xml_declaration('1.0', 'utf-8') + get_xml_doctype('foo', 'SYSTEM', 'test.dtd') + '<foo><bar /></foo>' <NEW_LINE> f1 = file_obj_from_string(doc_1) <NEW_LINE> f2 = file_obj_from_string(doc_2) <NEW_LINE> self.assertIsNone(xml_compare.compare_files(f1, f2))
Ensure that empty tags are ignored if ignore_empty_tags is set to True.
625941c2cb5e8a47e48b7a53
def radius(G, e=None): <NEW_LINE> <INDENT> if e is None: <NEW_LINE> <INDENT> e=eccentricity(G) <NEW_LINE> <DEDENT> return min(e.values())
Return the radius of the graph G. The radius is the minimum eccentricity. Parameters ---------- G : NetworkX graph A graph e : eccentricity dictionary, optional A precomputed dictionary of eccentricities. Returns ------- r : integer Radius of graph
625941c2fb3f5b602dac3638
def add_time(time): <NEW_LINE> <INDENT> return time * 0.9 + time * 0.2 * random.random()
Make some random for next iteration
625941c2fff4ab517eb2f3e1
@task.task() <NEW_LINE> def run_downsampling(): <NEW_LINE> <INDENT> datastream.downsample_streams()
Executes the `downsample_streams` API method on the datastream backend as some backends need this to be executed periodically.
625941c2a8370b7717052847
def plot_volume(nifti_file,v_color=(.98,.63,.48),v_opacity=.1, fliplr=False, newfig=False): <NEW_LINE> <INDENT> import nibabel as nib <NEW_LINE> if newfig: <NEW_LINE> <INDENT> mlab.figure(bgcolor=(1, 1, 1), size=(400, 400)) <NEW_LINE> <DEDENT> input = nib.load(nifti_file) <NEW_LINE> input_d = input.get_data() <NEW_LINE> d_shape = input_d.shape <NEW_LINE> if fliplr: <NEW_LINE> <INDENT> input_d = input_d[range(d_shape[0]-1,-1,-1), :, :] <NEW_LINE> <DEDENT> mlab.contour3d(input_d, color=v_color, opacity=v_opacity)
Render a volume from a .nii file Use fliplr option if scan has radiological orientation
625941c260cbc95b062c64e9
def start(self): <NEW_LINE> <INDENT> def _check_list(list_object): <NEW_LINE> <INDENT> if list_object: <NEW_LINE> <INDENT> return list_object <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None, None, None <NEW_LINE> <DEDENT> <DEDENT> def _list(l_payload, go, l_last_obj): <NEW_LINE> <INDENT> return _check_list( list_object=go.object_lister( url=l_payload['url'], container=l_payload['c_name'], last_obj=l_last_obj ) ) <NEW_LINE> <DEDENT> payload = http.prep_payload( auth=self.auth, container=ARGS.get('container'), source=None, args=ARGS ) <NEW_LINE> self.go = actions.CloudActions(payload=payload) <NEW_LINE> report.reporter( msg='API Access for a list of Objects in %s' % payload['c_name'], log=True ) <NEW_LINE> report.reporter( msg='PAYLOAD\t: "%s"' % payload, log=True, lvl='debug', prt=False ) <NEW_LINE> last_obj = None <NEW_LINE> with multi.spinner(): <NEW_LINE> <INDENT> objects, list_count, last_obj = _list( l_payload=payload, go=self.go, l_last_obj=last_obj ) <NEW_LINE> if 'pattern_match' in ARGS: <NEW_LINE> <INDENT> objects = basic.match_filter( idx_list=objects, pattern=ARGS['pattern_match'], dict_type=True ) <NEW_LINE> <DEDENT> if ARGS.get('filter') is not None: <NEW_LINE> <INDENT> objects = [obj for obj in objects if ARGS.get('filter') in obj.get('name')] <NEW_LINE> <DEDENT> <DEDENT> if objects is False: <NEW_LINE> <INDENT> report.reporter(msg='Nothing found.') <NEW_LINE> <DEDENT> elif len(objects) < 1: <NEW_LINE> <INDENT> report.reporter(msg='Nothing found.') <NEW_LINE> <DEDENT> elif ARGS.get('object'): <NEW_LINE> <INDENT> self.go.object_updater( url=payload['url'], container=payload['c_name'], u_file=last_obj ) <NEW_LINE> <DEDENT> elif objects is not None: <NEW_LINE> <INDENT> kwargs = { 'url': payload['url'], 'container': payload['c_name'], 'cf_job': getattr(self.go, 'object_updater'), } <NEW_LINE> object_names = [i['name'] for i in objects] <NEW_LINE> num_files = len(object_names) <NEW_LINE> concurrency = multi.set_concurrency( args=ARGS, 
file_count=num_files ) <NEW_LINE> multi.job_processer( num_jobs=num_files, objects=object_names, job_action=multi.doerator, concur=concurrency, kwargs=kwargs ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> report.reporter(msg='Nothing found.')
Return a list of objects from the API for a container.
625941c24d74a7450ccd416a
def _do_complete_added_revision(self, **kwargs): <NEW_LINE> <INDENT> pass
Hook for indiividual revisions to process additional parameters necessary to create a new revision representing an added record. By default no additional processing is done, so subclasses are free to override this method without calling it on the parent class.
625941c2a79ad161976cc0ec
def normalize_args_sync(self, args): <NEW_LINE> <INDENT> if not self.infer_value: <NEW_LINE> <INDENT> args = tuple(_broaden(a) for a in args) <NEW_LINE> <DEDENT> return args
If infer_value is False, return broadened arguments.
625941c2d10714528d5ffc88
def test_default(): <NEW_LINE> <INDENT> assert MINIMAL_STOP.wheelchair_boarding == 0 <NEW_LINE> assert MINIMAL_STOP.location_type == 0
test_default: check for correct default values (wheelchair_boarding and location_type)
625941c27d43ff24873a2c46
def createMov(self, shotCutList, marker): <NEW_LINE> <INDENT> fontColor = self.mode['[burnInTopFontColor]'] <NEW_LINE> fontSize = self.mode['[burnInTopFontSize]'] <NEW_LINE> prerendered = self.getPrerenderedString(shotCutList) <NEW_LINE> version = shotCutList.version <NEW_LINE> nukeFolder = self.mode['[localFolder]'] <NEW_LINE> shots = [] <NEW_LINE> for shot in shotCutList: <NEW_LINE> <INDENT> kargs = {'beat': shot.beat, 'setup': shot.setup, 'version': shot.version, 'frame': '%04d'} <NEW_LINE> path = shotCutList.mode.get('[recipeCompedFile]', kargs) <NEW_LINE> shotInfo = {_PANEL_PATH_KEY: path, _START_FRAME_KEY: shot.markInFrame, _END_FRAME_KEY: shot.markOutFrame} <NEW_LINE> shots.append(shotInfo) <NEW_LINE> <DEDENT> info_dict = {} <NEW_LINE> info_dict[_SEQUENCE_KEY] = marker.name <NEW_LINE> info_dict[_VERSION_KEY] = str(version) <NEW_LINE> info_dict[_START_FRAME_KEY] = self.markIn <NEW_LINE> info_dict[_END_FRAME_KEY] = self.markOut <NEW_LINE> info_dict[_PRERENDER_KEY] = prerendered <NEW_LINE> info_dict[_FONT_COLOR_KEY] = fontColor <NEW_LINE> info_dict[_FONT_SIZE_KEY] = fontSize <NEW_LINE> info_dict[_SHOTS_KEY] = shots <NEW_LINE> mov = ToMovEA() <NEW_LINE> flix.logger.log('\n\n>>>>>>>>>> CALLING TOMOVEA>COMPBUILD() <<<<<<<<<<\n') <NEW_LINE> mov.compBuild(shotCutList, info_dict) <NEW_LINE> movPath = self.repath.localize(mov.movieName) <NEW_LINE> flix.logger.log('movPath: {}'.format(movPath)) <NEW_LINE> return movPath
Composite the slate information onto a ShotCutList Args: shotCutList: The ShotCutList to composite marker: The Marker corresponding to shotCutList Returns: The path to the composited movie
625941c250485f2cf553cd40
def remote_type_mode(self): <NEW_LINE> <INDENT> for widget, mode in self.widget_display_mode(): <NEW_LINE> <INDENT> widget.hide()
In this mode, the user selects either an existing remote or the type of the new remote (directory or web).
625941c24c3428357757c2d1
def fitstars(uknstars, refstars, verbose=True): <NEW_LINE> <INDENT> assert len(uknstars) == len(refstars) <NEW_LINE> if len(uknstars) < 2: <NEW_LINE> <INDENT> logger.debug("Sorry I cannot fit a transform on less than 2 stars.") <NEW_LINE> return None <NEW_LINE> <DEDENT> ref = np.hstack(listtoarray(refstars)) <NEW_LINE> uknlist = [] <NEW_LINE> for star in uknstars: <NEW_LINE> <INDENT> uknlist.append([star.x, -star.y, 1, 0]) <NEW_LINE> uknlist.append([star.y, star.x, 0, 1]) <NEW_LINE> <DEDENT> ukn = np.vstack(np.array(uknlist)) <NEW_LINE> if len(uknstars) == 2: <NEW_LINE> <INDENT> trans = np.linalg.solve(ukn, ref) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> trans = np.linalg.lstsq(ukn, ref,rcond=None)[0] <NEW_LINE> <DEDENT> return SimpleTransform(np.asarray(trans))
I return the transform that puts the unknown stars (uknstars) onto the refstars. If you supply only two stars, this is using linalg.solve() -- perfect solution. If you supply more stars, we use linear least squares, i.e. minimize the 2D error. Formalism inspired by : http://math.stackexchange.com/questions/77462/
625941c294891a1f4081ba4f
def get_tfidf(docs, dictionary): <NEW_LINE> <INDENT> bow_corpus = [dictionary.doc2bow(doc) for doc in docs] <NEW_LINE> tfidf = gensim.models.TfidfModel(bow_corpus) <NEW_LINE> corpus_tfidf = tfidf[bow_corpus] <NEW_LINE> return corpus_tfidf
Get the TF-IDF transformed data :param docs: given descriptor :param dictionary: Mapping of words to # :return: tfidf model
625941c23eb6a72ae02ec47f
def gen_stmt(self, stmt, func): <NEW_LINE> <INDENT> if stmt.kind == TN.COMP_STMT: <NEW_LINE> <INDENT> if stmt.stmt_list is not None: <NEW_LINE> <INDENT> for body_stmt in stmt.stmt_list: <NEW_LINE> <INDENT> self.gen_stmt(body_stmt, func) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif stmt.kind in (TN.WRITE_STMT, TN.WRITELN_STMT): <NEW_LINE> <INDENT> self.gen_write_stmt(stmt, func) <NEW_LINE> <DEDENT> elif stmt.kind == TN.IF_STMT: <NEW_LINE> <INDENT> self.gen_if_stmt(stmt, func) <NEW_LINE> <DEDENT> elif stmt.kind == TN.WHILE_STMT: <NEW_LINE> <INDENT> self.gen_while_stmt(stmt, func) <NEW_LINE> <DEDENT> elif stmt.kind == TN.RET_STMT: <NEW_LINE> <INDENT> if stmt.val is not None: <NEW_LINE> <INDENT> self.gen_expr(stmt.val) <NEW_LINE> <DEDENT> self.write_instr('jmp', func.ret_label, comment='return from {}'.format(func.name)) <NEW_LINE> <DEDENT> elif stmt.kind == TN.EXPR_STMT: <NEW_LINE> <INDENT> self.gen_expr(stmt.expr)
Generate code for a statement tree node :stmt:. :func: is the function that :stmt: belongs to.
625941c2f9cc0f698b1405a4
def add_site(self, site): <NEW_LINE> <INDENT> obj, created = GallerySites.objects.get_or_create( gallery=self, site=site ) <NEW_LINE> return obj
Associates a :class:`django.contrib.sites.models.Site` object with this gallery.
625941c226068e7796caec83
def preprocess(self, image): <NEW_LINE> <INDENT> return image
Preprocess Image, e.g., resizing
625941c24e4d5625662d4381
def assign_daily_distances(plan, form_data): <NEW_LINE> <INDENT> for week in plan.weeks: <NEW_LINE> <INDENT> for day in week.days: <NEW_LINE> <INDENT> day.distance = min(day.distance, determine_max_long_run(form_data)) <NEW_LINE> diff = week._target_distance - week.distance <NEW_LINE> if diff > 5: <NEW_LINE> <INDENT> short_day = week.shortest_day() <NEW_LINE> if short_day.type == Day_types.Crosstrain: <NEW_LINE> <INDENT> short_day.type = Day_types.Easy <NEW_LINE> <DEDENT> short_day.distance += diff <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> short_days = week.shortest_days(3) <NEW_LINE> for day in short_days: <NEW_LINE> <INDENT> if day.type == Day_types.Crosstrain: <NEW_LINE> <INDENT> day.type = Day_types.Easy <NEW_LINE> <DEDENT> day.distance += diff / 3 <NEW_LINE> <DEDENT> <DEDENT> if day.date == form_data.race_date: <NEW_LINE> <INDENT> day.distance = form_data.race_distance
Now that weeks are set, customize individual daily distances.
625941c2fbf16365ca6f6167
def test_02_supressing_mulitple_errors_should_prevent_any_of_them_throwing(self): <NEW_LINE> <INDENT> with suppress.suppress(IOError, KeyError): <NEW_LINE> <INDENT> with open('blahblah'): <NEW_LINE> <INDENT> raise Exception("FAIL: Somehow opened blahblah, why on earth does it exist!") <NEW_LINE> <DEDENT> d = {} <NEW_LINE> print(d['Bad key']) <NEW_LINE> raise Exception("FAIL: Bad key exists....")
Suppressing multiple error types should prevent them being raised
625941c229b78933be1e5656
def get_version(rel_path): <NEW_LINE> <INDENT> for line in read(rel_path).splitlines(): <NEW_LINE> <INDENT> if line.startswith('__version__'): <NEW_LINE> <INDENT> delim = '"' if '"' in line else "'" <NEW_LINE> return line.split(delim)[1] <NEW_LINE> <DEDENT> <DEDENT> raise RuntimeError("Unable to find version string.")
Gets the version number declared in the `__version__` constant of the Python file at `rel_path`.
625941c2c4546d3d9de729d9
def attachEventListener(name, callback): <NEW_LINE> <INDENT> global eventpool <NEW_LINE> eventpool.attachEventListener(name, callback)
Attach a new event listener to the event pool
625941c2287bf620b61d3a0c
def encode_scene_primitives(scenes, export_settings): <NEW_LINE> <INDENT> dll = cdll.LoadLibrary(str(dll_path().resolve())) <NEW_LINE> dll.encoderCreate.restype = c_void_p <NEW_LINE> dll.encoderCreate.argtypes = [c_uint32] <NEW_LINE> dll.encoderRelease.restype = None <NEW_LINE> dll.encoderRelease.argtypes = [c_void_p] <NEW_LINE> dll.encoderSetCompressionLevel.restype = None <NEW_LINE> dll.encoderSetCompressionLevel.argtypes = [c_void_p, c_uint32] <NEW_LINE> dll.encoderSetQuantizationBits.restype = None <NEW_LINE> dll.encoderSetQuantizationBits.argtypes = [c_void_p, c_uint32, c_uint32, c_uint32, c_uint32, c_uint32] <NEW_LINE> dll.encoderSetIndices.restype = None <NEW_LINE> dll.encoderSetIndices.argtypes = [c_void_p, c_size_t, c_uint32, c_void_p] <NEW_LINE> dll.encoderSetAttribute.restype = c_uint32 <NEW_LINE> dll.encoderSetAttribute.argtypes = [c_void_p, c_char_p, c_size_t, c_char_p, c_void_p] <NEW_LINE> dll.encoderEncode.restype = c_bool <NEW_LINE> dll.encoderEncode.argtypes = [c_void_p, c_uint8] <NEW_LINE> dll.encoderGetEncodedVertexCount.restype = c_uint32 <NEW_LINE> dll.encoderGetEncodedVertexCount.argtypes = [c_void_p] <NEW_LINE> dll.encoderGetEncodedIndexCount.restype = c_uint32 <NEW_LINE> dll.encoderGetEncodedIndexCount.argtypes = [c_void_p] <NEW_LINE> dll.encoderGetByteLength.restype = c_uint64 <NEW_LINE> dll.encoderGetByteLength.argtypes = [c_void_p] <NEW_LINE> dll.encoderCopy.restype = None <NEW_LINE> dll.encoderCopy.argtypes = [c_void_p, c_void_p] <NEW_LINE> encoded_primitives_cache = {} <NEW_LINE> for scene in scenes: <NEW_LINE> <INDENT> for node in scene.nodes: <NEW_LINE> <INDENT> __traverse_node(node, lambda node: __encode_node(node, dll, export_settings, encoded_primitives_cache)) <NEW_LINE> <DEDENT> <DEDENT> for scene in scenes: <NEW_LINE> <INDENT> for node in scene.nodes: <NEW_LINE> <INDENT> __traverse_node(node, lambda node: __cleanup_node(node))
Handles draco compression. Moves position, normal and texture coordinate attributes into a Draco encoded buffer.
625941c2046cf37aa974ccf1
def __init__(self, name, age): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> self.age = age <NEW_LINE> self.sex = 0
初始化属性name和age __init__类似与php中的__construct
625941c2091ae35668666f09
def test_disabled(self): <NEW_LINE> <INDENT> dropped_packet_indexes = set( index for (index, frags_400, _, _) in self.pkt_infos if len(frags_400) > 1) <NEW_LINE> self.vapi.ip_reassembly_set(timeout_ms=1000, max_reassemblies=0, expire_walk_interval_ms=10000) <NEW_LINE> self.pg_enable_capture() <NEW_LINE> self.src_if.add_stream(self.fragments_400) <NEW_LINE> self.pg_start() <NEW_LINE> packets = self.dst_if.get_capture( len(self.pkt_infos) - len(dropped_packet_indexes)) <NEW_LINE> self.verify_capture(packets, dropped_packet_indexes) <NEW_LINE> self.src_if.assert_nothing_captured()
reassembly disabled
625941c27c178a314d6ef403
def addResources(self, resourceIds): <NEW_LINE> <INDENT> if not isinstance(resourceIds, list): <NEW_LINE> <INDENT> resourceIds = [resourceIds] <NEW_LINE> <DEDENT> templist = [] <NEW_LINE> self._resourceIds = [] <NEW_LINE> for res in resourceIds: <NEW_LINE> <INDENT> toAdd = res <NEW_LINE> if not isinstance(res, ExternalResource): <NEW_LINE> <INDENT> temp = ExternalResource() <NEW_LINE> temp.resourceId = res <NEW_LINE> toAdd = temp <NEW_LINE> <DEDENT> self.append(toAdd) <NEW_LINE> templist.append(toAdd) <NEW_LINE> <DEDENT> return templist
Add a new external reference with the given uris. If you're looking to add ExternalResource objects, append() or extend() them instead. Args: resourceIds: a list of uris as strings
625941c2b57a9660fec33829
def print_vd_discovery(self): <NEW_LINE> <INDENT> for volume in self._qsan._VDs: <NEW_LINE> <INDENT> element = {'{#VOLUME}': self._qsan._get_VD_name_by_id(volume)} <NEW_LINE> self._DATA['data'].append(element) <NEW_LINE> <DEDENT> print(json.dumps(self._DATA, indent=2))
Returns: {"data": [{"{#VOLUME}": "volname"}, ... ]}
625941c28da39b475bd64f19
def on_load_async(self, view): <NEW_LINE> <INDENT> filetype = lang(view) <NEW_LINE> if filetype is '' or view.is_scratch(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> filepath = get_file_path() <NEW_LINE> content = view.substr(sublime.Region(0, view.size())) <NEW_LINE> t = Thread(None, notify_func, 'NotifyAsync', [filepath, content, self._on_errors, filetype]) <NEW_LINE> t.daemon = True <NEW_LINE> t.start()
Called when the file is finished loading
625941c2cc40096d615958f8
def checkSimpleCombDecoding(self, l): <NEW_LINE> <INDENT> class DecodingRecorder: <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.decodedList = [] <NEW_LINE> <DEDENT> def notifyPacketDecoded(self, packetId): <NEW_LINE> <INDENT> self.decodedList.append(packetId) <NEW_LINE> <DEDENT> <DEDENT> n = 1<<(1<<l) <NEW_LINE> coefList = [3,7,9,11,13,17,19] <NEW_LINE> coefList = [ x % n for x in coefList ] <NEW_LINE> c1,c2,c3,c4,c5,c6,c7 = coefList <NEW_LINE> for iList in itertools.permutations([0,1,2]): <NEW_LINE> <INDENT> pktList = liblc.makeCodedPacketList(l, 3) <NEW_LINE> recorder = DecodingRecorder() <NEW_LINE> pktSet = liblc.allocCPacketSet(l, recorder) <NEW_LINE> stat = liblc.new_reductionStat() <NEW_LINE> i0,i1,i2 = iList <NEW_LINE> pc0 = c1*pktList[i1].clone() + c2*pktList[i2].clone() <NEW_LINE> pc1 = c3*pktList[i0].clone() + c4*pktList[i2].clone() <NEW_LINE> pc2 = (c5*pktList[i0].clone() + c6*pktList[i1].clone() + c7*pktList[i2].clone()) <NEW_LINE> for p in [pc0, pc1, pc2]: <NEW_LINE> <INDENT> packetId = liblc.packet_set_add(pktSet, p.content, stat) <NEW_LINE> assert packetId != liblc.macro_PACKET_ID_NONE <NEW_LINE> <DEDENT> assert stat.decoded == 3 <NEW_LINE> assert sorted(recorder.decodedList) == range(3) <NEW_LINE> for j in range(3): <NEW_LINE> <INDENT> packetId = liblc.packet_set_get_id_of_pos(pktSet, j) <NEW_LINE> decoded = liblc.packet_set_get_coded_packet(pktSet, packetId) <NEW_LINE> self.assertTrue( liblc.coded_packet_was_decoded(decoded) ) <NEW_LINE> self.assertTrue( liblc.coded_packet_is_similar( decoded, pktList[j].content) ) <NEW_LINE> <DEDENT> liblc.freeCPacketSet(pktSet) <NEW_LINE> liblc.delete_reductionStat(stat)
Test if the packet_set code can correctly decode the set: { c1. P1 + c2. P2 , c3. P0 + c4. P2 , c5. P0 + c6. P1 + c7. P2 } with all permutations of coefficient positions
625941c2627d3e7fe0d68df6
def set_shot(self, shot_coords): <NEW_LINE> <INDENT> new_board_val = self.new_board_value(shot_coords) <NEW_LINE> self.update_field(shot_coords, new_board_val)
Updates the Board with the new Shot.
625941c2090684286d50ec8b
def hashcode(self, video_filepath): <NEW_LINE> <INDENT> hash_code = None <NEW_LINE> try: <NEW_LINE> <INDENT> struct_format = 'q' <NEW_LINE> struct_size = struct.calcsize(struct_format) <NEW_LINE> with open(video_filepath, "rb") as movie_file: <NEW_LINE> <INDENT> filesize = os.path.getsize(video_filepath) <NEW_LINE> movie_hash = filesize <NEW_LINE> if filesize < 65536 * 2: <NEW_LINE> <INDENT> raise VideoError() <NEW_LINE> <DEDENT> for x in range(65536//struct_size): <NEW_LINE> <INDENT> buffer = movie_file.read(struct_size) <NEW_LINE> (l_value,) = struct.unpack(struct_format, buffer) <NEW_LINE> movie_hash += l_value <NEW_LINE> movie_hash = movie_hash & 0xFFFFFFFFFFFFFFFF <NEW_LINE> <DEDENT> movie_file.seek(max(0, filesize - 65536), 0) <NEW_LINE> for x in range(65536//struct_size): <NEW_LINE> <INDENT> buffer = movie_file.read(struct_size) <NEW_LINE> (l_value,) = struct.unpack(struct_format, buffer) <NEW_LINE> movie_hash += l_value <NEW_LINE> movie_hash = movie_hash & 0xFFFFFFFFFFFFFFFF <NEW_LINE> <DEDENT> hash_code = "%016x" % movie_hash <NEW_LINE> <DEDENT> <DEDENT> except VideoError as error: <NEW_LINE> <INDENT> raise VideoSizeError(video_filepath) <NEW_LINE> <DEDENT> except Exception as error: <NEW_LINE> <INDENT> raise VideoHashCodeError(video_filepath, error) <NEW_LINE> <DEDENT> return hash_code
Generates Video Hash code.
625941c2eab8aa0e5d26daff
def __init__(self, client=None): <NEW_LINE> <INDENT> self.client = client or boto3.client('ses')
Constructs a new Simple Email Service with a boto3 SES client :param client: Client that will be used to interface with AWS SES. It is unlikely that you will ever need to pass another client to this constructor.
625941c2cc0a2c11143dce37
def __init__(self, **kwargs: Any) -> None: <NEW_LINE> <INDENT> self.tag = kwargs.get('tag', None) <NEW_LINE> self.ids = kwargs.get('ids', []) <NEW_LINE> self.classes = kwargs.get('classes', []) <NEW_LINE> self.attributes = kwargs.get('attributes', []) <NEW_LINE> self.nth = kwargs.get('nth', []) <NEW_LINE> self.selectors = kwargs.get('selectors', []) <NEW_LINE> self.relations = kwargs.get('relations', []) <NEW_LINE> self.rel_type = kwargs.get('rel_type', None) <NEW_LINE> self.contains = kwargs.get('contains', []) <NEW_LINE> self.lang = kwargs.get('lang', []) <NEW_LINE> self.flags = kwargs.get('flags', 0) <NEW_LINE> self.no_match = kwargs.get('no_match', False)
Initialize.
625941c28c3a873295158360
def mask_images(images: Iterable[SpatialImage], mask: np.ndarray, image_type: type = None) -> Iterable[np.ndarray]: <NEW_LINE> <INDENT> for images in multimask_images(images, (mask,), image_type): <NEW_LINE> <INDENT> yield images[0]
Mask images. Parameters ---------- images: Images to mask. mask: Mask to apply. image_type: Type to cast images to. Yields ------ np.ndarray Masked image.
625941c21f037a2d8b9461a6
def cast(*args): <NEW_LINE> <INDENT> return _itkBinaryThresholdImageFilterPython.itkBinaryThresholdImageFilterIUL2IUC2_Superclass_cast(*args)
cast(itkLightObject obj) -> itkBinaryThresholdImageFilterIUL2IUC2_Superclass
625941c2167d2b6e31218b3d
def improve_markdown(markdown: str) -> str: <NEW_LINE> <INDENT> md_text = html.unescape(markdown) <NEW_LINE> md_text = convert_code_block_style(md_text) <NEW_LINE> fixed_md, _ = blacken_docs.format_str( md_text, black.FileMode(target_versions="3.6", line_length=79) ) <NEW_LINE> return fixed_md
Improve markdown.
625941c2ac7a0e7691ed4077
def load_data_source(data_path, subreddit, page_samples, seed=None, relative=True): <NEW_LINE> <INDENT> pages_dir = os.path.join(data_path, 'pages') <NEW_LINE> subreddits_dir = os.path.join(data_path, 'subreddits') <NEW_LINE> sr_path = os.path.join(subreddits_dir, subreddit) <NEW_LINE> random.seed(seed) <NEW_LINE> data = {} <NEW_LINE> for json_file in os.listdir(sr_path): <NEW_LINE> <INDENT> with open(os.path.join(sr_path, json_file)) as fp: <NEW_LINE> <INDENT> post_data = json.load(fp) <NEW_LINE> <DEDENT> for post in post_data['data']['children']: <NEW_LINE> <INDENT> if post['kind'] == 't3': <NEW_LINE> <INDENT> url_path = get_path_from_url(pages_dir, post['data']['url']) <NEW_LINE> if relative: <NEW_LINE> <INDENT> url_path = os.path.relpath(url_path, pages_dir) <NEW_LINE> <DEDENT> data[url_path] = subreddit <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> remaining = set(search_files(pages_dir, relative=relative)) - set(data.keys()) <NEW_LINE> for url_path in random.sample(remaining, page_samples): <NEW_LINE> <INDENT> data[url_path] = None <NEW_LINE> <DEDENT> return data
Generates a dictionary of labeled and unlabelled pages from a Reddit Data Source as specified by the specification on the github Wiki. :param data_path: path to a Reddit Data Source. :param subreddit: labeled subreddit which is to be targeted. :param page_samples: number of random unlabelled page samples to use. :param seed: seed for the pseudo random generator. :param relative: relative or absolute paths. :return: dictionary of (label, path)
625941c2a8ecb033257d3075
def test_product_uninstalled(self): <NEW_LINE> <INDENT> self.assertFalse(self.installer.isProductInstalled( 'redturtle.drawio'))
Test if redturtle.drawio is cleanly uninstalled.
625941c2be7bc26dc91cd5ab
def voter_verify_secret_code_doc_template_values(url_root): <NEW_LINE> <INDENT> required_query_parameter_list = [ { 'name': 'api_key', 'value': 'string (from post, cookie, or get (in that order))', 'description': 'The unique key provided to any organization using the WeVoteServer APIs', }, { 'name': 'voter_device_id', 'value': 'string', 'description': 'An 88 character unique identifier linked to a voter record on the server. ', }, { 'name': 'secret_code', 'value': 'string', 'description': 'The six digit code to verify.', }, ] <NEW_LINE> optional_query_parameter_list = [ { 'name': 'code_sent_to_sms_phone_number', 'value': 'boolean', 'description': 'If true, process this verification for an SMS phone number. If false, process for email.', }, ] <NEW_LINE> potential_status_codes_list = [ ] <NEW_LINE> try_now_link_variables_dict = { } <NEW_LINE> api_response = '{\n' ' "status": string (description of what happened),\n' ' "success": boolean (True as long as no db errors),\n' ' "incorrect_secret_code_entered": boolean,\n' ' "number_of_tries_remaining_for_this_code": integer,\n' ' "secret_code_verified": boolean,\n' ' "voter_must_request_new_code": boolean,\n' ' "secret_code_system_locked_for_this_voter_device_id": string,\n' '}' <NEW_LINE> template_values = { 'api_name': 'voterVerifySecretCode', 'api_slug': 'voterVerifySecretCode', 'api_introduction': "Voter submits this six digit code to verify that they received an SMS message or email.", 'try_now_link': 'apis_v1:voterVerifySecretCodeView', 'try_now_link_variables_dict': try_now_link_variables_dict, 'url_root': url_root, 'get_or_post': 'GET', 'required_query_parameter_list': required_query_parameter_list, 'optional_query_parameter_list': optional_query_parameter_list, 'api_response': api_response, 'api_response_notes': "", 'potential_status_codes_list': potential_status_codes_list, } <NEW_LINE> return template_values
Show documentation about voterVerifySecretCode
625941c2167d2b6e31218b3e
def point_positive_int_test():
    """Verify that invalid "points" values are rejected by save_sga.

    NOTE(review): this is a test-suite snippet — `block`, `self`, `orig_score`,
    `mock`, and `json` must be supplied by the enclosing test class/module;
    as written at module level the names are unresolved. Presumably this
    belongs inside a test method of the SGA XBlock test case — confirm.
    """
    # A negative points value must be rejected: score stays unchanged.
    block.save_sga(mock.Mock(method="POST", body=json.dumps({
        "display_name": "Test Block",
        "points": '-10',
        "weight": 11})))
    self.assertEqual(block.points, orig_score)
    # A non-integer points value must also be rejected.
    block.save_sga(mock.Mock(method="POST", body=json.dumps({
        "display_name": "Test Block",
        "points": '24.5',
        "weight": 11})))
    self.assertEqual(block.points, orig_score)
Tests that points must be a positive integer: negative and non-integer values are rejected and the score is left unchanged.
625941c23cc13d1c6d3c7323
def set_preferences(prefs):
    """Store multiple key/value pairs in the session.

    :param prefs: mapping of preference names to values; each entry is
        written into the (module-level) session object.
    """
    for key, val in prefs.items():
        session[key] = val
Store multiple key/value pairs in the session.
625941c23617ad0b5ed67ea0
def upd_feat(c, f, w, v):
    """Apply an averaged-perceptron weight update for one (feature, class) pair.

    :param c: class (part-of-speech tag)
    :param f: feature name
    :param w: current weight for (f, c)
    :param v: increment to add to the weight

    Before changing the weight, credits the old weight for every iteration
    it was in effect (self.i - last-touched timestamp), so that averaged
    weights can be computed later from self._totals.
    NOTE(review): closure — relies on self.i, self._totals, self._tstamps
    and self.weights from the enclosing scope.
    """
    param = (f, c)
    # Accumulate w over the iterations since this pair was last updated.
    self._totals[param] += (self.i - self._tstamps[param]) * w
    self._tstamps[param] = self.i
    self.weights[f][c] = w + v
:params c:词性 :params f:特征 :params w:当前权重 :params v:权重更新值
625941c2b7558d58953c4ebf
def GraphSpi2(d):
    """Draw an Ulam-style prime spiral of *d* turns in a Tkinter window.

    (Translated from the original French docstring:) d is the number of
    turns; use about 150 turns to see Euler's phenomenon clearly — beyond
    that the drawing is slow (around 20 s for 200), below that the pattern
    is hard to see. Press Return/Enter to overlay the diagonals that the
    primes visibly follow.

    NOTE(review): depends on module-level helpers Spirale() (total point
    count for d turns, presumably) and NP() (primality test) defined
    elsewhere in the file.
    """
    main = Tk()
    main.geometry('1000x1000+200+200')
    main.title("Spirale DE TOTO")

    def clavier(event):
        # On Return, annotate the canvas and draw the diagonals primes align on.
        touche = event.keysym
        if touche == "Return":
            canvas.create_text(500, 30, text='On voit clairement que les nombres premiers en spirales suivent des diagonales précisent')
            canvas.create_line(123, 1000, 1000, 123)
            canvas.create_line(0, 883, 883, 0)
            canvas.create_line(0, 643, 643, 0)
            canvas.create_line(363, 1000, 1000, 363)

    canvas = Canvas(main, height=1000, width=1000)
    canvas.pack()
    canvas.focus_set()
    canvas.bind("<Key>", clavier)
    # U..Z delimit the four sides of the current spiral ring; k counts rings.
    U, V, W, Z = 1, 2, 3, 4
    x = 500
    y = 500
    k = 1
    for i in range(1, Spirale(d)[0] + 1):
        if i == 1:
            # Mark the spiral origin in red.
            canvas.create_text(x, y, text=".", fill='red', font="Arial 8 bold")
        if NP(i):
            # Mark primes in black.
            canvas.create_text(x, y, text=".", fill='black', font="Arial 8 bold")
        # Walk the spiral: move left / down / right / up by 3 px depending
        # on which segment of the current ring index i falls in.
        if i >= U and i < V:
            x -= 3
        if i >= V and i < W:
            y += 3
        if i >= W:
            x += 3
        if i < U:
            y -= 3
        if i == Z:
            # Ring finished: advance the segment boundaries to the next ring.
            U, V, W, Z = (U + 8 * (k - 1) + 6, V + 8 * (k - 1) + 8,
                          W + 8 * (k - 1) + 10, Z + 8 * (k - 1) + 12)
            k += 1
    main.mainloop()
d est le nombre de tours, lors de l'utilisation du programme , pour percevoir le phenomène de Euler, veuillez mettre un nombre de tours de l'ordre de 150, au delà le programme est très complexe ( pour 200 cela dure 20s) , et en dessous on ne voit pas bien le phenomène, veullez appuyer sur enter(ou return)
625941c26fb2d068a760f043
def doStart(self):
    """No-op start hook: this job has nothing to do when started."""
    return None
void Akonadi.UnlinkJob.doStart()
625941c27047854f462a13b3
def predict(self, inputs, turnoff_noise=False, continuing=True):
    """Run the network on an input sequence and return its outputs.

    (Translated from the original Japanese docstring.)

    :param inputs: array of shape (ninput, ntime), one column per time step.
    :param turnoff_noise: if True, permanently set self.noise_level to 0
        (note: this mutates the instance).
    :param continuing: if True, continue from the last stored
        input/state/output; if False, reset them all to zeros first.
    :return: outputs, array of shape (noutput, ntime).
    """
    if turnoff_noise:
        self.noise_level = 0
    if not continuing:
        # Start from a cold state instead of where training left off.
        self._last_input = np.zeros((self.ninput, 1))
        self._last_state = np.zeros((self.ninternal, 1))
        self._last_output = np.zeros((self.noutput, 1))
    ntime = inputs.shape[1]
    outputs = np.zeros((self.noutput, ntime))
    states = np.zeros((self.ninternal, ntime))
    # First step chains from the remembered state/output; subsequent steps
    # chain from column t-1. Output = activation(W_out @ [state; input]).
    states[:, 0] = self._update(self._last_state, inputs[:, 0], self._last_output)
    outputs[:, 0] = self.out_activation(self.W_out @ np.hstack((states[:, 0], inputs[:, 0])))
    for t in range(1, ntime):
        states[:, t] = self._update(states[:, t - 1], inputs[:, t], outputs[:, t - 1])
        outputs[:, t] = self.out_activation(self.W_out @ np.hstack((states[:, t], inputs[:, t])))
    return outputs
inputs: ninput x ntime continuing: 最後の訓練したstateでつづけるか turnoff_noise: ノイズを消すかどうか Return: outputs: noutput x ntime
625941c2aad79263cf3909e6
def resnet34(num_classes=1000):
    """Construct a ResNet-34 model.

    Args:
        num_classes (int): number of output classes (default 1000).

    Returns:
        ResNet: a ResNet built from BasicBlock with stage depths [3, 4, 6, 3].
    """
    # 3-4-6-3 BasicBlock stages are the standard ResNet-34 configuration.
    # (Fixed docstring: there is no `pretrained` parameter on this builder.)
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
Constructs a ResNet-34 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
625941c2bde94217f3682d9a
def compareMagnitudes(mags1, mags2, unc=None, unc2=None, ignore=[], verbose=True):
    """Compare two sets of magnitudes with a chi-square statistic.

    For every band present in mags1, mags2 and unc (and not in `ignore`),
    computes the magnitude differences, the uncertainty-weighted best-fit
    offset between the two sets, and the chi-square of the offset-corrected
    differences.

    :param mags1: dict mapping band/filter name -> magnitude.
    :param mags2: dict mapping band/filter name -> magnitude.
    :param unc: dict mapping band/filter name -> uncertainty on the
        difference; bands missing from unc are skipped. Defaults to {}
        (previously a None default crashed on unc.keys()).
    :param unc2: unused; kept for interface compatibility.
    :param ignore: bands to exclude from the comparison.
    :param verbose: unused; kept for interface compatibility.
    :return: (chi2, offset). If no bands overlap, the sums are 0/0 and the
        result is nan (matching the original behavior for empty overlap).
    """
    if unc is None:
        unc = {}
    if ignore is None:
        ignore = []
    dm, em = [], []
    for f in mags1:
        if f in mags2 and f in unc and f not in ignore:
            dm.append(mags1[f] - mags2[f])
            em.append(unc[f])
    dm = numpy.array(dm)
    em = numpy.array(em)
    # Inverse-variance weighted mean difference = best-fit offset.
    offset = numpy.sum(dm / em**2) / numpy.sum(1. / em**2)
    dmo = dm - offset
    return numpy.sum((dmo / em)**2), offset
Compares two sets of magnitudes, returning the chi-square statistic and the uncertainty-weighted best-fit offset between them.
625941c2a79ad161976cc0ed
def remove_OMC(self):
    """Remove the OMC and OMCpath blocks from the kat model.

    After the blocks are removed, the SR mirror (SRAR node 1) is reconnected
    to the sOut space so that nB1 remains a valid output node.
    """
    self.kat.removeBlock('OMCpath')
    self.kat.removeBlock('OMC')
    # Re-route sOut's first node onto the SRAR back node so the output
    # chain stays connected without the OMC path.
    self.kat.nodes.replaceNode(self.kat.sOut, self.kat.sOut.nodes[0],
                               self.kat.components['SRAR'].nodes[1].name)
Method for removing the OMC and OMCpath blocks. The SR mirror is reconnected to the sOut space, so that nB1 remains a valid output node.
625941c292d797404e304131
def test_check_known_host_exists(self):
    """Verify ssh.check_known_host returns 'exists' for a known host.

    Seeds a known_hosts file from the test fixtures, then checks
    github.com twice: once matching by fingerprint, once by raw key.
    NOTE(review): integration-test method — relies on integration.FILES,
    KNOWN_HOSTS, GITHUB_FINGERPRINT, self.key and self.run_function from
    the enclosing suite.
    """
    # Copy the fixture known_hosts into place for this test run.
    shutil.copyfile(
        os.path.join(integration.FILES, 'ssh', 'known_hosts'),
        KNOWN_HOSTS)
    arg = ['root', 'github.com']
    kwargs = {'config': KNOWN_HOSTS}
    # Match by public-key fingerprint.
    ret = self.run_function('ssh.check_known_host', arg,
                            **dict(kwargs, fingerprint=GITHUB_FINGERPRINT))
    self.assertEqual(ret, 'exists')
    # Match by the raw key itself.
    ret = self.run_function('ssh.check_known_host', arg,
                            **dict(kwargs, key=self.key))
    self.assertEqual(ret, 'exists')
Verify check_known_host_exists
625941c266673b3332b92039
def _calc_check_digits(number): <NEW_LINE> <INDENT> check = int(number) % 97 <NEW_LINE> return '%02d' % (check or 97)
Calculate the check digits over the provided part of the number.
625941c2f7d966606f6a9faa
def _setup_os_metadatas(metadata_session):
    """Initialize the os metadata table using the given DB session."""
    logging.info('setup os metadata table')
    # NOTE(review): function-scope import — presumably to avoid an import
    # cycle at module load time; confirm before hoisting to the top.
    from compass.db.api import metadata
    metadata.add_os_metadata_internal(metadata_session)
Initialize os metadata table.
625941c23346ee7daa2b2d13
def report(self, simulation, state, accept_move=True):
    """Generate a report, but only when the MC move was accepted.

    Parameters
    ----------
    simulation : simtk.openmm.app.Simulation
        The Simulation to generate a report for.
    state : simtk.openmm.State
        The current state of the simulation.
    accept_move : bool
        Whether or not the MC move is valid; rejected moves produce no report.
    """
    if accept_move:
        # Delegate to the parent reporter only for accepted moves.
        super(MCReporter, self).report(simulation, state)
    return
Generate a report. Parameters ---------- simulation : simtk.openmm.app.Simulation The Simulation to generate a report for state : simtk.openmm.State The current state of the simulation accept_move : bool Whether or not the MC move is valid
625941c2004d5f362079a2dd
def remove(self, valor):
    """Remove every element equal to *valor* from the linked list.

    Fixes two defects in the original: the head node was never checked
    (a matching head was left in place), and an empty list raised
    AttributeError. self.tamanho is decremented once per removed node.

    :param valor: value to remove (compared against node.dado).
    """
    # Drop matching nodes at the head first (the old loop skipped them).
    while self.head is not None and self.head.dado == valor:
        self.head = self.head.proximo
        self.tamanho -= 1
    # Then unlink matches in the rest of the list.
    atual = self.head
    while atual is not None and atual.proximo is not None:
        if atual.proximo.dado == valor:
            atual.proximo = atual.proximo.proximo
            self.tamanho -= 1
        else:
            atual = atual.proximo
Remove todos os elementos que forem igual ao valor informado.
625941c230dc7b7665901910
def teleport(self, x, y):
    """Move this player to grid cell (x, y), updating location membership.

    Locations use different methods when a player moves than when an NPC
    does: the old cell is told the player left, the new cell that the
    player entered, then the player's coordinates are updated.
    NOTE(review): relies on the module-level `locations` grid.
    """
    try:
        # Deliberate best-effort: on first placement self.x/self.y may not
        # exist yet (or point at an invalid cell), so failures are ignored.
        locations[self.x][self.y].playerLeaves(self.id)
    except Exception:
        pass
    locations[x][y].playerEnters(self.id)
    self.x = x
    self.y = y
Locations use different methods when a player moves then when a NPC does.
625941c267a9b606de4a7e63
def process_bounds(self, zoom=None):
    """Deprecated alias for bounds_at_zoom(); use that method instead."""
    # Warn every caller, then delegate to the renamed method.
    deprecation = DeprecationWarning("Method renamed to self.bounds_at_zoom(zoom).")
    warnings.warn(deprecation)
    return self.bounds_at_zoom(zoom)
Deprecated.
625941c2e76e3b2f99f3a7b7
def _parse_text(path): <NEW_LINE> <INDENT> split_path = path.split(".") <NEW_LINE> if split_path[-1] == "gz": <NEW_LINE> <INDENT> lang = split_path[-2] <NEW_LINE> with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g: <NEW_LINE> <INDENT> return g.read().decode("utf-8").split("\n"), lang <NEW_LINE> <DEDENT> <DEDENT> if split_path[-1] == "txt": <NEW_LINE> <INDENT> lang = split_path[-2].split("_")[-1] <NEW_LINE> lang = "zh" if lang in ("ch", "cn") else lang <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> lang = split_path[-1] <NEW_LINE> <DEDENT> with open(path, "rb") as f: <NEW_LINE> <INDENT> return f.read().decode("utf-8").split("\n"), lang
Returns the sentences from a single text file, which may be gzipped.
625941c2d164cc6175782cf6
def delete_affinity_group(kwargs=None, conn=None, call=None):
    """
    .. versionadded:: 2015.8.0

    Delete a specific affinity group associated with the account

    CLI Examples:

    .. code-block:: bash

        salt-cloud -f delete_affinity_group my-azure name=my_affinity_group
    """
    # Only callable as a function (-f/--function), never as an action.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The delete_affinity_group function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    if 'name' not in kwargs:
        raise SaltCloudSystemExit('A name must be specified as "name"')
    if not conn:
        conn = get_conn()
    try:
        conn.delete_affinity_group(kwargs['name'])
        return {'Success': 'The affinity group was successfully deleted'}
    except AzureMissingResourceHttpError as exc:
        # Fixed: exceptions have no `.message` attribute on Python 3, which
        # made this handler raise AttributeError; use str(exc) instead.
        raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], str(exc)))
.. versionadded:: 2015.8.0 Delete a specific affinity group associated with the account CLI Examples: .. code-block:: bash salt-cloud -f delete_affinity_group my-azure name=my_affinity_group
625941c255399d3f0558865b