code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def setup(self): <NEW_LINE> <INDENT> self.no_runs = 100 <NEW_LINE> self.ds_1_1_1 = dcmread(EXPL_1_1_1F) <NEW_LINE> self.ds_1_1_3 = dcmread(EXPL_1_1_3F) <NEW_LINE> self.ds_8_1_1 = dcmread(EXPL_8_1_1F) <NEW_LINE> self.ds_8_1_2 = dcmread(EXPL_8_1_2F) <NEW_LINE> self.ds_8_3_1 = dcmread(EXPL_8_3_1F) <NEW_LINE> self.ds_8_3_2 = dcmread(EXPL_8_3_2F) <NEW_LINE> self.ds_16_1_1 = dcmread(EXPL_16_1_1F) <NEW_LINE> self.ds_16_1_10 = dcmread(EXPL_16_1_10F) <NEW_LINE> self.ds_16_3_1 = dcmread(EXPL_16_3_1F) <NEW_LINE> self.ds_16_3_2 = dcmread(EXPL_16_3_2F) <NEW_LINE> self.ds_32_1_1 = dcmread(IMPL_32_1_1F) <NEW_LINE> self.ds_32_1_15 = dcmread(IMPL_32_1_15F) <NEW_LINE> self.ds_32_3_1 = dcmread(EXPL_32_3_1F) <NEW_LINE> self.ds_32_3_2 = dcmread(EXPL_32_3_2F)
Setup the tests.
625941c232920d7e50b28179
def located_at(self, location): <NEW_LINE> <INDENT> if location.__class__ not in ( Location, ): <NEW_LINE> <INDENT> raise RelationLocatedAtError() <NEW_LINE> <DEDENT> return location in self._located_at_locations
Check `locatedAt` relation with given `location` object. FIXME: Document locatedAt relation. :param location: Object to validate relation `locatedAt` with. :type location: Location :return: True if `location` is related to `self` with `locatedAt`. :rtype: bool
625941c216aa5153ce362424
def preprocess(words_file = "../tools/word_data_unix.pkl", authors_file="../tools/email_authors.pkl"): <NEW_LINE> <INDENT> authors_file_handler = open(authors_file, "rb") <NEW_LINE> authors = pickle.load(authors_file_handler) <NEW_LINE> authors_file_handler.close() <NEW_LINE> words_file_handler = open(words_file, "rb") <NEW_LINE> word_data = pickle.load(words_file_handler) <NEW_LINE> words_file_handler.close() <NEW_LINE> features_train, features_test, labels_train, labels_test = train_test_split(word_data, authors, test_size=0.1, random_state=42) <NEW_LINE> vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') <NEW_LINE> features_train_transformed = vectorizer.fit_transform(features_train) <NEW_LINE> features_test_transformed = vectorizer.transform(features_test) <NEW_LINE> selector = SelectPercentile(f_classif, percentile=10) <NEW_LINE> selector.fit(features_train_transformed, labels_train) <NEW_LINE> features_train_transformed = selector.transform(features_train_transformed).toarray() <NEW_LINE> features_test_transformed = selector.transform(features_test_transformed).toarray() <NEW_LINE> print("no. of Chris training emails:", sum(labels_train)) <NEW_LINE> print("no. of Sara training emails:", len(labels_train)-sum(labels_train)) <NEW_LINE> return features_train_transformed, features_test_transformed, labels_train, labels_test
this function takes a pre-made list of email texts (by default word_data.pkl) and the corresponding authors (by default email_authors.pkl) and performs a number of preprocessing steps: -- splits into training/testing sets (10% testing) -- vectorizes into tfidf matrix -- selects/keeps most helpful features after this, the features and labels are put into numpy arrays, which play nice with sklearn functions 4 objects are returned: -- training/testing features -- training/testing labels
625941c25e10d32532c5eed2
def delete_password(acc): <NEW_LINE> <INDENT> Password.delete_password(acc)
This is a function that will delete the password Args: acc - the acc of the pass the user wants to delete
625941c2a05bb46b383ec7ce
def validate(self): <NEW_LINE> <INDENT> self.model.eval() <NEW_LINE> val_loss = 0 <NEW_LINE> num_vis = 8 <NEW_LINE> visualizations = [] <NEW_LINE> label_trues, label_preds = [], [] <NEW_LINE> for batch_idx, (data, target) in tqdm.tqdm( enumerate(self.val_loader), total=len(self.val_loader), desc='Valid iteration=%d' % self.iteration, ncols=80, leave=False): <NEW_LINE> <INDENT> if self.cuda: <NEW_LINE> <INDENT> data, target = data.cuda(), target.cuda() <NEW_LINE> <DEDENT> data, target = Variable(data, volatile=True), Variable(target) <NEW_LINE> score = self.model(data) <NEW_LINE> loss = cross_entropy2d(score, target, size_average=self.size_average) <NEW_LINE> if np.isnan(float(loss.data[0])): <NEW_LINE> <INDENT> raise ValueError('loss is nan while validating') <NEW_LINE> <DEDENT> val_loss += float(loss.data[0]) / len(data) <NEW_LINE> imgs = data.data.cpu() <NEW_LINE> lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :] <NEW_LINE> lbl_true = target.data.cpu() <NEW_LINE> for img, lt, lp in zip(imgs, lbl_true, lbl_pred): <NEW_LINE> <INDENT> img = self.val_loader.dataset.untransform(img.numpy()) <NEW_LINE> lt = lt.numpy() <NEW_LINE> label_trues.append(lt) <NEW_LINE> label_preds.append(lp) <NEW_LINE> if len(visualizations) < num_vis: <NEW_LINE> <INDENT> viz = fcn.utils.visualize_segmentation( lbl_pred=lp, lbl_true=lt, img=img, n_class=self.n_class) <NEW_LINE> visualizations.append(viz) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> metrics = torchfcn.utils.label_accuracy_score( label_trues, label_preds, self.n_class) <NEW_LINE> out = osp.join(self.out, 'visualization_viz') <NEW_LINE> if not osp.exists(out): <NEW_LINE> <INDENT> os.makedirs(out) <NEW_LINE> <DEDENT> out_file = osp.join(out, 'iter%012d.jpg' % self.iteration) <NEW_LINE> scipy.misc.imsave(out_file, fcn.utils.get_tile_image(visualizations)) <NEW_LINE> val_loss /= len(self.val_loader) <NEW_LINE> with open(osp.join(self.out, 'log.csv'), 'a') as f: <NEW_LINE> <INDENT> elapsed_time = 
datetime.datetime.now(pytz.timezone('Asia/Tokyo')) - self.timestamp_start <NEW_LINE> log = [self.epoch, self.iteration] + [''] * 5 + [val_loss] + list(metrics) + [elapsed_time] <NEW_LINE> log = map(str, log) <NEW_LINE> f.write(','.join(log) + '\n') <NEW_LINE> <DEDENT> mean_iu = metrics[2] <NEW_LINE> is_best = mean_iu > self.best_mean_iu <NEW_LINE> if is_best: <NEW_LINE> <INDENT> self.best_mean_iu = mean_iu <NEW_LINE> <DEDENT> torch.save({ 'epoch': self.epoch, 'iteration': self.iteration, 'arch': self.model.__class__.__name__, 'optim_state_dict': self.optim.state_dict(), 'model_state_dict': self.model.state_dict(), 'best_mean_iu': self.best_mean_iu, }, osp.join(self.out, 'checkpoint.pth.tar')) <NEW_LINE> if is_best: <NEW_LINE> <INDENT> shutil.copy(osp.join(self.out, 'checkpoint.pth.tar'), osp.join(self.out, 'model_best.pth.tar'))
Function to validate a training model on the val split.
625941c2b545ff76a8913dc1
def change_permissions(self, url, path, command=None): <NEW_LINE> <INDENT> LOG.info("Changing monkey's permissions") <NEW_LINE> if 'windows' in self.host.os['type']: <NEW_LINE> <INDENT> LOG.info("Permission change not required for windows") <NEW_LINE> return True <NEW_LINE> <DEDENT> if not command: <NEW_LINE> <INDENT> command = CHMOD_MONKEY % {'monkey_path': path} <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> resp = self.exploit(url, command) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> LOG.error("Something went wrong while trying to change permission: %s" % e) <NEW_LINE> return False <NEW_LINE> <DEDENT> if type(resp) is bool: <NEW_LINE> <INDENT> LOG.info("Permission change finished") <NEW_LINE> return resp <NEW_LINE> <DEDENT> if 'Operation not permitted' in resp: <NEW_LINE> <INDENT> LOG.error("Missing permissions to make monkey executable") <NEW_LINE> return False <NEW_LINE> <DEDENT> elif 'No such file or directory' in resp: <NEW_LINE> <INDENT> LOG.error("Could not change permission because monkey was not found. Check path parameter.") <NEW_LINE> return False <NEW_LINE> <DEDENT> LOG.info("Permission change finished") <NEW_LINE> return resp
Method for linux hosts. Makes monkey executable :param url: Where to send malicious packets :param path: Path to monkey on remote host :param command: Formatted command for permission change or None :return: response, False if failed and True if permission change is not needed
625941c2bf627c535bc13179
def is_suspicious(self, file_content): <NEW_LINE> <INDENT> last_tweet = json.loads(file_content[0]) <NEW_LINE> if self.is_multiple_of_seven(last_tweet['user']['id']): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> first_tweet = json.loads(file_content[-1]) <NEW_LINE> first_date = datetime.datetime.strptime( first_tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y') <NEW_LINE> last_date = datetime.datetime.strptime( last_tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y') <NEW_LINE> if self.less_than_ten_days_between(first_date, last_date): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Method to determine whether a specific user is suspicious of being a bot :param file_content: All the tweets from a user :type file_content: list :returns: True or False :rtype: boolean
625941c2c4546d3d9de729dd
def parse_cli(arg_list=None, origin=os.getcwd(), arg_parser=None, key_value_delimiters=('=', ':'), comment_seperators=(), key_delimiters=(',',), section_override_delimiters=('.',)): <NEW_LINE> <INDENT> arg_parser = default_arg_parser() if arg_parser is None else arg_parser <NEW_LINE> origin += os.path.sep <NEW_LINE> sections = OrderedDict(default=Section('Default')) <NEW_LINE> line_parser = LineParser(key_value_delimiters, comment_seperators, key_delimiters, {}, section_override_delimiters) <NEW_LINE> for arg_key, arg_value in sorted( vars(arg_parser.parse_args(arg_list)).items()): <NEW_LINE> <INDENT> if arg_key == 'settings' and arg_value is not None: <NEW_LINE> <INDENT> parse_custom_settings(sections, arg_value, origin, line_parser) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(arg_value, list): <NEW_LINE> <INDENT> arg_value = ','.join([str(val) for val in arg_value]) <NEW_LINE> <DEDENT> append_to_sections(sections, arg_key, arg_value, origin, from_cli=True) <NEW_LINE> <DEDENT> <DEDENT> return sections
Parses the CLI arguments and creates sections out of it. :param arg_list: The CLI argument list. :param origin: Directory used to interpret relative paths given as argument. :param arg_parser: Instance of ArgParser that is used to parse none-setting arguments. :param key_value_delimiters: Delimiters to separate key and value in setting arguments. :param comment_seperators: Allowed prefixes for comments. :param key_delimiters: Delimiter to separate multiple keys of a setting argument. :param section_override_delimiters: The delimiter to delimit the section from the key name (e.g. the '.' in sect.key = value). :return: A dictionary holding section names as keys and the sections themselves as value.
625941c2283ffb24f3c558ae
def userLoginAction(): <NEW_LINE> <INDENT> return auth.userLoginAction(userData)
这里是让用户进行登录操作的提示 :return:
625941c25510c4643540f394
def getpixel(self, xy): <NEW_LINE> <INDENT> self.load() <NEW_LINE> if self.pyaccess: <NEW_LINE> <INDENT> return self.pyaccess.getpixel(xy) <NEW_LINE> <DEDENT> return self.im.getpixel(xy)
Returns the pixel value at a given position. :param xy: The coordinate, given as (x, y). See :ref:`coordinate-system`. :returns: The pixel value. If the image is a multi-layer image, this method returns a tuple.
625941c2e5267d203edcdc4a
def main(wf): <NEW_LINE> <INDENT> name = wf.args[0] <NEW_LINE> argcache = _arg_cache(name) <NEW_LINE> if not os.path.exists(argcache): <NEW_LINE> <INDENT> log.critical('No arg cache found : {!r}'.format(argcache)) <NEW_LINE> return 1 <NEW_LINE> <DEDENT> with open(argcache, 'rb') as file: <NEW_LINE> <INDENT> data = pickle.load(file) <NEW_LINE> <DEDENT> args = data['args'] <NEW_LINE> kwargs = data['kwargs'] <NEW_LINE> os.unlink(argcache) <NEW_LINE> pidfile = _pid_file(name) <NEW_LINE> _background() <NEW_LINE> with open(pidfile, 'wb') as file: <NEW_LINE> <INDENT> file.write('{}'.format(os.getpid())) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> log.debug('Task `{}` running'.format(name)) <NEW_LINE> log.debug('cmd : {!r}'.format(args)) <NEW_LINE> retcode = subprocess.call(args, **kwargs) <NEW_LINE> if retcode: <NEW_LINE> <INDENT> log.error('Command failed with [{}] : {!r}'.format(retcode, args)) <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> if os.path.exists(pidfile): <NEW_LINE> <INDENT> os.unlink(pidfile) <NEW_LINE> <DEDENT> log.debug('Task `{}` finished'.format(name))
Load cached arguments, fork into background, then call :meth:`subprocess.call` with cached arguments
625941c2bf627c535bc1317a
def insert(self, word: 'str') -> 'None': <NEW_LINE> <INDENT> p=self.dicts <NEW_LINE> for c in word: <NEW_LINE> <INDENT> if c not in p: <NEW_LINE> <INDENT> p[c]={} <NEW_LINE> <DEDENT> p=p[c] <NEW_LINE> <DEDENT> p['#']=True
Inserts a word into the trie.
625941c221a7993f00bc7c98
def select_commercial_trunks_tab(self): <NEW_LINE> <INDENT> self.select_static_tab(self.commercial_trunks_tab_locator, 'commercial trunks tab not found before specified time')
Implementing select commercial trunks tab functionality :return:
625941c296565a6dacc8f677
def train(self,k=10): <NEW_LINE> <INDENT> train_data = self.sentence_data <NEW_LINE> attributes = set(train_data[0].attributes.keys()) <NEW_LINE> weighted_data = WeightedData(train_data) <NEW_LINE> self.ensemble =[] <NEW_LINE> for ctr in range(k): <NEW_LINE> <INDENT> stump = makeTree(train_data,attributes,[],1) <NEW_LINE> error_sum=0 <NEW_LINE> for data in train_data: <NEW_LINE> <INDENT> predicted_tag = stump.decide(data) <NEW_LINE> if predicted_tag != data.tag: <NEW_LINE> <INDENT> error_sum += data.weight <NEW_LINE> <DEDENT> <DEDENT> for i in range(len(train_data)): <NEW_LINE> <INDENT> data = train_data[i] <NEW_LINE> predicted_tag = stump.decide(data) <NEW_LINE> if predicted_tag == data.tag: <NEW_LINE> <INDENT> new_weight = data.weight * (error_sum/(1-error_sum)) <NEW_LINE> weighted_data.update_weight(i,new_weight) <NEW_LINE> <DEDENT> <DEDENT> weighted_data.normalize() <NEW_LINE> stump.weight = math.log((1-error_sum)/error_sum) <NEW_LINE> self.ensemble.append(stump) <NEW_LINE> <DEDENT> out_file_obj = open(self.out_file,'wb') <NEW_LINE> pickle.dump(self,out_file_obj) <NEW_LINE> out_file_obj.close()
Generate an ADA boosted ensemble and store it in a hypothesis. :param k maximum number of stumps to be generated: :return:
625941c2627d3e7fe0d68dfa
def findTheDifference(self, s, t): <NEW_LINE> <INDENT> letters = dict() <NEW_LINE> for letter in s: <NEW_LINE> <INDENT> letters[letter] = letters.get(letter, 0) + 1 <NEW_LINE> <DEDENT> for letter in t: <NEW_LINE> <INDENT> if letter not in letters or letters[letter] == 0: <NEW_LINE> <INDENT> return letter <NEW_LINE> <DEDENT> letters[letter] -= 1
:type s: str :type t: str :rtype: str
625941c282261d6c526ab448
@requires_mne <NEW_LINE> def test_average_forward_solution(): <NEW_LINE> <INDENT> fwd = read_forward_solution(fname) <NEW_LINE> assert_raises(TypeError, average_forward_solutions, 1) <NEW_LINE> assert_raises(ValueError, average_forward_solutions, []) <NEW_LINE> assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0]) <NEW_LINE> assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0]) <NEW_LINE> assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0]) <NEW_LINE> assert_raises(TypeError, average_forward_solutions, [1, fwd]) <NEW_LINE> fwd_copy = average_forward_solutions([fwd]) <NEW_LINE> assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data']) <NEW_LINE> fwd_copy['sol']['data'] *= 0.5 <NEW_LINE> fname_copy = op.join(temp_dir, 'fwd.fif') <NEW_LINE> write_forward_solution(fname_copy, fwd_copy, overwrite=True) <NEW_LINE> cmd = ('mne_average_forward_solutions --fwd %s --fwd %s --out %s' % (fname, fname_copy, fname_copy)) <NEW_LINE> st, output = commands.getstatusoutput(cmd) <NEW_LINE> if st != 0: <NEW_LINE> <INDENT> raise RuntimeError('could not average forward solutions:\n%s' % output) <NEW_LINE> <DEDENT> fwd_ave = average_forward_solutions([fwd, fwd_copy]) <NEW_LINE> assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
Test averaging forward solutions
625941c297e22403b379cf44
def validate_token_ranges(tree): <NEW_LINE> <INDENT> testlevel = 1 <NEW_LINE> testclass = 'Format' <NEW_LINE> covered = set() <NEW_LINE> for cols in tree: <NEW_LINE> <INDENT> if not is_multiword_token(cols): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> m = interval_re.match(cols[ID]) <NEW_LINE> if not m: <NEW_LINE> <INDENT> testid = 'invalid-word-interval' <NEW_LINE> testmessage = "Spurious word interval definition: '%s'." % cols[ID] <NEW_LINE> warn(testmessage, testclass, testlevel=testlevel, testid=testid) <NEW_LINE> continue <NEW_LINE> <DEDENT> start, end = m.groups() <NEW_LINE> try: <NEW_LINE> <INDENT> start, end = int(start), int(end) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> assert False, 'internal error' <NEW_LINE> <DEDENT> if not start < end: <NEW_LINE> <INDENT> testid = 'reversed-word-interval' <NEW_LINE> testmessage = 'Spurious token interval %d-%d' % (start, end) <NEW_LINE> warn(testmessage, testclass, testlevel=testlevel, testid=testid) <NEW_LINE> continue <NEW_LINE> <DEDENT> if covered & set(range(start, end+1)): <NEW_LINE> <INDENT> testid = 'overlapping-word-intervals' <NEW_LINE> testmessage = 'Range overlaps with others: %s' % cols[ID] <NEW_LINE> warn(testmessage, testclass, testlevel=testlevel, testid=testid) <NEW_LINE> <DEDENT> covered |= set(range(start, end+1))
Checks that the word ranges for multiword tokens are valid.
625941c23c8af77a43ae374a
def validate(self): <NEW_LINE> <INDENT> return None
Validate request before rendering it. This hook gives a chance for all view classes to validate request before starting to render it. If validation passes, this function must return None. If there is validation erros, this function must return response object with error message. Currently this functionality is only used in sboard.ajax.JsonView.
625941c20a50d4780f666e3c
def build_seq_from_file(self, f, size_lim=None): <NEW_LINE> <INDENT> off = f.tell() <NEW_LINE> rv = [] <NEW_LINE> if (size_lim is None): <NEW_LINE> <INDENT> off_lim = f.seek(0,2) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> off_lim = off + size_lim <NEW_LINE> <DEDENT> while (off < off_lim): <NEW_LINE> <INDENT> f.seek(off) <NEW_LINE> (el, size) = self.build_from_file(f) <NEW_LINE> rv.append(el) <NEW_LINE> off += size <NEW_LINE> <DEDENT> if (off != off_lim): <NEW_LINE> <INDENT> raise EBMLError('Element size / filesize mismatch.') <NEW_LINE> <DEDENT> return rv
Deserialize sequence of EBML elements from filelike, up to size_lim (or EOF if not specified).
625941c210dbd63aa1bd2b4f
def test_get_monday_present(self): <NEW_LINE> <INDENT> week = test.create_week() <NEW_LINE> week.populate() <NEW_LINE> monday = funct.get_monday(week=week) <NEW_LINE> current_mon = date(2017,7,10) <NEW_LINE> self.assertEqual(monday, current_mon)
Returns most recent Monday. To test, uncomment and change current_mon date. Tested and works.
625941c2cb5e8a47e48b7a58
def _get_completion_model(self, completion, pos_args): <NEW_LINE> <INDENT> if completion == usertypes.Completion.option: <NEW_LINE> <INDENT> section = pos_args[0] <NEW_LINE> model = instances.get(completion).get(section) <NEW_LINE> <DEDENT> elif completion == usertypes.Completion.value: <NEW_LINE> <INDENT> section = pos_args[0] <NEW_LINE> option = pos_args[1] <NEW_LINE> try: <NEW_LINE> <INDENT> model = instances.get(completion)[section][option] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> model = None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> model = instances.get(completion) <NEW_LINE> <DEDENT> if model is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return sortfilter.CompletionFilterModel(source=model, parent=self)
Get a completion model based on an enum member. Args: completion: A usertypes.Completion member. pos_args: The positional args entered before the cursor. Return: A completion model or None.
625941c20c0af96317bb8193
def get_root_path(self): <NEW_LINE> <INDENT> return realpath(os.getcwd())
Return the root file path
625941c20c0af96317bb8194
def white(s = ""): <NEW_LINE> <INDENT> return _put_color(WHITE) + _convert_value(s);
white(s) -> str Put the white foreground escape sequence in front of the 's'. This function is affected by the values of: ColorMode.mode and ConvertMode.mode.
625941c2ff9c53063f47c1a0
def file_dict(*packages): <NEW_LINE> <INDENT> errors = [] <NEW_LINE> ret = {} <NEW_LINE> pkgs = {} <NEW_LINE> if not packages: <NEW_LINE> <INDENT> cmd = 'rpm -qa --qf \'%{NAME} %{VERSION}\\n\'' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cmd = 'rpm -q --qf \'%{{NAME}} %{{VERSION}}\\n\' {0}'.format( ' '.join(packages) ) <NEW_LINE> <DEDENT> out = __salt__['cmd.run'](cmd, output_loglevel='trace') <NEW_LINE> for line in out.splitlines(): <NEW_LINE> <INDENT> if 'is not installed' in line: <NEW_LINE> <INDENT> errors.append(line) <NEW_LINE> continue <NEW_LINE> <DEDENT> comps = line.split() <NEW_LINE> pkgs[comps[0]] = {'version': comps[1]} <NEW_LINE> <DEDENT> for pkg in pkgs.keys(): <NEW_LINE> <INDENT> files = [] <NEW_LINE> cmd = 'rpm -ql {0}'.format(pkg) <NEW_LINE> out = __salt__['cmd.run'](cmd, output_loglevel='trace') <NEW_LINE> for line in out.splitlines(): <NEW_LINE> <INDENT> files.append(line) <NEW_LINE> <DEDENT> ret[pkg] = files <NEW_LINE> <DEDENT> return {'errors': errors, 'packages': ret}
List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict
625941c24d74a7450ccd416f
def open_parent(self): <NEW_LINE> <INDENT> self.ensure_one() <NEW_LINE> address_form_id = self.env.ref("base.view_partner_address_form").id <NEW_LINE> return { "type": "ir.actions.act_window", "res_model": "res.partner", "view_mode": "form", "views": [(address_form_id, "form")], "res_id": self.parent_id.id, "target": "new", "flags": {"form": {"action_buttons": True}}, }
Utility method used to add an "Open Parent" button in partner views
625941c2dd821e528d63b156
def get_jump_epoch_designmatrix(self, cand, fitpatch=None): <NEW_LINE> <INDENT> Umat = self.Umat(cand) <NEW_LINE> Uinv = self.Umat_i(Umat=Umat) <NEW_LINE> Mj = self.get_jump_designmatrix(cand, fitpatch=fitpatch) <NEW_LINE> return np.dot(Uinv, Mj)
Obtain the epoch-domain design matrix of inter-coherence-patch jumps Obtain the design matrix of jumps that disconnect all the coherence patches that are not phase connected. This version is carried out in the epoch-domain (so it's Ui Mj) :param cand: CandidateSolution candidate :param fitpatch: If not None, exclude this patch from the designmatrix jumps
625941c256b00c62f0f14604
def hash1(self, file=None): <NEW_LINE> <INDENT> if verbose > 3: <NEW_LINE> <INDENT> print("Calculating hash1:") <NEW_LINE> <DEDENT> if file is None: <NEW_LINE> <INDENT> file = self.fp <NEW_LINE> <DEDENT> oldpos = file.tell() <NEW_LINE> file.seek(-21, 2) <NEW_LINE> end = file.tell() <NEW_LINE> file.seek(0) <NEW_LINE> hash1 = hashlib.sha1() <NEW_LINE> rlen = end <NEW_LINE> while rlen > 0: <NEW_LINE> <INDENT> hash1.update(file.read(min(CHUNK_SIZE, rlen))) <NEW_LINE> rlen = end - file.tell() <NEW_LINE> <DEDENT> file.seek(oldpos) <NEW_LINE> if verbose > 3: <NEW_LINE> <INDENT> print(hash1.hexdigest()) <NEW_LINE> <DEDENT> return hash1
Calculate first hash value.
625941c2009cb60464c6335f
@contextlib.contextmanager <NEW_LINE> def immutable_object_cache(ctx, config): <NEW_LINE> <INDENT> log.info("start immutable object cache daemon") <NEW_LINE> for client, client_config in config.items(): <NEW_LINE> <INDENT> (remote,) = ctx.cluster.only(client).remotes.keys() <NEW_LINE> remote.run( args=[ 'sudo', 'killall', '-s', '9', 'ceph-immutable-object-cache', run.Raw('||'), 'true', ] ) <NEW_LINE> remote.run( args=[ 'ceph-immutable-object-cache', '-b', ] ) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> yield <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> log.info("check and cleanup immutable object cache") <NEW_LINE> for client, client_config in config.items(): <NEW_LINE> <INDENT> client_config = client_config if client_config is not None else dict() <NEW_LINE> (remote,) = ctx.cluster.only(client).remotes.keys() <NEW_LINE> cache_path = client_config.get('immutable object cache path', '/tmp/ceph-immutable-object-cache') <NEW_LINE> ls_command = '"$(ls {} )"'.format(cache_path) <NEW_LINE> remote.run( args=[ 'test', '-n', run.Raw(ls_command), ] ) <NEW_LINE> remote.run( args=[ 'sudo', 'killall', '-s', '9', 'ceph-immutable-object-cache', run.Raw('||'), 'true', ] ) <NEW_LINE> remote.run( args=[ 'sudo', 'rm', '-rf', cache_path, run.Raw('||'), 'true', ] )
setup and cleanup immutable object cache
625941c226238365f5f0ee17
def softmax(predictions): <NEW_LINE> <INDENT> shape = predictions.shape <NEW_LINE> if len(shape) == 1: <NEW_LINE> <INDENT> p = predictions.copy()[None] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p = predictions.copy() <NEW_LINE> <DEDENT> p -= np.max(p) <NEW_LINE> probs = np.zeros_like(p, dtype=np.float) <NEW_LINE> num_batches, num_classes = p.shape <NEW_LINE> for i in range(num_batches): <NEW_LINE> <INDENT> denominator = np.sum(np.exp(p[i])) <NEW_LINE> for j in range(num_classes): <NEW_LINE> <INDENT> probs[i][j] = np.exp(p[i][j]) / denominator <NEW_LINE> <DEDENT> <DEDENT> return probs
Computes probabilities from scores Arguments: predictions, np array, shape is either (N) or (batch_size, N) - classifier output Returns: probs, np array of the same shape as predictions - probability for every class, 0..1
625941c2e1aae11d1e749c61
def capital_recovery_factor(ir, t): <NEW_LINE> <INDENT> return ir/(1-(1+ir)**-t)
The capital recovery factor is a coefficient applied to a loan to determine annual payments. This function needs an interest rate ir and the number of compounding periods that payments are split across. Example: Calculate annual loan payments for a 20-year loan with a 7 percent interest rate on $100. >>> crf = capital_recovery_factor(.07,20) >>> print ("Capital recovery factor for a loan with a 7 percent annual " + ... "interest rate, paid over 20 years is {crf:0.5f}. If the " + ... "principal was $100, loan payments would be ${lp:0.2f}"). format(crf=crf, lp=100 * crf) # doctest: +NORMALIZE_WHITESPACE Capital recovery factor for a loan with a 7 percent annual interest rate, paid over 20 years is 0.09439. If the principal was $100, loan payments would be $9.44
625941c27047854f462a13b8
@ensure_csrf_cookie <NEW_LINE> def delete_employees(request): <NEW_LINE> <INDENT> deleteids= request.POST['rowids'] <NEW_LINE> for id in deleteids.split(",") : <NEW_LINE> <INDENT> aservc=get_object_or_404(AppschedulerEmployees, pk=int(id) ) <NEW_LINE> aservc.delete() <NEW_LINE> <DEDENT> return HttpResponse(status=204)
Delete all selected employees
625941c23346ee7daa2b2d16
def processPublish(self, session, publish): <NEW_LINE> <INDENT> assert(session in self._session_to_subscriptions) <NEW_LINE> if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(publish.topic)) or ( self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(publish.topic)): <NEW_LINE> <INDENT> if publish.acknowledge: <NEW_LINE> <INDENT> reply = message.Error(message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.INVALID_URI, ["publish with invalid topic URI '{}'".format(publish.topic)]) <NEW_LINE> session._transport.send(reply) <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> if publish.topic in self._topic_to_sessions and self._topic_to_sessions[publish.topic]: <NEW_LINE> <INDENT> subscription, receivers = self._topic_to_sessions[publish.topic] <NEW_LINE> if publish.eligible: <NEW_LINE> <INDENT> eligible = [] <NEW_LINE> for s in publish.eligible: <NEW_LINE> <INDENT> if s in self._session_id_to_session: <NEW_LINE> <INDENT> eligible.append(self._session_id_to_session[s]) <NEW_LINE> <DEDENT> <DEDENT> if eligible: <NEW_LINE> <INDENT> receivers = set(eligible) & receivers <NEW_LINE> <DEDENT> <DEDENT> if publish.exclude: <NEW_LINE> <INDENT> exclude = [] <NEW_LINE> for s in publish.exclude: <NEW_LINE> <INDENT> if s in self._session_id_to_session: <NEW_LINE> <INDENT> exclude.append(self._session_id_to_session[s]) <NEW_LINE> <DEDENT> <DEDENT> if exclude: <NEW_LINE> <INDENT> receivers = receivers - set(exclude) <NEW_LINE> <DEDENT> <DEDENT> if publish.excludeMe is None or publish.excludeMe: <NEW_LINE> <INDENT> me_also = False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> me_also = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> subscription, receivers, me_also = None, [], False <NEW_LINE> <DEDENT> publication = util.id() <NEW_LINE> if publish.acknowledge: <NEW_LINE> <INDENT> msg = message.Published(publish.request, publication) <NEW_LINE> session._transport.send(msg) <NEW_LINE> <DEDENT> if receivers: <NEW_LINE> <INDENT> if 
publish.discloseMe: <NEW_LINE> <INDENT> publisher = session._session_id <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> publisher = None <NEW_LINE> <DEDENT> msg = message.Event(subscription, publication, args = publish.args, kwargs = publish.kwargs, publisher = publisher) <NEW_LINE> for receiver in receivers: <NEW_LINE> <INDENT> if me_also or receiver != session: <NEW_LINE> <INDENT> receiver._transport.send(msg)
Implements :func:`autobahn.wamp.interfaces.IBroker.processPublish`
625941c2cc40096d615958fd
def test_right_bounds_with_next(self): <NEW_LINE> <INDENT> self.testInst.prev() <NEW_LINE> with pytest.raises(StopIteration): <NEW_LINE> <INDENT> self.testInst.next()
Test if passing bounds raises StopIteration.
625941c292d797404e304135
def get_W(self): <NEW_LINE> <INDENT> m = self._model.m <NEW_LINE> n = self._model.n <NEW_LINE> k = self._model.k <NEW_LINE> k_aligned = _lib.ffm_get_k_aligned(k) <NEW_LINE> kALIGN = _lib.ffm_get_kALIGN() <NEW_LINE> align0 = 2*k_aligned <NEW_LINE> align1 = m*align0 <NEW_LINE> W = np.ctypeslib.as_array(self._model.W,(1,n*align1))[0] <NEW_LINE> W = W.reshape((n,m,align0)) <NEW_LINE> I = np.arange(k) + ( np.floor(np.arange(align0)/kALIGN).astype(int)*kALIGN )[:k] <NEW_LINE> W = W[:,:,I] <NEW_LINE> return W
Returns the model vectors W of shape n x m x k, where n is the number of feature indexes and m is the number of fields. Given input X, the computation of probabilites can then be done in python as follows: import itertools i = 0 #the sample index for which we want to compute probabilities sig = lambda x: 1/(1+np.exp(-x)) #sigmoid function pairs = itertools.combinations(X[i],2) proba = sig(sum( (v0*W[i0,j1]) @ (v1*W[i1,j0]) for (j0,i0,v0), (j1,i1,v1) in pairs )) print('python computation:',proba) #to crosscheck, this should be the same when ffm_data is generated from X: print('C computation:',model.predict(ffm_data)[i])
625941c22c8b7c6e89b3576e
def setStyle(self, *args): <NEW_LINE> <INDENT> return _osgText.TextBase_setStyle(self, *args)
setStyle(TextBase self, Style style)
625941c2e8904600ed9f1ed6
def callable_virtualenv(): <NEW_LINE> <INDENT> from time import sleep <NEW_LINE> from colorama import Back, Fore, Style <NEW_LINE> print(Fore.RED + 'some red text') <NEW_LINE> print(Back.GREEN + 'and with a green background') <NEW_LINE> print(Style.DIM + 'and in dim text') <NEW_LINE> print(Style.RESET_ALL) <NEW_LINE> for _ in range(10): <NEW_LINE> <INDENT> print(Style.DIM + 'Please wait...', flush=True) <NEW_LINE> sleep(10) <NEW_LINE> <DEDENT> print('Finished')
Example function that will be performed in a virtual environment. Importing at the module level ensures that it will not attempt to import the library before it is installed.
625941c266673b3332b9203d
def find_similar_word(self, word): <NEW_LINE> <INDENT> return self.w2v_model.most_similar(word)
通过w2v向量计算词条相似度
625941c23617ad0b5ed67ea5
def load_file_list(path=None, regx='.*.png', printable=False, keep_prefix=False): <NEW_LINE> <INDENT> if path is None: <NEW_LINE> <INDENT> path = os.getcwd() <NEW_LINE> <DEDENT> file_list = os.listdir(path) <NEW_LINE> return_list = [] <NEW_LINE> for _, f in enumerate(file_list): <NEW_LINE> <INDENT> if re.search(regx, f): <NEW_LINE> <INDENT> return_list.append(f) <NEW_LINE> <DEDENT> <DEDENT> if keep_prefix: <NEW_LINE> <INDENT> for i, f in enumerate(return_list): <NEW_LINE> <INDENT> return_list[i] = os.path.join(path, f) <NEW_LINE> <DEDENT> <DEDENT> if printable: <NEW_LINE> <INDENT> logging.info('Match file list = %s' % return_list) <NEW_LINE> logging.info('Number of files = %d' % len(return_list)) <NEW_LINE> <DEDENT> return return_list
Return a file list in a folder by given a path and regular expression. Parameters ---------- path : str or None A folder path, if `None`, use the current directory. regx : str The regx of file name. printable : boolean Whether to print the files infomation. keep_prefix : boolean Whether to keep path in the file name. Examples ---------- >>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\.(npz)')
625941c276d4e153a657eadc
def test_action_case_2(self): <NEW_LINE> <INDENT> pc = DotDict() <NEW_LINE> f2jd = copy.deepcopy(cannonical_json_dump) <NEW_LINE> pc.upload_file_minidump_flash2 = DotDict() <NEW_LINE> pc.upload_file_minidump_flash2.json_dump = f2jd <NEW_LINE> pc.upload_file_minidump_flash2.json_dump['crashing_thread']['frames'][1] ['function'] = 'NtUserPeekMessage' <NEW_LINE> fake_processor = create_basic_fake_processor() <NEW_LINE> rc = DotDict() <NEW_LINE> rule = Bug812318() <NEW_LINE> action_result = rule.action(rc, pc, fake_processor) <NEW_LINE> ok_(action_result) <NEW_LINE> ok_('classifications' in pc) <NEW_LINE> eq_( pc.classifications.skunk_works.classification, 'NtUserPeekMessage-other' )
success - only 1st target found in top 5 frames of stack
625941c2fb3f5b602dac363d
def _find_thread_stack(thread_id): <NEW_LINE> <INDENT> for tid, stack in sys._current_frames().items(): <NEW_LINE> <INDENT> if tid == thread_id: <NEW_LINE> <INDENT> return stack <NEW_LINE> <DEDENT> <DEDENT> return None
Returns a stack object that can be used to dump a stack trace for the given thread id (or None if the id is not found).
625941c24f6381625f1149e8
def __init__(self): <NEW_LINE> <INDENT> self.Id = None <NEW_LINE> self.Name = None <NEW_LINE> self.Protocol = None
:param Id: Channel ID. :type Id: str :param Name: The channel name after modification. :type Name: str :param Protocol: The channel protocol after modification. Valid values: HLS, DASH. :type Protocol: str
625941c24e696a04525c93f8
def __init__(self, importeur, parser_cmd, nom, m_type="inconnu"): <NEW_LINE> <INDENT> self.importeur = importeur <NEW_LINE> self.parser_cmd = parser_cmd <NEW_LINE> self.nom = nom <NEW_LINE> self.type = m_type <NEW_LINE> self.statut = INSTANCIE
Constructeur d'un module. Par défaut, on lui attribue surtout un nom IDENTIFIANT, sans accents ni espaces, qui sera le nom du package même. Le type du module est soit primaire soit secondaire.
625941c2b7558d58953c4ec4
def GetResources(self, args): <NEW_LINE> <INDENT> group_ref = self.CreateInstanceGroupReference( name=args.name, region=args.region, zone=args.zone, zonal_resource_type='instanceGroups', regional_resource_type='regionInstanceGroups') <NEW_LINE> if group_ref.Collection() == 'compute.instanceGroups': <NEW_LINE> <INDENT> service = self.compute.instanceGroups <NEW_LINE> request = service.GetRequestType('Get')( instanceGroup=group_ref.Name(), zone=group_ref.zone, project=self.project) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> service = self.compute.regionInstanceGroups <NEW_LINE> request = service.GetRequestType('Get')( instanceGroup=group_ref.Name(), region=group_ref.region, project=self.project) <NEW_LINE> <DEDENT> errors = [] <NEW_LINE> results = list(request_helper.MakeRequests( requests=[(service, 'Get', request)], http=self.http, batch_url=self.batch_url, errors=errors, custom_get_requests=None)) <NEW_LINE> return results, errors
Retrieves response with named ports.
625941c2be383301e01b5435
def modify_env(old, new, verbose=True): <NEW_LINE> <INDENT> old_keys = list(old.keys()) <NEW_LINE> new_keys = list(new.keys()) <NEW_LINE> for key in new_keys: <NEW_LINE> <INDENT> if key in old_keys: <NEW_LINE> <INDENT> if not new[key] == old[key]: <NEW_LINE> <INDENT> _log.debug("Key in new environment found that is different from old one: %s (%s)", key, new[key]) <NEW_LINE> setvar(key, new[key], verbose=verbose) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> _log.debug("Key in new environment found that is not in old one: %s (%s)", key, new[key]) <NEW_LINE> setvar(key, new[key], verbose=verbose) <NEW_LINE> <DEDENT> <DEDENT> for key in old_keys: <NEW_LINE> <INDENT> if key not in new_keys: <NEW_LINE> <INDENT> _log.debug("Key in old environment found that is not in new one: %s (%s)", key, old[key]) <NEW_LINE> os.unsetenv(key) <NEW_LINE> del os.environ[key]
Compares two os.environ dumps. Adapts final environment.
625941c2293b9510aa2c3244
def draw_terrain(self, panel, level_id, terrain, coverage, revealed, visible): <NEW_LINE> <INDENT> walls_mask = self.walls_bitmask(level_id, coverage, revealed, self.walls_terrain_type) <NEW_LINE> mask_offset = Position( max(0, self.x*-1), max(0, self.y*-1) ) <NEW_LINE> revealed_not_visible = revealed ^ visible <NEW_LINE> renderables = self.ecs.manage(components.Renderable) <NEW_LINE> for terrain_id in np.unique(terrain): <NEW_LINE> <INDENT> if not terrain_id: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> terrain_mask = terrain == terrain_id <NEW_LINE> renderable = renderables.get(terrain_id) <NEW_LINE> if not renderable: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if terrain_id >> 4 == self.walls_terrain_type: <NEW_LINE> <INDENT> self.draw_bitmasked_terrain_tile( panel, walls_mask, terrain_mask, visible, revealed_not_visible, mask_offset, renderable, ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.draw_terrain_tile( panel, terrain_mask, visible, revealed_not_visible, mask_offset, renderable, )
Draw TERRAIN tiles.
625941c285dfad0860c3ae06
def compute_global_error(g): <NEW_LINE> <INDENT> Fx = 0 <NEW_LINE> for edge in g['edges']: <NEW_LINE> <INDENT> if edge['type'] == 'P': <NEW_LINE> <INDENT> x1 = main.v2t(g['x'][edge['fromIdx']:edge['fromIdx']+3]) <NEW_LINE> x2 = main.v2t(g['x'][edge['toIdx']:edge['toIdx']+3]) <NEW_LINE> <DEDENT> elif edge['type'] == 'L': <NEW_LINE> <INDENT> x = g['x'][edge['fromIdx']:edge['fromIdx']+3] <NEW_LINE> l = g['x'][edge['toIdx']:edge['toIdx']+2] <NEW_LINE> <DEDENT> <DEDENT> return Fx
Computes the total error of the graph
625941c255399d3f0558865f
def test_reverse_string(self): <NEW_LINE> <INDENT> self.assertEqual('ataD gnirtS yM', 'My String Data')
5 points Test with value My String Data return value should be ataD gnirtS yM
625941c2d6c5a10208143ff5
def __query_gen(self, url, variables, entity_name, query, end_cursor=''): <NEW_LINE> <INDENT> dst = self.get_dst_dir(query) <NEW_LINE> if end_cursor==True: <NEW_LINE> <INDENT> end_cursor = self.load_end_cursor(dst) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> end_cursor = '' <NEW_LINE> <DEDENT> nodes, end_cursor = self.query(url, variables, entity_name, query, end_cursor) <NEW_LINE> self.save_end_cursor(end_cursor,dst) <NEW_LINE> if nodes: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> for node in nodes: <NEW_LINE> <INDENT> yield node <NEW_LINE> <DEDENT> if end_cursor: <NEW_LINE> <INDENT> nodes, end_cursor = self.query(url, variables, entity_name, query, end_cursor) <NEW_LINE> self.save_end_cursor(end_cursor,dst) <NEW_LINE> print(len(nodes)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> self.logger.exception('Failed to query ' + query)
Generator for hashtag and location.
625941c2287bf620b61d3a11
def plot_train_val_losses(training_loss: list, validation_loss: list, save_dir: str = None, display: bool = False, debug: bool = False, interval: str = 'Epochs', title: str = "Training Loss Curves") -> None: <NEW_LINE> <INDENT> x_values = list(np.arange(1, len(training_loss)+1)) <NEW_LINE> train_trace = go.Scatter(x=x_values, y=training_loss, mode='lines+markers', name='training_loss') <NEW_LINE> val_trace = go.Scatter(x=x_values, y=validation_loss, mode='lines+markers', name='validation_loss') <NEW_LINE> fig = go.Figure() <NEW_LINE> fig.add_trace(train_trace) <NEW_LINE> fig.add_trace(val_trace) <NEW_LINE> fig.update_layout(title=title, xaxis_title=interval, yaxis_title='Loss') <NEW_LINE> if display: <NEW_LINE> <INDENT> fig.show() <NEW_LINE> <DEDENT> if save_dir is not None: <NEW_LINE> <INDENT> fig.write_image(join(save_dir, "training_loss_curves.png")) <NEW_LINE> fig.write_html(join(save_dir, "training_loss_curves.html")) <NEW_LINE> fig.write_json(join(save_dir, "training_loss_curves.json")) <NEW_LINE> <DEDENT> return
Creates a one off train val plot. Args: training_loss (list): list of floats val_loss (list): list of floats save_dir (str): path to a location to save an exported image of the plot (or an interactive graph) debug (bool): default False ** kwargs (keyword arguments): checked for title, xlabel, ylabel, interval (epochs or batches) Returns: Nothing
625941c2a4f1c619b28affea
def simple_bind(self): <NEW_LINE> <INDENT> total_bound = 0.0 <NEW_LINE> mol_id = self.active_molecules[0] <NEW_LINE> mol_concentration = self.get_concentration(mol_id) <NEW_LINE> if mol_concentration <= 0.0: <NEW_LINE> <INDENT> for dendrite in self.dendrites: dendrite.set_bound(0.0) <NEW_LINE> return <NEW_LINE> <DEDENT> protein_counts = dict() <NEW_LINE> total_protein_count = 0.0 <NEW_LINE> for dendrite in self.dendrites: <NEW_LINE> <INDENT> if dendrite.density == 0.0: continue <NEW_LINE> count,affinity = (dendrite.density, dendrite.affinities[mol_id]) <NEW_LINE> protein_counts[dendrite] = count*affinity <NEW_LINE> total_protein_count += count*affinity <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> axon = self.axon <NEW_LINE> native_mol_id = axon.native_mol_id <NEW_LINE> axon_available = min(axon.density, axon.capacity-axon.get_concentration()) <NEW_LINE> if axon_available > 0.0: <NEW_LINE> <INDENT> count,affinity = (axon_available, axon.affinities[native_mol_id]) <NEW_LINE> protein_count = count*affinity <NEW_LINE> total_protein_count += protein_count <NEW_LINE> protein_fraction = protein_count / total_protein_count <NEW_LINE> k = (1 - (affinity*protein_fraction)) <NEW_LINE> bound = protein_count * (mol_concentration**2) / ( mol_concentration + k ) <NEW_LINE> axon.add_concentration(bound) <NEW_LINE> self.remove_concentration(bound, native_mol_id) <NEW_LINE> <DEDENT> <DEDENT> except AttributeError: pass <NEW_LINE> for dendrite,protein_count in protein_counts.iteritems(): <NEW_LINE> <INDENT> protein_count = protein_counts[dendrite] <NEW_LINE> protein_fraction = protein_count / total_protein_count <NEW_LINE> k = (1 - (affinity*protein_fraction)) <NEW_LINE> bound = protein_count * (mol_concentration**2) / ( mol_concentration + k ) <NEW_LINE> total_bound += bound <NEW_LINE> dendrite.set_bound(bound) <NEW_LINE> if self.verbose: <NEW_LINE> <INDENT> print("Concentrations:") <NEW_LINE> print(" P: %f M: %f" % (protein_count, mol_concentration)) <NEW_LINE> 
print("Proportions::") <NEW_LINE> print("fP: %f" % protein_fraction) <NEW_LINE> print("Constant and final bound count:") <NEW_LINE> print("k: %f bound: %f" % (k, bound)) <NEW_LINE> print("") <NEW_LINE> <DEDENT> <DEDENT> return total_bound
Bound/Unbound fraction f = [L] / ( [L] + K_d ) f is the fraction of molecules that are bound to receptors. L is the molecule concentration K_d is the dissociation constant This simpler version is less computationally expensive, but only works if the synaptic cleft contains only one molecule.
625941c24527f215b584c405
def test_sanity(self): <NEW_LINE> <INDENT> assert self.dim == len(self.Ls) <NEW_LINE> assert self.shape == self.Ls + (len(self.unit_cell), ) <NEW_LINE> assert self.N_cells == np.prod(self.Ls) <NEW_LINE> assert self.N_sites == np.prod(self.shape) <NEW_LINE> if self.bc.shape != (self.dim, ): <NEW_LINE> <INDENT> raise ValueError("Wrong len of bc") <NEW_LINE> <DEDENT> assert self.bc.dtype == np.bool <NEW_LINE> chinfo = None <NEW_LINE> for site in self.unit_cell: <NEW_LINE> <INDENT> if site is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if chinfo is None: <NEW_LINE> <INDENT> chinfo = site.leg.chinfo <NEW_LINE> <DEDENT> if not isinstance(site, Site): <NEW_LINE> <INDENT> raise ValueError("element of Unit cell is not Site.") <NEW_LINE> <DEDENT> if site.leg.chinfo != chinfo: <NEW_LINE> <INDENT> raise ValueError("All sites must have the same ChargeInfo!") <NEW_LINE> <DEDENT> <DEDENT> if self.basis.shape[0] != self.dim: <NEW_LINE> <INDENT> raise ValueError("Need one basis vector for each direction!") <NEW_LINE> <DEDENT> if self.unit_cell_positions.shape[0] != len(self.unit_cell): <NEW_LINE> <INDENT> raise ValueError("Need one position for each site in the unit cell.") <NEW_LINE> <DEDENT> if self.basis.shape[1] != self.unit_cell_positions.shape[1]: <NEW_LINE> <INDENT> raise ValueError("Different space dimensions of `basis` and `unit_cell_positions`") <NEW_LINE> <DEDENT> assert np.all(self._order >= 0) and np.all(self._order <= self.shape) <NEW_LINE> assert np.all( np.sum(self._order * self._strides, axis=1)[self._perm] == np.arange(self.N_sites)) <NEW_LINE> if self.bc_MPS not in MPS._valid_bc: <NEW_LINE> <INDENT> raise ValueError("invalid MPS boundary conditions") <NEW_LINE> <DEDENT> if self.bc[0] and self.bc_MPS == 'infinite': <NEW_LINE> <INDENT> raise ValueError("Need periodic boundary conditions along the x-direction " "for 'infinite' `bc_MPS`")
Sanity check. Raises ValueErrors, if something is wrong.
625941c2187af65679ca50ca
def test_reform_with_scalar_vector_errors(): <NEW_LINE> <INDENT> policy1 = Policy() <NEW_LINE> reform1 = {'SS_thd85': {2020: 30000}} <NEW_LINE> with pytest.raises(pt.ValidationError): <NEW_LINE> <INDENT> policy1.implement_reform(reform1) <NEW_LINE> <DEDENT> policy2 = Policy() <NEW_LINE> reform2 = {'ID_Medical_frt': {2020: [0.08]}} <NEW_LINE> with pytest.raises(pt.ValidationError): <NEW_LINE> <INDENT> policy2.implement_reform(reform2) <NEW_LINE> <DEDENT> policy3 = Policy() <NEW_LINE> reform3 = {'ID_Medical_frt': [{"year": 2020, "value": [0.08]}]} <NEW_LINE> with pytest.raises(pt.ValidationError): <NEW_LINE> <INDENT> policy3.adjust(reform3) <NEW_LINE> <DEDENT> policy4 = Policy() <NEW_LINE> ref4 = {"II_brk1": {2020: [9700, 19400, 9700, 13850, 19400, 19400]}} <NEW_LINE> with pytest.raises(pt.ValidationError): <NEW_LINE> <INDENT> policy4.implement_reform(ref4) <NEW_LINE> <DEDENT> policy5 = Policy() <NEW_LINE> ref5 = {"II_rt1": {2029: [.2, .3]}} <NEW_LINE> with pytest.raises(pt.ValidationError): <NEW_LINE> <INDENT> policy5.implement_reform(ref5)
Test catching scalar-vector confusion.
625941c2796e427e537b0570
def outstanding_teams(self): <NEW_LINE> <INDENT> return filter(lambda t: not t.finished(), self.teams)
Returns all teams that have not finished yet.
625941c2f548e778e58cd529
def __getitem__(self, n): <NEW_LINE> <INDENT> if self.access=="disk": <NEW_LINE> <INDENT> path=self.targets.keys()[n] <NEW_LINE> <DEDENT> elif self.access=="firstFat": <NEW_LINE> <INDENT> path=self.firstFats[n] <NEW_LINE> <DEDENT> return self.targets[path]
Renvoye le nième disque. Le fonctionnement dépend du paramètre self.access @param n un numéro @return le nième disque USB connecté sous forme d'instance de uDisk2
625941c224f1403a92600b14
def GetMatch(self, start): <NEW_LINE> <INDENT> start_time = start <NEW_LINE> if self.timezone and pytz is not None: <NEW_LINE> <INDENT> if not start_time.tzinfo: <NEW_LINE> <INDENT> start_time = pytz.utc.localize(start_time) <NEW_LINE> <DEDENT> start_time = start_time.astimezone(self.timezone) <NEW_LINE> start_time = start_time.replace(tzinfo=None) <NEW_LINE> <DEDENT> if self.months: <NEW_LINE> <INDENT> months = self._NextMonthGenerator(start_time.month, self.months) <NEW_LINE> <DEDENT> while True: <NEW_LINE> <INDENT> month, yearwraps = months.next() <NEW_LINE> candidate_month = start_time.replace(day=1, month=month, year=start_time.year + yearwraps) <NEW_LINE> if self.monthdays: <NEW_LINE> <INDENT> _, last_day = calendar.monthrange(candidate_month.year, candidate_month.month) <NEW_LINE> day_matches = sorted(x for x in self.monthdays if x <= last_day) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> day_matches = self._MatchingDays(candidate_month.year, month) <NEW_LINE> <DEDENT> if ((candidate_month.year, candidate_month.month) == (start_time.year, start_time.month)): <NEW_LINE> <INDENT> day_matches = [x for x in day_matches if x >= start_time.day] <NEW_LINE> while (day_matches and day_matches[0] == start_time.day and start_time.time() >= self.time): <NEW_LINE> <INDENT> day_matches.pop(0) <NEW_LINE> <DEDENT> <DEDENT> while day_matches: <NEW_LINE> <INDENT> out = candidate_month.replace(day=day_matches[0], hour=self.time.hour, minute=self.time.minute, second=0, microsecond=0) <NEW_LINE> if self.timezone and pytz is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> out = self.timezone.localize(out) <NEW_LINE> <DEDENT> except (NonExistentTimeError, IndexError): <NEW_LINE> <INDENT> for _ in range(24): <NEW_LINE> <INDENT> out = out.replace(minute=1) + datetime.timedelta(minutes=60) <NEW_LINE> try: <NEW_LINE> <INDENT> out = self.timezone.localize(out) <NEW_LINE> <DEDENT> except (NonExistentTimeError, IndexError): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> break 
<NEW_LINE> <DEDENT> <DEDENT> out = out.astimezone(pytz.utc) <NEW_LINE> <DEDENT> return out
Returns the next time that matches the schedule after time start. Arguments: start: a UTC datetime to start from. Matches will start after this time Returns: a datetime object
625941c230dc7b7665901915
def delete_queue(self, queue_name, fail_not_exist=False): <NEW_LINE> <INDENT> _validate_not_none('queue_name', queue_name) <NEW_LINE> request = HTTPRequest() <NEW_LINE> request.method = 'DELETE' <NEW_LINE> request.host = self._get_host() <NEW_LINE> request.path = '/' + str(queue_name) + '' <NEW_LINE> request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage) <NEW_LINE> request.headers = _update_storage_queue_header(request, self.account_name, self.account_key) <NEW_LINE> if not fail_not_exist: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self._perform_request(request) <NEW_LINE> return True <NEW_LINE> <DEDENT> except WindowsAzureError as e: <NEW_LINE> <INDENT> _dont_fail_not_exist(e) <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self._perform_request(request) <NEW_LINE> return True
Permanently deletes the specified queue. queue_name: name of the queue. fail_not_exist: specify whether throw exception when queue doesn't exist.
625941c2627d3e7fe0d68dfb
@transact_ro <NEW_LINE> def get_stats(store, week_delta): <NEW_LINE> <INDENT> now = datetime_now() <NEW_LINE> week_delta = abs(week_delta) <NEW_LINE> if week_delta > 0: <NEW_LINE> <INDENT> target_week = utc_past_date(hours=(week_delta * 24 * 7)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_week = datetime_now() <NEW_LINE> <DEDENT> looked_week = target_week.isocalendar()[1] <NEW_LINE> looked_year = target_week.isocalendar()[0] <NEW_LINE> current_wday = now.weekday() <NEW_LINE> current_hour = now.hour <NEW_LINE> current_week = now.isocalendar()[1] <NEW_LINE> lower_bound = iso_to_gregorian(looked_year, looked_week, 1) <NEW_LINE> upper_bound = iso_to_gregorian(looked_year, looked_week, 7) <NEW_LINE> hourlyentries = store.find(Stats, And(Stats.start >= lower_bound, Stats.start <= upper_bound)) <NEW_LINE> week_entries = 0 <NEW_LINE> week_map = [[dict() for i in xrange(24)] for j in xrange(7)] <NEW_LINE> for hourdata in hourlyentries: <NEW_LINE> <INDENT> stats_day = int(hourdata.start.weekday()) <NEW_LINE> stats_hour = int(hourdata.start.isoformat()[11:13]) <NEW_LINE> hourly_dict = { 'hour': stats_hour, 'day': stats_day, 'summary': hourdata.summary, 'free_disk_space': hourdata.free_disk_space, 'valid': 0 } <NEW_LINE> if week_map[stats_day][stats_hour]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> week_map[stats_day][stats_hour] = hourly_dict <NEW_LINE> week_entries += 1 <NEW_LINE> <DEDENT> if week_entries == (7 * 24): <NEW_LINE> <INDENT> return { 'complete': True, 'associated_date': datetime_to_ISO8601(target_week), 'heatmap': weekmap_to_heatmap(week_map) } <NEW_LINE> <DEDENT> for day in xrange(7): <NEW_LINE> <INDENT> for hour in xrange(24): <NEW_LINE> <INDENT> if week_map[day][hour]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if current_week != looked_week: <NEW_LINE> <INDENT> marker = -1 <NEW_LINE> <DEDENT> elif day > current_wday or (day == current_wday and hour > current_hour): <NEW_LINE> <INDENT> marker = -2 <NEW_LINE> <DEDENT> elif current_wday == 
day and hour == current_hour: <NEW_LINE> <INDENT> marker = -3 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> marker = -1 <NEW_LINE> <DEDENT> week_map[day][hour] = { 'hour': hour, 'day': day, 'summary': {}, 'free_disk_space': 0, 'valid': marker } <NEW_LINE> <DEDENT> <DEDENT> return { 'complete': False, 'associated_date': datetime_to_ISO8601(target_week), 'heatmap': weekmap_to_heatmap(week_map) }
:param week_delta: commonly is 0, mean that you're taking this week. -1 is the previous week. At the moment do not support negative number and change of the year.
625941c215baa723493c3f20
def matches(self, message: str) -> bool: <NEW_LINE> <INDENT> return re.match(self.regex, message) != None
:param message: The message to check :return: True if the taunt is matched in the message, false if not
625941c2091ae35668666f0e
def open(self, path, flag="r"): <NEW_LINE> <INDENT> return self._client.open(six.text_type(path), flag)
Used to get a python filehandler object :param path: the name of the file to be opened :param flag: which mode should the file be opened in :return: a _io.TextIOWrapper object with utf-8 encoding
625941c26fb2d068a760f048
def swap_possible(board): <NEW_LINE> <INDENT> print("Not implemented yet!") <NEW_LINE> return False
Optional Challenge: helper function for swap Returns True if a swap is possible on the given board and False otherwise
625941c28c0ade5d55d3e966
def to_encdict(self): <NEW_LINE> <INDENT> d = {} <NEW_LINE> d['user'] = self._username <NEW_LINE> d['password'] = self._password <NEW_LINE> d['notes'] = self._notes <NEW_LINE> d['url'] = self._url <NEW_LINE> d['tags'] = self._tags <NEW_LINE> return d
Return a dictionary of encrypted records
625941c2fb3f5b602dac363e
def set_use_tcp(self, use_tcp=True): <NEW_LINE> <INDENT> self.use_tcp = use_tcp
Set TCP/UDP usage (by default will be used UDP)
625941c28a349b6b435e8120
def setup_training(self, session): <NEW_LINE> <INDENT> self.predictions = [] <NEW_LINE> self.loss = 0 <NEW_LINE> self.io.print_warning('Deep supervision application set to {}'.format(self.cfgs['deep_supervision'])) <NEW_LINE> for idx, b in enumerate(self.side_outputs): <NEW_LINE> <INDENT> output = tf.nn.sigmoid(b, name='output_{}'.format(idx)) <NEW_LINE> cost = sigmoid_cross_entropy_balanced(b, self.edgemaps, name='cross_entropy{}'.format(idx)) <NEW_LINE> self.predictions.append(output) <NEW_LINE> if self.cfgs['deep_supervision']: <NEW_LINE> <INDENT> self.loss += (self.cfgs['loss_weights'] * cost) <NEW_LINE> <DEDENT> <DEDENT> fuse_output = tf.nn.sigmoid(self.fuse, name='fuse') <NEW_LINE> fuse_cost = sigmoid_cross_entropy_balanced(self.fuse, self.edgemaps, name='cross_entropy_fuse') <NEW_LINE> self.predictions.append(fuse_output) <NEW_LINE> self.loss += (self.cfgs['loss_weights'] * fuse_cost) <NEW_LINE> pred = tf.cast(tf.greater(fuse_output, 0.5), tf.int32, name='predictions') <NEW_LINE> error = tf.cast(tf.not_equal(pred, tf.cast(self.edgemaps, tf.int32)), tf.float32) <NEW_LINE> self.error = tf.reduce_mean(error, name='pixel_error') <NEW_LINE> tf.summary.scalar('loss', self.loss) <NEW_LINE> tf.summary.scalar('error', self.error) <NEW_LINE> self.merged_summary = tf.summary.merge_all() <NEW_LINE> self.train_writer = tf.summary.FileWriter(self.cfgs['save_dir'] + '/train', session.graph) <NEW_LINE> self.val_writer = tf.summary.FileWriter(self.cfgs['save_dir'] + '/val')
Apply sigmoid non-linearity to side layer ouputs + fuse layer outputs Compute total loss := side_layer_loss + fuse_layer_loss Compute predicted edge maps from fuse layer as pseudo performance metric to track
625941c2498bea3a759b9a5c
def __init__(self, grid_size=GRID_SIZE, prob_2=PROB_2): <NEW_LINE> <INDENT> self.grid = np.ones(grid_size) <NEW_LINE> self.prob_2 = prob_2 <NEW_LINE> self.game_over = False <NEW_LINE> self.score = 0 <NEW_LINE> self.add_random_tile() <NEW_LINE> self.add_random_tile()
Initialize board by creating an array of shape grid_size. grid_size is assumed to be the same in both dimensions
625941c2379a373c97cfaaf1
def reloadVirtualMachineFromPath_Task(self, configurationPath): <NEW_LINE> <INDENT> return self.delegate("reloadVirtualMachineFromPath_Task")(configurationPath)
Reloads the configuration for this virtual machine from a given datastore path. This is equivalent to unregistering and registering the virtual machine from a different path. The virtual machine's hardware configuration, snapshots, guestinfo variables etc. will be replaced based on the new configuration file. Other information associated with the virtual machine object, such as events and permissions, will be preserved.Reloads the configuration for this virtual machine from a given datastore path. This is equivalent to unregistering and registering the virtual machine from a different path. The virtual machine's hardware configuration, snapshots, guestinfo variables etc. will be replaced based on the new configuration file. Other information associated with the virtual machine object, such as events and permissions, will be preserved.Reloads the configuration for this virtual machine from a given datastore path. This is equivalent to unregistering and registering the virtual machine from a different path. The virtual machine's hardware configuration, snapshots, guestinfo variables etc. will be replaced based on the new configuration file. Other information associated with the virtual machine object, such as events and permissions, will be preserved. :param configurationPath:
625941c28a43f66fc4b54014
def add_object(self, draw_object, position="top"): <NEW_LINE> <INDENT> if position == "top": <NEW_LINE> <INDENT> self.draw_objects.append(draw_object) <NEW_LINE> <DEDENT> elif position == "bottom": <NEW_LINE> <INDENT> self.draw_objects.insert(0, draw_object) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.draw_objects.insert(position, draw_object)
Add a new object to the canvas. :param: draw_object -- DrawObject to add :param position="top": Position to add the object. "top" puts the object on top of teh other objects. "bottom" puts them on the bottom of the stack. A integer puts it in that place in the order -- 0 is the bottom.
625941c2cb5e8a47e48b7a59
def forward(self, indices): <NEW_LINE> <INDENT> if indices.dim() == 2: <NEW_LINE> <INDENT> batch_size, seq_size = indices.size() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> batch_size, seq_size = indices.size(0), 1 <NEW_LINE> <DEDENT> if not indices.is_contiguous(): <NEW_LINE> <INDENT> indices = indices.contiguous() <NEW_LINE> <DEDENT> indices = indices.data.view(batch_size * seq_size, 1) <NEW_LINE> if self._bag: <NEW_LINE> <INDENT> if (self._offsets is None or self._offsets.size(0) != (batch_size * seq_size)): <NEW_LINE> <INDENT> self._offsets = Variable(torch.arange(0, indices.numel(), indices.size(1)).long()) <NEW_LINE> if indices.is_cuda: <NEW_LINE> <INDENT> self._offsets = self._offsets.cuda() <NEW_LINE> <DEDENT> <DEDENT> hashed_indices = Variable(self._get_hashed_indices(indices)) <NEW_LINE> embedding = self.embeddings(hashed_indices.view(-1), self._offsets) <NEW_LINE> embedding = embedding.view(batch_size, seq_size, -1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hashed_indices = Variable(self._get_hashed_indices(indices)) <NEW_LINE> embedding = self.embeddings(hashed_indices) <NEW_LINE> embedding = embedding.sum(1) <NEW_LINE> embedding = embedding.view(batch_size, seq_size, -1) <NEW_LINE> <DEDENT> return embedding
Retrieve embeddings corresponding to indices. See documentation on PyTorch ``nn.Embedding`` for details.
625941c2fff4ab517eb2f3e7
def relative_entropy(args): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser() <NEW_LINE> parser.add_argument("-i", "--file", required=True, help="") <NEW_LINE> parser.add_argument("-k", "--ksize", required=True, type=int) <NEW_LINE> parser.add_argument("-o", "--output") <NEW_LINE> args = parser.parse_args(args) <NEW_LINE> if args.output is None: <NEW_LINE> <INDENT> foh = sys.stdout <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> foh = open(args.output, "w") <NEW_LINE> <DEDENT> relEntropy = fsig.calculate_relative_entropy(args.file, args.ksize) <NEW_LINE> print("# input file: {}".format(args.file)) <NEW_LINE> print("# Run on {}".format(str(datetime.datetime.now()))) <NEW_LINE> print(relEntropy, file=foh)
Calculate relative entropy of genome. Args: args (TODO): TODO Returns: TODO
625941c22ae34c7f2600d0de
def values(self): <NEW_LINE> <INDENT> return self.map(filter_values)
Iterates only values from the stream (last element from the :py:class:`tuple`). If element is single then it will be used. :return: new processed :py:class:`Stream` instance. >>> stream = Stream.range(5) >>> stream = stream.key_map(lambda item: item ** 3) >>> stream = stream.values() >>> list(stream) ... [0, 1, 2, 3, 4]
625941c21f5feb6acb0c4b00
def _titlecase_handler(self, word, **kwargs): <NEW_LINE> <INDENT> articles = ['a','an','and','as','at','but','by','en','for','if','in', 'of','on','or','the','to','v','v.','via','vs','vs.'] <NEW_LINE> if self.titlecase_articles and word.lower() in articles: <NEW_LINE> <INDENT> return word.title()
This function gets called for every word that is being titlecased. We will titlecase articles if the user adds the flag. If we don't return anything then it will be titlecased by the "titlecase" module itself. :param word: the word to check on :rtype: none/string
625941c260cbc95b062c64ef
def __init__(self, grid, title): <NEW_LINE> <INDENT> self.grid = grid <NEW_LINE> self.title = title <NEW_LINE> self.map_data = [] <NEW_LINE> self.map_mult_data = [] <NEW_LINE> resources = Ngl.Resources() <NEW_LINE> resources.sfXArray = self.grid.lon_sequence() <NEW_LINE> resources.sfYArray = self.grid.lat_sequence() <NEW_LINE> resources.mpProjection = "Robinson" <NEW_LINE> resources.mpLimitMode = "LatLon" <NEW_LINE> resources.mpMinLonF = self.grid.boundaries()["lon_min"] <NEW_LINE> resources.mpMaxLonF = self.grid.boundaries()["lon_max"] <NEW_LINE> resources.mpMinLatF = self.grid.boundaries()["lat_min"] <NEW_LINE> resources.mpMaxLatF = self.grid.boundaries()["lat_max"] <NEW_LINE> resources.wkColorMap = "wh-bl-gr-ye-re" <NEW_LINE> resources.mpGeophysicalLineThicknessF = 2.0 <NEW_LINE> resources.lbAutoManage = False <NEW_LINE> resources.lbOrientation = "Horizontal" <NEW_LINE> resources.lbLabelFont = "Helvetica" <NEW_LINE> resources.lbLabelFontHeightF = 0.0075 <NEW_LINE> resources.lbTitleFontHeightF = 0.01 <NEW_LINE> resources.cnFillOn = True <NEW_LINE> resources.cnLinesOn = False <NEW_LINE> resources.cnLineLabelsOn = False <NEW_LINE> resources.cnInfoLabelOn = False <NEW_LINE> resources.cnMaxLevelCount = 22 <NEW_LINE> resources.cnFillMode = "RasterFill" <NEW_LINE> self.resources = resources
Initialize an instance of MapPlots. Plotting of maps is powered by PyNGL. :type grid: :class:`.Grid` :arg grid: The Grid object describing the map data to be plotted. :arg str title: The title describing the map data.
625941c215baa723493c3f21
def perform(url, data): <NEW_LINE> <INDENT> handle = urlopen(url, urlencode(data)) <NEW_LINE> response = handle.read() <NEW_LINE> decoded = json.loads(response) <NEW_LINE> if decoded['status'] != 'ok': <NEW_LINE> <INDENT> if 'errors' in decoded: <NEW_LINE> <INDENT> raise Exception(decoded['errors']) <NEW_LINE> <DEDENT> raise Exception(decoded) <NEW_LINE> <DEDENT> return decoded
Perform CDN POST request
625941c207f4c71912b1142e
def _handle_reverse(string): <NEW_LINE> <INDENT> logging.info("Reversing string: {}".format(string)) <NEW_LINE> return reverse_success(string[::-1])
Reverse string
625941c282261d6c526ab449
def station_getinfo(self, id, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> return self.station_getinfo_with_http_info(id, **kwargs)
Gets a the info to display in the sign-in station by it's ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.station_getinfo(id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str id: The id of the sign-in station to get. (required) :param str event: The id of the event, to override the one by schedule. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread.
625941c2187af65679ca50cb
def decimal_to_hex_dict_string_comp(): <NEW_LINE> <INDENT> dec_list = [str(dec) for dec in range(0, 16)] <NEW_LINE> dec_hex_dict = {digit: hex(int(digit)) for digit in dec_list} <NEW_LINE> return dec_hex_dict
Use a list comprehension to make a dictionary that maps decimal, 0 - 15, to hex.
625941c27d847024c06be267
def alias(self): <NEW_LINE> <INDENT> return _filter_swig.pfb_arb_resampler_ccf_sptr_alias(self)
alias(pfb_arb_resampler_ccf_sptr self) -> std::string
625941c28da39b475bd64f1e
def update(self): <NEW_LINE> <INDENT> self.reset() <NEW_LINE> agent = self.simulation.agent <NEW_LINE> mask = agent.active & self.room.contains_points(agent.position) <NEW_LINE> indices = np.arange(agent.size)[mask] <NEW_LINE> self.strategy[mask ^ True] = 1 <NEW_LINE> self.t_aset = self.t_aset_0 - self.simulation.time_tot <NEW_LINE> best_response_strategy(self.simulation.agent, indices, self.door, self.radius, self.strategy, self.strategies, self.t_aset, self.interval, self.simulation.dt_prev)
Update strategies for all agents.
625941c2aad79263cf3909eb
def displayBoard(self, board, tile_w, tile_h, num_rows, num_cols): <NEW_LINE> <INDENT> w = tile_w <NEW_LINE> h = tile_h <NEW_LINE> for row in range(num_rows): <NEW_LINE> <INDENT> for col in range(num_cols): <NEW_LINE> <INDENT> x = col * w + self.game_board_x <NEW_LINE> y = row * h + self.game_board_y <NEW_LINE> if (board[row][col].isFlipped): <NEW_LINE> <INDENT> pygame.draw.rect(self.window, self.colors["gray1"], [x, y, w, h]) <NEW_LINE> if (board[row][col].isMine()): <NEW_LINE> <INDENT> self.text(board[row][col].value, x + (w / 2), y + h, "Impact", tile_w * 2, board[row][col].textColor(), "center") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.text(board[row][col].value, x + (w / 2), y + (h / 2), "Impact", tile_w, board[row][col].textColor(), "center") <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if (board[row][col].isFlagged): <NEW_LINE> <INDENT> pygame.draw.rect(self.window, self.colors["red1"], [x, y, w, h]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["red2"], [x + (w / 20), y + (h / 20), w - (w / 10), h - (h / 10)]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["red3"], [x + (w / 4), y + (h / 4), w - (w / 2), h - (h / 2)]) <NEW_LINE> <DEDENT> elif (board[row][col].isMarked): <NEW_LINE> <INDENT> pygame.draw.rect(self.window, self.colors["green1"], [x, y, w, h]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["green2"], [x + (w / 20), y + (h / 20), w - (w / 10), h - (h / 10)]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["green3"], [x + (w / 4), y + (h / 4), w - (w / 2), h - (h / 2)]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pygame.draw.rect(self.window, self.colors["blue1"], [x, y, w, h]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["blue2"], [x + (w / 20), y + (h / 20), w - (w / 10), h - (h / 10)]) <NEW_LINE> pygame.draw.rect(self.window, self.colors["blue3"], [x + (w / 4), y + (h / 4), w - (w / 2), h - (h / 2)])
- GUI display of the back end 2D array of the game board from the GameBoard class
625941c2d18da76e23532481
def __init__(self, krb5=None): <NEW_LINE> <INDENT> self._krb5 = None <NEW_LINE> self.discriminator = None <NEW_LINE> if krb5 is not None: <NEW_LINE> <INDENT> self.krb5 = krb5
ProvidersKrb5 - a model defined in Swagger
625941c28c3a873295158365
def test_get_and_set(self): <NEW_LINE> <INDENT> old = SOCO.treble() <NEW_LINE> self.assertIn(old, self.valid_values, NOT_IN_RANGE) <NEW_LINE> for value in [self.valid_values[0], self.valid_values[-1]]: <NEW_LINE> <INDENT> SOCO.treble(value) <NEW_LINE> wait() <NEW_LINE> self.assertEqual(SOCO.treble(), value, NOT_EXP) <NEW_LINE> <DEDENT> SOCO.treble(old) <NEW_LINE> wait()
Tests if the set functionlity works when given valid arguments
625941c276d4e153a657eadd
def testSegmentsOfSizeZero(self): <NEW_LINE> <INDENT> self._runTest(starts=[10,20], ends=[20,30], expStarts=[0, 30], expEnds=[10,len(self.chr1)], expTrackFormatType="Segments", debug=True)
Test that two ajasoned segments do not create a new segment of size 0 :return:
625941c2ad47b63b2c509f2d
def requires_https(self, host): <NEW_LINE> <INDENT> return host == "mtred.com"
Return True if the specified host requires HTTPs for balance update.
625941c2004d5f362079a2e1
def __init__(self, pvName, triggerValue, function, display, status, owner, doneValue=0, time=1, iterations=1): <NEW_LINE> <INDENT> PVTrigger.__init__(self, pvName, triggerValue, function, display) <NEW_LINE> self._numIterations = iterations <NEW_LINE> self._triggerTime = time <NEW_LINE> self._period = (self._triggerTime * 1000) / self._numIterations <NEW_LINE> self._doneValue = doneValue <NEW_LINE> self._statusControl = status <NEW_LINE> self._timer = wx.Timer(owner, -1) <NEW_LINE> return
Constructor
625941c24e4d5625662d4387
def sample_indep_edges(logits, is_edgesymmetric=False, tau=1.0, hard=False, hard_with_grad=False): <NEW_LINE> <INDENT> if is_edgesymmetric: <NEW_LINE> <INDENT> edge_types = logits.size(2) <NEW_LINE> n = int(0.5 * (1 + np.sqrt(4 * logits.size(1) + 1))) <NEW_LINE> reshaped_logits = logits.view(-1, n, n - 1, edge_types) <NEW_LINE> reshaped_logits = reshaped_logits.transpose(1, 2) <NEW_LINE> vertices = torch.triu_indices(n-1, n, offset=1) <NEW_LINE> edge_logits = reshaped_logits[:, vertices[0], vertices[1], :] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> edge_logits = logits <NEW_LINE> <DEDENT> uniforms = torch.empty_like(edge_logits).float().uniform_().clamp_(EPS, 1 - EPS) <NEW_LINE> gumbels = uniforms.log().neg().log().neg() <NEW_LINE> gumbels = gumbels.cuda() if logits.is_cuda else gumbels <NEW_LINE> edge_weights = gumbels + edge_logits <NEW_LINE> hard = True if hard_with_grad else hard <NEW_LINE> if hard: <NEW_LINE> <INDENT> top1_indices = torch.argmax(edge_weights, dim=-1, keepdim=True) <NEW_LINE> X = torch.zeros_like(edge_weights).scatter(-1, top1_indices, 1.0) <NEW_LINE> hard_X = X <NEW_LINE> <DEDENT> if not hard or hard_with_grad: <NEW_LINE> <INDENT> X = torch.nn.functional.softmax(edge_weights / tau, dim=-1) <NEW_LINE> <DEDENT> if hard_with_grad: <NEW_LINE> <INDENT> X = (hard_X - X).detach() + X <NEW_LINE> <DEDENT> if is_edgesymmetric: <NEW_LINE> <INDENT> samples = torch.zeros_like(reshaped_logits) <NEW_LINE> samples[:, vertices[0], vertices[1], :] = X <NEW_LINE> samples[:, vertices[1] - 1, vertices[0], :] = X <NEW_LINE> samples = samples.transpose(1, 2).contiguous().view(*logits.shape) <NEW_LINE> edge_weights_reshaped = torch.zeros_like(reshaped_logits) <NEW_LINE> edge_weights_reshaped[:, vertices[0], vertices[1]] = edge_weights <NEW_LINE> edge_weights_reshaped[:, vertices[1] - 1, vertices[0]] = edge_weights <NEW_LINE> edge_weights = edge_weights_reshaped.transpose(1, 2).contiguous().view(*logits.shape) <NEW_LINE> return samples, edge_weights <NEW_LINE> 
<DEDENT> else: <NEW_LINE> <INDENT> return X, edge_weights
Sample independent edges given logits. Args: logits: Logits of shape (batch_size, n * (n - 1), edge_types). They correspond to a flattened and transposed adjacency matrix with the diagonals removed. We assume the logits are edge-symmetric. is_edgesymmetric: Whether or not e_ij == e_ji. If True, then we must only sample one gumbel per undirected edge. tau: Float representing temperature. hard: Whether or not to sample hard edges. hard_with_grad: Whether or not to allow sample hard, but have gradients for backprop. Returns: Sampled edges with the same shape as logits, and sampled edge weights of same shape as logits.
625941c2d58c6744b4257c0d
def peaking(Wn, dBgain, Q=None, BW=None, type='half', analog=False, output='ba'): <NEW_LINE> <INDENT> if Q is None and BW is None: <NEW_LINE> <INDENT> BW = 1 <NEW_LINE> <DEDENT> if Q is None: <NEW_LINE> <INDENT> Q = 1/(2*sinh(ln(2)/2*BW)) <NEW_LINE> <DEDENT> if type in ('half'): <NEW_LINE> <INDENT> A = 10.0**(dBgain/40.0) <NEW_LINE> Az = A <NEW_LINE> Ap = A <NEW_LINE> <DEDENT> elif type in ('constantq'): <NEW_LINE> <INDENT> A = 10.0**(dBgain/20.0) <NEW_LINE> if dBgain > 0: <NEW_LINE> <INDENT> Az = A <NEW_LINE> Ap = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Az = 1 <NEW_LINE> Ap = A <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('"%s" is not a known peaking type' % type) <NEW_LINE> <DEDENT> b = np.array([1, Az/Q, 1]) <NEW_LINE> a = np.array([1, 1/(Ap*Q), 1]) <NEW_LINE> return _transform(b, a, Wn, analog, output)
Biquad peaking filter design Design a 2nd-order analog or digital peaking filter with variable Q and return the filter coefficients. Used in graphic or parametric EQs. Transfer function: H(s) = (s**2 + s*(Az/Q) + 1) / (s**2 + s/(Ap*Q) + 1) Parameters ---------- Wn : float Center frequency of the filter. For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). dBgain : float The gain at the center frequency, in dB. Positive for boost, negative for cut. Q : float Quality factor of the filter. Examples: * Q = sqrt(2) (default) produces a bandwidth of 1 octave ftype : {'half', 'constant'}, optional Where on the curve to measure the bandwidth of the filter. ``half`` Bandwidth is defined using the points on the curve at which the gain in dB is half of the peak gain. This is the method used in "Cookbook formulae for audio EQ biquad filter coefficients" ``constant`` Bandwidth is defined using the points -3 dB down from the peak gain (or +3 dB up from the cut gain), maintaining constant Q regardless of center frequency or boost gain. This is symmetrical in dB, so that a boost and cut with identical parameters sum to unity gain. This is the method used in "Constant-Q" hardware equalizers. [ref: http://www.rane.com/note101.html] Klark Teknik calls this "symmetrical Q" http://www.klarkteknik.com/faq-06.php constant Q asymmetrical constant Q for both boost and cut, which makes them asymmetrical (not implemented) Half-gain Hybrid Defined symmetrical at half gain point except for 3 dB or less (not implemented) analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'ss'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or state-space ('ss'). Default is 'ba'. 
Notes ----- Due to bilinear transform, this is always 0 dB at fs/2, but it would be better if the curve fell off symmetrically. Orfanidis describes a digital filter that more accurately matches the analog filter, but it is far more complicated. Orfanidis, Sophocles J., "Digital Parametric Equalizer Design with Prescribed Nyquist-Frequency Gain"
625941c267a9b606de4a7e68
def pre_process(self): <NEW_LINE> <INDENT> start_time = time() <NEW_LINE> to_clean = list(self.data_frame.columns) <NEW_LINE> if self.location in to_clean: <NEW_LINE> <INDENT> self.data_frame = self.clean_locations() <NEW_LINE> <DEDENT> if self.person in to_clean: <NEW_LINE> <INDENT> self.data_frame = self.clean_persons() <NEW_LINE> <DEDENT> if self.organization in to_clean: <NEW_LINE> <INDENT> self.data_frame = self.clean_organizations() <NEW_LINE> <DEDENT> if self.tone in to_clean: <NEW_LINE> <INDENT> self.data_frame = self.seperate_tones() <NEW_LINE> <DEDENT> if self.theme in to_clean: <NEW_LINE> <INDENT> self.data_frame = self.clean_themes() <NEW_LINE> <DEDENT> end_time = time() <NEW_LINE> print( '\nTime taken for pre-processing the data --> {:.2f} seconds'.format(end_time - start_time)) <NEW_LINE> return self.data_frame
A wrapper functions that does all the pre-processig. (Except - flattening) **Returns :** pandas.DataFrame A clean and processed pandas DataFrame
625941c2b830903b967e98ba
def __init__( self, output_dir=None, output_fname='statistics.tsv', exp_name=None, log_freq=1, ): <NEW_LINE> <INDENT> self.output_dir = output_dir or '/tmp/experiments/%i' % int(time.time()) <NEW_LINE> if not os.path.exists(self.output_dir): <NEW_LINE> <INDENT> os.makedirs(self.output_dir) <NEW_LINE> <DEDENT> output_filepath = os.path.join(output_dir, output_fname) <NEW_LINE> self.output_file = open(output_filepath, 'w') <NEW_LINE> self.file_writer = csv.writer(self.output_file, delimiter='\t') <NEW_LINE> atexit.register(self.output_file.close) <NEW_LINE> self.exp_name = exp_name <NEW_LINE> self.log_freq = log_freq <NEW_LINE> self.first_row = True <NEW_LINE> self.log_headers = None <NEW_LINE> self.counter = 0
Initialize a new Logger. Args: output_dir (str): The output directory. If `None`, defaults to a directory of the form `/tmp/experiments/some-random-number`. output_fname (str): The output file name. exp_name (str): The name of the experiment. log_freq (int): The log frequency. If non-positive, no statistics will be displayed to the console.
625941c21f037a2d8b9461ab
def set_params(self,pars): <NEW_LINE> <INDENT> pass
Set any parameters from a dictionary. Args: pars: {string}
625941c257b8e32f52483447
def __repr__(self): <NEW_LINE> <INDENT> return '%s%s' % (self.__class__.__name__, { column: value for column, value in self._to_dict().items() })
Define a base way to print models
625941c26aa9bd52df036d50
def update(self, time): <NEW_LINE> <INDENT> self.power_up() <NEW_LINE> self.targetting()
Стандартный апдейт (см docs.txt) :param time: текущее время
625941c2b57a9660fec3382f
def __init__(self, templates, database, emitters, auxiliary_dir, dom_implementation_classes, output_dir): <NEW_LINE> <INDENT> super(HtmlDartiumSystem, self).__init__( templates, database, emitters, output_dir) <NEW_LINE> self._auxiliary_dir = auxiliary_dir <NEW_LINE> self._dom_implementation_classes = dom_implementation_classes
Prepared for generating wrapping implementation. - Creates emitter for Dart code.
625941c2009cb60464c63360
def __generate_icasadi_c_interface(self): <NEW_LINE> <INDENT> self.__logger.info("Generating intercafe.c (C interface)") <NEW_LINE> cint_template = OpEnOptimizerBuilder.__get_template('interface.c', 'icasadi') <NEW_LINE> cint_output_template = cint_template.render( meta=self.__meta, problem=self.__problem, build_config=self.__build_config, timestamp_created=datetime.datetime.now()) <NEW_LINE> cint_icallocator_path = os.path.abspath( os.path.join(self.__icasadi_target_dir(), "extern", "interface.c")) <NEW_LINE> with open(cint_icallocator_path, "w") as fh: <NEW_LINE> <INDENT> fh.write(cint_output_template)
Generates the C interface file interface.c
625941c2627d3e7fe0d68dfc
def __len__(self): <NEW_LINE> <INDENT> return len(self.keys)
:func:`len` of the DataKeySet = :func:`len` of its :attr:`keys`.
625941c25fdd1c0f98dc01e0
@app.route('/states/<state_id>/cities', methods=['GET', 'POST']) <NEW_LINE> def city(state_id): <NEW_LINE> <INDENT> if request.method == 'GET': <NEW_LINE> <INDENT> list = [] <NEW_LINE> for record in City.select().where(City.state == state_id): <NEW_LINE> <INDENT> hash = record.to_hash() <NEW_LINE> list.append(hash) <NEW_LINE> <DEDENT> return jsonify(list) <NEW_LINE> <DEDENT> elif request.method == 'POST': <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> record = City(name=request.form["name"], state=state_id) <NEW_LINE> record.save() <NEW_LINE> return jsonify(record.to_hash()) <NEW_LINE> <DEDENT> except IntegrityError: <NEW_LINE> <INDENT> return json_response( add_status_=False, status_=409, code=10002, msg="City already exists in this state" )
Handle GET and POST requests to /states/<state_id>/cities route. Return a list of all cities in state (according to database) in the case of a GET request. Create a new city record in the given state in the database in the case of a POST request.
625941c238b623060ff0ad9b
def lengthOfLongestSubstring(self, s): <NEW_LINE> <INDENT> if s == '': <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> slist = list(s) <NEW_LINE> sameflag = 0 <NEW_LINE> clist =set() <NEW_LINE> cont =0 <NEW_LINE> mxcont =0 <NEW_LINE> mxindex =len(s)-1 <NEW_LINE> for i in range(len(slist)): <NEW_LINE> <INDENT> j =i <NEW_LINE> sameflag = 0 <NEW_LINE> cont = 0 <NEW_LINE> clist = set() <NEW_LINE> while not sameflag: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if slist[j] in clist: <NEW_LINE> <INDENT> sameflag = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> clist.add(slist[j]) <NEW_LINE> j +=1 <NEW_LINE> cont = cont +1 <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> if mxcont< cont: <NEW_LINE> <INDENT> mxcont = cont <NEW_LINE> <DEDENT> <DEDENT> return mxcont
:type s: str :rtype: int
625941c2cc0a2c11143dce3d
def response_ok(file_type, body): <NEW_LINE> <INDENT> headers = { "Date": email.utils.formatdate(usegmt=True), "Connection": "close", "Content-Length": str(len(body)) } <NEW_LINE> if file_type: <NEW_LINE> <INDENT> headers["Content-Type"] = file_type <NEW_LINE> <DEDENT> response = "HTTP/1.1 200 OK\r\n" <NEW_LINE> for key in headers.keys(): <NEW_LINE> <INDENT> response += key + ': ' + headers[key] + '\r\n' <NEW_LINE> <DEDENT> response += '\r\n' <NEW_LINE> if file_type and file_type[:5] == 'image' and file_type[6:] != 'x-icon': <NEW_LINE> <INDENT> response = b''.join([response.encode('utf8'), body]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> response += body <NEW_LINE> response = response.encode('utf8') <NEW_LINE> <DEDENT> return response
Set up and return 200 response.
625941c294891a1f4081ba56
def _test_forward_logical_and(data): <NEW_LINE> <INDENT> return _test_logical_binary(math_ops.logical_and, data)
One iteration of logical and
625941c2f8510a7c17cf96a9
def __iter__(self): <NEW_LINE> <INDENT> return self
Define the iterator interface.
625941c25fc7496912cc392b
def addcylindrical(self,pos,name = "1", csys = "Global", mergeOff = False, mergeNumber = 0): <NEW_LINE> <INDENT> r, theta ,z = pos <NEW_LINE> return_value, name = self._obj.AddCylindrical(x,theta,z,name,name,csys, mergeOff, mergeNumber) <NEW_LINE> assert return_value == 0 <NEW_LINE> return name
Same as addcartesian but in Cylindrical Coordinates instead. Therefore pos should be in theform (r, theta, z)
625941c2507cdc57c6306c84
def groupAnagrams(self, strs): <NEW_LINE> <INDENT> ans=[] <NEW_LINE> dic={} <NEW_LINE> for i in strs: <NEW_LINE> <INDENT> s=''.join(sorted(i)) <NEW_LINE> if s not in dic: <NEW_LINE> <INDENT> dic[s]=[i] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dic[s].append(i) <NEW_LINE> <DEDENT> <DEDENT> for i in dic.values(): <NEW_LINE> <INDENT> ans.append(i) <NEW_LINE> <DEDENT> return ans
:type strs: List[str] :rtype: List[List[str]]
625941c2167d2b6e31218b44