code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def is_detecting_anything(self):
    """Return True if something is being detected in at least one channel.

    A channel counts as "detecting" when its entry in either
    ``self._prev_level`` or ``self._minmax`` is not ``None``.
    """
    # any() short-circuits and avoids the list materialisation the
    # original len(list(filter(...))) > 0 pattern paid for.
    return (any(level is not None for level in self._prev_level)
            or any(mm is not None for mm in self._minmax))
is detecting something in at least one channel
625941c2b7558d58953c4eb6
def _transform(graph, target, source, excluded_edges=None, ooc_types=ooc_types, **kwargs):
    """Transform ``source`` to the ``target`` type along a path in ``graph``.

    Walks the conversion path edge by edge; when one conversion raises
    NotImplementedError, that edge is excluded and the search restarts.
    """
    excluded_edges = excluded_edges or set()
    value = source
    # Discovering the datashape is best-effort only.
    with ignoring(NotImplementedError):
        if 'dshape' not in kwargs:
            kwargs['dshape'] = discover(value)
    route = path(graph, type(source), target,
                 excluded_edges=excluded_edges, ooc_types=ooc_types)
    try:
        for (A, B, convert) in route:
            value = convert(value, excluded_edges=excluded_edges, **kwargs)
        return value
    except NotImplementedError as e:
        if kwargs.get('raise_on_errors'):
            raise
        print("Failed on %s -> %s. Working around" % (A.__name__, B.__name__))
        print("Error message:\n%s" % e)
        # Retry without the failing edge.
        return _transform(graph, target, source,
                          excluded_edges=excluded_edges | {(A, B)},
                          **kwargs)
Transform source to target type using graph of transformations
625941c2d7e4931a7ee9debb
def apply_edge(self, edge):
    """Apply the isometry to ``edge``.

    Not implemented: currently a no-op returning ``None``.
    """
    return None
Apply isometry to an edge. Not implemented!
625941c221bff66bcd6848f2
def __del_node(self, key):
    """Unlink the node stored under ``key`` and remove it from key_index."""
    node = self.key_index[key]
    # Splice the node out of the doubly linked list.
    node.prev.next = node.next
    node.next.prev = node.prev
    self.key_index.pop(key)
    self.size -= 1
del a node and remove the key in key_index
625941c2e1aae11d1e749c53
def is_prepend(self):
    """Check if the union tag is ``prepend``.

    :rtype: bool
    """
    return 'prepend' == self._tag
Check if the union tag is ``prepend``. :rtype: bool
625941c25f7d997b87174a34
def unprivatize_sel(modeladmin, request, queryset):
    """Mark every selected image as not private and report the outcome.

    Returns the number of rows actually updated.
    """
    selected = len(queryset)
    updated = queryset.update(private=False)
    noun = plural(selected, 'image was', 'images were')
    msg = '{} of {} {} marked as not private.'.format(updated, selected, noun)
    modeladmin.message_user(request, msg, level=messages.SUCCESS)
    return updated
marks selected images as not private.
625941c2e8904600ed9f1ec9
def allocate(self, shared_outputs=None):
    """Allocate the output buffer used to store fprop activations.

    Arguments:
        shared_outputs (Tensor, optional): pre-allocated tensor the
            activations will be computed into.
    """
    super(ParameterLayer, self).allocate(shared_outputs)
    # Lazily create the weights on first allocation.
    if self.W is None:
        self.init_params(self.weight_shape)
    if self.batch_sum_shape is not None:
        self.batch_sum = self.be.empty(self.batch_sum_shape,
                                       dtype=np.float32,
                                       **self.get_param_attrs())
Allocate output buffer to store activations from fprop. Arguments: shared_outputs (Tensor, optional): pre-allocated tensor for activations to be computed into
625941c201c39578d7e74dd9
def continuous_palette_for_color(color, bins=256):
    """Create a continuous color palette based on a single color.

    Args:
        color: name of the Mapbox color to build the palette from.
        bins: number of colors to create.

    Returns:
        A flat list of ``3 * bins`` ints laid out as
        ``[r0, g0, b0, r1, g1, b1, ...]``.
    """
    red, green, blue = (channel / 255 for channel in Mapbox[color].value)
    hue, _sat, val = colorsys.rgb_to_hsv(red, green, blue)
    palette = []
    for step in range(bins):
        # Sweep saturation from 1/bins up to 1 while keeping hue/value fixed.
        saturation = (1 / bins) * (step + 1)
        rgb = colorsys.hsv_to_rgb(hue, saturation, val)
        palette.extend(int(channel * 255) for channel in rgb)
    assert len(palette) // 3 == bins
    return palette
Creates a continuous color palette based on a single color. Args: color: the rgb color tuple to create a continuous palette for. bins: the number of colors to create in the continuous palette. Returns: The continuous rgb color palette with 3*bins values represented as [r0,g0,b0,r1,g1,b1,..]
625941c230dc7b7665901906
def calculate_woe(data, independent_var, dependent_var, is_continuous=None, category_count=10):
    """Calculate weight of evidence (WoE) of an independent variable
    against a binary dependent variable.

    :param data: dataframe containing both columns (the independent column
        is mutated in place when it gets binned)
    :param independent_var: variable whose WoE is calculated
    :param dependent_var: binary target variable (0 = good, 1 = bad)
    :param is_continuous: Default None; whether independent_var is continuous
    :param category_count: Default 10; number of bins derived when the
        independent variable is continuous
    :return: dict with lists under 'category', 'goods_count', 'bads_count',
        'goods_percentage', 'bads_percentage', 'woe' and 'iv'
    :raises Exception: if the target column contains only one class
    """
    total_bads = data[dependent_var].sum()
    total_goods = len(data) - total_bads
    if total_bads == 0 or total_goods == 0:
        raise Exception('Target variable does not contain two classes. ')
    # Bin when explicitly continuous, or when the dtype looks continuous.
    # NOTE: the original used the ``np.float`` alias, removed in
    # NumPy >= 1.24 (AttributeError); np.floating is the supported check.
    if is_continuous or np.issubdtype(data[independent_var].dtype, np.floating):
        data[independent_var] = encode_continuous_column(
            data[independent_var], category_count=category_count)
    pivot = pd.pivot_table(data, index=independent_var,
                           columns=dependent_var, aggfunc='count')
    feature_uniques = data[independent_var].unique()
    values = {'category': [], 'goods_count': [], 'bads_count': [],
              'goods_percentage': [], 'bads_percentage': [],
              'woe': [], 'iv': []}
    for category in feature_uniques:
        values['category'].append(category)
        # Positional access (the counts for target==0 / target==1);
        # .iloc avoids the deprecated integer-label Series lookup.
        goods_count = pivot.loc[category].iloc[0]
        bads_count = pivot.loc[category].iloc[1]
        values['goods_count'].append(goods_count)
        values['bads_count'].append(bads_count)
        goods_percentage = goods_count / total_goods
        bads_percentage = bads_count / total_bads
        values['goods_percentage'].append(goods_percentage)
        values['bads_percentage'].append(bads_percentage)
        woe = np.log(goods_percentage / bads_percentage)
        values['woe'].append(woe)
        values['iv'].append(woe * (goods_percentage - bads_percentage))
    return values
Calculates weight of evidence of a independent variable against a dependent variable :param data: dataframe which contains feature a :param independent_var: variable whose woe needs to be calculated :param dependent_var: target variable :param is_continuous: Default None; Boolean indicating whether the independent_var passed in categorical or continuous :param category_count: Default 10; If the independent variable is continuous, this parameter defines the number of categories to derive from the variable :return: dictionary containing woe and iv scores under key 'woe' and 'iv 'of the independent variable
625941c221bff66bcd6848f3
def test_factor3_roundtrip(self):
    """Upsampling then downsampling an image by a factor of 3 must round-trip."""
    roundtripped = downsample(upsample(DATA1, 3), 3)
    assert_allclose(roundtripped, DATA1, rtol=0, atol=1e-6)
Test roundtrip of upsampling then downsampling of an image by a factor of 3.
625941c24527f215b584c3f8
def _get_node_cost_absolute(self, prev_node_cost, first_word_score, sec_word_score):
    """Node scoring function 'absolute'.

    The score is the negated second-word score; ``prev_node_cost`` and
    ``first_word_score`` are ignored by this strategy.
    """
    return -sec_word_score
Implements the node scoring function absolute.
625941c2be8e80087fb20be4
@staff_member_required
def delete_correction(request, id):
    """Delete the catalog correction with the given id.

    Unknown ids are ignored; in both cases the user is redirected back
    to the corrections listing.
    """
    try:
        correction = CatalogCorrection.objects.get(id=id)
    except ObjectDoesNotExist:
        return redirect(reverse("catalog_corrections"))
    correction.delete()
    return redirect(reverse("catalog_corrections"))
Deletes the given catalog correction.
625941c20383005118ecf582
def test_answer_topic(self):
    """Posting an answer on a topic creates a subscription for the poster."""
    topic = TopicFactory(forum=self.forum11, author=self.user2)
    PostFactory(topic=topic, author=self.user2, position=1)
    response = self.client.post(
        reverse("post-new") + f"?sujet={topic.pk}",
        {
            "last_post": topic.last_message.pk,
            "text": "C'est tout simplement l'histoire de la ville de Paris que je voudrais vous conter ",
        },
        follow=False,
    )
    self.assertEqual(response.status_code, 302)
    # The topic author (user2) receives an unread notification for the topic.
    notification = Notification.objects.get(subscription__user=self.user2)
    subscription_content_type = ContentType.objects.get_for_model(topic)
    self.assertEqual(notification.is_read, False)
    self.assertEqual(notification.subscription.content_type, subscription_content_type)
    self.assertEqual(notification.subscription.object_id, topic.pk)
    # The poster (user1) ends up with an active subscription to the topic.
    subscription = TopicAnswerSubscription.objects.get(
        object_id=topic.pk, content_type__pk=subscription_content_type.pk, user=self.user1
    )
    self.assertTrue(subscription.is_active)
When a user posts on a topic, a subscription to the said topic is created for this user.
625941c207d97122c4178826
def _tratar_creditos(pacote):
    """Receive and process a credits packet.

    The remote user signals readiness to receive data by sending credits
    for a given connection; add them to the connection and wake any
    processes that were waiting for credits.
    """
    dados = pacote.obterDados()
    if camada_rede.CREDITOS not in dados:
        _log.logar('Erro: Dados do pacote estao imcompletos, falta o campo ' + camada_rede.CREDITOS)
        return
    con_id = pacote.obterIdConexao()
    con = _pool_conexoes.obterConexao(con_id)
    con.setCreditos(con.obterCreditos() + dados[camada_rede.CREDITOS])
    # Wake anything blocked waiting for credits on this connection.
    if con_id in _esperandoCreditos:
        _acordar_processos(con_id, _esperandoCreditos)
    _pool_conexoes.liberarConexao(con)
Recebe e processa um pacote de creditos, indicando que o usuario remoto deseja receber dados e envio creditos para uma determinada conexao
625941c2b830903b967e98ab
def get_matching_xpath_count(self, xpath):
    """Return the number of elements matching `xpath`, as a string.

    One should not use the xpath= prefix for 'xpath'; XPath is assumed.
    Correct:   | count = | Get Matching Xpath Count | //div[@id='sales-pop']
    Incorrect: | count = | Get Matching Xpath Count | xpath=//div[@id='sales-pop']
    To assert the number of matching elements, use `Xpath Should Match X Times`.
    """
    matches = self.apu._element_find("xpath=" + xpath, False, False)
    return str(len(matches))
Returns number of elements matching `xpath` One should not use the xpath= prefix for 'xpath'. XPath is assumed. Correct: | count = | Get Matching Xpath Count | //div[@id='sales-pop'] Incorrect: | count = | Get Matching Xpath Count | xpath=//div[@id='sales-pop'] If you wish to assert the number of matching elements, use `Xpath Should Match X Times`.
625941c2e64d504609d747de
def _run_varying_temperature(
        self,
        initial_temperature: float,
        final_temperature: float,
        num_steps: int,
        temperature_change_mode: str = "relative",
        temperature_advance_on: str = "accepted",
        max_consecutive_rejects: Union[int, float, None] = 5000,
        early_stop_efficiency: Optional[float] = None
) -> None:
    """Run Metropolis steps while varying the temperature.

    The temperature follows either a linear ("absolute") or geometric
    ("relative") schedule, advancing on every step or only on accepted
    steps depending on ``temperature_advance_on``.

    Parameters
    ----------
    initial_temperature : float
        Initial temperature, must be non-negative.
    final_temperature : float
        Final temperature, must be non-negative.
    num_steps : int
        Number of total steps (advance on 'all') or accepted steps
        (advance on 'accepted').
    temperature_change_mode : str, optional
        Change the temperature in "relative" or "absolute" terms,
        by default "relative".
    temperature_advance_on : str, optional
        Advance after 'all' steps or only after 'accepted' steps.
    max_consecutive_rejects : int, optional
        Stop early after this many consecutive rejected MC steps;
        only honoured in "absolute" mode. Defaults to 5000.
    early_stop_efficiency : float, optional
        Stop once the best efficiency reaches this value.
    """
    if early_stop_efficiency is None:
        early_stop_efficiency = float("inf")
    if temperature_advance_on not in ["all", "accepted", None]:
        raise RuntimeError(
            f"advance_on keyword must be one of 'all' or 'accepted' ")
    if temperature_change_mode == "absolute":
        schedule = list(np.linspace(
            initial_temperature, final_temperature, num=num_steps + 1))
        if max_consecutive_rejects is None:
            raise RuntimeError(
                f"Please set the max_consecutive_rejects keyword.")
        if max_consecutive_rejects < 5:
            raise RuntimeError(f"Keyword max_consecutive_rejects was set to {max_consecutive_rejects},"
                               "which is suspisciously low.")
    elif temperature_change_mode == "relative":
        schedule = list(np.geomspace(
            initial_temperature, final_temperature, num=num_steps + 1))
        # Reject counting is disabled for the geometric schedule.
        max_consecutive_rejects = float("inf")
    else:
        raise RuntimeError(
            f"Do not know how to set temperatures with temperature_change_mode = {temperature_change_mode}."
            "Currently known modes: absolute, relative")
    progress = tqdm(total=len(schedule) - 1, disable=not self.progress_bar)
    temperature = schedule.pop(0)
    while (schedule
           and self._consecutive_rejects < max_consecutive_rejects
           and self.best_efficiency < early_stop_efficiency
           and self.is_valid):
        if temperature_advance_on == "all" or self._last_n_accepted[-1]:
            temperature = schedule.pop(0)
            progress.update(1)
        self._step(temperature=temperature)
        progress.set_postfix(ordered_dict={
            "Temperature": temperature,
            "Efficiency": self.actuator.efficiency,
            "Acc. rate": np.mean(self._last_n_accepted),
        })
Run metropolis while varying the temperature. Implements different ways of varying the temperature: either linear or geometric changes, which occur either at all steps or only at accepted steps. Parameters ---------- initial_temperature : float Initial temperature, must be non-negative. final_temperature : float Final temperature, must be non-negative. num_steps : int Number of total steps when advance_on ='all' or number of accepted steps when advance_on='accepted'. temperature_change : str, optional Make temperature changes in relative or absolute terms, by default "relative". temperature_advance_on : str, optional Change the temperature after either 'all' steps or only after 'accepted' steps, by default "all". max_consecutive_rejects : int, optional Prematurely stop the simulation after n consecutive rejected MC steps. Ignored if temperature_advance_on is set to 'all'. Defaults to 5000.
625941c2091ae35668666f00
@pytest.mark.timeout(1)
def test_flag_wait(flag):
    """A worker thread waiting on the flag is released by set() calls."""
    def waiter(f):
        f.wait(0.01, 'start', 'stop')
    worker = Thread(target=waiter, args=(flag,))
    worker.start()
    flag.set('start')
    sleep(0.02)
    flag.set('stop')
    sleep(0.02)
    worker.join()
Test asking a thread to wait on a flag.
625941c2507cdc57c6306c75
def count_demo():
    """Demonstrate text feature extraction with CountVectorizer.

    :return: None
    """
    samples = ["life is short,i like like python", "life is too long,i dislike python"]
    vectorizer = CountVectorizer(stop_words=["is", "too"])
    matrix = vectorizer.fit_transform(samples)
    print("data_new:\n", matrix.toarray())
    # NOTE(review): get_feature_names was removed in scikit-learn 1.2 in
    # favour of get_feature_names_out — confirm the pinned sklearn version.
    print("特征名字:\n", vectorizer.get_feature_names())
    return None
文本特征抽取:CountVectorizer :return:
625941c2de87d2750b85fd2f
def add_lock(callback):
    """Lock decorator for callback functions.

    Very helpful for running ROS offline with bag files: the lock forces
    callbacks to run sequentially, so matplotlib plots etc. stay usable.
    """
    @wraps(callback)
    def lock_callback(*args, **kwargs):
        if offline:
            # Acquire the shared event "lock" for the whole callback.
            callback_lock_event.wait()
            callback_lock_event.clear()
            callback(*args, **kwargs)
            callback_lock_event.set()
        else:
            callback(*args, **kwargs)
    return lock_callback
Lock decorator for callback functions, which is very helpful for running ROS offline with bag files. The lock forces callback functions sequentially, so we can show matplotlib plot, etc.
625941c28c0ade5d55d3e958
def count_dates_from_messages(messages: List[Message]) -> Tuple[Dates, Frequencies]:
    """Return the distinct message dates and the frequency of each date.

    :param messages: list of messages, each carrying a ``'date'`` timestamp
    :return: ``(dates, frequencies)`` where ``frequencies[i]`` is the number
        of messages dated ``dates[i]``; dates appear in ascending order.
    """
    counts = Counter(sorted(fromtimestamp(message['date']) for message in messages))
    # Counter preserves (sorted) insertion order, so its keys/values line up.
    # The original rebuilt these lists with repeated list(c) conversions
    # inside a comprehension, which was quadratic in the number of dates.
    return list(counts), list(counts.values())
Получить список дат и их частот :param messages: список сообщений
625941c27d847024c06be258
def _getstartrng(ratetype):
    """Access the range within startlevel for ``ratetype`` in VIBRONICDICT."""
    # Index 4 of the entry holds the start-level range.
    entry = VIBRONICDICT[ratetype]
    return entry[4]
Access range within startlevel for ratetype in VIBRONICDICT.
625941c20a50d4780f666e2f
def exbookmarks(orig, ui, repo, *args, **opts):
    """Wrap the bookmarks command with tracking / remote-name support.

    Bookmark output is sorted by bookmark name, which has the side
    benefit of grouping all remote bookmarks by remote name.
    """
    delete = opts.get("delete")
    rename = opts.get("rename")
    inactive = opts.get("inactive")
    remote = opts.get("remote")
    subscriptions = opts.get("list_subscriptions")
    track = opts.get("track")
    untrack = opts.get("untrack")
    disallowed = set(ui.configlist("remotenames", "disallowedbookmarks"))
    # With no bookmark arguments, (un)track applies to the active bookmark.
    if not args and (track or untrack):
        active = repo._bookmarks.active
        if active:
            args = (active,)
    if not delete:
        for name in args:
            if name in disallowed:
                msg = _("bookmark '%s' not allowed by configuration")
                raise error.Abort(msg % name)
    if untrack:
        if track:
            msg = _("do not specify --untrack and --track at the same time")
            raise error.Abort(msg)
        _removetracking(repo, args)
        return
    if delete or rename or args or inactive:
        if delete and track:
            msg = _("do not specifiy --track and --delete at the same time")
            raise error.Abort(msg)
        ret = orig(ui, repo, *args, **opts)
        oldtracking = _readtracking(repo)
        tracking = dict(oldtracking)
        # A rename moves the tracked upstream to the new name(s).
        if rename and not track:
            if rename in tracking:
                tracked = tracking[rename]
                del tracking[rename]
                for arg in args:
                    tracking[arg] = tracked
        if track:
            for arg in args:
                tracking[arg] = track
        if delete:
            for arg in args:
                if arg in tracking:
                    del tracking[arg]
        if tracking != oldtracking:
            _writetracking(repo, tracking)
            # Tracking changes invalidate the cached distances.
            precachedistance(repo)
        return ret
    fm = ui.formatter("bookmarks", opts)
    if not remote and not subscriptions:
        displaylocalbookmarks(ui, repo, opts, fm)
    if _isselectivepull(ui) and remote:
        other = _getremotepeer(ui, repo, opts)
        if other is None:
            displayremotebookmarks(ui, repo, opts, fm)
        else:
            remotebookmarks = other.listkeys("bookmarks")
            _showfetchedbookmarks(ui, other, remotebookmarks, opts, fm)
    elif remote or subscriptions or opts.get("all"):
        displayremotebookmarks(ui, repo, opts, fm)
    fm.end()
Bookmark output is sorted by bookmark name. This has the side benefit of grouping all remote bookmarks by remote name.
625941c2656771135c3eb80b
def parse_punctuation(cmd, word):
    """Split ``word`` into (leading punctuation, core word, trailing punctuation).

    Behaviour is driven by the ``cmd`` option dict: newline truncation,
    punctuation preservation, and voiding of outer/inner characters.
    """
    prefix = ""
    suffix = ""
    if cmd["soft_truncate_newlines"] or cmd["hard_truncate_newlines"]:
        word = word.strip()
    if cmd["preserve_punctuation"]:
        # Peel punctuation off the front...
        cutoff = get_punctuation_point(word, 0, 1)
        if not cmd["void_outer"]:
            prefix = word[:cutoff]
        word = word[cutoff:]
        # ...then off the back.
        cutoff = get_punctuation_point(word, len(word) - 1, -1)
        cutoff += 1
        if not cmd["void_outer"]:
            suffix = word[cutoff:]
        elif word and word[-1] == "\n":
            # Even when voiding, a trailing newline is kept.
            suffix += word[-1]
        word = word[:cutoff]
    elif word:
        if word[-1] == "\n":
            word = word[:-1]
            suffix = "\n"
    if cmd["void_inner"]:
        # Strip everything that is not alphanumeric from the core word.
        word = "".join(ch for ch in word if ch.isalnum())
    return prefix, word, suffix
Return the punctuation before the word, the stripped down word, and the punctuation after the word
625941c25fdd1c0f98dc01d1
def compress_list(L):
    """(list of str) -> list of str

    Return a new list with adjacent pairs of string elements from L
    concatenated together, starting with indices 0 and 1, 2 and 3, and so on.

    Precondition: len(L) >= 2 and len(L) % 2 == 0

    >>> compress_list(['a', 'b', 'c', 'd'])
    ['ab', 'cd']
    """
    return [L[j] + L[j + 1] for j in range(0, len(L), 2)]
(list of str) -> list of str Return a new list with adjacent pairs of string elements from L concatenated together, starting with indices 0 and 1, 2 and 3,and so on. Precondition: len(L) >= 2 and len(L) % 2 == 0 >>> compress_list(['a', 'b', 'c', 'd']) ['ab', 'cd']
625941c2be7bc26dc91cd5a2
def partition(self, s):
    """Return all palindrome partitionings of ``s``.

    :type s: str
    :rtype: List[List[str]]
    """
    # All the work is delegated to the recursive helper.
    return self.helper(s)
:type s: str :rtype: List[List[str]]
625941c22eb69b55b151c84c
def test_layers_use_unique_data_altair(self):
    """Each layer of the visualization should use a unique slice of the
    data for plotting CDFs."""
    filterings = set()
    chart_dict = self.chart_altair.to_dict()
    for layer in chart_dict["layer"]:
        # Default to an empty tuple: the original defaulted to None and
        # crashed with TypeError on layers without a "transform" entry.
        transforms = layer.get("transform", ())
        current_filtering = tuple(
            transform for transform in transforms if "filter" in transform
        )
        current_filtering = (
            current_filtering[0]["filter"] if len(current_filtering) > 0 else None
        )
        self.assertTrue(current_filtering not in filterings)
        filterings.add(current_filtering)
Each layer of the visualization should use a unique slice of the data for plotting CDFs.
625941c2851cf427c661a4b0
def clim_iter(self):
    """Iterate through the climate values.

    args:
        None
    returns:
        An iterator over the climate values.
    """
    # Load lazily on first use.
    if not self.climateinfo:
        self.load_clim()
    # Snapshot the keys so mutation during iteration is harmless.
    for key in list(self.climateinfo.keys()):
        yield self.climateinfo[key]
Iterate though climate values args: None returns: Return an iterator over the climate values
625941c2566aa707497f450b
def run(self):
    """Apply the training procedure for a QN: initialize, then train."""
    self.initialize()
    self.train()
Apply procedures of training for a QN Args: exp_schedule: exploration strategy for epsilon lr_schedule: schedule for learning rate
625941c2a219f33f3462890b
def init_headers(self, context, datasets_since):
    """Create a header generator for `context` from the command line args.

    Exactly one primary source is chosen: --files, --datasets,
    --instruments, or --load-pickles; pickles may additionally override
    the primary parameter sets.
    """
    if self.args.files:
        the_headers = headers.FileHeaderGenerator(context, self.files, datasets_since)
    elif self.args.datasets:
        self.require_server_connection()
        the_headers = headers.DatasetHeaderGenerator(
            context, [dset.upper() for dset in self.args.datasets], datasets_since)
        log.info("Computing bestrefs for datasets", repr(self.args.datasets))
    elif self.instruments:
        self.require_server_connection()
        log.info("Computing bestrefs for db datasets for", repr(list(self.instruments)))
        the_headers = headers.InstrumentHeaderGenerator(
            context, self.instruments, datasets_since, self.args.save_pickle,
            self.server_info)
    elif self.args.load_pickles:
        # Pickles alone; the generator is created below.
        the_headers = None
    else:
        log.error("Invalid header source configuration. "
                  "Specify --files, --datasets, --instruments, --all-instruments, or --load-pickles.")
        self.print_help()
        sys.exit(-1)
    if self.args.load_pickles:
        self.pickle_headers = headers.PickleHeaderGenerator(
            context, self.args.load_pickles, only_ids=self.only_ids,
            datasets_since=datasets_since)
        if the_headers:
            log.verbose("Augmenting primary parameter sets with pickle overrides.")
            the_headers.update_headers(self.pickle_headers.headers, only_ids=self.only_ids)
        else:
            log.verbose("Computing bestrefs solely from pickle files:", repr(self.args.load_pickles))
            the_headers = self.pickle_headers
    return the_headers
Create header a header generator for `context`, interpreting command line parameters.
625941c216aa5153ce362417
def unmanage(self):
    """Set this instance's unmanaged time to now() if it is managed.

    @return self
    """
    if self.is_managed():
        # NOTE(review): utcnow() returns a naive datetime and is deprecated
        # since Python 3.12; datetime.now(timezone.utc) would be the aware
        # replacement — confirm callers tolerate aware values first.
        self.unmanaged_at = datetime.utcnow()
    return self
Set this instance's unmanaged time to now() @return self
625941c229b78933be1e564e
def test_05(self):
    """Test Case 05: two get_instance() references of a decorated
    old-style class without arguments must identify the same object."""
    first = OldStyleClassWithoutArgs.get_instance()
    second = OldStyleClassWithoutArgs.get_instance()
    self.assertEqual(id(first), id(second))
Test Case 05: Try getting two instance references of a decorated old-style class without arguments. Test is passed if both references identify the same object.
625941c23317a56b86939bfc
def compute_rmsd(ccdata1, ccdata2):
    """Compute RMSD between two molecules.

    :returns: (rmsd, maxdisplacement)
    """
    natom = ccdata1.natom
    sq_sum = 0.0
    maxdiff = 0.0
    for i in range(natom):
        diff = norm(ccdata1.atomcoords[i] - ccdata2.atomcoords[i])
        # True RMSD averages *squared* displacements; the original summed
        # the displacements themselves, i.e. mean absolute displacement.
        sq_sum += diff * diff
        maxdiff = max(maxdiff, diff)
    rmsd = (sq_sum / natom) ** 0.5
    return rmsd, maxdiff
Compute RMSD between two molecules :returns: (rmsd, maxdisplacement)
625941c2097d151d1a222dfa
def get_func_help(bot, cmd, func):
    """Get a custom help message from memory or parse the func docstring.

    Args:
        bot (hangupsbot.core.HangupsBot): the running instance
        cmd (str): an existing bot-command
        func (mixed): function or coroutine, the command function

    Returns:
        str: the custom message or the parsed doc string
    """
    try:
        return bot.memory.get_by_path(['command_help', cmd]).format(
            bot_cmd=bot.command_prefix)
    except KeyError:
        pass
    docstring = func.__doc__ if "__doc__" in dir(func) and func.__doc__ else None
    if docstring is None:
        return "_{}_".format(_("command help not available"))
    docstring = docstring.strip()
    # Normalise bullet points, unwrap hard line breaks, collapse runs of
    # spaces and blank lines, then substitute the live command prefix for
    # the "/bot" placeholder.
    docstring = re.sub(r'\n +\* +', '\n* ', docstring)
    docstring = re.sub(r"(?<!\n)\n(?= *[^ \t\n\r\f\v\*])", " ", docstring)
    docstring = re.sub(r" +", " ", docstring)
    docstring = re.sub(r" *\n\n+ *(?!\*)", "\n\n", docstring)
    docstring = re.sub(r"(?<!\S)\/bot(?!\S)", bot.command_prefix, docstring)
    return docstring
get a custom help message from memory or parse the doc string of the func Args: bot (hangupsbot.core.HangupsBot): the running instance cmd (str): an existing bot-command func (mixed): function or coroutine, the command function Returns: str: the custom message or the parsed doc string
625941c26e29344779a625b3
def start_tailer(self):
    """Start a background thread that tails the remote log file.

    :return: None
    """
    __logger__.debug("Remote Tailer: Launching thread to capture logs")
    self.thread = threading.Thread(
        target=self._read_tailer,
        args=[self.tailer, self.local_capture_file_descriptor])
    self.thread.start()
    # Give the tailer a moment to attach before callers continue.
    __logger__.debug("Delay timer before starting: " + str(TIMER_DELAY_PERIOD))
    time.sleep(TIMER_DELAY_PERIOD)
This method starts a new thread to execute a tailing on the remote log file :return: None
625941c2711fe17d8254230e
def gower_normalization(K, out=None):
    """Perform Gower normalization on covariance matrix K.

    The rescaled covariance matrix has sample variance of 1.  When ``out``
    is provided the result is written into it and ``None`` is returned,
    mirroring NumPy's in-place conventions.
    """
    n = K.shape[0]
    scale = (n - 1) / (K.trace() - K.mean(0).sum())
    if out is None:
        return scale * K
    copyto(out, K)
    out *= scale
Perform Gower normalizion on covariance matrix K. The rescaled covariance matrix has sample variance of 1.
625941c25510c4643540f388
def forward(self, pointcloud : torch.Tensor, one_hot : torch.Tensor):
    """Forward pass of the network.

    Parameters
    ----------
    pointcloud : Variable(torch.cuda.FloatTensor)
        (B, N, 3 + input_channels) tensor; each point MUST be formatted
        as (x, y, z, features...).
    one_hot : torch.Tensor
        shape (B, num_cls) class one-hot vector.

    Returns
    -------
    feats : (B, 128, N)
    prob : (B, 2, N) between (0, 1)
    """
    xyz, features = self._break_up_pc(pointcloud)
    l_xyz, l_features = [xyz], [features]
    # Set-abstraction encoder: each module consumes the previous level.
    for sa_module in self.SA_modules:
        new_xyz, new_features = sa_module(l_xyz[-1], l_features[-1])
        l_xyz.append(new_xyz)
        l_features.append(new_features)
    # Inject the class one-hot at the deepest level before decoding.
    one_hot = one_hot.unsqueeze(2)
    l3_points = torch.cat([l_features[3], one_hot], dim=1)
    # Feature-propagation decoder back to per-point resolution.
    l2_points = self.FP_modules[2](l_xyz[2], l_xyz[3], l_features[2], l3_points)
    l1_points = self.FP_modules[1](l_xyz[1], l_xyz[2], l_features[1], l2_points)
    l0_points = self.FP_modules[0](l_xyz[0], l_xyz[1], l_features[0], l1_points)
    feats = self.features(l0_points)
    logits = self.fc_lyaer(feats)
    return feats, F.softmax(logits, dim=1)
Forward pass of the network Parameters ---------- pointcloud: Variable(torch.cuda.FloatTensor) (B, N, 3 + input_channels) tensor Point cloud to run predicts on Each point in the point-cloud MUST be formated as (x, y, z, features...) one_hot: shape (B, num_cls) return --------- feats : (B, 128, N) prob : (B, 2, N) between (0, 1)
625941c294891a1f4081ba47
def fmris(self, last=False, objects=True, ordered=False, pubs=EmptyI):
    """Generate the FMRIs contained in this catalog part.

    'last' — only yield the last fmri for each package on a per-publisher
    basis (the newest version, provided the part was saved or sorted
    since the last modifying operation).

    'objects' — yield FMRI objects when True, strings when False.

    'ordered' — sort by stem, then publisher, in descending version
    order; otherwise ascending version order per publisher/stem.

    'pubs' — optional list of publisher prefixes to restrict results to.
    """
    entries = self.__iter_entries(last=last, ordered=ordered, pubs=pubs)
    if objects:
        for pub, stem, entry in entries:
            yield fmri.PkgFmri(name=stem, publisher=pub,
                               version=entry["version"])
    else:
        for pub, stem, entry in entries:
            yield "pkg://{0}/{1}@{2}".format(pub, stem, entry["version"])
A generator function that produces FMRIs as it iterates over the contents of the catalog part. 'last' is a boolean value that indicates only the last fmri for each package on a per-publisher basis should be returned. As long as the CatalogPart has been saved since the last modifying operation, or sort() has has been called, this will also be the newest version of the package. 'objects' is an optional boolean value indicating whether FMRIs should be returned as FMRI objects or as strings. 'ordered' is an optional boolean value that indicates that results should sorted by stem and then by publisher and be in descending version order. If False, results will be in a ascending version order on a per-publisher, per-stem basis. 'pubs' is an optional list of publisher prefixes to restrict the results to. Results are always in catalog version order on a per- publisher, per-stem basis.
625941c24a966d76dd550fad
def _make_request(self, url, params=None):
    """Make the actual API call to CrunchBase.

    Raises for non-2xx responses; otherwise returns the response object.
    """
    response = requests.get(self._build_url(url, params))
    response.raise_for_status()
    return response
Makes the actual API call to CrunchBase
625941c2b545ff76a8913db5
def main():
    """Main function to run program: load, clean, summarise, export, plot."""
    df = import_csv_to_df(DATAFILENAME)
    df = strip_whitespace(df)
    df = blankety_stripper(df)
    grouped_cols = get_groupings(df)
    summaries = get_counts(df, grouped_cols)
    # One CSV per univariate summary, named after its key.
    for name, summary in summaries.items():
        export_to_csv(summary, STOREFILENAME + 'summary_csvs/', name)
    plot_basic_seaborn(summaries)
Main function to run program
625941c2d18da76e23532473
def update_asso_table(kwargs, searchcnd):
    """Update the association table (stub; not yet implemented).

    :param kwargs: data to be updated, type: dict
    :param searchcnd: search condition locating the record, type: dict
    """
    pass
parameters: :kwargs: the data to be updated :type dict :searchcnd: the search condition that locates the record, type: dict
625941c2d10714528d5ffc80
def use_federated_bank_view(self):
    """Federates the view for methods in this session.

    A federated view will include assessments taken in banks which are
    children of this bank in the bank hierarchy.

    *compliance: mandatory -- This method is must be implemented.*
    """
    raise errors.Unimplemented()
Federates the view for methods in this session. A federated view will include assessments taken in banks which are children of this bank in the bank hierarchy. *compliance: mandatory -- This method is must be implemented.*
625941c2ff9c53063f47c193
def buscarCalle(self, calle, limit=0):
    """Search streets whose name matches ``calle``; return Calle instances.

    Results are ranked in four buckets: exact name matches, exact
    token-set matches, token-subset matches, then regex matches.  The
    last query/result pair is memoised in ``self.minicache``.

    @param calle: string to match
    @type calle: String
    @param limit: maximum number of results to return; zero means unlimited
    @type limit: Integer
    @return: list of Calle instances that matched ``calle``
    @rtype: Array de Calle
    """
    # Serve repeated queries straight from the one-entry cache.
    if self.minicache[0] == calle:
        return self.minicache[1] if limit == 0 else self.minicache[1][:limit]
    buckets = [[], [], [], []]
    normalized = normalizarTexto(calle, separador=' ', lower=False)
    tokens = list(set(normalized.split(' ')))
    tokens.sort(key=len, reverse=True)
    patterns = [re.compile(r'^{0}| {1}'.format(re.escape(t), re.escape(t)))
                for t in tokens]
    token_set = set(tokens)
    for data in self.data:
        if calle == data[1]:
            buckets[0].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
        elif token_set == data[6]:
            buckets[1].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
        elif token_set == token_set & data[6]:
            buckets[2].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
        elif all(p.search(data[2]) is not None for p in patterns):
            buckets[3].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
    results = buckets[0] + buckets[1] + buckets[2] + buckets[3]
    self.minicache = [calle, results]
    return results if limit == 0 else results[:limit]
Busca calles cuyo nombre se corresponda con calle y devuelve un array con todas las instancias de Calle halladas @param calle: String a matchear @type calle: String @param limit: Maximo numero de respuestas a devolver. Cero es sin limite. @type limit: Integer @return: Array de instancias de Calle que matchearon calle @rtype: Array de Calle
625941c215baa723493c3f13
def __init__(self, numReqArgs=0, **kwargs): <NEW_LINE> <INDENT> optparse.OptionParser.__init__(self, **kwargs) <NEW_LINE> self.numRequiredArgs = numReqArgs <NEW_LINE> self.add_option('-o', '--outfn', action='store', type='string', help='output filename [default: stdout]') <NEW_LINE> self.add_option('-l', '--logfile', action='store', type='string', default=r'C:\tmp\pylog') <NEW_LINE> self.add_option('-m', '--logmode', action='store', type='string', default='a') <NEW_LINE> self.add_option('-v', '--loglevel', action='store', type='int', default=logging.WARNING) <NEW_LINE> return
Method: __init__ Input: self - this GenOptionParser numReqArgs - number of required arguments kwargs - dict of additional keyword arguments Output: self - a new GenOptionParser Functionality: constructor
625941c245492302aab5e260
def validate(self, outputfile): <NEW_LINE> <INDENT> with open(outputfile, 'w', encoding='utf-8') as f: <NEW_LINE> <INDENT> print('*******************************************') <NEW_LINE> print('Validate ', self._filename) <NEW_LINE> print('*******************************************') <NEW_LINE> f.write('*******************************************' + '\n') <NEW_LINE> f.write('Validate ' + self._filename + '\n') <NEW_LINE> f.write('*******************************************' + '\n') <NEW_LINE> f.write('Validate Nodule Keywords:' + '\n') <NEW_LINE> for key in self._noduleKey: <NEW_LINE> <INDENT> f.write(key + ', ') <NEW_LINE> <DEDENT> f.write('\n') <NEW_LINE> f.write('*******************************************' + '\n') <NEW_LINE> f.write('Validate Verified Keywords:' + '\n') <NEW_LINE> for key in self._verifiedKey: <NEW_LINE> <INDENT> f.write(key + ', ') <NEW_LINE> <DEDENT> f.write('\n') <NEW_LINE> f.write('*******************************************' + '\n') <NEW_LINE> if self._count == 0: <NEW_LINE> <INDENT> print("No Nodule for ", self._filename) <NEW_LINE> f.write("No Nodule for " + self._filename + '\n') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self._count == 1: <NEW_LINE> <INDENT> self._obj['Nodules']['item'] = [self._obj['Nodules']['item']] <NEW_LINE> <DEDENT> for child in self._obj['Nodules']['item']: <NEW_LINE> <INDENT> if 'Label' in child: <NEW_LINE> <INDENT> print('Validate Nodule ', str(child['Label'])) <NEW_LINE> f.write('Validate Nodule ' + str(child['Label']) + '\n') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Error ! No Label!') <NEW_LINE> f.write('Error ! No Label!' + '\n') <NEW_LINE> raise KeyError <NEW_LINE> <DEDENT> for key in self._noduleKey: <NEW_LINE> <INDENT> if key not in child: <NEW_LINE> <INDENT> print('Error ! No ', key, ' in Nodule') <NEW_LINE> f.write('Error ! 
No ' + key + ' in Nodule' + '\n') <NEW_LINE> raise KeyError <NEW_LINE> <DEDENT> <DEDENT> print('-----------------------------------') <NEW_LINE> f.write('-----------------------------------' + '\n') <NEW_LINE> if 'VerifiedNodule' in child: <NEW_LINE> <INDENT> if 'labelIndex' in child['VerifiedNodule']: <NEW_LINE> <INDENT> print('Validate Verified ', str(child['VerifiedNodule']['labelIndex'])) <NEW_LINE> f.write('Validate Verified ' + str(child['VerifiedNodule']['labelIndex']) + '\n') <NEW_LINE> if int(child['VerifiedNodule']['labelIndex']) != int(child['Label']): <NEW_LINE> <INDENT> print('Error ! Verified Index is not equal to Nodule Index !') <NEW_LINE> f.write('Error ! Verified Index is not equal to Nodule Index !' + '\n') <NEW_LINE> raise KeyError <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for key in self._verifiedKey: <NEW_LINE> <INDENT> if key not in child['VerifiedNodule']: <NEW_LINE> <INDENT> print('Error ! No ', key, ' in VerifiedNodule') <NEW_LINE> f.write('Error ! No ' + key + ' in VerifiedNodule' + '\n') <NEW_LINE> raise KeyError <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print('Error ! No labelIndex !') <NEW_LINE> f.write('Error ! No labelIndex !' + '\n') <NEW_LINE> raise KeyError <NEW_LINE> <DEDENT> <DEDENT> print('-----------------------------------') <NEW_LINE> f.write('-----------------------------------' + '\n') <NEW_LINE> <DEDENT> <DEDENT> print('\n') <NEW_LINE> f.write('\n')
Loop through all keywords :param outputfile: the output log file path
625941c266656f66f7cbc149
def __len__(self): <NEW_LINE> <INDENT> if self._len is not None: <NEW_LINE> <INDENT> return self._len <NEW_LINE> <DEDENT> if self._has_more: <NEW_LINE> <INDENT> list(self._iter_results()) <NEW_LINE> self._len = len(self._result_cache) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._len = self.count() <NEW_LINE> <DEDENT> return self._len
Modified version of the default __len__() which allows us to get the length with or without caching enabled.
625941c27b25080760e393f9
def plot_image(data, times, mask=None, ax=None, vmax=None, vmin=None, draw_mask=None, draw_contour=None, colorbar=True, draw_diag=True, draw_zerolines=True, xlabel="Time (s)", ylabel="Time (s)", cbar_unit="%", cmap="RdBu_r", mask_alpha=.75, mask_cmap="RdBu_r"): <NEW_LINE> <INDENT> if ax is None: <NEW_LINE> <INDENT> fig = plt.figure() <NEW_LINE> ax = plt.axes() <NEW_LINE> <DEDENT> if vmax is None: <NEW_LINE> <INDENT> vmax = np.abs(data).max() <NEW_LINE> <DEDENT> if vmin is None: <NEW_LINE> <INDENT> vmax = np.abs(data).max() <NEW_LINE> vmin = -vmax <NEW_LINE> <DEDENT> tmin, tmax = xlim = times[0], times[-1] <NEW_LINE> extent = [tmin, tmax, tmin, tmax] <NEW_LINE> im_args = dict(interpolation='nearest', origin='lower', extent=extent, aspect='auto', vmin=vmin, vmax=vmax) <NEW_LINE> if mask is not None: <NEW_LINE> <INDENT> draw_mask = True if draw_mask is None else draw_mask <NEW_LINE> draw_contour = True if draw_contour is None else draw_contour <NEW_LINE> <DEDENT> if any((draw_mask, draw_contour,)): <NEW_LINE> <INDENT> if mask is None: <NEW_LINE> <INDENT> raise ValueError("No mask to show!") <NEW_LINE> <DEDENT> <DEDENT> if draw_mask: <NEW_LINE> <INDENT> ax.imshow(data, alpha=mask_alpha, cmap=mask_cmap, **im_args) <NEW_LINE> im = ax.imshow(np.ma.masked_where(~mask, data), cmap=cmap, **im_args) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> im = ax.imshow(data, cmap=cmap, **im_args) <NEW_LINE> <DEDENT> if draw_contour and np.unique(mask).size == 2: <NEW_LINE> <INDENT> big_mask = np.kron(mask, np.ones((10, 10))) <NEW_LINE> ax.contour(big_mask, colors=["k"], extent=extent, linewidths=[1], aspect=1, corner_mask=False, antialiased=False, levels=[.5]) <NEW_LINE> <DEDENT> ax.set_xlim(xlim) <NEW_LINE> ax.set_ylim(xlim) <NEW_LINE> if draw_diag: <NEW_LINE> <INDENT> ax.plot((tmin, tmax), (tmin, tmax), color="k", linestyle=":") <NEW_LINE> <DEDENT> if draw_zerolines: <NEW_LINE> <INDENT> ax.axhline(0, color="k", linestyle=":") <NEW_LINE> ax.axvline(0, color="k", linestyle=":") 
<NEW_LINE> <DEDENT> ax.set_ylabel(ylabel) <NEW_LINE> ax.set_xlabel(xlabel) <NEW_LINE> if colorbar: <NEW_LINE> <INDENT> cbar = plt.colorbar(im, ax=ax) <NEW_LINE> cbar.ax.set_title(cbar_unit) <NEW_LINE> <DEDENT> ax.set_aspect(1. / ax.get_data_ratio()) <NEW_LINE> ax.set_title("GAT Matrix") <NEW_LINE> ax.title.set_position([.5, 1.025]) <NEW_LINE> return fig if ax is None else ax
Return fig and ax for further styling of GAT matrix, e.g., titles Parameters ---------- data: array of scores times: list of epoched time points mask: None | array ...
625941c25fcc89381b1e165c
def setPartOfValues2(self, *args): <NEW_LINE> <INDENT> return _ParaMEDMEM.DataArrayDouble_setPartOfValues2(self, *args)
setPartOfValues2(self, DataArrayDouble a, int bgTuples, int endTuples, int bgComp, int endComp, bool strictCompoCompare = True) setPartOfValues2(self, DataArrayDouble a, int bgTuples, int endTuples, int bgComp, int endComp) 1
625941c2e5267d203edcdc3e
def package_create(context, data_dict=None): <NEW_LINE> <INDENT> user = context['user'] <NEW_LINE> if context.get('package', False): <NEW_LINE> <INDENT> return is_owner(context, context.get('package').get('id')) <NEW_LINE> <DEDENT> data_dict = data_dict or {} <NEW_LINE> org_id = data_dict.get('owner_org', False) <NEW_LINE> if org_id and not kata_has_user_permission_for_org( org_id, user, 'create_dataset'): <NEW_LINE> <INDENT> return {'success': False, 'msg': _('User %s not authorized to add a dataset') % user} <NEW_LINE> <DEDENT> elif org_id and kata_has_user_permission_for_org(org_id, user, 'create_dataset'): <NEW_LINE> <INDENT> return {'success': True} <NEW_LINE> <DEDENT> if authz.auth_is_anon_user(context): <NEW_LINE> <INDENT> check1 = all(authz.check_config_permission(p) for p in ( 'anon_create_dataset', 'create_dataset_if_not_in_organization', 'create_unowned_dataset', )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> check1 = True <NEW_LINE> <DEDENT> if not check1: <NEW_LINE> <INDENT> return {'success': False, 'msg': _('User %s not authorized to create packages') % user} <NEW_LINE> <DEDENT> check2 = _check_group_auth(context, data_dict) <NEW_LINE> if not check2: <NEW_LINE> <INDENT> return {'success': False, 'msg': _('User %s not authorized to edit these groups') % user} <NEW_LINE> <DEDENT> data_dict = data_dict or {} <NEW_LINE> org_id = data_dict.get('owner_org') <NEW_LINE> if org_id and not authz.has_user_permission_for_group_or_org( org_id, user, 'create_dataset'): <NEW_LINE> <INDENT> return {'success': False, 'msg': _('User %s not authorized to add dataset to this organization') % user} <NEW_LINE> <DEDENT> return {'success': True}
Modified from CKAN's original check. Any logged in user can add a dataset to any organisation. Packages owner check is done when adding a resource. :param context: context :param data_dict: data_dict :return: dictionary with 'success': True|False
625941c23eb6a72ae02ec477
def pushDominoes(self, dominoes): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> new = dominoes.replace("R.L", "S") <NEW_LINE> new = new.replace(".L", "LL").replace("R.", "RR") <NEW_LINE> if new == dominoes: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dominoes = new <NEW_LINE> <DEDENT> <DEDENT> return dominoes.replace("S", "R.L")
:type dominoes: str :rtype: str
625941c2442bda511e8be3ba
def test_temporal_hash_operator4(self): <NEW_LINE> <INDENT> tra = tgis.TemporalRasterAlgebraParser(run = True, debug = True) <NEW_LINE> tra.parse(expression="R = if({contains},A # D == 1, C {#,contains} A)", basename="r", overwrite=True) <NEW_LINE> D = tgis.open_old_stds("R", type="strds") <NEW_LINE> D.select() <NEW_LINE> self.assertEqual(D.metadata.get_number_of_maps(), 1) <NEW_LINE> self.assertEqual(D.metadata.get_min_min(), 2) <NEW_LINE> self.assertEqual(D.metadata.get_max_max(), 2) <NEW_LINE> start, end = D.get_absolute_time() <NEW_LINE> self.assertEqual(start, datetime.datetime(2001, 1, 2)) <NEW_LINE> self.assertEqual(end, datetime.datetime(2001, 1, 4)) <NEW_LINE> self.assertEqual( D.check_temporal_topology(), True) <NEW_LINE> self.assertEqual(D.get_granularity(), u'2 days')
Testing the temporal hash operator in the raster algebra.
625941c2099cdd3c635f0bfb
def on_switch(self, callback): <NEW_LINE> <INDENT> self._switch_callback = callback
Set a callback for when the rotary encoder's switch is pressed. :param callback: A callback function without arguments.
625941c20c0af96317bb8187
def _init(): <NEW_LINE> <INDENT> def _tigetstr(cap_name): <NEW_LINE> <INDENT> import curses <NEW_LINE> cap = curses.tigetstr(cap_name) or '' <NEW_LINE> cap = re.sub(r'\$<\d+>[*]?', '', cap) <NEW_LINE> if cap_name == 'sgr0': <NEW_LINE> <INDENT> cap = re.sub(r'\017$', '', cap) <NEW_LINE> <DEDENT> return cap <NEW_LINE> <DEDENT> _term_stream = sys.stdout <NEW_LINE> try: <NEW_LINE> <INDENT> import curses <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> sys.stderr.write("INFO: no curses support: you won't see colors\n") <NEW_LINE> return <NEW_LINE> <DEDENT> import config <NEW_LINE> if not _term_stream.isatty() and 'color-always' not in config.color.style: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> curses.setupterm() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> colors.COLS = curses.tigetnum('cols') <NEW_LINE> colors.LINES = curses.tigetnum('lines') <NEW_LINE> for capability in _STRING_CAPABILITIES: <NEW_LINE> <INDENT> (attrib, cap_name) = capability.split('=') <NEW_LINE> setattr(colors, attrib, _tigetstr(cap_name) or '') <NEW_LINE> <DEDENT> set_fg = _tigetstr('setf') <NEW_LINE> if set_fg: <NEW_LINE> <INDENT> for i, color in zip(range(len(_COLORS)), _COLORS): <NEW_LINE> <INDENT> setattr(colors, color, curses.tparm(set_fg, i) or '') <NEW_LINE> <DEDENT> <DEDENT> set_fg_ansi = _tigetstr('setaf') <NEW_LINE> if set_fg_ansi: <NEW_LINE> <INDENT> for i, color in zip(range(len(_ANSICOLORS)), _ANSICOLORS): <NEW_LINE> <INDENT> setattr(colors, color, curses.tparm(set_fg_ansi, i) or '') <NEW_LINE> <DEDENT> <DEDENT> set_bg = _tigetstr('setb') <NEW_LINE> if set_bg: <NEW_LINE> <INDENT> for i, color in zip(range(len(_COLORS)), _COLORS): <NEW_LINE> <INDENT> setattr(colors, 'BG_'+color, curses.tparm(set_bg, i) or '') <NEW_LINE> <DEDENT> <DEDENT> set_bg_ansi = _tigetstr('setab') <NEW_LINE> if set_bg_ansi: <NEW_LINE> <INDENT> for i, color in zip(range(len(_ANSICOLORS)), _ANSICOLORS): <NEW_LINE> <INDENT> setattr(colors, 'BG_'+color, 
curses.tparm(set_bg_ansi, i) or '')
Initialize attributes with appropriate values for the current terminal. `_term_stream` is the stream that will be used for terminal output; if this stream is not a tty, then the terminal is assumed to be a dumb terminal (i.e., have no capabilities).
625941c296565a6dacc8f66b
def insert_after(self, previous_node, data): <NEW_LINE> <INDENT> new_node = Node(data) <NEW_LINE> if previous_node is self.tail: <NEW_LINE> <INDENT> previous_node.next = new_node <NEW_LINE> self.tail = new_node <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_node.next = previous_node.next <NEW_LINE> previous_node.next = new_node
연결 리스트 주어진 노드 뒤 삽입 연산 메소드
625941c210dbd63aa1bd2b43
def SetDefaultColours(self, base_colour=None): <NEW_LINE> <INDENT> if base_colour is None: <NEW_LINE> <INDENT> base_colour = GetBaseColour() <NEW_LINE> <DEDENT> darker1_colour = StepColour(base_colour, 85) <NEW_LINE> darker2_colour = StepColour(base_colour, 75) <NEW_LINE> darker3_colour = StepColour(base_colour, 60) <NEW_LINE> darker4_colour = StepColour(base_colour, 40) <NEW_LINE> self._background_colour = base_colour <NEW_LINE> self._background_gradient_colour = StepColour(base_colour, 180) <NEW_LINE> self._inactive_caption_colour = darker1_colour <NEW_LINE> self._inactive_caption_gradient_colour = StepColour(base_colour, 97) <NEW_LINE> self._sash_brush = wx.Brush(base_colour) <NEW_LINE> self._background_brush = wx.Brush(base_colour) <NEW_LINE> self._border_pen = wx.Pen(darker2_colour) <NEW_LINE> self._gripper_brush = wx.Brush(base_colour) <NEW_LINE> self._gripper_pen1 = wx.Pen(darker4_colour) <NEW_LINE> self._gripper_pen2 = wx.Pen(darker3_colour) <NEW_LINE> self._gripper_pen3 = wx.WHITE_PEN <NEW_LINE> self._hint_background_colour = colourHintBackground <NEW_LINE> self._hint_border_colour = colourHintBorder
Sets the default colours, which are calculated from the given base colour. :param `base_colour`: an instance of :class:`wx.Colour`. If defaulted to ``None``, a colour is generated accordingly to the platform and theme.
625941c2b545ff76a8913db6
def tearDown(self): <NEW_LINE> <INDENT> self._fixture.tearDown()
Delete the test working directory. `self` is this test case.
625941c28e71fb1e9831d74a
def is_date_in_bounds(self, symbol, date): <NEW_LINE> <INDENT> if symbol in self.price_files: <NEW_LINE> <INDENT> df = self.price_files[symbol] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if utils.refresh(utils.get_file_path(config.prices_data_path, prices.price_table_filename, symbol=symbol), refresh=False): <NEW_LINE> <INDENT> prices.download_data_from_yahoo(symbol, start_date=self.start_date, end_date=self.end_date) <NEW_LINE> <DEDENT> df = pd.read_csv(utils.get_file_path(config.prices_data_path, prices.price_table_filename, symbol=symbol), index_col="Date", parse_dates=["Date"])[self.start_date:self.end_date] <NEW_LINE> self.price_files[symbol] = df <NEW_LINE> <DEDENT> if df.index[0] <= date <= df.index[-1]: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Returns true if the date is out of bounds for the symbol, else false Parameters: symbol : str date : datetime Returns: bool Returns true if the date is out of bounds for the symbol, else false
625941c266673b3332b92030
def query_relations(self, environment_id, collection_id, entities=None, context=None, sort=None, filter=None, count=None, evidence_count=None, **kwargs): <NEW_LINE> <INDENT> if environment_id is None: <NEW_LINE> <INDENT> raise ValueError('environment_id must be provided') <NEW_LINE> <DEDENT> if collection_id is None: <NEW_LINE> <INDENT> raise ValueError('collection_id must be provided') <NEW_LINE> <DEDENT> if entities is not None: <NEW_LINE> <INDENT> entities = [ self._convert_model(x, QueryRelationsEntity) for x in entities ] <NEW_LINE> <DEDENT> if context is not None: <NEW_LINE> <INDENT> context = self._convert_model(context, QueryEntitiesContext) <NEW_LINE> <DEDENT> if filter is not None: <NEW_LINE> <INDENT> filter = self._convert_model(filter, QueryRelationsFilter) <NEW_LINE> <DEDENT> headers = {} <NEW_LINE> if 'headers' in kwargs: <NEW_LINE> <INDENT> headers.update(kwargs.get('headers')) <NEW_LINE> <DEDENT> params = {'version': self.version} <NEW_LINE> data = { 'entities': entities, 'context': context, 'sort': sort, 'filter': filter, 'count': count, 'evidence_count': evidence_count } <NEW_LINE> url = '/v1/environments/{0}/collections/{1}/query_relations'.format( *self._encode_path_vars(environment_id, collection_id)) <NEW_LINE> response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) <NEW_LINE> return response
Knowledge Graph relationship query. See the [Knowledge Graph documentation](https://console.bluemix.net/docs/services/discovery/building-kg.html) for more details. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param list[QueryRelationsEntity] entities: An array of entities to find relationships for. :param QueryEntitiesContext context: Entity text to provide context for the queried entity and rank based on that association. For example, if you wanted to query the city of London in England your query would look for `London` with the context of `England`. :param str sort: The sorting method for the relationships, can be `score` or `frequency`. `frequency` is the number of unique times each entity is identified. The default is `score`. :param QueryRelationsFilter filter: Filters to apply to the relationship query. :param int count: The number of results to return. The default is `10`. The maximum is `1000`. :param int evidence_count: The number of evidence items to return for each result. The default is `0`. The maximum number of evidence items per query is 10,000. :param dict headers: A `dict` containing the request headers :return: A `dict` containing the `QueryRelationsResponse` response. :rtype: dict
625941c2be7bc26dc91cd5a3
def calculate_reaction_propensities(self): <NEW_LINE> <INDENT> reaction_propensities = [] <NEW_LINE> for state_change_vector, stochastic_constant in zip( self.state_change_matrix, self.stochastic_constants): <NEW_LINE> <INDENT> propensity = stochastic_constant <NEW_LINE> for change, species_abundance in zip( state_change_vector, self.abundances[-1]): <NEW_LINE> <INDENT> if change >= 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif change == -1: <NEW_LINE> <INDENT> propensity *= species_abundance <NEW_LINE> <DEDENT> elif change == -2: <NEW_LINE> <INDENT> propensity *= species_abundance * (species_abundance - 1) <NEW_LINE> <DEDENT> <DEDENT> reaction_propensities.append(propensity) <NEW_LINE> <DEDENT> return np.array(reaction_propensities)
Determine probabilities for each reaction. The reaction propensity * dt gives the probability that a particular reaction will occur in the time interval [t, t+dt). See `manual/model_document.pdf` for scientific notes. Returns ------- reaction_propensities : np.ndarray (1D) Reaction propensities, one for each reaction.
625941c2a8ecb033257d306d
def initialize(self, training_info, model, environment, device): <NEW_LINE> <INDENT> pass
Initialize algo from reinforcer settings
625941c25fdd1c0f98dc01d2
def get_rand(self): <NEW_LINE> <INDENT> return random.random() * 2 * self.EPSILON - self.EPSILON
Calculate a random value off of the given EPSILON value in the range [-self.EPSILON, self.EPSILON]
625941c2cc0a2c11143dce2f
def test_getCertInChain(get_proxy): <NEW_LINE> <INDENT> proxyChain = get_proxy(USERCERT) <NEW_LINE> chainLength = proxyChain.getNumCertsInChain()['Value'] <NEW_LINE> res = proxyChain.getCertInChain(certPos=chainLength - 1) <NEW_LINE> assert res['OK'] <NEW_LINE> certSubject = res['Value'].getSubjectDN().get('Value') <NEW_LINE> assert certSubject == getCertOption(USERCERT, 'subjectDN') <NEW_LINE> assert certSubject == proxyChain.getCertInChain(certPos=- 1)['Value'].getSubjectDN().get('Value') <NEW_LINE> assert proxyChain.isProxy()['Value'] is True <NEW_LINE> assert proxyChain.isLimitedProxy()['Value'] is False <NEW_LINE> assert proxyChain.isVOMS()['Value'] is False <NEW_LINE> assert proxyChain.isRFC()['Value'] is True <NEW_LINE> assert proxyChain.isValidProxy()['Value'] is True
" retrieve the first certificate in the chain, and make sure it is the original one
625941c294891a1f4081ba48
def kill_hard(process): <NEW_LINE> <INDENT> with suppress(ProcessLookupError): <NEW_LINE> <INDENT> if not is_windows(): <NEW_LINE> <INDENT> process.send_signal(SIGINFO) <NEW_LINE> time.sleep(1) <NEW_LINE> <DEDENT> process.kill() <NEW_LINE> <DEDENT> stdout, stderr = process.communicate() <NEW_LINE> return process.returncode, stdout, stderr
Kill the specified process immediately using SIGKILL.
625941c282261d6c526ab43c
def BulkTransactions(self, txns, wasCached): <NEW_LINE> <INDENT> self.send_BulkTransactions(txns, wasCached) <NEW_LINE> return self.recv_BulkTransactions()
Parameters: - txns - wasCached
625941c2627d3e7fe0d68dee
def SetOutsideValue(self, *args): <NEW_LINE> <INDENT> return _itkSpatialObjectToImageFilterPython.itkSpatialObjectToImageFilterSO3IUL3_SetOutsideValue(self, *args)
SetOutsideValue(self, unsigned long _arg)
625941c2be8e80087fb20be5
def Decode(self, value, component_full_name, flag_values): <NEW_LINE> <INDENT> dpb_service_config = super(_DpbServiceDecoder, self).Decode( value, component_full_name, flag_values) <NEW_LINE> if dpb_service_config['service_type'] == dpb_service.EMR: <NEW_LINE> <INDENT> if flag_values.dpb_wordcount_fs != BaseDpbService.S3_FS: <NEW_LINE> <INDENT> raise errors.Config.InvalidValue('EMR service requires S3.') <NEW_LINE> <DEDENT> <DEDENT> if dpb_service_config['service_type'] == dpb_service.DATAFLOW: <NEW_LINE> <INDENT> if flag_values.dpb_dataflow_jar is None: <NEW_LINE> <INDENT> raise errors.Config.InvalidValue('Dataflow jar missing.') <NEW_LINE> <DEDENT> if flag_values.dpb_dataflow_staging_location is None: <NEW_LINE> <INDENT> raise errors.Config.InvalidValue('Dataflow Staging location missing.') <NEW_LINE> <DEDENT> <DEDENT> result = _DpbServiceSpec(self._GetOptionFullName(component_full_name), flag_values, **dpb_service_config) <NEW_LINE> return result
Verifies dpb(data processing backend) service dictionary of a benchmark config object. Args: value: dict Dpb Service config dictionary component_full_name: string. Fully qualified name of the configurable component containing the config option. flag_values: flags.FlagValues. Runtime flag values to be propagated to BaseSpec constructors. Returns: _DpbServiceSpec Build from the config passed in in value. Raises: errors.Config.InvalidValue upon invalid input value.
625941c2d164cc6175782ced
def get_grt_column(column_name, table_name, sql_type, defaultValue=None, comment=None, isNotNull=0, autoIncrement=0): <NEW_LINE> <INDENT> column = MagicMock( owner=get_grt_table(table_name), defaultValue=defaultValue, formattedType=sql_type, formattedRawType=sql_type, isNotNull=isNotNull, autoIncrement=autoIncrement ) <NEW_LINE> column.name = column_name <NEW_LINE> if comment is not None: <NEW_LINE> <INDENT> column.comment = comment <NEW_LINE> <DEDENT> return column
Mock a column Returns a Mock object representing the basic needs of a column Arguments: column_name {str} -- The name of the column table_name {str} -- The name of the table (a table will be accessible at o.owner) sql_type {str} -- The SQL type of the column Keyword Arguments: defaultValue {str} -- Default value (default: {None}) comment {str} -- Comment (default: {None}) isNotNull {number} -- Is not null (default: {0}) autoIncrement {number} -- Auto Increment (default: {0}) Returns: MagicMock -- GRT Compatible Column
625941c2925a0f43d2549e15
def test_update_without_params(self, client): <NEW_LINE> <INDENT> res_id = 2 <NEW_LINE> put_response = client.put("/bookings", json=dict(id=res_id)) <NEW_LINE> assert put_response.status_code == 406 <NEW_LINE> assert put_response.json["message"] == "Invalid input" <NEW_LINE> assert put_response.json["success"] is False <NEW_LINE> assert ( put_response.json["errors"] == "'resource_id', 'booked_from' or 'booked_to' JSON arguments should be provided" )
Try to update booking without parameters to update.
625941c2167d2b6e31218b35
@magicWord(category=CATEGORY_PROGRAMMER, types=[]) <NEW_LINE> def recoverCloset(): <NEW_LINE> <INDENT> target = spellbook.getTarget() <NEW_LINE> if not target: <NEW_LINE> <INDENT> target = spellbook.getInvoker() <NEW_LINE> <DEDENT> if not target: <NEW_LINE> <INDENT> return "Strange.. who are we talking about?" <NEW_LINE> <DEDENT> if not hasattr(target, "estate") or not hasattr(target.estate, "houses"): <NEW_LINE> <INDENT> return "no houses in the state" <NEW_LINE> <DEDENT> for house in target.estate.houses: <NEW_LINE> <INDENT> if house.doId == target.houseId: <NEW_LINE> <INDENT> fm = house.interior.furnitureManager <NEW_LINE> for item in reversed(fm.items): <NEW_LINE> <INDENT> if item.catalogItem.getFlags() & FLCloset: <NEW_LINE> <INDENT> fm.moveItemToAttic(item.doId); <NEW_LINE> return "Moved the closet" <NEW_LINE> <DEDENT> <DEDENT> fm.saveToHouse() <NEW_LINE> <DEDENT> <DEDENT> return "I cannot find your closet"
recover the closet
625941c2009cb60464c63353
def evaluate_sensor_selection(cns, flow, metric, w, sensor_identifier, training_data, runs=1): <NEW_LINE> <INDENT> node_sequence = [ExternalGeneratorSourceNode(), BaseNode.node_from_yaml(cns)] <NEW_LINE> for sub_node_spec in flow: <NEW_LINE> <INDENT> node_obj = BaseNode.node_from_yaml(sub_node_spec) <NEW_LINE> node_sequence.append(node_obj) <NEW_LINE> <DEDENT> for index, node in enumerate(node_sequence): <NEW_LINE> <INDENT> if node.is_trainable(): <NEW_LINE> <INDENT> node_sequence[index - 1].set_permanent_attributes(caching=True) <NEW_LINE> <DEDENT> if node.is_split_node(): <NEW_LINE> <INDENT> node_sequence[index - 1].set_permanent_attributes(caching=True) <NEW_LINE> <DEDENT> <DEDENT> flow = NodeChain(node_sequence) <NEW_LINE> for run in range(runs): <NEW_LINE> <INDENT> flow[-1].set_run_number(run) <NEW_LINE> flow[0].set_generator(training_data) <NEW_LINE> while True: <NEW_LINE> <INDENT> flow[-1].process_current_split() <NEW_LINE> if not flow[-1].use_next_split(): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> for node in flow: <NEW_LINE> <INDENT> node.reset() <NEW_LINE> <DEDENT> <DEDENT> result_collection = flow[-1].get_result_dataset() <NEW_LINE> performance = result_collection.get_average_performance(metric) - w * result_collection.get_performance_std(metric) <NEW_LINE> return (sensor_identifier, performance)
Execute the evaluation flow
625941c24d74a7450ccd4163
@plugins.command() <NEW_LINE> def list(): <NEW_LINE> <INDENT> from pkg_resources import iter_entry_points <NEW_LINE> for ep in iter_entry_points('sentry.plugins'): <NEW_LINE> <INDENT> click.echo( u'%s: %s %s (%s)' % (ep.name, ep.dist.project_name, ep.dist.version, ep.dist.location, ) )
List all installed plugins
625941c23d592f4c4ed1d012
def draw_bullet(self): <NEW_LINE> <INDENT> pygame.draw.rect(self.screen, self.color, self.rect)
draws bullet to screen
625941c2f548e778e58cd51c
def update_reeds_var(attr, old, new): <NEW_LINE> <INDENT> update_reeds_wdg(wdg_type='vars')
When ReEDS var fields are updated, call update_reeds_wdg with the 'vars' flag
625941c27047854f462a13ab
def __init__(self, nickname, password, active): <NEW_LINE> <INDENT> self.nickname = nickname <NEW_LINE> self.set_new_password(password) <NEW_LINE> self.active = active
Saves user data in internal structures: Arguments: nickname -- user login. password -- not hashed user password. salt -- salt for password. active -- user status.
625941c25fc7496912cc391d
def regs(odrs, model): <NEW_LINE> <INDENT> s = [] <NEW_LINE> f = plt.figure() <NEW_LINE> for idx in range(len(odrs)): <NEW_LINE> <INDENT> odr = odrs[idx] <NEW_LINE> X = np.column_stack([x**i for i in range(odr+1)]) <NEW_LINE> model.fit(X, y) <NEW_LINE> s.append(model.coef_) <NEW_LINE> plt.subplot(2, 2, idx+1) <NEW_LINE> plt.tight_layout() <NEW_LINE> plt.plot(x, y, 'bo', fillstyle='none') <NEW_LINE> plt.plot(x, np.sin(x), 'b--', linewidth=2) <NEW_LINE> plt.plot(x, model.predict(X), 'r-', linewidth=2) <NEW_LINE> plt.grid() <NEW_LINE> plt.title('Order = {0:d}'.format(odr)) <NEW_LINE> <DEDENT> return f, s
Solve regression problems using a series of polynomials of different orders *odrs*. The parameter *model* specifies the type of regression method.
625941c2c432627299f04be4
def test_pauseProducing(self): <NEW_LINE> <INDENT> expectedResult = "hello, world" <NEW_LINE> readSize = 5 <NEW_LINE> output = StringIO() <NEW_LINE> consumer = FileConsumer(output) <NEW_LINE> producer = FileBodyProducer( StringIO(expectedResult), self.cooperator, readSize) <NEW_LINE> complete = producer.startProducing(consumer) <NEW_LINE> self._scheduled.pop(0)() <NEW_LINE> self.assertEqual(output.getvalue(), expectedResult[:5]) <NEW_LINE> producer.pauseProducing() <NEW_LINE> self._scheduled.pop(0)() <NEW_LINE> self.assertEqual(output.getvalue(), expectedResult[:5]) <NEW_LINE> self.assertEqual([], self._scheduled) <NEW_LINE> self.assertNoResult(complete)
L{FileBodyProducer.pauseProducing} temporarily suspends writing bytes from the input file to the given L{IConsumer}.
625941c26fb2d068a760f03b
def draw_vertical_servo_ctl(servo, window, title="Servo"): <NEW_LINE> <INDENT> add_title(window, title) <NEW_LINE> avail_height, width = window.getmaxyx() <NEW_LINE> avail_height -= 1 <NEW_LINE> midpoint = width // 2 <NEW_LINE> curr_val = servo.value <NEW_LINE> for y in range(2, avail_height): <NEW_LINE> <INDENT> if y == 2: <NEW_LINE> <INDENT> window.addstr(y, midpoint-2, "[{:.2f}]".format(curr_val)) <NEW_LINE> <DEDENT> elif y == avail_height-1: <NEW_LINE> <INDENT> window.addstr(y, midpoint-1, "[0]") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if y / avail_height >= curr_val: <NEW_LINE> <INDENT> window.addstr(y, midpoint-1, "[+]") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> window.addstr(y, midpoint-1, "| |")
Draw the servos on the given window, filling in the initial value. :param servo: gpiozero Servo :param window: curses window :return:
625941c27c178a314d6ef3fc
def can_get_hold_block(block, box_size_map, box_list, bin_obj, space): <NEW_LINE> <INDENT> residual_box_list_ind = box_size_map[block.item_size] <NEW_LINE> flag, used_box_ind = can_form_rectangle_block(block, residual_box_list_ind, box_list, bin_obj, space) <NEW_LINE> if flag: <NEW_LINE> <INDENT> if can_hold_block(block, space): <NEW_LINE> <INDENT> temp_residual_list_ind = [] <NEW_LINE> for ind in residual_box_list_ind: <NEW_LINE> <INDENT> if ind in used_box_ind: <NEW_LINE> <INDENT> box_list[ind].box_num -= used_box_ind[ind] <NEW_LINE> <DEDENT> <DEDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False
是否能够获取一个块使其能够被当前空间支撑 :param block: 当前校验的块 :param box_size_map: 箱子提货点和尺寸与箱子索引的映射 :param box_list: 箱子集合 :param bin_obj: 当前用来装载的集装箱 :param space: 当前用来装载的空间
625941c25fcc89381b1e165d
def cv_classifier_3(self, image): <NEW_LINE> <INDENT> red_img = image[:,:,2] <NEW_LINE> green_img = image[:,:,1] <NEW_LINE> if PRINT_IMAGES == True: <NEW_LINE> <INDENT> cv2.imwrite(self.output_images_path+"/"+'red_img.jpg', red_img) <NEW_LINE> cv2.imwrite(self.output_images_path+"/"+'green_img.jpg', green_img) <NEW_LINE> <DEDENT> red_area = np.sum(red_img == red_img.max()) <NEW_LINE> green_area = np.sum(green_img == green_img.max()) <NEW_LINE> if red_area - green_area >= 15: <NEW_LINE> <INDENT> decision = TrafficLight.RED <NEW_LINE> <DEDENT> elif green_area - red_area >= 15: <NEW_LINE> <INDENT> decision = TrafficLight.GREEN <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> decision = TrafficLight.UNKNOWN <NEW_LINE> <DEDENT> return decision
Args: Image: cv2.image in BGR
625941c2435de62698dfdbec
def memoize(maxsize): <NEW_LINE> <INDENT> assert maxsize >= 4, "Memoize cannot work if maxsize is less than 4" <NEW_LINE> def wrap(fct): <NEW_LINE> <INDENT> cache = {} <NEW_LINE> keys = [None for i in xrange(maxsize)] <NEW_LINE> seg_size = maxsize // 4 <NEW_LINE> pointers = [i * seg_size for i in xrange(4)] <NEW_LINE> max_pointers = [(i + 1) * seg_size for i in xrange(3)] + [maxsize] <NEW_LINE> def wrapper(*args): <NEW_LINE> <INDENT> key = repr(args) <NEW_LINE> res = cache.get(key) <NEW_LINE> if res: <NEW_LINE> <INDENT> pos, res = res <NEW_LINE> keys[pos] = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res = fct(*args) <NEW_LINE> <DEDENT> value = res <NEW_LINE> for segment, pointer in enumerate(pointers): <NEW_LINE> <INDENT> newkey = keys[pointer] <NEW_LINE> keys[pointer] = key <NEW_LINE> cache[key] = (pointer, value) <NEW_LINE> pointers[segment] = pointer + 1 <NEW_LINE> if pointers[segment] == max_pointers[segment]: <NEW_LINE> <INDENT> pointers[segment] = segment * seg_size <NEW_LINE> <DEDENT> if newkey is None: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> segment, value = cache.pop(newkey) <NEW_LINE> key = newkey <NEW_LINE> <DEDENT> return res <NEW_LINE> <DEDENT> wrapper.__doc__ = fct.__doc__ <NEW_LINE> wrapper.__name__ = fct.__name__ <NEW_LINE> return wrapper <NEW_LINE> <DEDENT> return wrap
Decorator to 'memoize' a function - caching its results with a near LRU implementation. The cache keeps a list of keys logicaly separated in 4 segment : segment 1 | ... | segment4 [k,k,k,k,k,k,k, .. ,k,k,k,k,k,k,k] For each segment there is a pointer that loops on it. When a key is accessed from the cache it is promoted to the first segment (at the pointer place of segment one), the key under the pointer is moved to the next segment, the pointer is then incremented and so on. A key that is removed from the last segment is removed from the cache. :param: maxsize the size of the cache (must be greater than or equal to 4)
625941c2e1aae11d1e749c55
def action_install_webhooks( self, request, queryset ): <NEW_LINE> <INDENT> from djangoplicity.mailinglists.tasks import webhooks <NEW_LINE> for obj in queryset: <NEW_LINE> <INDENT> webhooks.delay( list_id=obj.list_id ) <NEW_LINE> <DEDENT> self.message_user( request, "Installing webhooks for lists %s." % ", ".join( [l.name for l in queryset] ) )
Action to request webhooks to be installed in MailChimp.
625941c2adb09d7d5db6c730
def matrix_list_in_one_vector(matrix_list): <NEW_LINE> <INDENT> return concatenate([m.ravel() for m in matrix_list])
Function concatenate all matrix from list in a single vector. Parameters ---------- matrix_list : list of array-like elements List of matrices. Returns ------- array-like Function will return a single vector wich contains all matrix transformed to the vector and concatenated in the same order as in the list. Examples -------- >>> import numpy as np >>> from neupy.algorithms.utils import * >>> >>> a = np.arange(9).reshape((3, 3)) >>> b = np.array([[10, 0], [0, -1]]) >>> >>> matrix_list_in_one_vector([a, b]) array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 0, 0, -1]) >>> matrix_list_in_one_vector([b, a]) array([10, 0, 0, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
625941c2460517430c39412a
def start(container, binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=None, links=None, privileged=False, dns=None, volumes_from=None, network_mode=None, restart_policy=None, cap_add=None, cap_drop=None): <NEW_LINE> <INDENT> if not binds: <NEW_LINE> <INDENT> binds = {} <NEW_LINE> <DEDENT> if not isinstance(binds, dict): <NEW_LINE> <INDENT> raise SaltInvocationError('binds must be formatted as a dictionary') <NEW_LINE> <DEDENT> client = _get_client() <NEW_LINE> status = base_status.copy() <NEW_LINE> try: <NEW_LINE> <INDENT> dcontainer = _get_container_infos(container)['Id'] <NEW_LINE> if not is_running(container): <NEW_LINE> <INDENT> bindings = None <NEW_LINE> if port_bindings is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> bindings = {} <NEW_LINE> for k, v in port_bindings.items(): <NEW_LINE> <INDENT> bindings[k] = (v.get('HostIp', ''), v['HostPort']) <NEW_LINE> <DEDENT> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> raise SaltInvocationError( 'port_bindings must be formatted as a dictionary of ' 'dictionaries' ) <NEW_LINE> <DEDENT> <DEDENT> client.start(dcontainer, binds=binds, port_bindings=bindings, lxc_conf=lxc_conf, publish_all_ports=publish_all_ports, links=links, privileged=privileged, dns=dns, volumes_from=volumes_from, network_mode=network_mode, restart_policy=restart_policy, cap_add=cap_add, cap_drop=cap_drop) <NEW_LINE> if is_running(dcontainer): <NEW_LINE> <INDENT> _valid(status, comment='Container {0} was started'.format(container), id_=container) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _invalid(status) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> _valid(status, comment='Container {0} was already started'.format(container), id_=container) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> _invalid(status, id_=container, out=traceback.format_exc(), comment=( 'An exception occurred while starting ' 'your container {0}').format(container)) <NEW_LINE> <DEDENT> 
__salt__['mine.send']('docker.get_containers', host=True) <NEW_LINE> return status
Start the specified container container container id CLI Example: .. code-block:: bash salt '*' docker.start <container id>
625941c230c21e258bdfa43b
def request_view_and_leave_for_bandit_in_test(self, num_user, experiment, winner, times): <NEW_LINE> <INDENT> assert times['winner'] > 0 and times['loser'] > 0, "Times must be positive." <NEW_LINE> view_action = experiment.name + '_view' <NEW_LINE> leave_action = experiment.goal_set.get().act_subject + '_leave' <NEW_LINE> for i in range(num_user): <NEW_LINE> <INDENT> response = self.client.post('/useractions/', {'ip': str(i), 'action': view_action}, format='json') <NEW_LINE> assigned_group = response.data['groups'][experiment.name] <NEW_LINE> if assigned_group == winner: <NEW_LINE> <INDENT> winner_time = times['winner'] <NEW_LINE> time_in_page = norm.rvs(winner_time, (winner_time / 2)**2) <NEW_LINE> time.sleep(time_in_page) <NEW_LINE> self.client.post('/useractions/', {'ip': str(i), 'action': leave_action}, format='json') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> loser_time = times['loser'] <NEW_LINE> time_in_page = norm.rvs(loser_time, loser_time**2) <NEW_LINE> time.sleep(time_in_page) <NEW_LINE> self.client.post('/useractions/', {'ip': str(i), 'action': leave_action}, format='json')
Test 내에서 서버에 view와 leave에 대한 request를 보내는 method. Bandit algorithm이 가장 좋은 arm을 제대로 찾아내는지 보기 위해 사용됨. :param num_user: int, request를 보내는 가상 유저의 개수 :param experiment: Experiment 모델 인스턴스 :param winner: str, 가장 좋은 arm에 해당하는 Group 모델 인스턴스의 name 애트리뷰트 :param times: dict, 가장 좋은 arm에 할당되었을 때와 그 외의 arm에 할당되었을 때 각각 유저가 페이지에 머무를 평균 시간(초). {'winner': float, 'loser': float}의 형식이어야 함. 각 float 값은 0 이상의 값이어야 함. :return: 없음
625941c2b7558d58953c4eb8
def test_set(self, handler): <NEW_LINE> <INDENT> handler.create("test") <NEW_LINE> assert handler.set("test","foo") == "Yes"
Checks for a key in a blank filter
625941c291af0d3eaac9b9b7
def test_inspect_method(self): <NEW_LINE> <INDENT> archimedes = MockedArchimedes(KIBANA_URL, self.tmp_full) <NEW_LINE> objs = [obj for obj in archimedes.inspect()] <NEW_LINE> self.assertEqual(objs, [])
Test whether the method inspect properly works
625941c27d43ff24873a2c3f
def _find_resource_by_url(self, url, version): <NEW_LINE> <INDENT> resource = Resource(name=url[1:-1], url=url) <NEW_LINE> registry = ResourcesRegistry() <NEW_LINE> registry.register_resource(resource) <NEW_LINE> if version != "latest": <NEW_LINE> <INDENT> found_resource = registry.find_by_url(url, version) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> found_resource = registry.find_by_url(url) <NEW_LINE> <DEDENT> self.assertEqual(resource.name, found_resource.name) <NEW_LINE> self.assertEqual(resource.version, found_resource.version) <NEW_LINE> self.assertEqual(resource.url, found_resource.url) <NEW_LINE> self.assertEqual(resource.model, found_resource.model)
This method provides a test case template for find by url method.
625941c2e76e3b2f99f3a7af
def simple_read(file_name): <NEW_LINE> <INDENT> grid_file = open(file_name, 'r') <NEW_LINE> grid_list = [] <NEW_LINE> for line in grid_file.readlines(): <NEW_LINE> <INDENT> values = line.split() <NEW_LINE> values = [float(i) for i in values] <NEW_LINE> grid_list.append(values) <NEW_LINE> <DEDENT> grid_file.close() <NEW_LINE> return grid_list
Read in grids from a simple file.
625941c23346ee7daa2b2d0b
def register_node(self, nodeId, device, commTimeout, sourceId=None, cacheType=None): <NEW_LINE> <INDENT> self.nodes[nodeId] = {"device": device, "commTimeout": commTimeout, "sourceId": sourceId, "cacheType": cacheType};
Register a sensor/device as available for serving of data through this XMPP instance. The device object may by any custom implementation to support specific devices, but it must implement the functions: has_field request_fields according to the interfaces shown in the example device.py file. Arguments: nodeId -- The identifier for the device device -- The device object commTimeout -- Time in seconds to wait between each callback from device during a data readout. Float. sourceId -- [optional] identifying the data source controlling the device cacheType -- [optional] narrowing down the search to a specific kind of node
625941c25510c4643540f389
def xtest_2D_facet_markings_1 (self): <NEW_LINE> <INDENT> marked_facets = [7] <NEW_LINE> self._facet_marker_driver(2, 1, marked_facets, 8)
Test to see if the 2D facet markings behave as expected. 1 edge marked
625941c24527f215b584c3f9
def combine(self, range_obj) -> bool: <NEW_LINE> <INDENT> if self.is_disjoint(range_obj): <NEW_LINE> <INDENT> return Range(0) <NEW_LINE> <DEDENT> new_start = min(self.start, range_obj.start) <NEW_LINE> new_end = max(self.end, range_obj.end) <NEW_LINE> return Range(new_start, new_end)
Returns a new Range object which is a combination of the two ranges if they are not disjoint
625941c2fbf16365ca6f6160
def __str__(self): <NEW_LINE> <INDENT> string = self.GetString() <NEW_LINE> return string.encode(u'utf-8')
Returns a string representation.
625941c224f1403a92600b08
def add(self, o): <NEW_LINE> <INDENT> pass
Add a new Item
625941c229b78933be1e564f
def setUp(self): <NEW_LINE> <INDENT> self.paths = self.mktemp() <NEW_LINE> self.projects = self.mktemp() <NEW_LINE> os.makedirs(self.paths) <NEW_LINE> os.makedirs(self.projects) <NEW_LINE> self.manager = BranchManager(self.paths, self.projects) <NEW_LINE> self.cwd = os.getcwd() <NEW_LINE> self.repositories = FilePath(self.mktemp())
Create a branch manager with temporary directories for all its working filesystem paths.
625941c2cb5e8a47e48b7a4d
def status_result(): <NEW_LINE> <INDENT> if status.call_count > 2: <NEW_LINE> <INDENT> return 'stopped' <NEW_LINE> <DEDENT> return 'running'
Return value of the status property.
625941c23539df3088e2e2eb
@app.route('/goauth2redirect') <NEW_LINE> def goauth2redirect(): <NEW_LINE> <INDENT> flow = client.flow_from_clientsecrets( 'g_client_secrets.json', scope=['https://www.googleapis.com/auth/plus.me', 'https://www.googleapis.com/auth/userinfo.email'], redirect_uri=url_for('goauth2redirect', _external=True)) <NEW_LINE> if 'code' not in request.args: <NEW_LINE> <INDENT> auth_uri = flow.step1_get_authorize_url() <NEW_LINE> return redirect(auth_uri) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> auth_code = request.args.get('code') <NEW_LINE> credentials = flow.step2_exchange(auth_code) <NEW_LINE> session['credentials'] = credentials.to_json() <NEW_LINE> return redirect(url_for('glogin'))
Handles Google authentication flow. Builds a login flow object. Redirects user to Google authentication dialog if user is not logged in. Once user logs in, retrieves authorization code and upgrades code for credentials object. Then redirects to '/glogin' for user data retrieval.
625941c2462c4b4f79d1d671
def do_POST(self): <NEW_LINE> <INDENT> payload = self._handle_data() <NEW_LINE> global GLOBAL_CACHE <NEW_LINE> code, dataframe = self._handle_post(GLOBAL_CACHE, payload) <NEW_LINE> if code != 404: <NEW_LINE> <INDENT> GLOBAL_CACHE = dataframe <NEW_LINE> <DEDENT> self._set_headers(code=code)
Метод обрабатывает POST запросы от клиента. На данный момент доступны два адреса: /events - для обработки позиции курсора мыши /fin - для обработки статистики игры пользователя :return: None
625941c2796e427e537b0565
def js_escape(value): <NEW_LINE> <INDENT> return (value.replace('<', "\\u003c"). replace('>', "\\u003e"). replace('"', "\\u0022"). replace("'", "\\u0027"). replace("`", "\\u0060"). replace("(", "\\u0028"). replace(")", "\\u0029"). replace("{", "\\u007b"). replace("}", "\\u007d"). replace("-", "\\u002d"). replace("+", "\\u007d"). replace("$", "\\u0024"). replace("/", "\\u002f"))
JS XSS Escape
625941c2a8ecb033257d306e
def getTag(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self.map[self.tagProperty] <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return None
Gets the tag.
625941c29b70327d1c4e0d75
def add_zerotier(self, network, name=None): <NEW_LINE> <INDENT> name = name or network.name <NEW_LINE> nic = ZTNic(name, network.id, None, self._parent) <NEW_LINE> nic.client = network.client <NEW_LINE> self._items.append(nic) <NEW_LINE> return nic
Add zerotier by zerotier network :param network: Zerotier network instance (part of zerotierclient) :type network: JumpScale9Lib.clients.zerotier.ZerotierClient.ZeroTierNetwork :param name: Name for the nic if left blank will be the name of the network :type name: str
625941c20fa83653e4656f5d
def ellapsed(self): <NEW_LINE> <INDENT> now = time.time() <NEW_LINE> ellapsed = now-self.start <NEW_LINE> self.start = now <NEW_LINE> return ellapsed
return the number of seconds ellapsed
625941c23d592f4c4ed1d013