prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>FbxCameraStereo.java<|end_file_name|><|fim▁begin|>/* * MIT License * * Copyright (c) 2017 Alessandro Arcangeli <alessandroarcangeli.rm@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<|fim▁hole|> * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package mrkoopa.jfbx; public interface FbxCameraStereo extends FbxCamera { }<|fim▁end|>
<|file_name|>gettwittermps.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python import urllib2 import csv import xml.sax uri = "http://spreadsheets.google.com/tq?tqx=out:csv&key=0AjWA_TWMI4t_dFI5MWRWZkRWbFJ6MVhHQzVmVndrZnc&hl=en_GB" f = urllib2.urlopen(uri) csv_data = f.read() lines = csv_data.split("\n") rows = csv.reader(lines.__iter__(), delimiter=',', quotechar='"') class PeopleParser(xml.sax.handler.ContentHandler): def __init__(self): self.parser = xml.sax.make_parser() self.parser.setContentHandler(self) def parse(self,filename): self.office_id_to_person_id = {}<|fim▁hole|> self.parser.parse(filename) def startElement(self,name,attrs): if name == 'person': self.current_person_id = attrs['id'] elif name == 'office': self.office_id_to_person_id[attrs['id']] = self.current_person_id def endElement(self,name): if name == 'person': self.current_person_id = None people_parser = PeopleParser() people_parser.parse("../members/people.xml") person_id_to_twitter_username = {} output_filename = "../members/twitter-commons.xml" fp = open(output_filename,"w") fp.write('''<?xml version="1.0" encoding="ISO-8859-1"?> <publicwhip> ''') for r in rows: if len(r) < 5: continue member_id = r[2] twitter_username = r[4] if member_id == "url": # That's the header line... continue if len(twitter_username) == 0: continue if member_id not in people_parser.office_id_to_person_id: raise "No person ID found for %s in line %s" % (member_id,"#".join(r)) person_id = people_parser.office_id_to_person_id[member_id] fp.write("<personinfo id=\"%s\" twitter_username=\"%s\"/>\n"%(person_id,twitter_username)) fp.write("</publicwhip>")<|fim▁end|>
<|file_name|>project.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Code for projecting associated types out of trait references. use super::elaborate_predicates; use super::Obligation; use super::ObligationCause; use super::Overflow; use super::PredicateObligation; use super::SelectionContext; use super::SelectionError; use super::VtableImplData; use super::util; use middle::infer; use middle::subst::{Subst, Substs}; use middle::ty::{self, AsPredicate, ReferencesError, RegionEscape, HasProjectionTypes, ToPolyTraitRef, Ty}; use middle::ty_fold::{self, TypeFoldable, TypeFolder}; use std::rc::Rc; use syntax::ast; use syntax::parse::token; use util::common::FN_OUTPUT_NAME; use util::ppaux::Repr; pub type PolyProjectionObligation<'tcx> = Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>; pub type ProjectionObligation<'tcx> = Obligation<'tcx, ty::ProjectionPredicate<'tcx>>; pub type ProjectionTyObligation<'tcx> = Obligation<'tcx, ty::ProjectionTy<'tcx>>; /// When attempting to resolve `<T as TraitRef>::Name` ... pub enum ProjectionTyError<'tcx> { /// ...we found multiple sources of information and couldn't resolve the ambiguity. 
TooManyCandidates, /// ...an error occurred matching `T : TraitRef` TraitSelectionError(SelectionError<'tcx>), } #[derive(Clone)] pub struct MismatchedProjectionTypes<'tcx> { pub err: ty::type_err<'tcx> } #[derive(PartialEq, Eq)] enum ProjectionTyCandidate<'tcx> { ParamEnv(ty::PolyProjectionPredicate<'tcx>), Impl(VtableImplData<'tcx, PredicateObligation<'tcx>>), Closure(ast::DefId, Substs<'tcx>), FnPointer(Ty<'tcx>), } struct ProjectionTyCandidateSet<'tcx> { vec: Vec<ProjectionTyCandidate<'tcx>>, ambiguous: bool } /// Evaluates constraints of the form: /// /// for<...> <T as Trait>::U == V /// /// If successful, this may result in additional obligations. pub fn poly_project_and_unify_type<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &PolyProjectionObligation<'tcx>) -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>> { debug!("poly_project_and_unify_type(obligation={})", obligation.repr(selcx.tcx())); let infcx = selcx.infcx(); infcx.try(|snapshot| { let (skol_predicate, skol_map) = infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot); let skol_obligation = obligation.with(skol_predicate); match project_and_unify_type(selcx, &skol_obligation) { Ok(result) => { match infcx.leak_check(&skol_map, snapshot) { Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)), Err(e) => Err(MismatchedProjectionTypes { err: e }), } } Err(e) => { Err(e) } } }) } /// Evaluates constraints of the form: /// /// <T as Trait>::U == V /// /// If successful, this may result in additional obligations. 
fn project_and_unify_type<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionObligation<'tcx>) -> Result<Option<Vec<PredicateObligation<'tcx>>>, MismatchedProjectionTypes<'tcx>> { debug!("project_and_unify_type(obligation={})", obligation.repr(selcx.tcx())); let Normalized { value: normalized_ty, obligations } = match opt_normalize_projection_type(selcx, obligation.predicate.projection_ty.clone(), obligation.cause.clone(), obligation.recursion_depth) { Some(n) => n, None => { consider_unification_despite_ambiguity(selcx, obligation); return Ok(None); } }; debug!("project_and_unify_type: normalized_ty={} obligations={}", normalized_ty.repr(selcx.tcx()), obligations.repr(selcx.tcx())); let infcx = selcx.infcx(); let origin = infer::RelateOutputImplTypes(obligation.cause.span); match infer::mk_eqty(infcx, true, origin, normalized_ty, obligation.predicate.ty) { Ok(()) => Ok(Some(obligations)), Err(err) => Err(MismatchedProjectionTypes { err: err }), } } fn consider_unification_despite_ambiguity<'cx,'tcx>(selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionObligation<'tcx>) { debug!("consider_unification_despite_ambiguity(obligation={})", obligation.repr(selcx.tcx())); let def_id = obligation.predicate.projection_ty.trait_ref.def_id; match selcx.tcx().lang_items.fn_trait_kind(def_id) { Some(_) => { } None => { return; } } let infcx = selcx.infcx(); let self_ty = obligation.predicate.projection_ty.trait_ref.self_ty(); let self_ty = infcx.shallow_resolve(self_ty); debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}", self_ty.sty); match self_ty.sty { ty::ty_closure(closure_def_id, substs) => { let closure_typer = selcx.closure_typer(); let closure_type = closure_typer.closure_type(closure_def_id, substs); let ty::Binder((_, ret_type)) = util::closure_trait_ref_and_return_type(infcx.tcx, def_id, self_ty, &closure_type.sig, util::TupleArgumentsFlag::No); let (ret_type, _) = infcx.replace_late_bound_regions_with_fresh_var( 
obligation.cause.span, infer::AssocTypeProjection(obligation.predicate.projection_ty.item_name), &ty::Binder(ret_type)); debug!("consider_unification_despite_ambiguity: ret_type={:?}", ret_type.repr(selcx.tcx())); let origin = infer::RelateOutputImplTypes(obligation.cause.span); let obligation_ty = obligation.predicate.ty; match infer::mk_eqty(infcx, true, origin, obligation_ty, ret_type) { Ok(()) => { } Err(_) => { /* ignore errors */ } } } _ => { } } } /// Normalizes any associated type projections in `value`, replacing /// them with a fully resolved type where possible. The return value /// combines the normalized result and any additional obligations that /// were incurred as result. pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>, cause: ObligationCause<'tcx>, value: &T) -> Normalized<'tcx, T> where T : TypeFoldable<'tcx> + HasProjectionTypes + Clone + Repr<'tcx> { normalize_with_depth(selcx, cause, 0, value) } /// As `normalize`, but with a custom depth. pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>, cause: ObligationCause<'tcx>, depth: uint, value: &T) -> Normalized<'tcx, T> where T : TypeFoldable<'tcx> + HasProjectionTypes + Clone + Repr<'tcx> { let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth); let result = normalizer.fold(value); Normalized { value: result, obligations: normalizer.obligations, } } struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> { selcx: &'a mut SelectionContext<'b,'tcx>, cause: ObligationCause<'tcx>, obligations: Vec<PredicateObligation<'tcx>>, depth: uint, } impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> { fn new(selcx: &'a mut SelectionContext<'b,'tcx>, cause: ObligationCause<'tcx>, depth: uint) -> AssociatedTypeNormalizer<'a,'b,'tcx> { AssociatedTypeNormalizer { selcx: selcx, cause: cause, obligations: vec!(), depth: depth, } } fn fold<T:TypeFoldable<'tcx> + HasProjectionTypes + Clone>(&mut self, value: &T) -> T { let value = 
self.selcx.infcx().resolve_type_vars_if_possible(value); if !value.has_projection_types() { value.clone() } else { value.fold_with(self) } } } impl<'a,'b,'tcx> TypeFolder<'tcx> for AssociatedTypeNormalizer<'a,'b,'tcx> { fn tcx(&self) -> &ty::ctxt<'tcx> { self.selcx.tcx() } fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { // We don't want to normalize associated types that occur inside of region // binders, because they may contain bound regions, and we can't cope with that. // // Example: // // for<'a> fn(<T as Foo<&'a>>::A) // // Instead of normalizing `<T as Foo<&'a>>::A` here, we'll // normalize it when we instantiate those bound regions (which // should occur eventually). let ty = ty_fold::super_fold_ty(self, ty); match ty.sty { ty::ty_projection(ref data) if !data.has_escaping_regions() => { // (*) // (*) This is kind of hacky -- we need to be able to // handle normalization within binders because // otherwise we wind up a need to normalize when doing // trait matching (since you can have a trait // obligation like `for<'a> T::B : Fn(&'a int)`), but // we can't normalize with bound regions in scope. So // far now we just ignore binders but only normalize // if all bound regions are gone (and then we still // have to renormalize whenever we instantiate a // binder). It would be better to normalize in a // binding-aware fashion. let Normalized { value: ty, obligations } = normalize_projection_type(self.selcx, data.clone(), self.cause.clone(), self.depth); self.obligations.extend(obligations.into_iter()); ty } _ => { ty } } } } pub struct Normalized<'tcx,T> { pub value: T, pub obligations: Vec<PredicateObligation<'tcx>>, } pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>; impl<'tcx,T> Normalized<'tcx,T> { pub fn with<U>(self, value: U) -> Normalized<'tcx,U> { Normalized { value: value, obligations: self.obligations } } } /// The guts of `normalize`: normalize a specific projection like `<T /// as Trait>::Item`. 
The result is always a type (and possibly /// additional obligations). If ambiguity arises, which implies that /// there are unresolved type variables in the projection, we will /// substitute a fresh type variable `$X` and generate a new /// obligation `<T as Trait>::Item == $X` for later. pub fn normalize_projection_type<'a,'b,'tcx>( selcx: &'a mut SelectionContext<'b,'tcx>, projection_ty: ty::ProjectionTy<'tcx>, cause: ObligationCause<'tcx>, depth: uint) -> NormalizedTy<'tcx> { opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth) .unwrap_or_else(move || { // if we bottom out in ambiguity, create a type variable // and a deferred predicate to resolve this when more type // information is available. let ty_var = selcx.infcx().next_ty_var(); let projection = ty::Binder(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty_var }); let obligation = Obligation::with_depth(cause, depth+1, projection.as_predicate()); Normalized { value: ty_var, obligations: vec!(obligation) } }) } /// The guts of `normalize`: normalize a specific projection like `<T /// as Trait>::Item`. The result is always a type (and possibly /// additional obligations). Returns `None` in the case of ambiguity, /// which indicates that there are unbound type variables. 
fn opt_normalize_projection_type<'a,'b,'tcx>( selcx: &'a mut SelectionContext<'b,'tcx>, projection_ty: ty::ProjectionTy<'tcx>, cause: ObligationCause<'tcx>, depth: uint) -> Option<NormalizedTy<'tcx>> { debug!("normalize_projection_type(\ projection_ty={}, \ depth={})", projection_ty.repr(selcx.tcx()), depth); let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone()); match project_type(selcx, &obligation) { Ok(ProjectedTy::Progress(projected_ty, mut obligations)) => { // if projection succeeded, then what we get out of this // is also non-normalized (consider: it was derived from // an impl, where-clause etc) and hence we must // re-normalize it debug!("normalize_projection_type: projected_ty={} depth={} obligations={}", projected_ty.repr(selcx.tcx()), depth, obligations.repr(selcx.tcx())); if ty::type_has_projection(projected_ty) { let tcx = selcx.tcx(); let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth); let normalized_ty = normalizer.fold(&projected_ty); debug!("normalize_projection_type: normalized_ty={} depth={}", normalized_ty.repr(tcx), depth); obligations.extend(normalizer.obligations.into_iter()); Some(Normalized { value: normalized_ty, obligations: obligations, }) } else { Some(Normalized { value: projected_ty, obligations: obligations, }) } } Ok(ProjectedTy::NoProgress(projected_ty)) => { Some(Normalized { value: projected_ty, obligations: vec!() }) } Err(ProjectionTyError::TooManyCandidates) => { None } Err(ProjectionTyError::TraitSelectionError(_)) => { // if we got an error processing the `T as Trait` part, // just return `ty::err` but add the obligation `T : // Trait`, which when processed will cause the error to be // reported later Some(normalize_to_error(selcx, projection_ty, cause, depth)) } } } /// in various error cases, we just set ty_err and return an obligation /// that, when fulfilled, will lead to an error fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, projection_ty: 
ty::ProjectionTy<'tcx>, cause: ObligationCause<'tcx>, depth: uint) -> NormalizedTy<'tcx> { let trait_ref = projection_ty.trait_ref.to_poly_trait_ref(); let trait_obligation = Obligation { cause: cause, recursion_depth: depth, predicate: trait_ref.as_predicate() }; Normalized { value: selcx.tcx().types.err, obligations: vec!(trait_obligation) } } enum ProjectedTy<'tcx> { Progress(Ty<'tcx>, Vec<PredicateObligation<'tcx>>), NoProgress(Ty<'tcx>), } /// Compute the result of a projection type (if we can). fn project_type<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>) -> Result<ProjectedTy<'tcx>, ProjectionTyError<'tcx>> { debug!("project(obligation={})", obligation.repr(selcx.tcx())); let recursion_limit = selcx.tcx().sess.recursion_limit.get(); if obligation.recursion_depth >= recursion_limit { debug!("project: overflow!"); return Err(ProjectionTyError::TraitSelectionError(Overflow)); } let obligation_trait_ref = selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate.trait_ref); debug!("project: obligation_trait_ref={}", obligation_trait_ref.repr(selcx.tcx())); if obligation_trait_ref.references_error() { return Ok(ProjectedTy::Progress(selcx.tcx().types.err, vec!())); } let mut candidates = ProjectionTyCandidateSet { vec: Vec::new(), ambiguous: false, }; assemble_candidates_from_param_env(selcx, obligation, &obligation_trait_ref, &mut candidates); assemble_candidates_from_trait_def(selcx, obligation, &obligation_trait_ref, &mut candidates); if let Err(e) = assemble_candidates_from_impls(selcx, obligation, &obligation_trait_ref, &mut candidates) { return Err(ProjectionTyError::TraitSelectionError(e)); } debug!("{} candidates, ambiguous={}", candidates.vec.len(), candidates.ambiguous); // We probably need some winnowing logic similar to select here. // Drop duplicates. // // Note: `candidates.vec` seems to be on the critical path of the // compiler. 
Replacing it with an hash set was also tried, which would // render the following dedup unnecessary. It led to cleaner code but // prolonged compiling time of `librustc` from 5m30s to 6m in one test, or // ~9% performance lost. if candidates.vec.len() > 1 { let mut i = 0; while i < candidates.vec.len() { let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]); if has_dup { candidates.vec.swap_remove(i); } else { i += 1; } } } if candidates.ambiguous || candidates.vec.len() > 1 { return Err(ProjectionTyError::TooManyCandidates); } match candidates.vec.pop() { Some(candidate) => { let (ty, obligations) = confirm_candidate(selcx, obligation, candidate); Ok(ProjectedTy::Progress(ty, obligations)) } None => { Ok(ProjectedTy::NoProgress(ty::mk_projection(selcx.tcx(), obligation.predicate.trait_ref.clone(), obligation.predicate.item_name))) } } } /// The first thing we have to do is scan through the parameter /// environment to see whether there are any projection predicates /// there that can answer this question. fn assemble_candidates_from_param_env<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &Rc<ty::TraitRef<'tcx>>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>) { let env_predicates = selcx.param_env().caller_bounds.clone(); assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref, candidate_set, env_predicates); } /// In the case of a nested projection like <<A as Foo>::FooT as Bar>::BarT, we may find /// that the definition of `Foo` has some clues: /// /// ``` /// trait Foo { /// type FooT : Bar<BarT=i32> /// } /// ``` /// /// Here, for example, we could conclude that the result is `i32`. 
fn assemble_candidates_from_trait_def<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &Rc<ty::TraitRef<'tcx>>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>) { // Check whether the self-type is itself a projection. let trait_ref = match obligation_trait_ref.self_ty().sty { ty::ty_projection(ref data) => data.trait_ref.clone(), ty::ty_infer(ty::TyVar(_)) => { // If the self-type is an inference variable, then it MAY wind up // being a projected type, so induce an ambiguity. candidate_set.ambiguous = true; return; } _ => { return; } }; // If so, extract what we know from the trait and try to come up with a good answer. let trait_predicates = ty::lookup_predicates(selcx.tcx(), trait_ref.def_id); let bounds = trait_predicates.instantiate(selcx.tcx(), trait_ref.substs); assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref, candidate_set, bounds.predicates.into_vec()); } fn assemble_candidates_from_predicates<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &Rc<ty::TraitRef<'tcx>>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>, env_predicates: Vec<ty::Predicate<'tcx>>) { debug!("assemble_candidates_from_predicates(obligation={}, env_predicates={})", obligation.repr(selcx.tcx()), env_predicates.repr(selcx.tcx())); let infcx = selcx.infcx(); for predicate in elaborate_predicates(selcx.tcx(), env_predicates) { match predicate { ty::Predicate::Projection(ref data) => { let same_name = data.item_name() == obligation.predicate.item_name; let is_match = same_name && infcx.probe(|_| { let origin = infer::Misc(obligation.cause.span); let data_poly_trait_ref = data.to_poly_trait_ref(); let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); infcx.sub_poly_trait_refs(false, origin, data_poly_trait_ref, obligation_poly_trait_ref).is_ok() }); debug!("assemble_candidates_from_predicates: candidate {} 
is_match {} same_name {}", data.repr(selcx.tcx()), is_match, same_name); if is_match { candidate_set.vec.push( ProjectionTyCandidate::ParamEnv(data.clone())); } } _ => { } } } } fn assemble_candidates_from_object_type<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &Rc<ty::TraitRef<'tcx>>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>, object_ty: Ty<'tcx>) { let infcx = selcx.infcx(); debug!("assemble_candidates_from_object_type(object_ty={})", object_ty.repr(infcx.tcx)); let data = match object_ty.sty { ty::ty_trait(ref data) => data, _ => { selcx.tcx().sess.span_bug( obligation.cause.span, &format!("assemble_candidates_from_object_type called with non-object: {}", object_ty.repr(selcx.tcx()))); } }; let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty); let env_predicates = projection_bounds.iter() .map(|p| p.as_predicate()) .collect(); assemble_candidates_from_predicates(selcx, obligation, obligation_trait_ref, candidate_set, env_predicates) } fn assemble_candidates_from_impls<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &Rc<ty::TraitRef<'tcx>>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>) -> Result<(), SelectionError<'tcx>> { // If we are resolving `<T as TraitRef<...>>::Item == Type`, // start out by selecting the predicate `T as TraitRef<...>`: let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); let vtable = match selcx.select(&trait_obligation) { Ok(Some(vtable)) => vtable, Ok(None) => { candidate_set.ambiguous = true; return Ok(()); }<|fim▁hole|> Err(e) => { debug!("assemble_candidates_from_impls: selection error {}", e.repr(selcx.tcx())); return Err(e); } }; match vtable { super::VtableImpl(data) => { debug!("assemble_candidates_from_impls: impl candidate {}", data.repr(selcx.tcx())); 
candidate_set.vec.push( ProjectionTyCandidate::Impl(data)); } super::VtableObject(data) => { assemble_candidates_from_object_type( selcx, obligation, obligation_trait_ref, candidate_set, data.object_ty); } super::VtableClosure(closure_def_id, substs) => { candidate_set.vec.push( ProjectionTyCandidate::Closure(closure_def_id, substs)); } super::VtableFnPointer(fn_type) => { candidate_set.vec.push( ProjectionTyCandidate::FnPointer(fn_type)); } super::VtableParam(..) => { // This case tell us nothing about the value of an // associated type. Consider: // // ``` // trait SomeTrait { type Foo; } // fn foo<T:SomeTrait>(...) { } // ``` // // If the user writes `<T as SomeTrait>::Foo`, then the `T // : SomeTrait` binding does not help us decide what the // type `Foo` is (at least, not more specifically than // what we already knew). // // But wait, you say! What about an example like this: // // ``` // fn bar<T:SomeTrait<Foo=uint>>(...) { ... } // ``` // // Doesn't the `T : Sometrait<Foo=uint>` predicate help // resolve `T::Foo`? And of course it does, but in fact // that single predicate is desugared into two predicates // in the compiler: a trait predicate (`T : SomeTrait`) and a // projection. And the projection where clause is handled // in `assemble_candidates_from_param_env`. } super::VtableDefaultImpl(..) | super::VtableBuiltin(..) => { // These traits have no associated types. 
selcx.tcx().sess.span_bug( obligation.cause.span, &format!("Cannot project an associated type from `{}`", vtable.repr(selcx.tcx()))); } } Ok(()) } fn confirm_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, candidate: ProjectionTyCandidate<'tcx>) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { let infcx = selcx.infcx(); debug!("confirm_candidate(candidate={}, obligation={})", candidate.repr(infcx.tcx), obligation.repr(infcx.tcx)); match candidate { ProjectionTyCandidate::ParamEnv(poly_projection) => { confirm_param_env_candidate(selcx, obligation, poly_projection) } ProjectionTyCandidate::Impl(impl_vtable) => { confirm_impl_candidate(selcx, obligation, impl_vtable) } ProjectionTyCandidate::Closure(def_id, substs) => { confirm_closure_candidate(selcx, obligation, def_id, &substs) } ProjectionTyCandidate::FnPointer(fn_type) => { confirm_fn_pointer_candidate(selcx, obligation, fn_type) } } } fn confirm_fn_pointer_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, fn_type: Ty<'tcx>) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { let fn_type = selcx.infcx().shallow_resolve(fn_type); let sig = ty::ty_fn_sig(fn_type); confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes) } fn confirm_closure_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, closure_def_id: ast::DefId, substs: &Substs<'tcx>) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { let closure_typer = selcx.closure_typer(); let closure_type = closure_typer.closure_type(closure_def_id, substs); confirm_callable_candidate(selcx, obligation, &closure_type.sig, util::TupleArgumentsFlag::No) } fn confirm_callable_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, fn_sig: &ty::PolyFnSig<'tcx>, flag: util::TupleArgumentsFlag) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { let tcx = 
selcx.tcx(); debug!("confirm_closure_candidate({},{})", obligation.repr(tcx), fn_sig.repr(tcx)); // Note: we unwrap the binder here but re-create it below (1) let ty::Binder((trait_ref, ret_type)) = util::closure_trait_ref_and_return_type(tcx, obligation.predicate.trait_ref.def_id, obligation.predicate.trait_ref.self_ty(), fn_sig, flag); let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here projection_ty: ty::ProjectionTy { trait_ref: trait_ref, item_name: token::intern(FN_OUTPUT_NAME), }, ty: ret_type }); confirm_param_env_candidate(selcx, obligation, predicate) } fn confirm_param_env_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, poly_projection: ty::PolyProjectionPredicate<'tcx>) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { let infcx = selcx.infcx(); let projection = infcx.replace_late_bound_regions_with_fresh_var( obligation.cause.span, infer::LateBoundRegionConversionTime::HigherRankedType, &poly_projection).0; assert_eq!(projection.projection_ty.item_name, obligation.predicate.item_name); let origin = infer::RelateOutputImplTypes(obligation.cause.span); match infcx.sub_trait_refs(false, origin, obligation.predicate.trait_ref.clone(), projection.projection_ty.trait_ref.clone()) { Ok(()) => { } Err(e) => { selcx.tcx().sess.span_bug( obligation.cause.span, &format!("Failed to unify `{}` and `{}` in projection: {}", obligation.repr(selcx.tcx()), projection.repr(selcx.tcx()), ty::type_err_to_str(selcx.tcx(), &e))); } } (projection.ty, vec!()) } fn confirm_impl_candidate<'cx,'tcx>( selcx: &mut SelectionContext<'cx,'tcx>, obligation: &ProjectionTyObligation<'tcx>, impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>) -> (Ty<'tcx>, Vec<PredicateObligation<'tcx>>) { // there don't seem to be nicer accessors to these: let impl_items_map = selcx.tcx().impl_items.borrow(); let impl_or_trait_items_map = selcx.tcx().impl_or_trait_items.borrow(); let impl_items = 
&impl_items_map[impl_vtable.impl_def_id]; let mut impl_ty = None; for impl_item in impl_items { let assoc_type = match impl_or_trait_items_map[impl_item.def_id()] { ty::TypeTraitItem(ref assoc_type) => assoc_type.clone(), ty::MethodTraitItem(..) => { continue; } }; if assoc_type.name != obligation.predicate.item_name { continue; } let impl_poly_ty = ty::lookup_item_type(selcx.tcx(), assoc_type.def_id); impl_ty = Some(impl_poly_ty.ty.subst(selcx.tcx(), &impl_vtable.substs)); break; } match impl_ty { Some(ty) => (ty, impl_vtable.nested.into_vec()), None => { // This means that the impl is missing a // definition for the associated type. This error // ought to be reported by the type checker method // `check_impl_items_against_trait`, so here we // just return ty_err. (selcx.tcx().types.err, vec!()) } } } impl<'tcx> Repr<'tcx> for ProjectionTyError<'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { match *self { ProjectionTyError::TooManyCandidates => format!("NoCandidate"), ProjectionTyError::TraitSelectionError(ref e) => format!("TraitSelectionError({})", e.repr(tcx)), } } } impl<'tcx> Repr<'tcx> for ProjectionTyCandidate<'tcx> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { match *self { ProjectionTyCandidate::ParamEnv(ref data) => format!("ParamEnv({})", data.repr(tcx)), ProjectionTyCandidate::Impl(ref data) => format!("Impl({})", data.repr(tcx)), ProjectionTyCandidate::Closure(ref a, ref b) => format!("Closure(({},{}))", a.repr(tcx), b.repr(tcx)), ProjectionTyCandidate::FnPointer(a) => format!("FnPointer(({}))", a.repr(tcx)), } } } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> { fn fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Normalized<'tcx, T> { Normalized { value: self.value.fold_with(folder), obligations: self.obligations.fold_with(folder), } } } impl<'tcx, T:Repr<'tcx>> Repr<'tcx> for Normalized<'tcx, T> { fn repr(&self, tcx: &ty::ctxt<'tcx>) -> String { format!("Normalized({},{})", self.value.repr(tcx), 
self.obligations.repr(tcx)) } }<|fim▁end|>
<|file_name|>subscriptionservice.js<|end_file_name|><|fim▁begin|>/** * @ngdoc service * @name ftepApp.SubscriptionService * @description * # SubscriptionService * Service for subscriptions. */ 'use strict'; define(['../ftepmodules', 'traversonHal'], function (ftepmodules, TraversonJsonHalAdapter) { ftepmodules.service('SubscriptionService', [ 'ftepProperties', '$q', 'traverson', function (ftepProperties, $q, traverson) {<|fim▁hole|> traverson.registerMediaType(TraversonJsonHalAdapter.mediaType, TraversonJsonHalAdapter); var rootUri = ftepProperties.URLv2; var halAPI = traverson.from(rootUri).jsonHal().useAngularHttp(); var deleteAPI = traverson.from(rootUri).useAngularHttp(); this.getUserSubscriptions = function(user) { var deferred = $q.defer(); halAPI.from(rootUri + '/subscriptions/search/findByOwner?owner=' + user._links.self.href) .newRequest() .getResource() .result .then( function(document) { deferred.resolve(document); }, function(error) { MessageService.addError('Failed to get subscriptions for user ' + user.name, error); deferred.reject(); }); return deferred.promise; }; this.updateSubscription = function(subscription) { var patchedSubscription = { packageName: subscription.packageName, storageQuota: subscription.storageQuota, processingQuota: subscription.processingQuota, subscriptionStart: subscription.subscriptionStart, subscriptionEnd: subscription.subscriptionEnd, commentText: subscription.commentText }; var deferred = $q.defer(); halAPI.from(rootUri + '/subscriptions/' + subscription.id) .newRequest() .patch(patchedSubscription) .result .then( function(document) { deferred.resolve(document); }, function(error) { MessageService.addError('Failed to update subscription ' + subscription.id, error); deferred.reject(); }); return deferred.promise; }; this.createSubscription = function(subscription, subscriptionOwner, subscriptionCreator) { var newSubscription = { owner: subscriptionOwner._links.self.href, packageName: subscription.packageName, 
storageQuota: subscription.storageQuota, processingQuota: subscription.processingQuota, subscriptionStart: subscription.subscriptionStart, subscriptionEnd: subscription.subscriptionEnd, commentText: subscription.commentText, creator: subscriptionCreator._links.self.href }; var deferred = $q.defer(); halAPI.from(rootUri + '/subscriptions') .newRequest() .post(newSubscription) .result .then( function(document) { deferred.resolve(document); }, function(error) { MessageService.addError('Failed to update subscription ' + subscription.id, error); deferred.reject(); }); return deferred.promise; }; this.deleteSubscription = function(subscription) { var deferred = $q.defer(); deleteAPI.from(rootUri + '/subscriptions/' + subscription.id) .newRequest() .delete() .result .then( function(document) { if (200 <= document.status && document.status < 300) { deferred.resolve(document); } else { MessageService.addError('Failed to delete subscription ' + subscription.id, error); deferred.reject(); } }, function(error) { MessageService.addError('Failed to delete subscription ' + subscription.id, error); deferred.reject(); }); return deferred.promise; }; this.cancelSubscription = function(subscription) { var deferred = $q.defer(); halAPI.from(rootUri + '/subscriptions/' + subscription.id + "/cancel") .newRequest() .post() .result .then( function(document) { deferred.resolve(document); }, function(error) { MessageService.addError('Failed to cancel subscription ' + subscription.id, error); deferred.reject(); }); return deferred.promise; }; return this; }]); });<|fim▁end|>
var self = this;
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![doc(html_root_url = "http://garrensmith.com/mango_smoothie/")] //! Mango Smoothie //! Mango Smoothie is a [CouchDB Mango](http://docs.couchdb.org/en/latest/api/database/find.html) / //! [Cloudant Query](https://docs.cloudant.com/cloudant_query.html) client library. //! //! # Create Indexes //! //! To create an index first specify the url to the CouchDB/Cloudant instance, then //! specify the fields to be indexed. //! //! ```ignore //! extern crate mango_smoothie; //! use mango_smoothie::{database}; //! //! let resp = database("http://tester:testerpass@127.0.0.1:5984/animaldb").unwrap() //! .create_index(&["class", "name"]); //! ``` //! //! # View Indexes //! //! To list all the available indexes do the following: //! //! ``` ignore //! let indexes = database("http://tester:testerpass@127.0.0.1:5984/animaldb").unwrap() //! .list_indexes().unwrap(); //! //! assert!(indexes.total_rows > 0); //! assert_eq!(indexes.indexes[0].name, "_all_docs".to_string()); //! assert!(indexes.indexes[0].def.fields[0].contains_key(&"_id".to_string())); //! ``` //! //! # Query Indexes //! //! Mango Smoothie uses the [serde_json](https://docs.serde.rs/serde_json/) //! macro to help with querying indexes. //! //! ``` ignore //! extern crate mango_smoothie; //! use mango_smoothie::{database}; //! #[macro_use] //! extern crate serde_json; //! //! let query = json!({ //! "selector": { //! "_id": { //! "$gt": "1" //! } //! }, //! "fields": ["_id", "name"], //! "skip": 3, //! "sort": [{"_id": "asc"}] //! }); //! //! let query_resp = db.query_index(query).unwrap(); //! assert_eq!(result.docs.len(), 5);<|fim▁hole|>//! assert_eq!(doc["class"], "mammal"); //! ``` #[macro_use] extern crate serde_derive; extern crate serde; extern crate serde_json; extern crate hyper; pub mod http; pub mod errors; mod database; pub use database::database;<|fim▁end|>
//! let doc = &result.docs[0];
<|file_name|>has-authority.directive.js<|end_file_name|><|fim▁begin|>(function() { 'use strict'; angular .module('rtsApp') .directive('hasAuthority', hasAuthority); hasAuthority.$inject = ['Principal']; function hasAuthority(Principal) { var directive = { restrict: 'A', link: linkFunc }; return directive; function linkFunc(scope, element, attrs) { var authority = attrs.hasAuthority.replace(/\s+/g, ''); var setVisible = function () { element.removeClass('hidden'); }, setHidden = function () { element.addClass('hidden'); }, defineVisibility = function (reset) { if (reset) { setVisible(); } Principal.hasAuthority(authority) .then(function (result) { if (result) { setVisible(); } else { setHidden(); } }); }; if (authority.length > 0) { defineVisibility(true); <|fim▁hole|> scope.$watch(function() { return Principal.isAuthenticated(); }, function() { defineVisibility(true); }); } } } })();<|fim▁end|>
<|file_name|>matcher.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.14.0 // source: envoy/config/common/matcher/v4alpha/matcher.proto package envoy_config_common_matcher_v4alpha import ( _ "github.com/cncf/xds/go/udpa/annotations" v4alpha1 "github.com/envoyproxy/go-control-plane/envoy/config/core/v4alpha" v4alpha "github.com/envoyproxy/go-control-plane/envoy/config/route/v4alpha" v4alpha2 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v4alpha" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 // A matcher, which may traverse a matching tree in order to result in a match action. // During matching, the tree will be traversed until a match is found, or if no match // is found the action specified by the most specific on_no_match will be evaluated. // As an on_no_match might result in another matching tree being evaluated, this process // might repeat several times until the final OnMatch (or no match) is decided. // // This API is a work in progress. type Matcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to MatcherType: // *Matcher_MatcherList_ // *Matcher_MatcherTree_ MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"` // Optional OnMatch to use if the matcher failed. 
// If specified, the OnMatch is used, and the matcher is considered // to have matched. // If not specified, the matcher is considered not to have matched. OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"` } func (x *Matcher) Reset() { *x = Matcher{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher) ProtoMessage() {} func (x *Matcher) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher.ProtoReflect.Descriptor instead. func (*Matcher) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0} } func (m *Matcher) GetMatcherType() isMatcher_MatcherType { if m != nil { return m.MatcherType } return nil } func (x *Matcher) GetMatcherList() *Matcher_MatcherList { if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok { return x.MatcherList } return nil } func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree { if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok { return x.MatcherTree } return nil } func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch { if x != nil { return x.OnNoMatch } return nil } type isMatcher_MatcherType interface { isMatcher_MatcherType() } type Matcher_MatcherList_ struct { // A linear list of matchers to evaluate. MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"` } type Matcher_MatcherTree_ struct { // A match tree to evaluate. 
MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"` } func (*Matcher_MatcherList_) isMatcher_MatcherType() {} func (*Matcher_MatcherTree_) isMatcher_MatcherType() {} // Match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. // [#next-free-field: 11] type MatchPredicate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Rule: // *MatchPredicate_OrMatch // *MatchPredicate_AndMatch // *MatchPredicate_NotMatch // *MatchPredicate_AnyMatch // *MatchPredicate_HttpRequestHeadersMatch // *MatchPredicate_HttpRequestTrailersMatch // *MatchPredicate_HttpResponseHeadersMatch // *MatchPredicate_HttpResponseTrailersMatch // *MatchPredicate_HttpRequestGenericBodyMatch // *MatchPredicate_HttpResponseGenericBodyMatch Rule isMatchPredicate_Rule `protobuf_oneof:"rule"` } func (x *MatchPredicate) Reset() { *x = MatchPredicate{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MatchPredicate) String() string { return protoimpl.X.MessageStringOf(x) } func (*MatchPredicate) ProtoMessage() {} func (x *MatchPredicate) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MatchPredicate.ProtoReflect.Descriptor instead. 
func (*MatchPredicate) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{1} } func (m *MatchPredicate) GetRule() isMatchPredicate_Rule { if m != nil { return m.Rule } return nil } func (x *MatchPredicate) GetOrMatch() *MatchPredicate_MatchSet { if x, ok := x.GetRule().(*MatchPredicate_OrMatch); ok { return x.OrMatch } return nil } func (x *MatchPredicate) GetAndMatch() *MatchPredicate_MatchSet { if x, ok := x.GetRule().(*MatchPredicate_AndMatch); ok { return x.AndMatch } return nil } func (x *MatchPredicate) GetNotMatch() *MatchPredicate { if x, ok := x.GetRule().(*MatchPredicate_NotMatch); ok { return x.NotMatch } return nil } func (x *MatchPredicate) GetAnyMatch() bool { if x, ok := x.GetRule().(*MatchPredicate_AnyMatch); ok { return x.AnyMatch } return false } func (x *MatchPredicate) GetHttpRequestHeadersMatch() *HttpHeadersMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpRequestHeadersMatch); ok { return x.HttpRequestHeadersMatch } return nil } func (x *MatchPredicate) GetHttpRequestTrailersMatch() *HttpHeadersMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpRequestTrailersMatch); ok { return x.HttpRequestTrailersMatch } return nil } func (x *MatchPredicate) GetHttpResponseHeadersMatch() *HttpHeadersMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpResponseHeadersMatch); ok { return x.HttpResponseHeadersMatch } return nil } func (x *MatchPredicate) GetHttpResponseTrailersMatch() *HttpHeadersMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpResponseTrailersMatch); ok { return x.HttpResponseTrailersMatch } return nil } func (x *MatchPredicate) GetHttpRequestGenericBodyMatch() *HttpGenericBodyMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpRequestGenericBodyMatch); ok { return x.HttpRequestGenericBodyMatch } return nil } func (x *MatchPredicate) GetHttpResponseGenericBodyMatch() *HttpGenericBodyMatch { if x, ok := x.GetRule().(*MatchPredicate_HttpResponseGenericBodyMatch); ok { 
return x.HttpResponseGenericBodyMatch } return nil } type isMatchPredicate_Rule interface { isMatchPredicate_Rule() } type MatchPredicate_OrMatch struct { // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. OrMatch *MatchPredicate_MatchSet `protobuf:"bytes,1,opt,name=or_match,json=orMatch,proto3,oneof"` } type MatchPredicate_AndMatch struct { // A set that describes a logical AND. If all members of the set match, the match configuration // matches. AndMatch *MatchPredicate_MatchSet `protobuf:"bytes,2,opt,name=and_match,json=andMatch,proto3,oneof"` } type MatchPredicate_NotMatch struct { // A negation match. The match configuration will match if the negated match condition matches. NotMatch *MatchPredicate `protobuf:"bytes,3,opt,name=not_match,json=notMatch,proto3,oneof"` } type MatchPredicate_AnyMatch struct { // The match configuration will always match. AnyMatch bool `protobuf:"varint,4,opt,name=any_match,json=anyMatch,proto3,oneof"` } type MatchPredicate_HttpRequestHeadersMatch struct { // HTTP request headers match configuration. HttpRequestHeadersMatch *HttpHeadersMatch `protobuf:"bytes,5,opt,name=http_request_headers_match,json=httpRequestHeadersMatch,proto3,oneof"` } type MatchPredicate_HttpRequestTrailersMatch struct { // HTTP request trailers match configuration. HttpRequestTrailersMatch *HttpHeadersMatch `protobuf:"bytes,6,opt,name=http_request_trailers_match,json=httpRequestTrailersMatch,proto3,oneof"` } type MatchPredicate_HttpResponseHeadersMatch struct { // HTTP response headers match configuration. HttpResponseHeadersMatch *HttpHeadersMatch `protobuf:"bytes,7,opt,name=http_response_headers_match,json=httpResponseHeadersMatch,proto3,oneof"` } type MatchPredicate_HttpResponseTrailersMatch struct { // HTTP response trailers match configuration. 
HttpResponseTrailersMatch *HttpHeadersMatch `protobuf:"bytes,8,opt,name=http_response_trailers_match,json=httpResponseTrailersMatch,proto3,oneof"` } type MatchPredicate_HttpRequestGenericBodyMatch struct { // HTTP request generic body match configuration. HttpRequestGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,9,opt,name=http_request_generic_body_match,json=httpRequestGenericBodyMatch,proto3,oneof"` } type MatchPredicate_HttpResponseGenericBodyMatch struct { // HTTP response generic body match configuration. HttpResponseGenericBodyMatch *HttpGenericBodyMatch `protobuf:"bytes,10,opt,name=http_response_generic_body_match,json=httpResponseGenericBodyMatch,proto3,oneof"` } func (*MatchPredicate_OrMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_AndMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_NotMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_AnyMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpRequestHeadersMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpRequestTrailersMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpResponseHeadersMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpResponseTrailersMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpRequestGenericBodyMatch) isMatchPredicate_Rule() {} func (*MatchPredicate_HttpResponseGenericBodyMatch) isMatchPredicate_Rule() {} // HTTP headers match configuration. type HttpHeadersMatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // HTTP headers to match. 
Headers []*v4alpha.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` } func (x *HttpHeadersMatch) Reset() { *x = HttpHeadersMatch{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *HttpHeadersMatch) String() string { return protoimpl.X.MessageStringOf(x) } func (*HttpHeadersMatch) ProtoMessage() {} func (x *HttpHeadersMatch) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HttpHeadersMatch.ProtoReflect.Descriptor instead. func (*HttpHeadersMatch) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{2} } func (x *HttpHeadersMatch) GetHeaders() []*v4alpha.HeaderMatcher { if x != nil { return x.Headers } return nil } // HTTP generic body match configuration. // List of text strings and hex strings to be located in HTTP body. // All specified strings must be found in the HTTP body for positive match. // The search may be limited to specified number of bytes from the body start. // // .. attention:: // // Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. // If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified // to scan only part of the http body. 
type HttpGenericBodyMatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). BytesLimit uint32 `protobuf:"varint,1,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` // List of patterns to match. Patterns []*HttpGenericBodyMatch_GenericTextMatch `protobuf:"bytes,2,rep,name=patterns,proto3" json:"patterns,omitempty"` } func (x *HttpGenericBodyMatch) Reset() { *x = HttpGenericBodyMatch{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *HttpGenericBodyMatch) String() string { return protoimpl.X.MessageStringOf(x) } func (*HttpGenericBodyMatch) ProtoMessage() {} func (x *HttpGenericBodyMatch) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use HttpGenericBodyMatch.ProtoReflect.Descriptor instead. func (*HttpGenericBodyMatch) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{3} } func (x *HttpGenericBodyMatch) GetBytesLimit() uint32 { if x != nil { return x.BytesLimit } return 0 } func (x *HttpGenericBodyMatch) GetPatterns() []*HttpGenericBodyMatch_GenericTextMatch { if x != nil { return x.Patterns } return nil } // What to do if a match is successful. 
type Matcher_OnMatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to OnMatch: // *Matcher_OnMatch_Matcher // *Matcher_OnMatch_Action OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"` } func (x *Matcher_OnMatch) Reset() { *x = Matcher_OnMatch{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_OnMatch) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_OnMatch) ProtoMessage() {} func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead. func (*Matcher_OnMatch) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 0} } func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch { if m != nil { return m.OnMatch } return nil } func (x *Matcher_OnMatch) GetMatcher() *Matcher { if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok { return x.Matcher } return nil } func (x *Matcher_OnMatch) GetAction() *v4alpha1.TypedExtensionConfig { if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok { return x.Action } return nil } type isMatcher_OnMatch_OnMatch interface { isMatcher_OnMatch_OnMatch() } type Matcher_OnMatch_Matcher struct { // Nested matcher to evaluate. // If the nested matcher does not match and does not specify // on_no_match, then this matcher is considered not to have // matched, even if a predicate at this level or above returned // true. 
Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"` } type Matcher_OnMatch_Action struct { // Protocol-specific action to take. Action *v4alpha1.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"` } func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {} func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {} // A linear list of field matchers. // The field matchers are evaluated in order, and the first match // wins. type Matcher_MatcherList struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // A list of matchers. First match wins. Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` } func (x *Matcher_MatcherList) Reset() { *x = Matcher_MatcherList{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherList) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherList) ProtoMessage() {} func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead. 
func (*Matcher_MatcherList) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 1} } func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher { if x != nil { return x.Matchers } return nil } type Matcher_MatcherTree struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Protocol-specific specification of input field to match on. Input *v4alpha1.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // Exact or prefix match maps in which to look up the input value. // If the lookup succeeds, the match is considered successful, and // the corresponding OnMatch is used. // // Types that are assignable to TreeType: // *Matcher_MatcherTree_ExactMatchMap // *Matcher_MatcherTree_PrefixMatchMap // *Matcher_MatcherTree_CustomMatch TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"` } func (x *Matcher_MatcherTree) Reset() { *x = Matcher_MatcherTree{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherTree) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherTree) ProtoMessage() {} func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead. 
func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 2} } func (x *Matcher_MatcherTree) GetInput() *v4alpha1.TypedExtensionConfig { if x != nil { return x.Input } return nil } func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType { if m != nil { return m.TreeType } return nil } func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap { if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok { return x.ExactMatchMap } return nil } func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap { if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok { return x.PrefixMatchMap } return nil } func (x *Matcher_MatcherTree) GetCustomMatch() *v4alpha1.TypedExtensionConfig { if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok { return x.CustomMatch } return nil } type isMatcher_MatcherTree_TreeType interface { isMatcher_MatcherTree_TreeType() } type Matcher_MatcherTree_ExactMatchMap struct { ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"` } type Matcher_MatcherTree_PrefixMatchMap struct { // Longest matching prefix wins. PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"` } type Matcher_MatcherTree_CustomMatch struct { // Extension for custom matching logic. CustomMatch *v4alpha1.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"` } func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {} func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {} func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {} // Predicate to determine if a match is successful. 
type Matcher_MatcherList_Predicate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to MatchType: // *Matcher_MatcherList_Predicate_SinglePredicate_ // *Matcher_MatcherList_Predicate_OrMatcher // *Matcher_MatcherList_Predicate_AndMatcher // *Matcher_MatcherList_Predicate_NotMatcher MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"` } func (x *Matcher_MatcherList_Predicate) Reset() { *x = Matcher_MatcherList_Predicate{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherList_Predicate) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherList_Predicate) ProtoMessage() {} func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead. 
func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 1, 0} } func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType { if m != nil { return m.MatchType } return nil } func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate { if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok { return x.SinglePredicate } return nil } func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList { if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok { return x.OrMatcher } return nil } func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList { if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok { return x.AndMatcher } return nil } func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate { if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok { return x.NotMatcher } return nil } type isMatcher_MatcherList_Predicate_MatchType interface { isMatcher_MatcherList_Predicate_MatchType() } type Matcher_MatcherList_Predicate_SinglePredicate_ struct { // A single predicate to evaluate. SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"` } type Matcher_MatcherList_Predicate_OrMatcher struct { // A list of predicates to be OR-ed together. OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"` } type Matcher_MatcherList_Predicate_AndMatcher struct { // A list of predicates to be AND-ed together. 
AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"` } type Matcher_MatcherList_Predicate_NotMatcher struct { // The invert of a predicate NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"` } func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {} func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {} func (*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {} func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {} // An individual matcher. type Matcher_MatcherList_FieldMatcher struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Determines if the match succeeds. Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` // What to do if the match succeeds. 
OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"` } func (x *Matcher_MatcherList_FieldMatcher) Reset() { *x = Matcher_MatcherList_FieldMatcher{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherList_FieldMatcher) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {} func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead. func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 1, 1} } func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate { if x != nil { return x.Predicate } return nil } func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch { if x != nil { return x.OnMatch } return nil } // Predicate for a single input field. type Matcher_MatcherList_Predicate_SinglePredicate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Protocol-specific specification of input field to match on. 
// [#extension-category: envoy.matching.common_inputs] Input *v4alpha1.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // Types that are assignable to Matcher: // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"` } func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() { *x = Matcher_MatcherList_Predicate_SinglePredicate{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {} func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead. 
func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0} } func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v4alpha1.TypedExtensionConfig { if x != nil { return x.Input } return nil } func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher { if m != nil { return m.Matcher } return nil } func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *v4alpha2.StringMatcher { if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok { return x.ValueMatch } return nil } func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v4alpha1.TypedExtensionConfig { if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok { return x.CustomMatch } return nil } type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface { isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() } type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct { // Built-in string matcher. ValueMatch *v4alpha2.StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"` } type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct { // Extension for custom matching logic. // [#extension-category: envoy.matching.input_matchers] CustomMatch *v4alpha1.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"` } func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { } func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() { } // A list of two or more matchers. Used to allow using a list within a oneof. 
type Matcher_MatcherList_Predicate_PredicateList struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"` } func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() { *x = Matcher_MatcherList_Predicate_PredicateList{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherList_Predicate_PredicateList) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {} func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead. func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1} } func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate { if x != nil { return x.Predicate } return nil } // A map of configured matchers. Used to allow using a map within a oneof. 
type Matcher_MatcherTree_MatchMap struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Matcher_MatcherTree_MatchMap) Reset() { *x = Matcher_MatcherTree_MatchMap{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Matcher_MatcherTree_MatchMap) String() string { return protoimpl.X.MessageStringOf(x) } func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {} func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead. func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{0, 2, 0} } func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch { if x != nil { return x.Map } return nil } // A set of match configurations used for logical operations. type MatchPredicate_MatchSet struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The list of rules that make up the set. 
Rules []*MatchPredicate `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` } func (x *MatchPredicate_MatchSet) Reset() { *x = MatchPredicate_MatchSet{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *MatchPredicate_MatchSet) String() string { return protoimpl.X.MessageStringOf(x) } func (*MatchPredicate_MatchSet) ProtoMessage() {} func (x *MatchPredicate_MatchSet) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use MatchPredicate_MatchSet.ProtoReflect.Descriptor instead. func (*MatchPredicate_MatchSet) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{1, 0} } func (x *MatchPredicate_MatchSet) GetRules() []*MatchPredicate { if x != nil { return x.Rules } return nil } type HttpGenericBodyMatch_GenericTextMatch struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Rule: // *HttpGenericBodyMatch_GenericTextMatch_StringMatch // *HttpGenericBodyMatch_GenericTextMatch_BinaryMatch Rule isHttpGenericBodyMatch_GenericTextMatch_Rule `protobuf_oneof:"rule"` } func (x *HttpGenericBodyMatch_GenericTextMatch) Reset() { *x = HttpGenericBodyMatch_GenericTextMatch{} if protoimpl.UnsafeEnabled { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *HttpGenericBodyMatch_GenericTextMatch) String() string { return protoimpl.X.MessageStringOf(x) } func (*HttpGenericBodyMatch_GenericTextMatch) 
ProtoMessage() {} func (x *HttpGenericBodyMatch_GenericTextMatch) ProtoReflect() protoreflect.Message { mi := &file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))<|fim▁hole|> } return ms } return mi.MessageOf(x) } // Deprecated: Use HttpGenericBodyMatch_GenericTextMatch.ProtoReflect.Descriptor instead. func (*HttpGenericBodyMatch_GenericTextMatch) Descriptor() ([]byte, []int) { return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP(), []int{3, 0} } func (m *HttpGenericBodyMatch_GenericTextMatch) GetRule() isHttpGenericBodyMatch_GenericTextMatch_Rule { if m != nil { return m.Rule } return nil } func (x *HttpGenericBodyMatch_GenericTextMatch) GetStringMatch() string { if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_StringMatch); ok { return x.StringMatch } return "" } func (x *HttpGenericBodyMatch_GenericTextMatch) GetBinaryMatch() []byte { if x, ok := x.GetRule().(*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch); ok { return x.BinaryMatch } return nil } type isHttpGenericBodyMatch_GenericTextMatch_Rule interface { isHttpGenericBodyMatch_GenericTextMatch_Rule() } type HttpGenericBodyMatch_GenericTextMatch_StringMatch struct { // Text string to be located in HTTP body. StringMatch string `protobuf:"bytes,1,opt,name=string_match,json=stringMatch,proto3,oneof"` } type HttpGenericBodyMatch_GenericTextMatch_BinaryMatch struct { // Sequence of bytes to be located in HTTP body. 
BinaryMatch []byte `protobuf:"bytes,2,opt,name=binary_match,json=binaryMatch,proto3,oneof"` } func (*HttpGenericBodyMatch_GenericTextMatch_StringMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { } func (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch) isHttpGenericBodyMatch_GenericTextMatch_Rule() { } var File_envoy_config_common_matcher_v4alpha_matcher_proto protoreflect.FileDescriptor var file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDesc = []byte{ 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x17, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x54, 0x0a, 0x0b, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xe6, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xca, 0x0c, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 
0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xf2, 0x08, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x7f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x52, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x71, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x73, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0xeb, 0x02, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 
0x70, 0x68, 0x61, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x54, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x53, 0x9a, 0xc5, 0x88, 0x1e, 0x4e, 0x0a, 0x4c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xce, 0x01, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x6a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 
0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x3a, 0x51, 0x9a, 0xc5, 0x88, 0x1e, 0x4c, 0x0a, 0x4a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x9d, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x1a, 0x84, 0x06, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x4f, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x6b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x6d, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x54, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa4, 0x02, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x66, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 
0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x6c, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 
0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x13, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x92, 0x0a, 0x0a, 0x0e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x5b, 0x0a, 0x09, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x52, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x26, 0x0a, 0x09, 0x61, 0x6e, 
0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a, 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x08, 0x61, 0x6e, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x74, 0x0a, 0x1a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x17, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x76, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x76, 0x0a, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 
0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x78, 0x0a, 0x1c, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x19, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x81, 0x01, 0x0a, 0x1f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1b, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x83, 0x01, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x67, 
0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x1c, 0x68, 0x74, 0x74, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x9e, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x12, 0x53, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x74, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x0b, 0x0a, 0x04, 
0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x8f, 0x01, 0x0a, 0x10, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x43, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb0, 0x03, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x70, 0x0a, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x10, 0x47, 
0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2c, 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x7a, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x54, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0b, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x42, 0x6f, 0x64, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x4b, 0x0a, 0x31, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x34, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 
0x03, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescOnce sync.Once file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescData = file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDesc ) func file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescGZIP() []byte { file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescOnce.Do(func() { file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescData) }) return file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDescData } var file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 15) var file_envoy_config_common_matcher_v4alpha_matcher_proto_goTypes = []interface{}{ (*Matcher)(nil), // 0: envoy.config.common.matcher.v4alpha.Matcher (*MatchPredicate)(nil), // 1: envoy.config.common.matcher.v4alpha.MatchPredicate (*HttpHeadersMatch)(nil), // 2: envoy.config.common.matcher.v4alpha.HttpHeadersMatch (*HttpGenericBodyMatch)(nil), // 3: envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch (*Matcher_OnMatch)(nil), // 4: envoy.config.common.matcher.v4alpha.Matcher.OnMatch (*Matcher_MatcherList)(nil), // 5: envoy.config.common.matcher.v4alpha.Matcher.MatcherList (*Matcher_MatcherTree)(nil), // 6: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree (*Matcher_MatcherList_Predicate)(nil), // 7: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate (*Matcher_MatcherList_FieldMatcher)(nil), // 8: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.FieldMatcher (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 9: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.SinglePredicate (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 10: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.PredicateList 
(*Matcher_MatcherTree_MatchMap)(nil), // 11: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap nil, // 12: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap.MapEntry (*MatchPredicate_MatchSet)(nil), // 13: envoy.config.common.matcher.v4alpha.MatchPredicate.MatchSet (*HttpGenericBodyMatch_GenericTextMatch)(nil), // 14: envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch.GenericTextMatch (*v4alpha.HeaderMatcher)(nil), // 15: envoy.config.route.v4alpha.HeaderMatcher (*v4alpha1.TypedExtensionConfig)(nil), // 16: envoy.config.core.v4alpha.TypedExtensionConfig (*v4alpha2.StringMatcher)(nil), // 17: envoy.type.matcher.v4alpha.StringMatcher } var file_envoy_config_common_matcher_v4alpha_matcher_proto_depIdxs = []int32{ 5, // 0: envoy.config.common.matcher.v4alpha.Matcher.matcher_list:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList 6, // 1: envoy.config.common.matcher.v4alpha.Matcher.matcher_tree:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherTree 4, // 2: envoy.config.common.matcher.v4alpha.Matcher.on_no_match:type_name -> envoy.config.common.matcher.v4alpha.Matcher.OnMatch 13, // 3: envoy.config.common.matcher.v4alpha.MatchPredicate.or_match:type_name -> envoy.config.common.matcher.v4alpha.MatchPredicate.MatchSet 13, // 4: envoy.config.common.matcher.v4alpha.MatchPredicate.and_match:type_name -> envoy.config.common.matcher.v4alpha.MatchPredicate.MatchSet 1, // 5: envoy.config.common.matcher.v4alpha.MatchPredicate.not_match:type_name -> envoy.config.common.matcher.v4alpha.MatchPredicate 2, // 6: envoy.config.common.matcher.v4alpha.MatchPredicate.http_request_headers_match:type_name -> envoy.config.common.matcher.v4alpha.HttpHeadersMatch 2, // 7: envoy.config.common.matcher.v4alpha.MatchPredicate.http_request_trailers_match:type_name -> envoy.config.common.matcher.v4alpha.HttpHeadersMatch 2, // 8: envoy.config.common.matcher.v4alpha.MatchPredicate.http_response_headers_match:type_name -> 
envoy.config.common.matcher.v4alpha.HttpHeadersMatch 2, // 9: envoy.config.common.matcher.v4alpha.MatchPredicate.http_response_trailers_match:type_name -> envoy.config.common.matcher.v4alpha.HttpHeadersMatch 3, // 10: envoy.config.common.matcher.v4alpha.MatchPredicate.http_request_generic_body_match:type_name -> envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch 3, // 11: envoy.config.common.matcher.v4alpha.MatchPredicate.http_response_generic_body_match:type_name -> envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch 15, // 12: envoy.config.common.matcher.v4alpha.HttpHeadersMatch.headers:type_name -> envoy.config.route.v4alpha.HeaderMatcher 14, // 13: envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch.patterns:type_name -> envoy.config.common.matcher.v4alpha.HttpGenericBodyMatch.GenericTextMatch 0, // 14: envoy.config.common.matcher.v4alpha.Matcher.OnMatch.matcher:type_name -> envoy.config.common.matcher.v4alpha.Matcher 16, // 15: envoy.config.common.matcher.v4alpha.Matcher.OnMatch.action:type_name -> envoy.config.core.v4alpha.TypedExtensionConfig 8, // 16: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.matchers:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.FieldMatcher 16, // 17: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.input:type_name -> envoy.config.core.v4alpha.TypedExtensionConfig 11, // 18: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.exact_match_map:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap 11, // 19: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.prefix_match_map:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap 16, // 20: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.custom_match:type_name -> envoy.config.core.v4alpha.TypedExtensionConfig 9, // 21: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.single_predicate:type_name -> 
envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.SinglePredicate 10, // 22: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.or_matcher:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.PredicateList 10, // 23: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.and_matcher:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.PredicateList 7, // 24: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.not_matcher:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate 7, // 25: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.FieldMatcher.predicate:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate 4, // 26: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.FieldMatcher.on_match:type_name -> envoy.config.common.matcher.v4alpha.Matcher.OnMatch 16, // 27: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> envoy.config.core.v4alpha.TypedExtensionConfig 17, // 28: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> envoy.type.matcher.v4alpha.StringMatcher 16, // 29: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> envoy.config.core.v4alpha.TypedExtensionConfig 7, // 30: envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherList.Predicate 12, // 31: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap.map:type_name -> envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap.MapEntry 4, // 32: envoy.config.common.matcher.v4alpha.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> envoy.config.common.matcher.v4alpha.Matcher.OnMatch 1, // 33: 
envoy.config.common.matcher.v4alpha.MatchPredicate.MatchSet.rules:type_name -> envoy.config.common.matcher.v4alpha.MatchPredicate 34, // [34:34] is the sub-list for method output_type 34, // [34:34] is the sub-list for method input_type 34, // [34:34] is the sub-list for extension type_name 34, // [34:34] is the sub-list for extension extendee 0, // [0:34] is the sub-list for field type_name } func init() { file_envoy_config_common_matcher_v4alpha_matcher_proto_init() } func file_envoy_config_common_matcher_v4alpha_matcher_proto_init() { if File_envoy_config_common_matcher_v4alpha_matcher_proto != nil { return } if !protoimpl.UnsafeEnabled { file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MatchPredicate); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HttpHeadersMatch); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HttpGenericBodyMatch); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_OnMatch); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherList); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherTree); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherList_Predicate); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherList_FieldMatcher); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Matcher_MatcherTree_MatchMap); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } 
file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MatchPredicate_MatchSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HttpGenericBodyMatch_GenericTextMatch); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{ (*Matcher_MatcherList_)(nil), (*Matcher_MatcherTree_)(nil), } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{ (*MatchPredicate_OrMatch)(nil), (*MatchPredicate_AndMatch)(nil), (*MatchPredicate_NotMatch)(nil), (*MatchPredicate_AnyMatch)(nil), (*MatchPredicate_HttpRequestHeadersMatch)(nil), (*MatchPredicate_HttpRequestTrailersMatch)(nil), (*MatchPredicate_HttpResponseHeadersMatch)(nil), (*MatchPredicate_HttpResponseTrailersMatch)(nil), (*MatchPredicate_HttpRequestGenericBodyMatch)(nil), (*MatchPredicate_HttpResponseGenericBodyMatch)(nil), } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{ (*Matcher_OnMatch_Matcher)(nil), (*Matcher_OnMatch_Action)(nil), } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{ (*Matcher_MatcherTree_ExactMatchMap)(nil), (*Matcher_MatcherTree_PrefixMatchMap)(nil), (*Matcher_MatcherTree_CustomMatch)(nil), } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[7].OneofWrappers = []interface{}{ (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil), (*Matcher_MatcherList_Predicate_OrMatcher)(nil), (*Matcher_MatcherList_Predicate_AndMatcher)(nil), (*Matcher_MatcherList_Predicate_NotMatcher)(nil), } 
file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[9].OneofWrappers = []interface{}{ (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil), (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil), } file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes[14].OneofWrappers = []interface{}{ (*HttpGenericBodyMatch_GenericTextMatch_StringMatch)(nil), (*HttpGenericBodyMatch_GenericTextMatch_BinaryMatch)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDesc, NumEnums: 0, NumMessages: 15, NumExtensions: 0, NumServices: 0, }, GoTypes: file_envoy_config_common_matcher_v4alpha_matcher_proto_goTypes, DependencyIndexes: file_envoy_config_common_matcher_v4alpha_matcher_proto_depIdxs, MessageInfos: file_envoy_config_common_matcher_v4alpha_matcher_proto_msgTypes, }.Build() File_envoy_config_common_matcher_v4alpha_matcher_proto = out.File file_envoy_config_common_matcher_v4alpha_matcher_proto_rawDesc = nil file_envoy_config_common_matcher_v4alpha_matcher_proto_goTypes = nil file_envoy_config_common_matcher_v4alpha_matcher_proto_depIdxs = nil }<|fim▁end|>
if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi)
<|file_name|>socket_common.go<|end_file_name|><|fim▁begin|>// +build !386 package socket import ( "syscall" "unsafe" )<|fim▁hole|> _, _, e1 := syscall.Syscall(syscall.SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = e1 } return } func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = e1 } return }<|fim▁end|>
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
<|file_name|>video-modal.component.ts<|end_file_name|><|fim▁begin|>import { Component, ViewChild } from '@angular/core'; import { DomSanitizer } from '@angular/platform-browser'; import { DoctorsListService } from './doctors-list.service'; import { DoctorDetails } from '../shared/database/doctor-details'; /** * Component for video modal window * @export * @class VideoModalComponent */ @Component({ selector: 'mm-video-modal', template: ` <!--modal [cssClass]="cssClass" #videoModal--> <modal #videoModal> <modal-header [show-close]="true" (click)="close()"> </modal-header> <modal-body> <div class="embed-responsive embed-responsive-16by9"> <iframe class="embed-responsive-item" [src]="videoUrl" allowfullscreen> </iframe> </div> </modal-body> </modal> `/*, styles: [` /deep/ .modal-style { width: 100% !important; height: auto !important; } `]*/ }) export class VideoModalComponent { videoUrl: any = ''; //cssClass: string = 'modal-style'; @ViewChild('videoModal') videoModal: VideoModalComponent; constructor(private doctorsListService: DoctorsListService, private domSanitizer:DomSanitizer) { this.videoUrl = this.domSanitizer.bypassSecurityTrustResourceUrl(this.videoUrl); }<|fim▁hole|> open(size: string) { this.videoUrl = this.doctorsListService.getVideoUrl(); this.videoModal.open(size); } /** * function to stop playing the video when the modal window is closed * @memberof VideoModalComponent */ close() { let iframe = document.getElementsByTagName('iframe')[0].contentWindow; let func = 'pauseVideo'; // to pause the youtube video on closing the modal window iframe.postMessage('{"event":"command","func":"' + func + '","args":""}','*'); this.videoModal.close(); } }<|fim▁end|>
<|file_name|>metadef_namespaces.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http import urllib.parse as urlparse from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_namespace import Namespace from glance.api.v2.model.metadef_namespace import Namespaces from glance.api.v2.model.metadef_object import MetadefObject from glance.api.v2.model.metadef_property_type import PropertyType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_tag import MetadefTag from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import utils from glance.common import wsgi from glance.common import wsme_utils import glance.db import glance.gateway from glance.i18n import _, _LE import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF class NamespaceController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, 
notifier=self.notifier, policy_enforcer=self.policy) self.ns_schema_link = '/v2/schemas/metadefs/namespace' self.obj_schema_link = '/v2/schemas/metadefs/object' self.tag_schema_link = '/v2/schemas/metadefs/tag' def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): try: ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) policy_check = api_policy.MetadefAPIPolicy( req.context, enforcer=self.policy) # NOTE(abhishekk): This is just a "do you have permission to # list namespace" check. Each namespace is checked against # get_metadef_namespace below. policy_check.get_metadef_namespaces() # NOTE(abhishekk): We also need to fetch resource_types associated # with namespaces, so better to check we have permission for the # same in advance. policy_check.list_metadef_resource_types() # Get namespace id if marker: namespace_obj = ns_repo.get(marker) marker = namespace_obj.namespace_id database_ns_list = ns_repo.list( marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters) ns_list = [ ns for ns in database_ns_list if api_policy.MetadefAPIPolicy( req.context, md_resource=ns, enforcer=self.policy).check( 'get_metadef_namespace')] rs_repo = ( self.gateway.get_metadef_resource_type_repo( req.context, authorization_layer=False)) for db_namespace in ns_list: # Get resource type associations filters = dict() filters['namespace'] = db_namespace.namespace repo_rs_type_list = rs_repo.list(filters=filters) resource_type_list = [ ResourceTypeAssociation.to_wsme_model( resource_type ) for resource_type in repo_rs_type_list] if resource_type_list: db_namespace.resource_type_associations = ( resource_type_list) namespace_list = [Namespace.to_wsme_model( db_namespace, get_namespace_href(db_namespace), self.ns_schema_link) for db_namespace in ns_list] namespaces = Namespaces() namespaces.namespaces = namespace_list if len(namespace_list) != 0 and len(namespace_list) == limit: 
namespaces.next = ns_list[-1].namespace except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata namespaces " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return namespaces @utils.mutating def create(self, req, namespace): try: namespace_created = False # Create Namespace ns_factory = self.gateway.get_metadef_namespace_factory( req.context, authorization_layer=False) ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) # NOTE(abhishekk): Here we are going to check if user is authorized # to create namespace, resource_types, objects, properties etc. policy_check = api_policy.MetadefAPIPolicy( req.context, enforcer=self.policy) policy_check.add_metadef_namespace() if namespace.resource_type_associations: policy_check.add_metadef_resource_type_association() if namespace.objects: policy_check.add_metadef_object() if namespace.properties: policy_check.add_metadef_property() if namespace.tags: policy_check.add_metadef_tag() # NOTE(abhishekk): As we are getting rid of auth layer, this # is the place where we should add owner if it is not specified # in request. 
kwargs = namespace.to_dict() if 'owner' not in kwargs: kwargs.update({'owner': req.context.owner})<|fim▁hole|> new_namespace = ns_factory.new_namespace(**kwargs) ns_repo.add(new_namespace) namespace_created = True # Create Resource Types if namespace.resource_type_associations: rs_factory = (self.gateway.get_metadef_resource_type_factory( req.context, authorization_layer=False)) rs_repo = self.gateway.get_metadef_resource_type_repo( req.context, authorization_layer=False) for resource_type in namespace.resource_type_associations: new_resource = rs_factory.new_resource_type( namespace=namespace.namespace, **resource_type.to_dict()) rs_repo.add(new_resource) # Create Objects if namespace.objects: object_factory = self.gateway.get_metadef_object_factory( req.context, authorization_layer=False) object_repo = self.gateway.get_metadef_object_repo( req.context, authorization_layer=False) for metadata_object in namespace.objects: new_meta_object = object_factory.new_object( namespace=namespace.namespace, **metadata_object.to_dict()) object_repo.add(new_meta_object) # Create Tags if namespace.tags: tag_factory = self.gateway.get_metadef_tag_factory( req.context, authorization_layer=False) tag_repo = self.gateway.get_metadef_tag_repo( req.context, authorization_layer=False) for metadata_tag in namespace.tags: new_meta_tag = tag_factory.new_tag( namespace=namespace.namespace, **metadata_tag.to_dict()) tag_repo.add(new_meta_tag) # Create Namespace Properties if namespace.properties: prop_factory = (self.gateway.get_metadef_property_factory( req.context, authorization_layer=False)) prop_repo = self.gateway.get_metadef_property_repo( req.context, authorization_layer=False) for (name, value) in namespace.properties.items(): new_property_type = ( prop_factory.new_namespace_property( namespace=namespace.namespace, **self._to_property_dict(name, value) )) prop_repo.add(new_property_type) except exception.Invalid as e: msg = (_("Couldn't create metadata namespace: %s") % 
encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Forbidden as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) LOG.debug("User not permitted to create metadata namespace") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) raise webob.exc.HTTPConflict(explanation=e.msg) # Return the user namespace as we don't expose the id to user new_namespace.properties = namespace.properties new_namespace.objects = namespace.objects new_namespace.resource_type_associations = ( namespace.resource_type_associations) new_namespace.tags = namespace.tags return Namespace.to_wsme_model(new_namespace, get_namespace_href(new_namespace), self.ns_schema_link) def _to_property_dict(self, name, value): # Convert the model PropertyTypes dict to a JSON string db_property_type_dict = dict() db_property_type_dict['schema'] = json.tojson(PropertyType, value) db_property_type_dict['name'] = name return db_property_type_dict def _cleanup_namespace(self, namespace_repo, namespace, namespace_created): if namespace_created: try: namespace_obj = namespace_repo.get(namespace.namespace) namespace_obj.delete() namespace_repo.remove(namespace_obj) LOG.debug("Cleaned up namespace %(namespace)s ", {'namespace': namespace.namespace}) except Exception as e: msg = (_LE("Failed to delete namespace %(namespace)s." 
"Exception: %(exception)s"), {'namespace': namespace.namespace, 'exception': encodeutils.exception_to_unicode(e)}) LOG.error(msg) def show(self, req, namespace, filters=None): try: # Get namespace ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: namespace_obj = ns_repo.get(namespace) policy_check = api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy) policy_check.get_metadef_namespace() except (exception.Forbidden, webob.exc.HTTPForbidden): LOG.debug("User not permitted to show namespace '%s'", namespace) # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project raise webob.exc.HTTPNotFound() # NOTE(abhishekk): We also need to fetch resource_types, objects, # properties, tags associated with namespace, so better to check # whether user has permissions for the same. policy_check.list_metadef_resource_types() policy_check.get_metadef_objects() policy_check.get_metadef_properties() policy_check.get_metadef_tags() namespace_detail = Namespace.to_wsme_model( namespace_obj, get_namespace_href(namespace_obj), self.ns_schema_link) ns_filters = dict() ns_filters['namespace'] = namespace # Get objects object_repo = self.gateway.get_metadef_object_repo( req.context, authorization_layer=False) db_metaobject_list = object_repo.list(filters=ns_filters) object_list = [MetadefObject.to_wsme_model( db_metaobject, get_object_href(namespace, db_metaobject), self.obj_schema_link) for db_metaobject in db_metaobject_list] if object_list: namespace_detail.objects = object_list # Get resource type associations rs_repo = self.gateway.get_metadef_resource_type_repo( req.context, authorization_layer=False) db_resource_type_list = rs_repo.list(filters=ns_filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in db_resource_type_list] if resource_type_list: namespace_detail.resource_type_associations = ( resource_type_list) 
# Get properties prop_repo = self.gateway.get_metadef_property_repo( req.context, authorization_layer=False) db_properties = prop_repo.list(filters=ns_filters) property_list = Namespace.to_model_properties(db_properties) if property_list: namespace_detail.properties = property_list if filters and filters['resource_type']: namespace_detail = self._prefix_property_name( namespace_detail, filters['resource_type']) # Get tags tag_repo = self.gateway.get_metadef_tag_repo( req.context, authorization_layer=False) db_metatag_list = tag_repo.list(filters=ns_filters) tag_list = [MetadefTag(**{'name': db_metatag.name}) for db_metatag in db_metatag_list] if tag_list: namespace_detail.tags = tag_list except exception.Forbidden as e: LOG.debug("User not permitted to show metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return namespace_detail def update(self, req, user_ns, namespace): namespace_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: ns_obj = namespace_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Here we are just checking if use is authorized # to modify the namespace or not api_policy.MetadefAPIPolicy( req.context, md_resource=ns_obj, enforcer=self.policy).modify_metadef_namespace() ns_obj._old_namespace = ns_obj.namespace ns_obj.namespace = wsme_utils._get_value(user_ns.namespace) ns_obj.display_name = wsme_utils._get_value(user_ns.display_name) ns_obj.description = wsme_utils._get_value(user_ns.description) # Following optional fields will default to same values as in # create namespace if not specified ns_obj.visibility = ( wsme_utils._get_value(user_ns.visibility) or 
'private') ns_obj.protected = ( wsme_utils._get_value(user_ns.protected) or False) ns_obj.owner = ( wsme_utils._get_value(user_ns.owner) or req.context.owner) updated_namespace = namespace_repo.save(ns_obj) except exception.Invalid as e: msg = (_("Couldn't update metadata namespace: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Forbidden as e: LOG.debug("User not permitted to update metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) return Namespace.to_wsme_model(updated_namespace, get_namespace_href(updated_namespace), self.ns_schema_link) def delete(self, req, namespace): namespace_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: namespace_obj = namespace_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Here we are just checking user is authorized to # delete the namespace or not. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).delete_metadef_namespace() namespace_obj.delete() namespace_repo.remove(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def delete_objects(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): This call currently checks whether user # has permission to delete the namespace or not before deleting # the objects associated with it. api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).delete_metadef_namespace() namespace_obj.delete() ns_repo.remove_objects(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata objects " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def delete_tags(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): This call currently checks whether user # has permission to delete the namespace or not before deleting # the objects associated 
with it. policy_check = api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy) policy_check.delete_metadef_namespace() # NOTE(abhishekk): This call checks whether user # has permission to delete the tags or not. policy_check.delete_metadef_tags() namespace_obj.delete() ns_repo.remove_tags(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata tags " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def delete_properties(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo( req.context, authorization_layer=False) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): This call currently checks whether user # has permission to delete the namespace or not before deleting # the objects associated with it. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).delete_metadef_namespace() namespace_obj.delete() ns_repo.remove_properties(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata properties " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def _prefix_property_name(self, namespace_detail, user_resource_type): prefix = None if user_resource_type and namespace_detail.resource_type_associations: for resource_type in namespace_detail.resource_type_associations: if resource_type.name == user_resource_type: prefix = resource_type.prefix break if prefix: if namespace_detail.properties: new_property_dict = dict() for (key, value) in namespace_detail.properties.items(): new_property_dict[prefix + key] = value namespace_detail.properties = new_property_dict if namespace_detail.objects: for object in namespace_detail.objects: new_object_property_dict = dict() for (key, value) in object.properties.items(): new_object_property_dict[prefix + key] = value object.properties = new_object_property_dict if object.required and len(object.required) > 0: required = [prefix + name for name in object.required] object.required = required return namespace_detail class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise 
webob.exc.HTTPForbidden(explanation=msg) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, int(limit)) query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['public', 'private']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def show(self, request): params = request.params.copy() query_params = { 'filters': self._get_filters(params) } return query_params def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) return dict(namespace=namespace) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) 
return dict(user_ns=namespace) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def create(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, http.CREATED) response.location = get_namespace_href(namespace) def show(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response) def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) result.first = "/v2/metadefs/namespaces" result.schema = "/v2/schemas/metadefs/namespaces" if query: result.first = '%s?%s' % (result.first, query) if result.next: params['marker'] = result.next next_query = urlparse.urlencode(params) result.next = '/v2/metadefs/namespaces?%s' % next_query ns_json = json.tojson(Namespaces, result) response = self.__render(ns_json, response) def update(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, http.OK) def delete(self, response, result): response.status_int = http.NO_CONTENT def delete_objects(self, response, result): response.status_int = http.NO_CONTENT def delete_properties(self, response, result): response.status_int = http.NO_CONTENT def delete_tags(self, response, result): response.status_int = http.NO_CONTENT def __render(self, json_data, response, response_status=None): body = jsonutils.dumps(json_data, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' if response_status: response.status_int = response_status return response def _get_base_definitions(): return get_schema_definitions() def get_schema_definitions(): return { "positiveInteger": { "type": "integer", "minimum": 0 }, "positiveIntegerDefault0": { "allOf": [ {"$ref": "#/definitions/positiveInteger"}, {"default": 0} ] }, 
"stringArray": { "type": "array", "items": {"type": "string"}, # "minItems": 1, "uniqueItems": True }, "property": { "type": "object", "additionalProperties": { "type": "object", "required": ["title", "type"], "properties": { "name": { "type": "string", "maxLength": 80 }, "title": { "type": "string" }, "description": { "type": "string" }, "operators": { "type": "array", "items": { "type": "string" } }, "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "required": { "$ref": "#/definitions/stringArray" }, "minimum": { "type": "number" }, "maximum": { "type": "number" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "pattern": { "type": "string", "format": "regex" }, "enum": { "type": "array" }, "readonly": { "type": "boolean" }, "default": {}, "items": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "enum": { "type": "array" } } }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "uniqueItems": { "type": "boolean", "default": False }, "additionalItems": { "type": "boolean" }, } } } } def _get_base_properties(): return { "namespace": { "type": "string", "description": _("The unique namespace text."), "maxLength": 80, }, "display_name": { "type": "string", "description": _("The user friendly name for the namespace. 
Used " "by UI if available."), "maxLength": 80, }, "description": { "type": "string", "description": _("Provides a user friendly description of the " "namespace."), "maxLength": 500, }, "visibility": { "type": "string", "description": _("Scope of namespace accessibility."), "enum": ["public", "private"], }, "protected": { "type": "boolean", "description": _("If true, namespace will not be deletable."), }, "owner": { "type": "string", "description": _("Owner of the namespace."), "maxLength": 255, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of namespace creation"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last namespace" " modification"), "format": "date-time" }, "schema": { 'readOnly': True, "type": "string" }, "self": { 'readOnly': True, "type": "string" }, "resource_type_associations": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "prefix": { "type": "string" }, "properties_target": { "type": "string" } } } }, "properties": { "$ref": "#/definitions/property" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "description": { "type": "string" }, "required": { "$ref": "#/definitions/stringArray" }, "properties": { "$ref": "#/definitions/property" }, } } }, "tags": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } } } }, } def get_schema(): properties = _get_base_properties() definitions = _get_base_definitions() mandatory_attrs = Namespace.get_mandatory_attrs() schema = glance.schema.Schema( 'namespace', properties, required=mandatory_attrs, definitions=definitions ) return schema def get_collection_schema(): namespace_schema = get_schema() return glance.schema.CollectionSchema('namespaces', namespace_schema) def get_namespace_href(namespace): base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace 
return base_href def get_object_href(namespace_name, metadef_object): base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadef_object.name)) return base_href def get_tag_href(namespace_name, metadef_tag): base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadef_tag.name)) return base_href def create_resource(): """Namespaces resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = NamespaceController() return wsgi.Resource(controller, deserializer, serializer)<|fim▁end|>
<|file_name|>test_foram.py<|end_file_name|><|fim▁begin|>from unittest import TestCase from pyage.core import inject from pyage_forams.solutions.foram import Foram class TestForam(TestCase): def test_step(self): inject.config = "pyage_forams.conf.dummy_conf" foram = Foram(10)<|fim▁hole|><|fim▁end|>
# foram.step()
<|file_name|>GatewayInclusive.js<|end_file_name|><|fim▁begin|>bpmnGatewayInclusive=function(width,_30ab){ VectorFigure.call(this); this.stroke =1; }; bpmnGatewayInclusive.prototype=new VectorFigure; bpmnGatewayInclusive.prototype.type="bpmnGatewayInclusive"; bpmnGatewayInclusive.prototype.paint=function(){ VectorFigure.prototype.paint.call(this); if(typeof workflow.zoomfactor == 'undefined') workflow.zoomfactor = 1; //Set the Task Limitation if(typeof this.limitFlag == 'undefined' || this.limitFlag == false) { this.originalWidth = 40; this.originalHeight = 40; this.orgXPos = this.getX(); this.orgYPos = this.getY(); this.orgFontSize =this.fontSize; } this.width = this.originalWidth * workflow.zoomfactor; this.height = this.originalHeight * workflow.zoomfactor; var cw = this.getWidth(); var ch = this.getHeight(); var x=new Array(0,cw*0.5,cw,cw*0.5); var y=new Array(ch*0.5,ch,ch*0.5,0); //var x=new Array(0,this.width/2,this.width,this.width/2); //var y=new Array(this.height/2,this.height,this.height/2,0); var x2 = new Array(); var y2 = new Array(); for(var i=0;i<x.length;i++){ x2[i]=x[i]+4; y2[i]=y[i]+1; } this.graphics.setStroke(this.stroke); this.graphics.setColor( "#c0c0c0" ); this.graphics.fillPolygon(x2,y2); this.graphics.setStroke(1); this.graphics.setColor( "#ffffe5" ); this.graphics.fillPolygon(x,y); this.graphics.setColor("#c8c865"); this.graphics.drawPolygon(x,y);<|fim▁hole|> this.graphics.setColor("#c8c865"); this.graphics.drawEllipse(this.getWidth()/4,this.getHeight()/4,this.getWidth()/2,this.getHeight()/2); this.graphics.paint(); if (this.input1 != null) { this.input1.setPosition(0, this.height / 2); } if (this.input2 != null) { this.input2.setPosition(this.width / 2, 0); } if (this.output1 != null) { this.output1.setPosition(this.height / 2, this.width); } if (this.output2 != null) { this.output2.setPosition(this.width, this.height / 2); } if (this.output3 != null) { this.output3.setPosition(0, this.height /2 ); } }; 
bpmnGatewayInclusive.prototype.setWorkflow=function(_40c5){ VectorFigure.prototype.setWorkflow.call(this,_40c5); if(_40c5!=null){ var h2 = this.height/2; var w2 = this.width/2; var gatewayPortName = ['output1', 'output2', 'output3', 'input1', 'input2' ]; var gatewayPortType = ['OutputPort','OutputPort','OutputPort','InputPort','InputPort']; var gatewayPositionX= [w2, this.width, 0 , 0, w2 ]; var gatewayPositionY= [this.width, h2, h2, h2, 0 ]; for(var i=0; i< gatewayPortName.length ; i++){ eval('this.'+gatewayPortName[i]+' = new '+gatewayPortType[i]+'()'); //Create New Port eval('this.'+gatewayPortName[i]+'.setWorkflow(_40c5)'); //Add port to the workflow eval('this.'+gatewayPortName[i]+'.setName("'+gatewayPortName[i]+'")'); //Set PortName eval('this.'+gatewayPortName[i]+'.setZOrder(-1)'); //Set Z-Order of the port to -1. It will be below all the figure eval('this.'+gatewayPortName[i]+'.setBackgroundColor(new Color(255, 255, 255))'); //Setting Background of the port to white eval('this.'+gatewayPortName[i]+'.setColor(new Color(255, 255, 255))'); //Setting Border of the port to white eval('this.addPort(this.'+gatewayPortName[i]+','+gatewayPositionX[i]+', '+gatewayPositionY[i]+')'); //Setting Position of the port } } }; bpmnGatewayInclusive.prototype.getContextMenu=function(){ if(this.id != null){ this.workflow.handleContextMenu(this); } };<|fim▁end|>
var x_cir = 15; var y_cir = 15;
<|file_name|>addAktModal.component.js<|end_file_name|><|fim▁begin|>class AddAktModalController{ constructor(API, $uibModal, $state,$timeout){ 'ngInject'; let vm=this; vm.API=API; vm.program=vm.resolve.program; vm.programs=vm.resolve.programs; vm.data={ program:vm.program.id, title:null, date:new Date()<|fim▁hole|> altInputFormats: ['yyyy-MM-dd', 'dd.MM.yyyy'], formatDay: 'dd', formatMonth: 'MM', formatYear: 'yyyy', minDate: new Date(), startingDay: 1 }; vm.date = { opened: false }; vm.openCalendar=()=>{ vm.date.opened=true; }; vm.close=()=>{ this.modalInstance.dismiss('cancel'); }; vm.add=()=>{ let calendar = this.API.all('akt'); calendar.post({ program:vm.data.program, title:vm.data.title, date:moment(vm.data.date).format('YYYY-MM-DD'), }).then((response) => { vm.success=true; $timeout( ()=>{ $state.reload(); }, 300); $timeout(()=> { vm.close(); }, 500); }, (response) => { vm.message=response.data.uniqemessage; vm.errors=response.data.errors; }); } // } $onInit(){ } } export const AddAktModalComponent = { templateUrl: './views/app/components/addAktModal/addAktModal.component.html', controller: AddAktModalController, controllerAs: 'vm', bindings: { modalInstance: "<", resolve: "<" } }<|fim▁end|>
}; vm.dateOptions = {
<|file_name|>modpathfile.py<|end_file_name|><|fim▁begin|>""" Module to read MODPATH output files. The module contains two important classes that can be accessed by the user. * EndpointFile (ascii endpoint file) * PathlineFile (ascii pathline file) """ import numpy as np from ..utils.flopy_io import loadtxt class PathlineFile(): """ PathlineFile Class. Parameters ---------- filename : string Name of the pathline file verbose : bool Write information to the screen. Default is False. Attributes ---------- Methods ------- See Also -------- Notes ----- The PathlineFile class provides simple ways to retrieve MODPATH 6 pathline data from a MODPATH 6 ascii pathline file. Examples -------- >>> import flopy >>> pthobj = flopy.utils.PathlineFile('model.mppth') >>> p1 = pthobj.get_data(partid=1) """ kijnames = ['k', 'i', 'j', 'particleid', 'particlegroup', 'linesegmentindex'] def __init__(self, filename, verbose=False): """ Class constructor. """ self.fname = filename self.dtype, self.outdtype = self._get_dtypes() self._build_index() self._data = loadtxt(self.file, dtype=self.dtype, skiprows=self.skiprows) # set number of particle ids self.nid = self._data['particleid'].max() # convert layer, row, and column indices; particle id and group; and # line segment indices to zero-based for n in self.kijnames: self._data[n] -= 1 # close the input file self.file.close() return def _build_index(self): """ Set position of the start of the pathline data. """ self.skiprows = 0 self.file = open(self.fname, 'r') while True: line = self.file.readline() if isinstance(line, bytes): line = line.decode() if self.skiprows < 1: if 'MODPATH_PATHLINE_FILE 6' not in line.upper(): errmsg = '{} is not a valid pathline file'.format(self.fname) raise Exception(errmsg) self.skiprows += 1 if 'end header' in line.lower(): break self.file.seek(0) def _get_dtypes(self): """ Build numpy dtype for the MODPATH 6 pathline file. 
""" dtype = np.dtype([("particleid", np.int), ("particlegroup", np.int), ("timepointindex", np.int), ("cumulativetimestep", np.int), ("time", np.float32), ("x", np.float32), ("y", np.float32), ("z", np.float32), ("k", np.int), ("i", np.int), ("j", np.int), ("grid", np.int), ("xloc", np.float32), ("yloc", np.float32), ("zloc", np.float32), ("linesegmentindex", np.int)]) outdtype = np.dtype([("x", np.float32), ("y", np.float32), ("z", np.float32), ("time", np.float32), ("k", np.int), ("id", np.int)]) return dtype, outdtype def get_maxid(self): """ Get the maximum pathline number in the file pathline file Returns ---------- out : int Maximum pathline number. """ return self.maxid def get_maxtime(self): """ Get the maximum time in pathline file Returns ---------- out : float Maximum pathline time. """ return self.data['time'].max() def get_data(self, partid=0, totim=None, ge=True): """ get pathline data from the pathline file for a single pathline. Parameters ---------- partid : int The zero-based particle id. The first record is record 0. totim : float The simulation time. All pathline points for particle partid that are greater than or equal to (ge=True) or less than or equal to (ge=False) totim will be returned. Default is None ge : bool Boolean that determines if pathline times greater than or equal to or less than or equal to totim is used to create a subset of pathlines. Default is True. Returns ---------- ra : numpy record array A numpy recarray with the x, y, z, time, k, and particleid for pathline partid. 
See Also -------- Notes ----- Examples -------- >>> import flopy.utils.modpathfile as mpf >>> pthobj = flopy.utils.PathlineFile('model.mppth') >>> p1 = pthobj.get_data(partid=1) """ idx = self._data['particleid'] == partid if totim is not None: if ge: idx = (self._data['time'] >= totim) & (self._data['particleid'] == partid) else: idx = (self._data['time'] <= totim) & (self._data['particleid'] == partid) else: idx = self._data['particleid'] == partid self._ta = self._data[idx] ra = np.rec.fromarrays((self._ta['x'], self._ta['y'], self._ta['z'], self._ta['time'], self._ta['k'], self._ta['particleid']), dtype=self.outdtype) return ra def get_alldata(self, totim=None, ge=True): """ get pathline data from the pathline file for all pathlines and all times. Parameters ---------- totim : float The simulation time. All pathline points for particle partid that are greater than or equal to (ge=True) or less than or equal to (ge=False) totim will be returned. Default is None ge : bool Boolean that determines if pathline times greater than or equal to or less than or equal to totim is used to create a subset of pathlines. Default is True. Returns ---------- plist : a list of numpy record array A list of numpy recarrays with the x, y, z, time, k, and particleid for all pathlines. See Also -------- Notes ----- Examples -------- >>> import flopy.utils.modpathfile as mpf >>> pthobj = flopy.utils.PathlineFile('model.mppth') >>> p = pthobj.get_alldata() """ plist = [] for partid in range(self.nid): plist.append(self.get_data(partid=partid, totim=totim, ge=ge)) return plist def get_destination_pathline_data(self, dest_cells): """Get pathline data for set of destination cells. Parameters ---------- dest_cells : list or array of tuples (k, i, j) of each destination cell (zero-based) Returns ------- pthldest : np.recarray Slice of pathline data array (e.g. PathlineFile._data) containing only pathlines with final k,i,j in dest_cells. 
""" ra = self._data.view(np.recarray) # find the intersection of endpoints and dest_cells # convert dest_cells to same dtype for comparison raslice = ra[['k', 'i', 'j']] dest_cells = np.array(dest_cells, dtype=raslice.dtype) inds = np.in1d(raslice, dest_cells) epdest = ra[inds].copy().view(np.recarray) # use particle ids to get the rest of the paths inds = np.in1d(ra.particleid, epdest.particleid) pthldes = ra[inds].copy() pthldes.sort(order=['particleid', 'time']) return pthldes def write_shapefile(self, pathline_data=None, one_per_particle=True, direction='ending', shpname='endpoings.shp', sr=None, epsg=None, **kwargs): """Write pathlines to shapefile. pathline_data : np.recarry Record array of same form as that returned by EndpointFile.get_alldata. (if none, EndpointFile.get_alldata() is exported). one_per_particle : boolean (default True) True writes a single LineString with a single set of attribute data for each particle. False writes a record/geometry for each pathline segment (each row in the PathLine file). This option can be used to visualize attribute information (time, model layer, etc.) across a pathline in a GIS. direction : str String defining if starting or ending particle locations should be included in shapefile attribute information. Only used if one_per_particle=False. (default is 'ending') shpname : str File path for shapefile sr : flopy.utils.reference.SpatialReference instance Used to scale and rotate Global x,y,z values in MODPATH Endpoint file epsg : int EPSG code for writing projection (.prj) file. If this is not supplied, the proj4 string or epgs code associated with sr will be used. 
kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp """ from ..utils.reference import SpatialReference from ..utils.geometry import LineString from ..export.shapefile_utils import recarray2shp pth = pathline_data if pth is None: pth = self._data.view(np.recarray) pth = pth.copy() pth.sort(order=['particleid', 'time']) if sr is None: sr = SpatialReference() particles = np.unique(pth.particleid) geoms = [] # 1 geometry for each path if one_per_particle: loc_inds = 0 if direction == 'ending': loc_inds = -1 pthdata = [] for pid in particles: ra = pth[pth.particleid == pid] x, y = sr.transform(ra.x, ra.y) z = ra.z geoms.append(LineString(list(zip(x, y, z)))) pthdata.append((pid, ra.particlegroup[0], ra.time.max(), ra.k[loc_inds], ra.i[loc_inds], ra.j[loc_inds])) pthdata = np.array(pthdata, dtype=[('particleid', np.int), ('particlegroup', np.int), ('time', np.float), ('k', np.int), ('i', np.int), ('j', np.int) ]).view(np.recarray) # geometry for each row in PathLine file else: dtype = pth.dtype #pthdata = np.empty((0, len(dtype)), dtype=dtype).view(np.recarray) pthdata = [] for pid in particles: ra = pth[pth.particleid == pid] x, y = sr.transform(ra.x, ra.y) z = ra.z geoms += [LineString([(x[i-1], y[i-1], z[i-1]), (x[i], y[i], z[i])]) for i in np.arange(1, (len(ra)))] #pthdata = np.append(pthdata, ra[1:]).view(np.recarray) pthdata += ra[1:].tolist() pthdata = np.array(pthdata, dtype=dtype).view(np.recarray) # convert back to one-based for n in set(self.kijnames).intersection(set(pthdata.dtype.names)): pthdata[n] += 1 recarray2shp(pthdata, geoms, shpname=shpname, epsg=sr.epsg, **kwargs) class EndpointFile(): """ EndpointFile Class. Parameters ---------- filename : string Name of the endpoint file verbose : bool Write information to the screen. Default is False. Attributes<|fim▁hole|> ------- See Also -------- Notes ----- The EndpointFile class provides simple ways to retrieve MODPATH 6 endpoint data from a MODPATH 6 ascii endpoint file. 
Examples -------- >>> import flopy >>> endobj = flopy.utils.EndpointFile('model.mpend') >>> e1 = endobj.get_data(partid=1) """ kijnames = ['k0', 'i0', 'j0', 'k', 'i', 'j', 'particleid', 'particlegroup'] def __init__(self, filename, verbose=False): """ Class constructor. """ self.fname = filename self.dtype = self._get_dtypes() self._build_index() self._data = loadtxt(self.file, dtype=self.dtype, skiprows=self.skiprows) # set number of particle ids self.nid = self._data['particleid'].max() # convert layer, row, and column indices; particle id and group; and # line segment indices to zero-based for n in self.kijnames: self._data[n] -= 1 # close the input file self.file.close() return def _build_index(self): """ Set position of the start of the pathline data. """ self.skiprows = 0 self.file = open(self.fname, 'r') idx = 0 while True: line = self.file.readline() if isinstance(line, bytes): line = line.decode() if self.skiprows < 1: if 'MODPATH_ENDPOINT_FILE 6' not in line.upper(): errmsg = '{} is not a valid endpoint file'.format(self.fname) raise Exception(errmsg) self.skiprows += 1 if idx == 1: t = line.strip() self.direction = 1 if int(t[0]) == 2: self.direction = -1 if 'end header' in line.lower(): break self.file.seek(0) def _get_dtypes(self): """ Build numpy dtype for the MODPATH 6 endpoint file. 
""" dtype = np.dtype([("particleid", np.int), ("particlegroup", np.int), ('status', np.int), ('initialtime', np.float32), ('finaltime', np.float32), ('initialgrid', np.int), ('k0', np.int), ('i0', np.int), ('j0', np.int), ('initialcellface', np.int), ('initialzone', np.int), ('xloc0', np.float32), ('yloc0', np.float32), ('zloc0', np.float32), ('x0', np.float32), ('y0', np.float32), ('z0', np.float32), ('finalgrid', np.int), ('k', np.int), ('i', np.int), ('j', np.int), ('finalcellface', np.int), ('finalzone', np.int), ('xloc', np.float32), ('yloc', np.float32), ('zloc', np.float32), ('x', np.float32), ('y', np.float32), ('z', np.float32), ('label', '|S40')]) return dtype def get_maxid(self): """ Get the maximum endpoint particle id in the file endpoint file Returns ---------- out : int Maximum endpoint particle id. """ return self.maxid def get_maxtime(self): """ Get the maximum time in the endpoint file Returns ---------- out : float Maximum endpoint time. """ return self.data['finaltime'].max() def get_maxtraveltime(self): """ Get the maximum travel time in the endpoint file Returns ---------- out : float Maximum endpoint travel time. """ return (self.data['finaltime'] - self.data['initialtime']).max() def get_data(self, partid=0): """ Get endpoint data from the endpoint file for a single particle. Parameters ---------- partid : int The zero-based particle id. The first record is record 0. (default is 0) Returns ---------- ra : numpy record array A numpy recarray with the endpoint particle data for endpoint partid. See Also -------- Notes ----- Examples -------- >>> import flopy >>> endobj = flopy.utils.EndpointFile('model.mpend') >>> e1 = endobj.get_data(partid=1) """ idx = self._data['particleid'] == partid ra = self._data[idx] return ra def get_alldata(self): """ Get endpoint data from the endpoint file for all endpoints. 
Parameters ---------- Returns ---------- ra : numpy record array A numpy recarray with the endpoint particle data See Also -------- Notes ----- Examples -------- >>> import flopy >>> endobj = flopy.utils.EndpointFile('model.mpend') >>> e = endobj.get_alldata() """ ra = self._data.view(np.recarray).copy() # if final: # ra = np.rec.fromarrays((self._data['x'], self._data['y'], self._data['z'], # self._data['finaltime'], self._data['k'], # self._data['particleid']), dtype=self.outdtype) # else: # ra = np.rec.fromarrays((self._data['x0'], self._data['y0'], self._data['z0'], # self._data['initialtime'], self._data['k0'], # self._data['particleid']), dtype=self.outdtype) return ra def get_destination_endpoint_data(self, dest_cells): """Get endpoint data for set of destination cells. Parameters ---------- dest_cells : list or array of tuples (k, i, j) of each destination cell (zero-based) Returns ------- epdest : np.recarray Slice of endpoint data array (e.g. EndpointFile.get_alldata) containing only data with final k,i,j in dest_cells. """ ra = self.get_alldata() # find the intersection of endpoints and dest_cells # convert dest_cells to same dtype for comparison raslice = ra[['k', 'i', 'j']] dest_cells = np.array(dest_cells, dtype=raslice.dtype) inds = np.in1d(raslice, dest_cells) epdest = ra[inds].copy().view(np.recarray) return epdest def write_shapefile(self, endpoint_data=None, shpname='endpoings.shp', direction='ending', sr=None, epsg=None, **kwargs): """Write particle starting / ending locations to shapefile. endpoint_data : np.recarry Record array of same form as that returned by EndpointFile.get_alldata. (if none, EndpointFile.get_alldata() is exported). shpname : str File path for shapefile direction : str String defining if starting or ending particle locations should be considered. 
(default is 'ending') sr : flopy.utils.reference.SpatialReference instance Used to scale and rotate Global x,y,z values in MODPATH Endpoint file epsg : int EPSG code for writing projection (.prj) file. If this is not supplied, the proj4 string or epgs code associated with sr will be used. kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp """ from ..utils.reference import SpatialReference from ..utils.geometry import Point from ..export.shapefile_utils import recarray2shp epd = endpoint_data.copy() if epd is None: epd = self.get_alldata() if direction.lower() == 'ending': xcol, ycol, zcol = 'x', 'y', 'z' elif direction.lower() == 'starting': xcol, ycol, zcol = 'x0', 'y0', 'z0' else: errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \ 'or "starting".' raise Exception(errmsg) if sr is None: sr = SpatialReference() x, y = sr.transform(epd[xcol], epd[ycol]) z = epd[zcol] geoms = [Point(x[i], y[i], z[i]) for i in range(len(epd))] # convert back to one-based for n in self.kijnames: epd[n] += 1 recarray2shp(epd, geoms, shpname=shpname, epsg=epsg, **kwargs)<|fim▁end|>
---------- Methods
<|file_name|>adjustment.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> //! Adjustment — A representation of an adjustable bounded value use libc::c_double; use ffi; /** * A representation of an adjustable bounded value * * # Availables signals: * * `changed` : No Recursion * * `value-changed` : No Recursion */ pub struct Adjustment { pointer: *mut ffi::C_GtkAdjustment } impl Adjustment { pub fn new(value: f64, lower: f64, upper: f64, step_increment: f64, page_increment: f64, page_size: f64) -> Option<Adjustment> { let tmp_pointer = unsafe { ffi::gtk_adjustment_new(value as c_double, lower as c_double, upper as c_double, step_increment as c_double, page_increment as c_double, page_size as c_double) }; if tmp_pointer.is_null() { None } else { Some(Adjustment { pointer: tmp_pointer }) } } pub fn get_value(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_value(self.pointer) as f64 } } pub fn set_value(&self, value: f64) -> () { unsafe { ffi::gtk_adjustment_set_value(self.pointer, value as c_double) } } pub fn get_lower(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_lower(self.pointer) as f64 } } pub fn set_lower(&self, lower: f64) -> () { unsafe { ffi::gtk_adjustment_set_lower(self.pointer, lower as c_double) } } pub fn get_page_increment(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_page_increment(self.pointer) as f64 } } pub fn set_page_increment(&self, page_increment: f64) -> () { unsafe { ffi::gtk_adjustment_set_page_increment(self.pointer, page_increment as c_double) } } pub fn get_page_size(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_page_size(self.pointer) as f64 } } pub fn set_page_size(&self, page_size: f64) -> () { unsafe { ffi::gtk_adjustment_set_page_size(self.pointer, page_size as c_double) } } pub fn get_step_increment(&self) -> f64 { 
unsafe { ffi::gtk_adjustment_get_step_increment(self.pointer) as f64 } } pub fn set_step_increment(&self, step_increment: f64) -> () { unsafe { ffi::gtk_adjustment_set_step_increment(self.pointer, step_increment as c_double) } } pub fn get_upper(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_upper(self.pointer) as f64 } } pub fn set_upper(&self, upper: f64) -> () { unsafe { ffi::gtk_adjustment_set_upper(self.pointer, upper as c_double) } } pub fn get_minimum_increment(&self) -> f64 { unsafe { ffi::gtk_adjustment_get_minimum_increment(self.pointer) as f64 } } pub fn clamp_page(&self, lower: f64, upper: f64) -> () { unsafe { ffi::gtk_adjustment_clamp_page(self.pointer, lower as c_double, upper as c_double); } } pub fn changed(&self) -> () { unsafe { ffi::gtk_adjustment_changed(self.pointer); } } pub fn value_changed(&self) -> () { unsafe { ffi::gtk_adjustment_value_changed(self.pointer) } } pub fn configure(&self, value: f64, lower: f64, upper: f64, step_increment: f64, page_increment: f64, page_size: f64) -> () { unsafe { ffi::gtk_adjustment_configure(self.pointer, value as c_double, lower as c_double, upper as c_double, step_increment as c_double, page_increment as c_double, page_size as c_double); } } #[doc(hidden)] pub fn unwrap_pointer(&self) -> *mut ffi::C_GtkAdjustment { self.pointer } #[doc(hidden)] pub fn wrap_pointer(c_adjustment: *mut ffi::C_GtkAdjustment) -> Adjustment {<|fim▁hole|> unsafe { ::glib_ffi::g_object_ref(c_adjustment as *mut _); } Adjustment { pointer: c_adjustment } } } impl_drop!(Adjustment, GTK_ADJUSTMENT);<|fim▁end|>
<|file_name|>query_executor.go<|end_file_name|><|fim▁begin|>package query import ( "context" "errors" "fmt" "os" "runtime/debug" "strconv" "sync" "sync/atomic" "time" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxql" "go.uber.org/zap" ) var ( // ErrInvalidQuery is returned when executing an unknown query type. ErrInvalidQuery = errors.New("invalid query") // ErrNotExecuted is returned when a statement is not executed in a query. // This can occur when a previous statement in the same query has errored. ErrNotExecuted = errors.New("not executed") // ErrQueryInterrupted is an error returned when the query is interrupted. ErrQueryInterrupted = errors.New("query interrupted") // ErrQueryAborted is an error returned when the query is aborted. ErrQueryAborted = errors.New("query aborted") // ErrQueryEngineShutdown is an error sent when the query cannot be // created because the query engine was shutdown. ErrQueryEngineShutdown = errors.New("query engine shutdown") // ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run. ErrQueryTimeoutLimitExceeded = errors.New("query-timeout limit exceeded") // ErrAlreadyKilled is returned when attempting to kill a query that has already been killed. ErrAlreadyKilled = errors.New("already killed") ) // Statistics for the QueryExecutor const ( statQueriesActive = "queriesActive" // Number of queries currently being executed. statQueriesExecuted = "queriesExecuted" // Number of queries that have been executed (started). statQueriesFinished = "queriesFinished" // Number of queries that have finished. statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries. statRecoveredPanics = "recoveredPanics" // Number of panics recovered by Query Executor. // PanicCrashEnv is the environment variable that, when set, will prevent // the handler from recovering any panics. 
PanicCrashEnv = "INFLUXDB_PANIC_CRASH" ) // ErrDatabaseNotFound returns a database not found error for the given database name. func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } // ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points. func ErrMaxSelectPointsLimitExceeded(n, limit int) error { return fmt.Errorf("max-select-point limit exceeed: (%d/%d)", n, limit) } // ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run // because the maximum number of queries has been reached. func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error { return fmt.Errorf("max-concurrent-queries limit exceeded(%d, %d)", n, limit) } // Authorizer reports whether certain operations are authorized. type Authorizer interface { // AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name. AuthorizeDatabase(p influxql.Privilege, name string) bool // AuthorizeQuery returns an error if the query cannot be executed AuthorizeQuery(database string, query *influxql.Query) error // AuthorizeSeriesRead determines if a series is authorized for reading AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool // AuthorizeSeriesWrite determines if a series is authorized for writing AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool } // OpenAuthorizer is the Authorizer used when authorization is disabled. // It allows all operations. type OpenAuthorizer struct{} var _ Authorizer = OpenAuthorizer{} // AuthorizeDatabase returns true to allow any operation on a database. 
func (_ OpenAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } func (_ OpenAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { return true } func (_ OpenAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { return true } func (_ OpenAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } // ExecutionOptions contains the options for executing a query. type ExecutionOptions struct { // The database the query is running against. Database string // How to determine whether the query is allowed to execute, // what resources can be returned in SHOW queries, etc. Authorizer Authorizer // The requested maximum number of points to return in each result. ChunkSize int // If this query is being executed in a read-only context. ReadOnly bool // Node to execute on. NodeID uint64 // Quiet suppresses non-essential output from the query executor. Quiet bool // AbortCh is a channel that signals when results are no longer desired by the caller. AbortCh <-chan struct{} } // ExecutionContext contains state that the query is currently executing with. type ExecutionContext struct { // The statement ID of the executing query. StatementID int // The query ID of the executing query. QueryID uint64 // The query task information available to the StatementExecutor. Query *QueryTask // Output channel where results and errors should be sent. Results chan *Result // A channel that is closed when the query is interrupted. InterruptCh <-chan struct{} // Options used to start this query. ExecutionOptions }<|fim▁hole|>// been aborted. func (ctx *ExecutionContext) send(result *Result) error { select { case <-ctx.AbortCh: return ErrQueryAborted case ctx.Results <- result: } return nil } // Send sends a Result to the Results channel and will exit if the query has // been interrupted or aborted. 
func (ctx *ExecutionContext) Send(result *Result) error { select { case <-ctx.InterruptCh: return ErrQueryInterrupted case <-ctx.AbortCh: return ErrQueryAborted case ctx.Results <- result: } return nil } type contextKey int const ( iteratorsContextKey contextKey = iota ) // NewContextWithIterators returns a new context.Context with the *Iterators slice added. // The query planner will add instances of AuxIterator to the Iterators slice. func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Context { return context.WithValue(ctx, iteratorsContextKey, itr) } // tryAddAuxIteratorToContext will capture itr in the *Iterators slice, when configured // with a call to NewContextWithIterators. func tryAddAuxIteratorToContext(ctx context.Context, itr AuxIterator) { if v, ok := ctx.Value(iteratorsContextKey).(*Iterators); ok { *v = append(*v, itr) } } // StatementExecutor executes a statement within the QueryExecutor. type StatementExecutor interface { // ExecuteStatement executes a statement. Results should be sent to the // results channel in the ExecutionContext. ExecuteStatement(stmt influxql.Statement, ctx ExecutionContext) error } // StatementNormalizer normalizes a statement before it is executed. type StatementNormalizer interface { // NormalizeStatement adds a default database and policy to the // measurements in the statement. NormalizeStatement(stmt influxql.Statement, database string) error } // QueryExecutor executes every statement in an Query. type QueryExecutor struct { // Used for executing a statement in the query. StatementExecutor StatementExecutor // Used for tracking running queries. TaskManager *TaskManager // Logger to use for all logging. // Defaults to discarding all log output. Logger *zap.Logger // expvar-based stats. stats *QueryStatistics } // NewQueryExecutor returns a new instance of QueryExecutor. 
func NewQueryExecutor() *QueryExecutor { return &QueryExecutor{ TaskManager: NewTaskManager(), Logger: zap.NewNop(), stats: &QueryStatistics{}, } } // QueryStatistics keeps statistics related to the QueryExecutor. type QueryStatistics struct { ActiveQueries int64 ExecutedQueries int64 FinishedQueries int64 QueryExecutionDuration int64 RecoveredPanics int64 } // Statistics returns statistics for periodic monitoring. func (e *QueryExecutor) Statistics(tags map[string]string) []models.Statistic { return []models.Statistic{{ Name: "queryExecutor", Tags: tags, Values: map[string]interface{}{ statQueriesActive: atomic.LoadInt64(&e.stats.ActiveQueries), statQueriesExecuted: atomic.LoadInt64(&e.stats.ExecutedQueries), statQueriesFinished: atomic.LoadInt64(&e.stats.FinishedQueries), statQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration), statRecoveredPanics: atomic.LoadInt64(&e.stats.RecoveredPanics), }, }} } // Close kills all running queries and prevents new queries from being attached. func (e *QueryExecutor) Close() error { return e.TaskManager.Close() } // SetLogOutput sets the writer to which all logs are written. It must not be // called after Open is called. func (e *QueryExecutor) WithLogger(log *zap.Logger) { e.Logger = log.With(zap.String("service", "query")) e.TaskManager.Logger = e.Logger } // ExecuteQuery executes each statement within a query. 
func (e *QueryExecutor) ExecuteQuery(query *influxql.Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result { results := make(chan *Result) go e.executeQuery(query, opt, closing, results) return results } func (e *QueryExecutor) executeQuery(query *influxql.Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) { defer close(results) defer e.recover(query, results) atomic.AddInt64(&e.stats.ActiveQueries, 1) atomic.AddInt64(&e.stats.ExecutedQueries, 1) defer func(start time.Time) { atomic.AddInt64(&e.stats.ActiveQueries, -1) atomic.AddInt64(&e.stats.FinishedQueries, 1) atomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds()) }(time.Now()) qid, task, err := e.TaskManager.AttachQuery(query, opt.Database, closing) if err != nil { select { case results <- &Result{Err: err}: case <-opt.AbortCh: } return } defer e.TaskManager.DetachQuery(qid) // Setup the execution context that will be used when executing statements. ctx := ExecutionContext{ QueryID: qid, Query: task, Results: results, InterruptCh: task.closing, ExecutionOptions: opt, } var i int LOOP: for ; i < len(query.Statements); i++ { ctx.StatementID = i stmt := query.Statements[i] // If a default database wasn't passed in by the caller, check the statement. defaultDB := opt.Database if defaultDB == "" { if s, ok := stmt.(influxql.HasDefaultDatabase); ok { defaultDB = s.DefaultDatabase() } } // Do not let queries manually use the system measurements. If we find // one, return an error. This prevents a person from using the // measurement incorrectly and causing a panic. 
if stmt, ok := stmt.(*influxql.SelectStatement); ok { for _, s := range stmt.Sources { switch s := s.(type) { case *influxql.Measurement: if influxql.IsSystemName(s.Name) { command := "the appropriate meta command" switch s.Name { case "_fieldKeys": command = "SHOW FIELD KEYS" case "_measurements": command = "SHOW MEASUREMENTS" case "_series": command = "SHOW SERIES" case "_tagKeys": command = "SHOW TAG KEYS" case "_tags": command = "SHOW TAG VALUES" } results <- &Result{ Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command), } break LOOP } } } } // Rewrite statements, if necessary. // This can occur on meta read statements which convert to SELECT statements. newStmt, err := RewriteStatement(stmt) if err != nil { results <- &Result{Err: err} break } stmt = newStmt // Normalize each statement if possible. if normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok { if err := normalizer.NormalizeStatement(stmt, defaultDB); err != nil { if err := ctx.send(&Result{Err: err}); err == ErrQueryAborted { return } break } } // Log each normalized statement. if !ctx.Quiet { e.Logger.Info(stmt.String()) } // Send any other statements to the underlying statement executor. err = e.StatementExecutor.ExecuteStatement(stmt, ctx) if err == ErrQueryInterrupted { // Query was interrupted so retrieve the real interrupt error from // the query task if there is one. if qerr := task.Error(); qerr != nil { err = qerr } } // Send an error for this result if it failed for some reason. if err != nil { if err := ctx.send(&Result{ StatementID: i, Err: err, }); err == ErrQueryAborted { return } // Stop after the first error. break } // Check if the query was interrupted during an uninterruptible statement. interrupted := false if ctx.InterruptCh != nil { select { case <-ctx.InterruptCh: interrupted = true default: // Query has not been interrupted. } } if interrupted { break } } // Send error results for any statements which were not executed. 
for ; i < len(query.Statements)-1; i++ { if err := ctx.send(&Result{ StatementID: i, Err: ErrNotExecuted, }); err == ErrQueryAborted { return } } } // Determines if the QueryExecutor will recover any panics or let them crash // the server. var willCrash bool func init() { var err error if willCrash, err = strconv.ParseBool(os.Getenv(PanicCrashEnv)); err != nil { willCrash = false } } func (e *QueryExecutor) recover(query *influxql.Query, results chan *Result) { if err := recover(); err != nil { atomic.AddInt64(&e.stats.RecoveredPanics, 1) // Capture the panic in _internal stats. e.Logger.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack())) results <- &Result{ StatementID: -1, Err: fmt.Errorf("%s [panic:%s]", query.String(), err), } if willCrash { e.Logger.Error(fmt.Sprintf("\n\n=====\nAll goroutines now follow:")) buf := debug.Stack() e.Logger.Error(fmt.Sprintf("%s", buf)) os.Exit(1) } } } // QueryMonitorFunc is a function that will be called to check if a query // is currently healthy. If the query needs to be interrupted for some reason, // the error should be returned by this function. type QueryMonitorFunc func(<-chan struct{}) error // QueryTask is the internal data structure for managing queries. // For the public use data structure that gets returned, see QueryTask. type QueryTask struct { query string database string status TaskStatus startTime time.Time closing chan struct{} monitorCh chan error err error mu sync.Mutex } // Monitor starts a new goroutine that will monitor a query. The function // will be passed in a channel to signal when the query has been finished // normally. If the function returns with an error and the query is still // running, the query will be terminated. func (q *QueryTask) Monitor(fn QueryMonitorFunc) { go q.monitor(fn) } // Error returns any asynchronous error that may have occured while executing // the query. 
func (q *QueryTask) Error() error { q.mu.Lock() defer q.mu.Unlock() return q.err } func (q *QueryTask) setError(err error) { q.mu.Lock() q.err = err q.mu.Unlock() } func (q *QueryTask) monitor(fn QueryMonitorFunc) { if err := fn(q.closing); err != nil { select { case <-q.closing: case q.monitorCh <- err: } } } // close closes the query task closing channel if the query hasn't been previously killed. func (q *QueryTask) close() { q.mu.Lock() if q.status != KilledTask { close(q.closing) } q.mu.Unlock() } func (q *QueryTask) kill() error { q.mu.Lock() if q.status == KilledTask { q.mu.Unlock() return ErrAlreadyKilled } q.status = KilledTask close(q.closing) q.mu.Unlock() return nil }<|fim▁end|>
// send sends a Result to the Results channel and will exit if the query has
<|file_name|>timeEntry.py<|end_file_name|><|fim▁begin|>### Copyright (C) 2005 Thomas M. Hinkle ### Copyright (C) 2009 Rolf Leggewie ### ### This library is free software; you can redistribute it and/or ### modify it under the terms of the GNU General Public License as ### published by the Free Software Foundation; either version 2 of the ### License, or (at your option) any later version. ### ### This library is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ### General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this library; if not, write to the Free Software ### Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 ### USA import gtk from gettext import gettext as _ import gourmet.convert as convert import validatingEntry TIME_TO_READ = 1000 class TimeEntry (validatingEntry.ValidatingEntry): __gtype_name__ = 'TimeEntry' def __init__ (self, conv=None): if not conv: self.conv = convert.get_converter() else: self.conv = conv validatingEntry.ValidatingEntry.__init__(self) self.entry.get_value = self.get_value self.entry.set_value = self.set_value def find_errors_in_progress (self, txt): if (not txt) or self.conv.timestring_to_seconds(txt): return None elif not convert.NUMBER_MATCHER.match(txt.split()[0]): return _('Time must begin with a number or fraction followed by a unit (minutes, hours, etc.).') else: words = txt.split() #if len(words) == 1: # self._hide_warning_slowly()<|fim▁hole|> return None else: partial_unit = words[-1] for u in self.conv.unit_to_seconds.keys(): if u.lower().find(partial_unit.lower())==0: return None #self._hide_warning_slowly() #return return _('Invalid input.') + \ _('Time must be expressed in hours, minutes, seconds, etc.') self._show_warning() #else: # self.set_warning_text("Invalid or incomplete time") # 
self._show_warning() def find_completed_errors (self,*args): txt = self.entry.get_text() if txt and not self.conv.timestring_to_seconds(txt): return _('Invalid input.') + \ _('Time must be expressed in hours, minutes, seconds, etc.') words = txt.split() if len(words) == 1: self._hide_warning_slowly() return elif convert.NUMBER_MATCHER.match(words[-1]): return else: partial_unit = words[-1] for u in self.conv.unit_to_seconds.keys(): if u.lower().find(partial_unit.lower())==0: self._hide_warning_slowly() return self.valid = False self.warn = True self.set_warning_text('Invalid input.' + 'Time must be expressed in hours, minutes, seconds, etc.') self._show_warning() def set_value (self,seconds): self.entry.set_text( convert.seconds_to_timestring(seconds, fractions=convert.FRACTIONS_ASCII) ) def get_value (self): return self.conv.timestring_to_seconds(self.entry.get_text()) def make_time_entry(): te=TimeEntry() te.show() return te if __name__ == '__main__': w=gtk.Window() vb = gtk.VBox() hb = gtk.HBox() l=gtk.Label('_Label') l.set_use_underline(True) l.set_alignment(0,0.5) hb.pack_start(l) te=TimeEntry() import sys te.connect('changed',lambda w: sys.stderr.write('Time value: %s'%w.get_value())) l.set_mnemonic_widget(te) hb.pack_start(te,expand=False,fill=False) vb.add(hb) qb = gtk.Button(stock=gtk.STOCK_QUIT) vb.add(qb) l.show() hb.show() qb.show() te.show() vb.show() qb.connect('clicked',lambda *args: w.hide() and gtk.main_quit() or gtk.main_quit()) w.add(vb) w.show() w.connect('delete_event',gtk.main_quit) gtk.main()<|fim▁end|>
# return if convert.NUMBER_MATCHER.match(words[-1]):
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>import re from project.models import PageChunk contacts_map_coordinates = \ re.compile(<|fim▁hole|> r".*" ) def inject_pagechunks(): chunks = {chunk.name: chunk.text for chunk in PageChunk.query.all()} return {"pagechunks": chunks}<|fim▁end|>
r".*" r"@(?P<latitude>\-?[\d\.]+)," r"(?P<longitude>\-?[\d\.]+)," r"(?P<zoom>[\d\.]+)z"
<|file_name|>cgroup_manager_test.go<|end_file_name|><|fim▁begin|>/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e_node import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) // getResourceList returns a ResourceList with the // specified cpu and memory resource values func getResourceList(cpu, memory string) api.ResourceList { res := api.ResourceList{} if cpu != "" { res[api.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { res[api.ResourceMemory] = resource.MustParse(memory) } return res } // getResourceRequirements returns a ResourceRequirements object func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { res := api.ResourceRequirements{} res.Requests = requests res.Limits = limits return res } // makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups. func makePodToVerifyCgroups(cgroupNames []cm.CgroupName) *api.Pod { // convert the names to their literal cgroupfs forms... 
cgroupFsNames := []string{} for _, cgroupName := range cgroupNames { if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" { cgroupFsNames = append(cgroupFsNames, cm.ConvertCgroupNameToSystemd(cgroupName, true)) } else { cgroupFsNames = append(cgroupFsNames, string(cgroupName)) } } // build the pod command to either verify cgroups exist command := "" for _, cgroupFsName := range cgroupFsNames { localCommand := "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; " command += localCommand } pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyNever, Containers: []api.Container{ { Image: "gcr.io/google_containers/busybox:1.24", Name: "container" + string(uuid.NewUUID()), Command: []string{"sh", "-c", command}, VolumeMounts: []api.VolumeMount{ { Name: "sysfscgroup", MountPath: "/tmp", }, }, }, }, Volumes: []api.Volume{ { Name: "sysfscgroup", VolumeSource: api.VolumeSource{ HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"}, }, }, }, }, } return pod } // makePodToVerifyCgroupRemoved verfies the specified cgroup does not exist. func makePodToVerifyCgroupRemoved(cgroupName cm.CgroupName) *api.Pod { cgroupFsName := string(cgroupName) if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" { cgroupFsName = cm.ConvertCgroupNameToSystemd(cm.CgroupName(cgroupName), true) } pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyOnFailure, Containers: []api.Container{ { Image: "gcr.io/google_containers/busybox:1.24", Name: "container" + string(uuid.NewUUID()), Command: []string{"sh", "-c", "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! 
-d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"}, VolumeMounts: []api.VolumeMount{ { Name: "sysfscgroup", MountPath: "/tmp", }, }, }, }, Volumes: []api.Volume{ { Name: "sysfscgroup", VolumeSource: api.VolumeSource{ HostPath: &api.HostPathVolumeSource{Path: "/sys/fs/cgroup"}, }, }, }, }, } return pod }<|fim▁hole|> var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { f := framework.NewDefaultFramework("kubelet-cgroup-manager") Describe("QOS containers", func() { Context("On enabling QOS cgroup hierarchy", func() { It("Top level QoS containers should have been created", func() { if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS { return } cgroupsToVerify := []cm.CgroupName{cm.CgroupName(qos.Burstable), cm.CgroupName(qos.BestEffort)} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) }) Describe("Pod containers", func() { Context("On scheduling a Guaranteed Pod", func() { It("Pod containers should have been created under the cgroup-root", func() { if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS { return } var ( guaranteedPod *api.Pod podUID string ) By("Creating a Guaranteed pod in Namespace", func() { guaranteedPod = f.PodClient().Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, }, Spec: api.PodSpec{ Containers: []api.Container{ { Image: framework.GetPauseImageName(f.ClientSet), Name: "container" + string(uuid.NewUUID()), Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), }, }, }, }) podUID = string(guaranteedPod.UID) }) By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []cm.CgroupName{cm.CgroupName("pod" + podUID)} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) err := 
framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { gp := int64(1) Expect(f.PodClient().Delete(guaranteedPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved(cm.CgroupName("pod" + podUID)) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) }) Context("On scheduling a BestEffort Pod", func() { It("Pod containers should have been created under the BestEffort cgroup", func() { if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS { return } var ( podUID string bestEffortPod *api.Pod ) By("Creating a BestEffort pod in Namespace", func() { bestEffortPod = f.PodClient().Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, }, Spec: api.PodSpec{ Containers: []api.Container{ { Image: framework.GetPauseImageName(f.ClientSet), Name: "container" + string(uuid.NewUUID()), Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")), }, }, }, }) podUID = string(bestEffortPod.UID) }) By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []cm.CgroupName{cm.CgroupName("BestEffort/pod" + podUID)} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { gp := int64(1) Expect(f.PodClient().Delete(bestEffortPod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved(cm.CgroupName("BestEffort/pod" + podUID)) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) 
}) }) Context("On scheduling a Burstable Pod", func() { It("Pod containers should have been created under the Burstable cgroup", func() { if !framework.TestContext.KubeletConfig.ExperimentalCgroupsPerQOS { return } var ( podUID string burstablePod *api.Pod ) By("Creating a Burstable pod in Namespace", func() { burstablePod = f.PodClient().Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), Namespace: f.Namespace.Name, }, Spec: api.PodSpec{ Containers: []api.Container{ { Image: framework.GetPauseImageName(f.ClientSet), Name: "container" + string(uuid.NewUUID()), Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")), }, }, }, }) podUID = string(burstablePod.UID) }) By("Checking if the pod cgroup was created", func() { cgroupsToVerify := []cm.CgroupName{cm.CgroupName("Burstable/pod" + podUID)} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { gp := int64(1) Expect(f.PodClient().Delete(burstablePod.Name, &api.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved(cm.CgroupName("Burstable/pod" + podUID)) f.PodClient().Create(pod) err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) }) }) })<|fim▁end|>
<|file_name|>life.go<|end_file_name|><|fim▁begin|>package state import ( "fmt" "labix.org/v2/mgo" "labix.org/v2/mgo/txn" "launchpad.net/juju-core/trivial" ) // Life represents the lifecycle state of the entities // Relation, Unit, Service and Machine. type Life int8 const ( Alive Life = iota Dying Dead nLife ) var notDeadDoc = D{{"life", D{{"$ne", Dead}}}} var isAliveDoc = D{{"life", Alive}} var lifeStrings = [nLife]string{ Alive: "alive", Dying: "dying", Dead: "dead", } func (l Life) String() string { return lifeStrings[l] } // Living describes state entities with a lifecycle. type Living interface { Life() Life EnsureDying() error EnsureDead() error Refresh() error } // ensureDying advances the specified entity's life status to Dying, if necessary. func ensureDying(st *State, coll *mgo.Collection, id interface{}, desc string) error { ops := []txn.Op{{ C: coll.Name,<|fim▁hole|> if err := st.runner.Run(ops, "", nil); err == txn.ErrAborted { return nil } else if err != nil { return fmt.Errorf("cannot start termination of %s %#v: %v", desc, id, err) } return nil } // ensureDead advances the specified entity's life status to Dead, if necessary. // Preconditions can be supplied in assertOps; if the preconditions fail, the error // will contain assertMsg. If the entity is not found, no error is returned. 
func ensureDead(st *State, coll *mgo.Collection, id interface{}, desc string, assertOps []txn.Op, assertMsg string) (err error) { defer trivial.ErrorContextf(&err, "cannot finish termination of %s %#v", desc, id) ops := append(assertOps, txn.Op{ C: coll.Name, Id: id, Update: D{{"$set", D{{"life", Dead}}}}, }) if err = st.runner.Run(ops, "", nil); err == nil { return nil } else if err != txn.ErrAborted { return err } var doc struct{ Life } if err = coll.FindId(id).One(&doc); err == mgo.ErrNotFound { return nil } else if err != nil { return err } else if doc.Life != Dead { return fmt.Errorf(assertMsg) } return nil } func isAlive(coll *mgo.Collection, id interface{}) (bool, error) { n, err := coll.Find(D{{"_id", id}, {"life", Alive}}).Count() return n == 1, err } func isNotDead(coll *mgo.Collection, id interface{}) (bool, error) { n, err := coll.Find(D{{"_id", id}, {"life", D{{"$ne", Dead}}}}).Count() return n == 1, err }<|fim▁end|>
Id: id, Assert: isAliveDoc, Update: D{{"$set", D{{"life", Dying}}}}, }}
<|file_name|>growth_errors_nans.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from __future__ import division from __future__ import print_function import argparse import numpy as np import pandas as pd import sys import os import matplotlib as mpl #mpl.use('Agg') from matplotlib import ticker import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib.dates as md from matplotlib.collections import LineCollection import pylab mpl.rc('text.latex', preamble='\usepackage{color}') from scipy.signal import argrelextrema from scipy import interpolate from scipy.optimize import curve_fit from sklearn.kernel_ridge import KernelRidge from sklearn.model_selection import GridSearchCV from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ExpSineSquared from scipy import stats from scipy.spatial.distance import pdist,cdist import datetime import time import glob import numpy.ma as ma import importlib import time_tools_attractor as ti import io_tools_attractor as io import data_tools_attractor as dt import stat_tools_attractor as st fmt1 = "%.1f" fmt2 = "%.2f" fmt3 = "%.3f" np.set_printoptions(precision=4) ################# DEFAULT ARGS ######################### inBaseDir = '/scratch/lforesti/data/' # '/store/msrad/radar/precip_attractor/data/' #'/scratch/lforesti/data/' outBaseDir = '/users/lforesti/results/' tmpBaseDir = '/scratch/lforesti/tmp/' pltType = 'spread' #'evolution' 'spread' timeSampMin = 5 spreadMeasure = 'scatter'#'std' or 'scatter' ########GET ARGUMENTS FROM CMD LINE#### parser = argparse.ArgumentParser(description='Plot radar rainfall field statistics.') parser.add_argument('-start', default='201601310000', type=str,help='Starting date YYYYMMDDHHmmSS.') parser.add_argument('-end', default='201601310000', type=str,help='Starting date YYYYMMDDHHmmSS.') parser.add_argument('-product', default='AQC', type=str,help='Which radar rainfall product to use (AQC, CPC, 
etc).') parser.add_argument('-wols', default=0, type=int,help='Whether to use the weighted ordinary leas squares or not in the fitting of the power spectrum.') parser.add_argument('-minR', default=0.08, type=float,help='Minimum rainfall rate for computation of WAR and various statistics.') parser.add_argument('-minWAR', default=1, type=float,help='Minimum WAR threshold for plotting.') parser.add_argument('-minCorrBeta', default=0.5, type=float,help='Minimum correlation coeff. for beta for plotting.') parser.add_argument('-accum', default=5, type=int,help='Accumulation time of the product [minutes].') parser.add_argument('-temp', default=5, type=int,help='Temporal sampling of the products [minutes].') parser.add_argument('-format', default='netcdf', type=str,help='Format of the file containing the statistics [csv,netcdf].') parser.add_argument('-plt', default='spread', type=str,help='Plot type [spread, evolution].') parser.add_argument('-refresh', default=0, type=int,help='Whether to refresh the binary .npy archive or not.') args = parser.parse_args() refreshArchive = bool(args.refresh) print('Refresh archive:', refreshArchive) product = args.product pltType = args.plt timeAccumMin = args.accum timeSampMin = args.temp timeAccumMinStr = '%05i' % timeAccumMin timeSampMinStr = '%05i' % timeSampMin if (int(args.start) > int(args.end)): print('Time end should be after time start') sys.exit(1) if (int(args.start) < 198001010000) or (int(args.start) > 203001010000): print('Invalid -start or -end time arguments.') sys.exit(1) else: timeStartStr = args.start timeEndStr = args.end timeStart = ti.timestring2datetime(timeStartStr) timeEnd = ti.timestring2datetime(timeEndStr) if spreadMeasure != 'std' and spreadMeasure != 'scatter': print('The measure of spread should be either std or scatter') sys.exit(1) if spreadMeasure == 'std': txtYlabel = 'Normalized st. 
deviation' if spreadMeasure == 'scatter': txtYlabel = 'Normalized robust spread' txtYlabel = 'Normalized half scatter' ############### OPEN FILES WITH STATS ## Open single binary python file with stats to speed up (if it exists) tmpArchiveFileName = tmpBaseDir + timeStartStr + '-' + timeEndStr + '_temporaryAttractor.npy' tmpArchiveFileNameVariables = tmpBaseDir + timeStartStr + '-' + timeEndStr + '_temporaryAttractor_varNames.npy' if (os.path.isfile(tmpArchiveFileName) == True) and (refreshArchive == False): arrayStats = np.load(tmpArchiveFileName) arrayStats = arrayStats.tolist() variableNames = np.load(tmpArchiveFileNameVariables) print('Loaded:', tmpArchiveFileName) else: ## Open whole list of CSV or netCDF files if args.format == 'csv': arrayStats, variableNames = io.csv_list2array(timeStart, timeEnd, inBaseDir, analysisType='STATS', \ product = product, timeAccumMin = timeSampMin, minR=args.minR, wols=args.wols) elif args.format == 'netcdf': arrayStats, variableNames = io.netcdf_list2array(timeStart, timeEnd, inBaseDir, analysisType='STATS', \ product = product, timeAccumMin = timeAccumMin, minR=args.minR, wols=args.wols, variableBreak = 0) else: print('Please provide a valid file format.') sys.exit(1) # Check if there are data if (len(arrayStats) == 0) & (args.format == 'csv'): print("No data found in CSV files.") sys.exit(1) if (len(arrayStats) == 0) & (args.format == 'netcdf'): print("No data found in NETCDF files.") sys.exit(1) ## Save data into a single binary bython file to speed up further analysis with same dataset arrayData = [] if refreshArchive == True: np.save(tmpArchiveFileName, arrayStats) np.save(tmpArchiveFileNameVariables, variableNames) print('Saved:',tmpArchiveFileName) ################ Fill both datetime and data arrays with NaNs where there is no data # Generate list of datetime objects timeIntList = dt.get_column_list(arrayStats, 0) timeStamps_datetime = ti.timestring_array2datetime_array(timeIntList) nrSamples = len(timeStamps_datetime) 
print('Number of analysed radar fields in archive: ', nrSamples) nrSamplesTotal = int((timeStamps_datetime[nrSamples-1] - timeStamps_datetime[0]).total_seconds()/(timeSampMin*60)) print('Number of missing fields: ', nrSamplesTotal-nrSamples) # Fill attractor array with NaNs to consider every missing time stamp arrayStats, timeStamps_datetime = dt.fill_attractor_array_nan(arrayStats, timeStamps_datetime) print(len(arrayStats), len(timeStamps_datetime), 'samples after filling holes with NaNs.') print('Variables from file: ', variableNames) ######## Prepare numpy arrays timeStamps_absolute = ti.datetime2absolutetime(np.array(timeStamps_datetime)) # Convert list of lists to numpy arrays arrayStats = np.array(arrayStats) timeStamps_datetime = np.array(timeStamps_datetime) timeStamps_absolute = np.array(timeStamps_absolute) ################################################################################# ####################### PARAMETERS TO ANALYZE GROWTH OF ERRORS varNames = ['war', 'r_cmean', 'r_mean', 'eccentricity', 'beta1', 'beta2'] logIMFWAR = True logTime = True # Keep it True (or false to check exponential growth of errors) logSpread = True # Keep it True maxLeadTimeHours = 96 ylims = [10**-1.7,10**0.5] # Selection criteria for valid trajectories warThreshold = args.minWAR betaCorrThreshold = args.minCorrBeta independenceTimeHours = 1 minNrTraj = 20 # Minimum number of trajectories nrIQR = 5 # Multiplier of the IQR to define a sample as outlier verbosity = 1 # Whether to plot the function fits to the growth of errors one by one plotFits = False print('Variables for plotting: ', varNames) #################################################################################### ####################### PREPARE DATA ############################################### maxLeadTimeMin = 60*maxLeadTimeHours # Generate labels for plotting varLabels = [] for var in range(0, len(varNames)): if varNames[var] == 'war': if logIMFWAR: varLabels.append('WAR [dB]') else: 
varLabels.append('WAR') if varNames[var] == 'r_mean': if logIMFWAR: varLabels.append('IMF [dB]') else: varLabels.append('IMF') if varNames[var] == 'r_cmean': if logIMFWAR: varLabels.append('MM [dB]') else: varLabels.append('MM') if varNames[var] == 'eccentricity': if logIMFWAR: varLabels.append('1-eccentricity [dB]') else: varLabels.append('Eccentricity') if varNames[var] == 'beta1': varLabels.append(r'$\beta_1$') if varNames[var] == 'beta2': varLabels.append(r'$\beta_2$') # Get indices of variables indicesVars = dt.get_variable_indices(varNames, variableNames) # Put indices into dictionary dictIdx = dict(zip(varNames, indicesVars)) dictLabels = dict(zip(varNames, varLabels)) print(dictIdx) # WAR threshold boolWAR = (arrayStats[:,dictIdx['war']] >= warThreshold) # Beta correlation threshold boolBetaCorr = (np.abs(arrayStats[:,dictIdx['beta1']+1]) >= np.abs(betaCorrThreshold)) & (np.abs(arrayStats[:,dictIdx['beta2']+1]) >= np.abs(betaCorrThreshold)) # Combination of thresholds boolTot = np.logical_and(boolWAR == True, boolBetaCorr == True) ############### Select subset of variables and change sign of beta arrayStats_attractor = [] for var in range(0, len(varNames)): varName = varNames[var] if (varName == 'beta1') | (varName == 'beta2'): arrayStats_attractor.append(-arrayStats[:,dictIdx[varName]]) elif (varName == 'war') | (varName == 'r_mean') | (varName == 'r_cmean') | (varName == 'eccentricity'): if logIMFWAR == True: if varName == 'eccentricity': arrayStats_attractor.append(dt.to_dB(1-arrayStats[:,dictIdx[varName]])) else: arrayStats_attractor.append(dt.to_dB(arrayStats[:,dictIdx[varName]])) else: arrayStats_attractor.append(arrayStats[:,dictIdx[varName]]) else: arrayStats_attractor.append(arrayStats[:,dictIdx[varName]]) # Convert lists to numpy arrays arrayStats_attractor = np.array(arrayStats_attractor).T # Replace "bad" samples with NaNs arrayStats_attractor[boolWAR==False,:] = np.nan # Calculate global statistics on the data arrayStats_Mean = 
np.nanmean(arrayStats_attractor, axis=0) arrayStats_Std = np.nanstd(arrayStats_attractor, axis=0) arrayStats_Scatter = st.nanscatter(arrayStats_attractor, axis=0) <|fim▁hole|>arrayStats_increments = np.vstack((arrayStats_increments[0,:], arrayStats_increments)) ## Compute global statistics on the data increments arrayStats_increments_Mean = np.nanmean(arrayStats_increments, axis=0) arrayStats_increments_Std = np.nanstd(arrayStats_increments, axis=0) Q25 = np.nanpercentile(arrayStats_increments,25, axis=0) Q75 = np.nanpercentile(arrayStats_increments,75, axis=0) arrayStats_increments_IQR = Q75 - Q25 # Print info on statistics of data and increments if verbosity >= 1: print('Means : ', arrayStats_Mean) print('St.devs: ', arrayStats_Std) print('Scatter: ', arrayStats_Scatter) print('Means increments : ', arrayStats_increments_Mean) print('St.devs increments: ', arrayStats_increments_Std) print('IQR increments : ', arrayStats_increments_IQR) ##########PLOT INCREMENTS # Plot time series of increments plotIncrements = True if plotIncrements: nrRowsSubplots = 2 nrColsSubplots = 3 p=1 fig = plt.figure(figsize=(22,10)) for var in range(0, len(varNames)): ax = plt.subplot(nrRowsSubplots, nrColsSubplots, p) plt.plot(arrayStats_increments[:,var]) ax.axhline(y=Q25[var] - nrIQR*arrayStats_increments_IQR[var],color='r') ax.axhline(y=Q75[var] + nrIQR*arrayStats_increments_IQR[var],color='r') plt.title('Time series increments for ' + varNames[var]) p += 1 plt.show() # Plot histogram of increments p=1 fig = plt.figure(figsize=(22,10)) for var in range(0, len(varNames)): plt.subplot(nrRowsSubplots, nrColsSubplots, p) histRange = [Q25[var] - nrIQR*arrayStats_increments_IQR[var], Q75[var] + nrIQR*arrayStats_increments_IQR[var]] bins = np.hstack((np.nanmin(arrayStats_increments[:,var]), np.linspace(histRange[0],histRange[1], 50), np.nanmax(arrayStats_increments[:,var]))) n, bins, patches = plt.hist(arrayStats_increments[:,var], 50, range=histRange, facecolor='green', alpha=0.75) 
plt.title('Histogram of increments for ' + varNames[var]) p += 1 plt.show() # Calculate global statistics on the data by removing the bad increments arrayStats_attractor_nanincrements = arrayStats_attractor.copy() for var in range(0, len(varNames)): histRange = [Q25[var] - nrIQR*arrayStats_increments_IQR[var], Q75[var] + nrIQR*arrayStats_increments_IQR[var]] boolGoodIncrementsVar = (arrayStats_increments[:,var] >= histRange[0]) & (arrayStats_increments[:,var] <= histRange[1]) arrayStats_attractor_nanincrements[~boolGoodIncrementsVar,var] = np.nan arrayStats_Mean = np.nanmean(arrayStats_attractor_nanincrements, axis=0) arrayStats_Std = np.nanstd(arrayStats_attractor_nanincrements, axis=0) arrayStats_Scatter = st.nanscatter(arrayStats_attractor_nanincrements, axis=0) # Print info on statistics of data (without bad increments) if verbosity >= 1: print('Means : ', arrayStats_Mean) print('St.devs: ', arrayStats_Std) print('Scatter: ', arrayStats_Scatter) ###########INITIAL CONDITIONS ##### Set the initial conditions of analogues intelligently (using percentiles) arrayStats_minPerc = np.nanpercentile(arrayStats_attractor, 20, axis=0) arrayStats_maxPerc = np.nanpercentile(arrayStats_attractor, 90, axis=0) if verbosity >= 1: print('MinPerc: ', arrayStats_minPerc) print('MaxPerc: ', arrayStats_maxPerc) initialCondIntervals = (arrayStats_maxPerc - arrayStats_minPerc)/100.0 nrIntervals = 5 initialCondRange = [] for var in range(0, len(varNames)): initialCondRange_variable = np.linspace(arrayStats_minPerc[var], arrayStats_maxPerc[var], nrIntervals).tolist() initialCondRange.append(initialCondRange_variable) print('Initial conditions: ', np.array(initialCondRange)) print('Initial intervals: ', np.array(initialCondIntervals)) #################################################################################################### ############### COMPUTE GROWTH OF ERRORS AND PLOT RESULTS nrLeadTimes = int(maxLeadTimeMin/timeSampMin) nrDimensions = arrayStats_attractor.shape[1] # 
Generate lead times leadTimesMin = [] for lt in range(0,nrLeadTimes): leadTimesMin.append(lt*timeSampMin) leadTimesMin = np.array(leadTimesMin) colormap = plt.cm.gist_rainbow # plt.cm.gray nrRowsSubplots = 2 nrColsSubplots = 3 p = 0 if nrRowsSubplots == nrColsSubplots: fgSize = (13, 13) else: fgSize = (20, 13) fig = plt.figure(figsize=fgSize) ax = fig.add_axes() ax = fig.add_subplot(111) tic = time.clock() for variable in range(0, len(varNames)): ## LOOP OVER VARIABLES analysisSteps = initialCondRange[variable] nrSteps = len(analysisSteps) p = p + 1 # subplot number print('\n') varMax = 0 varMin = 999 axSP = plt.subplot(nrRowsSubplots, nrColsSubplots, p) print('Subplot nr: ', p, ', variable: ', varNames[variable]) print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') plot_lines = [] decorr_time_hours = [] for step in range(0, nrSteps): ## LOOP OVER STEPS FOR INITIAL CONDITIONS # Define min and max values for initial conditions minInit = analysisSteps[step] maxInit = analysisSteps[step] + initialCondIntervals[variable] if (varNames[variable] == 'war' or varNames[variable] == 'r_mean' or varNames[variable] == 'r_cmean' or varNames[variable] == 'eccentricity') and logIMFWAR == True: if varNames[variable] == 'eccentricity': minInitLab = dt.from_dB(minInit) maxInitLab = dt.from_dB(maxInit) else: minInitLab = dt.from_dB(minInit) maxInitLab = dt.from_dB(maxInit) else: minInitLab = minInit maxInitLab = maxInit # Select data and time stamps of initial conditions initialConditions_data = (arrayStats_attractor[:,variable] >= minInit) & (arrayStats_attractor[:,variable] <= maxInit) initialConditions_timestamps = timeStamps_absolute[initialConditions_data] nrInitPoints = np.sum(initialConditions_data == True) print('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz') print(nrInitPoints, ' starting points in ', varLabels[variable], ' range ', minInit,'-',maxInit) # Compute time differences between consecutive time stamps of initial conditions timeDiffs = 
np.diff(initialConditions_timestamps) # Create array of time stamps that have a certain temporal independence (e.g. 24 hours) independenceTimeSecs = 60*60*independenceTimeHours timeDiffsAccum = 0 initialConditions_timestamps_indep = [] for i in range(0,nrInitPoints-1): if (timeDiffs[i] >= independenceTimeSecs) | (timeDiffsAccum >= independenceTimeSecs): initialConditions_timestamps_indep.append(initialConditions_timestamps[i]) timeDiffsAccum = 0 else: # Increment the accumulated time difference to avoid excluding the next sample # if closer than 24 hours from the previous (if not included) but further than 24 hours than the before the previous timeDiffsAccum = timeDiffsAccum + timeDiffs[i] initialConditions_timestamps_indep = np.array(initialConditions_timestamps_indep) nrInitPoints = len(initialConditions_timestamps_indep) print(nrInitPoints, ' independent starting points in ', varLabels[variable], ' range ', minInit,'-',maxInit) ################## GET ANALOGUE DATA SEQUENCES FOLLOWING TIME STAMPS # Loop over such points and get data sequences trajectories = [] # list of trajectories for i in range(0,nrInitPoints): tsAbs = initialConditions_timestamps_indep[i] idx = np.where(timeStamps_absolute == tsAbs)[0] if len(idx) != 1: print(idx) print(timeStamps_absolute[idx[0]], timeStamps_absolute[idx[1]]) print('You have duplicate time stamps in your dataset. 
Taking the first...') sys.exit(1) indicesSequence = np.arange(idx,idx+nrLeadTimes) # Select data sequences # Handle sequences that go beyond the dataset limits if np.sum(indicesSequence >= len(timeStamps_absolute)) > 0: indicesSequence = indicesSequence[indicesSequence < len(timeStamps_absolute)] sequenceTimes = timeStamps_absolute[indicesSequence] sequenceData = arrayStats_attractor[indicesSequence,:] increments = arrayStats_increments[indicesSequence,:] # Analyse increments of each time series and replace with NaNs if jumps are unrealistic minIncrements = Q25[variable] - nrIQR*arrayStats_increments_IQR[variable] maxIncrements = Q75[variable] + nrIQR*arrayStats_increments_IQR[variable] # Criterion to define whether an increment is unrealistically large #boolLargeIncrements = np.abs(increments) >= arrayStats_Std[variable] boolLargeIncrements = (increments[:,variable] < minIncrements) | (increments[:,variable] > maxIncrements) boolLargeIncrements[0] = False # The increment of the first element from the one before the start of the sequence is not considered as wrong idxFirsBadIncrement = np.argmax(boolLargeIncrements == True) maxNrBadIncrements = 5 if np.sum(boolLargeIncrements) > maxNrBadIncrements: # Replace all data with NaNs sequenceData[:,variable] = np.nan #else: # Replace data from first bad increment till the end with NaNs #sequenceData[idxFirsBadIncrement:,variable] = np.nan # Check the continuity of time stamps (no holes) timeDiffMin = np.array(np.diff(sequenceTimes)/60, dtype=int) # Nr of invalid samples not having the correct time stamp (it should be zero if correctly filled with NaNs) nrInvalidTimes = np.sum(timeDiffMin != timeSampMin) # Check how many valid data (not NaNs) you have in the future sequence nrValidSamples = np.sum(~np.isnan(sequenceData[:,variable])) nrConsecValidSamples = np.argmax(np.isnan(sequenceData[:,variable])) # Criteria to consider a sequence as valid minNrValidSamples = 36 minNrConsecValidSamples = 12 # one hour from start # 
Collect valid trajectories criterion = (nrValidSamples >= minNrValidSamples) & (nrConsecValidSamples >= minNrConsecValidSamples) \ & (nrInvalidTimes == 0) & (len(sequenceTimes) == nrLeadTimes) goodTrajectory = False if criterion == True: goodTrajectory = True trajectories.append(sequenceData) ### Print info on increments and valid samples... #print(increments[:,variable]) if verbosity >= 2: print('Trajectory nr', i,'starting at', ti.absolutetime2datetime(tsAbs)) print('Nr. invalid increments :', np.sum(boolLargeIncrements, axis=0)) print('Valid increment limits:' , minIncrements, maxIncrements) print('First bad increment at index', idxFirsBadIncrement, 'with value', increments[idxFirsBadIncrement, variable]) print('Nr. valid samples in sequence : ', nrValidSamples, '/',nrLeadTimes) print('Nr. consecutive valid samples in sequence: ', nrConsecValidSamples, '/',nrLeadTimes) print('Valid trajectory?', goodTrajectory) print('---------------') # Append trajectory to the list of trajectories trajectories = np.array(trajectories) print(len(trajectories), ' valid trajectories in ', varLabels[variable], ' range ', minInit,'-',maxInit) if len(trajectories) > minNrTraj: #print(trajectories.shape[0], ' x ', trajectories.shape[1], ' x ', trajectories.shape[2], '($N_{analogue}$) x ($N_{leadtimes}$) x ($N_{dim}$)') ################## COMPUTE SPREAD OF TRAJECTORIES spreadArray = [] for lt in range(0,nrLeadTimes): dataLeadTime = trajectories[:,lt,:] # Evaluate number of valid data nrValidPoints = np.sum(~np.isnan(dataLeadTime), axis=0) boolNrPoints = nrValidPoints < 20 # Compute ensemble spread if spreadMeasure == 'std': spreadLeadTime = np.nanstd(dataLeadTime/arrayStats_Std, axis=0) if spreadMeasure == 'scatter': spreadLeadTime = st.nanscatter(dataLeadTime/(arrayStats_Scatter/2.0), axis=0)/2.0 # Replace spread with nan if not enough samples for a given lead time if np.sum(boolNrPoints) >=1: spreadLeadTime[boolNrPoints] = np.nan # Append spread spreadArray.append(spreadLeadTime) 
spreadArray = np.array(spreadArray) ################## DECORRELATION TIME ESTIMATION #### TESTS WITH DIFFERENT FITTED MODELS dB_shift_hr = 0.5 if logTime: predictor = dt.to_dB(leadTimesMin/60 + dB_shift_hr) else: predictor = leadTimesMin/60 predictand = dt.to_dB(spreadArray[:,variable]) # Remove NaNs nans = np.isnan(predictand) predictor = predictor[~nans] predictand = predictand[~nans] if varNames[variable] == 'eccentricity': predictor = predictor[~np.isinf(predictand)] predictand = predictand[~np.isinf(predictand)] # Prediction grid predictor_grid = np.linspace(np.min(predictor), np.max(predictor), 1000) #### KERNEL RIDGE REGRESSION alphaVec = [0.1, 0.01] sigmaVec = np.arange(5.0, 5.5, 0.5) if len(alphaVec) > 1 or len(sigmaVec) > 1: # Grid search of parameters param_grid = {"alpha": alphaVec, "kernel": [RBF(length_scale) for length_scale in sigmaVec]} kr = KernelRidge() kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid) else: # Run with pre-defined parameter set kr = KernelRidge(alpha=alphaVec[0], kernel='rbf', gamma=sigmaVec[0]) # Fit model kr.fit(predictor.reshape(-1,1), predictand.reshape(-1,1)) # Get best parameters bestAlpha_kr = kr.best_params_['alpha'] bestSigma_kr = kr.best_params_['kernel'].length_scale # Predict over grid kr_fit = kr.predict(predictor_grid.reshape(-1,1)) # Compute derivatives of prediction kr_der1 = np.gradient(kr_fit[:,0]) kr_der2 = np.gradient(kr_der1) # Estimate decorrelation time KR if bestSigma_kr >= 2: minDer1 = 0.005 #0.001 else: minDer1 = 0.0 minNormSpread = 0.75 minNormSpread = 0.75*np.nanmedian(dt.from_dB(predictand)[dt.from_dB(predictor)+dB_shift_hr >= maxLeadTimeHours/2]) print('Minimum spread to reach:', minNormSpread) minNormSpread_dB = dt.to_dB(minNormSpread) decorrBool = (kr_der1 <= minDer1) & (kr_der2 < 0) & (kr_fit[:,0] >= minNormSpread_dB) decorrIndex_kr = np.where(decorrBool == True)[0] # Find first local minimum of the derivative firstLocalMinimumIndex = argrelextrema(kr_der1, np.less)[0] 
firstLocalMinimumIndex = firstLocalMinimumIndex[0] if len(decorrIndex_kr) == 0: kr_decorr_bad = True decorrIndex_kr = len(kr_der1)-1 else: kr_decorr_bad = False decorrIndex_kr = decorrIndex_kr[0] # Take as decorrelation time as the first local minimum before the derivative gets to zero criterionLocalMinimum = (decorrIndex_kr > firstLocalMinimumIndex) & (kr_fit[firstLocalMinimumIndex,0] >= minNormSpread_dB) & (bestSigma_kr >= 2) if criterionLocalMinimum: print('Taking first local minimum as decorrelation time') decorrIndex_kr = firstLocalMinimumIndex # Get decorr time if logTime: decorr_time_kr = dt.from_dB(predictor_grid[decorrIndex_kr])-dB_shift_hr else: decorr_time_kr = predictor_grid[decorrIndex_kr] #### Spherical model fit weighting = 1#dt.from_dB(predictor) popt, pcov = curve_fit(st.spherical_model, predictor, predictand, sigma=weighting) print('Spherical model params:', popt) spherical_fit = st.spherical_model(predictor_grid, popt[0], popt[1], popt[2]) if logTime: decorr_time_sph = dt.from_dB(popt[2])-dB_shift_hr else: decorr_time_sph = popt[2] #### Exponential model fit popt, pcov = curve_fit(st.exponential_model, predictor, predictand, sigma=weighting) print('Exponential model params:', popt) exponential_fit = st.exponential_model(predictor_grid, popt[0], popt[1], popt[2]) if logTime: decorr_time_exp = dt.from_dB(popt[2])-dB_shift_hr else: decorr_time_exp = popt[2] # Estimate decorrelation time simply using a threshold on the KR fit or the raw data spreadThreshold = 0.95 idxDecorr = np.argmax(dt.from_dB(kr_fit) >= spreadThreshold, axis=0)[0] if idxDecorr == 0: spreadThreshold = 0.8 idxDecorr = np.argmax(dt.from_dB(kr_fit) >= spreadThreshold, axis=0)[0] decorr_time_th = dt.from_dB(predictor_grid[idxDecorr])-dB_shift_hr if verbosity >= 1: print('Lifetime KR : ', decorr_time_kr, ' h') print('Lifetime spherical : ', decorr_time_sph, ' h') print('Lifetime exponential : ', decorr_time_exp, ' h') print('Lifetime threshold >=', spreadThreshold, ': ',decorr_time_th, 
' h') #### PLOT THE FITS TO ERROR GROWTH FUNCTIONS if plotFits: plt.close() plt.figure(figsize = (10,10)) ax1 = plt.subplot(111) ax1.scatter(predictor, predictand, marker='o', s=5, color='k') #ax1.plot(predictor_grid, mars_fit, 'r', label='Multivariate Adaptive Regression Splines (MARS)') krLabel = r'Kernel Ridge Regression (KR), $\alpha$=' + str(bestAlpha_kr) + r', $\sigma$=' + str(bestSigma_kr) p1, = ax1.plot(predictor_grid, kr_fit, 'g', label=krLabel, linewidth=2) p2, = ax1.plot(predictor_grid, spherical_fit, 'b', label='Spherical variogram model', linewidth=2) p3, = ax1.plot(predictor_grid, exponential_fit, 'r', label='Exponential variogram model', linewidth=2) # Plot derivatives and decorrelation time ax2 = ax1.twinx() ax2.plot(predictor_grid, kr_der1, 'g--') #ax2.plot(predictor_grid, kr_der2*20, 'g:') ax2.axvline(x=predictor_grid[decorrIndex_kr], ymin=0.2, color='g') ax2.axvline(x=dt.to_dB(decorr_time_sph + dB_shift_hr), ymin=0.2, color='b') ax2.axvline(x=dt.to_dB(decorr_time_exp + dB_shift_hr), ymin=0.2, color='r') p4 = ax2.axvline(x=dt.to_dB(decorr_time_th + dB_shift_hr), ymin=0.2, color='k') ax2.axhline(y=0, color='g') # Labels legend p1_label = 'Lifetime KR : ' + fmt1 % decorr_time_kr + ' h' p2_label = 'Lifetime spherical : ' + fmt1 % decorr_time_sph + ' h' p3_label = 'Lifetime exponential: ' + fmt1 % decorr_time_exp + ' h' p4_label = 'Lifetime >= ' + fmt2 % spreadThreshold + ' : ' + fmt1 % decorr_time_th + ' h' plot_lifetimes = [p1,p2,p3,p4] labels_lifetimes = [p1_label, p2_label, p3_label, p4_label] # Plot legend with lifetimes legend_lifetime = plt.legend(plot_lifetimes, labels_lifetimes, loc='upper left', labelspacing=0.1) plt.gca().add_artist(legend_lifetime) # Plot legend of models ax1.legend(loc='lower right',labelspacing=0.1) ax1.set_xlabel('Lead time, hours', fontsize=20) # Format X and Y axis ax1.set_ylabel(txtYlabel, fontsize=20) ax2.set_ylabel('Function derivative', fontsize=20) plt.setp(ax1.get_xticklabels(), fontsize=14) 
plt.setp(ax1.get_yticklabels(), fontsize=14) plt.setp(ax2.get_yticklabels(), fontsize=14) plt.xlim([np.min(predictor)-1, np.max(predictor)+1]) if maxLeadTimeHours == 24: xtickLabels = np.array([0.08,0.5,1,2,3,4,5,6,9,12,18,24]) if maxLeadTimeHours == 48: xtickLabels = np.array([0.08,0.5,1,2,3,4,5,6,9,12,18,24,36,48]) if maxLeadTimeHours == 96: xtickLabels = np.array([0.08,0.5,1,2,3,4,5,6,9,12,18,24,36,48,72,96]) xticklocations = dt.to_dB(xtickLabels + dB_shift_hr) xtickLabels = dt.dynamic_formatting_floats(xtickLabels) ax1.set_xticks(xticklocations) xticks = ax1.set_xticklabels(xtickLabels, fontsize=14) ytickLabels = [0.01,0.02,0.03,0.04,0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.8,1,1.2,1.4] yticklocations = dt.to_dB(ytickLabels) ytickLabels = dt.dynamic_formatting_floats(ytickLabels) ax1.set_yticks(yticklocations) ax1.set_yticklabels(ytickLabels, fontsize=14) strTitleLine1 = r'Spread growth for ' + varLabels[variable] strTitleLine2 = 'Time series starting in range ' + str(fmt2 % minInitLab) + '-' + str(fmt2 % maxInitLab) + ' (N = ' + str(trajectories.shape[0]) + ')' plt.title(strTitleLine1 + '\n' + strTitleLine2, fontsize=22) plt.show() # fileName = outBaseDir + product + '_' + pltType + '_' + timeStartStr + '-' + timeEndStr + '0_' + 'Rgt' + str(args.minR) + '_WOLS' + str(args.wols) + '_00005_warGt' + str("%0.1f" % warThreshold) + '_logIMFWAR' + str(int(logIMFWAR)) + '_' + timeAccumMinStr + '.png' # print('Saving: ',fileName) # plt.savefig(fileName, dpi=300) #sys.exit() ################## PLOTTING ################################################################################ linewidth=2.0 labelFontSize = 16 legendFontSize = 12 axesTicksFontSize = 14 plt.tick_params(axis='both', which='major', labelsize=axesTicksFontSize) # Plot growth of spread legTxt = ' Range ' + str(fmt2 % minInitLab) + '-' + str(fmt2 % maxInitLab) + ' (N = ' + str(trajectories.shape[0]) + ')' if pltType == 'spread': if (logTime == True) & (logSpread == True): l, = axSP.loglog(leadTimesMin/60, 
spreadArray[:,variable], label=legTxt, linewidth=linewidth) elif logTime == True: l, = axSP.semilogx(leadTimesMin/60, spreadArray[:,variable], label=legTxt, linewidth=linewidth) elif logSpread == True: l, = axSP.semilogy(leadTimesMin/60, spreadArray[:,variable], label=legTxt, linewidth=linewidth) else: l, = axSP.plot(leadTimesMin/60, spreadArray[:,variable], label=legTxt, linewidth=linewidth) # Get lines for second legend plot_lines.append(l) if kr_decorr_bad == True: strLifetime = 'Lifetime = ' + (fmt1 % decorr_time_exp) + ' h' else: strLifetime = 'Lifetime = ' + (fmt1 % decorr_time_kr) + ' h' decorr_time_hours.append(strLifetime) # Plot evolution of trajectories stepEvol = 3 if (pltType == 'evolution') & (step == stepEvol): if (logTime == True) & (logSpread == True): #axSP.loglog(leadTimesMin/60,trajectories[1:20,:,variable].T, color='blue') axSP.plot(leadTimesMin/60,trajectories[1:20,:,variable].T, color='blue') if (logTime == True) & (logSpread == False): axSP.semilogx(leadTimesMin/60,trajectories[1:20,:,variable].T, color='blue') else: axSP.plot(leadTimesMin/60,trajectories[1:20,:,variable].T, color='blue') if pltType == 'spread': # Line colors colors = [colormap(i) for i in np.linspace(0, 1, len(axSP.lines))] for i,j in enumerate(axSP.lines): j.set_color(colors[i]) legendFontSize =12 # Add additional legend with decorrelation time legend1 = plt.legend(plot_lines, decorr_time_hours, loc='upper left', fontsize=12, labelspacing=0.1) plt.gca().add_artist(legend1) # Add legend plt.ylim(ylims) if (logTime == True) & (logSpread == True): plt.xlim([timeSampMin/60, maxLeadTimeMin/60]) axSP.legend(loc='lower right', fontsize=legendFontSize) elif logTime == True: axSP.legend(loc='upper left', fontsize=legendFontSize) elif logSpread == True: axSP.legend(loc='lower right', fontsize=legendFontSize) else: axSP.legend(loc='lower right', fontsize=legendFontSize) # Plot line of spread saturation plt.axhline(1.0, color='k', linestyle='dashed') # Add labels and title 
plt.xlabel('Lead time [hours]', fontsize=labelFontSize) if (pltType == 'evolution') & (step == stepEvol): plt.ylabel(varLabels[variable], fontsize=labelFontSize) strTitle = 'Evolution of ' + varLabels[variable] + ' starting at ' + str(fmt2 % minInitLab) + '-' + str(fmt2 % maxInitLab) plt.title(strTitle, fontsize=18) if pltType == 'spread': plt.ylabel(txtYlabel, fontsize=labelFontSize) strTitle = 'Spread growth for ' + varLabels[variable] plt.title(strTitle, fontsize=18) plt.grid(True,which="both", axis='xy') # axSP.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.1d")) # axSP.xaxis.set_major_formatter(ticker.FormatStrFormatter("%.1d")) axSP.xaxis.set_major_formatter(ticker.FuncFormatter(dt.myLogFormat)) axSP.yaxis.set_major_formatter(ticker.FuncFormatter(dt.myLogFormat)) toc = time.clock() print('Total elapsed time: ', toc-tic, ' seconds.') # Main title titleStr = 'Growth of spread in the attractor for \n' + product + ': ' + str(timeStamps_datetime[0]) + ' - ' + str(timeStamps_datetime[len(timeStamps_datetime)-1]) plt.suptitle(titleStr, fontsize=20) # Save figure fileName = outBaseDir + product + '_' + pltType + '_' + timeStartStr + '-' + timeEndStr + '0_' + 'Rgt' + str(args.minR) + '_WOLS' + str(args.wols) + '_00005_warGt' + str("%0.1f" % warThreshold) + '_logIMFWAR' + str(int(logIMFWAR)) + '_' + timeAccumMinStr + '.png' print('Saving: ',fileName) plt.savefig(fileName, dpi=300)<|fim▁end|>
## Compute data increments (changes from one time instant to the other) arrayStats_increments = np.diff(arrayStats_attractor, axis=0) # Set first increment equal to the second
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Fork of Arc for Servo. This has the following advantages over std::Arc: //! * We don't waste storage on the weak reference count. //! * We don't do extra RMU operations to handle the possibility of weak references. //! * We can experiment with arena allocation (todo). //! * We can add methods to support our custom use cases [1]. //! * We have support for dynamically-sized types (see from_header_and_iter). //! * We have support for thin arcs to unsized types (see ThinArc). //! //! [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1360883 // The semantics of Arc are alread documented in the Rust docs, so we don't // duplicate those here. #![allow(missing_docs)] #[cfg(feature = "servo")] extern crate serde; extern crate nodrop; #[cfg(feature = "servo")] use heapsize::HeapSizeOf; use nodrop::NoDrop; #[cfg(feature = "servo")] use serde::{Deserialize, Serialize}; use std::{isize, usize}; use std::borrow; use std::cmp::Ordering; use std::convert::From; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter::{ExactSizeIterator, Iterator}; use std::mem; use std::ops::{Deref, DerefMut}; use std::process; use std::ptr; use std::slice; use std::sync::atomic; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; // Private macro to get the offset of a struct field in bytes from the address of the struct. macro_rules! offset_of { ($container:path, $field:ident) => {{ // Make sure the field actually exists. 
This line ensures that a compile-time error is // generated if $field is accessed through a Deref impl. let $container { $field: _, .. }; // Create an (invalid) instance of the container and calculate the offset to its // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to // be nullptr deref. let invalid: $container = ::std::mem::uninitialized(); let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; // Do not run destructors on the made up invalid instance. ::std::mem::forget(invalid); offset as isize }}; } /// A soft limit on the amount of references that may be made to an `Arc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// Wrapper type for pointers to get the non-zero optimization. When /// NonZero/Shared/Unique are stabilized, we should just use Shared /// here to get the same effect. Gankro is working on this in [1]. /// /// It's unfortunate that this needs to infect all the caller types /// with 'static. It would be nice to just use a &() and a PhantomData<T> /// instead, but then the compiler can't determine whether the &() should /// be thin or fat (which depends on whether or not T is sized). Given /// that this is all a temporary hack, this restriction is fine for now. 
/// /// [1] https://github.com/rust-lang/rust/issues/27730 pub struct NonZeroPtrMut<T: ?Sized + 'static>(&'static mut T); impl<T: ?Sized> NonZeroPtrMut<T> { pub fn new(ptr: *mut T) -> Self { assert!(!(ptr as *mut u8).is_null()); NonZeroPtrMut(unsafe { mem::transmute(ptr) }) } pub fn ptr(&self) -> *mut T { self.0 as *const T as *mut T } } impl<T: ?Sized + 'static> Clone for NonZeroPtrMut<T> { fn clone(&self) -> Self { NonZeroPtrMut::new(self.ptr()) } } impl<T: ?Sized + 'static> fmt::Pointer for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: ?Sized + 'static> fmt::Debug for NonZeroPtrMut<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { <Self as fmt::Pointer>::fmt(self, f) } } impl<T: ?Sized + 'static> PartialEq for NonZeroPtrMut<T> { fn eq(&self, other: &Self) -> bool { self.ptr() == other.ptr() } } impl<T: ?Sized + 'static> Eq for NonZeroPtrMut<T> {} pub struct Arc<T: ?Sized + 'static> { p: NonZeroPtrMut<ArcInner<T>>, } /// An Arc that is known to be uniquely owned /// /// This lets us build arcs that we can mutate before /// freezing, without needing to change the allocation pub struct UniqueArc<T: ?Sized + 'static>(Arc<T>); impl<T> UniqueArc<T> { #[inline] /// Construct a new UniqueArc pub fn new(data: T) -> Self { UniqueArc(Arc::new(data)) } #[inline] /// Convert to a shareable Arc<T> once we're done using it pub fn shareable(self) -> Arc<T> { self.0 } } impl<T> Deref for UniqueArc<T> { type Target = T; fn deref(&self) -> &T { &*self.0 } } impl<T> DerefMut for UniqueArc<T> { fn deref_mut(&mut self) -> &mut T { // We know this to be uniquely owned unsafe { &mut (*self.0.ptr()).data } } } unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} struct ArcInner<T: ?Sized> { count: atomic::AtomicUsize, data: T, } unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> 
{} impl<T> Arc<T> { #[inline] pub fn new(data: T) -> Self { let x = Box::new(ArcInner { count: atomic::AtomicUsize::new(1), data: data, }); Arc { p: NonZeroPtrMut::new(Box::into_raw(x)) } } pub fn into_raw(this: Self) -> *const T { let ptr = unsafe { &((*this.ptr()).data) as *const _ }; mem::forget(this); ptr } pub unsafe fn from_raw(ptr: *const T) -> Self { // To find the corresponding pointer to the `ArcInner` we need // to subtract the offset of the `data` field from the pointer. let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data)); Arc { p: NonZeroPtrMut::new(ptr as *mut ArcInner<T>), } } } impl<T: ?Sized> Arc<T> { #[inline] fn inner(&self) -> &ArcInner<T> { // This unsafety is ok because while this arc is alive we're guaranteed // that the inner pointer is valid. Furthermore, we know that the // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. unsafe { &*self.ptr() } } // Non-inlined part of `drop`. Just invokes the destructor. #[inline(never)] unsafe fn drop_slow(&mut self) { let _ = Box::from_raw(self.ptr()); } #[inline] pub fn ptr_eq(this: &Self, other: &Self) -> bool { this.ptr() == other.ptr() } fn ptr(&self) -> *mut ArcInner<T> { self.p.ptr() } } impl<T: ?Sized> Clone for Arc<T> { #[inline] fn clone(&self) -> Self { // Using a relaxed ordering is alright here, as knowledge of the // original reference prevents other threads from erroneously deleting // the object. // // As explained in the [Boost documentation][1], Increasing the // reference counter can always be done with memory_order_relaxed: New // references to an object can only be formed from an existing // reference, and passing an existing reference from one thread to // another must already provide any required synchronization. 
// // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().count.fetch_add(1, Relaxed); // However we need to guard against massive refcounts in case someone // is `mem::forget`ing Arcs. If we don't do this the count can overflow // and users will use-after free. We racily saturate to `isize::MAX` on // the assumption that there aren't ~2 billion threads incrementing // the reference count at once. This branch will never be taken in // any realistic program. // // We abort because such a program is incredibly degenerate, and we // don't care to support it. if old_size > MAX_REFCOUNT { process::abort(); } Arc { p: NonZeroPtrMut::new(self.ptr()) } } } impl<T: ?Sized> Deref for Arc<T> { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner().data } } impl<T: Clone> Arc<T> { #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if !this.is_unique() { // Another pointer exists; clone *this = Arc::new((**this).clone()); } unsafe { // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. &mut (*this.ptr()).data } } } impl<T: ?Sized> Arc<T> { #[inline] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if this.is_unique() { unsafe { // See make_mut() for documentation of the threadsafety here. Some(&mut (*this.ptr()).data) } } else { None } } #[inline] fn is_unique(&self) -> bool { // We can use Relaxed here, but the justification is a bit subtle. // // The reason to use Acquire would be to synchronize with other threads // that are modifying the refcount with Release, i.e. to ensure that // their writes to memory guarded by this refcount are flushed. 
However, // we know that threads only modify the contents of the Arc when they // observe the refcount to be 1, and no other thread could observe that // because we're holding one strong reference here. self.inner().count.load(Relaxed) == 1 } } impl<T: ?Sized> Drop for Arc<T> { #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. if self.inner().count.fetch_sub(1, Release) != 1 { return; } // FIXME(bholley): Use the updated comment when [2] is merged. // // This load is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` load. This // means that use of the data happens before decreasing the reference // count, which happens before this load, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. 
// // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // [2]: https://github.com/rust-lang/rust/pull/41714 self.inner().count.load(Acquire); unsafe { self.drop_slow(); } } } impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) } fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) } } impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) } fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) } fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) } fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) } } impl<T: ?Sized + Ord> Ord for Arc<T> { fn cmp(&self, other: &Arc<T>) -> Ordering { (**self).cmp(&**other) } } impl<T: ?Sized + Eq> Eq for Arc<T> {} impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<T: ?Sized> fmt::Pointer for Arc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.ptr(), f) } } impl<T: Default> Default for Arc<T> { fn default() -> Arc<T> { Arc::new(Default::default()) } } impl<T: ?Sized + Hash> Hash for Arc<T> { fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state) } } impl<T> From<T> for Arc<T> { fn from(t: T) -> Self { Arc::new(t) } } impl<T: ?Sized> borrow::Borrow<T> for Arc<T> { fn borrow(&self) -> &T { &**self } } impl<T: ?Sized> AsRef<T> for Arc<T> { fn as_ref(&self) -> &T { &**self } } // This is what the HeapSize crate does for regular arc, but is questionably // sound. 
See https://github.com/servo/heapsize/issues/37 #[cfg(feature = "servo")] impl<T: HeapSizeOf> HeapSizeOf for Arc<T> { fn heap_size_of_children(&self) -> usize { (**self).heap_size_of_children() } } #[cfg(feature = "servo")] impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> { fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error> where D: ::serde::de::Deserializer<'de>, { T::deserialize(deserializer).map(Arc::new) } } #[cfg(feature = "servo")] impl<T: Serialize> Serialize for Arc<T> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ::serde::ser::Serializer, { (**self).serialize(serializer) } } /// Structure to allow Arc-managing some fixed-sized data and a variably-sized /// slice in a single allocation. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderSlice<H, T: ?Sized> { /// The fixed-sized data. pub header: H, /// The dynamically-sized data. pub slice: T, } #[inline(always)] fn divide_rounding_up(dividend: usize, divisor: usize) -> usize { (dividend + divisor - 1) / divisor } impl<H, T> Arc<HeaderSlice<H, [T]>> { /// Creates an Arc for a HeaderSlice using the given header struct and /// iterator to generate the slice. The resulting Arc will be fat. #[inline] pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self where I: Iterator<Item=T> + ExactSizeIterator { use ::std::mem::size_of; assert!(size_of::<T>() != 0, "Need to think about ZST"); // Compute the required size for the allocation. let num_items = items.len(); let size = { // First, determine the alignment of a hypothetical pointer to a // HeaderSlice. let fake_slice_ptr_align: usize = mem::align_of::<ArcInner<HeaderSlice<H, [T; 1]>>>(); // Next, synthesize a totally garbage (but properly aligned) pointer // to a sequence of T. let fake_slice_ptr = fake_slice_ptr_align as *const T; // Convert that sequence to a fat pointer. The address component of // the fat pointer will be garbage, but the length will be correct. 
let fake_slice = unsafe { slice::from_raw_parts(fake_slice_ptr, num_items) }; // Pretend the garbage address points to our allocation target (with // a trailing sequence of T), rather than just a sequence of T. let fake_ptr = fake_slice as *const [T] as *const ArcInner<HeaderSlice<H, [T]>>; let fake_ref: &ArcInner<HeaderSlice<H, [T]>> = unsafe { &*fake_ptr }; // Use size_of_val, which will combine static information about the // type with the length from the fat pointer. The garbage address // will not be used. mem::size_of_val(fake_ref) }; let ptr: *mut ArcInner<HeaderSlice<H, [T]>>; unsafe { // Allocate the buffer. We use Vec because the underlying allocation // machinery isn't available in stable Rust. // // To avoid alignment issues, we allocate words rather than bytes, // rounding up to the nearest word size. let buffer = if mem::align_of::<T>() <= mem::align_of::<usize>() { Self::allocate_buffer::<usize>(size) } else if mem::align_of::<T>() <= mem::align_of::<u64>() { // On 32-bit platforms <T> may have 8 byte alignment while usize has 4 byte aligment. // Use u64 to avoid over-alignment. // This branch will compile away in optimized builds. Self::allocate_buffer::<u64>(size) } else { panic!("Over-aligned type not handled"); }; // Synthesize the fat pointer. We do this by claiming we have a direct // pointer to a [T], and then changing the type of the borrow. The key // point here is that the length portion of the fat pointer applies // only to the number of elements in the dynamically-sized portion of // the type, so the value will be the same whether it points to a [T] // or something else with a [T] as its last member. let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; // Write the data. // // Note that any panics here (i.e. from the iterator) are safe, since // we'll just leak the uninitialized memory. 
ptr::write(&mut ((*ptr).count), atomic::AtomicUsize::new(1)); ptr::write(&mut ((*ptr).data.header), header); let mut current: *mut T = &mut (*ptr).data.slice[0]; for _ in 0..num_items { ptr::write(current, items.next().expect("ExactSizeIterator over-reported length")); current = current.offset(1); } assert!(items.next().is_none(), "ExactSizeIterator under-reported length"); // We should have consumed the buffer exactly. debug_assert!(current as *mut u8 == buffer.offset(size as isize)); } // Return the fat Arc. assert_eq!(size_of::<Self>(), size_of::<usize>() * 2, "The Arc will be fat"); Arc { p: NonZeroPtrMut::new(ptr) } } #[inline] unsafe fn allocate_buffer<W>(size: usize) -> *mut u8 { let words_to_allocate = divide_rounding_up(size, mem::size_of::<W>()); let mut vec = Vec::<W>::with_capacity(words_to_allocate); vec.set_len(words_to_allocate); Box::into_raw(vec.into_boxed_slice()) as *mut W as *mut u8 } } /// Header data with an inline length. Consumers that use HeaderWithLength as the /// Header type in HeaderSlice can take advantage of ThinArc. #[derive(Debug, Eq, PartialEq, PartialOrd)] pub struct HeaderWithLength<H> { /// The fixed-sized data. pub header: H, /// The slice length. length: usize, } impl<H> HeaderWithLength<H> { /// Creates a new HeaderWithLength. pub fn new(header: H, length: usize) -> Self { HeaderWithLength { header: header, length: length, } } } type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>; pub struct ThinArc<H: 'static, T: 'static> { ptr: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>, } unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} // Synthesize a fat pointer from a thin pointer. // // See the comment around the analogous operation in from_header_and_iter. 
fn thin_to_thick<H, T>(thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>>) -> *mut ArcInner<HeaderSliceWithLength<H, [T]>> { let len = unsafe { (*thin).data.header.length }; let fake_slice: *mut [T] = unsafe { slice::from_raw_parts_mut(thin as *mut T, len) }; fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>> } impl<H: 'static, T: 'static> ThinArc<H, T> { /// Temporarily converts |self| into a bonafide Arc and exposes it to the /// provided callback. The refcount is not modified. #[inline(always)] pub fn with_arc<F, U>(&self, f: F) -> U<|fim▁hole|> { // Synthesize transient Arc, which never touches the refcount of the ArcInner. let transient = NoDrop::new(Arc { p: NonZeroPtrMut::new(thin_to_thick(self.ptr)) }); // Expose the transient Arc to the callback, which may clone it if it wants. let result = f(&transient); // Forget the transient Arc to leave the refcount untouched. mem::forget(transient); // Forward the result. result } } impl<H, T> Deref for ThinArc<H, T> { type Target = HeaderSliceWithLength<H, [T]>; fn deref(&self) -> &Self::Target { unsafe { &(*thin_to_thick(self.ptr)).data } } } impl<H: 'static, T: 'static> Clone for ThinArc<H, T> { fn clone(&self) -> Self { ThinArc::with_arc(self, |a| Arc::into_thin(a.clone())) } } impl<H: 'static, T: 'static> Drop for ThinArc<H, T> { fn drop(&mut self) { let _ = Arc::from_thin(ThinArc { ptr: self.ptr }); } } impl<H: 'static, T: 'static> Arc<HeaderSliceWithLength<H, [T]>> { /// Converts an Arc into a ThinArc. This consumes the Arc, so the refcount /// is not modified. pub fn into_thin(a: Self) -> ThinArc<H, T> { assert!(a.header.length == a.slice.len(), "Length needs to be correct for ThinArc to work"); let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr(); mem::forget(a); let thin_ptr = fat_ptr as *mut [usize] as *mut usize; ThinArc { ptr: thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 1]>> } } /// Converts a ThinArc into an Arc. 
This consumes the ThinArc, so the refcount /// is not modified. pub fn from_thin(a: ThinArc<H, T>) -> Self { let ptr = thin_to_thick(a.ptr); mem::forget(a); Arc { p: NonZeroPtrMut::new(ptr) } } } impl<H: PartialEq + 'static, T: PartialEq + 'static> PartialEq for ThinArc<H, T> { fn eq(&self, other: &ThinArc<H, T>) -> bool { ThinArc::with_arc(self, |a| { ThinArc::with_arc(other, |b| { *a == *b }) }) } } impl<H: Eq + 'static, T: Eq + 'static> Eq for ThinArc<H, T> {} #[cfg(test)] mod tests { use std::clone::Clone; use std::ops::Drop; use std::sync::atomic; use std::sync::atomic::Ordering::{Acquire, SeqCst}; use super::{Arc, HeaderWithLength, ThinArc}; #[derive(PartialEq)] struct Canary(*mut atomic::AtomicUsize); impl Drop for Canary { fn drop(&mut self) { unsafe { (*self.0).fetch_add(1, SeqCst); } } } #[test] fn slices_and_thin() { let mut canary = atomic::AtomicUsize::new(0); let c = Canary(&mut canary as *mut atomic::AtomicUsize); let v = vec![5, 6]; let header = HeaderWithLength::new(c, v.len()); { let x = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter())); let y = ThinArc::with_arc(&x, |q| q.clone()); let _ = y.clone(); let _ = x == x; Arc::from_thin(x.clone()); } assert!(canary.load(Acquire) == 1); } }<|fim▁end|>
where F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U
<|file_name|>CWE127_Buffer_Underread__CWE839_fscanf_83_bad.cpp<|end_file_name|><|fim▁begin|>/* TEMPLATE GENERATED TESTCASE FILE <|fim▁hole|>Template File: sources-sinks-83_bad.tmpl.cpp */ /* * @description * CWE: 127 Buffer Underread * BadSource: fscanf Read data from the console using fscanf() * GoodSource: Non-negative but less than 10 * Sinks: * GoodSink: Ensure the array index is valid * BadSink : Improperly check the array index by not checking to see if the value is negative * Flow Variant: 83 Data flow: data passed to class constructor and destructor by declaring the class object on the stack * * */ #ifndef OMITBAD #include "std_testcase.h" #include "CWE127_Buffer_Underread__CWE839_fscanf_83.h" namespace CWE127_Buffer_Underread__CWE839_fscanf_83 { CWE127_Buffer_Underread__CWE839_fscanf_83_bad::CWE127_Buffer_Underread__CWE839_fscanf_83_bad(int dataCopy) { data = dataCopy; /* POTENTIAL FLAW: Read data from the console using fscanf() */ fscanf(stdin, "%d", &data); } CWE127_Buffer_Underread__CWE839_fscanf_83_bad::~CWE127_Buffer_Underread__CWE839_fscanf_83_bad() { { int buffer[10] = { 0 }; /* POTENTIAL FLAW: Attempt to access a negative index of the array * This check does not check to see if the array index is negative */ if (data < 10) { printIntLine(buffer[data]); } else { printLine("ERROR: Array index is too big."); } } } } #endif /* OMITBAD */<|fim▁end|>
Filename: CWE127_Buffer_Underread__CWE839_fscanf_83_bad.cpp Label Definition File: CWE127_Buffer_Underread__CWE839.label.xml
<|file_name|>Tag.js<|end_file_name|><|fim▁begin|>import React from "react"; import PropTypes from "prop-types"; import styled from "styled-components"; const StyledTag = styled.span` color: ${props => props.color}; background: ${props => props.bgcolor}; padding: ${props => props.padding}; margin: ${props => props.margin}; border-radius: 3px; border: ${props => `1px solid ${props.border}`}; max-width: ${props => props.maxWidth}; word-break: break-all; line-height: 20px; `; const Tag = ({ text, size = "medium", color = {<|fim▁hole|> border: "#d9d9d9", color: "rgba(0,0,0,0.65)" }, margin = "", maxWidth = "300px" }) => { const getPaddingBySize = size => { const choices = { small: "0px 5px", medium: "1px 6px", large: "5px 10px" }; return choices[size]; }; return ( <StyledTag bgcolor={color.bgcolor} border={color.border} color={color.color} padding={getPaddingBySize(size)} margin={margin} maxWidth={maxWidth} > {text} </StyledTag> ); }; Tag.propTypes = { color: PropTypes.shape({ bgcolor: PropTypes.string, border: PropTypes.string, color: PropTypes.string }), text: PropTypes.string, size: PropTypes.string, margin: PropTypes.string, maxWidth: PropTypes.string }; export default Tag;<|fim▁end|>
bgcolor: "#fafafa",
<|file_name|>bin2pset.cpp<|end_file_name|><|fim▁begin|>/// @addtogroup homology /// @{ ///////////////////////////////////////////////////////////////////////////// /// /// @file bin2pset.cpp /// /// @author Pawel Pilarczyk /// ///////////////////////////////////////////////////////////////////////////// // Copyright (C) 1997-2013 by Pawel Pilarczyk. // // This file is part of the Homology Library. This library is free software; // you can redistribute it and/or modify it under the terms of the GNU // General Public License as published by the Free Software Foundation; // either version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License along // with this software; see the file "license.txt". If not, write to the // Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, // MA 02111-1307, USA. // Started on November 18, 2005. Last revision: November 18, 2005. #include "chomp/system/config.h" #include "chomp/system/textfile.h" #include "chomp/system/timeused.h" #include "chomp/system/arg.h" #include <exception> #include <new> #include <iostream> #include <fstream> #include <cstdio> #include <cstdlib> #include <ctime> using namespace std; using namespace chomp::homology; // -------------------------------------------------- // -------------------- OVERTURE -------------------- // -------------------------------------------------- const char *title = "\ BinCube->PointSet, ver. 0.01. Copyright (C) 1997-2013 by Pawel Pilarczyk.\n\ This is free software. No warranty. 
Consult 'license.txt' for details."; const char *helpinfo = "\ Call with: file.bin file.cub -d dim -s size\n\ This program reads a set of points encoded in a binary format, and writes\n\ the coordinates of the points corresponding to nonzero bits.\n\ Additional arguments:\n\ -d N - set the space dimension (must be specified),\n\ -s N - set the size of the binary cube (repeat for other directions),\n\ -x N, -y N, -z N - set the size in the specific direction,\n\ -i N - skip the initial N bytes in the input file.\n\ -h - display this brief help information only and exit.\n\ For more information ask the author at http://www.PawelPilarczyk.com/."; const int maxdimension = 32; // -------------------------------------------------- // -------------------- BIN2PSET -------------------- // -------------------------------------------------- void inc_counter (int *counter, const int *sizes, int length) { while (length --) { ++ *counter; if (*counter < *sizes) return; *counter = 0; ++ counter; ++ sizes; } return; } /* inc_counter */ int writecubes (const char *buf, int size, int *coord, int dim, ostream &outfile) { int ncubes = 0; int buflength = (size + 7) >> 3; for (int pos = 0; pos < buflength; ++ pos) { if (!buf [pos]) continue; int maxbit = 8; if ((pos << 3) + maxbit > size) maxbit = size - (pos << 3); int byte = buf [pos]; for (int bit = 0; bit < maxbit; ++ bit) { if (!(byte & (1 << bit))) continue; outfile << "(" << ((pos << 3) + bit); for (int i = 1; i < dim; ++ i) outfile << "," << coord [i]; outfile << ")\n"; ++ ncubes; } } return ncubes; } /* writecubes */ int bincube2pointset (char *inname, char *outname, int dim, const int* sizes, int initialskip) // Returns: 0 = Ok, -1 = error (shows msg). { // open the input file ifstream infile; infile. open (inname, ios::binary | ios::in); if (initialskip) infile. 
seekg (initialskip); if (!infile) fileerror (inname); // prepare data to scan for cubes in the file int coord [maxdimension]; for (int i = 0; i < maxdimension; ++ i) coord [i] = 0; int linelength = (sizes [0] + 7) >> 3; char *buf = new char [linelength]; // open the output file ofstream outfile (outname); if (!outfile) fileerror (outname, "create"); // process all the rows from the file sout << "Processing lines of bits... "; int counter = 0; int ncubes = 0; while (1) { // read a line from the input file and break if none infile. read (buf, linelength); if (infile. eof ()) break; // write the cubes listed within this line to a file ncubes += writecubes (buf, sizes [0], coord, dim, outfile); // increase the other coordinates for the next line inc_counter (coord + 1, sizes + 1, dim - 1); // update the counter and show it if necessary ++ counter; if (!(counter % 1847)) scon << std::setw (10) << counter << "\b\b\b\b\b\b\b\b\b\b"; } sout << ncubes << " cubes extracted.\n"; // finalize delete [] buf; return 0; } /* binary cube to pointset */ // -------------------------------------------------- // ---------------------- MAIN ---------------------- // -------------------------------------------------- int main (int argc, char *argv []) // Return: 0 = Ok, -1 = Error, 1 = Help displayed, 2 = Wrong arguments. { // prepare user-configurable data char *inname = 0, *outname = 0; int dim = 0; int sizes [maxdimension]; for (int i = 0; i < maxdimension; ++ i) sizes [i] = 0; int nsizes = 0; int initialskip = 0; // analyze the command line arguments a; arg (a, 0, inname); arg (a, 0, outname); arg (a, "d", dim); arg (a, "s", sizes, nsizes, maxdimension); arg (a, "i", initialskip); arg (a, "x", sizes [0]); arg (a, "y", sizes [1]); arg (a, "z", sizes [2]); arghelp (a); argstreamprepare (a); int argresult = a. 
analyze (argc, argv); argstreamset (); // show the program's main title if (argresult >= 0) sout << title << '\n'; // check whether the entered arguments are correct and adjust sizes if (!outname) argresult = 1; for (int i = 0; i < 3; ++ i) if (sizes [i] && (nsizes <= i)) nsizes = i + 1; if (!dim && (nsizes > 1)) dim = nsizes; if (nsizes) { for (int i = nsizes; i < maxdimension; ++ i) sizes [i] = sizes [nsizes - 1]; } for (int i = 0; i < nsizes; ++ i) { if (sizes [i] > 0) continue; sout << "ERROR: The sizes must be positive.\n"; argresult = 1; break; } if (!argresult && !nsizes) { sout << "ERROR: The size of the binary cube " "must be defined.\n"; argresult = 1; } if (!argresult && !dim) { sout << "ERROR: Please, define the dimension of the " "binary cube.\n"; argresult = 1; } // if something was incorrect, show an additional message and exit if (argresult < 0) { sout << "Call with '--help' for help.\n"; return 2; } // if help requested or no output name defined, show help information if (argresult > 0) { sout << helpinfo << '\n'; return 1; } // try running the main function and catch an error message if thrown try { bincube2pointset (inname, outname, dim, sizes, initialskip); program_time = 1; return 0; }<|fim▁hole|> } catch (const std::exception &e) { sout << "ERROR: " << e. what () << '\n'; return -1; } catch (...) { sout << "ABORT: An unknown error occurred.\n"; return -1; } } /* main */ /// @}<|fim▁end|>
catch (const char *msg) { sout << "ERROR: " << msg << '\n'; return -1;
<|file_name|>test_api.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for DB API.""" import mock from oslo_config import cfg from oslo_utils import importutils from oslo_db import api from oslo_db import exception from oslo_db.tests import utils as test_utils sqla = importutils.try_import('sqlalchemy') if not sqla: raise ImportError("Unable to import module 'sqlalchemy'.") def get_backend(): return DBAPI() class DBAPI(object): def _api_raise(self, *args, **kwargs): """Simulate raising a database-has-gone-away error This method creates a fake OperationalError with an ID matching a valid MySQL "database has gone away" situation. It also decrements<|fim▁hole|> how many times this function is called by the wrapper. When error_counter reaches zero, this function returns True, simulating the database becoming available again and the query succeeding. 
""" if self.error_counter > 0: self.error_counter -= 1 orig = sqla.exc.DBAPIError(False, False, False) orig.args = [2006, 'Test raise operational error'] e = exception.DBConnectionError(orig) raise e else: return True def api_raise_default(self, *args, **kwargs): return self._api_raise(*args, **kwargs) @api.safe_for_db_retry def api_raise_enable_retry(self, *args, **kwargs): return self._api_raise(*args, **kwargs) def api_class_call1(_self, *args, **kwargs): return args, kwargs class DBAPITestCase(test_utils.BaseTestCase): def test_dbapi_full_path_module_method(self): dbapi = api.DBAPI('oslo_db.tests.test_api') result = dbapi.api_class_call1(1, 2, kwarg1='meow') expected = ((1, 2), {'kwarg1': 'meow'}) self.assertEqual(expected, result) def test_dbapi_unknown_invalid_backend(self): self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent') def test_dbapi_lazy_loading(self): dbapi = api.DBAPI('oslo_db.tests.test_api', lazy=True) self.assertIsNone(dbapi._backend) dbapi.api_class_call1(1, 'abc') self.assertIsNotNone(dbapi._backend) def test_dbapi_from_config(self): conf = cfg.ConfigOpts() dbapi = api.DBAPI.from_config(conf, backend_mapping={'sqlalchemy': __name__}) self.assertIsNotNone(dbapi._backend) class DBReconnectTestCase(DBAPITestCase): def setUp(self): super(DBReconnectTestCase, self).setUp() self.test_db_api = DBAPI() patcher = mock.patch(__name__ + '.get_backend', return_value=self.test_db_api) patcher.start() self.addCleanup(patcher.stop) def test_raise_connection_error(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise) def test_raise_connection_error_decorated(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi.api_raise_enable_retry) self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') def 
test_raise_connection_error_enabled(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True) self.test_db_api.error_counter = 5 self.assertRaises(exception.DBConnectionError, self.dbapi.api_raise_default) self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry') def test_retry_one(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1) try: func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 1 self.assertTrue(func(), 'Single retry did not succeed.') except Exception: self.fail('Single retry raised an un-wrapped error.') self.assertEqual( 0, self.test_db_api.error_counter, 'Counter not decremented, retry logic probably failed.') def test_retry_two(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1, inc_retry_interval=False) try: func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 2 self.assertTrue(func(), 'Multiple retry did not succeed.') except Exception: self.fail('Multiple retry raised an un-wrapped error.') self.assertEqual( 0, self.test_db_api.error_counter, 'Counter not decremented, retry logic probably failed.') def test_retry_until_failure(self): self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__}, use_db_reconnect=True, retry_interval=1, inc_retry_interval=False, max_retries=3) func = self.dbapi.api_raise_enable_retry self.test_db_api.error_counter = 5 self.assertRaises( exception.DBError, func, 'Retry of permanent failure did not throw DBError exception.') self.assertNotEqual( 0, self.test_db_api.error_counter, 'Retry did not stop after sql_max_retries iterations.') class DBRetryRequestCase(DBAPITestCase): def test_retry_wrapper_succeeds(self): @api.wrap_db_retry(max_retries=10, retry_on_request=True) def some_method(): pass some_method() def test_retry_wrapper_reaches_limit(self): max_retries = 10 @api.wrap_db_retry(max_retries=10, 
retry_on_request=True) def some_method(res): res['result'] += 1 raise exception.RetryRequest(ValueError()) res = {'result': 0} self.assertRaises(ValueError, some_method, res) self.assertEqual(max_retries + 1, res['result']) def test_retry_wrapper_exception_checker(self): def exception_checker(exc): return isinstance(exc, ValueError) and exc.args[0] < 5 @api.wrap_db_retry(max_retries=10, retry_on_request=True, exception_checker=exception_checker) def some_method(res): res['result'] += 1 raise ValueError(res['result']) res = {'result': 0} self.assertRaises(ValueError, some_method, res) # our exception checker should have stopped returning True after 5 self.assertEqual(5, res['result']) @mock.patch.object(DBAPI, 'api_class_call1') @mock.patch.object(api, 'wrap_db_retry') def test_mocked_methods_are_not_wrapped(self, mocked_wrap, mocked_method): dbapi = api.DBAPI('oslo_db.tests.test_api') dbapi.api_class_call1() self.assertFalse(mocked_wrap.called)<|fim▁end|>
the error_counter so that we can artificially keep track of
<|file_name|>hard.py<|end_file_name|><|fim▁begin|>""" Curriculum-based course timetabling solver; solves timetabling problems formulated in .ectt file format (http://tabu.diegm.uniud.it/ctt/) Copyright (C) 2013 Stephan E. Becker This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.""" __author__ = 'Stephan Becker' import math import data # hard constraint: Availability def teacherIsAvailable(event, timeslot): """ return True if the teacher of the course is available in the timeslot """ if event is None: return True for constraint in data.unavailability_constraints: if event.id == constraint.courseID and timeslot == constraint.timeslot: return False return True # hard constraint: Lectures (part 2 of 2) def timeslotHasSameLecture(event, timeslot): """ checks if a lecture of the same course is already assigned to this timeslot, returns True if there is already a lecture of the course in this timeslot """ if event is None: return False for room in range(data.numberOfRooms): if not data.timetable[(room, timeslot)] is None: if data.timetable[(room, timeslot)].id == event.id: return True return False def timeslotHasSameTeacher(event, timeslot): """ checks if a course with the same teacher is already assigned to this timeslot, returns True if there is """ if event is None: return False for room in range(data.numberOfRooms): currentEv = data.timetable[(room, timeslot)] # is the current course also taught by this teacher? 
if not currentEv is None: if currentEv.id in data.teachers[event.teacher]: return True return False <|fim▁hole|>def timeslotHasSameCurriculum(event, timeslot): """ checks if a course in the same timeslot is part of the same curriculum returns True if it is """ if event is None: return False curriculaOfEvent = data.coursesToCurricula[event.id] # which curricula is this course part of? for room in range(data.numberOfRooms): currentEv = data.timetable[(room, timeslot)] if not currentEv is None: for cu in curriculaOfEvent: # checks whether the current course is also part of the same curriculum if currentEv.id in data.curriculaToCourses[cu]: return True return False def assignCourseToPosition(course, position): """ assign the course to the position in the timetable """ # if data.timetable[position] is None and courseFitsIntoTimeslot(course, position[1]): data.timetable[position] = course data.emptyPositions.remove(position) data.forbiddenPositions.append(position) def removeCourseAtPosition(position): """ remove the course which was assigned at the position from the timetable and add it to unassigned events returns the removed course """ ev = data.timetable[position] if not ev is None: data.timetable[position] = None data.emptyPositions.append(position) return ev def courseFitsIntoTimeslot(course, timeslot): return not timeslotHasSameLecture(course, timeslot) and teacherIsAvailable(course, timeslot) \ and not timeslotHasSameTeacher(course, timeslot) and not timeslotHasSameCurriculum(course, timeslot)<|fim▁end|>
<|file_name|>deactivate_placements.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code example deactivates all active placements. To determine which placements exist, run get_all_placements.py.""" __author__ = 'api.shamjeff@gmail.com (Jeff Sham)' # Locate the client library. If module was installed via "setup.py" script, then # the following two lines are not needed. import os import sys sys.path.insert(0, os.path.join('..', '..', '..', '..')) # Import appropriate classes from the client library. from adspygoogle import DfpClient from adspygoogle.dfp import DfpUtils # Initialize client object. client = DfpClient(path=os.path.join('..', '..', '..', '..')) # Initialize appropriate service. placement_service = client.GetService('PlacementService', version='v201306') # Create query. values = [{ 'key': 'status', 'value': { 'xsi_type': 'TextValue', 'value': 'ACTIVE' } }] query = 'WHERE status = :status' # Get placements by statement. placements = DfpUtils.GetAllEntitiesByStatementWithService(<|fim▁hole|> 'deactivated.' % (placement['id'], placement['name'], placement['status'])) print 'Number of placements to be deactivated: %s' % len(placements) # Perform action. result = placement_service.PerformPlacementAction( {'type': 'DeactivatePlacements'}, {'query': query, 'values': values})[0] # Display results. 
if result and int(result['numChanges']) > 0: print 'Number of placements deactivated: %s' % result['numChanges'] else: print 'No placements were deactivated.'<|fim▁end|>
placement_service, query=query, bind_vars=values) for placement in placements: print ('Placement with id \'%s\', name \'%s\', and status \'%s\' will be '
<|file_name|>stack.py<|end_file_name|><|fim▁begin|>from functools import partial from xml.sax.saxutils import quoteattr from navmazing import NavigateToSibling, NavigateToAttribute from selenium.common.exceptions import NoSuchElementException import cfme.fixtures.pytest_selenium as sel from cfme import web_ui as ui from cfme.exceptions import DestinationNotFound, StackNotFound, CandidateNotFound from cfme.web_ui import Quadicon, flash, Form, fill, form_buttons, paginator, toolbar as tb, \ match_location, accordion from cfme.exceptions import CFMEException, FlashMessageException from utils.appliance import Navigatable from utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep from utils.pretty import Pretty from utils.wait import wait_for cfg_btn = partial(tb.select, "Configuration") pol_btn = partial(tb.select, 'Policy') lifecycle_btn = partial(tb.select, 'Lifecycle') edit_tags_form = Form( fields=[ ("select_tag", ui.Select("select#tag_cat")), ("select_value", ui.Select("select#tag_add")) ]) match_page = partial(match_location, controller='orchestration_stack', title='Stacks') class Stack(Pretty, Navigatable): _param_name = "Stack" pretty_attrs = ['name'] def __init__(self, name, provider, quad_name=None, appliance=None): self.name = name self.quad_name = quad_name or 'stack' self.provider = provider Navigatable.__init__(self, appliance=appliance) def find_quadicon(self): """Find and return the quadicon belonging to this stack Args: Returns: :py:class:`cfme.web_ui.Quadicon` instance """ paginator.results_per_page(100) for page in paginator.pages(): quadicon = Quadicon(self.name, self.quad_name) if sel.is_displayed(quadicon): return quadicon else: raise StackNotFound("Stack '{}' not found in UI!".format(self.name)) def delete(self, from_dest='All'): """ Delete the stack, starting from the destination provided by from_dest @param from_dest: where to delete from, a valid navigation destination for Stack """ # Navigate to the starting 
destination if from_dest in navigator.list_destinations(self): navigate_to(self, from_dest) else: msg = 'cfme.cloud.stack does not have destination {}'.format(from_dest) raise DestinationNotFound(msg) # Delete using the method appropriate for the starting destination if from_dest == 'All': sel.check(Quadicon(self.name, self.quad_name).checkbox()) cfg_btn("Remove Orchestration Stacks", invokes_alert=True) elif from_dest == 'Details': cfg_btn("Remove this Orchestration Stack", invokes_alert=True) sel.handle_alert() # The delete initiated message may get missed if the delete is fast try: flash.assert_message_contain("Delete initiated for 1 Orchestration Stacks") except FlashMessageException as ex: if 'No flash message contains' in ex.message: flash.assert_message_contain("The selected Orchestration Stacks was deleted") self.wait_for_delete() def edit_tags(self, tag, value): navigate_to(self, 'EditTags') fill(edit_tags_form, {'select_tag': tag, 'select_value': value}, action=form_buttons.save) flash.assert_success_message('Tag edits were successfully saved') company_tag = self.get_tags() if company_tag != "{}: {}".format(tag.replace(" *", ""), value): raise CFMEException("{} ({}) tag is not assigned!".format(tag.replace(" *", ""), value)) def get_tags(self): navigate_to(self, 'Details') row = sel.elements("//*[(self::th or self::td) and normalize-space(.)={}]/../.." 
"//td[img[contains(@src, 'smarttag')]]".format(quoteattr("My Company Tags"))) company_tag = sel.text(row).strip() return company_tag def refresh_view_and_provider(self): self.provider.refresh_provider_relationships() tb.refresh() def wait_for_delete(self): def _wait_to_disappear(): try: self.find_quadicon() except StackNotFound: return True else: return False navigate_to(self, 'All') wait_for(_wait_to_disappear, fail_condition=False, message="Wait stack to disappear", num_sec=15 * 60, fail_func=self.refresh_view_and_provider, delay=30) def wait_for_appear(self): def _wait_to_appear(): try: self.find_quadicon() except StackNotFound: return False else: return True navigate_to(self, 'All') wait_for(_wait_to_appear, fail_condition=False, message="Wait stack to appear", num_sec=15 * 60, fail_func=self.refresh_view_and_provider, delay=30) def retire_stack(self, wait=True): navigate_to(self, 'All') sel.check(self.find_quadicon()) lifecycle_btn("Retire this Orchestration Stack", invokes_alert=True) sel.handle_alert() flash.assert_success_message('Retirement initiated for 1 Orchestration' ' Stack from the CFME Database') if wait: self.wait_for_delete() @navigator.register(Stack, 'All') class All(CFMENavigateStep): prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')<|fim▁hole|> def step(self): self.prerequisite_view.navigation.select('Compute', 'Clouds', 'Stacks') def resetter(self): tb.select('Grid View') sel.check(paginator.check_all()) sel.uncheck(paginator.check_all()) @navigator.register(Stack, 'Details') class Details(CFMENavigateStep): prerequisite = NavigateToSibling('All') def am_i_here(self): return match_page(summary='{} (Summary)'.format(self.obj.name)) def step(self): sel.click(self.obj.find_quadicon()) @navigator.register(Stack, 'EditTags') class EditTags(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def step(self): pol_btn('Edit Tags') @navigator.register(Stack, 'RelationshipSecurityGroups') class 
RelationshipsSecurityGroups(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): return match_page(summary='{} (All Security Groups)'.format(self.obj.name)) def step(self): accordion.click('Relationships') # Click by anchor title since text contains a dynamic component try: sel.click('//*[@id="stack_rel"]//a[@title="Show all Security Groups"]') except NoSuchElementException: raise CandidateNotFound('No security groups for stack, cannot navigate') @navigator.register(Stack, 'RelationshipParameters') class RelationshipParameters(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): return match_page(summary='{} (Parameters)'.format(self.obj.name)) def step(self): accordion.click('Relationships') # Click by anchor title since text contains a dynamic component sel.click('//*[@id="stack_rel"]//a[@title="Show all Parameters"]') @navigator.register(Stack, 'RelationshipOutputs') class RelationshipOutputs(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): return match_page(summary='{} (Outputs)'.format(self.obj.name)) def step(self): accordion.click('Relationships') # Click by anchor title since text contains a dynamic component try: sel.click('//*[@id="stack_rel"]//a[@title="Show all Outputs"]') except NoSuchElementException: raise CandidateNotFound('No Outputs for stack, cannot navigate') @navigator.register(Stack, 'RelationshipResources') class RelationshipResources(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def am_i_here(self): return match_page(summary='{} (Resources)'.format(self.obj.name)) def step(self): accordion.click('Relationships') # Click by anchor title since text contains a dynamic component sel.click('//*[@id="stack_rel"]//a[@title="Show all Resources"]')<|fim▁end|>
def am_i_here(self): return match_page(summary='Orchestration Stacks')
<|file_name|>for.cpp<|end_file_name|><|fim▁begin|>#include <iostream> using namespace std; #include <omp.h> #define SIZE 8 int main(void){ int x[SIZE]; int sum=0; for(int i=0;i<SIZE;i++){<|fim▁hole|> #pragma omp parallel for reduction (+:sum) for(int i=0;i<SIZE;i++){ sum+=x[i]; } cout<<sum<<std::endl; return 0; }<|fim▁end|>
x[i]=i; }
<|file_name|>reserve-service.js<|end_file_name|><|fim▁begin|>import axios from 'axios'; import {getAuthInfo} from './firebase-service'; import {VIANCA, CHAN, TOPA, IBA_COLOMBIA} from './service-store'; export const ERROR_RESULT = 'ERROR'; export const RESERVED_RESULT = 'R'; export const INSUFICIENT_RESULT = 'I'; export const NOT_FOUND_RESULT = 'NF'; export function submitReserve(reserve, airlineCode) { let _this = this; let apiUrl = getAirlineUrl(airlineCode); reserve.token = getAuthInfo().idToken; return axios.post(apiUrl, reserve).then(result => { if (!result.message) { console.error("No \"message\" attribute in result: ", result) return ERROR_RESULT; } return result.message; }).catch(error => console.log(error)); }<|fim▁hole|> switch (airlineCode) { case VIANCA.code: return VIANCA.submitReserveUrl; case CHAN.code: return CHAN.submitReserveUrl; case TOPA.code: return TOPA.submitReserveUrl; case IBA_COLOMBIA.code: return IBA_COLOMBIA.submitReserveUrl; default: } } export function searchForReserve(apiUrl) { let _this = this; let url = apiUrl + '?token=' + getAuthInfo().idToken; // + getToken(); return axios.get(url); } export function submitAllReserveSearch() { console.log('Fetching reserve searchs'); let viancaResult = searchForReserve(VIANCA.fetchReserves); let chanResult = searchForReserve(CHAN.fetchReserves); let topaResult = searchForReserve(TOPA.fetchReserves); let ibaResult = searchForReserve(IBA_COLOMBIA.fetchReserves); let mergedResults = axios.all([viancaResult, chanResult, topaResult, ibaResult]); return mergedResults; }<|fim▁end|>
function getAirlineUrl(airlineCode) {
<|file_name|>markdown_it.js<|end_file_name|><|fim▁begin|>import markdownIt from 'markdown-it';
import _ from 'lodash';
import footnotes from 'markdown-it-footnote';

// Factory for a configured markdown-it instance.
// Caller options are merged with forced defaults: raw HTML and autolinking
// are always enabled (they override whatever the caller passed). When
// `opts.openLinksExternally` is truthy, every rendered link is given
// target="_blank" by overriding the link_open renderer rule.
export default function (opts) {
  // html/linkify always win over caller-supplied values.
  const mergedOpts = _.assign({}, opts, { html: true, linkify: true });<|fim▁hole|>
  // Remember old renderer, if overriden, or proxy to default renderer
  const defaultRender = md.renderer.rules.link_open || function (tokens, idx, options, env, self) {
    return self.renderToken(tokens, idx, options);
  };
  // Override link_open so links open in a new tab/window.
  md.renderer.rules.link_open = function (tokens, idx, options, env, self) {
    const aIndex = tokens[idx].attrIndex('target');
    if (aIndex < 0) {
      tokens[idx].attrPush(['target', '_blank']); // add new attribute
    } else {
      tokens[idx].attrs[aIndex][1] = '_blank'; // replace value of existing attr
    }
    // pass token to default renderer.
    return defaultRender(tokens, idx, options, env, self);
  };
  }
  return md;
}<|fim▁end|>
// Completion for the hole above: build the instance with the footnote plugin
// and guard the renderer override behind `openLinksExternally`.
const md = markdownIt(mergedOpts).use(footnotes);
if (mergedOpts.openLinksExternally) {
<|file_name|>fake_kops_client.go<|end_file_name|><|fim▁begin|>/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software<|fim▁hole|>distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fake

import (
	rest "k8s.io/client-go/rest"
	testing "k8s.io/client-go/testing"
	v1alpha1 "k8s.io/kops/pkg/client/clientset_generated/clientset/typed/kops/v1alpha1"
)

// FakeKopsV1alpha1 is a fake implementation of the kops v1alpha1 client,
// backed by client-go's testing.Fake object tracker for use in unit tests.
type FakeKopsV1alpha1 struct {
	*testing.Fake
}

// Clusters returns a fake ClusterInterface scoped to the given namespace.
func (c *FakeKopsV1alpha1) Clusters(namespace string) v1alpha1.ClusterInterface {
	return &FakeClusters{c, namespace}
}

// Federations returns a fake FederationInterface scoped to the given namespace.
func (c *FakeKopsV1alpha1) Federations(namespace string) v1alpha1.FederationInterface {
	return &FakeFederations{c, namespace}
}

// InstanceGroups returns a fake InstanceGroupInterface scoped to the given namespace.
func (c *FakeKopsV1alpha1) InstanceGroups(namespace string) v1alpha1.InstanceGroupInterface {
	return &FakeInstanceGroups{c, namespace}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// The fake returns a typed nil; no real server communication happens.
func (c *FakeKopsV1alpha1) RESTClient() rest.Interface {
	var ret *rest.RESTClient
	return ret
}<|fim▁end|>
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use super::core::{ af_array, AfError, Array, BinaryOp, Fromf64, HasAfEnum, RealNumber, ReduceByKeyInput, Scanable, HANDLE_ERROR, }; use libc::{c_double, c_int, c_uint}; extern "C" { fn af_sum(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_sum_nan(out: *mut af_array, input: af_array, dim: c_int, nanval: c_double) -> c_int; fn af_product(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_product_nan(out: *mut af_array, input: af_array, dim: c_int, val: c_double) -> c_int; fn af_min(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_max(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_all_true(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_any_true(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_count(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_sum_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_sum_nan_all(r: *mut c_double, i: *mut c_double, input: af_array, val: c_double) -> c_int; fn af_product_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_product_nan_all( r: *mut c_double, i: *mut c_double, input: af_array, val: c_double, ) -> c_int; fn af_min_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_max_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_all_true_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_any_true_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_count_all(r: *mut c_double, i: *mut c_double, input: af_array) -> c_int; fn af_imin(out: *mut af_array, idx: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_imax(out: *mut af_array, idx: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_imin_all(r: *mut c_double, i: *mut c_double, idx: *mut c_uint, input: af_array) -> c_int; fn af_imax_all(r: *mut c_double, i: *mut c_double, idx: 
*mut c_uint, input: af_array) -> c_int; fn af_accum(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_where(out: *mut af_array, input: af_array) -> c_int; fn af_diff1(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_diff2(out: *mut af_array, input: af_array, dim: c_int) -> c_int; fn af_sort(out: *mut af_array, input: af_array, dim: c_uint, ascend: bool) -> c_int; fn af_sort_index( o: *mut af_array, i: *mut af_array, inp: af_array, d: c_uint, a: bool, ) -> c_int; fn af_set_unique(out: *mut af_array, input: af_array, is_sorted: bool) -> c_int; fn af_set_union(out: *mut af_array, first: af_array, second: af_array, is_unq: bool) -> c_int; fn af_set_intersect(out: *mut af_array, one: af_array, two: af_array, is_unq: bool) -> c_int; fn af_sort_by_key( out_keys: *mut af_array, out_vals: *mut af_array, in_keys: af_array, in_vals: af_array, dim: c_uint, ascend: bool, ) -> c_int; fn af_scan(out: *mut af_array, inp: af_array, dim: c_int, op: c_uint, inclusive: bool) -> c_int; fn af_scan_by_key( out: *mut af_array, key: af_array, inp: af_array, dim: c_int, op: c_uint, inclusive: bool, ) -> c_int; fn af_all_true_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_any_true_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_count_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_max_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_min_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_product_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_product_by_key_nan( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: 
af_array, dim: c_int, nan_val: c_double, ) -> c_int; fn af_sum_by_key( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, ) -> c_int; fn af_sum_by_key_nan( keys_out: *mut af_array, vals_out: *mut af_array, keys: af_array, vals: af_array, dim: c_int, nan_val: c_double, ) -> c_int; fn af_max_ragged( val_out: *mut af_array, idx_out: *mut af_array, input: af_array, ragged_len: af_array, dim: c_int, ) -> c_int; } macro_rules! dim_reduce_func_def { ($doc_str: expr, $fn_name: ident, $ffi_name: ident, $out_type: ty) => { #[doc=$doc_str] pub fn $fn_name<T>(input: &Array<T>, dim: i32) -> Array<$out_type> where T: HasAfEnum, $out_type: HasAfEnum, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = $ffi_name(&mut temp as *mut af_array, input.get(), dim); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } }; } dim_reduce_func_def!( " Sum elements along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which the input Array will be reduced # Return Values Result Array after summing all elements along given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, sum}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = sum(&a, 0); print(&b); let c = sum(&a, 1); print(&c); ``` ", sum, af_sum, T::AggregateOutType ); dim_reduce_func_def!( " Compute product of elements along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which the input Array will be reduced # Return Values Result Array after multiplying all elements along given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, product}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = product(&a, 0); print(&b); let c = product(&a, 1); print(&c); ``` ", product, af_product, T::ProductOutType ); dim_reduce_func_def!( " Find minimum among elements of given dimension # Parameters - `input` - Input Array - `dim` - 
Dimension along which the input Array will be reduced # Return Values Result Array after finding minimum among elements along a given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, min}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = min(&a, 0); print(&b); let c = min(&a, 1); print(&c); ``` ", min, af_min, T::InType ); dim_reduce_func_def!( " Find maximum among elements of given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which the input Array will be reduced # Return Values Result Array after finding maximum among elements along a given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, max}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = max(&a, 0); print(&b); let c = max(&a, 1); print(&c); ``` ", max, af_max, T::InType ); dim_reduce_func_def!( " Find if all of the values along a given dimension in the Array are true # Parameters - `input` - Input Array - `dim` - Dimension along which the predicate is evaluated # Return Values Result Array that contains the result of `AND` operation of all elements along given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, all_true}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = all_true(&a, 0); print(&b); let c = all_true(&a, 1); print(&c); ``` ", all_true, af_all_true, bool ); dim_reduce_func_def!( " Find if any of the values along a given dimension in the Array are true # Parameters - `input` - Input Array - `dim` - Dimension along which the predicate is evaluated # Return Values Result Array that contains the result of `OR` operation of all elements along given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, any_true}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = any_true(&a, 0); print(&b); let c = any_true(&a, 1); print(&c); ``` ", any_true, af_any_true, bool ); 
dim_reduce_func_def!( " Count number of non-zero elements along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which the non-zero elements are counted # Return Values Result Array with number of non-zero elements along a given dimension # Examples ```rust use arrayfire::{Dim4, gt, print, randu, count}; let dims = Dim4::new(&[5, 3, 1, 1]); let cnst: f32 = 0.5; let a = gt(&randu::<f32>(dims), &cnst, false); print(&a); let b = count(&a, 0); print(&b); let c = count(&a, 1); print(&c); ``` ", count, af_count, u32 ); dim_reduce_func_def!( " Perform exclusive sum of elements along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which the exclusive scan operation is carried out # Return Values Result Array with exclusive sums of input Array elements along a given dimension # Examples ```rust use arrayfire::{Dim4, print, randu, accum}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = accum(&a, 0); print(&b); let c = accum(&a, 1); print(&c); ``` ", accum, af_accum, T::AggregateOutType ); dim_reduce_func_def!( " Calculate first order numerical difference along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which first order difference is calculated # Return Values Result Array with first order difference values # Examples ```rust use arrayfire::{Dim4, print, randu, diff1}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); print(&a); let b = diff1(&a, 0); print(&b); let c = diff1(&a, 1); print(&c); ``` ", diff1, af_diff1, T::InType ); dim_reduce_func_def!( " Calculate second order numerical difference along a given dimension # Parameters - `input` - Input Array - `dim` - Dimension along which second order difference is calculated # Return Values Result Array with second order difference values # Examples ```rust use arrayfire::{Dim4, print, randu, diff2}; let dims = Dim4::new(&[5, 3, 1, 1]); let a = randu::<f32>(dims); 
print(&a); let b = diff2(&a, 0); print(&b); let c = diff2(&a, 1); print(&c); ``` ", diff2, af_diff2, T::InType ); /// Sum along specific dimension using user specified value instead of `NAN` values /// /// Sum values of the `input` Array along `dim` dimension after replacing any `NAN` values in the /// Array with the value of the parameter `nanval`. /// /// # Parameters /// /// - `input` is the input Array /// - `dim` is reduction dimension /// - `nanval` is value with which all the `NAN` values of Array are replaced with /// /// # Return Values /// /// Array that is reduced along given dimension via addition operation pub fn sum_nan<T>(input: &Array<T>, dim: i32, nanval: f64) -> Array<T::AggregateOutType> where T: HasAfEnum, T::AggregateOutType: HasAfEnum, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_sum_nan(&mut temp as *mut af_array, input.get(), dim, nanval); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } /// Product of elements along specific dimension using user specified value instead of `NAN` values /// /// Compute product of the values of the `input` Array along `dim` dimension after replacing any `NAN` values in the Array with `nanval` value. /// /// # Parameters /// /// - `input` is the input Array /// - `dim` is reduction dimension /// - `nanval` is value with which all the `NAN` values of Array are replaced with /// /// # Return Values /// /// Array that is reduced along given dimension via multiplication operation pub fn product_nan<T>(input: &Array<T>, dim: i32, nanval: f64) -> Array<T::ProductOutType> where T: HasAfEnum, T::ProductOutType: HasAfEnum, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_product_nan(&mut temp as *mut af_array, input.get(), dim, nanval); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } macro_rules! 
all_reduce_func_def { ($doc_str: expr, $fn_name: ident, $ffi_name: ident, $assoc_type:ident) => { #[doc=$doc_str] pub fn $fn_name<T>( input: &Array<T>, ) -> ( <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType, <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType, ) where T: HasAfEnum, <T as HasAfEnum>::$assoc_type: HasAfEnum, <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType: HasAfEnum + Fromf64, { let mut real: f64 = 0.0; let mut imag: f64 = 0.0; unsafe { let err_val = $ffi_name( &mut real as *mut c_double, &mut imag as *mut c_double, input.get(), ); HANDLE_ERROR(AfError::from(err_val)); } ( <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType::fromf64(real), <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType::fromf64(imag), ) } }; } all_reduce_func_def!( " Sum all values of the Array # Parameters - `input` is the input Array # Return Values A tuple containing the summation result. Note: For non-complex data type Arrays, second value of tuple is zero. # Examples ```rust use arrayfire::{Dim4, print, randu, sum_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", sum_all(&a)); ``` ", sum_all, af_sum_all, AggregateOutType ); all_reduce_func_def!( " Product of all values of the Array # Parameters - `input` is the input Array # Return Values A tuple containing the product result. Note: For non-complex data type Arrays, second value of tuple is zero. # Examples ```rust use arrayfire::{Dim4, print, randu, product_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); let res = product_all(&a); println!(\"Result : {:?}\", res); ``` ", product_all, af_product_all, ProductOutType ); all_reduce_func_def!( " Find minimum among all values of the Array # Parameters - `input` is the input Array # Return Values A tuple containing the minimum value. Note: For non-complex data type Arrays, second value of tuple is zero. 
# Examples ```rust use arrayfire::{Dim4, print, randu, min_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", min_all(&a)); ``` ", min_all, af_min_all, InType ); all_reduce_func_def!( " Find maximum among all values of the Array # Parameters - `input` is the input Array # Return Values A tuple containing the maximum value. Note: For non-complex data type Arrays, second value of tuple is zero. # Examples ```rust use arrayfire::{Dim4, print, randu, max_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", max_all(&a)); ``` ", max_all, af_max_all, InType ); macro_rules! all_reduce_func_def2 { ($doc_str: expr, $fn_name: ident, $ffi_name: ident, $out_type:ty) => { #[doc=$doc_str] pub fn $fn_name<T>(input: &Array<T>) -> ($out_type, $out_type) where T: HasAfEnum, $out_type: HasAfEnum + Fromf64, { let mut real: f64 = 0.0; let mut imag: f64 = 0.0; unsafe { let err_val = $ffi_name( &mut real as *mut c_double, &mut imag as *mut c_double, input.get(), ); HANDLE_ERROR(AfError::from(err_val)); } (<$out_type>::fromf64(real), <$out_type>::fromf64(imag)) } }; } all_reduce_func_def2!( " Find if all values of Array are non-zero # Parameters - `input` is the input Array # Return Values A tuple containing the result of `AND` operation on all values of Array. # Examples ```rust use arrayfire::{Dim4, print, randu, all_true_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", all_true_all(&a)); ``` ", all_true_all, af_all_true_all, bool ); all_reduce_func_def2!( " Find if any value of Array is non-zero # Parameters - `input` is the input Array # Return Values A tuple containing the result of `OR` operation on all values of Array. 
# Examples ```rust use arrayfire::{Dim4, print, randu, any_true_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", any_true_all(&a)); ``` ", any_true_all, af_any_true_all, bool ); all_reduce_func_def2!( " Count number of non-zero values in the Array # Parameters - `input` is the input Array # Return Values A tuple containing the count of non-zero values in the Array. # Examples ```rust use arrayfire::{Dim4, print, randu, count_all}; let dims = Dim4::new(&[5, 5, 1, 1]); let a = randu::<f32>(dims); print(&a); println!(\"Result : {:?}\", count_all(&a)); ``` ", count_all, af_count_all, u64 ); /// Sum all values using user provided value for `NAN` /// /// Sum all the values of the `input` Array after replacing any `NAN` values with `val`. /// /// # Parameters /// /// - `input` is the input Array /// - `val` is the val that replaces all `NAN` values of the Array before reduction operation is /// performed. /// /// # Return Values /// /// A tuple of summation result. /// /// Note: For non-complex data type Arrays, second value of tuple is zero. 
pub fn sum_nan_all<T>( input: &Array<T>, val: f64, ) -> ( <<T as HasAfEnum>::AggregateOutType as HasAfEnum>::BaseType, <<T as HasAfEnum>::AggregateOutType as HasAfEnum>::BaseType, ) where T: HasAfEnum, <T as HasAfEnum>::AggregateOutType: HasAfEnum, <<T as HasAfEnum>::AggregateOutType as HasAfEnum>::BaseType: HasAfEnum + Fromf64, { let mut real: f64 = 0.0; let mut imag: f64 = 0.0; unsafe { let err_val = af_sum_nan_all( &mut real as *mut c_double, &mut imag as *mut c_double, input.get(), val, ); HANDLE_ERROR(AfError::from(err_val)); } ( <<T as HasAfEnum>::AggregateOutType as HasAfEnum>::BaseType::fromf64(real), <<T as HasAfEnum>::AggregateOutType as HasAfEnum>::BaseType::fromf64(imag), ) } /// Product of all values using user provided value for `NAN` /// /// Compute the product of all the values of the `input` Array after replacing any `NAN` values with `val` /// /// # Parameters /// /// - `input` is the input Array /// - `val` is the val that replaces all `NAN` values of the Array before reduction operation is /// performed. /// /// # Return Values /// /// A tuple of product result. /// /// Note: For non-complex data type Arrays, second value of tuple is zero. pub fn product_nan_all<T>( input: &Array<T>, val: f64, ) -> ( <<T as HasAfEnum>::ProductOutType as HasAfEnum>::BaseType, <<T as HasAfEnum>::ProductOutType as HasAfEnum>::BaseType, ) where T: HasAfEnum, <T as HasAfEnum>::ProductOutType: HasAfEnum, <<T as HasAfEnum>::ProductOutType as HasAfEnum>::BaseType: HasAfEnum + Fromf64,<|fim▁hole|>{ let mut real: f64 = 0.0; let mut imag: f64 = 0.0; unsafe { let err_val = af_product_nan_all( &mut real as *mut c_double, &mut imag as *mut c_double, input.get(), val, ); HANDLE_ERROR(AfError::from(err_val)); } ( <<T as HasAfEnum>::ProductOutType as HasAfEnum>::BaseType::fromf64(real), <<T as HasAfEnum>::ProductOutType as HasAfEnum>::BaseType::fromf64(imag), ) } macro_rules! 
dim_ireduce_func_def { ($doc_str: expr, $fn_name: ident, $ffi_name: ident, $out_type: ident) => { #[doc=$doc_str] pub fn $fn_name<T>(input: &Array<T>, dim: i32) -> (Array<T::$out_type>, Array<u32>) where T: HasAfEnum, T::$out_type: HasAfEnum, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let mut idx: af_array = std::ptr::null_mut(); let err_val = $ffi_name( &mut temp as *mut af_array, &mut idx as *mut af_array, input.get(), dim, ); HANDLE_ERROR(AfError::from(err_val)); (temp.into(), idx.into()) } } }; } dim_ireduce_func_def!(" Find minimum value along given dimension and their corresponding indices # Parameters - `input` - Input Array - `dim` - Dimension along which the input Array will be reduced # Return Values A tuple of Arrays: Array minimum values and Array containing their index along the reduced dimension. ", imin, af_imin, InType); dim_ireduce_func_def!(" Find maximum value along given dimension and their corresponding indices # Parameters - `input` - Input Array - `dim` - Dimension along which the input Array will be reduced # Return Values A tuple of Arrays: Array maximum values and Array containing their index along the reduced dimension. ", imax, af_imax, InType); macro_rules! 
all_ireduce_func_def { ($doc_str: expr, $fn_name: ident, $ffi_name: ident, $assoc_type:ident) => { #[doc=$doc_str] pub fn $fn_name<T>( input: &Array<T>, ) -> ( <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType, <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType, u32, ) where T: HasAfEnum, <T as HasAfEnum>::$assoc_type: HasAfEnum, <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType: HasAfEnum + Fromf64, { let mut real: f64 = 0.0; let mut imag: f64 = 0.0; let mut temp: u32 = 0; unsafe { let err_val = $ffi_name( &mut real as *mut c_double, &mut imag as *mut c_double, &mut temp as *mut c_uint, input.get(), ); HANDLE_ERROR(AfError::from(err_val)); } ( <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType::fromf64(real), <<T as HasAfEnum>::$assoc_type as HasAfEnum>::BaseType::fromf64(imag), temp, ) } }; } all_ireduce_func_def!( " Find minimum and it's index in the whole Array # Parameters `input` - Input Array # Return Values A triplet with * minimum element of Array in the first component. * second component of value zero if Array is of non-complex type. * index of minimum element in the third component. ", imin_all, af_imin_all, InType ); all_ireduce_func_def!( " Find maximum and it's index in the whole Array # Parameters `input` - Input Array # Return Values A triplet with - maximum element of Array in the first component. - second component of value zero if Array is of non-complex type. - index of maximum element in the third component. ", imax_all, af_imax_all, InType ); /// Locate the indices of non-zero elements. /// /// The locations are provided by flattening the input into a linear array. /// /// # Parameters /// /// - `input` - Input Array /// /// # Return Values /// /// Array of indices where the input Array has non-zero values. 
pub fn locate<T: HasAfEnum>(input: &Array<T>) -> Array<u32> {
    unsafe {
        let mut out: af_array = std::ptr::null_mut();
        let status = af_where(&mut out as *mut af_array, input.get());
        let err = AfError::from(status);
        HANDLE_ERROR(err);
        out.into()
    }
}

/// Sort a multidimensional Array along a given dimension
///
/// # Parameters
///
/// - `input` - Input Array
/// - `dim` - Dimension along which to sort
/// - `ascending` - when `true` the output is sorted in ascending order,
///   otherwise in descending order
///
/// # Return Values
///
/// Sorted Array.
pub fn sort<T>(input: &Array<T>, dim: u32, ascending: bool) -> Array<T>
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut sorted: af_array = std::ptr::null_mut();
        let status = af_sort(&mut sorted as *mut af_array, input.get(), dim, ascending);
        let err = AfError::from(status);
        HANDLE_ERROR(err);
        sorted.into()
    }
}

/// Sort the values in input Arrays
///
/// # Parameters
///
/// - `input` - Input Array
/// - `dim` - Dimension along which to sort
/// - `ascending` - when `true` the output is sorted in ascending order,
///   otherwise in descending order
///
/// # Return Values
///
/// A tuple of two Arrays: the first holds the sorted values, the second the
/// original indices of those values within `input`.
pub fn sort_index<T>(input: &Array<T>, dim: u32, ascending: bool) -> (Array<T>, Array<u32>)
where
    T: HasAfEnum + RealNumber,
{
    unsafe {
        let mut sorted_vals: af_array = std::ptr::null_mut();
        let mut orig_idxs: af_array = std::ptr::null_mut();
        let status = af_sort_index(
            &mut sorted_vals as *mut af_array,
            &mut orig_idxs as *mut af_array,
            input.get(),
            dim,
            ascending,
        );
        let err = AfError::from(status);
        HANDLE_ERROR(err);
        (sorted_vals.into(), orig_idxs.into())
    }
}

/// Sort a multidimensional Array based on a separate Array of keys
///
/// # Parameters
///
/// - `keys` - Array with key values
/// - `vals` - Array with input values
/// - `dim` - Dimension along which to sort
/// - `ascending` - when `true` the output is sorted in ascending key order,
///   otherwise descending
///
/// # Return Values
///
/// A tuple of two Arrays: the first holds the sorted keys, the second the
/// values reordered to match.
pub fn sort_by_key<K, V>(
    keys: &Array<K>,
    vals: &Array<V>,
    dim: u32,
    ascending: bool,
) -> (Array<K>, Array<V>)
where
    K: HasAfEnum + RealNumber,
    V: HasAfEnum,
{
    unsafe {
        let mut out_keys: af_array = std::ptr::null_mut();
        let mut out_vals: af_array = std::ptr::null_mut();
        let status = af_sort_by_key(
            &mut out_keys as *mut af_array,
            &mut out_vals as *mut af_array,
            keys.get(),
            vals.get(),
            dim,
            ascending,
        );
        let err = AfError::from(status);
        HANDLE_ERROR(err);
        (out_keys.into(), out_vals.into())
    }
}

/// Find unique values from a Set
///
/// # Parameters
///
/// - `input` - Input Array
/// - `is_sorted` - boolean flag; when `true` it indicates the `input`
///   Array is already sorted
///
/// # Return Values
///
/// An Array of unique values from the input Array.
pub fn set_unique<T>(input: &Array<T>, is_sorted: bool) -> Array<T> where T: HasAfEnum + RealNumber, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_set_unique(&mut temp as *mut af_array, input.get(), is_sorted); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } /// Find union of two sets /// /// # Parameters /// /// - `first` is one of the input sets /// - `second` is the other of the input sets /// - `is_unique` is a boolean value indicates if the input sets are unique /// /// # Return Values /// /// An Array with union of the input sets pub fn set_union<T>(first: &Array<T>, second: &Array<T>, is_unique: bool) -> Array<T> where T: HasAfEnum + RealNumber, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_set_union( &mut temp as *mut af_array, first.get(), second.get(), is_unique, ); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } /// Find intersection of two sets /// /// # Parameters /// /// - `first` is one of the input sets /// - `second` is the other of the input sets /// - `is_unique` is a boolean value indicates if the input sets are unique /// /// # Return Values /// /// An Array with intersection of the input sets pub fn set_intersect<T>(first: &Array<T>, second: &Array<T>, is_unique: bool) -> Array<T> where T: HasAfEnum + RealNumber, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_set_intersect( &mut temp as *mut af_array, first.get(), second.get(), is_unique, ); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } /// Generalized scan /// /// # Parameters /// /// - `input` is the data on which scan is to be performed /// - `dim` is the dimension along which scan operation is to be performed /// - `op` takes value of [BinaryOp](./enum.BinaryOp.html) enum indicating /// the type of scan operation /// - `inclusive` says if inclusive/exclusive scan is to be performed /// /// # Return Values /// /// Output Array of scanned input pub fn scan<T>( input: &Array<T>, dim: i32, 
op: BinaryOp, inclusive: bool, ) -> Array<T::AggregateOutType> where T: HasAfEnum, T::AggregateOutType: HasAfEnum, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_scan( &mut temp as *mut af_array, input.get(), dim, op as u32, inclusive, ); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } /// Generalized scan by key /// /// # Parameters /// /// - `key` is the key Array /// - `input` is the data on which scan is to be performed /// - `dim` is the dimension along which scan operation is to be performed /// - `op` takes value of [BinaryOp](./enum.BinaryOp.html) enum indicating /// the type of scan operation /// - `inclusive` says if inclusive/exclusive scan is to be performed /// /// # Return Values /// /// Output Array of scanned input pub fn scan_by_key<K, V>( key: &Array<K>, input: &Array<V>, dim: i32, op: BinaryOp, inclusive: bool, ) -> Array<V::AggregateOutType> where V: HasAfEnum, V::AggregateOutType: HasAfEnum, K: HasAfEnum + Scanable, { unsafe { let mut temp: af_array = std::ptr::null_mut(); let err_val = af_scan_by_key( &mut temp as *mut af_array, key.get(), input.get(), dim, op as u32, inclusive, ); HANDLE_ERROR(AfError::from(err_val)); temp.into() } } macro_rules! 
dim_reduce_by_key_func_def { ($brief_str: expr, $ex_str: expr, $fn_name: ident, $ffi_name: ident, $out_type: ty) => { #[doc=$brief_str] /// # Parameters /// /// - `keys` - key Array /// - `vals` - value Array /// - `dim` - Dimension along which the input Array is reduced /// /// # Return Values /// /// Tuple of Arrays, with output keys and values after reduction /// #[doc=$ex_str] pub fn $fn_name<KeyType, ValueType>( keys: &Array<KeyType>, vals: &Array<ValueType>, dim: i32, ) -> (Array<KeyType>, Array<$out_type>) where KeyType: ReduceByKeyInput, ValueType: HasAfEnum, $out_type: HasAfEnum, { unsafe { let mut out_keys: af_array = std::ptr::null_mut(); let mut out_vals: af_array = std::ptr::null_mut(); let err_val = $ffi_name( &mut out_keys as *mut af_array, &mut out_vals as *mut af_array, keys.get(), vals.get(), dim, ); HANDLE_ERROR(AfError::from(err_val)); (out_keys.into(), out_vals.into()) } } }; } dim_reduce_by_key_func_def!( " Key based AND of elements along a given dimension All positive non-zero values are considered true, while negative and zero values are considered as false. ", " # Examples ```rust use arrayfire::{Dim4, print, randu, all_true_by_key}; let dims = Dim4::new(&[5, 3, 1, 1]); let vals = randu::<f32>(dims); let keys = randu::<u32>(Dim4::new(&[5, 1, 1, 1])); print(&vals); print(&keys); let (out_keys, out_vals) = all_true_by_key(&keys, &vals, 0); print(&out_keys); print(&out_vals); ``` ", all_true_by_key, af_all_true_by_key, ValueType::AggregateOutType ); dim_reduce_by_key_func_def!( " Key based OR of elements along a given dimension All positive non-zero values are considered true, while negative and zero values are considered as false. 
", " # Examples ```rust use arrayfire::{Dim4, print, randu, any_true_by_key}; let dims = Dim4::new(&[5, 3, 1, 1]); let vals = randu::<f32>(dims); let keys = randu::<u32>(Dim4::new(&[5, 1, 1, 1])); print(&vals); print(&keys); let (out_keys, out_vals) = any_true_by_key(&keys, &vals, 0); print(&out_keys); print(&out_vals); ``` ", any_true_by_key, af_any_true_by_key, ValueType::AggregateOutType ); dim_reduce_by_key_func_def!( "Find total count of elements with similar keys along a given dimension", "", count_by_key, af_count_by_key, ValueType::AggregateOutType ); dim_reduce_by_key_func_def!( "Find maximum among values of similar keys along a given dimension", "", max_by_key, af_max_by_key, ValueType::AggregateOutType ); dim_reduce_by_key_func_def!( "Find minimum among values of similar keys along a given dimension", "", min_by_key, af_min_by_key, ValueType::AggregateOutType ); dim_reduce_by_key_func_def!( "Find product of all values with similar keys along a given dimension", "", product_by_key, af_product_by_key, ValueType::ProductOutType ); dim_reduce_by_key_func_def!( "Find sum of all values with similar keys along a given dimension", "", sum_by_key, af_sum_by_key, ValueType::AggregateOutType ); macro_rules! dim_reduce_by_key_nan_func_def { ($brief_str: expr, $ex_str: expr, $fn_name: ident, $ffi_name: ident, $out_type: ty) => { #[doc=$brief_str] /// /// This version of sum by key can replaced all NaN values in the input /// with a user provided value before performing the reduction operation. 
/// # Parameters /// /// - `keys` - key Array /// - `vals` - value Array /// - `dim` - Dimension along which the input Array is reduced /// /// # Return Values /// /// Tuple of Arrays, with output keys and values after reduction /// #[doc=$ex_str] pub fn $fn_name<KeyType, ValueType>( keys: &Array<KeyType>, vals: &Array<ValueType>, dim: i32, replace_value: f64, ) -> (Array<KeyType>, Array<$out_type>) where KeyType: ReduceByKeyInput, ValueType: HasAfEnum, $out_type: HasAfEnum, { unsafe { let mut out_keys: af_array = std::ptr::null_mut(); let mut out_vals: af_array = std::ptr::null_mut(); let err_val = $ffi_name( &mut out_keys as *mut af_array, &mut out_vals as *mut af_array, keys.get(), vals.get(), dim, replace_value, ); HANDLE_ERROR(AfError::from(err_val)); (out_keys.into(), out_vals.into()) } } }; } dim_reduce_by_key_nan_func_def!( "Compute sum of all values with similar keys along a given dimension", "", sum_by_key_nan, af_sum_by_key_nan, ValueType::AggregateOutType ); dim_reduce_by_key_nan_func_def!( "Compute product of all values with similar keys along a given dimension", "", product_by_key_nan, af_product_by_key_nan, ValueType::ProductOutType ); /// Max reduction along given axis as per ragged lengths provided /// /// # Parameters /// /// - `input` contains the input values to be reduced /// - `ragged_len` array containing number of elements to use when reducing along `dim` /// - `dim` is the dimension along which the max operation occurs /// /// # Return Values /// /// Tuple of Arrays: /// - First element: An Array containing the maximum ragged values in `input` along `dim` /// according to `ragged_len` /// - Second Element: An Array containing the locations of the maximum ragged values in /// `input` along `dim` according to `ragged_len` /// /// # Examples /// ```rust /// use arrayfire::{Array, dim4, print, randu, max_ragged}; /// let vals: [f32; 6] = [1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]; /// let rlens: [u32; 2] = [9, 2]; /// let varr = Array::new(&vals, 
dim4![3, 2]); /// let rarr = Array::new(&rlens, dim4![1, 2]); /// print(&varr); /// // 1 4 /// // 2 5 /// // 3 6 /// print(&rarr); // numbers of elements to participate in reduction along given axis /// // 9 2 /// let (out, idx) = max_ragged(&varr, &rarr, 0); /// print(&out); /// // 3 5 /// print(&idx); /// // 2 1 //Since 3 is max element for given length 9 along first column /// //Since 5 is max element for given length 2 along second column /// ``` pub fn max_ragged<T>( input: &Array<T>, ragged_len: &Array<u32>, dim: i32, ) -> (Array<T::InType>, Array<u32>) where T: HasAfEnum, T::InType: HasAfEnum, { unsafe { let mut out_vals: af_array = std::ptr::null_mut(); let mut out_idxs: af_array = std::ptr::null_mut(); let err_val = af_max_ragged( &mut out_vals as *mut af_array, &mut out_idxs as *mut af_array, input.get(), ragged_len.get(), dim, ); HANDLE_ERROR(AfError::from(err_val)); (out_vals.into(), out_idxs.into()) } } #[cfg(test)] mod tests { use super::super::core::c32; use super::{imax_all, imin_all, product_nan_all, sum_all, sum_nan_all}; use crate::core::set_device; use crate::randu; #[test] fn all_reduce_api() { set_device(0); let a = randu!(c32; 10, 10); println!("Reduction of complex f32 matrix: {:?}", sum_all(&a)); let b = randu!(bool; 10, 10); println!("reduction of bool matrix: {:?}", sum_all(&b)); println!( "reduction of complex f32 matrix after replacing nan with 1.0: {:?}", product_nan_all(&a, 1.0) ); println!( "reduction of bool matrix after replacing nan with 0.0: {:?}", sum_nan_all(&b, 0.0) ); } #[test] fn all_ireduce_api() { set_device(0); let a = randu!(c32; 10); println!("Reduction of complex f32 matrix: {:?}", imin_all(&a)); let b = randu!(u32; 10); println!("reduction of bool matrix: {:?}", imax_all(&b)); } }<|fim▁end|>
<|file_name|>svg.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public<|fim▁hole|> <%namespace name="helpers" file="/helpers.mako.rs" /> <% data.new_style_struct("SVG", inherited=False, gecko_name="SVGReset") %> ${helpers.single_keyword("dominant-baseline", """auto use-script no-change reset-size ideographic alphabetic hanging mathematical central middle text-after-edge text-before-edge""", products="gecko")} ${helpers.single_keyword("vector-effect", "none non-scaling-stroke", products="gecko")} // Section 13 - Gradients and Patterns ${helpers.predefined_type("stop-opacity", "Opacity", "1.0", products="gecko")} // Section 15 - Filter Effects ${helpers.predefined_type("flood-opacity", "Opacity", "1.0", products="gecko")} // CSS Masking Module Level 1 // https://www.w3.org/TR/css-masking-1/ ${helpers.single_keyword("mask-type", "luminance alpha", products="gecko")}<|fim▁end|>
* License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<|file_name|>apps.py<|end_file_name|><|fim▁begin|>from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _<|fim▁hole|> class AuthConfig(AppConfig): name = 'esia_auth' verbose_name = _('Esia META')<|fim▁end|>
<|file_name|>test_graph.py<|end_file_name|><|fim▁begin|>import pytest # TODO: use same globals for reverse operations such as add, remove GRAPHS = [ ({}, [], []), ({'nodeA': {}}, ['nodeA'], []), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, ['nodeA', 'nodeB'], [('nodeA', 'nodeB')]), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}}, ['nodeA', 'nodeB'], [('nodeA', 'nodeB'), ('nodeB', 'nodeA')]), ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeB': {'nodeA': 'weight'}, 'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}, ['nodeA', 'nodeB', 'nodeC'], [('nodeA', 'nodeB'), ('nodeA', 'nodeC'), ('nodeB', 'nodeA'), ('nodeC', 'nodeA'), ('nodeC', 'nodeC')]), ] GRAPHS_FOR_NODE_INSERT = [ ({}, 'nodeN', {'nodeN': {}}), ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}}, 'nodeN', {'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeN': {}}), ({'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'}, 'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'}}, 'nodeN', {'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'}, 'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'}, 'nodeN': {}}), ] GRAPHS_ADD_EDGE = [ ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}}, "nodeX", "nodeY", {'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}, 'nodeX': {'nodeY': 'weight'}, 'nodeY': {}}), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}}, 'nodeA', 'nodeB', {'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}}), ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeB': {'nodeA': 'weight'}, 'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}, 'nodeB', 'nodeC', {'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeB': {'nodeA': 'weight', 'nodeC': 'weight'}, 'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}), ] GRAPHS_DEL_NODE = [ ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}, 'nodeX': {'nodeY': 'weight'}, 'nodeY': {}}, 'nodeA', {'nodeB': {}, 'nodeX': {'nodeY': 'weight'}, 'nodeY': {}}), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {'nodeA': 'weight'}}, 
'nodeB', {'nodeA': {}}), ] GRAPHS_DEL_EDGE = [ ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeA', 'nodeB', {'nodeA': {}, 'nodeB': {}}), ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeB': {}, 'nodeC': {}}, 'nodeA', 'nodeB', {'nodeA': {'nodeC': 'weight'}, 'nodeB': {}, 'nodeC': {}}) ] NEIGHBORS = [ ({'nodeA': {}, 'nodeB': {'nodeA': 'weight'}}, 'nodeB', ['nodeA']), ({'nodeA': {}, 'nodeB': {'nodeA': 'weight'}}, 'nodeA', []), ({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}, 'nodeB': {'nodeA': 'weight'}, 'nodeC': {'nodeA': 'weight'}}, 'nodeA', ['nodeB', 'nodeC']), ] ADJACENT = [ ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeA', 'nodeB', True), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeB', 'nodeA', False), ] ADJACENT_NODES_GONE = [ ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeX', 'nodeB'), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeX', 'nodeY'), ({'nodeA': {'nodeB': 'weight'}, 'nodeB': {}}, 'nodeA', 'nodeY'), ] NODE_TRAVERSAL_BREADTH = [ ({'A': {'B': 'weight', 'C': 'weight'}, 'B': {'A': 'weight', 'D': 'weight', 'E': 'weight'}, 'C': {'A': 'weight', 'F': 'weight', 'G': 'weight'}, 'D': {'B': 'weight', 'H': 'weight'}, 'E': {'B': 'weight'}, 'F': {'C': 'weight'}, 'G': {'C': 'weight'}, 'H': {'D': 'weight'}}, 'A', ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']), ({'A': {'B': 'weight', 'C': 'weight'}, 'B': {'C': 'weight', 'D': 'weight'}, 'C': {}, 'D': {}}, 'A', ['A', 'B', 'C', 'D']), ({'a': {}}, 'a', ['a']), ] NODE_TRAVERSAL_DEPTH = [ ({'A': {'B': 'weight', 'E': 'weight'}, "B": {'C': 'weight', 'D': 'weight'}, 'E': {}, 'C': {}, 'D': {}}, 'A', ['A', 'E', 'B', 'D', 'C']), ({'A': {'B': 'weight', 'E': 'weight'}, "B": {'C': 'weight', 'D': 'weight'}, 'E': {}, 'C': {'A': 'weight', 'E': 'weight'}, 'D': {}}, 'A', ['A', 'E', 'B', 'D', 'C']), ({'a': {'b': 'weight', 'g': 'weight'}, 'b': {'c': 'weight'}, 'g': {'h': 'weight', 'j': 'weight'}, 'c': {'d': 'weight'}, 'h': {'i': 'weight'}, 'j': {'k': 'weight'}, 'd': {'e': 'weight', 'f': 'weight'}, 'i': {}, 'k': {}, 
'e': {}, 'f': {}}, 'a', ['a', 'g', 'j', 'k', 'h', 'i', 'b', 'c', 'd', 'f', 'e']), ({'a': {}}, 'a', ['a']), ] GET_WEIGHT = [ ({'A': {'B': 'weight1', 'E': 'weight2'}, "B": {'C': 'weight3', 'D': 'weight4'}, 'E': {}, 'C': {}, 'D': {}}, 'A', 'B', 'weight1',), ({'A': {'B': 'weight1', 'E': 'weight2'}, "B": {'C': 'weight3', 'D': 'weight4'}, 'E': {}, 'C': {}, 'D': {}}, 'B', 'C', 'weight3',), ({'A': {'B': 'weight1', 'E': 'weight2'}, "B": {'C': 'weight3', 'D': 'weight4'}, 'E': {}, 'C': {},<|fim▁hole|> 'B', 'D', 'weight4',), ] @pytest.fixture def graph_fixture(scope='function'): from graph import Graph return Graph() @pytest.mark.parametrize(("built_graph", "node", "expected"), GRAPHS_DEL_NODE) def test_del_node_exists(graph_fixture, built_graph, node, expected): graph_fixture._container = built_graph graph_fixture.del_node(node) assert graph_fixture._container == expected @pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS) def test_nodes(graph_fixture, built_graph, node_list, edge_list): graph_fixture._container = built_graph result = graph_fixture.nodes() assert set(result) == set(node_list) @pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS) def test_edges(graph_fixture, built_graph, node_list, edge_list): graph_fixture._container = built_graph result = graph_fixture.edges() assert set(edge_list) == set(result) @pytest.mark.parametrize(("built_graph", "new_node", "expected"), GRAPHS_FOR_NODE_INSERT) def test_add_node(graph_fixture, built_graph, new_node, expected): graph_fixture._container = built_graph graph_fixture.add_node(new_node) assert graph_fixture._container == expected @pytest.mark.parametrize(("built_graph", "n1", "n2", "expected"), GRAPHS_ADD_EDGE) def test_add_edge(graph_fixture, built_graph, n1, n2, expected): graph_fixture._container = built_graph graph_fixture.add_edge(n1, n2) assert graph_fixture._container == expected def test_del_node_not_exists(graph_fixture): graph_fixture._container = {'nodeA': {'nodeA': 
'weight'}, 'nodeB': {}} with pytest.raises(KeyError): graph_fixture.del_node('nodeX') @pytest.mark.parametrize(("built_graph", "node1", "node2", "expected"), GRAPHS_DEL_EDGE) def test_del_edge(graph_fixture, built_graph, node1, node2, expected): graph_fixture._container = built_graph graph_fixture.del_edge(node1, node2) assert graph_fixture._container == expected def test_del_edge_not_exists(graph_fixture): graph_fixture._container = {'nodeA': {}} with pytest.raises(ValueError): graph_fixture.del_edge('nodeA', 'nodeB') def test_has_node_true(graph_fixture): graph_fixture._container = {'nodeA': {}} assert graph_fixture.has_node('nodeA') def test_has_node_false(graph_fixture): graph_fixture._container = {'nodeA': {}} assert not graph_fixture.has_node('nodeB') @pytest.mark.parametrize(("built_graph", 'node', 'expected'), NEIGHBORS) def test_neighbors(graph_fixture, built_graph, node, expected): graph_fixture._container = built_graph assert set(graph_fixture.neighbors(node)) == set(expected) def test_neighbors_none(graph_fixture): graph_fixture._container = {'nodeA': {}} with pytest.raises(KeyError): graph_fixture.neighbors('nodeB') @pytest.mark.parametrize(('built_graph', 'n1', 'n2', 'expected'), ADJACENT) def test_adjacent(graph_fixture, built_graph, n1, n2, expected): # if n1, n2 don't exist: raise error graph_fixture._container = built_graph assert graph_fixture.adjacent(n1, n2) == expected @pytest.mark.parametrize(('built_graph', 'n1', 'n2'), ADJACENT_NODES_GONE) def test_adjacent_not_exists(graph_fixture, built_graph, n1, n2): # if n1, n2 don't exist: raise error graph_fixture._container = built_graph with pytest.raises(KeyError): graph_fixture.adjacent(n1, n2) @pytest.mark.parametrize(('built_graph', 'node', 'expected'), NODE_TRAVERSAL_BREADTH) def test_traverse_breadth(graph_fixture, built_graph, node, expected): graph_fixture._container = built_graph assert graph_fixture.breadth_first_traversal(node) == expected def test_empty_graph_breadth(graph_fixture): 
graph_fixture._container = {} with pytest.raises(IndexError): graph_fixture.breadth_first_traversal('X') @pytest.mark.parametrize(('built_graph', 'node', 'expected'), NODE_TRAVERSAL_DEPTH) def test_traverse_depth(graph_fixture, built_graph, node, expected): graph_fixture._container = built_graph assert graph_fixture.depth_first_traversal(node) == expected def test_traverse_depth_empty(graph_fixture): graph_fixture._container = {} with pytest.raises(IndexError): graph_fixture.depth_first_traversal('node') @pytest.mark.parametrize(('built_graph', 'n1', 'n2', 'expected'), GET_WEIGHT) def test_get_weight(graph_fixture, built_graph, n1, n2, expected): graph_fixture._container = built_graph assert graph_fixture.get_weight(n1, n2) == expected<|fim▁end|>
'D': {}},
<|file_name|>test_executor_check_feed.py<|end_file_name|><|fim▁begin|># Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy<|fim▁hole|>import paddle.fluid as fluid class TestExecutor(unittest.TestCase): def net(self): lr = fluid.data(name="lr", shape=[1], dtype='float32') x = fluid.data(name="x", shape=[None, 1], dtype='float32') y = fluid.data(name="y", shape=[None, 1], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) cost = fluid.layers.square_error_cost(input=y_predict, label=y) avg_cost = fluid.layers.mean(cost) opt = fluid.optimizer.Adam(learning_rate=lr) opt.minimize(avg_cost) return lr, avg_cost def test_program_check_feed(self): main_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): with fluid.scope_guard(scope): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) lr, cost = self.net() exe.run(startup_program) train_data = [[1.0], [2.0], [3.0], [4.0]] y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 with self.assertRaises(ValueError): exe.run(feed={'x': train_data, 'lr': a}, fetch_list=[lr, cost], return_numpy=False, use_prune=True) def test_compiled_program_check_feed(self): main_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() with fluid.program_guard(main_program, startup_program): with fluid.scope_guard(scope): 
cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) lr, cost = self.net() exe.run(startup_program) compiled_prog = fluid.CompiledProgram( main_program).with_data_parallel(loss_name=cost.name) train_data = [[1.0], [2.0], [3.0], [4.0]] y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 with self.assertRaises(ValueError): exe.run(compiled_prog, feed={'x': train_data, 'lr': a}, fetch_list=[lr, cost], return_numpy=False, use_prune=True) if __name__ == '__main__': unittest.main()<|fim▁end|>
import paddle.fluid.core as core
<|file_name|>env_win.cc<|end_file_name|><|fim▁begin|>// This file contains source that originates from: // http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/env_win32.h // http://code.google.com/p/leveldbwin/source/browse/trunk/win32_impl_src/port_win32.cc // Those files dont' have any explict license headers but the // project (http://code.google.com/p/leveldbwin/) lists the 'New BSD License' // as the license. #if defined(LEVELDB_PLATFORM_WINDOWS) #include <map> #undef UNICODE #include "leveldb/env.h" #include "port/port.h" #include "leveldb/slice.h" #include "util/logging.h" #include <shlwapi.h> #include <process.h> #include <cstring> #include <stdio.h> #include <errno.h> #include <io.h> #include <algorithm> #ifdef max #undef max #endif #ifndef va_copy #define va_copy(d,s) ((d) = (s)) #endif #if defined DeleteFile #undef DeleteFile #endif //Declarations namespace leveldb { namespace Win32 { #define DISALLOW_COPY_AND_ASSIGN(TypeName) \ TypeName(const TypeName&); \ void operator=(const TypeName&) std::string GetCurrentDir(); std::wstring GetCurrentDirW(); static const std::string CurrentDir = GetCurrentDir(); static const std::wstring CurrentDirW = GetCurrentDirW(); std::string& ModifyPath(std::string& path); std::wstring& ModifyPath(std::wstring& path); std::string GetLastErrSz(); std::wstring GetLastErrSzW(); size_t GetPageSize(); typedef void (*ScheduleProc)(void*) ;<|fim▁hole|>{ WorkItemWrapper(ScheduleProc proc_,void* content_); ScheduleProc proc; void* pContent; }; DWORD WINAPI WorkItemWrapperProc(LPVOID pContent); class Win32SequentialFile : public SequentialFile { public: friend class Win32Env; virtual ~Win32SequentialFile(); virtual Status Read(size_t n, Slice* result, char* scratch); virtual Status Skip(uint64_t n); BOOL isEnable(); private: BOOL _Init(); void _CleanUp(); Win32SequentialFile(const std::string& fname); std::string _filename; ::HANDLE _hFile; DISALLOW_COPY_AND_ASSIGN(Win32SequentialFile); }; class Win32RandomAccessFile 
: public RandomAccessFile { public: friend class Win32Env; virtual ~Win32RandomAccessFile(); virtual Status Read(uint64_t offset, size_t n, Slice* result,char* scratch) const; BOOL isEnable(); private: BOOL _Init(LPCWSTR path); void _CleanUp(); Win32RandomAccessFile(const std::string& fname); HANDLE _hFile; const std::string _filename; DISALLOW_COPY_AND_ASSIGN(Win32RandomAccessFile); }; class Win32MapFile : public WritableFile { public: Win32MapFile(const std::string& fname); ~Win32MapFile(); virtual Status Append(const Slice& data); virtual Status Close(); virtual Status Flush(); virtual Status Sync(); BOOL isEnable(); private: std::string _filename; HANDLE _hFile; size_t _page_size; size_t _map_size; // How much extra memory to map at a time char* _base; // The mapped region HANDLE _base_handle; char* _limit; // Limit of the mapped region char* _dst; // Where to write next (in range [base_,limit_]) char* _last_sync; // Where have we synced up to uint64_t _file_offset; // Offset of base_ in file //LARGE_INTEGER file_offset_; // Have we done an munmap of unsynced data? 
bool _pending_sync; // Roundup x to a multiple of y static size_t _Roundup(size_t x, size_t y); size_t _TruncateToPageBoundary(size_t s); bool _UnmapCurrentRegion(); bool _MapNewRegion(); DISALLOW_COPY_AND_ASSIGN(Win32MapFile); BOOL _Init(LPCWSTR Path); }; class Win32FileLock : public FileLock { public: friend class Win32Env; virtual ~Win32FileLock(); BOOL isEnable(); private: BOOL _Init(LPCWSTR path); void _CleanUp(); Win32FileLock(const std::string& fname); HANDLE _hFile; std::string _filename; DISALLOW_COPY_AND_ASSIGN(Win32FileLock); }; class Win32Logger : public Logger { public: friend class Win32Env; virtual ~Win32Logger(); virtual void Logv(const char* format, va_list ap); private: explicit Win32Logger(WritableFile* pFile); WritableFile* _pFileProxy; DISALLOW_COPY_AND_ASSIGN(Win32Logger); }; class Win32Env : public Env { public: Win32Env(); virtual ~Win32Env(); virtual Status NewSequentialFile(const std::string& fname, SequentialFile** result); virtual Status NewRandomAccessFile(const std::string& fname, RandomAccessFile** result); virtual Status NewWritableFile(const std::string& fname, WritableFile** result); virtual bool FileExists(const std::string& fname); virtual Status GetChildren(const std::string& dir, std::vector<std::string>* result); virtual Status DeleteFile(const std::string& fname); virtual Status CreateDir(const std::string& dirname); virtual Status DeleteDir(const std::string& dirname); virtual Status GetFileSize(const std::string& fname, uint64_t* file_size); virtual Status RenameFile(const std::string& src, const std::string& target); virtual Status LockFile(const std::string& fname, FileLock** lock); virtual Status UnlockFile(FileLock* lock); virtual void Schedule( void (*function)(void* arg), void* arg); virtual void StartThread(void (*function)(void* arg), void* arg); virtual Status GetTestDirectory(std::string* path); //virtual void Logv(WritableFile* log, const char* format, va_list ap); virtual Status NewLogger(const std::string& 
fname, Logger** result); virtual uint64_t NowMicros(); virtual void SleepForMicroseconds(int micros); }; void ToWidePath(const std::string& value, std::wstring& target) { wchar_t buffer[MAX_PATH]; MultiByteToWideChar(CP_ACP, 0, value.c_str(), -1, buffer, MAX_PATH); target = buffer; } void ToNarrowPath(const std::wstring& value, std::string& target) { char buffer[MAX_PATH]; WideCharToMultiByte(CP_ACP, 0, value.c_str(), -1, buffer, MAX_PATH, NULL, NULL); target = buffer; } std::string GetCurrentDir() { CHAR path[MAX_PATH]; ::GetModuleFileNameA(::GetModuleHandleA(NULL),path,MAX_PATH); *strrchr(path,'\\') = 0; return std::string(path); } std::wstring GetCurrentDirW() { WCHAR path[MAX_PATH]; ::GetModuleFileNameW(::GetModuleHandleW(NULL),path,MAX_PATH); *wcsrchr(path,L'\\') = 0; return std::wstring(path); } std::string& ModifyPath(std::string& path) { if(path[0] == '/' || path[0] == '\\'){ path = CurrentDir + path; } std::replace(path.begin(),path.end(),'/','\\'); return path; } std::wstring& ModifyPath(std::wstring& path) { if(path[0] == L'/' || path[0] == L'\\'){ path = CurrentDirW + path; } std::replace(path.begin(),path.end(),L'/',L'\\'); return path; } std::string GetLastErrSz() { LPWSTR lpMsgBuf; FormatMessageW( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), 0, // Default language (LPWSTR) &lpMsgBuf, 0, NULL ); std::string Err; ToNarrowPath(lpMsgBuf, Err); LocalFree( lpMsgBuf ); return Err; } std::wstring GetLastErrSzW() { LPVOID lpMsgBuf; FormatMessageW( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), 0, // Default language (LPWSTR) &lpMsgBuf, 0, NULL ); std::wstring Err = (LPCWSTR)lpMsgBuf; LocalFree(lpMsgBuf); return Err; } WorkItemWrapper::WorkItemWrapper( ScheduleProc proc_,void* content_ ) : proc(proc_),pContent(content_) { } DWORD WINAPI WorkItemWrapperProc(LPVOID pContent) { WorkItemWrapper* item = 
static_cast<WorkItemWrapper*>(pContent); ScheduleProc TempProc = item->proc; void* arg = item->pContent; delete item; TempProc(arg); return 0; } size_t GetPageSize() { SYSTEM_INFO si; GetSystemInfo(&si); return std::max(si.dwPageSize,si.dwAllocationGranularity); } const size_t g_PageSize = GetPageSize(); Win32SequentialFile::Win32SequentialFile( const std::string& fname ) : _filename(fname),_hFile(NULL) { _Init(); } Win32SequentialFile::~Win32SequentialFile() { _CleanUp(); } Status Win32SequentialFile::Read( size_t n, Slice* result, char* scratch ) { Status sRet; DWORD hasRead = 0; if(_hFile && ReadFile(_hFile,scratch,n,&hasRead,NULL) ){ *result = Slice(scratch,hasRead); } else { sRet = Status::IOError(_filename, Win32::GetLastErrSz() ); } return sRet; } Status Win32SequentialFile::Skip( uint64_t n ) { Status sRet; LARGE_INTEGER Move,NowPointer; Move.QuadPart = n; if(!SetFilePointerEx(_hFile,Move,&NowPointer,FILE_CURRENT)){ sRet = Status::IOError(_filename,Win32::GetLastErrSz()); } return sRet; } BOOL Win32SequentialFile::isEnable() { return _hFile ? TRUE : FALSE; } BOOL Win32SequentialFile::_Init() { std::wstring path; ToWidePath(_filename, path); _hFile = CreateFileW(path.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); return _hFile ? 
TRUE : FALSE; } void Win32SequentialFile::_CleanUp() { if(_hFile){ CloseHandle(_hFile); _hFile = NULL; } } Win32RandomAccessFile::Win32RandomAccessFile( const std::string& fname ) : _filename(fname),_hFile(NULL) { std::wstring path; ToWidePath(fname, path); _Init( path.c_str() ); } Win32RandomAccessFile::~Win32RandomAccessFile() { _CleanUp(); } Status Win32RandomAccessFile::Read(uint64_t offset,size_t n,Slice* result,char* scratch) const { Status sRet; OVERLAPPED ol = {0}; ZeroMemory(&ol,sizeof(ol)); ol.Offset = (DWORD)offset; ol.OffsetHigh = (DWORD)(offset >> 32); DWORD hasRead = 0; if(!ReadFile(_hFile,scratch,n,&hasRead,&ol)) sRet = Status::IOError(_filename,Win32::GetLastErrSz()); else *result = Slice(scratch,hasRead); return sRet; } BOOL Win32RandomAccessFile::_Init( LPCWSTR path ) { BOOL bRet = FALSE; if(!_hFile) _hFile = ::CreateFileW(path,GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS,NULL); if(!_hFile || _hFile == INVALID_HANDLE_VALUE ) _hFile = NULL; else bRet = TRUE; return bRet; } BOOL Win32RandomAccessFile::isEnable() { return _hFile ? 
TRUE : FALSE; } void Win32RandomAccessFile::_CleanUp() { if(_hFile){ ::CloseHandle(_hFile); _hFile = NULL; } } size_t Win32MapFile::_Roundup( size_t x, size_t y ) { return ((x + y - 1) / y) * y; } size_t Win32MapFile::_TruncateToPageBoundary( size_t s ) { s -= (s & (_page_size - 1)); assert((s % _page_size) == 0); return s; } bool Win32MapFile::_UnmapCurrentRegion() { bool result = true; if (_base != NULL) { if (_last_sync < _limit) { // Defer syncing this data until next Sync() call, if any _pending_sync = true; } if (!UnmapViewOfFile(_base) || !CloseHandle(_base_handle)) result = false; _file_offset += _limit - _base; _base = NULL; _base_handle = NULL; _limit = NULL; _last_sync = NULL; _dst = NULL; // Increase the amount we map the next time, but capped at 1MB if (_map_size < (1<<20)) { _map_size *= 2; } } return result; } bool Win32MapFile::_MapNewRegion() { assert(_base == NULL); //LONG newSizeHigh = (LONG)((file_offset_ + map_size_) >> 32); //LONG newSizeLow = (LONG)((file_offset_ + map_size_) & 0xFFFFFFFF); DWORD off_hi = (DWORD)(_file_offset >> 32); DWORD off_lo = (DWORD)(_file_offset & 0xFFFFFFFF); LARGE_INTEGER newSize; newSize.QuadPart = _file_offset + _map_size; SetFilePointerEx(_hFile, newSize, NULL, FILE_BEGIN); SetEndOfFile(_hFile); _base_handle = CreateFileMappingA( _hFile, NULL, PAGE_READWRITE, 0, 0, 0); if (_base_handle != NULL) { _base = (char*) MapViewOfFile(_base_handle, FILE_MAP_ALL_ACCESS, off_hi, off_lo, _map_size); if (_base != NULL) { _limit = _base + _map_size; _dst = _base; _last_sync = _base; return true; } } return false; } Win32MapFile::Win32MapFile( const std::string& fname) : _filename(fname), _hFile(NULL), _page_size(Win32::g_PageSize), _map_size(_Roundup(65536, Win32::g_PageSize)), _base(NULL), _base_handle(NULL), _limit(NULL), _dst(NULL), _last_sync(NULL), _file_offset(0), _pending_sync(false) { std::wstring path; ToWidePath(fname, path); _Init(path.c_str()); assert((Win32::g_PageSize & (Win32::g_PageSize - 1)) == 0); } Status 
Win32MapFile::Append( const Slice& data ) { const char* src = data.data(); size_t left = data.size(); Status s; while (left > 0) { assert(_base <= _dst); assert(_dst <= _limit); size_t avail = _limit - _dst; if (avail == 0) { if (!_UnmapCurrentRegion() || !_MapNewRegion()) { return Status::IOError("WinMmapFile.Append::UnmapCurrentRegion or MapNewRegion: ", Win32::GetLastErrSz()); } } size_t n = (left <= avail) ? left : avail; memcpy(_dst, src, n); _dst += n; src += n; left -= n; } return s; } Status Win32MapFile::Close() { Status s; size_t unused = _limit - _dst; if (!_UnmapCurrentRegion()) { s = Status::IOError("WinMmapFile.Close::UnmapCurrentRegion: ",Win32::GetLastErrSz()); } else if (unused > 0) { // Trim the extra space at the end of the file LARGE_INTEGER newSize; newSize.QuadPart = _file_offset - unused; if (!SetFilePointerEx(_hFile, newSize, NULL, FILE_BEGIN)) { s = Status::IOError("WinMmapFile.Close::SetFilePointer: ",Win32::GetLastErrSz()); } else SetEndOfFile(_hFile); } if (!CloseHandle(_hFile)) { if (s.ok()) { s = Status::IOError("WinMmapFile.Close::CloseHandle: ", Win32::GetLastErrSz()); } } _hFile = INVALID_HANDLE_VALUE; _base = NULL; _base_handle = NULL; _limit = NULL; return s; } Status Win32MapFile::Sync() { Status s; if (_pending_sync) { // Some unmapped data was not synced _pending_sync = false; if (!FlushFileBuffers(_hFile)) { s = Status::IOError("WinMmapFile.Sync::FlushFileBuffers: ",Win32::GetLastErrSz()); } } if (_dst > _last_sync) { // Find the beginnings of the pages that contain the first and last // bytes to be synced. 
size_t p1 = _TruncateToPageBoundary(_last_sync - _base); size_t p2 = _TruncateToPageBoundary(_dst - _base - 1); _last_sync = _dst; if (!FlushViewOfFile(_base + p1, p2 - p1 + _page_size)) { s = Status::IOError("WinMmapFile.Sync::FlushViewOfFile: ",Win32::GetLastErrSz()); } } return s; } Status Win32MapFile::Flush() { return Status::OK(); } Win32MapFile::~Win32MapFile() { if (_hFile != INVALID_HANDLE_VALUE) { Win32MapFile::Close(); } } BOOL Win32MapFile::_Init( LPCWSTR Path ) { DWORD Flag = PathFileExistsW(Path) ? OPEN_EXISTING : CREATE_ALWAYS; _hFile = CreateFileW(Path, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ|FILE_SHARE_DELETE|FILE_SHARE_WRITE, NULL, Flag, FILE_ATTRIBUTE_NORMAL, NULL); if(!_hFile || _hFile == INVALID_HANDLE_VALUE) return FALSE; else return TRUE; } BOOL Win32MapFile::isEnable() { return _hFile ? TRUE : FALSE; } Win32FileLock::Win32FileLock( const std::string& fname ) : _hFile(NULL),_filename(fname) { std::wstring path; ToWidePath(fname, path); _Init(path.c_str()); } Win32FileLock::~Win32FileLock() { _CleanUp(); } BOOL Win32FileLock::_Init( LPCWSTR path ) { BOOL bRet = FALSE; if(!_hFile) _hFile = ::CreateFileW(path,0,0,NULL,CREATE_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL); if(!_hFile || _hFile == INVALID_HANDLE_VALUE ){ _hFile = NULL; } else bRet = TRUE; return bRet; } void Win32FileLock::_CleanUp() { ::CloseHandle(_hFile); _hFile = NULL; } BOOL Win32FileLock::isEnable() { return _hFile ? TRUE : FALSE; } Win32Logger::Win32Logger(WritableFile* pFile) : _pFileProxy(pFile) { assert(_pFileProxy); } Win32Logger::~Win32Logger() { if(_pFileProxy) delete _pFileProxy; } void Win32Logger::Logv( const char* format, va_list ap ) { uint64_t thread_id = ::GetCurrentThreadId(); // We try twice: the first time with a fixed-size stack allocated buffer, // and the second time with a much larger dynamically allocated buffer. 
char buffer[500]; for (int iter = 0; iter < 2; iter++) { char* base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char* p = base; char* limit = base + bufsize; SYSTEMTIME st; GetLocalTime(&st); p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", int(st.wYear), int(st.wMonth), int(st.wDay), int(st.wHour), int(st.wMinute), int(st.wMinute), int(st.wMilliseconds), static_cast<long long unsigned int>(thread_id)); // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); DWORD hasWritten = 0; if(_pFileProxy){ _pFileProxy->Append(Slice(base, p - base)); _pFileProxy->Flush(); } if (base != buffer) { delete[] base; } break; } } bool Win32Env::FileExists(const std::string& fname) { std::string path = fname; std::wstring wpath; ToWidePath(ModifyPath(path), wpath); return ::PathFileExistsW(wpath.c_str()) ? true : false; } Status Win32Env::GetChildren(const std::string& dir, std::vector<std::string>* result) { Status sRet; ::WIN32_FIND_DATAW wfd; std::string path = dir; ModifyPath(path); path += "\\*.*"; std::wstring wpath; ToWidePath(path, wpath); ::HANDLE hFind = ::FindFirstFileW(wpath.c_str() ,&wfd); if(hFind && hFind != INVALID_HANDLE_VALUE){ BOOL hasNext = TRUE; std::string child; while(hasNext){ ToNarrowPath(wfd.cFileName, child); if(child != ".." 
&& child != ".") { result->push_back(child); } hasNext = ::FindNextFileW(hFind,&wfd); } ::FindClose(hFind); } else sRet = Status::IOError(dir,"Could not get children."); return sRet; } void Win32Env::SleepForMicroseconds( int micros ) { ::Sleep((micros + 999) /1000); } Status Win32Env::DeleteFile( const std::string& fname ) { Status sRet; std::string path = fname; std::wstring wpath; ToWidePath(ModifyPath(path), wpath); if(!::DeleteFileW(wpath.c_str())) { sRet = Status::IOError(path, "Could not delete file."); } return sRet; } Status Win32Env::GetFileSize( const std::string& fname, uint64_t* file_size ) { Status sRet; std::string path = fname; std::wstring wpath; ToWidePath(ModifyPath(path), wpath); HANDLE file = ::CreateFileW(wpath.c_str(), GENERIC_READ,FILE_SHARE_READ,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL); LARGE_INTEGER li; if(::GetFileSizeEx(file,&li)){ *file_size = (uint64_t)li.QuadPart; }else sRet = Status::IOError(path,"Could not get the file size."); CloseHandle(file); return sRet; } Status Win32Env::RenameFile( const std::string& src, const std::string& target ) { Status sRet; std::string src_path = src; std::wstring wsrc_path; ToWidePath(ModifyPath(src_path), wsrc_path); std::string target_path = target; std::wstring wtarget_path; ToWidePath(ModifyPath(target_path), wtarget_path); if(!MoveFileW(wsrc_path.c_str(), wtarget_path.c_str() ) ){ DWORD err = GetLastError(); if(err == 0x000000b7){ if(!::DeleteFileW(wtarget_path.c_str() ) ) sRet = Status::IOError(src, "Could not rename file."); else if(!::MoveFileW(wsrc_path.c_str(), wtarget_path.c_str() ) ) sRet = Status::IOError(src, "Could not rename file."); } } return sRet; } Status Win32Env::LockFile( const std::string& fname, FileLock** lock ) { Status sRet; std::string path = fname; ModifyPath(path); Win32FileLock* _lock = new Win32FileLock(path); if(!_lock->isEnable()){ delete _lock; *lock = NULL; sRet = Status::IOError(path, "Could not lock file."); } else *lock = _lock; return sRet; } Status 
Win32Env::UnlockFile( FileLock* lock ) { Status sRet; delete lock; return sRet; } void Win32Env::Schedule( void (*function)(void* arg), void* arg ) { QueueUserWorkItem(Win32::WorkItemWrapperProc, new Win32::WorkItemWrapper(function,arg), WT_EXECUTEDEFAULT); } void Win32Env::StartThread( void (*function)(void* arg), void* arg ) { ::_beginthread(function,0,arg); } Status Win32Env::GetTestDirectory( std::string* path ) { Status sRet; WCHAR TempPath[MAX_PATH]; ::GetTempPathW(MAX_PATH,TempPath); ToNarrowPath(TempPath, *path); path->append("leveldb\\test\\"); ModifyPath(*path); return sRet; } uint64_t Win32Env::NowMicros() { #ifndef USE_VISTA_API #define GetTickCount64 GetTickCount #endif return (uint64_t)(GetTickCount64()*1000); } static Status CreateDirInner( const std::string& dirname ) { Status sRet; DWORD attr = ::GetFileAttributes(dirname.c_str()); if (attr == INVALID_FILE_ATTRIBUTES) { // doesn't exist: std::size_t slash = dirname.find_last_of("\\"); if (slash != std::string::npos){ sRet = CreateDirInner(dirname.substr(0, slash)); if (!sRet.ok()) return sRet; } BOOL result = ::CreateDirectory(dirname.c_str(), NULL); if (result == FALSE) { sRet = Status::IOError(dirname, "Could not create directory."); return sRet; } } return sRet; } Status Win32Env::CreateDir( const std::string& dirname ) { std::string path = dirname; if(path[path.length() - 1] != '\\'){ path += '\\'; } ModifyPath(path); return CreateDirInner(path); } Status Win32Env::DeleteDir( const std::string& dirname ) { Status sRet; std::wstring path; ToWidePath(dirname, path); ModifyPath(path); if(!::RemoveDirectoryW( path.c_str() ) ){ sRet = Status::IOError(dirname, "Could not delete directory."); } return sRet; } Status Win32Env::NewSequentialFile( const std::string& fname, SequentialFile** result ) { Status sRet; std::string path = fname; ModifyPath(path); Win32SequentialFile* pFile = new Win32SequentialFile(path); if(pFile->isEnable()){ *result = pFile; }else { delete pFile; sRet = Status::IOError(path, 
Win32::GetLastErrSz()); } return sRet; } Status Win32Env::NewRandomAccessFile( const std::string& fname, RandomAccessFile** result ) { Status sRet; std::string path = fname; Win32RandomAccessFile* pFile = new Win32RandomAccessFile(ModifyPath(path)); if(!pFile->isEnable()){ delete pFile; *result = NULL; sRet = Status::IOError(path, Win32::GetLastErrSz()); }else *result = pFile; return sRet; } Status Win32Env::NewLogger( const std::string& fname, Logger** result ) { Status sRet; std::string path = fname; Win32MapFile* pMapFile = new Win32MapFile(ModifyPath(path)); if(!pMapFile->isEnable()){ delete pMapFile; *result = NULL; sRet = Status::IOError(path,"could not create a logger."); }else *result = new Win32Logger(pMapFile); return sRet; } Status Win32Env::NewWritableFile( const std::string& fname, WritableFile** result ) { Status sRet; std::string path = fname; Win32MapFile* pFile = new Win32MapFile(ModifyPath(path)); if(!pFile->isEnable()){ *result = NULL; sRet = Status::IOError(fname,Win32::GetLastErrSz()); }else *result = pFile; return sRet; } Win32Env::Win32Env() { } Win32Env::~Win32Env() { } } // Win32 namespace static port::OnceType once = LEVELDB_ONCE_INIT; static Env* default_env; static void InitDefaultEnv() { default_env = new Win32::Win32Env(); } Env* Env::Default() { port::InitOnce(&once, InitDefaultEnv); return default_env; } } // namespace leveldb #endif // defined(LEVELDB_PLATFORM_WINDOWS)<|fim▁end|>
struct WorkItemWrapper
<|file_name|>collector.py<|end_file_name|><|fim▁begin|><|fim▁hole|>''' TBD '''<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Provides fakes for several of Telemetry's internal objects. These allow code like story_runner and Benchmark to be run and tested without compiling or starting a browser. Class names prepended with an underscore are intended to be implementation details, and should not be subclassed; however, some, like _FakeBrowser, have public APIs that may need to be called in tests. """ from telemetry.internal.backends.chrome_inspector import websocket from telemetry.internal.browser import browser_options from telemetry.internal.platform import system_info from telemetry.page import shared_page_state from telemetry.util import image_util from telemetry.testing.internal import fake_gpu_info from types import ModuleType # Classes and functions which are intended to be part of the public # fakes API. 
class FakePlatform(object): def __init__(self): self._network_controller = None self._tracing_controller = None self._has_battor = False self._os_name = 'FakeOS' self._device_type_name = 'abc' self._is_svelte = False self._is_aosp = True @property def is_host_platform(self): raise NotImplementedError @property def network_controller(self): if self._network_controller is None: self._network_controller = _FakeNetworkController() return self._network_controller @property def tracing_controller(self): if self._tracing_controller is None: self._tracing_controller = _FakeTracingController() return self._tracing_controller def Initialize(self): pass def CanMonitorThermalThrottling(self): return False def IsThermallyThrottled(self): return False def HasBeenThermallyThrottled(self): return False def GetArchName(self): raise NotImplementedError def SetOSName(self, name): self._os_name = name def GetOSName(self): return self._os_name def GetOSVersionName(self): raise NotImplementedError def GetOSVersionDetailString(self): raise NotImplementedError def StopAllLocalServers(self): pass def WaitForBatteryTemperature(self, _): pass def HasBattOrConnected(self): return self._has_battor def SetBattOrDetected(self, b): assert isinstance(b, bool) self._has_battor = b # TODO(rnephew): Investigate moving from setters to @property. 
def SetDeviceTypeName(self, name): self._device_type_name = name def GetDeviceTypeName(self): return self._device_type_name def SetIsSvelte(self, b): assert isinstance(b, bool) self._is_svelte = b def IsSvelte(self): if self._os_name != 'android': raise NotImplementedError<|fim▁hole|> return self._is_svelte def SetIsAosp(self, b): assert isinstance(b, bool) self._is_aosp = b def IsAosp(self): return self._is_aosp and self._os_name == 'android' class FakeLinuxPlatform(FakePlatform): def __init__(self): super(FakeLinuxPlatform, self).__init__() self.screenshot_png_data = None self.http_server_directories = [] self.http_server = FakeHTTPServer() @property def is_host_platform(self): return True def GetDeviceTypeName(self): return 'Desktop' def GetArchName(self): return 'x86_64' def GetOSName(self): return 'linux' def GetOSVersionName(self): return 'trusty' def GetOSVersionDetailString(self): return '' def CanTakeScreenshot(self): return bool(self.screenshot_png_data) def TakeScreenshot(self, file_path): if not self.CanTakeScreenshot(): raise NotImplementedError img = image_util.FromBase64Png(self.screenshot_png_data) image_util.WritePngFile(img, file_path) return True def SetHTTPServerDirectories(self, paths): self.http_server_directories.append(paths) class FakeHTTPServer(object): def UrlOf(self, url): del url # unused return 'file:///foo' class FakePossibleBrowser(object): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None): self._returned_browser = _FakeBrowser(FakeLinuxPlatform()) self.browser_type = 'linux' self.supports_tab_control = False self.is_remote = False self.execute_on_startup = execute_on_startup self.execute_after_browser_creation = execute_after_browser_creation @property def returned_browser(self): """The browser object that will be returned through later API calls.""" return self._returned_browser def Create(self, finder_options): if self.execute_on_startup is not None: self.execute_on_startup() del finder_options # 
unused if self.execute_after_browser_creation is not None: self.execute_after_browser_creation(self._returned_browser) return self.returned_browser @property def platform(self): """The platform object from the returned browser. To change this or set it up, change the returned browser's platform. """ return self.returned_browser.platform def IsRemote(self): return self.is_remote def SetCredentialsPath(self, _): pass class FakeSharedPageState(shared_page_state.SharedPageState): def __init__(self, test, finder_options, story_set): super(FakeSharedPageState, self).__init__(test, finder_options, story_set) def _GetPossibleBrowser(self, test, finder_options): p = FakePossibleBrowser() self.ConfigurePossibleBrowser(p) return p def ConfigurePossibleBrowser(self, possible_browser): """Override this to configure the PossibleBrowser. Can make changes to the browser's configuration here via e.g.: possible_browser.returned_browser.returned_system_info = ... """ pass def DidRunStory(self, results): # TODO(kbr): add a test which throws an exception from DidRunStory # to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe . 
super(FakeSharedPageState, self).DidRunStory(results) class FakeSystemInfo(system_info.SystemInfo): def __init__(self, model_name='', gpu_dict=None, command_line=''): if gpu_dict == None: gpu_dict = fake_gpu_info.FAKE_GPU_INFO super(FakeSystemInfo, self).__init__(model_name, gpu_dict, command_line) class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions): def __init__(self, execute_on_startup=None, execute_after_browser_creation=None, *args, **kwargs): browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs) self.fake_possible_browser = \ FakePossibleBrowser( execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) def CreateBrowserFinderOptions(browser_type=None, execute_on_startup=None, execute_after_browser_creation=None): """Creates fake browser finder options for discovering a browser.""" return _FakeBrowserFinderOptions( browser_type=browser_type, execute_on_startup=execute_on_startup, execute_after_browser_creation=execute_after_browser_creation) # Internal classes. Note that end users may still need to both call # and mock out methods of these classes, but they should not be # subclassed. class _FakeBrowser(object): def __init__(self, platform): self._tabs = _FakeTabList(self) # Fake the creation of the first tab. self._tabs.New() self._returned_system_info = FakeSystemInfo() self._platform = platform self._browser_type = 'release' self._is_crashed = False @property def platform(self): return self._platform @platform.setter def platform(self, incoming): """Allows overriding of the fake browser's platform object.""" assert isinstance(incoming, FakePlatform) self._platform = incoming @property def returned_system_info(self): """The object which will be returned from calls to GetSystemInfo.""" return self._returned_system_info @returned_system_info.setter def returned_system_info(self, incoming): """Allows overriding of the returned SystemInfo object. 
Incoming argument must be an instance of FakeSystemInfo.""" assert isinstance(incoming, FakeSystemInfo) self._returned_system_info = incoming @property def browser_type(self): """The browser_type this browser claims to be ('debug', 'release', etc.)""" return self._browser_type @browser_type.setter def browser_type(self, incoming): """Allows setting of the browser_type.""" self._browser_type = incoming @property def credentials(self): return _FakeCredentials() def Close(self): self._is_crashed = False @property def supports_system_info(self): return True def GetSystemInfo(self): return self.returned_system_info @property def supports_tab_control(self): return True @property def tabs(self): return self._tabs def DumpStateUponFailure(self): pass class _FakeCredentials(object): def WarnIfMissingCredentials(self, _): pass class _FakeTracingController(object): def __init__(self): self._is_tracing = False def StartTracing(self, tracing_config, timeout=10): self._is_tracing = True del tracing_config del timeout def StopTracing(self): self._is_tracing = False @property def is_tracing_running(self): return self._is_tracing def ClearStateIfNeeded(self): pass def IsChromeTracingSupported(self): return True class _FakeNetworkController(object): def __init__(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False self.use_live_traffic = None def InitializeIfNeeded(self, use_live_traffic=False): self.use_live_traffic = use_live_traffic def UpdateTrafficSettings(self, round_trip_latency_ms=None, download_bandwidth_kbps=None, upload_bandwidth_kbps=None): pass def Open(self, wpr_mode, extra_wpr_args, use_wpr_go=False): del use_wpr_go # Unused. 
self.wpr_mode = wpr_mode self.extra_wpr_args = extra_wpr_args self.is_open = True def Close(self): self.wpr_mode = None self.extra_wpr_args = None self.is_initialized = False self.is_open = False def StartReplay(self, archive_path, make_javascript_deterministic=False): del make_javascript_deterministic # Unused. assert self.is_open self.is_initialized = archive_path is not None def StopReplay(self): self.is_initialized = False class _FakeTab(object): def __init__(self, browser, tab_id): self._browser = browser self._tab_id = str(tab_id) self._collect_garbage_count = 0 self.test_png = None @property def collect_garbage_count(self): return self._collect_garbage_count @property def id(self): return self._tab_id @property def browser(self): return self._browser def WaitForDocumentReadyStateToBeComplete(self, timeout=0): pass def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=0): del script_to_evaluate_on_commit, timeout # unused if url == 'chrome://crash': self.browser._is_crashed = True raise Exception def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0): pass def WaitForFrameToBeDisplayed(self, timeout=0): pass def IsAlive(self): return True def CloseConnections(self): pass def CollectGarbage(self): self._collect_garbage_count += 1 def Close(self): pass @property def screenshot_supported(self): return self.test_png is not None def Screenshot(self): assert self.screenshot_supported, 'Screenshot is not supported' return image_util.FromBase64Png(self.test_png) class _FakeTabList(object): _current_tab_id = 0 def __init__(self, browser): self._tabs = [] self._browser = browser def New(self, timeout=300): del timeout # unused type(self)._current_tab_id += 1 t = _FakeTab(self._browser, type(self)._current_tab_id) self._tabs.append(t) return t def __iter__(self): return self._tabs.__iter__() def __len__(self): return len(self._tabs) def __getitem__(self, index): if self._tabs[index].browser._is_crashed: raise Exception else: return 
self._tabs[index] def GetTabById(self, identifier): """The identifier of a tab can be accessed with tab.id.""" for tab in self._tabs: if tab.id == identifier: return tab return None class FakeInspectorWebsocket(object): _NOTIFICATION_EVENT = 1 _NOTIFICATION_CALLBACK = 2 """A fake InspectorWebsocket. A fake that allows tests to send pregenerated data. Normal InspectorWebsockets allow for any number of domain handlers. This fake only allows up to 1 domain handler, and assumes that the domain of the response always matches that of the handler. """ def __init__(self, mock_timer): self._mock_timer = mock_timer self._notifications = [] self._response_handlers = {} self._pending_callbacks = {} self._handler = None def RegisterDomain(self, _, handler): self._handler = handler def AddEvent(self, method, params, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'params': params} self._notifications.append((response, time, self._NOTIFICATION_EVENT)) def AddAsyncResponse(self, method, result, time): if self._notifications: assert self._notifications[-1][1] < time, ( 'Current response is scheduled earlier than previous response.') response = {'method': method, 'result': result} self._notifications.append((response, time, self._NOTIFICATION_CALLBACK)) def AddResponseHandler(self, method, handler): self._response_handlers[method] = handler def SyncRequest(self, request, *args, **kwargs): del args, kwargs # unused handler = self._response_handlers[request['method']] return handler(request) if handler else None def AsyncRequest(self, request, callback): self._pending_callbacks.setdefault(request['method'], []).append(callback) def SendAndIgnoreResponse(self, request): pass def Connect(self, _): pass def DispatchNotifications(self, timeout): current_time = self._mock_timer.time() if not self._notifications: self._mock_timer.SetTime(current_time + timeout + 1) raise 
websocket.WebSocketTimeoutException() response, time, kind = self._notifications[0] if time - current_time > timeout: self._mock_timer.SetTime(current_time + timeout + 1) raise websocket.WebSocketTimeoutException() self._notifications.pop(0) self._mock_timer.SetTime(time + 1) if kind == self._NOTIFICATION_EVENT: self._handler(response) elif kind == self._NOTIFICATION_CALLBACK: callback = self._pending_callbacks.get(response['method']).pop(0) callback(response) else: raise Exception('Unexpected response type') class FakeTimer(object): """ A fake timer to fake out the timing for a module. Args: module: module to fake out the time """ def __init__(self, module=None): self._elapsed_time = 0 self._module = module self._actual_time = None if module: assert isinstance(module, ModuleType) self._actual_time = module.time self._module.time = self def sleep(self, time): self._elapsed_time += time def time(self): return self._elapsed_time def SetTime(self, time): self._elapsed_time = time def __del__(self): self.Restore() def Restore(self): if self._module: self._module.time = self._actual_time self._module = None self._actual_time = None<|fim▁end|>
<|file_name|>value.rs<|end_file_name|><|fim▁begin|>use std::cmp::PartialEq; use std::fmt; use std::borrow::Cow; use mime_parse::Mime; use quoted_string::{self, ContentChars, AsciiCaseInsensitiveEq}; /// a `Value` usable for a charset parameter. /// /// # Example /// ``` /// let mime = mime::TEXT_PLAIN_UTF_8; /// assert_eq!(mime.param(mime::CHARSET), Some(mime::UTF_8)); /// ``` pub const UTF_8: Value = Value { source: "utf-8", ascii_case_insensitive: true, }; /// A parameter value section of a `MediaType` or `MediaRange`. /// /// Except for the `charset` parameter, parameters are compared case-sensitive. #[derive(Clone, Copy)] pub struct Value<'a> { source: &'a str, ascii_case_insensitive: bool, } pub(crate) fn params(mime: &Mime) -> impl Iterator<Item = (&str, Value)> { mime.params().map(|(n, v)| { let value = Value::new(v).for_name(n); (n, value) }) } pub(crate) fn param<'a>(mime: &'a Mime, key: &str) -> Option<Value<'a>> { params(mime).find(|e| key == e.0).map(|e| e.1) } impl<'a> Value<'a> { fn new(source: &'a str) -> Self { Value { source, ascii_case_insensitive: false, } } fn for_name(mut self, name: &str) -> Self { debug_assert!(crate::is_ascii_lowercase(name)); self.ascii_case_insensitive = name == crate::CHARSET; self } /// Returns the underlying representation. /// /// The underlying representation differs from the content, /// as it can contain quotes surrounding the content and /// quoted-pairs, even if non of them are necessary to /// represent the content. /// /// For example the representation `r#""a\"\ b""#` corresponds /// to the content `r#""a" b"#`. Another semantically equivalent /// (i.e. with the same content) representation is `r#""a\" b""` /// /// # Example /// /// ``` /// let mime = r#"text/plain; param="abc def""#.parse::<mime::MediaType>().unwrap(); /// let param = mime.param("param").unwrap(); /// assert_eq!(param.as_str_repr(), r#""abc def""#); /// ``` pub fn as_str_repr(&self) -> &'a str { self.source } <|fim▁hole|> /// quoted pairs. 
/// /// If the underlying representation is a quoted string containing /// quoted-pairs `Cow::Owned` is returned. /// /// If the underlying representation is a quoted-string without /// quoted-pairs `Cow::Borrowed` is returned as normal /// str slicing can be used to strip the surrounding double quoted. /// /// If the underlying representation is not a quoted-string /// `Cow::Borrowed` is returned, too. /// /// # Example /// /// ``` /// let raw_mime = r#"text/plain; p1="char is \""; p2="simple"; p3=simple2"#; /// let mime = raw_mime.parse::<mime::MediaType>().unwrap(); /// /// let param1 = mime.param("p1").unwrap(); /// let expected = r#"char is ""#; /// assert_eq!(param1.to_content(), expected); /// /// let param2 = mime.param("p2").unwrap(); /// assert_eq!(param2.to_content(), "simple"); /// /// let param3 = mime.param("p3").unwrap(); /// assert_eq!(param3.to_content(), "simple2"); /// ``` /// pub fn to_content(&self) -> Cow<'a, str> { quoted_string::unquote_unchecked(self.source) } } impl<'a, 'b> PartialEq<Value<'b>> for Value<'a> { #[inline] fn eq(&self, other: &Value<'b>) -> bool { let left_content_chars = ContentChars::from_string_unchecked(self.source); let right_content_chars = ContentChars::from_string_unchecked(other.source); if self.ascii_case_insensitive || other.ascii_case_insensitive { left_content_chars.eq_ignore_ascii_case(&right_content_chars) } else { left_content_chars == right_content_chars } } } impl<'a> PartialEq<str> for Value<'a> { fn eq(&self, other: &str) -> bool { if self.source.starts_with('"') { let content_chars = ContentChars::from_string_unchecked(self.source); if self.ascii_case_insensitive { content_chars.eq_ignore_ascii_case(other) } else { content_chars == other } } else if self.ascii_case_insensitive { self.source.eq_ignore_ascii_case(other) } else { self.source == other } } } impl<'a, 'b> PartialEq<&'b str> for Value<'a> { #[inline] fn eq(&self, other: & &'b str) -> bool { self == *other } } impl<'a, 'b> PartialEq<Value<'b>> 
for &'a str { #[inline] fn eq(&self, other: &Value<'b>) -> bool { other == self } } impl<'a> PartialEq<Value<'a>> for str { #[inline] fn eq(&self, other: &Value<'a>) -> bool { other == self } } impl<'a> From<Value<'a>> for Cow<'a, str> { #[inline] fn from(value: Value<'a>) -> Self { value.to_content() } } impl<'a> fmt::Debug for Value<'a> { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(self.source, f) } } impl<'a> fmt::Display for Value<'a> { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.source, f) } } #[cfg(test)] mod test { use std::borrow::Cow; use std::cmp::PartialEq; use std::fmt::Debug; use super::Value; fn bidi_eq<A: Debug+PartialEq<B>, B: Debug+PartialEq<A>>(left: A, right: B) { assert_eq!(left, right); assert_eq!(right, left); } fn bidi_ne<A: Debug+PartialEq<B>, B: Debug+PartialEq<A>>(left: A, right: B) { assert_ne!(left, right); assert_ne!(right, left); } #[test] fn test_value_eq_str() { let value = Value { source: "abc", ascii_case_insensitive: false }; let value_quoted = Value { source: "\"abc\"", ascii_case_insensitive: false }; let value_quoted_with_esacpes = Value { source: "\"a\\bc\"", ascii_case_insensitive: false }; bidi_eq(value, "abc"); bidi_ne(value, "\"abc\""); bidi_ne(value, "\"a\\bc\""); bidi_eq(value_quoted, "abc"); bidi_ne(value_quoted, "\"abc\""); bidi_ne(value_quoted, "\"a\\bc\""); bidi_eq(value_quoted_with_esacpes, "abc"); bidi_ne(value_quoted_with_esacpes, "\"abc\""); bidi_ne(value_quoted_with_esacpes, "\"a\\bc\""); assert_ne!(value, "aBc"); assert_ne!(value_quoted, "aBc"); assert_ne!(value_quoted_with_esacpes, "aBc"); } #[test] fn test_value_eq_str_ascii_case_insensitive() { let value = Value { source: "abc", ascii_case_insensitive: true }; let value_quoted = Value { source: "\"abc\"", ascii_case_insensitive: true }; let value_quoted_with_esacpes = Value { source: "\"a\\bc\"", ascii_case_insensitive: true }; //1st. 
all case sensitive checks which still apply bidi_eq(value, "abc"); bidi_ne(value, "\"abc\""); bidi_ne(value, "\"a\\bc\""); bidi_eq(value_quoted, "abc"); bidi_ne(value_quoted, "\"abc\""); bidi_ne(value_quoted, "\"a\\bc\""); bidi_eq(value_quoted_with_esacpes, "abc"); bidi_ne(value_quoted_with_esacpes, "\"abc\""); bidi_ne(value_quoted_with_esacpes, "\"a\\bc\""); //2nd the case insensitive check bidi_eq(value, "aBc"); bidi_ne(value, "\"aBc\""); bidi_ne(value, "\"a\\Bc\""); bidi_eq(value_quoted, "aBc"); bidi_ne(value_quoted, "\"aBc\""); bidi_ne(value_quoted, "\"a\\Bc\""); bidi_eq(value_quoted_with_esacpes, "aBc"); bidi_ne(value_quoted_with_esacpes, "\"aBc\""); bidi_ne(value_quoted_with_esacpes, "\"a\\Bc\""); } #[test] fn test_value_eq_value() { let value = Value { source: "abc", ascii_case_insensitive: false }; let value_quoted = Value { source: "\"abc\"", ascii_case_insensitive: false }; let value_quoted_with_esacpes = Value { source: "\"a\\bc\"", ascii_case_insensitive: false }; assert_eq!(value, value); assert_eq!(value_quoted, value_quoted); assert_eq!(value_quoted_with_esacpes, value_quoted_with_esacpes); bidi_eq(value, value_quoted); bidi_eq(value, value_quoted_with_esacpes); bidi_eq(value_quoted, value_quoted_with_esacpes); } #[test] fn test_value_eq_value_case_insensitive() { let value = Value { source: "Abc", ascii_case_insensitive: true }; let value_quoted = Value { source: "\"aBc\"", ascii_case_insensitive: true }; let value_quoted_with_esacpes = Value { source: "\"a\\bC\"", ascii_case_insensitive: true }; assert_eq!(value, value); assert_eq!(value_quoted, value_quoted); assert_eq!(value_quoted_with_esacpes, value_quoted_with_esacpes); bidi_eq(value, value_quoted); bidi_eq(value, value_quoted_with_esacpes); bidi_eq(value_quoted, value_quoted_with_esacpes); } #[test] fn test_value_eq_value_mixed_case_sensitivity() { let value = Value { source: "Abc", ascii_case_insensitive: true }; let value_quoted = Value { source: "\"aBc\"", ascii_case_insensitive: false }; 
let value_quoted_with_esacpes = Value { source: "\"a\\bC\"", ascii_case_insensitive: false }; bidi_eq(value, value_quoted); bidi_eq(value, value_quoted_with_esacpes); //both are ascii case insensitive bidi_ne(value_quoted, value_quoted_with_esacpes); } #[test] fn test_as_str_repr() { let value = Value::new("\"ab cd\""); assert_eq!(value, "ab cd"); assert_eq!(value.as_str_repr(), "\"ab cd\""); } #[test] fn test_to_content_not_quoted() { let value = Value::new("abc"); assert_eq!(value.to_content(), Cow::Borrowed("abc")); } #[test] fn test_to_content_quoted_simple() { let value = Value::new("\"ab cd\""); assert_eq!(value.to_content(), Cow::Borrowed("ab cd")); } #[test] fn test_to_content_with_quoted_pair() { let value = Value::new("\"ab\\\"cd\""); assert_eq!(value, "ab\"cd"); let expected: Cow<'static, str> = Cow::Owned("ab\"cd".into()); assert_eq!(value.to_content(), expected); } }<|fim▁end|>
/// Returns the content of this instance. /// /// It differs to the representation in that it will remove the /// quotation marks from the quoted string and will "unquote"
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2018 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. mod process; mod vcpu; use std::fs::File; use std::io; use std::io::Read; use std::os::unix::net::UnixDatagram; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Barrier}; use std::thread; use std::time::{Duration, Instant}; use libc::{ c_int, c_ulong, fcntl, ioctl, socketpair, AF_UNIX, EAGAIN, EBADF, EDEADLK, EEXIST, EINTR, EINVAL, ENOENT, EOVERFLOW, EPERM, FIOCLEX, F_SETPIPE_SZ, MS_NODEV, MS_NOEXEC, MS_NOSUID, MS_RDONLY, O_NONBLOCK, SIGCHLD, SOCK_SEQPACKET, }; use protobuf::ProtobufError; use remain::sorted; use thiserror::Error; use anyhow::{anyhow, bail, Context, Result}; use base::{ add_fd_flags, block_signal, clear_signal, drop_capabilities, enable_core_scheduling, error, getegid, geteuid, info, pipe, register_rt_signal_handler, validate_raw_descriptor, warn, AsRawDescriptor, Error as SysError, Event, FromRawDescriptor, Killable, MmapError, PollToken, Result as SysResult, SignalFd, WaitContext, SIGRTMIN, }; use kvm::{Cap, Datamatch, IoeventAddress, Kvm, Vcpu, VcpuExit, Vm}; use minijail::{self, Minijail}; use net_util::{Tap, TapT}; use vm_memory::{GuestMemory, MemoryPolicy}; use self::process::*; use self::vcpu::*; use crate::{Config, Executable}; const MAX_DATAGRAM_SIZE: usize = 4096; const MAX_VCPU_DATAGRAM_SIZE: usize = 0x40000; /// An error that occurs when communicating with the plugin process. 
#[sorted] #[derive(Error, Debug)] pub enum CommError { #[error("failed to decode plugin request: {0}")] DecodeRequest(ProtobufError), #[error("failed to encode plugin response: {0}")] EncodeResponse(ProtobufError), #[error("plugin request socket has been hung up")] PluginSocketHup, #[error("failed to recv from plugin request socket: {0}")] PluginSocketRecv(SysError), #[error("failed to send to plugin request socket: {0}")] PluginSocketSend(SysError), } fn new_seqpacket_pair() -> SysResult<(UnixDatagram, UnixDatagram)> { let mut fds = [0, 0]; unsafe { let ret = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds.as_mut_ptr()); if ret == 0 { ioctl(fds[0], FIOCLEX); Ok(( UnixDatagram::from_raw_descriptor(fds[0]), UnixDatagram::from_raw_descriptor(fds[1]), )) } else { Err(SysError::last()) } } } struct VcpuPipe { crosvm_read: File, plugin_write: File, plugin_read: File, crosvm_write: File, } fn new_pipe_pair() -> SysResult<VcpuPipe> { let to_crosvm = pipe(true)?; let to_plugin = pipe(true)?; // Increasing the pipe size can be a nice-to-have to make sure that // messages get across atomically (and made sure that writes don't block), // though it's not necessary a hard requirement for things to work. 
let flags = unsafe { fcntl( to_crosvm.0.as_raw_descriptor(), F_SETPIPE_SZ, MAX_VCPU_DATAGRAM_SIZE as c_int, ) }; if flags < 0 || flags != MAX_VCPU_DATAGRAM_SIZE as i32 { warn!( "Failed to adjust size of crosvm pipe (result {}): {}", flags, SysError::last() ); } let flags = unsafe { fcntl( to_plugin.0.as_raw_descriptor(), F_SETPIPE_SZ, MAX_VCPU_DATAGRAM_SIZE as c_int, ) }; if flags < 0 || flags != MAX_VCPU_DATAGRAM_SIZE as i32 { warn!( "Failed to adjust size of plugin pipe (result {}): {}", flags, SysError::last() ); } Ok(VcpuPipe { crosvm_read: to_crosvm.0, plugin_write: to_crosvm.1, plugin_read: to_plugin.0, crosvm_write: to_plugin.1, }) } fn proto_to_sys_err(e: ProtobufError) -> SysError { match e { ProtobufError::IoError(e) => SysError::new(e.raw_os_error().unwrap_or(EINVAL)), _ => SysError::new(EINVAL), } } fn io_to_sys_err(e: io::Error) -> SysError { SysError::new(e.raw_os_error().unwrap_or(EINVAL)) } fn mmap_to_sys_err(e: MmapError) -> SysError { match e { MmapError::SystemCallFailed(e) => e, _ => SysError::new(EINVAL), } } fn create_plugin_jail(root: &Path, log_failures: bool, seccomp_policy: &Path) -> Result<Minijail> { // All child jails run in a new user namespace without any users mapped, // they run as nobody unless otherwise configured. let mut j = Minijail::new().context("failed to create jail")?; j.namespace_pids(); j.namespace_user(); j.uidmap(&format!("0 {0} 1", geteuid())) .context("failed to set uidmap for jail")?; j.gidmap(&format!("0 {0} 1", getegid())) .context("failed to set gidmap for jail")?; j.namespace_user_disable_setgroups(); // Don't need any capabilities. j.use_caps(0); // Create a new mount namespace with an empty root FS. j.namespace_vfs(); j.enter_pivot_root(root) .context("failed to set jail pivot root")?; // Run in an empty network namespace. 
j.namespace_net(); j.no_new_privs(); // By default we'll prioritize using the pre-compiled .bpf over the .policy // file (the .bpf is expected to be compiled using "trap" as the failure // behavior instead of the default "kill" behavior). // Refer to the code comment for the "seccomp-log-failures" // command-line parameter for an explanation about why the |log_failures| // flag forces the use of .policy files (and the build-time alternative to // this run-time flag). let bpf_policy_file = seccomp_policy.with_extension("bpf"); if bpf_policy_file.exists() && !log_failures { j.parse_seccomp_program(&bpf_policy_file) .context("failed to parse jail seccomp BPF program")?; } else { // Use TSYNC only for the side effect of it using SECCOMP_RET_TRAP, // which will correctly kill the entire device process if a worker // thread commits a seccomp violation. j.set_seccomp_filter_tsync(); if log_failures { j.log_seccomp_filter_failures(); } j.parse_seccomp_filters(&seccomp_policy.with_extension("policy")) .context("failed to parse jail seccomp filter")?; } j.use_seccomp_filter(); // Don't do init setup. j.run_as_init(); // Create a tmpfs in the plugin's root directory so that we can bind mount it's executable // file into it. The size=67108864 is size=64*1024*1024 or size=64MB. j.mount_with_data( Path::new("none"), Path::new("/"), "tmpfs", (MS_NOSUID | MS_NODEV | MS_NOEXEC) as usize, "size=67108864", ) .context("failed to mount root")?; // Because we requested to "run as init", minijail will not mount /proc for us even though // plugin will be running in its own PID namespace, so we have to mount it ourselves. j.mount( Path::new("proc"), Path::new("/proc"), "proc", (MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RDONLY) as usize, ) .context("failed to mount proc")?; Ok(j) } /// Each `PluginObject` represents one object that was instantiated by the guest using the `Create` /// request. 
/// /// Each such object has an ID associated with it that exists in an ID space shared by every variant /// of `PluginObject`. This allows all the objects to be indexed in a single map, and allows for a /// common destroy method. /// /// In addition to the destory method, each object may have methods specific to its variant type. /// These variant methods must be done by matching the variant to the expected type for that method. /// For example, getting the dirty log from a `Memory` object starting with an ID: /// /// ```ignore /// match objects.get(&request_id) { /// Some(&PluginObject::Memory { slot, length }) => vm.get_dirty_log(slot, &mut dirty_log[..]), /// _ => return Err(SysError::new(ENOENT)), /// } /// ``` enum PluginObject { IoEvent { evt: Event, addr: IoeventAddress, length: u32, datamatch: u64, }, Memory { slot: u32, length: usize, }, IrqEvent { irq_id: u32, evt: Event, }, } impl PluginObject { fn destroy(self, vm: &mut Vm) -> SysResult<()> { match self { PluginObject::IoEvent { evt, addr, length, datamatch, } => match length { 0 => vm.unregister_ioevent(&evt, addr, Datamatch::AnyLength), 1 => vm.unregister_ioevent(&evt, addr, Datamatch::U8(Some(datamatch as u8))), 2 => vm.unregister_ioevent(&evt, addr, Datamatch::U16(Some(datamatch as u16))), 4 => vm.unregister_ioevent(&evt, addr, Datamatch::U32(Some(datamatch as u32))), 8 => vm.unregister_ioevent(&evt, addr, Datamatch::U64(Some(datamatch as u64))), _ => Err(SysError::new(EINVAL)), }, PluginObject::Memory { slot, .. 
} => vm.remove_memory_region(slot).and(Ok(())), PluginObject::IrqEvent { irq_id, evt } => vm.unregister_irqfd(&evt, irq_id), } } } pub fn run_vcpus( kvm: &Kvm, vm: &Vm, plugin: &Process, vcpu_count: u32, kill_signaled: &Arc<AtomicBool>, exit_evt: &Event, vcpu_handles: &mut Vec<thread::JoinHandle<()>>, ) -> Result<()> { let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_count) as usize)); let use_kvm_signals = !kvm.check_extension(Cap::ImmediateExit); // If we need to force a vcpu to exit from a VM then a SIGRTMIN signal is sent // to that vcpu's thread. If KVM is running the VM then it'll return -EINTR. // An issue is what to do when KVM isn't running the VM (where we could be // in the kernel or in the app). // // If KVM supports "immediate exit" then we set a signal handler that will // set the |immediate_exit| flag that tells KVM to return -EINTR before running // the VM. // // If KVM doesn't support immediate exit then we'll block SIGRTMIN in the app // and tell KVM to unblock SIGRTMIN before running the VM (at which point a blocked // signal might get asserted). There's overhead to have KVM unblock and re-block // SIGRTMIN each time it runs the VM, so this mode should be avoided. if use_kvm_signals { unsafe { extern "C" fn handle_signal(_: c_int) {} // Our signal handler does nothing and is trivially async signal safe. // We need to install this signal handler even though we do block // the signal below, to ensure that this signal will interrupt // execution of KVM_RUN (this is implementation issue). register_rt_signal_handler(SIGRTMIN() + 0, handle_signal) .expect("failed to register vcpu signal handler"); } // We do not really want the signal handler to run... 
block_signal(SIGRTMIN() + 0).expect("failed to block signal"); } else { unsafe { extern "C" fn handle_signal(_: c_int) { Vcpu::set_local_immediate_exit(true); } register_rt_signal_handler(SIGRTMIN() + 0, handle_signal) .expect("failed to register vcpu signal handler"); } } for cpu_id in 0..vcpu_count { let kill_signaled = kill_signaled.clone(); let vcpu_thread_barrier = vcpu_thread_barrier.clone(); let vcpu_exit_evt = exit_evt.try_clone().context("failed to clone event")?; let vcpu_plugin = plugin.create_vcpu(cpu_id)?; let vcpu = Vcpu::new(cpu_id as c_ulong, kvm, vm).context("error creating vcpu")?; vcpu_handles.push( thread::Builder::new() .name(format!("crosvm_vcpu{}", cpu_id)) .spawn(move || { if use_kvm_signals { // Tell KVM to not block anything when entering kvm run // because we will be using first RT signal to kick the VCPU. vcpu.set_signal_mask(&[]) .expect("failed to set up KVM VCPU signal mask"); } if let Err(e) = enable_core_scheduling() { error!("Failed to enable core scheduling: {}", e); } let vcpu = vcpu .to_runnable(Some(SIGRTMIN() + 0)) .expect("Failed to set thread id"); let res = vcpu_plugin.init(&vcpu); vcpu_thread_barrier.wait(); if let Err(e) = res { error!("failed to initialize vcpu {}: {}", cpu_id, e); } else { loop { let mut interrupted_by_signal = false; let run_res = vcpu.run(); match run_res { Ok(run) => match run { VcpuExit::IoIn { port, mut size } => { let mut data = [0; 256]; if size > data.len() { error!( "unsupported IoIn size of {} bytes at port {:#x}", size, port ); size = data.len(); } vcpu_plugin.io_read(port as u64, &mut data[..size], &vcpu); if let Err(e) = vcpu.set_data(&data[..size]) { error!( "failed to set return data for IoIn at port {:#x}: {}", port, e ); } } VcpuExit::IoOut { port, mut size, data, } => { if size > data.len() { error!("unsupported IoOut size of {} bytes at port {:#x}", size, port); size = data.len(); } vcpu_plugin.io_write(port as u64, &data[..size], &vcpu); } VcpuExit::MmioRead { address, size } => { 
let mut data = [0; 8]; vcpu_plugin.mmio_read( address as u64, &mut data[..size], &vcpu, ); // Setting data for mmio can not fail. let _ = vcpu.set_data(&data[..size]); } VcpuExit::MmioWrite { address, size, data, } => { vcpu_plugin.mmio_write( address as u64, &data[..size], &vcpu, ); } VcpuExit::HypervHcall { input, params } => { let mut data = [0; 8]; vcpu_plugin.hyperv_call(input, params, &mut data, &vcpu); // Setting data for hyperv call can not fail. let _ = vcpu.set_data(&data); } VcpuExit::HypervSynic { msr, control, evt_page, msg_page, } => { vcpu_plugin .hyperv_synic(msr, control, evt_page, msg_page, &vcpu); } VcpuExit::Hlt => break, VcpuExit::Shutdown => break, VcpuExit::InternalError => { error!("vcpu {} has internal error", cpu_id); break; } r => warn!("unexpected vcpu exit: {:?}", r), }, Err(e) => match e.errno() { EINTR => interrupted_by_signal = true, EAGAIN => {} _ => { error!("vcpu hit unknown error: {}", e); break; } }, } if kill_signaled.load(Ordering::SeqCst) { break; }<|fim▁hole|> // Only handle the pause request if kvm reported that it was // interrupted by a signal. This helps to entire that KVM has had a chance // to finish emulating any IO that may have immediately happened. // If we eagerly check pre_run() then any IO that we // just reported to the plugin won't have been processed yet by KVM. // Not eagerly calling pre_run() also helps to reduce // any overhead from checking if a pause request is pending. // The assumption is that pause requests aren't common // or frequent so it's better to optimize for the non-pause execution paths. 
if interrupted_by_signal { if use_kvm_signals { clear_signal(SIGRTMIN() + 0) .expect("failed to clear pending signal"); } else { vcpu.set_immediate_exit(false); } if let Err(e) = vcpu_plugin.pre_run(&vcpu) { error!("failed to process pause on vcpu {}: {}", cpu_id, e); break; } } } } vcpu_exit_evt .write(1) .expect("failed to signal vcpu exit event"); }) .context("error spawning vcpu thread")?, ); } Ok(()) } #[derive(PollToken)] enum Token { Exit, ChildSignal, Stderr, Plugin { index: usize }, } /// Run a VM with a plugin process specified by `cfg`. /// /// Not every field of `cfg` will be used. In particular, most field that pertain to a specific /// device are ignored because the plugin is responsible for emulating hardware. pub fn run_config(cfg: Config) -> Result<()> { info!("crosvm starting plugin process"); // Masking signals is inherently dangerous, since this can persist across clones/execs. Do this // before any jailed devices have been spawned, so that we can catch any of them that fail very // quickly. let sigchld_fd = SignalFd::new(SIGCHLD).context("failed to create signalfd")?; // Create a pipe to capture error messages from plugin and minijail. let (mut stderr_rd, stderr_wr) = pipe(true).context("failed to create stderr pipe")?; add_fd_flags(stderr_rd.as_raw_descriptor(), O_NONBLOCK) .context("error marking stderr nonblocking")?; let jail = if cfg.sandbox { // An empty directory for jailed plugin pivot root. 
let root_path = match &cfg.plugin_root { Some(dir) => dir, None => Path::new(option_env!("DEFAULT_PIVOT_ROOT").unwrap_or("/var/empty")), }; if root_path.is_relative() { bail!("path to the root directory must be absolute"); } if !root_path.exists() { bail!("no root directory for jailed process to pivot root into"); } if !root_path.is_dir() { bail!("specified root directory is not a directory"); } let policy_path = cfg.seccomp_policy_dir.join("plugin"); let mut jail = create_plugin_jail(root_path, cfg.seccomp_log_failures, &policy_path)?; // Update gid map of the jail if caller provided supplemental groups. if !cfg.plugin_gid_maps.is_empty() { let map = format!("0 {} 1", getegid()) + &cfg .plugin_gid_maps .into_iter() .map(|m| format!(",{} {} {}", m.inner, m.outer, m.count)) .collect::<String>(); jail.gidmap(&map).context("failed to set gidmap for jail")?; } // Mount minimal set of devices (full, zero, urandom, etc). We can not use // jail.mount_dev() here because crosvm may not be running with CAP_SYS_ADMIN. 
let device_names = ["full", "null", "urandom", "zero"]; for name in &device_names { let device = Path::new("/dev").join(&name); jail.mount_bind(&device, &device, true) .context("failed to mount dev")?; } for bind_mount in &cfg.plugin_mounts { jail.mount_bind(&bind_mount.src, &bind_mount.dst, bind_mount.writable) .with_context(|| { format!( "failed to bind mount {} -> {} as {} ", bind_mount.src.display(), bind_mount.dst.display(), if bind_mount.writable { "writable" } else { "read only" } ) })?; } Some(jail) } else { None }; let mut tap_interfaces: Vec<Tap> = Vec::new(); if let Some(host_ip) = cfg.host_ip { if let Some(netmask) = cfg.netmask { if let Some(mac_address) = cfg.mac_address { let tap = Tap::new(false, false).context("error opening tap device")?; tap.set_ip_addr(host_ip).context("error setting tap ip")?; tap.set_netmask(netmask) .context("error setting tap netmask")?; tap.set_mac_address(mac_address) .context("error setting tap mac address")?; tap.enable().context("error enabling tap device")?; tap_interfaces.push(tap); } } } for tap_fd in cfg.tap_fd { // Safe because we ensure that we get a unique handle to the fd. let tap = unsafe { Tap::from_raw_descriptor( validate_raw_descriptor(tap_fd).context("failed to validate raw tap fd")?, ) .context("failed to create tap device from raw fd")? 
}; tap_interfaces.push(tap); } let plugin_args: Vec<&str> = cfg.params.iter().map(|s| &s[..]).collect(); let plugin_path = match cfg.executable_path { Some(Executable::Plugin(ref plugin_path)) => plugin_path.as_path(), _ => panic!("Executable was not a plugin"), }; let vcpu_count = cfg.vcpu_count.unwrap_or(1) as u32; let mem = GuestMemory::new(&[]).unwrap(); let mut mem_policy = MemoryPolicy::empty(); if cfg.hugepages { mem_policy |= MemoryPolicy::USE_HUGEPAGES; } mem.set_memory_policy(mem_policy); let kvm = Kvm::new_with_path(&cfg.kvm_device_path).context("error creating Kvm")?; let mut vm = Vm::new(&kvm, mem).context("error creating vm")?; vm.create_irq_chip() .context("failed to create kvm irqchip")?; vm.create_pit().context("failed to create kvm PIT")?; let mut plugin = Process::new(vcpu_count, plugin_path, &plugin_args, jail, stderr_wr)?; // Now that the jail for the plugin has been created and we had a chance to adjust gids there, // we can drop all our capabilities in case we had any. drop_capabilities().context("failed to drop process capabilities")?; let mut res = Ok(()); // If Some, we will exit after enough time is passed to shutdown cleanly. let mut dying_instant: Option<Instant> = None; let duration_to_die = Duration::from_millis(1000); let exit_evt = Event::new().context("failed to create event")?; let kill_signaled = Arc::new(AtomicBool::new(false)); let mut vcpu_handles = Vec::with_capacity(vcpu_count as usize); let wait_ctx = WaitContext::build_with(&[ (&exit_evt, Token::Exit), (&sigchld_fd, Token::ChildSignal), (&stderr_rd, Token::Stderr), ]) .context("failed to add control descriptors to wait context")?; let mut sockets_to_drop = Vec::new(); let mut redo_wait_ctx_sockets = true; // In this loop, make every attempt to not return early. If an error is encountered, set `res` // to the error, set `dying_instant` to now, and signal the plugin that it will be killed soon. 
// If the plugin cannot be signaled because it is dead of `signal_kill` failed, simply break // from the poll loop so that the VCPU threads can be cleaned up. 'wait: loop { // After we have waited long enough, it's time to give up and exit. if dying_instant .map(|i| i.elapsed() >= duration_to_die) .unwrap_or(false) { break; } if redo_wait_ctx_sockets { for (index, socket) in plugin.sockets().iter().enumerate() { wait_ctx .add(socket, Token::Plugin { index }) .context("failed to add plugin sockets to wait context")?; } } let plugin_socket_count = plugin.sockets().len(); let events = { let poll_res = match dying_instant { Some(inst) => wait_ctx.wait_timeout(duration_to_die - inst.elapsed()), None => wait_ctx.wait(), }; match poll_res { Ok(v) => v, Err(e) => { // Polling no longer works, time to break and cleanup, if res.is_ok() { res = Err(e).context("failed to poll all FDs"); } break; } } }; for event in events.iter().filter(|e| e.is_hungup) { if let Token::Stderr = event.token { let _ = wait_ctx.delete(&stderr_rd); } } for event in events.iter().filter(|e| e.is_readable) { match event.token { Token::Exit => { // No need to check the exit event if we are already doing cleanup. let _ = wait_ctx.delete(&exit_evt); dying_instant.get_or_insert(Instant::now()); let sig_res = plugin.signal_kill(); if res.is_ok() && sig_res.is_err() { res = sig_res.context("error sending kill signal to plugin on exit event"); } } Token::ChildSignal => { // Print all available siginfo structs, then exit the loop. loop { match sigchld_fd.read() { Ok(Some(siginfo)) => { // If the plugin process has ended, there is no need to continue // processing plugin connections, so we break early. if siginfo.ssi_pid == plugin.pid() as u32 { break 'wait; } // Because SIGCHLD is not expected from anything other than the // plugin process, report it as an error. 
if res.is_ok() { res = Err(anyhow!( "process {} died with signal {}, status {}, and code {}", siginfo.ssi_pid, siginfo.ssi_signo, siginfo.ssi_status, siginfo.ssi_code, )); } } Ok(None) => break, // No more signals to read. Err(e) => { // Something really must be messed up for this to happen, continue // processing connections for a limited time. if res.is_ok() { res = Err(e).context("failed to read signal fd"); } break; } } } // As we only spawn the plugin process, getting a SIGCHLD can only mean // something went wrong. dying_instant.get_or_insert(Instant::now()); let sig_res = plugin.signal_kill(); if res.is_ok() && sig_res.is_err() { res = sig_res.context("error sending kill signal to plugin on SIGCHLD"); } } Token::Stderr => loop { let mut buf = [0u8; 4096]; match stderr_rd.read(&mut buf) { Ok(len) => { for l in String::from_utf8_lossy(&buf[0..len]).lines() { error!("minijail/plugin: {}", l); } } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { break; } Err(e) => { error!("failed reading from stderr: {}", e); break; } } }, Token::Plugin { index } => { match plugin.handle_socket(index, &kvm, &mut vm, &vcpu_handles, &tap_interfaces) { Ok(_) => {} // A HUP is an expected event for a socket, so don't bother warning about // it. Err(CommError::PluginSocketHup) => sockets_to_drop.push(index), // Only one connection out of potentially many is broken. Drop it, but don't // start cleaning up. Because the error isn't returned, we will warn about // it here. 
Err(e) => { warn!("error handling plugin socket: {}", e); sockets_to_drop.push(index); } } } } } if vcpu_handles.is_empty() && dying_instant.is_none() && plugin.is_started() { let res = run_vcpus( &kvm, &vm, &plugin, vcpu_count, &kill_signaled, &exit_evt, &mut vcpu_handles, ); if let Err(e) = res { dying_instant.get_or_insert(Instant::now()); error!("failed to start vcpus: {}", e); } } redo_wait_ctx_sockets = !sockets_to_drop.is_empty() || plugin.sockets().len() != plugin_socket_count; // Cleanup all of the sockets that we have determined were disconnected or suffered some // other error. plugin.drop_sockets(&mut sockets_to_drop); sockets_to_drop.clear(); if redo_wait_ctx_sockets { for socket in plugin.sockets() { let _ = wait_ctx.delete(socket); } } } // vcpu threads MUST see the kill signaled flag, otherwise they may re-enter the VM. kill_signaled.store(true, Ordering::SeqCst); // Depending on how we ended up here, the plugin process, or a VCPU thread waiting for requests // might be stuck. The `signal_kill` call will unstick all the VCPU threads by closing their // blocked connections. plugin .signal_kill() .context("error sending kill signal to plugin on cleanup")?; for handle in vcpu_handles { match handle.kill(SIGRTMIN() + 0) { Ok(_) => { if let Err(e) = handle.join() { error!("failed to join vcpu thread: {:?}", e); } } Err(e) => error!("failed to kill vcpu thread: {}", e), } } match plugin.try_wait() { // The plugin has run out of time by now Ok(ProcessStatus::Running) => Err(anyhow!("plugin did not exit within timeout")), // Return an error discovered earlier in this function. Ok(ProcessStatus::Success) => res.map_err(anyhow::Error::msg), Ok(ProcessStatus::Fail(code)) => Err(anyhow!("plugin exited with error: {}", code)), Ok(ProcessStatus::Signal(code)) => Err(anyhow!("plugin exited with signal {}", code)), Err(e) => Err(anyhow!("error waiting for plugin to exit: {}", e)), } }<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Support for RainMachine devices.""" import asyncio from datetime import timedelta import logging from regenmaschine import Client from regenmaschine.errors import RainMachineError import voluptuous as vol from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_IP_ADDRESS, CONF_PASSWORD, CONF_PORT, CONF_SSL, ) from homeassistant.core import callback from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import aiohttp_client, config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.helpers.service import verify_domain_control from .const import ( CONF_ZONE_RUN_TIME, DATA_CLIENT, DATA_PROGRAMS, DATA_PROVISION_SETTINGS, DATA_RESTRICTIONS_CURRENT, DATA_RESTRICTIONS_UNIVERSAL, DATA_ZONES, DATA_ZONES_DETAILS, DEFAULT_ZONE_RUN, DOMAIN, PROGRAM_UPDATE_TOPIC, SENSOR_UPDATE_TOPIC, ZONE_UPDATE_TOPIC, ) _LOGGER = logging.getLogger(__name__) CONF_PROGRAM_ID = "program_id" CONF_SECONDS = "seconds" CONF_ZONE_ID = "zone_id" DATA_LISTENER = "listener" DEFAULT_ATTRIBUTION = "Data provided by Green Electronics LLC" DEFAULT_ICON = "mdi:water" DEFAULT_SCAN_INTERVAL = timedelta(seconds=60) DEFAULT_SSL = True SERVICE_ALTER_PROGRAM = vol.Schema({vol.Required(CONF_PROGRAM_ID): cv.positive_int}) SERVICE_ALTER_ZONE = vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int}) SERVICE_PAUSE_WATERING = vol.Schema({vol.Required(CONF_SECONDS): cv.positive_int}) SERVICE_START_PROGRAM_SCHEMA = vol.Schema( {vol.Required(CONF_PROGRAM_ID): cv.positive_int} ) SERVICE_START_ZONE_SCHEMA = vol.Schema( { vol.Required(CONF_ZONE_ID): cv.positive_int, vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN): cv.positive_int, } ) SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema( {vol.Required(CONF_PROGRAM_ID): cv.positive_int} ) SERVICE_STOP_ZONE_SCHEMA = 
vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int}) CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119") async def async_setup(hass, config): """Set up the RainMachine component.""" hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}} return True async def async_setup_entry(hass, config_entry): """Set up RainMachine as config entry.""" entry_updates = {} if not config_entry.unique_id: # If the config entry doesn't already have a unique ID, set one: entry_updates["unique_id"] = config_entry.data[CONF_IP_ADDRESS] if CONF_ZONE_RUN_TIME in config_entry.data: # If a zone run time exists in the config entry's data, pop it and move it to # options: data = {**config_entry.data} entry_updates["data"] = data entry_updates["options"] = { **config_entry.options, CONF_ZONE_RUN_TIME: data.pop(CONF_ZONE_RUN_TIME), } if entry_updates: hass.config_entries.async_update_entry(config_entry, **entry_updates) _verify_domain_control = verify_domain_control(hass, DOMAIN) websession = aiohttp_client.async_get_clientsession(hass) client = Client(session=websession) try: await client.load_local( config_entry.data[CONF_IP_ADDRESS], config_entry.data[CONF_PASSWORD], port=config_entry.data[CONF_PORT], ssl=config_entry.data.get(CONF_SSL, DEFAULT_SSL), ) except RainMachineError as err: _LOGGER.error("An error occurred: %s", err) raise ConfigEntryNotReady from err else: # regenmaschine can load multiple controllers at once, but we only grab the one # we loaded above: controller = next(iter(client.controllers.values())) rainmachine = RainMachine(hass, config_entry, controller) # Update the data object, which at this point (prior to any sensors registering # "interest" in the API), will focus on grabbing the latest program and zone data: await rainmachine.async_update() hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine for component in ("binary_sensor", "sensor", "switch"): hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, 
component) ) @_verify_domain_control async def disable_program(call): """Disable a program.""" await rainmachine.controller.programs.disable(call.data[CONF_PROGRAM_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def disable_zone(call): """Disable a zone.""" await rainmachine.controller.zones.disable(call.data[CONF_ZONE_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def enable_program(call): """Enable a program.""" await rainmachine.controller.programs.enable(call.data[CONF_PROGRAM_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def enable_zone(call):<|fim▁hole|> await rainmachine.controller.zones.enable(call.data[CONF_ZONE_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def pause_watering(call): """Pause watering for a set number of seconds.""" await rainmachine.controller.watering.pause_all(call.data[CONF_SECONDS]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def start_program(call): """Start a particular program.""" await rainmachine.controller.programs.start(call.data[CONF_PROGRAM_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def start_zone(call): """Start a particular zone for a certain amount of time.""" await rainmachine.controller.zones.start( call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME] ) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def stop_all(call): """Stop all watering.""" await rainmachine.controller.watering.stop_all() await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def stop_program(call): """Stop a program.""" await rainmachine.controller.programs.stop(call.data[CONF_PROGRAM_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def stop_zone(call): """Stop a zone.""" await 
rainmachine.controller.zones.stop(call.data[CONF_ZONE_ID]) await rainmachine.async_update_programs_and_zones() @_verify_domain_control async def unpause_watering(call): """Unpause watering.""" await rainmachine.controller.watering.unpause_all() await rainmachine.async_update_programs_and_zones() for service, method, schema in [ ("disable_program", disable_program, SERVICE_ALTER_PROGRAM), ("disable_zone", disable_zone, SERVICE_ALTER_ZONE), ("enable_program", enable_program, SERVICE_ALTER_PROGRAM), ("enable_zone", enable_zone, SERVICE_ALTER_ZONE), ("pause_watering", pause_watering, SERVICE_PAUSE_WATERING), ("start_program", start_program, SERVICE_START_PROGRAM_SCHEMA), ("start_zone", start_zone, SERVICE_START_ZONE_SCHEMA), ("stop_all", stop_all, {}), ("stop_program", stop_program, SERVICE_STOP_PROGRAM_SCHEMA), ("stop_zone", stop_zone, SERVICE_STOP_ZONE_SCHEMA), ("unpause_watering", unpause_watering, {}), ]: hass.services.async_register(DOMAIN, service, method, schema=schema) hass.data[DOMAIN][DATA_LISTENER] = config_entry.add_update_listener( async_reload_entry ) return True async def async_unload_entry(hass, config_entry): """Unload an OpenUV config entry.""" hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id) cancel_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id) cancel_listener() tasks = [ hass.config_entries.async_forward_entry_unload(config_entry, component) for component in ("binary_sensor", "sensor", "switch") ] await asyncio.gather(*tasks) return True async def async_reload_entry(hass, config_entry): """Handle an options update.""" await hass.config_entries.async_reload(config_entry.entry_id) class RainMachine: """Define a generic RainMachine object.""" def __init__(self, hass, config_entry, controller): """Initialize.""" self._async_cancel_time_interval_listener = None self.config_entry = config_entry self.controller = controller self.data = {} self.device_mac = controller.mac self.hass = hass self._api_category_count = { 
DATA_PROVISION_SETTINGS: 0, DATA_RESTRICTIONS_CURRENT: 0, DATA_RESTRICTIONS_UNIVERSAL: 0, } self._api_category_locks = { DATA_PROVISION_SETTINGS: asyncio.Lock(), DATA_RESTRICTIONS_CURRENT: asyncio.Lock(), DATA_RESTRICTIONS_UNIVERSAL: asyncio.Lock(), } async def _async_update_listener_action(self, now): """Define an async_track_time_interval action to update data.""" await self.async_update() @callback def async_deregister_sensor_api_interest(self, api_category): """Decrement the number of entities with data needs from an API category.""" # If this deregistration should leave us with no registration at all, remove the # time interval: if sum(self._api_category_count.values()) == 0: if self._async_cancel_time_interval_listener: self._async_cancel_time_interval_listener() self._async_cancel_time_interval_listener = None return self._api_category_count[api_category] -= 1 async def async_fetch_from_api(self, api_category): """Execute the appropriate coroutine to fetch particular data from the API.""" if api_category == DATA_PROGRAMS: data = await self.controller.programs.all(include_inactive=True) elif api_category == DATA_PROVISION_SETTINGS: data = await self.controller.provisioning.settings() elif api_category == DATA_RESTRICTIONS_CURRENT: data = await self.controller.restrictions.current() elif api_category == DATA_RESTRICTIONS_UNIVERSAL: data = await self.controller.restrictions.universal() elif api_category == DATA_ZONES: data = await self.controller.zones.all(include_inactive=True) elif api_category == DATA_ZONES_DETAILS: # This API call needs to be separate from the DATA_ZONES one above because, # maddeningly, the DATA_ZONES_DETAILS API call doesn't include the current # state of the zone: data = await self.controller.zones.all(details=True, include_inactive=True) self.data[api_category] = data async def async_register_sensor_api_interest(self, api_category): """Increment the number of entities with data needs from an API category.""" # If this is the first 
registration we have, start a time interval: if not self._async_cancel_time_interval_listener: self._async_cancel_time_interval_listener = async_track_time_interval( self.hass, self._async_update_listener_action, DEFAULT_SCAN_INTERVAL, ) self._api_category_count[api_category] += 1 # If a sensor registers interest in a particular API call and the data doesn't # exist for it yet, make the API call and grab the data: async with self._api_category_locks[api_category]: if api_category not in self.data: await self.async_fetch_from_api(api_category) async def async_update(self): """Update all RainMachine data.""" tasks = [self.async_update_programs_and_zones(), self.async_update_sensors()] await asyncio.gather(*tasks) async def async_update_sensors(self): """Update sensor/binary sensor data.""" _LOGGER.debug("Updating sensor data for RainMachine") # Fetch an API category if there is at least one interested entity: tasks = {} for category, count in self._api_category_count.items(): if count == 0: continue tasks[category] = self.async_fetch_from_api(category) results = await asyncio.gather(*tasks.values(), return_exceptions=True) for api_category, result in zip(tasks, results): if isinstance(result, RainMachineError): _LOGGER.error( "There was an error while updating %s: %s", api_category, result ) continue async_dispatcher_send(self.hass, SENSOR_UPDATE_TOPIC) async def async_update_programs_and_zones(self): """Update program and zone data. Program and zone updates always go together because of how linked they are: programs affect zones and certain combinations of zones affect programs. Note that this call does not take into account interested entities when making the API calls; we make the reasonable assumption that switches will always be enabled. 
""" _LOGGER.debug("Updating program and zone data for RainMachine") tasks = { DATA_PROGRAMS: self.async_fetch_from_api(DATA_PROGRAMS), DATA_ZONES: self.async_fetch_from_api(DATA_ZONES), DATA_ZONES_DETAILS: self.async_fetch_from_api(DATA_ZONES_DETAILS), } results = await asyncio.gather(*tasks.values(), return_exceptions=True) for api_category, result in zip(tasks, results): if isinstance(result, RainMachineError): _LOGGER.error( "There was an error while updating %s: %s", api_category, result ) async_dispatcher_send(self.hass, PROGRAM_UPDATE_TOPIC) async_dispatcher_send(self.hass, ZONE_UPDATE_TOPIC) class RainMachineEntity(Entity): """Define a generic RainMachine entity.""" def __init__(self, rainmachine): """Initialize.""" self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION} self._device_class = None self._name = None self.rainmachine = rainmachine @property def device_class(self): """Return the device class.""" return self._device_class @property def device_info(self): """Return device registry information for this entity.""" return { "identifiers": {(DOMAIN, self.rainmachine.controller.mac)}, "name": self.rainmachine.controller.name, "manufacturer": "RainMachine", "model": ( f"Version {self.rainmachine.controller.hardware_version} " f"(API: {self.rainmachine.controller.api_version})" ), "sw_version": self.rainmachine.controller.software_version, } @property def device_state_attributes(self) -> dict: """Return the state attributes.""" return self._attrs @property def name(self) -> str: """Return the name of the entity.""" return self._name @property def should_poll(self): """Disable polling.""" return False @callback def _update_state(self): """Update the state.""" self.update_from_latest_data() self.async_write_ha_state() @callback def update_from_latest_data(self): """Update the entity.""" raise NotImplementedError<|fim▁end|>
"""Enable a zone."""
<|file_name|>builtin-superkinds-capabilities-transitive.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Tests "transitivity" of super-builtin-kinds on traits. Here, if // we have a Foo, we know we have a Bar, and if we have a Bar, we // know we have a Send. So if we have a Foo we should know we have // a Send. Basically this just makes sure rustc is using // each_bound_trait_and_supertraits in type_contents correctly. trait Bar : Send { } trait Foo : Bar { }<|fim▁hole|>impl <T: Send> Foo for T { } impl <T: Send> Bar for T { } fn foo<T: Foo>(val: T, chan: Sender<T>) { chan.send(val); } pub fn main() { let (tx, rx) = channel(); foo(31337, tx); assert!(rx.recv() == 31337); }<|fim▁end|>
<|file_name|>InitConfigFileParser.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2.0, as published by the Free Software Foundation. This program is also distributed with certain software (including but not limited to OpenSSL) that is licensed under separate terms,<|fim▁hole|> separately licensed software that they have included with MySQL. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0, for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <ndb_global.h> #include <ndb_version.h> #include "InitConfigFileParser.hpp" #include "Config.hpp" #include <NdbOut.hpp> #include "ConfigInfo.hpp" #include "EventLogger.hpp" #include <m_string.h> #include <util/SparseBitmask.hpp> #include "../common/util/parse_mask.hpp" extern EventLogger *g_eventLogger; const int MAX_LINE_LENGTH = 1024; // Max length of line of text in config file static void trim(char *); //**************************************************************************** // Ctor / Dtor //**************************************************************************** InitConfigFileParser::InitConfigFileParser() { m_info = new ConfigInfo(); } InitConfigFileParser::~InitConfigFileParser() { delete m_info; } //**************************************************************************** // Read Config File //**************************************************************************** InitConfigFileParser::Context::Context(const ConfigInfo * info) : m_userProperties(true), 
m_configValues(1000, 20) { m_config = new Properties(true); m_defaults = new Properties(true); } InitConfigFileParser::Context::~Context(){ if(m_config != 0) delete m_config; if(m_defaults != 0) delete m_defaults; } Config * InitConfigFileParser::parseConfig(const char * filename) { FILE * file = fopen(filename, "r"); if(file == 0){ g_eventLogger->error("Error opening '%s', error: %d, %s", filename, errno, strerror(errno)); return 0; } Config * ret = parseConfig(file); fclose(file); return ret; } Config * InitConfigFileParser::parseConfig(FILE * file) { char line[MAX_LINE_LENGTH]; Context ctx(m_info); ctx.m_lineno = 0; ctx.m_currentSection = 0; /************* * Open file * *************/ if (file == NULL) { return 0; } /*********************** * While lines to read * ***********************/ while (fgets(line, MAX_LINE_LENGTH, file)) { ctx.m_lineno++; trim(line); if (isEmptyLine(line)) // Skip if line is empty or comment continue; // End with NULL instead of newline if (line[strlen(line)-1] == '\n') line[strlen(line)-1] = '\0'; /******************************** * 1. Parse new default section * ********************************/ if (char* section = parseDefaultSectionHeader(line)) { if(!storeSection(ctx)){ free(section); ctx.reportError("Could not store previous default section " "of configuration file."); return 0; } BaseString::snprintf(ctx.fname, sizeof(ctx.fname), "%s", section); free(section); ctx.type = InitConfigFileParser::DefaultSection; ctx.m_sectionLineno = ctx.m_lineno; ctx.m_currentSection = new Properties(true); ctx.m_userDefaults = NULL; require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0); require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0); continue; } /************************ * 2. 
Parse new section * ************************/ if (char* section = parseSectionHeader(line)) { if(!storeSection(ctx)){ free(section); ctx.reportError("Could not store previous section " "of configuration file."); return 0; } BaseString::snprintf(ctx.fname, sizeof(ctx.fname), "%s", section); free(section); ctx.type = InitConfigFileParser::Section; ctx.m_sectionLineno = ctx.m_lineno; ctx.m_currentSection = new Properties(true); ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults); require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0); require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0); continue; } /**************************** * 3. Parse name-value pair * ****************************/ if (!parseNameValuePair(ctx, line)) { ctx.reportError("Could not parse name-value pair in config file."); return 0; } } if (ferror(file)){ ctx.reportError("Failure in reading"); return 0; } if(!storeSection(ctx)) { ctx.reportError("Could not store section of configuration file."); return 0; } return run_config_rules(ctx); } Config* InitConfigFileParser::run_config_rules(Context& ctx) { for(size_t i = 0; ConfigInfo::m_ConfigRules[i].m_configRule != 0; i++){ ctx.type = InitConfigFileParser::Undefined; ctx.m_info = m_info; ctx.m_currentSection = 0; ctx.m_userDefaults = 0; ctx.m_currentInfo = 0; ctx.m_systemDefaults = 0; Vector<ConfigInfo::ConfigRuleSection> tmp; if(!(* ConfigInfo::m_ConfigRules[i].m_configRule)(tmp, ctx, ConfigInfo::m_ConfigRules[i].m_ruleData)) return 0; for(unsigned j = 0; j<tmp.size(); j++){ BaseString::snprintf(ctx.fname, sizeof(ctx.fname), "%s", tmp[j].m_sectionType.c_str()); ctx.type = InitConfigFileParser::Section; ctx.m_currentSection = tmp[j].m_sectionData; ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults); require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0); require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0); if(!storeSection(ctx)) return 0; } } Uint32 nConnections = 0; Uint32 nComputers = 0; 
Uint32 nNodes = 0; Uint32 nExtConnections = 0; const char * system = "?"; ctx.m_userProperties.get("NoOfConnections", &nConnections); ctx.m_userProperties.get("NoOfComputers", &nComputers); ctx.m_userProperties.get("NoOfNodes", &nNodes); ctx.m_userProperties.get("ExtNoOfConnections", &nExtConnections); ctx.m_userProperties.get("ExtSystem", &system); ctx.m_config->put("NoOfConnections", nConnections); ctx.m_config->put("NoOfComputers", nComputers); ctx.m_config->put("NoOfNodes", nNodes); char tmpLine[MAX_LINE_LENGTH]; BaseString::snprintf(tmpLine, MAX_LINE_LENGTH, "EXTERNAL SYSTEM_%s:NoOfConnections", system); ctx.m_config->put(tmpLine, nExtConnections); return new Config(ctx.m_configValues.getConfigValues()); } //**************************************************************************** // Parse Name-Value Pair //**************************************************************************** bool InitConfigFileParser::parseNameValuePair(Context& ctx, const char* line) { if (ctx.m_currentSection == NULL){ ctx.reportError("Value specified outside section"); return false; } // ************************************* // Split string at first occurrence of // '=' or ':' // ************************************* Vector<BaseString> tmp_string_split; if (BaseString(line).split(tmp_string_split, "=:", 2) != 2) { ctx.reportError("Parse error"); return false; } // ************************************* // Remove all after # // ************************************* Vector<BaseString> tmp_string_split2; tmp_string_split[1].split(tmp_string_split2, "#", 2); tmp_string_split[1]=tmp_string_split2[0]; // ************************************* // Remove leading and trailing chars // ************************************* { for (int i = 0; i < 2; i++) tmp_string_split[i].trim("\r\n \t"); } return storeNameValuePair(ctx, tmp_string_split[0].c_str(), // fname tmp_string_split[1].c_str()); // value } bool InitConfigFileParser::storeNameValuePair(Context& ctx, const char* fname, const char* 
value) { if (ctx.m_currentSection->contains(fname)) { ctx.reportError("[%s] Parameter %s specified twice", ctx.fname, fname); return false; } if (!ctx.m_currentInfo->contains(fname)) { ctx.reportError("[%s] Unknown parameter: %s", ctx.fname, fname); return false; } ConfigInfo::Status status = m_info->getStatus(ctx.m_currentInfo, fname); if (status == ConfigInfo::CI_NOTIMPLEMENTED) { ctx.reportWarning("[%s] %s not yet implemented", ctx.fname, fname); } if (status == ConfigInfo::CI_DEPRECATED) { const char * desc = m_info->getDescription(ctx.m_currentInfo, fname); if(desc && desc[0]){ ctx.reportWarning("[%s] %s is deprecated, use %s instead", ctx.fname, fname, desc); } else if (desc == 0){ ctx.reportWarning("[%s] %s is deprecated", ctx.fname, fname); } } const ConfigInfo::Type type = m_info->getType(ctx.m_currentInfo, fname); switch(type){ case ConfigInfo::CI_BOOL: { bool value_bool; if (!convertStringToBool(value, value_bool)) { ctx.reportError("Illegal boolean value for parameter %s", fname); return false; } require(ctx.m_currentSection->put(fname, value_bool)); break; } case ConfigInfo::CI_INT: case ConfigInfo::CI_INT64:{ Uint64 value_int; if (!convertStringToUint64(value, value_int)) { ctx.reportError("Illegal integer value for parameter %s", fname); return false; } if (!m_info->verify(ctx.m_currentInfo, fname, value_int)) { ctx.reportError("Illegal value %s for parameter %s.\n" "Legal values are between %llu and %llu", value, fname, m_info->getMin(ctx.m_currentInfo, fname), m_info->getMax(ctx.m_currentInfo, fname)); return false; } if(type == ConfigInfo::CI_INT){ require(ctx.m_currentSection->put(fname, (Uint32)value_int)); } else { require(ctx.m_currentSection->put64(fname, value_int)); } break; } case ConfigInfo::CI_STRING: require(ctx.m_currentSection->put(fname, value)); break; case ConfigInfo::CI_ENUM:{ Uint32 value_int; if (!m_info->verify_enum(ctx.m_currentInfo, fname, value, value_int)) { BaseString values; m_info->get_enum_values(ctx.m_currentInfo, 
fname, values); ctx.reportError("Illegal value '%s' for parameter %s. " "Legal values are: '%s'", value, fname, values.c_str()); return false; } require(ctx.m_currentSection->put(fname, value_int)); break; } case ConfigInfo::CI_BITMASK:{ if (strlen(value) <= 0) { ctx.reportError("Illegal value '%s' for parameter %s. " "Error: Zero length string", value, fname); return false; } Uint64 max = m_info->getMax(ctx.m_currentInfo, fname); SparseBitmask mask((unsigned)max); int res = parse_mask(value, mask); if (res < 0) { BaseString desc("Unknown error."); switch(res) { case -1: desc.assign("Invalid syntax for bitmask"); break; case -2: desc.assfmt("Too large id used in bitmask, max is %llu", max); break; default: break; } ctx.reportError("Illegal value '%s' for parameter %s. Error: %s", value, fname, desc.c_str()); return false; } require(ctx.m_currentSection->put(fname, value)); break; } case ConfigInfo::CI_SECTION: abort(); } return true; } //**************************************************************************** // Is Empty Line //**************************************************************************** bool InitConfigFileParser::isEmptyLine(const char* line) const { int i; // Check if it is a comment line if (line[0] == '#') return true; // Check if it is a line with only spaces for (i = 0; i < MAX_LINE_LENGTH && line[i] != '\n' && line[i] != '\0'; i++) { if (line[i] != ' ' && line[i] != '\t') return false; } return true; } //**************************************************************************** // Convert String to Int //**************************************************************************** bool InitConfigFileParser::convertStringToUint64(const char* s, Uint64& val, Uint32 log10base) { if (s == NULL) return false; if (strlen(s) == 0) return false; errno = 0; char* p; Int64 v = my_strtoll(s, &p, log10base); if (errno != 0) return false; long mul = 0; if (p != &s[strlen(s)]){ char * tmp = strdup(p); trim(tmp); switch(tmp[0]){ case 'k': case 'K': mul 
= 10; break; case 'M': mul = 20; break; case 'G': mul = 30; break; default: free(tmp); return false; } free(tmp); } val = (v << mul); return true; } bool InitConfigFileParser::convertStringToBool(const char* s, bool& val) { if (s == NULL) return false; if (strlen(s) == 0) return false; if (!strcmp(s, "Y") || !strcmp(s, "y") || !strcmp(s, "Yes") || !strcmp(s, "YES") || !strcmp(s, "yes") || !strcmp(s, "True") || !strcmp(s, "TRUE") || !strcmp(s, "true") || !strcmp(s, "1")) { val = true; return true; } if (!strcmp(s, "N") || !strcmp(s, "n") || !strcmp(s, "No") || !strcmp(s, "NO") || !strcmp(s, "no") || !strcmp(s, "False") || !strcmp(s, "FALSE") || !strcmp(s, "false") || !strcmp(s, "0")) { val = false; return true; } return false; // Failure to convert } //**************************************************************************** // Parse Section Header //**************************************************************************** static void trim(char * str){ int len = (int)strlen(str); for(len--; (str[len] == '\r' || str[len] == '\n' || str[len] == ' ' || str[len] == '\t') && len > 0; len--) str[len] = 0; int pos = 0; while(str[pos] == ' ' || str[pos] == '\t') pos++; if(str[pos] == '\"' && str[len] == '\"') { pos++; str[len] = 0; len--; } memmove(str, &str[pos], len - pos + 2); } char* InitConfigFileParser::parseSectionHeader(const char* line) const { char * tmp = strdup(line); if(tmp[0] != '['){ free(tmp); return NULL; } if(tmp[strlen(tmp)-1] != ']'){ free(tmp); return NULL; } tmp[strlen(tmp)-1] = 0; tmp[0] = ' '; trim(tmp); // Get the correct header name if an alias { const char *tmp_alias= m_info->getAlias(tmp); if (tmp_alias) { free(tmp); tmp= strdup(tmp_alias); } } // Lookup token among sections if(!m_info->isSection(tmp)) { free(tmp); return NULL; } if(m_info->getInfo(tmp)) return tmp; free(tmp); return NULL; } //**************************************************************************** // Parse Default Section Header 
//**************************************************************************** char* InitConfigFileParser::parseDefaultSectionHeader(const char* line) const { static char token1[MAX_LINE_LENGTH], token2[MAX_LINE_LENGTH]; int no = sscanf(line, "[%120[A-Z_a-z] %120[A-Z_a-z]]", token1, token2); // Not correct no of tokens if (no != 2) return NULL; // Not correct keyword at end if (!native_strcasecmp(token2, "DEFAULT") == 0) return NULL; const char *token1_alias= m_info->getAlias(token1); if (token1_alias == 0) token1_alias= token1; if(m_info->getInfo(token1_alias)){ return strdup(token1_alias); } // Did not find section return NULL; } const Properties * InitConfigFileParser::getSection(const char * name, const Properties * src){ const Properties * p; if(src && src->get(name, &p)) return p; return 0; } //**************************************************************************** // STORE section //**************************************************************************** bool InitConfigFileParser::storeSection(Context& ctx){ if(ctx.m_currentSection == NULL) return true; for(int i = (int)strlen(ctx.fname) - 1; i>=0; i--){ ctx.fname[i] = toupper(ctx.fname[i]); } BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "%s", ctx.fname); char buf[255]; if(ctx.type == InitConfigFileParser::Section) BaseString::snprintf(buf, sizeof(buf), "%s", ctx.fname); if(ctx.type == InitConfigFileParser::DefaultSection) BaseString::snprintf(buf, sizeof(buf), "%s DEFAULT", ctx.fname); BaseString::snprintf(ctx.fname, sizeof(ctx.fname), "%s", buf); if(ctx.type == InitConfigFileParser::Section){ for(int i = 0; i<m_info->m_NoOfRules; i++){ const ConfigInfo::SectionRule & rule = m_info->m_SectionRules[i]; if(!strcmp(rule.m_section, "*") || !strcmp(rule.m_section, ctx.fname)){ if(!(* rule.m_sectionRule)(ctx, rule.m_ruleData)){ return false; } } } } if(ctx.type == InitConfigFileParser::DefaultSection && !ctx.m_defaults->put(ctx.pname, ctx.m_currentSection)) { ctx.reportError("Duplicate default 
section not allowed"); return false; } if(ctx.type == InitConfigFileParser::Section) require(ctx.m_config->put(ctx.pname, ctx.m_currentSection)); delete ctx.m_currentSection; ctx.m_currentSection = NULL; return true; } void InitConfigFileParser::Context::reportError(const char * fmt, ...){ va_list ap; char buf[1000]; va_start(ap, fmt); if (fmt != 0) BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); va_end(ap); g_eventLogger->error("at line %d: %s", m_lineno, buf); //m_currentSection->print(); } void InitConfigFileParser::Context::reportWarning(const char * fmt, ...){ va_list ap; char buf[1000]; va_start(ap, fmt); if (fmt != 0) BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); va_end(ap); g_eventLogger->warning("at line %d: %s", m_lineno, buf); } #include <my_sys.h> #include <my_getopt.h> #ifdef HAVE_MY_DEFAULT_H #include <my_default.h> #endif static int order = 1; static my_bool parse_mycnf_opt(int, const struct my_option * opt, char * value) { long *app_type= (long*) &opt->app_type; if(opt->comment) (*app_type)++; else *app_type = order++; return 0; } bool InitConfigFileParser::store_in_properties(Vector<struct my_option>& options, InitConfigFileParser::Context& ctx, const char * name) { for(unsigned i = 0; i<options.size(); i++) { if (options[i].app_type == 0) { // Option not found in in my.cnf continue; } const char* section = options[i].comment; if (!section) { // Option which is not to be saved, like "ndbd", "ndbapi", "mysqld" etc. 
continue; } if (strcmp(section, name) == 0) { const char* value = NULL; char buf[32]; switch(options[i].var_type){ case GET_INT: case GET_UINT: BaseString::snprintf(buf, sizeof(buf), "%u", *(Uint32*)options[i].value); value = buf; break; case GET_ULL: BaseString::snprintf(buf, sizeof(buf), "%llu", *(Uint64*)options[i].value); value = buf; break; case GET_STR: value = *(char**)options[i].value; break; default: abort(); } const char* fname = options[i].name; if (!storeNameValuePair(ctx, fname, value)) return false; } } return true; } bool InitConfigFileParser::handle_mycnf_defaults(Vector<struct my_option>& options, InitConfigFileParser::Context& ctx, const char * name) { strcpy(ctx.fname, name); ctx.type = InitConfigFileParser::DefaultSection; ctx.m_currentSection = new Properties(true); ctx.m_userDefaults = NULL; require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0); require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0); if(store_in_properties(options, ctx, name)) return storeSection(ctx); return false; } static int load_defaults(Vector<struct my_option>& options, const char* groups[]) { int argc = 1; const char * argv[] = { "ndb_mgmd", 0, 0, 0, 0 }; BaseString file; BaseString extra_file; BaseString group_suffix; const char *save_file = my_defaults_file; #if MYSQL_VERSION_ID >= 50508 const #endif char *save_extra_file = my_defaults_extra_file; const char *save_group_suffix = my_defaults_group_suffix; if (my_defaults_file) { file.assfmt("--defaults-file=%s", my_defaults_file); argv[argc++] = file.c_str(); } if (my_defaults_extra_file) { extra_file.assfmt("--defaults-extra-file=%s", my_defaults_extra_file); argv[argc++] = extra_file.c_str(); } if (my_defaults_group_suffix) { group_suffix.assfmt("--defaults-group-suffix=%s", my_defaults_group_suffix); argv[argc++] = group_suffix.c_str(); } char ** tmp = (char**)argv; int ret = load_defaults("my", groups, &argc, &tmp); my_defaults_file = save_file; my_defaults_extra_file = save_extra_file; 
my_defaults_group_suffix = save_group_suffix; if (ret == 0) { return handle_options(&argc, &tmp, options.getBase(), parse_mycnf_opt); } return ret; } bool InitConfigFileParser::load_mycnf_groups(Vector<struct my_option> & options, InitConfigFileParser::Context& ctx, const char * name, const char *groups[]) { unsigned i; Vector<struct my_option> copy; for(i = 0; i<options.size(); i++) { if(options[i].comment && strcmp(options[i].comment, name) == 0) { options[i].app_type = 0; copy.push_back(options[i]); } } struct my_option end; memset(&end, 0, sizeof(end)); copy.push_back(end); if (load_defaults(copy, groups)) return false; return store_in_properties(copy, ctx, name); } Config * InitConfigFileParser::parse_mycnf() { Config * res = 0; Vector<struct my_option> options; for(int i = 0 ; i < ConfigInfo::m_NoOfParams ; ++ i) { { struct my_option opt; memset(&opt, 0, sizeof(opt)); const ConfigInfo::ParamInfo& param = ConfigInfo::m_ParamInfo[i]; switch(param._type){ case ConfigInfo::CI_BOOL: opt.value = (uchar **)malloc(sizeof(int)); opt.var_type = GET_INT; break; case ConfigInfo::CI_INT: opt.value = (uchar**)malloc(sizeof(uint)); opt.var_type = GET_UINT; break; case ConfigInfo::CI_INT64: opt.value = (uchar**)malloc(sizeof(Uint64)); opt.var_type = GET_ULL; break; case ConfigInfo::CI_ENUM: case ConfigInfo::CI_STRING: case ConfigInfo::CI_BITMASK: opt.value = (uchar**)malloc(sizeof(char *)); opt.var_type = GET_STR; break; default: continue; } opt.name = param._fname; opt.id = 256; opt.app_type = 0; opt.arg_type = REQUIRED_ARG; opt.comment = param._section; options.push_back(opt); } } struct my_option *ndbd, *ndb_mgmd, *mysqld, *api; /** * Add ndbd, ndb_mgmd, api/mysqld */ Uint32 idx = options.size(); { struct my_option opt; memset(&opt, 0, sizeof(opt)); opt.name = "ndbd"; opt.id = 256; opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "ndb_mgmd"; opt.id = 256; opt.value = 
(uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "mysqld"; opt.id = 256; opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); opt.name = "ndbapi"; opt.id = 256; opt.value = (uchar**)malloc(sizeof(char*)); opt.var_type = GET_STR; opt.arg_type = REQUIRED_ARG; options.push_back(opt); memset(&opt, 0, sizeof(opt)); options.push_back(opt); ndbd = &options[idx]; ndb_mgmd = &options[idx+1]; mysqld = &options[idx+2]; api = &options[idx+3]; } Context ctx(m_info); const char *groups[]= { "cluster_config", 0 }; if (load_defaults(options, groups)) goto end; ctx.m_lineno = 0; if(!handle_mycnf_defaults(options, ctx, "DB")) goto end; if(!handle_mycnf_defaults(options, ctx, "API")) goto end; if(!handle_mycnf_defaults(options, ctx, "MGM")) goto end; if(!handle_mycnf_defaults(options, ctx, "TCP")) goto end; if(!handle_mycnf_defaults(options, ctx, "SHM")) goto end; if(!handle_mycnf_defaults(options, ctx, "SCI")) goto end; { struct sect { struct my_option* src; const char * name; } sections[] = { { ndb_mgmd, "MGM" }, { ndbd, "DB" }, { mysqld, "API" }, { api, "API" } }; for(unsigned i = 0; i + 1 < NDB_ARRAY_SIZE(sections) ; i++) { for(unsigned j = i + 1; j < NDB_ARRAY_SIZE(sections) ; j++) { if (sections[j].src->app_type < sections[i].src->app_type) { sect swap = sections[i]; sections[i] = sections[j]; sections[j] = swap; } } } ctx.type = InitConfigFileParser::Section; ctx.m_sectionLineno = ctx.m_lineno; for(unsigned i = 0; i < NDB_ARRAY_SIZE(sections) ; i++) { if (sections[i].src->app_type) { strcpy(ctx.fname, sections[i].name); BaseString str(*(char**)sections[i].src->value); Vector<BaseString> list; str.split(list, ","); const char * defaults_groups[] = { 0, 0, 0 }; for(unsigned j = 0; j<list.size(); j++) { // Remove leading and trailing spaces from hostname list[j].trim(); BaseString group_idx; BaseString group_host; group_idx.assfmt("%s.%s.%d", 
groups[0], sections[i].src->name, j + 1); group_host.assfmt("%s.%s.%s", groups[0], sections[i].src->name, list[j].c_str()); defaults_groups[0] = group_idx.c_str(); if(list[j].length()) defaults_groups[1] = group_host.c_str(); else defaults_groups[1] = 0; ctx.m_currentSection = new Properties(true); ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults); require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0); require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname))!= 0); if(!load_mycnf_groups(options, ctx, sections[i].name, defaults_groups)) goto end; // The [cluster_config] section in my.cnf specifies the hostname, // but it can also be specified a second time in the nodes section // make sure they match if specified in both places, else // save the value from cluster_config section // // Example: // [cluster_config] // ndbd=hostname1 // ^^^^^^^^^ // [cluster_config.ndbd.1] // HostName=hostname1 // ^^^^^^^^^ // if (ctx.m_currentSection->contains("HostName")) { // HostName specified a second time, check that it matches const char* host_name; require(ctx.m_currentSection->get("HostName", &host_name)); if (strcmp(host_name, list[j].c_str())) { ctx.reportError("Illegal value 'HostName=%s' specified for " "%s, previously set to '%s'", host_name, group_idx.c_str(), list[j].c_str()); goto end; } } else { require(ctx.m_currentSection->put("HostName", list[j].c_str())); } if(!storeSection(ctx)) goto end; } } } } res = run_config_rules(ctx); end: for(int i = 0; options[i].name; i++) free(options[i].value); return res; } template class Vector<struct my_option>; /* See include/my_getopt.h for the declaration of struct my_option */<|fim▁end|>
as designated in a particular file or component or in included license documentation. The authors of MySQL hereby grant you an additional permission to link the program and your derivative works with the
<|file_name|>package.py<|end_file_name|><|fim▁begin|>############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Dtbuild2(Package): """Simple package which acts as a build dependency""" <|fim▁hole|> homepage = "http://www.example.com" url = "http://www.example.com/dtbuild2-1.0.tar.gz" version('1.0', '0123456789abcdef0123456789abcdef') def install(self, spec, prefix): pass<|fim▁end|>
<|file_name|>magnet_dummy.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ This file contains the dummy for a magnet interface. Qudi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Qudi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Qudi. If not, see <http://www.gnu.org/licenses/>. Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/> """ from collections import OrderedDict from core.base import Base from interface.magnet_interface import MagnetInterface class MagnetAxisDummy: """ Generic dummy magnet representing one axis. """ def __init__(self, label): self.label = label self.pos = 0.0 self.status = 0, {0: 'MagnetDummy Idle'} class MagnetDummy(Base, MagnetInterface): """This is the Interface class to define the controls for the simple magnet hardware. """ _modtype = 'MagnetDummy' _modclass = 'hardware' _out = {'magnetstage': 'MagnetInterface'} def __init__(self, config, **kwargs): super().__init__(config=config, **kwargs) self.log.info('The following configuration was found.') # checking for the right configuration for key in config.keys(): self.log.info('{0}: {1}'.format(key,config[key])) #these label should be actually set by the config. self._x_axis = MagnetAxisDummy('x') self._y_axis = MagnetAxisDummy('y') self._z_axis = MagnetAxisDummy('z') self._phi_axis = MagnetAxisDummy('phi') #TODO: Checks if configuration is set and is reasonable def on_activate(self, e): """ Definition and initialisation of the GUI. 
@param object e: Fysom.event object from Fysom class. An object created by the state machine module Fysom, which is connected to a specific event (have a look in the Base Class). This object contains the passed event, the state before the event happened and the destination of the state which should be reached after the event had happened. """ pass def on_deactivate(self, e): """ Deactivate the module properly. @param object e: Fysom.event object from Fysom class. A more detailed explanation can be found in the method activation. """ pass def get_constraints(self): """ Retrieve the hardware constrains from the motor device. @return dict: dict with constraints for the magnet hardware. These constraints will be passed via the logic to the GUI so that proper display elements with boundary conditions could be made. Provides all the constraints for each axis of a motorized stage (like total travel distance, velocity, ...) Each axis has its own dictionary, where the label is used as the identifier throughout the whole module. The dictionaries for each axis are again grouped together in a constraints dictionary in the form {'<label_axis0>': axis0 } where axis0 is again a dict with the possible values defined below. The possible keys in the constraint are defined here in the interface file. If the hardware does not support the values for the constraints, then insert just None. If you are not sure about the meaning, look in other hardware files to get an impression. 
""" constraints = OrderedDict() axis0 = {} axis0['label'] = self._x_axis.label # name is just as a sanity included axis0['unit'] = 'm' # the SI units axis0['ramp'] = ['Sinus','Linear'] # a possible list of ramps axis0['pos_min'] = 0 axis0['pos_max'] = 100e-3 # that is basically the traveling range axis0['pos_step'] = 0.001e-3 axis0['vel_min'] = 0 axis0['vel_max'] = 100e-3 axis0['vel_step'] = 0.01e-3 axis0['acc_min'] = 0.1e-3 axis0['acc_max'] = 0.0 axis0['acc_step'] = 0.0 <|fim▁hole|> axis1['unit'] = 'm' # the SI units axis1['ramp'] = ['Sinus','Linear'] # a possible list of ramps axis1['pos_min'] = 0 axis1['pos_max'] = 100e-3 # that is basically the traveling range axis1['pos_step'] = 0.001e-3 axis1['vel_min'] = 0 axis1['vel_max'] = 100e-3 axis1['vel_step'] = 0.01e-3 axis1['acc_min'] = 0.1e-3 axis1['acc_max'] = 0.0 axis1['acc_step'] = 0.0 axis2 = {} axis2['label'] = self._z_axis.label # that axis label should be obtained from config axis2['unit'] = 'm' # the SI units axis2['ramp'] = ['Sinus','Linear'] # a possible list of ramps axis2['pos_min'] = 0 axis2['pos_max'] = 100e-3 # that is basically the traveling range axis2['pos_step'] = 0.001e-3 axis2['vel_min'] = 0 axis2['vel_max'] = 100e-3 axis2['vel_step'] = 0.01e-3 axis2['acc_min'] = 0.1e-3 axis2['acc_max'] = 0.0 axis2['acc_step'] = 0.0 axis3 = {} axis3['label'] = self._phi_axis.label # that axis label should be obtained from config axis3['unit'] = '°' # the SI units axis3['ramp'] = ['Sinus','Trapez'] # a possible list of ramps axis3['pos_min'] = 0 axis3['pos_max'] = 360 # that is basically the traveling range axis3['pos_step'] = 0.1 axis3['vel_min'] = 1 axis3['vel_max'] = 20 axis3['vel_step'] = 0.1 axis3['acc_min'] = None axis3['acc_max'] = None axis3['acc_step'] = None # assign the parameter container for x to a name which will identify it constraints[axis0['label']] = axis0 constraints[axis1['label']] = axis1 constraints[axis2['label']] = axis2 constraints[axis3['label']] = axis3 return constraints def 
move_rel(self, param_dict): """ Moves magnet in given direction (relative movement) @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. With get_constraints() you can obtain all possible parameters of that stage. According to this parameter set you have to pass a dictionary with keys that are called like the parameters from get_constraints() and assign a SI value to that. For a movement in x the dict should e.g. have the form: dict = { 'x' : 23 } where the label 'x' corresponds to the chosen axis label. A smart idea would be to ask the position after the movement. """ curr_pos_dict = self.get_pos() constraints = self.get_constraints() if param_dict.get(self._x_axis.label) is not None: move_x = param_dict[self._x_axis.label] curr_pos_x = curr_pos_dict[self._x_axis.label] if (curr_pos_x + move_x > constraints[self._x_axis.label]['pos_max'] ) or\ (curr_pos_x + move_x < constraints[self._x_axis.label]['pos_min']): self.log.warning('Cannot make further movement of the axis ' '"{0}" with the step {1}, since the border [{2},{3}] ' ' of the magnet was reached! Ignore ' 'command!'.format( self._x_axis.label, move_x, constraints[self._x_axis.label]['pos_min'], constraints[self._x_axis.label]['pos_max'])) else: self._x_axis.pos = self._x_axis.pos + move_x if param_dict.get(self._y_axis.label) is not None: move_y = param_dict[self._y_axis.label] curr_pos_y = curr_pos_dict[self._y_axis.label] if (curr_pos_y + move_y > constraints[self._y_axis.label]['pos_max'] ) or\ (curr_pos_y + move_y < constraints[self._y_axis.label]['pos_min']): self.log.warning('Cannot make further movement of the axis ' '"{0}" with the step {1}, since the border [{2},{3}] ' ' of the magnet was reached! 
Ignore ' 'command!'.format( self._y_axis.label, move_y, constraints[self._y_axis.label]['pos_min'], constraints[self._y_axis.label]['pos_max'])) else: self._y_axis.pos = self._y_axis.pos + move_y if param_dict.get(self._z_axis.label) is not None: move_z = param_dict[self._z_axis.label] curr_pos_z = curr_pos_dict[self._z_axis.label] if (curr_pos_z + move_z > constraints[self._z_axis.label]['pos_max'] ) or\ (curr_pos_z + move_z < constraints[self._z_axis.label]['pos_min']): self.log.warning('Cannot make further movement of the axis ' '"{0}" with the step {1}, since the border [{2},{3}] ' ' of the magnet was reached! Ignore ' 'command!'.format( self._z_axis.label, move_z, constraints[self._z_axis.label]['pos_min'], constraints[self._z_axis.label]['pos_max'])) else: self._z_axis.pos = self._z_axis.pos + move_z if param_dict.get(self._phi_axis.label) is not None: move_phi = param_dict[self._phi_axis.label] curr_pos_phi = curr_pos_dict[self._phi_axis.label] if (curr_pos_phi + move_phi > constraints[self._phi_axis.label]['pos_max'] ) or\ (curr_pos_phi + move_phi < constraints[self._phi_axis.label]['pos_min']): self.log.warning('Cannot make further movement of the axis ' '"{0}" with the step {1}, since the border [{2},{3}] ' ' of the magnet was reached! Ignore ' 'command!'.format( self._phi_axis.label, move_phi, constraints[self._phi_axis.label]['pos_min'], constraints[self._phi_axis.label]['pos_max'])) else: self._phi_axis.pos = self._phi_axis.pos + move_phi def move_abs(self, param_dict): """ Moves magnet to absolute position (absolute movement) @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. Usage: {'axis_label': <a-value>}. 'axis_label' must correspond to a label given to one of the axis. A smart idea would be to ask the position after the movement. 
""" constraints = self.get_constraints() if param_dict.get(self._x_axis.label) is not None: desired_pos = param_dict[self._x_axis.label] constr = constraints[self._x_axis.label] if not(constr['pos_min'] <= desired_pos <= constr['pos_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] of the magnet! Command is ' 'ignored!'.format( self._x_axis.label, desired_pos, constr['pos_min'], constr['pos_max'])) else: self._x_axis.pos = desired_pos if param_dict.get(self._y_axis.label) is not None: desired_pos = param_dict[self._y_axis.label] constr = constraints[self._y_axis.label] if not(constr['pos_min'] <= desired_pos <= constr['pos_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] of the magnet! Command is ' 'ignored!'.format( self._y_axis.label, desired_pos, constr['pos_min'], constr['pos_max'])) else: self._y_axis.pos = desired_pos if param_dict.get(self._z_axis.label) is not None: desired_pos = param_dict[self._z_axis.label] constr = constraints[self._z_axis.label] if not(constr['pos_min'] <= desired_pos <= constr['pos_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] of the magnet! Command is ' 'ignored!'.format( self._z_axis.label, desired_pos, constr['pos_min'], constr['pos_max'])) else: self._z_axis.pos = desired_pos if param_dict.get(self._phi_axis.label) is not None: desired_pos = param_dict[self._phi_axis.label] constr = constraints[self._phi_axis.label] if not(constr['pos_min'] <= desired_pos <= constr['pos_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] of the magnet! 
Command is ignored!'.format( self._phi_axis.label, desired_pos, constr['pos_min'], constr['pos_max'])) else: self._phi_axis.pos = desired_pos def abort(self): """ Stops movement of the stage @return int: error code (0:OK, -1:error) """ self.log.info('MagnetDummy: Movement stopped!') return 0 def get_pos(self, param_list=None): """ Gets current position of the magnet stage arms @param list param_list: optional, if a specific position of an axis is desired, then the labels of the needed axis should be passed as the param_list. If nothing is passed, then from each axis the position is asked. @return dict: with keys being the axis labels and item the current position. """ pos = {} if param_list is not None: if self._x_axis.label in param_list: pos[self._x_axis.label] = self._x_axis.pos if self._y_axis.label in param_list: pos[self._y_axis.label] = self._y_axis.pos if self._z_axis.label in param_list: pos[self._z_axis.label] = self._z_axis.pos if self._phi_axis.label in param_list: pos[self._phi_axis.label] = self._phi_axis.pos else: pos[self._x_axis.label] = self._x_axis.pos pos[self._y_axis.label] = self._y_axis.pos pos[self._z_axis.label] = self._z_axis.pos pos[self._phi_axis.label] = self._phi_axis.pos return pos def get_status(self, param_list=None): """ Get the status of the position @param list param_list: optional, if a specific status of an axis is desired, then the labels of the needed axis should be passed in the param_list. If nothing is passed, then from each axis the status is asked. @return dict: with the axis label as key and the status number as item. 
""" status = {} if param_list is not None: if self._x_axis.label in param_list: status[self._x_axis.label] = self._x_axis.status if self._y_axis.label in param_list: status[self._y_axis.label] = self._y_axis.status if self._z_axis.label in param_list: status[self._z_axis.label] = self._z_axis.status if self._phi_axis.label in param_list: status[self._phi_axis.label] = self._phi_axis.status else: status[self._x_axis.label] = self._x_axis.status status[self._y_axis.label] = self._y_axis.status status[self._z_axis.label] = self._z_axis.status status[self._phi_axis.label] = self._phi_axis.status return status def calibrate(self, param_list=None): """ Calibrates the magnet stage. @param dict param_list: param_list: optional, if a specific calibration of an axis is desired, then the labels of the needed axis should be passed in the param_list. If nothing is passed, then all connected axis will be calibrated. @return int: error code (0:OK, -1:error) After calibration the stage moves to home position which will be the zero point for the passed axis. The calibration procedure will be different for each stage. """ if param_list is not None: if self._x_axis.label in param_list: self._x_axis.pos = 0.0 if self._y_axis.label in param_list: self._y_axis.pos = 0.0 if self._z_axis.label in param_list: self._z_axis.pos = 0.0 if self._phi_axis.label in param_list: self._phi_axis.pos = 0.0 else: self._x_axis.pos = 0.0 self._y_axis.pos = 0.0 self._z_axis.pos = 0.0 self._phi_axis.pos = 0.0 return 0 def get_velocity(self, param_list=None): """ Gets the current velocity for all connected axes. @param dict param_list: optional, if a specific velocity of an axis is desired, then the labels of the needed axis should be passed as the param_list. If nothing is passed, then from each axis the velocity is asked. @return dict : with the axis label as key and the velocity as item. 
""" vel = {} if param_list is not None: if self._x_axis.label in param_list: vel[self._x_axis.label] = self._x_axis.vel if self._y_axis.label in param_list: vel[self._x_axis.label] = self._y_axis.vel if self._z_axis.label in param_list: vel[self._x_axis.label] = self._z_axis.vel if self._phi_axis.label in param_list: vel[self._phi_axis.label] = self._phi_axis.vel else: vel[self._x_axis.label] = self._x_axis.get_vel vel[self._y_axis.label] = self._y_axis.get_vel vel[self._z_axis.label] = self._z_axis.get_vel vel[self._phi_axis.label] = self._phi_axis.vel return vel def set_velocity(self, param_dict=None): """ Write new value for velocity. @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. Usage: {'axis_label': <the-velocity-value>}. 'axis_label' must correspond to a label given to one of the axis. """ constraints = self.get_constraints() if param_dict.get(self._x_axis.label) is not None: desired_vel = param_dict[self._x_axis.label] constr = constraints[self._x_axis.label] if not(constr['vel_min'] <= desired_vel <= constr['vel_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] ! Command is ignored!'.format( self._x_axis.label, desired_vel, constr['vel_min'], constr['vel_max'])) else: self._x_axis.vel = desired_vel if param_dict.get(self._y_axis.label) is not None: desired_vel = param_dict[self._y_axis.label] constr = constraints[self._y_axis.label] if not(constr['vel_min'] <= desired_vel <= constr['vel_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] ! 
Command is ignored!'.format( self._y_axis.label, desired_vel, constr['vel_min'], constr['vel_max'])) else: self._y_axis.vel = desired_vel if param_dict.get(self._z_axis.label) is not None: desired_vel = param_dict[self._z_axis.label] constr = constraints[self._z_axis.label] if not(constr['vel_min'] <= desired_vel <= constr['vel_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] ! Command is ignored!'.format( self._z_axis.label, desired_vel, constr['vel_min'], constr['vel_max'])) else: self._z_axis.vel = desired_vel if param_dict.get(self._phi_axis.label) is not None: desired_vel = param_dict[self._phi_axis.label] constr = constraints[self._phi_axis.label] if not(constr['vel_min'] <= desired_vel <= constr['vel_max']): self.log.warning('Cannot make absolute movement of the axis ' '"{0}" to possition {1}, since it exceeds the limits ' '[{2},{3}] ! Command is ignored!'.format( self._phi_axis.label, desired_vel, constr['vel_min'], constr['vel_max'])) else: self._phi_axis.vel = desired_vel def tell(self, param_dict=None): """ Send a command to the magnet. @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. Usage: {'axis_label': <the command string>}. 'axis_label' must correspond to a label given to one of the axis. @return int: error code (0:OK, -1:error) """ self.log.info('You can tell the magnet dummy as much as you want, it ' 'has always an open ear for you. But do not expect an ' 'answer, it is very shy!') return 0 def ask(self, param_dict=None): """ Ask the magnet a question. @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. Usage: {'axis_label': <the question string>}. 'axis_label' must correspond to a label given to one of the axis. @return string: contains the answer coming from the magnet """ self.log.info('Dude, I am a dummy! 
Your question(s) "{0}" to the ' 'axis "{1}" is/are way to complicated for me :D ! If you ' 'want to talk to someone, ask Siri, maybe she will listen to ' 'you and answer your questions :P.'.format( list(param_dict.values()), list(param_dict))) return_val = {} for entry in param_dict: return_val[entry] = 'Nothing to say, Motor is quite.' return return_val def set_magnet_idle_state(self, magnet_idle=True): """ Set the magnet to couple/decouple to/from the control. @param bool magnet_idle: if True then magnet will be set to idle and each movement command will be ignored from the hardware file. If False the magnet will react on movement changes of any kind. @return bool: the actual state which was set in the magnet hardware. True = idle, decoupled from control False = Not Idle, coupled to control """ self._idle_state = magnet_idle return self._idle_state def get_magnet_idle_state(self): """ Retrieve the current state of the magnet, whether it is idle or not. @return bool: the actual state which was set in the magnet hardware. True = idle, decoupled from control False = Not Idle, coupled to control """ return self._idle_state def initialize(self): """ Acts as a switch. When all coils of the superconducting magnet are heated it cools them, else the coils get heated. @return int: (0: Ok, -1:error) """ raise InterfaceImplementationError('magnet_interface>initialize') return -1<|fim▁end|>
axis1 = {} axis1['label'] = self._y_axis.label # that axis label should be obtained from config
<|file_name|>test_workbook_service.py<|end_file_name|><|fim▁begin|># Copyright 2014 - Mirantis, Inc. # Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from mistral.db.v2 import api as db_api from mistral.lang import parser as spec_parser from mistral.services import workbooks as wb_service from mistral.tests.unit import base # Use the set_default method to set value otherwise in certain test cases # the change in value is not permanent. cfg.CONF.set_default('auth_enable', False, group='pecan') WORKBOOK = """ --- version: '2.0' name: my_wb tags: [test] actions: concat: base: std.echo base-input: output: "{$.str1}{$.str2}" workflows: wf1: #Sample Comment 1 type: reverse tags: [wf_test] input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" wf2: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf1 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ WORKBOOK_WF1_DEFINITION = """wf1: #Sample Comment 1 type: reverse tags: [wf_test] input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ WORKBOOK_WF2_DEFINITION = """wf2: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf1 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ UPDATED_WORKBOOK = """ --- version: 
'2.0' name: my_wb tags: [test] actions: concat: base: std.echo base-input: output: "{$.str1}{$.str2}" workflows:<|fim▁hole|> result: "{$.result}" tasks: task1: workflow: my_wb.wf2 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" wf2: type: reverse input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ UPDATED_WORKBOOK_WF1_DEFINITION = """wf1: type: direct output: result: "{$.result}" tasks: task1: workflow: my_wb.wf2 param1='Hi' task_name='task1' publish: result: "The result of subworkflow is '{$.final_result}'" """ UPDATED_WORKBOOK_WF2_DEFINITION = """wf2: type: reverse input: - param1 output: result: "{$.result}" tasks: task1: action: std.echo output="{$.param1}" publish: result: "{$}" """ ACTION_DEFINITION = """concat: base: std.echo base-input: output: "{$.str1}{$.str2}" """ class WorkbookServiceTest(base.DbTestCase): def test_create_workbook(self): namespace = 'test_workbook_service_0123_namespace' wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual(namespace, wb_db.namespace) self.assertEqual(WORKBOOK, wb_db.definition) self.assertIsNotNone(wb_db.spec) self.assertListEqual(['test'], wb_db.tags) db_actions = db_api.get_action_definitions( name='my_wb.concat', namespace=namespace ) self.assertEqual(1, len(db_actions)) # Action. action_db = self._assert_single_item(db_actions, name='my_wb.concat') self.assertFalse(action_db.is_system) action_spec = spec_parser.get_action_spec(action_db.spec) self.assertEqual('concat', action_spec.get_name()) self.assertEqual('std.echo', action_spec.get_base()) self.assertEqual(ACTION_DEFINITION, action_db.definition) db_wfs = db_api.get_workflow_definitions() self.assertEqual(2, len(db_wfs)) # Workflow 1. 
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertEqual('reverse', wf1_spec.get_type()) self.assertListEqual(['wf_test'], wf1_spec.get_tags()) self.assertListEqual(['wf_test'], wf1_db.tags) self.assertEqual(namespace, wf1_db.namespace) self.assertEqual(WORKBOOK_WF1_DEFINITION, wf1_db.definition) # Workflow 2. wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2') wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec) self.assertEqual('wf2', wf2_spec.get_name()) self.assertEqual('direct', wf2_spec.get_type()) self.assertEqual(namespace, wf2_db.namespace) self.assertEqual(WORKBOOK_WF2_DEFINITION, wf2_db.definition) def test_create_same_workbook_in_different_namespaces(self): first_namespace = 'first_namespace' second_namespace = 'second_namespace' first_wb = wb_service.create_workbook_v2(WORKBOOK, namespace=first_namespace) self.assertIsNotNone(first_wb) self.assertEqual('my_wb', first_wb.name) self.assertEqual(first_namespace, first_wb.namespace) second_wb = wb_service.create_workbook_v2(WORKBOOK, namespace=second_namespace) self.assertIsNotNone(second_wb) self.assertEqual('my_wb', second_wb.name) self.assertEqual(second_namespace, second_wb.namespace) def test_create_workbook_with_default_namespace(self): wb_db = wb_service.create_workbook_v2(WORKBOOK) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual('', wb_db.namespace) db_api.delete_workbook('my_wb') def test_update_workbook(self): namespace = 'test_workbook_service_0123_namespace' # Create workbook. wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) self.assertIsNotNone(wb_db) self.assertEqual(2, len(db_api.get_workflow_definitions())) # Update workbook. 
wb_db = wb_service.update_workbook_v2( UPDATED_WORKBOOK, namespace=namespace ) self.assertIsNotNone(wb_db) self.assertEqual('my_wb', wb_db.name) self.assertEqual(namespace, wb_db.namespace) self.assertEqual(UPDATED_WORKBOOK, wb_db.definition) self.assertListEqual(['test'], wb_db.tags) db_wfs = db_api.get_workflow_definitions() self.assertEqual(2, len(db_wfs)) # Workflow 1. wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1') wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec) self.assertEqual('wf1', wf1_spec.get_name()) self.assertEqual('direct', wf1_spec.get_type()) self.assertEqual(namespace, wf1_db.namespace) self.assertEqual(UPDATED_WORKBOOK_WF1_DEFINITION, wf1_db.definition) # Workflow 2. wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2') wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec) self.assertEqual('wf2', wf2_spec.get_name()) self.assertEqual('reverse', wf2_spec.get_type()) self.assertEqual(namespace, wf2_db.namespace) self.assertEqual(UPDATED_WORKBOOK_WF2_DEFINITION, wf2_db.definition) def test_delete_workbook(self): namespace = 'pqr' # Create workbook. wb_service.create_workbook_v2(WORKBOOK, namespace=namespace) db_wfs = db_api.get_workflow_definitions() db_actions = db_api.get_action_definitions(name='my_wb.concat', namespace=namespace) self.assertEqual(2, len(db_wfs)) self.assertEqual(1, len(db_actions)) db_api.delete_workbook('my_wb', namespace=namespace) db_wfs = db_api.get_workflow_definitions() db_actions = db_api.get_action_definitions(name='my_wb.concat', namespace=namespace) # Deleting workbook shouldn't delete workflows and actions self.assertEqual(2, len(db_wfs)) self.assertEqual(1, len(db_actions))<|fim▁end|>
wf1: type: direct output:
<|file_name|>C.js<|end_file_name|><|fim▁begin|>/** * +--------------------------------------------------------------------+ * | This HTML_CodeSniffer file is Copyright (c) | * | Squiz Australia Pty Ltd ABN 53 131 581 247 | * +--------------------------------------------------------------------+ * | IMPORTANT: Your use of this Software is subject to the terms of | * | the Licence provided in the file licence.txt. If you cannot find | * | this file please contact Squiz (www.squiz.com.au) so we may | * | provide you a copy. | * +--------------------------------------------------------------------+ * */ /* Japanese translation by Yoshiki Kato @burnworks - v1.0.0 - 2016-03-01 */ var HTMLCS_Section508_Sniffs_C = { /** * Determines the elements to register for processing. * * Each element of the returned array can either be an element name, or "_top" * which is the top element of the tested code. * * @returns {Array} The list of elements.<|fim▁hole|> { return ['_top']; }, /** * Process the registered element. * * @param {DOMNode} element The element registered. * @param {DOMNode} top The top element of the tested code. */ process: function(element, top) { HTMLCS.addMessage(HTMLCS.NOTICE, top, '色が情報を伝える、あるいは視覚的な要素を判別するための唯一の視覚的手段になっていないことを確認してください。 Ensure that any information conveyed using colour alone is also available without colour, such as through context or markup.', 'Colour'); } };<|fim▁end|>
*/ register: function()
<|file_name|>external.go<|end_file_name|><|fim▁begin|>package ca import ( "bytes" cryptorand "crypto/rand" "crypto/tls" "crypto/x509" "encoding/hex" "encoding/json" "encoding/pem" "io" "io/ioutil" "net/http" "sync" "time" "github.com/cloudflare/cfssl/api" "github.com/cloudflare/cfssl/config" "github.com/cloudflare/cfssl/csr" "github.com/cloudflare/cfssl/signer" "github.com/docker/swarmkit/log" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) const ( // ExternalCrossSignProfile is the profile that we will be sending cross-signing CSR sign requests with ExternalCrossSignProfile = "CA" // CertificateMaxSize is the maximum expected size of a certificate. // While there is no specced upper limit to the size of an x509 certificate in PEM format, // one with a ridiculous RSA key size (16384) and 26 256-character DNS SAN fields is about 14k. // While there is no upper limit on the length of certificate chains, long chains are impractical. // To be conservative, and to also account for external CA certificate responses in JSON format // from CFSSL, we'll set the max to be 256KiB. CertificateMaxSize int64 = 256 << 10 ) // ErrNoExternalCAURLs is an error used it indicate that an ExternalCA is // configured with no URLs to which it can proxy certificate signing requests. var ErrNoExternalCAURLs = errors.New("no external CA URLs") // ExternalCA is able to make certificate signing requests to one of a list // remote CFSSL API endpoints. type ExternalCA struct { ExternalRequestTimeout time.Duration mu sync.Mutex rootCA *RootCA urls []string client *http.Client } // NewExternalCA creates a new ExternalCA which uses the given tlsConfig to // authenticate to any of the given URLS of CFSSL API endpoints. 
func NewExternalCA(rootCA *RootCA, tlsConfig *tls.Config, urls ...string) *ExternalCA { return &ExternalCA{ ExternalRequestTimeout: 5 * time.Second, rootCA: rootCA, urls: urls, client: &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, }, } } // Copy returns a copy of the external CA that can be updated independently func (eca *ExternalCA) Copy() *ExternalCA { eca.mu.Lock() defer eca.mu.Unlock() return &ExternalCA{ ExternalRequestTimeout: eca.ExternalRequestTimeout, rootCA: eca.rootCA, urls: eca.urls, client: eca.client, } } <|fim▁hole|>func (eca *ExternalCA) UpdateTLSConfig(tlsConfig *tls.Config) { eca.mu.Lock() defer eca.mu.Unlock() eca.client = &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, } } // UpdateURLs updates the list of CSR API endpoints by setting it to the given urls. func (eca *ExternalCA) UpdateURLs(urls ...string) { eca.mu.Lock() defer eca.mu.Unlock() eca.urls = urls } // UpdateRootCA changes the root CA used to append intermediates func (eca *ExternalCA) UpdateRootCA(rca *RootCA) { eca.mu.Lock() eca.rootCA = rca eca.mu.Unlock() } // Sign signs a new certificate by proxying the given certificate signing // request to an external CFSSL API server. func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert []byte, err error) { // Get the current HTTP client and list of URLs in a small critical // section. We will use these to make certificate signing requests. eca.mu.Lock() urls := eca.urls client := eca.client intermediates := eca.rootCA.Intermediates eca.mu.Unlock() if len(urls) == 0 { return nil, ErrNoExternalCAURLs } csrJSON, err := json.Marshal(req) if err != nil { return nil, errors.Wrap(err, "unable to JSON-encode CFSSL signing request") } // Try each configured proxy URL. Return after the first success. If // all fail then the last error will be returned. 
for _, url := range urls { requestCtx, cancel := context.WithTimeout(ctx, eca.ExternalRequestTimeout) cert, err = makeExternalSignRequest(requestCtx, client, url, csrJSON) cancel() if err == nil { return append(cert, intermediates...), err } log.G(ctx).Debugf("unable to proxy certificate signing request to %s: %s", url, err) } return nil, err } // CrossSignRootCA takes a RootCA object, generates a CA CSR, sends a signing request with the CA CSR to the external // CFSSL API server in order to obtain a cross-signed root func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte, error) { // ExtractCertificateRequest generates a new key request, and we want to continue to use the old // key. However, ExtractCertificateRequest will also convert the pkix.Name to csr.Name, which we // need in order to generate a signing request rcaSigner, err := rca.Signer() if err != nil { return nil, err } rootCert := rcaSigner.parsedCert cfCSRObj := csr.ExtractCertificateRequest(rootCert) der, err := x509.CreateCertificateRequest(cryptorand.Reader, &x509.CertificateRequest{ RawSubjectPublicKeyInfo: rootCert.RawSubjectPublicKeyInfo, RawSubject: rootCert.RawSubject, PublicKeyAlgorithm: rootCert.PublicKeyAlgorithm, Subject: rootCert.Subject, Extensions: rootCert.Extensions, DNSNames: rootCert.DNSNames, EmailAddresses: rootCert.EmailAddresses, IPAddresses: rootCert.IPAddresses, }, rcaSigner.cryptoSigner) if err != nil { return nil, err } req := signer.SignRequest{ Request: string(pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE REQUEST", Bytes: der, })), Subject: &signer.Subject{ CN: rootCert.Subject.CommonName, Names: cfCSRObj.Names, }, Profile: ExternalCrossSignProfile, } // cfssl actually ignores non subject alt name extensions in the CSR, so we have to add the CA extension in the signing // request as well for _, ext := range rootCert.Extensions { if ext.Id.Equal(BasicConstraintsOID) { req.Extensions = append(req.Extensions, signer.Extension{ ID: 
config.OID(ext.Id), Critical: ext.Critical, Value: hex.EncodeToString(ext.Value), }) } } return eca.Sign(ctx, req) } func makeExternalSignRequest(ctx context.Context, client *http.Client, url string, csrJSON []byte) (cert []byte, err error) { resp, err := ctxhttp.Post(ctx, client, url, "application/json", bytes.NewReader(csrJSON)) if err != nil { return nil, recoverableErr{err: errors.Wrap(err, "unable to perform certificate signing request")} } defer resp.Body.Close() b := io.LimitReader(resp.Body, CertificateMaxSize) body, err := ioutil.ReadAll(b) if err != nil { return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")} } if resp.StatusCode != http.StatusOK { return nil, recoverableErr{err: errors.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))} } var apiResponse api.Response if err := json.Unmarshal(body, &apiResponse); err != nil { logrus.Debugf("unable to JSON-parse CFSSL API response body: %s", string(body)) return nil, recoverableErr{err: errors.Wrap(err, "unable to parse JSON response")} } if !apiResponse.Success || apiResponse.Result == nil { if len(apiResponse.Errors) > 0 { return nil, errors.Errorf("response errors: %v", apiResponse.Errors) } return nil, errors.New("certificate signing request failed") } result, ok := apiResponse.Result.(map[string]interface{}) if !ok { return nil, errors.Errorf("invalid result type: %T", apiResponse.Result) } certPEM, ok := result["certificate"].(string) if !ok { return nil, errors.Errorf("invalid result certificate field type: %T", result["certificate"]) } return []byte(certPEM), nil }<|fim▁end|>
// UpdateTLSConfig updates the HTTP Client for this ExternalCA by creating // a new client which uses the given tlsConfig.
<|file_name|>basic_token_embedder_test.py<|end_file_name|><|fim▁begin|># pylint: disable=no-self-use,invalid-name import pytest import torch from torch.autograd import Variable from allennlp.common import Params from allennlp.common.checks import ConfigurationError from allennlp.data import Vocabulary from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder<|fim▁hole|>class TestBasicTextFieldEmbedder(AllenNlpTestCase): def setUp(self): super(TestBasicTextFieldEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace("1") self.vocab.add_token_to_namespace("2") self.vocab.add_token_to_namespace("3") self.vocab.add_token_to_namespace("4") params = Params({ "words1": { "type": "embedding", "embedding_dim": 2 }, "words2": { "type": "embedding", "embedding_dim": 5 }, "words3": { "type": "embedding", "embedding_dim": 3 } }) self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params) self.inputs = { "words1": Variable(torch.LongTensor([[0, 2, 3, 5]])), "words2": Variable(torch.LongTensor([[1, 4, 3, 2]])), "words3": Variable(torch.LongTensor([[1, 5, 1, 2]])) } def test_get_output_dim_aggregates_dimension_from_each_embedding(self): assert self.token_embedder.get_output_dim() == 10 def test_forward_asserts_input_field_match(self): self.inputs['words4'] = self.inputs['words3'] del self.inputs['words3'] with pytest.raises(ConfigurationError): self.token_embedder(self.inputs) self.inputs['words3'] = self.inputs['words4'] del self.inputs['words4'] def test_forward_concats_resultant_embeddings(self): assert self.token_embedder(self.inputs).size() == (1, 4, 10)<|fim▁end|>
from allennlp.common.testing import AllenNlpTestCase
<|file_name|>alert.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2014 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "alert.h" #include "base58.h" #include "chainparams.h" #include "clientversion.h" #include "net.h" #include "pubkey.h" #include "timedata.h" #include "ui_interface.h" #include "util.h" #include "utilstrencodings.h" #include <stdint.h> #include <algorithm> #include <map> #include <boost/algorithm/string/classification.hpp> #include <boost/algorithm/string/replace.hpp> #include <boost/foreach.hpp> #include <boost/thread.hpp> using namespace std; map<uint256, CAlert> mapAlerts; CCriticalSection cs_mapAlerts; void CUnsignedAlert::SetNull() { nVersion = 1; nRelayUntil = 0; nExpiration = 0; nID = 0; nCancel = 0; setCancel.clear(); nMinVer = 0; nMaxVer = 0; setSubVer.clear(); nPriority = 0; strComment.clear(); strStatusBar.clear(); strReserved.clear(); } std::string CUnsignedAlert::ToString() const { std::string strSetCancel; BOOST_FOREACH(int n, setCancel) strSetCancel += strprintf("%d ", n); std::string strSetSubVer; BOOST_FOREACH(const std::string& str, setSubVer) strSetSubVer += "\"" + str + "\" "; return strprintf( "CAlert(\n" " nVersion = %d\n" " nRelayUntil = %d\n" " nExpiration = %d\n" " nID = %d\n" " nCancel = %d\n" " setCancel = %s\n" " nMinVer = %d\n" " nMaxVer = %d\n" " setSubVer = %s\n" " nPriority = %d\n" " strComment = \"%s\"\n" " strStatusBar = \"%s\"\n" ")\n", nVersion, nRelayUntil, nExpiration, nID, nCancel, strSetCancel, nMinVer, nMaxVer, strSetSubVer, nPriority, strComment, strStatusBar); } void CAlert::SetNull() { CUnsignedAlert::SetNull(); vchMsg.clear(); vchSig.clear(); } bool CAlert::IsNull() const { return (nExpiration == 0); } uint256 CAlert::GetHash() const { return Hash(this->vchMsg.begin(), this->vchMsg.end()); } bool CAlert::IsInEffect() const { return 
(GetAdjustedTime() < nExpiration); } bool CAlert::Cancels(const CAlert& alert) const { if (!IsInEffect()) return false; // this was a no-op before 31403 return (alert.nID <= nCancel || setCancel.count(alert.nID)); } bool CAlert::AppliesTo(int nVersion, const std::string& strSubVerIn) const { // TODO: rework for client-version-embedded-in-strSubVer ? return (IsInEffect() && nMinVer <= nVersion && nVersion <= nMaxVer && (setSubVer.empty() || setSubVer.count(strSubVerIn))); } bool CAlert::AppliesToMe() const { return AppliesTo(PROTOCOL_VERSION, strSubVersion); } bool CAlert::RelayTo(CNode* pnode) const { if (!IsInEffect()) return false; // don't relay to nodes which haven't sent their version message if (pnode->nVersion == 0) return false; // returns true if wasn't already contained in the set if (pnode->setKnown.insert(GetHash()).second) { if (AppliesTo(pnode->nVersion, pnode->strSubVer) || AppliesToMe() || GetAdjustedTime() < nRelayUntil) { pnode->PushMessage("alert", *this); return true; } } return false; } bool CAlert::Sign() { CDataStream sMsg(SER_NETWORK, CLIENT_VERSION); sMsg << *(CUnsignedAlert*)this; vchMsg = std::vector<unsigned char>(sMsg.begin(), sMsg.end()); CBitcoinSecret vchSecret; if (!vchSecret.SetString(GetArg("-alertkey", ""))) { printf("CAlert::SignAlert() : vchSecret.SetString failed\n"); return false; } CKey key = vchSecret.GetKey(); if (!key.Sign(Hash(vchMsg.begin(), vchMsg.end()), vchSig)) { printf("CAlert::SignAlert() : key.Sign failed\n"); return false; } return true; } bool CAlert::CheckSignature(const std::vector<unsigned char>& alertKey) const { CPubKey key(Params().AlertKey()); if (!key.Verify(Hash(vchMsg.begin(), vchMsg.end()), vchSig)) return error("CAlert::CheckSignature() : verify signature failed"); // Now unserialize the data CDataStream sMsg(vchMsg, SER_NETWORK, PROTOCOL_VERSION); sMsg >> *(CUnsignedAlert*)this; return true; } CAlert CAlert::getAlertByHash(const uint256 &hash) { CAlert retval; { LOCK(cs_mapAlerts); map<uint256, 
CAlert>::iterator mi = mapAlerts.find(hash); if(mi != mapAlerts.end()) retval = mi->second; } return retval; } bool CAlert::ProcessAlert(const std::vector<unsigned char>& alertKey, bool fThread) { if (!CheckSignature(alertKey)) return false; if (!IsInEffect()) return false; // alert.nID=max is reserved for if the alert key is // compromised. It must have a pre-defined message, // must never expire, must apply to all versions, // and must cancel all previous // alerts or it will be ignored (so an attacker can't // send an "everything is OK, don't panic" version that // cannot be overridden): int maxInt = std::numeric_limits<int>::max(); if (nID == maxInt) { if (!( nExpiration == maxInt && nCancel == (maxInt-1) && nMinVer == 0 && nMaxVer == maxInt && setSubVer.empty() && nPriority == maxInt && strStatusBar == "URGENT: Alert key compromised, upgrade required" )) return false; } { LOCK(cs_mapAlerts); // Cancel previous alerts for (map<uint256, CAlert>::iterator mi = mapAlerts.begin(); mi != mapAlerts.end();) { const CAlert& alert = (*mi).second; if (Cancels(alert)) { LogPrint("alert", "cancelling alert %d\n", alert.nID); uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED); mapAlerts.erase(mi++); } else if (!alert.IsInEffect()) { LogPrint("alert", "expiring alert %d\n", alert.nID); uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED); mapAlerts.erase(mi++); } else mi++; } // Check if this alert has been cancelled BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts) { const CAlert& alert = item.second; if (alert.Cancels(*this)) { LogPrint("alert", "alert already cancelled by %d\n", alert.nID); return false; } } // Add to mapAlerts mapAlerts.insert(make_pair(GetHash(), *this)); // Notify UI and -alertnotify if it applies to me if(AppliesToMe()) { uiInterface.NotifyAlertChanged(GetHash(), CT_NEW); Notify(strStatusBar, fThread); } } LogPrint("alert", "accepted alert %d, AppliesToMe()=%d\n", nID, AppliesToMe()); return true; } void CAlert::Notify(const 
std::string& strMessage, bool fThread) {<|fim▁hole|> // Alert text should be plain ascii coming from a trusted source, but to // be safe we first strip anything not in safeChars, then add single quotes around // the whole string before passing it to the shell: std::string singleQuote("'"); std::string safeStatus = SanitizeString(strMessage); safeStatus = singleQuote+safeStatus+singleQuote; boost::replace_all(strCmd, "%s", safeStatus); if (fThread) boost::thread t(runCommand, strCmd); // thread runs free else runCommand(strCmd); }<|fim▁end|>
std::string strCmd = GetArg("-alertnotify", ""); if (strCmd.empty()) return;
<|file_name|>bitcoin_cs.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="cs" version="2.0"> <defaultcodec>UTF-8</defaultcodec> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Bitcoin</source> <translation>O Gaycoinu</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;Bitcoin&lt;/b&gt; version</source> <translation>&lt;b&gt;Gaycoin&lt;/b&gt; verze</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young (eay@cryptsoft.com) and UPnP software written by Thomas Bernard.</source> <translation> Tohle je experimentální program. Šířen pod licencí MIT/X11, viz přiložený soubor COPYING nebo http://www.opensource.org/licenses/mit-license.php. 
Tento produkt zahrnuje programy vyvinuté OpenSSL Projektem pro použití v OpenSSL Toolkitu (http://www.openssl.org/) a kryptografický program od Erika Younga (eay@cryptsoft.com) a program UPnP od Thomase Bernarda.</translation> </message> <message> <location filename="../aboutdialog.cpp" line="+14"/> <source>Copyright</source> <translation>Copyright</translation> </message> <message> <location line="+0"/> <source>The Bitcoin developers</source> <translation>Vývojáři Gaycoinu</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Adresář</translation> </message> <message> <location line="+19"/> <source>Double-click to edit address or label</source> <translation>Dvojklikem myši začneš upravovat označení adresy</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Vytvoř novou adresu</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Zkopíruj aktuálně vybranou adresu do systémové schránky</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>Nová &amp;adresa</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+63"/> <source>These are your Bitcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Tohle jsou tvé Gaycoinové adresy pro příjem plateb. 
Můžeš dát pokaždé každému plátci novou adresu, abys věděl, kdo ti kdy kolik platil.</translation> </message> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>&amp;Copy Address</source> <translation>&amp;Kopíruj adresu</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>Zobraz &amp;QR kód</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a Bitcoin address</source> <translation>Podepiš zprávu, čímž prokážeš, že jsi vlastníkem Gaycoinové adresy</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Po&amp;depiš zprávu</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Smaž zvolenou adresu ze seznamu</translation> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation>Exportuj data z tohoto panelu do souboru</translation> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation>&amp;Export</translation> </message> <message> <location line="-44"/> <source>Verify a message to ensure it was signed with a specified Bitcoin address</source> <translation>Ověř zprávu, aby ses ujistil, že byla podepsána danou Gaycoinovou adresou</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Ověř zprávu</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>S&amp;maž</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-5"/> <source>These are your Bitcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Tohle jsou tvé Gaycoinové adresy pro posílání plateb. 
Před odesláním mincí si vždy zkontroluj částku a cílovou adresu.</translation> </message> <message> <location line="+13"/> <source>Copy &amp;Label</source> <translation>Kopíruj &amp;označení</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Uprav</translation> </message> <message> <location line="+1"/> <source>Send &amp;Coins</source> <translation>Pošli min&amp;ce</translation> </message> <message> <location line="+260"/> <source>Export Address Book Data</source> <translation>Exportuj data adresáře</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>CSV formát (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Chyba při exportu</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Nemohu zapisovat do souboru %1.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Označení</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adresa</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(bez označení)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Změna hesla</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Zadej platné heslo</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Zadej nové heslo</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Totéž heslo ještě jednou</translation> </message> <message> 
<location filename="../askpassphrasedialog.cpp" line="+33"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Zadej nové heslo k peněžence.&lt;br/&gt;Použij &lt;b&gt;alespoň 10 náhodných znaků&lt;/b&gt; nebo &lt;b&gt;alespoň osm slov&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Zašifruj peněženku</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>K provedení této operace musíš zadat heslo k peněžence, aby se mohla odemknout.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Odemkni peněženku</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>K provedení této operace musíš zadat heslo k peněžence, aby se mohla dešifrovat.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Dešifruj peněženku</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Změň heslo</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Zadej staré a nové heslo k peněžence.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Potvrď zašifrování peněženky</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR BITCOINS&lt;/b&gt;!</source> <translation>Varování: Pokud si zašifruješ peněženku a ztratíš či zapomeneš heslo, &lt;b&gt;PŘIJDEŠ O VŠECHNY GAYCOINY&lt;/b&gt;!</translation> 
</message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Jsi si jistý, že chceš peněženku zašifrovat?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>DŮLEŽITÉ: Všechny předchozí zálohy peněženky by měly být nahrazeny nově vygenerovanou, zašifrovanou peněženkou. Z bezpečnostních důvodů budou předchozí zálohy nešifrované peněženky nepoužitelné, jakmile začneš používat novou zašifrovanou peněženku.</translation> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Upozornění: Caps Lock je zapnutý!</translation> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>Peněženka je zašifrována</translation> </message> <message> <location line="-56"/> <source>Bitcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source> <translation>Gaycoin se teď ukončí, aby dokončil zašifrování. Pamatuj však, že pouhé zašifrování peněženky úplně nezabraňuje krádeži tvých gaycoinů malwarem, kterým se může počítač nakazit.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Zašifrování peněženky selhalo</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Zašifrování peněženky selhalo kvůli vnitřní chybě. 
Tvá peněženka tedy nebyla zašifrována.</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source> <translation>Zadaná hesla nejsou shodná.</translation> </message> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>Odemčení peněženky selhalo</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Nezadal jsi správné heslo pro dešifrování peněženky.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Dešifrování peněženky selhalo</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Heslo k peněžence bylo v pořádku změněno.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+233"/> <source>Sign &amp;message...</source> <translation>Po&amp;depiš zprávu...</translation> </message> <message> <location line="+280"/> <source>Synchronizing with network...</source> <translation>Synchronizuji se se sítí...</translation> </message> <message> <location line="-349"/> <source>&amp;Overview</source> <translation>&amp;Přehled</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Zobraz celkový přehled peněženky</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>&amp;Transakce</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Procházej historii transakcí</translation> </message> <message> <location line="+7"/> <source>Edit the list of stored addresses and labels</source> <translation>Uprav seznam uložených adres a jejich označení</translation> 
</message> <message> <location line="-14"/> <source>Show the list of addresses for receiving payments</source> <translation>Zobraz seznam adres pro příjem plateb</translation> </message> <message> <location line="+31"/> <source>E&amp;xit</source> <translation>&amp;Konec</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Ukonči aplikaci</translation> </message> <message> <location line="+4"/> <source>Show information about Bitcoin</source> <translation>Zobraz informace o Gaycoinu</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>O &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Zobraz informace o Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Možnosti...</translation> </message> <message> <location line="+6"/> <source>&amp;Encrypt Wallet...</source> <translation>Zaši&amp;fruj peněženku...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Zazálohuj peněženku...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>Změň &amp;heslo...</translation> </message> <message> <location line="+285"/> <source>Importing blocks from disk...</source> <translation>Importuji bloky z disku...</translation> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> <translation>Vytvářím nový index bloků na disku...</translation> </message> <message> <location line="-347"/> <source>Send coins to a Bitcoin address</source> <translation>Pošli mince na Gaycoinovou adresu</translation> </message> <message> <location line="+49"/> <source>Modify configuration options for Bitcoin</source> <translation>Uprav nastavení Gaycoinu</translation> </message> <message> <location line="+9"/> <source>Backup wallet to 
another location</source> <translation>Zazálohuj peněženku na jiné místo</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Změň heslo k šifrování peněženky</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation>&amp;Ladicí okno</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Otevři ladicí a diagnostickou konzoli</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>&amp;Ověř zprávu...</translation> </message> <message> <location line="-165"/> <location line="+530"/> <source>Bitcoin</source> <translation>Gaycoin</translation> </message> <message> <location line="-530"/> <source>Wallet</source> <translation>Peněženka</translation> </message> <message> <location line="+101"/> <source>&amp;Send</source> <translation>&amp;Pošli</translation> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation>Při&amp;jmi</translation> </message> <message> <location line="+14"/> <source>&amp;Addresses</source> <translation>&amp;Adresy</translation> </message> <message> <location line="+22"/> <source>&amp;About Bitcoin</source> <translation>O &amp;Gaycoinu</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Zobraz/Skryj</translation> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation>Zobraz nebo skryj hlavní okno</translation> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation>Zašifruj soukromé klíče ve své peněžence</translation> </message> <message> <location line="+7"/> <source>Sign messages with your Bitcoin addresses to prove you own them</source> <translation>Podepiš zprávy svými Gaycoinovými adresami, čímž 
prokážeš, že jsi jejich vlastníkem</translation> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified Bitcoin addresses</source> <translation>Ověř zprávy, aby ses ujistil, že byly podepsány danými Gaycoinovými adresami</translation> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>&amp;Soubor</translation> </message> <message> <location line="+7"/> <source>&amp;Settings</source> <translation>&amp;Nastavení</translation> </message> <message> <location line="+6"/> <source>&amp;Help</source> <translation>Ná&amp;pověda</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Panel s listy</translation> </message> <message> <location line="+17"/> <location line="+10"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+47"/> <source>Bitcoin client</source> <translation>Gaycoin klient</translation> </message> <message numerus="yes"> <location line="+141"/> <source>%n active connection(s) to Bitcoin network</source> <translation><numerusform>%n aktivní spojení do Gaycoinové sítě</numerusform><numerusform>%n aktivní spojení do Gaycoinové sítě</numerusform><numerusform>%n aktivních spojení do Gaycoinové sítě</numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation>Není dostupný žádný zdroj bloků...</translation> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation>Zpracováno %1 z přibližně %2 bloků transakční historie.</translation> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation>Zpracováno %1 bloků transakční historie.</translation> </message> <message numerus="yes"> <location line="+20"/> <source>%n hour(s)</source> 
<translation><numerusform>hodinu</numerusform><numerusform>%n hodiny</numerusform><numerusform>%n hodin</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>den</numerusform><numerusform>%n dny</numerusform><numerusform>%n dnů</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation><numerusform>týden</numerusform><numerusform>%n týdny</numerusform><numerusform>%n týdnů</numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation>Stahuji ještě bloky transakcí za poslední %1</translation> </message> <message> <location line="+14"/> <source>Last received block was generated %1 ago.</source> <translation>Poslední stažený blok byl vygenerován %1 zpátky.</translation> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation>Následné transakce ještě nebudou vidět.</translation> </message> <message> <location line="+22"/> <source>Error</source> <translation>Chyba</translation> </message> <message> <location line="+3"/> <source>Warning</source> <translation>Upozornění</translation> </message> <message> <location line="+3"/> <source>Information</source> <translation>Informace</translation> </message> <message> <location line="+70"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation>Tahle transakce přesahuje velikostní limit. I tak ji ale můžeš poslat, pokud za ni zaplatíš poplatek %1, který půjde uzlům, které tvou transakci zpracují, a navíc tak podpoříš síť. 
Chceš zaplatit poplatek?</translation> </message> <message> <location line="-140"/> <source>Up to date</source> <translation>Aktuální</translation> </message> <message> <location line="+31"/> <source>Catching up...</source> <translation>Stahuji...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation>Potvrď transakční poplatek</translation> </message> <message> <location line="+8"/> <source>Sent transaction</source> <translation>Odeslané transakce</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>Příchozí transakce</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Datum: %1 Částka: %2 Typ: %3 Adresa: %4 </translation> </message> <message> <location line="+33"/> <location line="+23"/> <source>URI handling</source> <translation>Zpracování URI</translation> </message> <message> <location line="-23"/> <location line="+23"/> <source>URI can not be parsed! This can be caused by an invalid Bitcoin address or malformed URI parameters.</source> <translation>Nepodařilo se analyzovat URI! Důvodem může být neplatná Gaycoinová adresa nebo poškozené parametry URI.</translation> </message> <message> <location line="+17"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Peněženka je &lt;b&gt;zašifrovaná&lt;/b&gt; a momentálně &lt;b&gt;odemčená&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Peněženka je &lt;b&gt;zašifrovaná&lt;/b&gt; a momentálně &lt;b&gt;zamčená&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+111"/> <source>A fatal error occurred. Bitcoin can no longer continue safely and will quit.</source> <translation>Stala se fatální chyba. 
Gaycoin nemůže bezpečně pokračovat v činnosti, a proto skončí.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+104"/> <source>Network Alert</source> <translation>Upozornění sítě</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Uprav adresu</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Označení</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>Označení spojené s tímto záznamem v adresáři</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Adresa</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>Adresa spojená s tímto záznamem v adresáři. 
Lze upravovat jen pro odesílací adresy.</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Nová přijímací adresa</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Nová odesílací adresa</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Uprav přijímací adresu</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Uprav odesílací adresu</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Zadaná adresa &quot;%1&quot; už v adresáři je.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Bitcoin address.</source> <translation>Zadaná adresa &quot;%1&quot; není platná Gaycoinová adresa.</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Nemohu odemknout peněženku.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Nepodařilo se mi vygenerovat nový klíč.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <location line="+12"/> <source>Bitcoin-Qt</source> <translation>Gaycoin-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation>verze</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Užití:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>možnosti příkazové řádky</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation>Možnosti UI</translation> 
</message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Nastavit jazyk, například &quot;de_DE&quot; (výchozí: systémové nastavení)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Nastartovat minimalizovaně</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Zobrazit startovací obrazovku (výchozí: 1)</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Možnosti</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Hlavní</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation>Dobrovolný transakční poplatek za každý započatý kB dopomáhá k rychlému zpracování tvých transakcí. 
Většina transakcí má do 1 kB.</translation> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Platit &amp;transakční poplatek</translation> </message> <message> <location line="+31"/> <source>Automatically start Bitcoin after logging in to the system.</source> <translation>Automaticky spustí Gaycoin po přihlášení do systému.</translation> </message> <message> <location line="+3"/> <source>&amp;Start Bitcoin on system login</source> <translation>S&amp;pustit Gaycoin po přihlášení do systému</translation> </message> <message> <location line="+35"/> <source>Reset all client options to default.</source> <translation>Vrátí všechny volby na výchozí hodnoty.</translation> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation>&amp;Obnovit nastavení</translation> </message> <message> <location line="+13"/> <source>&amp;Network</source> <translation>&amp;Síť</translation> </message> <message> <location line="+6"/> <source>Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Automaticky otevře potřebný port na routeru. Tohle funguje jen za předpokladu, že tvůj router podporuje UPnP a že je UPnP povolené.</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Namapovat port přes &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the Bitcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation>Připojí se do Gaycoinové sítě přes SOCKS proxy (např. 
když se připojuje přes Tor).</translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation>&amp;Připojit přes SOCKS proxy:</translation> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP adresa proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation>IP adresa proxy (např. 127.0.0.1)</translation> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>Por&amp;t:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Port proxy (např. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Verze SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Verze SOCKS proxy (např. 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>O&amp;kno</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Po minimalizaci okna zobrazí pouze ikonu v panelu.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimalizovávat do ikony v panelu</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Zavřením se aplikace minimalizuje. 
Pokud je tato volba zaškrtnuta, tak se aplikace ukončí pouze zvolením Konec v menu.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>Za&amp;vřením minimalizovat</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>Zobr&amp;azení</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Jazyk uživatelského rozhraní:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Bitcoin.</source> <translation>Tady lze nastavit jazyk uživatelského rozhraní. Nastavení se projeví až po restartování Gaycoinu.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>J&amp;ednotka pro částky: </translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Zvol výchozí podjednotku, která se bude zobrazovat v programu a při posílání mincí.</translation> </message> <message> <location line="+9"/> <source>Whether to show Bitcoin addresses in the transaction list or not.</source> <translation>Zda ukazovat gaycoinové adresy ve výpisu transakcí nebo ne.</translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Ukazo&amp;vat adresy ve výpisu transakcí</translation> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;Budiž</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Zrušit</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation>&amp;Uložit</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> 
<source>default</source> <translation>výchozí</translation> </message> <message> <location line="+130"/> <source>Confirm options reset</source> <translation>Potvrzení obnovení nastavení</translation> </message> <message> <location line="+1"/> <source>Some settings may require a client restart to take effect.</source> <translation>Některá nastavení mohou vyžadovat restart klienta, aby se mohly projevit.</translation> </message> <message> <location line="+0"/> <source>Do you want to proceed?</source> <translation>Chceš pokračovat?</translation> </message> <message> <location line="+42"/> <location line="+9"/> <source>Warning</source> <translation>Upozornění</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Bitcoin.</source> <translation>Nastavení se projeví až po restartování Gaycoinu.</translation> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>Zadaná adresa proxy je neplatná.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulář</translation> </message> <message> <location line="+50"/> <location line="+166"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet.</source> <translation>Zobrazené informace nemusí být aktuální. Tvá peněženka se automaticky sesynchronizuje s Gaycoinovou sítí, jakmile se s ní spojí. 
Zatím ale ještě není synchronizace dokončena.</translation> </message> <message> <location line="-124"/> <source>Balance:</source> <translation>Stav účtu:</translation> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>Nepotvrzeno:</translation> </message> <message> <location line="-78"/> <source>Wallet</source> <translation>Peněženka</translation> </message> <message> <location line="+107"/> <source>Immature:</source> <translation>Nedozráno:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Vytěžené mince, které ještě nejsou zralé</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Poslední transakce&lt;/b&gt;</translation> </message> <message> <location line="-101"/> <source>Your current balance</source> <translation>Aktuální stav tvého účtu</translation> </message> <message> <location line="+29"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation>Celkem z transakcí, které ještě nejsou potvrzené a které se ještě nezapočítávají do celkového stavu účtu</translation> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>nesynchronizováno</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start bitcoin: click-to-pay handler</source> <translation>Nemůžu spustit gaycoin: obsluha click-to-pay</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation>QR kód</translation> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation>Požadovat 
platbu</translation> </message> <message> <location line="+56"/> <source>Amount:</source> <translation>Částka:</translation> </message> <message> <location line="-44"/> <source>Label:</source> <translation>Označení:</translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation>Zpráva:</translation> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation>&amp;Ulož jako...</translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation>Chyba při kódování URI do QR kódu.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation>Zadaná částka je neplatná, překontroluj ji prosím.</translation> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>Výsledná URI je příliš dlouhá, zkus zkrátit text označení / zprávy.</translation> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation>Ulož QR kód</translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation>PNG obrázky (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Název klienta</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+339"/> <source>N/A</source> <translation>N/A</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Verze klienta</translation> </message> <message> <location line="-45"/> 
<source>&amp;Information</source> <translation>&amp;Informace</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Používaná verze OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Čas spuštění</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Síť</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Počet spojení</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation>V testnetu</translation> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Řetězec bloků</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Aktuální počet bloků</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Odhad celkového počtu bloků</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Čas posledního bloku</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Otevřít</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation>Argumenty z příkazové řádky</translation> </message> <message> <location line="+7"/> <source>Show the Bitcoin-Qt help message to get a list with possible Bitcoin command-line options.</source> <translation>Seznam parametrů Gaycoinu pro příkazovou řádku získáš v nápovědě Gaycoinu Qt.</translation> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation>&amp;Zobrazit</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Konzole</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Datum kompilace</translation> </message> <message>
<location line="-104"/> <source>Bitcoin - Debug window</source> <translation>Gaycoin - ladicí okno</translation> </message> <message> <location line="+25"/> <source>Bitcoin Core</source> <translation>Jádro Gaycoinu</translation> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Soubor s ladicími záznamy</translation> </message> <message> <location line="+7"/> <source>Open the Bitcoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Otevři soubor s ladicími záznamy Gaycoinu z aktuálního datového adresáře. U velkých logů to může pár vteřin zabrat.</translation> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Vyčistit konzoli</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the Bitcoin RPC console.</source> <translation>Vítej v Gaycoinové RPC konzoli.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>V historii se pohybuješ šipkami nahoru a dolů a pomocí &lt;b&gt;Ctrl-L&lt;/b&gt; čistíš obrazovku.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Napsáním &lt;b&gt;help&lt;/b&gt; si vypíšeš přehled dostupných příkazů.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+124"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Pošli mince</translation> </message> <message> <location line="+50"/> <source>Send to multiple recipients at once</source> <translation>Pošli více 
příjemcům naráz</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Při&amp;dej příjemce</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation>Smaž všechny transakční formuláře</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Všechno s&amp;maž</translation> </message> <message> <location line="+22"/> <source>Balance:</source> <translation>Stav účtu:</translation> </message> <message> <location line="+10"/> <source>123.456 GAY</source> <translation>123.456 GAY</translation> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Potvrď odeslání</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>P&amp;ošli</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-59"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation>&lt;b&gt;%1&lt;/b&gt; pro %2 (%3)</translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Potvrď odeslání mincí</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation>Jsi si jistý, že chceš poslat %1?</translation> </message> <message> <location line="+0"/> <source> and </source> <translation> a </translation> </message> <message> <location line="+23"/> <source>The recipient address is not valid, please recheck.</source> <translation>Adresa příjemce je neplatná, překontroluj ji prosím.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>Odesílaná částka musí být větší než 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>Částka překračuje stav účtu.</translation> </message> <message> <location 
line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Celková částka při připočítání poplatku %1 překročí stav účtu.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Zaznamenána duplikovaná adresa; každá adresa může být v odesílané platbě pouze jednou.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation>Chyba: Vytvoření transakce selhalo!</translation> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Chyba: Transakce byla odmítnuta. Tohle může nastat, pokud nějaké mince z tvé peněženky už jednou byly utraceny, například pokud používáš kopii souboru wallet.dat a mince byly utraceny v druhé kopii, ale nebyly označeny jako utracené v této.</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>Formulář</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Čás&amp;tka:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>&amp;Komu:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Adresa příjemce (např. 
1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Zadej označení této adresy; obojí se ti pak uloží do adresáře</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>O&amp;značení:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation>Vyber adresu z adresáře</translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Vlož adresu ze schránky</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation>Smaž tohoto příjemce</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Zadej Gaycoinovou adresu (např. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Podpisy - podepsat/ověřit zprávu</translation> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation>&amp;Podepiš zprávu</translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. 
Only sign fully-detailed statements you agree to.</source> <translation>Podepsáním zprávy svými adresami můžeš prokázat, že je skutečně vlastníš. Buď opatrný a nepodepisuj nic vágního; například při phishingových útocích můžeš být lákán, abys něco takového podepsal. Podepisuj pouze zcela úplná a detailní prohlášení, se kterými souhlasíš.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Adresa, kterou se zpráva podepíše (např. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <location line="+10"/> <location line="+213"/> <source>Choose an address from the address book</source> <translation>Vyber adresu z adresáře</translation> </message> <message> <location line="-203"/> <location line="+213"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-203"/> <source>Paste address from clipboard</source> <translation>Vlož adresu ze schránky</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Sem vepiš zprávu, kterou chceš podepsat</translation> </message> <message> <location line="+7"/> <source>Signature</source> <translation>Podpis</translation> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation>Zkopíruj aktuálně vybraný podpis do systémové schránky</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Bitcoin address</source> <translation>Podepiš zprávu, čímž prokážeš, že jsi vlastníkem této Gaycoinové adresy</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Po&amp;depiš zprávu</translation> </message> <message> <location line="+14"/> 
<source>Reset all sign message fields</source> <translation>Vymaž všechna pole formuláře pro podepsání zprávy</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Všechno &amp;smaž</translation> </message> <message> <location line="-87"/> <source>&amp;Verify Message</source> <translation>&amp;Ověř zprávu</translation> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>K ověření podpisu zprávy zadej podepisující adresu, zprávu (ověř si, že správně kopíruješ zalomení řádků, mezery, tabulátory apod.) a podpis. Dávej pozor na to, abys nezkopíroval do podpisu víc, než co je v samotné podepsané zprávě, abys nebyl napálen man-in-the-middle útokem.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Adresa, kterou je zpráva podepsána (např. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Bitcoin address</source> <translation>Ověř zprávu, aby ses ujistil, že byla podepsána danou Gaycoinovou adresou</translation> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation>O&amp;věř zprávu</translation> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation>Vymaž všechna pole formuláře pro ověření zprávy</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Bitcoin address (e.g. 
1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Zadej Gaycoinovou adresu (např. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Kliknutím na &quot;Podepiš zprávu&quot; vygeneruješ podpis</translation> </message> <message> <location line="+3"/> <source>Enter Bitcoin signature</source> <translation>Vlož Gaycoinový podpis</translation> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>Zadaná adresa je neplatná.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Zkontroluj ji prosím a zkus to pak znovu.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>Zadaná adresa nepasuje ke klíči.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Odemčení peněženky bylo zrušeno.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Soukromý klíč pro zadanou adresu není dostupný.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Podepisování zprávy selhalo.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Zpráva podepsána.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>Podpis nejde dekódovat.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Zkontroluj ho prosím a zkus to pak 
znovu.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>Podpis se neshoduje s hašem zprávy.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Ověřování zprávy selhalo.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Zpráva ověřena.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+22"/> <source>The Bitcoin developers</source> <translation>Vývojáři Gaycoinu</translation> </message> <message> <location line="+1"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+20"/> <source>Open until %1</source> <translation>Otevřeno dokud %1</translation> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation>%1/offline</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/nepotvrzeno</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 potvrzení</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Stav</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, rozesláno přes 1 uzel</numerusform><numerusform>, rozesláno přes %n uzly</numerusform><numerusform>, rozesláno přes %n uzlů</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Zdroj</translation> </message> <message> <location line="+0"/> <source>Generated</source>
<translation>Vygenerováno</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Od</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Pro</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>vlastní adresa</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>označení</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Příjem</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>dozraje po jednom bloku</numerusform><numerusform>dozraje po %n blocích</numerusform><numerusform>dozraje po %n blocích</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>neakceptováno</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Výdaj</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Transakční poplatek</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Čistá částka</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Zpráva</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Komentář</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID transakce</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 120 blocks before they can be spent. 
When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Vygenerované mince musí čekat 120 bloků, než mohou být utraceny. Když jsi vygeneroval tenhle blok, tak byl rozposlán do sítě, aby byl přidán do řetězce bloků. Pokud se mu nepodaří dostat se do řetězce, změní se na &quot;neakceptovaný&quot; a nepůjde utratit. To se občas může stát, pokud jiný uzel vygeneruje blok zhruba ve stejném okamžiku jako ty.</translation> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Ladicí informace</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transakce</translation> </message> <message> <location line="+3"/> <source>Inputs</source> <translation>Vstupy</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Částka</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>true</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>false</translation> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation>, ještě nebylo rozesláno</translation> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation><numerusform>Otevřeno pro 1 další blok</numerusform><numerusform>Otevřeno pro %n další bloky</numerusform><numerusform>Otevřeno pro %n dalších bloků</numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation>neznámo</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location 
filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detaily transakce</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Toto okno zobrazuje detailní popis transakce</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+225"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Typ</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adresa</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Částka</translation> </message> <message numerus="yes"> <location line="+57"/> <source>Open for %n more block(s)</source> <translation><numerusform>Otevřeno pro 1 další blok</numerusform><numerusform>Otevřeno pro %n další bloky</numerusform><numerusform>Otevřeno pro %n dalších bloků</numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation>Otřevřeno dokud %1</translation> </message> <message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation>Offline (%1 potvrzení)</translation> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation>Nepotvrzeno (%1 z %2 potvrzení)</translation> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation>Potvrzeno (%1 potvrzení)</translation> </message> <message numerus="yes"> <location line="+8"/> <source>Mined balance will be available when it matures in %n more block(s)</source> <translation><numerusform>Vytěžené mince budou použitelné po dozrání, tj. po jednom bloku</numerusform><numerusform>Vytěžené mince budou použitelné po dozrání, tj. 
po %n blocích</numerusform><numerusform>Vytěžené mince budou použitelné po dozrání, tj. po %n blocích</numerusform></translation> </message> <message> <location line="+5"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Tento blok nedostal žádný jiný uzel a pravděpodobně nebude akceptován!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Vygenerováno, ale neakceptováno</translation> </message> <message> <location line="+43"/> <source>Received with</source> <translation>Přijato do</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Přijato od</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Posláno na</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Platba sama sobě</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Vytěženo</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/a)</translation> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Stav transakce. 
Najetím myši na toto políčko si zobrazíš počet potvrzení.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Datum a čas přijetí transakce.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Druh transakce.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Cílová adresa transakce.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Částka odečtená z nebo přičtená k účtu.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+52"/> <location line="+16"/> <source>All</source> <translation>Vše</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Dnes</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Tento týden</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Tento měsíc</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Minulý měsíc</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Letos</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Rozsah...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Přijato</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Posláno</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Sám sobě</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Vytěženo</translation> </message> <message> <location line="+1"/> 
<source>Other</source> <translation>Ostatní</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Zadej adresu nebo označení pro její vyhledání</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Minimální částka</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Kopíruj adresu</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Kopíruj její označení</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Kopíruj částku</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Kopíruj ID transakce</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Uprav označení</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Zobraz detaily transakce</translation> </message> <message> <location line="+139"/> <source>Export Transaction Data</source> <translation>Exportuj transakční data</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>CSV formát (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Potvrzeno</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Datum</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Typ</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Označení</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Adresa</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Částka</translation> </message> <message> <location line="+1"/> 
<source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>Chyba při exportu</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Nemohu zapisovat do souboru %1.</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Rozsah:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>až</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+193"/> <source>Send Coins</source> <translation>Pošli mince</translation> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+42"/> <source>&amp;Export</source> <translation>&amp;Export</translation> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation>Exportuj data z tohoto panelu do souboru</translation> </message> <message> <location line="+193"/> <source>Backup Wallet</source> <translation>Záloha peněženky</translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation>Data peněženky (*.dat)</translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation>Zálohování selhalo</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation>Při ukládání peněženky na nové místo se přihodila nějaká chyba.</translation> </message> <message> <location line="+4"/> <source>Backup Successful</source> <translation>Úspěšně zazálohováno</translation> </message> <message> <location line="+0"/> <source>The wallet data was successfully saved to the new location.</source> <translation>Data z peněženky byla v pořádku uložena na nové místo.</translation> </message> 
</context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+94"/> <source>Bitcoin version</source> <translation>Verze Gaycoinu</translation> </message> <message> <location line="+102"/> <source>Usage:</source> <translation>Užití:</translation> </message> <message> <location line="-29"/> <source>Send command to -server or bitcoind</source> <translation>Poslat příkaz pro -server nebo gaycoind</translation> </message> <message> <location line="-23"/> <source>List commands</source> <translation>Výpis příkazů</translation> </message> <message> <location line="-12"/> <source>Get help for a command</source> <translation>Získat nápovědu pro příkaz</translation> </message> <message> <location line="+24"/> <source>Options:</source> <translation>Možnosti:</translation> </message> <message> <location line="+24"/> <source>Specify configuration file (default: bitcoin.conf)</source> <translation>Konfigurační soubor (výchozí: gaycoin.conf)</translation> </message> <message> <location line="+3"/> <source>Specify pid file (default: bitcoind.pid)</source> <translation>PID soubor (výchozí: gaycoind.pid)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Adresář pro data</translation> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Nastavit velikost databázové vyrovnávací paměti v megabajtech (výchozí: 25)</translation> </message> <message> <location line="-28"/> <source>Listen for connections on &lt;port&gt; (default: 11015 or testnet: 5744)</source> <translation>Čekat na spojení na &lt;portu&gt; (výchozí: 11015 nebo testnet: 5744)</translation> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Povolit nejvýše &lt;n&gt; připojení k uzlům (výchozí: 125)</translation> </message> <message> <location line="-48"/> 
<source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Připojit se k uzlu, získat adresy jeho protějšků a odpojit se</translation> </message> <message> <location line="+82"/> <source>Specify your own public address</source> <translation>Specifikuj svou veřejnou adresu</translation> </message> <message> <location line="+3"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Práh pro odpojování zlobivých uzlů (výchozí: 100)</translation> </message> <message> <location line="-134"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Doba ve vteřinách, po kterou se nebudou moci zlobivé uzly znovu připojit (výchozí: 86400)</translation> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Při nastavování naslouchacího RPC portu %u pro IPv4 nastala chyba: %s</translation> </message> <message> <location line="+27"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 21015 or testnet: 5745)</source> <translation>Čekat na JSON RPC spojení na &lt;portu&gt; (výchozí: 21015 nebo testnet: 5745)</translation> </message> <message> <location line="+37"/> <source>Accept command line and JSON-RPC commands</source> <translation>Akceptovat příkazy z příkazové řádky a přes JSON-RPC</translation> </message> <message> <location line="+76"/> <source>Run in the background as a daemon and accept commands</source> <translation>Běžet na pozadí jako démon a akceptovat příkazy</translation> </message> <message> <location line="+37"/> <source>Use the test network</source> <translation>Použít testovací síť (testnet)</translation> </message> <message> <location line="-112"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Přijímat spojení zvenčí (výchozí: 1, pokud není zadáno -proxy nebo 
-connect)</translation> </message> <message> <location line="-80"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=bitcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.com </source> <translation>%s, musíš nastavit rpcpassword v konfiguračním souboru: %s Je vhodné použít následující náhodné heslo: rpcuser=bitcoinrpc rpcpassword=%s (není potřeba si ho pamatovat) rpcuser a rpcpassword NESMÍ být stejné. Pokud konfigurační soubor ještě neexistuje, vytvoř ho tak, aby ho mohl číst pouze vlastník. Je také doporučeno si nastavit alertnotify, abys byl upozorněn na případné problémy; například: alertnotify=echo %%s | mail -s &quot;Bitcoin Alert&quot; admin@foo.com </translation> </message> <message> <location line="+17"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Při nastavování naslouchacího RPC portu %u pro IPv6 nastala chyba, vracím se k IPv4: %s</translation> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation>Poslouchat na zadané adrese. Pro zápis IPv6 adresy použij notaci [adresa]:port</translation> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. Bitcoin is probably already running.</source> <translation>Nedaří se mi získat zámek na datový adresář %s. Gaycoin pravděpodobně už jednou běží.</translation> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Chyba: Transakce byla odmítnuta! Tohle může nastat, pokud nějaké mince z tvé peněženky už jednou byly utraceny, například pokud používáš kopii souboru wallet.dat a mince byly utraceny v druhé kopii, ale nebyly označeny jako utracené v této.</translation> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation>Chyba: Tahle transakce vyžaduje transakční poplatek nejméně %s kvůli velikosti zasílané částky, komplexnosti nebo použití nedávno přijatých mincí!</translation> </message> <message> <location line="+3"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation>Spustit příkaz po přijetí relevantního hlášení (%s se v příkazu nahradí za zprávu)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Spustit příkaz, když se objeví transakce týkající se peněženky (%s se v příkazu nahradí za TxID)</translation> </message> <message> <location line="+11"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation>Nastavit maximální velikost prioritních/nízkopoplatkových transakcí v bajtech (výchozí: 27000)</translation> </message> <message> <location line="+6"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Tohle je testovací verze – používej ji jen na vlastní riziko, ale rozhodně ji nepoužívej k těžbě nebo pro obchodní aplikace</translation> </message> <message> <location line="+5"/> <source>Warning: -paytxfee is 
set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Upozornění: -paytxfee je nastaveno velmi vysoko! Toto je transakční poplatek, který zaplatíš za každou poslanou transakci.</translation> </message> <message> <location line="+3"/> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Upozornění: Zobrazené transakce nemusí být správné! Možná potřebuješ aktualizovat nebo ostatní uzly potřebují aktualizovat.</translation> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Bitcoin will not work properly.</source> <translation>Upozornění: Zkontroluj, že máš v počítači správně nastavený datum a čas! Pokud jsou nastaveny špatně, Gaycoin nebude fungovat správně.</translation> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Upozornění: nastala chyba při čtení souboru wallet.dat! Všechny klíče se přečetly správně, ale data o transakcích nebo záznamy v adresáři mohou chybět či být nesprávné.</translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Upozornění: soubor wallet.dat je poškozený, data jsou však zachráněna! Původní soubor wallet.dat je uložený jako wallet.{timestamp}.bak v %s. 
Pokud je stav tvého účtu nebo transakce nesprávné, zřejmě bys měl obnovit zálohu.</translation> </message> <message> <location line="+14"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Pokusit se zachránit soukromé klíče z poškozeného souboru wallet.dat</translation> </message> <message> <location line="+2"/> <source>Block creation options:</source> <translation>Možnosti vytvoření bloku:</translation> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation>Připojit se pouze k zadanému uzlu (příp. zadaným uzlům)</translation> </message> <message> <location line="+3"/> <source>Corrupted block database detected</source> <translation>Bylo zjištěno poškození databáze bloků</translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Zjistit vlastní IP adresu (výchozí: 1, pokud naslouchá a není zadáno -externalip)</translation> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation>Chceš přestavět databázi bloků hned teď?</translation> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation>Chyba při zakládání databáze bloků</translation> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation>Chyba při vytváření databázového prostředí %s pro peněženku!</translation> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation>Chyba při načítání databáze bloků</translation> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation>Chyba při otevírání databáze bloků</translation> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation>Problém: Na disku je málo 
místa!</translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation>Chyba: Peněženka je zamčená, nemohu vytvořit transakci!</translation> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation>Chyba: systémová chyba: </translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Nepodařilo se naslouchat na žádném portu. Použij -listen=0, pokud to byl tvůj záměr.</translation> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation>Nepodařilo se přečíst informace o bloku</translation> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation>Nepodařilo se přečíst blok</translation> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation>Nepodařilo se sesynchronizovat index bloků</translation> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation>Nepodařilo se zapsat index bloků</translation> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation>Nepodařilo se zapsat informace o bloku</translation> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation>Nepodařilo se zapsat blok</translation> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation>Nepodařilo se zapsat informace o souboru</translation> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation>Selhal zápis do databáze mincí</translation> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation>Nepodařilo se zapsat index transakcí</translation> </message> <message> <location line="+1"/> <source>Failed to write undo 
data</source> <translation>Nepodařilo se zapsat data o vracení změn</translation> </message> <message> <location line="+2"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation>Hledat uzly přes DNS (výchozí: 1, pokud není zadáno -connect)</translation> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation>Generovat mince (výchozí: 0)</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation>Kolik bloků při startu zkontrolovat (výchozí: 288, 0 = všechny)</translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation>Jak moc důkladná má být verifikace bloků (0-4, výchozí: 3)</translation> </message> <message> <location line="+19"/> <source>Not enough file descriptors available.</source> <translation>Je nedostatek deskriptorů souborů.</translation> </message> <message> <location line="+8"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation>Znovu vytvořit index řetězce bloků z aktuálních blk000??.dat souborů</translation> </message> <message> <location line="+16"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation>Nastavení počtu vláken pro servisní RPC volání (výchozí: 4)</translation> </message> <message> <location line="+26"/> <source>Verifying blocks...</source> <translation>Ověřuji bloky...</translation> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation>Kontroluji peněženku...</translation> </message> <message> <location line="-69"/> <source>Imports blocks from external blk000??.dat file</source> <translation>Importovat bloky z externího souboru blk000??.dat</translation> </message> <message> <location line="-76"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = 
leave that many cores free, default: 0)</source> <translation>Nastavení počtu vláken pro verifikaci skriptů (max. 16, 0 = automaticky, &lt;0 = nechat daný počet jader volný, výchozí: 0)</translation> </message> <message> <location line="+77"/> <source>Information</source> <translation>Informace</translation> </message> <message> <location line="+3"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation>Neplatná -tor adresa: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Neplatná částka pro -minrelaytxfee=&lt;částka&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Neplatná částka pro -mintxfee=&lt;částka&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation>Spravovat úplný index transakcí (výchozí: 0)</translation> </message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Maximální velikost přijímacího bufferu pro každé spojení, &lt;n&gt;*1000 bajtů (výchozí: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Maximální velikost odesílacího bufferu pro každé spojení, &lt;n&gt;*1000 bajtů (výchozí: 1000)</translation> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation>Uznávat pouze řetěz bloků, který odpovídá vnitřním kontrolním bodům (výchozí: 1)</translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Připojit se pouze k uzlům v 
&lt;net&gt; síti (IPv4, IPv6 nebo Tor)</translation> </message> <message> <location line="+2"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation>Tisknout speciální ladicí informace. Implikuje použití všech -debug* voleb</translation> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation>Tisknout speciální ladicí informace o síti</translation> </message> <message> <location line="+2"/> <source>Prepend debug output with timestamp</source> <translation>Připojit před ladicí výstup časové razítko</translation> </message> <message> <location line="+5"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>Možnosti SSL: (viz instrukce nastavení SSL v Gaycoin Wiki)</translation> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation>Zvol verzi socks proxy (4-5, výchozí: 5)</translation> </message> <message> <location line="+3"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Posílat stopovací/ladicí informace do konzole místo do souboru debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation>Posílat stopovací/ladicí informace do debuggeru</translation> </message> <message> <location line="+5"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation>Nastavit maximální velikost bloku v bajtech (výchozí: 250000)</translation> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Nastavit minimální velikost bloku v bajtech (výchozí: 0)</translation> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Při spuštění klienta zmenšit soubor debug.log (výchozí: 1, 
pokud není zadáno -debug)</translation> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation>Podepisování transakce selhalo</translation> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Zadej časový limit spojení v milisekundách (výchozí: 5000)</translation> </message> <message> <location line="+4"/> <source>System error: </source> <translation>Systémová chyba: </translation> </message> <message> <location line="+4"/> <source>Transaction amount too small</source> <translation>Částka v transakci je příliš malá</translation> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation>Částky v transakci musí být kladné</translation> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation>Transakce je příliš velká</translation> </message> <message> <location line="+7"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Použít UPnP k namapování naslouchacího portu (výchozí: 0)</translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Použít UPnP k namapování naslouchacího portu (výchozí: 1, pokud naslouchá)</translation> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation>Použít proxy k připojení ke skrytým službám tor (výchozí: stejné jako -proxy)</translation> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> <translation>Uživatelské jméno pro JSON-RPC spojení</translation> </message> <message> <location line="+4"/> <source>Warning</source> <translation>Upozornění</translation> </message> <message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Upozornění: 
tahle verze je zastaralá, měl bys ji aktualizovat!</translation> </message> <message> <location line="+1"/> <source>You need to rebuild the databases using -reindex to change -txindex</source> <translation>Je třeba přestavět databázi použitím -reindex, aby bylo možné změnit -txindex</translation> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation>Soubor wallet.dat je poškozen, jeho záchrana se nezdařila</translation> </message> <message> <location line="-50"/> <source>Password for JSON-RPC connections</source> <translation>Heslo pro JSON-RPC spojení</translation> </message> <message> <location line="-67"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Povolit JSON-RPC spojení ze specifikované IP adresy</translation> </message> <message> <location line="+76"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Posílat příkazy uzlu běžícím na &lt;ip&gt; (výchozí: 127.0.0.1)</translation> </message> <message> <location line="-120"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Spustit příkaz, když se změní nejlepší blok (%s se v příkazu nahradí hashem bloku)</translation> </message> <message> <location line="+147"/> <source>Upgrade wallet to latest format</source> <translation>Převést peněženku na nejnovější formát</translation> </message> <message> <location line="-21"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Nastavit zásobník klíčů na velikost &lt;n&gt; (výchozí: 100)</translation> </message> <message> <location line="-12"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Přeskenovat řetězec bloků na chybějící transakce tvé pěněženky</translation> </message> <message> <location line="+35"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Použít OpenSSL (https) pro 
JSON-RPC spojení</translation> </message> <message> <location line="-26"/> <source>Server certificate file (default: server.cert)</source> <translation>Soubor se serverovým certifikátem (výchozí: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Soubor se serverovým soukromým klíčem (výchozí: server.pem)</translation> </message> <message> <location line="-151"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation>Akceptovatelné šifry (výchozí: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation> </message> <message> <location line="+165"/> <source>This help message</source> <translation>Tato nápověda</translation> </message> <message> <location line="+6"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Nedaří se mi připojit na %s na tomhle počítači (operace bind vrátila chybu %d, %s)</translation> </message> <message> <location line="-91"/> <source>Connect through socks proxy</source> <translation>Připojit se přes socks proxy</translation> </message> <message> <location line="-10"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Povolit DNS dotazy pro -addnode (přidání uzlu), -seednode a -connect (připojení)</translation> </message> <message> <location line="+55"/> <source>Loading addresses...</source> <translation>Načítám adresy...</translation> </message> <message> <location line="-35"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Chyba při načítání wallet.dat: peněženka je poškozená</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of Bitcoin</source> <translation>Chyba při načítání wallet.dat: peněženka vyžaduje novější verzi Gaycoinu</translation> </message> <message> <location line="+93"/> <source>Wallet needed 
to be rewritten: restart Bitcoin to complete</source> <translation>Soubor s peněženkou potřeboval přepsat: restartuj Gaycoin, aby se operace dokončila</translation> </message> <message> <location line="-95"/> <source>Error loading wallet.dat</source> <translation>Chyba při načítání wallet.dat</translation> </message> <message> <location line="+28"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Neplatná -proxy adresa: &apos;%s&apos;</translation> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>V -onlynet byla uvedena neznámá síť: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>V -socks byla požadována neznámá verze proxy: %i</translation> </message> <message> <location line="-96"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Nemohu přeložit -bind adresu: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Nemohu přeložit -externalip adresu: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Neplatná částka pro -paytxfee=&lt;částka&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation>Neplatná částka</translation> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation>Nedostatek prostředků</translation> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation>Načítám index bloků...</translation> </message> <message> <location line="-57"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Přidat uzel, ke kterému se připojit a snažit se spojení 
udržet</translation> </message> <message> <location line="-25"/> <source>Unable to bind to %s on this computer. Bitcoin is probably already running.</source> <translation>Nedaří se mi připojit na %s na tomhle počítači. Gaycoin už pravděpodobně jednou běží.</translation> </message> <message> <location line="+64"/> <source>Fee per KB to add to transactions you send</source> <translation>Poplatek za kB, který se přidá ke každé odeslané transakci</translation> </message> <message> <location line="+19"/> <source>Loading wallet...</source> <translation>Načítám peněženku...</translation> </message> <message> <location line="-52"/> <source>Cannot downgrade wallet</source> <translation>Nemohu převést peněženku do staršího formátu</translation> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation>Nemohu napsat výchozí adresu</translation> </message> <message> <location line="+64"/> <source>Rescanning...</source> <translation>Přeskenovávám...</translation> </message> <message> <location line="-57"/> <source>Done loading</source> <translation>Načítání dokončeno</translation> </message> <message> <location line="+82"/> <source>To use the %s option</source> <translation>K použití volby %s</translation> </message> <message> <location line="-74"/> <source>Error</source> <translation>Chyba</translation> </message> <message> <location line="-31"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Musíš nastavit rpcpassword=&lt;heslo&gt; v konfiguračním souboru: %s Pokud konfigurační soubor ještě neexistuje, vytvoř ho tak, aby ho mohl číst pouze vlastník.</translation> </message> </context> </TS><|fim▁end|>
<translation>Čas posledního bloku</translation>
<|file_name|>index.js<|end_file_name|><|fim▁begin|>export * from './MenuButton';<|fim▁hole|><|fim▁end|>
export * from './ScoreRecap.js';
<|file_name|>reflect_test.go<|end_file_name|><|fim▁begin|>// Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package rpc_test import ( "reflect" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/rpc/rpcreflect" "github.com/juju/juju/testing" ) // We test rpcreflect in this package, so that the // tests can all share the same testing Root type. type reflectSuite struct { testing.BaseSuite } var _ = gc.Suite(&reflectSuite{}) func (*reflectSuite) TestTypeOf(c *gc.C) { rtype := rpcreflect.TypeOf(reflect.TypeOf(&Root{})) c.Assert(rtype.DiscardedMethods(), gc.DeepEquals, []string{ "Discard1", "Discard2", "Discard3", }) expect := map[string]reflect.Type{ "CallbackMethods": reflect.TypeOf(&CallbackMethods{}), "ChangeAPIMethods": reflect.TypeOf(&ChangeAPIMethods{}), "DelayedMethods": reflect.TypeOf(&DelayedMethods{}), "ErrorMethods": reflect.TypeOf(&ErrorMethods{}), "InterfaceMethods": reflect.TypeOf((*InterfaceMethods)(nil)).Elem(), "SimpleMethods": reflect.TypeOf(&SimpleMethods{}), } c.Assert(rtype.MethodNames(), gc.HasLen, len(expect)) for name, expectGoType := range expect { m, err := rtype.Method(name) c.Assert(err, jc.ErrorIsNil) c.Assert(m, gc.NotNil) c.Assert(m.Call, gc.NotNil) c.Assert(m.ObjType, gc.Equals, rpcreflect.ObjTypeOf(expectGoType)) c.Assert(m.ObjType.GoType(), gc.Equals, expectGoType) } m, err := rtype.Method("not found") c.Assert(err, gc.Equals, rpcreflect.ErrMethodNotFound) c.Assert(m, gc.DeepEquals, rpcreflect.RootMethod{}) } func (*reflectSuite) TestObjTypeOf(c *gc.C) { objType := rpcreflect.ObjTypeOf(reflect.TypeOf(&SimpleMethods{})) c.Check(objType.DiscardedMethods(), gc.DeepEquals, []string{ "Discard1", "Discard2", "Discard3", "Discard4", }) expect := map[string]*rpcreflect.ObjMethod{ "SliceArg": { Params: reflect.TypeOf(struct{ X []string }{}), Result: reflect.TypeOf(stringVal{}), }, } for narg := 0; narg < 2; narg++ { for nret := 0; nret < 2; nret++ { for nerr := 0; 
nerr < 2; nerr++ { retErr := nerr != 0 var m rpcreflect.ObjMethod if narg > 0 { m.Params = reflect.TypeOf(stringVal{}) } if nret > 0 { m.Result = reflect.TypeOf(stringVal{}) } expect[callName(narg, nret, retErr)] = &m } } } c.Assert(objType.MethodNames(), gc.HasLen, len(expect)) for name, expectMethod := range expect { m, err := objType.Method(name) c.Check(err, jc.ErrorIsNil) c.Assert(m, gc.NotNil)<|fim▁hole|> c.Check(m.Call, gc.NotNil) c.Check(m.Params, gc.Equals, expectMethod.Params) c.Check(m.Result, gc.Equals, expectMethod.Result) } m, err := objType.Method("not found") c.Check(err, gc.Equals, rpcreflect.ErrMethodNotFound) c.Check(m, gc.DeepEquals, rpcreflect.ObjMethod{}) } func (*reflectSuite) TestValueOf(c *gc.C) { v := rpcreflect.ValueOf(reflect.ValueOf(nil)) c.Check(v.IsValid(), jc.IsFalse) c.Check(func() { v.FindMethod("foo", 0, "bar") }, gc.PanicMatches, "FindMethod called on invalid Value") root := &Root{} v = rpcreflect.ValueOf(reflect.ValueOf(root)) c.Check(v.IsValid(), jc.IsTrue) c.Check(v.GoValue().Interface(), gc.Equals, root) } func (*reflectSuite) TestFindMethod(c *gc.C) { // FindMethod is actually extensively tested because it's // used in the implementation of the rpc server, // so just a simple sanity check test here. 
root := &Root{ simple: make(map[string]*SimpleMethods), } root.simple["a99"] = &SimpleMethods{root: root, id: "a99"} v := rpcreflect.ValueOf(reflect.ValueOf(root)) m, err := v.FindMethod("foo", 0, "bar") c.Assert(err, gc.ErrorMatches, `unknown object type "foo"`) c.Assert(err, gc.FitsTypeOf, (*rpcreflect.CallNotImplementedError)(nil)) c.Assert(m, gc.IsNil) m, err = v.FindMethod("SimpleMethods", 0, "bar") c.Assert(err, gc.ErrorMatches, "no such request - method SimpleMethods.bar is not implemented") c.Assert(err, gc.FitsTypeOf, (*rpcreflect.CallNotImplementedError)(nil)) c.Assert(m, gc.IsNil) m, err = v.FindMethod("SimpleMethods", 0, "Call1r1e") c.Assert(err, jc.ErrorIsNil) c.Assert(m.ParamsType(), gc.Equals, reflect.TypeOf(stringVal{})) c.Assert(m.ResultType(), gc.Equals, reflect.TypeOf(stringVal{})) ret, err := m.Call("a99", reflect.ValueOf(stringVal{"foo"})) c.Assert(err, jc.ErrorIsNil) c.Assert(ret.Interface(), gc.Equals, stringVal{"Call1r1e ret"}) } func (*reflectSuite) TestFindMethodAcceptsAnyVersion(c *gc.C) { root := &Root{ simple: make(map[string]*SimpleMethods), } root.simple["a99"] = &SimpleMethods{root: root, id: "a99"} v := rpcreflect.ValueOf(reflect.ValueOf(root)) m, err := v.FindMethod("SimpleMethods", 0, "Call1r1e") c.Assert(err, jc.ErrorIsNil) c.Assert(m.ParamsType(), gc.Equals, reflect.TypeOf(stringVal{})) c.Assert(m.ResultType(), gc.Equals, reflect.TypeOf(stringVal{})) m, err = v.FindMethod("SimpleMethods", 1, "Call1r1e") c.Assert(err, jc.ErrorIsNil) c.Assert(m.ParamsType(), gc.Equals, reflect.TypeOf(stringVal{})) c.Assert(m.ResultType(), gc.Equals, reflect.TypeOf(stringVal{})) }<|fim▁end|>
<|file_name|>.karma.conf.js<|end_file_name|><|fim▁begin|>// Karma configuration // Generated on Tue Jan 06 2015 16:30:03 GMT-0800 (PST) module.exports = function(config) { config.set({ // base path that will be used to resolve all patterns (eg. files, exclude) basePath: 'browser', // frameworks to use // available frameworks: https://npmjs.org/browse/keyword/karma-adapter frameworks: ['mocha'], // list of files / patterns to load in the browser files: [ {pattern: 'fullnode.js', watched: true, included: false, served: true}, {pattern: 'fullnode-worker.js', watched: true, included: false, served: true}, 'tests.js' ], // list of files to exclude exclude: [ ], // preprocess matching files before serving them to the browser // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor preprocessors: { }, // test results reporter to use // possible values: 'dots', 'progress' // available reporters: https://npmjs.org/browse/keyword/karma-reporter reporters: ['progress'], <|fim▁hole|> // enable / disable colors in the output (reporters and logs) colors: true, // level of logging // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG logLevel: config.LOG_DEBUG, // enable / disable watching file and executing tests whenever any file changes autoWatch: false, // start these browsers // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher browsers: ['Firefox', 'Chrome'], // Continuous Integration mode // if true, Karma captures browsers, runs the tests and exits singleRun: true }); };<|fim▁end|>
// web server port port: 9876,
<|file_name|>config_regression_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """This modules contains regression tests for config API handler.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from absl import app from grr_response_server.gui import api_regression_test_lib from grr_response_server.gui.api_plugins import config as config_plugin from grr_response_server.gui.api_plugins import config_test as config_plugin_test class ApiListGrrBinariesHandlerRegressionTest( config_plugin_test.ApiGrrBinaryTestMixin, api_regression_test_lib.ApiRegressionTest): api_method = "ListGrrBinaries" handler = config_plugin.ApiListGrrBinariesHandler def Run(self): self.SetUpBinaries() self.Check("ListGrrBinaries") class ApiGetGrrBinaryHandlerRegressionTest( config_plugin_test.ApiGrrBinaryTestMixin, api_regression_test_lib.ApiRegressionTest): api_method = "GetGrrBinary" handler = config_plugin.ApiGetGrrBinaryHandler def Run(self): self.SetUpBinaries() self.Check( "GetGrrBinary", args=config_plugin.ApiGetGrrBinaryArgs(type="PYTHON_HACK", path="test")) self.Check( "GetGrrBinary", args=config_plugin.ApiGetGrrBinaryArgs( type="EXECUTABLE", path="windows/test.exe")) class ApiGetGrrBinaryBlobHandlerRegressionTest( config_plugin_test.ApiGrrBinaryTestMixin, api_regression_test_lib.ApiRegressionTest): api_method = "GetGrrBinaryBlob" handler = config_plugin.ApiGetGrrBinaryBlobHandler def Run(self): self.SetUpBinaries() self.Check( "GetGrrBinaryBlob", args=config_plugin.ApiGetGrrBinaryBlobArgs( type="PYTHON_HACK", path="test")) self.Check( "GetGrrBinaryBlob", args=config_plugin.ApiGetGrrBinaryBlobArgs( type="EXECUTABLE", path="windows/test.exe"))<|fim▁hole|> def main(argv): api_regression_test_lib.main(argv) if __name__ == "__main__": app.run(main)<|fim▁end|>
<|file_name|>test_utils.py<|end_file_name|><|fim▁begin|>from numpy.testing import assert_allclose, assert_equal from . import plt from .. import utils def test_path_data(): circle = plt.Circle((0, 0), 1) vertices, codes = utils.SVG_path(circle.get_path()) assert_allclose(vertices.shape, (25, 2)) assert_equal(codes, ['M', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'Z']) def test_linestyle(): linestyles = {'solid': 'none', '-': 'none', #'dashed': '6,6', '--': '6,6', #'dotted': '2,2', ':': '2,2', #'dashdot': '4,4,2,4', '-.': '4,4,2,4', '': None, 'None': None} for ls, result in linestyles.items(): line, = plt.plot([1, 2, 3], linestyle=ls) assert_equal(utils.get_dasharray(line), result) def test_axis_w_fixed_formatter(): positions, labels = [0, 1, 10], ['A','B','C'] plt.xticks(positions, labels) props = utils.get_axis_properties(plt.gca().xaxis) assert_equal(props['tickvalues'], positions)<|fim▁hole|> assert_equal(props['tickformat'], labels)<|fim▁end|>
<|file_name|>models.py<|end_file_name|><|fim▁begin|>"""Group models.""" from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.auth.models import Group as AuthGroup, Permission from django.core.cache import cache from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse from django.db import models from django.db.models import Q from django.db.models.signals import m2m_changed from django.utils.safestring import mark_safe from taggit.managers import TaggableManager from taggit.models import Tag import autocomplete_light from open_connect.media.models import Image, ShortenedURL from open_connect.connectmessages.models import Thread, Message from open_connect.connect_core.utils.location import get_coordinates, STATES from open_connect.connect_core.utils.models import TimestampModel from open_connect.groups.tasks import remove_user_from_group autocomplete_light.register(Tag) GROUP_STATUSES = ( ('active', 'Active'), ('deleted', 'Deleted') ) class Category(TimestampModel): """Group Category""" slug = models.SlugField(unique=True) name = models.CharField(max_length=127) color = models.CharField( verbose_name='Category Color', max_length=7, default='#000000') class Meta(object): """Meta options for Category model""" verbose_name = 'Category' verbose_name_plural = 'Categories' def __unicode__(self): """Unicode Representation of Category""" return self.name class GroupManager(models.Manager): """Manager for Group model.""" def get_queryset(self): """ Ensures that all queries fror groups also queries the auth group model """ return super(GroupManager, self).get_queryset().select_related( 'group', 'category').exclude(status='deleted') def with_deleted(self): """Includes deleted groups.""" return super(GroupManager, self).get_queryset().select_related( 'group', 'category') # pylint: disable=no-self-use def create(self, **kwargs): """Create a new group.""" name = kwargs.pop('name', None) if 
'group' not in kwargs and name: kwargs['group'] = AuthGroup.objects.create(name=name) return super(GroupManager, self).create(**kwargs) def published(self, **kwargs): """Get published groups.""" return self.get_queryset().filter(published=True, **kwargs) def search(self, search=None, location=None): """Groups search""" groups = Group.objects.published().select_related('image', 'group') if search: groups = groups.filter( Q(group__name__icontains=search) | Q(category__name__icontains=search) | Q(description__icontains=search) | Q(tags__slug__icontains=search) ).distinct() if location: groups = self.location_search(location, queryset=groups) return groups def location_search(self, location, queryset=None): """Groups search by location.""" coords = get_coordinates(location) # If no coordinates are provided, return an empty queryset if not coords: return Group.objects.none() if queryset is None: queryset = Group.objects.published() # Pass the job of finding distance to the database using this query sql = ( 'SELECT ' '(degrees(acos( ' 'sin(radians(latitude)) ' '* sin(radians(%s)) ' '+ cos(radians(latitude)) ' '* cos(radians(%s)) ' '* cos(radians(longitude - %s) ) ' ') ) * 69.09)' ) result = queryset.extra( select={'distance': sql}, select_params=(coords[0], coords[0], coords[1]), # We use the same SQL again to do filtering by distance and # radius. We cannot use the param in the `SELECT` because # of a postgres limitation where=['(' + sql + ') <= "groups_group"."radius"'], params=(coords[0], coords[0], coords[1]), order_by=['-featured', 'distance', 'group__name'] ).distinct() return result class Group(TimestampModel): """Group model.""" group = models.OneToOneField(AuthGroup) private = models.BooleanField( default=False, help_text='Membership to private groups is moderated.' ) published = models.BooleanField( default=True, verbose_name=u'Publish this group', help_text='Published groups can be seen by all users.' 
' Unpublished groups can only be seen if' ' you have the link.' ) moderated = models.BooleanField( default=False, verbose_name=u'Moderate this group', help_text='Posts by users must be moderated by an admin.' ) featured = models.BooleanField( default=False, verbose_name=u'This is an official group', help_text='Official groups are managed by staff and ' 'appear first in search results.', db_index=True ) member_list_published = models.BooleanField( default=True, help_text='Group member list is public' ) category = models.ForeignKey( 'groups.Category', verbose_name=u'Category', default=1) display_location = models.CharField(blank=True, max_length=100) latitude = models.FloatField(blank=True, null=True) longitude = models.FloatField(blank=True, null=True) radius = models.IntegerField(blank=True, null=True) is_national = models.BooleanField(default=True, db_index=True) # owners get permissions using a receiver below: group_owners_changed owners = models.ManyToManyField( settings.AUTH_USER_MODEL, blank=True, related_name='owned_groups_set') whitelist_users = models.ManyToManyField( settings.AUTH_USER_MODEL, blank=True, related_name='whitelist_set' ) description = models.TextField(blank=True) tags = TaggableManager(blank=True) image = models.ForeignKey(Image, blank=True, null=True) state = models.CharField( max_length=3, choices=[(s, s) for s in STATES], blank=True, db_index=True ) tos_accepted_at = models.DateTimeField(blank=True, null=True) created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True) status = models.CharField( choices=GROUP_STATUSES, default='active', db_index=True, max_length=50 ) objects = GroupManager() class Meta(object): # pylint: disable=no-init,too-few-public-methods """Group meta options.""" ordering = ['-featured', '-is_national', 'group__name'] permissions = ( ('can_edit_any_group', 'Can edit any group.'), ) def __unicode__(self): """Convert group to a unicode string.""" return u'%s' % self.group.name def save(self, force_insert=False, 
force_update=False, using=None, update_fields=None): """Override save to auto-set is_national.""" if not all([self.longitude, self.latitude, self.radius]): self.is_national = True else: self.is_national = False return super(Group, self).save( force_insert, force_update, using, update_fields) def delete(self, using=None): """Don't actually delete.""" self.status = 'deleted' self.save() for user in self.group.user_set.all().iterator(): remove_user_from_group.delay(user=user, group=self) def get_absolute_url(self): """Get the full local URL of an object""" return reverse('group_details', args=(self.pk,)) @property def full_url(self): """The URL (including the origin) of the group detail page""" return settings.ORIGIN + self.get_absolute_url() def clean(self): """Custom group validation.""" required_together = [ self.latitude, self.longitude, self.radius] if any(required_together) and not all(required_together): raise ValidationError( "If a location is specified, name, latitude," " longitude, and radius are required." ) def get_members(self): """Return a queryset of all users in the group.""" return self.group.user_set.all() def get_members_avatar_prioritized(self): """Return a queryset of group members prioritizing those with avatars""" # Selecting null as an extra column and sorting on that column # to preserve sorting when switching between MySQL and PostgreSQL. 
return self.get_members().extra( select={'image_null': 'image_id is null'} ).select_related('image').order_by('image_null') def public_threads_by_user(self, user): """All approved threads sent to group that the user is allowed to see""" return Thread.public.by_user(user).filter(group=self) def public_threads(self): """All the threads sent to this group.""" return Thread.public.by_group(group=self) @property def unmoderated_messages(self): """ Return all unmoderated messages """ return Message.objects.filter( thread__group=self, status='pending') @property def total_unmoderated_messages(self): """ Returns the total number of unmoderated messages """ return self.unmoderated_messages.count() def images(self, user): """Returns popular images related to this group.""" # We need to defer the exif field with distinct or postgres punches # you in the face. http://bit.ly/1k7HBs8 return Image.popular.with_user( user=user ).filter( message__thread__group=self ) def links(self): """Returns popular links related to this group.""" return ShortenedURL.popular.filter( message__thread__group=self, message__status='approved') def group_owners_changed(**kwargs): """ Handle changes in group ownership. 
This could be broken out into 2 signal receivers, but that would involve 2 duplicate queries to the User table to get a list of changed owners """ # If this is a change in owners, grab the list of owners if kwargs['action'] in ['post_add', 'post_remove']: users = get_user_model().objects.filter(pk__in=kwargs['pk_set']) # Clear the user's 'owned_groups' cache for user in users: cache.delete(user.cache_key + 'owned_groups') # Make sure group owners can direct message all other users if kwargs['action'] == 'post_add': direct_message_permission = Permission.objects.get( codename='can_initiate_direct_messages', content_type__app_label='accounts') for user in users: user.user_permissions.add(direct_message_permission) m2m_changed.connect(group_owners_changed, Group.owners.through)<|fim▁hole|> class GroupRequestManager(models.Manager): """Manager for GroupRequest.""" def unapproved(self): """Get unapproved requests.""" return super( GroupRequestManager, self ).get_queryset().filter(moderated_by__isnull=True) class GroupRequest(TimestampModel): """GroupRequest model.""" user = models.ForeignKey(settings.AUTH_USER_MODEL) group = models.ForeignKey(Group) moderated_by = models.ForeignKey( settings.AUTH_USER_MODEL, blank=True, null=True, related_name='approved_by' ) moderated_at = models.DateTimeField(blank=True, null=True) approved = models.NullBooleanField(blank=True) objects = GroupRequestManager() def __unicode__(self): """Convert GroupRequest to a unicode string.""" return mark_safe( u'<a href="{url}">{name} ({email} / {state}, {zip_code})' u' requested to join {group}.</a>'.format( url=self.user.get_absolute_url(), email=self.user.email, state=self.user.state, zip_code=self.user.zip_code, name=self.user.get_real_name(), group=self.group ) )<|fim▁end|>
<|file_name|>SpectrumPalette.story.tsx<|end_file_name|><|fim▁begin|>import React from 'react'; import SpectrumPalette from './SpectrumPalette'; import { withCenteredStory } from '../../util/storybook/withCenteredStory'; import { UseState } from '../../util/storybook/UseState'; import { renderComponentWithTheme } from '../../util/storybook/withTheme'; import mdx from './ColorPicker.mdx'; export default {<|fim▁hole|> docs: { page: mdx, }, }, }; export const simple = () => { return ( <UseState initialState="red"> {(selectedColor, updateSelectedColor) => { return renderComponentWithTheme(SpectrumPalette, { color: selectedColor, onChange: updateSelectedColor }); }} </UseState> ); };<|fim▁end|>
title: 'Pickers and Editors/ColorPicker/Palettes/SpectrumPalette', component: SpectrumPalette, decorators: [withCenteredStory], parameters: {
<|file_name|>JsonArray.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.util.json; import java.io.IOException; import java.io.StringWriter; import java.io.Writer; import java.math.BigDecimal; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; import java.util.Map; /** * JsonArray is a common non-thread safe data format for a collection of data. * The contents of a JsonArray are only validated as JSON values on * serialization. * * @see Jsoner * @since 2.0.0 */ public class JsonArray extends ArrayList<Object> implements Jsonable { /** * The serialization version this class is compatible with. This value * doesn't need to be incremented if and only if the only changes to occur * were updating comments, updating javadocs, adding new fields to the * class, changing the fields from static to non-static, or changing the * fields from transient to non transient. All other changes require this * number be incremented. */ private static final long serialVersionUID = 1L; /** Instantiates an empty JsonArray. */ public JsonArray() { } /** * Instantiate a new JsonArray using ArrayList's constructor of the same * type. 
* * @param collection represents the elements to produce the JsonArray with. */ public JsonArray(final Collection<?> collection) { super(collection); } /** * A convenience method that assumes every element of the JsonArray is * castable to T before adding it to a collection of Ts. * * @param <T> represents the type that all of the elements of the JsonArray * should be cast to and the type the collection will contain. * @param destination represents where all of the elements of the JsonArray * are added to after being cast to the generic type provided. * @throws ClassCastException if the unchecked cast of an element to T * fails. */ @SuppressWarnings("unchecked") public <T> void asCollection(final Collection<T> destination) { for (final Object o : this) { destination.add((T)o); } } /** * A convenience method that assumes there is a BigDecimal, Number, or * String at the given index. If a Number or String is there it is used to * construct a new BigDecimal. * * @param index representing where the value is expected to be at. * @return the value stored at the key or the default provided if the key * doesn't exist. * @throws ClassCastException if there was a value but didn't match the * assumed return types. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal. * @see BigDecimal * @see Number#doubleValue() */ public BigDecimal getBigDecimal(final int index) { Object returnable = this.get(index); if (returnable instanceof BigDecimal) { /* Success there was a BigDecimal. */ } else if (returnable instanceof Number) { /* A number can be used to construct a BigDecimal. */ returnable = new BigDecimal(returnable.toString()); } else if (returnable instanceof String) { /* A number can be used to construct a BigDecimal. 
*/ returnable = new BigDecimal((String)returnable); } return (BigDecimal)returnable; } /** * A convenience method that assumes there is a Boolean or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a boolean. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. */ public Boolean getBoolean(final int index) { Object returnable = this.get(index); if (returnable instanceof String) { returnable = Boolean.valueOf((String)returnable); } return (Boolean)returnable; } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a byte. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Byte getByte(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. */ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).byteValue(); } /** * A convenience method that assumes there is a Collection value at the * given index. * * @param <T> the kind of collection to expect at the index. Note unless * manually added, collection values will be a JsonArray. * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a Collection. 
* @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Collection */ @SuppressWarnings("unchecked") public <T extends Collection<?>> T getCollection(final int index) { /* * The unchecked warning is suppressed because there is no way of * guaranteeing at compile time the cast will work. */ return (T)this.get(index); } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a double. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Double getDouble(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. */ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).doubleValue(); } /** * A convenience method that assumes there is a String value at the given * index representing a fully qualified name in dot notation of an enum. * * @param index representing where the value is expected to be at. * @param <T> the Enum type the value at the index is expected to belong to. * @return the enum based on the string found at the index, or null if the * value at the index was null. * @throws ClassNotFoundException if the element was a String but the * declaring enum type couldn't be determined with it. 
* @throws ClassCastException if the element at the index was not a String * or if the fully qualified enum name is of the wrong type. * @throws IllegalArgumentException if an enum type was dynamically * determined but it doesn't define an enum with the dynamically * determined name. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Enum#valueOf(Class, String) */ @SuppressWarnings("unchecked") public <T extends Enum<T>> T getEnum(final int index) throws ClassNotFoundException { /* * Supressing the unchecked warning because the returnType is * dynamically identified and could lead to a ClassCastException when * returnType is cast to Class<T>, which is expected by the method's * contract. */ T returnable; final String element; final String[] splitValues; final int numberOfValues; final StringBuilder returnTypeName; final StringBuilder enumName; final Class<T> returnType; /* Make sure the element at the index is a String. */ element = this.getString(index); if (element == null) { return null; } /* Get the package, class, and enum names. */ splitValues = element.split("\\."); numberOfValues = splitValues.length; returnTypeName = new StringBuilder(); enumName = new StringBuilder(); for (int i = 0; i < numberOfValues; i++) { if (i == (numberOfValues - 1)) { /* * If it is the last split value then it should be the name of * the Enum since dots are not allowed in enum names. */ enumName.append(splitValues[i]); } else if (i == (numberOfValues - 2)) { /* * If it is the penultimate split value then it should be the * end of the package/enum type and not need a dot appended to * it. */ returnTypeName.append(splitValues[i]); } else { /* * Must be part of the package/enum type and will need a dot * appended to it since they got removed in the split. */ returnTypeName.append(splitValues[i]); returnTypeName.append("."); } } /* Use the package/class and enum names to get the Enum<T>. 
*/ returnType = (Class<T>)Class.forName(returnTypeName.toString()); returnable = Enum.valueOf(returnType, enumName.toString()); return returnable; } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a float. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Float getFloat(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. */ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).floatValue(); } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a int. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Integer getInteger(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. 
*/ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).intValue(); } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a long.<|fim▁hole|> * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Long getLong(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. */ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).longValue(); } /** * A convenience method that assumes there is a Map value at the given * index. * * @param <T> the kind of map to expect at the index. Note unless manually * added, Map values will be a JsonObject. * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a Map. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Map */ @SuppressWarnings("unchecked") public <T extends Map<?, ?>> T getMap(final int index) { /* * The unchecked warning is suppressed because there is no way of * guaranteeing at compile time the cast will work. */ return (T)this.get(index); } /** * A convenience method that assumes there is a Number or String value at * the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a short. * @throws ClassCastException if there was a value but didn't match the * assumed return type. 
* @throws NumberFormatException if a String isn't a valid representation of * a BigDecimal or if the Number represents the double or float * Infinity or NaN. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. * @see Number */ public Short getShort(final int index) { Object returnable = this.get(index); if (returnable == null) { return null; } if (returnable instanceof String) { /* A String can be used to construct a BigDecimal. */ returnable = new BigDecimal((String)returnable); } return ((Number)returnable).shortValue(); } /** * A convenience method that assumes there is a Boolean, Number, or String * value at the given index. * * @param index represents where the value is expected to be at. * @return the value at the index provided cast to a String. * @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws IndexOutOfBoundsException if the index is outside of the range of * element indexes in the JsonArray. */ public String getString(final int index) { Object returnable = this.get(index); if (returnable instanceof Boolean) { returnable = returnable.toString(); } else if (returnable instanceof Number) { returnable = returnable.toString(); } return (String)returnable; } /* * (non-Javadoc) * @see org.apache.camel.util.json.Jsonable#asJsonString() */ @Override public String toJson() { final StringWriter writable = new StringWriter(); try { this.toJson(writable); } catch (final IOException caught) { /* See java.io.StringWriter. 
*/ } return writable.toString(); } /* * (non-Javadoc) * @see org.apache.camel.util.json.Jsonable#toJsonString(java.io.Writer) */ @Override public void toJson(final Writer writable) throws IOException { boolean isFirstElement = true; final Iterator<Object> elements = this.iterator(); writable.write('['); while (elements.hasNext()) { if (isFirstElement) { isFirstElement = false; } else { writable.write(','); } writable.write(Jsoner.serialize(elements.next())); } writable.write(']'); } }<|fim▁end|>
* @throws ClassCastException if there was a value but didn't match the * assumed return type. * @throws NumberFormatException if a String isn't a valid representation of
<|file_name|>datedetectortestcase.py<|end_file_name|><|fim▁begin|># This file is part of Fail2Ban. # # Fail2Ban is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Fail2Ban is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Fail2Ban; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # Author: Cyril Jaquier # # $Revision$ __author__ = "Cyril Jaquier" __version__ = "$Revision$" __date__ = "$Date$" __copyright__ = "Copyright (c) 2004 Cyril Jaquier" __license__ = "GPL" import unittest from server.datedetector import DateDetector from server.datetemplate import DateTemplate class DateDetectorTest(unittest.TestCase): def setUp(self): """Call before every test case.""" self.__datedetector = DateDetector() self.__datedetector.addDefaultTemplate() def tearDown(self): """Call after every test case.""" def testGetEpochTime(self): log = "1138049999 [sshd] error: PAM: Authentication failure" date = [2006, 1, 23, 21, 59, 59, 0, 23, 0] dateUnix = 1138049999.0 self.assertEqual(self.__datedetector.getTime(log), date) self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)<|fim▁hole|> def testGetTime(self): log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure" date = [2005, 1, 23, 21, 59, 59, 1, 23, -1] dateUnix = 1106513999.0 self.assertEqual(self.__datedetector.getTime(log), date) self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix) # def testDefaultTempate(self): # self.__datedetector.setDefaultRegex("^\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2}") # 
self.__datedetector.setDefaultPattern("%b %d %H:%M:%S") # # log = "Jan 23 21:59:59 [sshd] error: PAM: Authentication failure" # date = [2005, 1, 23, 21, 59, 59, 1, 23, -1] # dateUnix = 1106513999.0 # # self.assertEqual(self.__datedetector.getTime(log), date) # self.assertEqual(self.__datedetector.getUnixTime(log), dateUnix)<|fim▁end|>
<|file_name|>convert.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''Extract the net parameters from the pytorch file and store them as python dict using cPickle. Must install pytorch. ''' import torch.utils.model_zoo as model_zoo import numpy as np from argparse import ArgumentParser import model try: import cPickle as pickle except ModuleNotFoundError: import pickle URL_PREFIX = 'https://download.pytorch.org/models/' model_urls = { 'densenet121': URL_PREFIX + 'densenet121-a639ec97.pth', 'densenet169': URL_PREFIX + 'densenet169-b2777c0a.pth', 'densenet201': URL_PREFIX + 'densenet201-c1103571.pth', 'densenet161': URL_PREFIX + 'densenet161-8d451a50.pth', } def rename(pname): p1 = pname.find('/') p2 = pname.rfind('/') assert p1 != -1 and p2 != -1, 'param name = %s is not correct' % pname if 'gamma' in pname: suffix = 'weight' elif 'beta' in pname: suffix = 'bias' elif 'mean' in pname: suffix = 'running_mean' elif 'var' in pname: suffix = 'running_var' else: suffix = pname[p2 + 1:] return pname[p1+1:p2] + '.' 
+ suffix if __name__ == '__main__':<|fim▁hole|> parser.add_argument('nb_classes', default=1000, type=int) args = parser.parse_args() net = model.create_net(args.depth, args.nb_classes) url = 'densenet%d' % args.depth torch_dict = model_zoo.load_url(model_urls[url]) params = {'SINGA_VERSION': 1101} # resolve dict keys name mismatch problem print(len(net.param_names()), len(torch_dict.keys())) for pname, pval, torch_name in\ zip(net.param_names(), net.param_values(), torch_dict.keys()): #torch_name = rename(pname) ary = torch_dict[torch_name].numpy() ary = np.array(ary, dtype=np.float32) if len(ary.shape) == 4: params[pname] = np.reshape(ary, (ary.shape[0], -1)) else: params[pname] = np.transpose(ary) #pdb.set_trace() assert pval.shape == params[pname].shape, 'shape mismatch for {0}, \ expected {1} in torch model, got {2} in singa model'.\ format(pname, params[pname].shape, pval.shape) with open(args.outfile, 'wb') as fd: pickle.dump(params, fd)<|fim▁end|>
parser = ArgumentParser(description='Convert params from torch to python' 'dict. ') parser.add_argument("depth", type=int, choices=[121, 169, 201, 161]) parser.add_argument("outfile")
<|file_name|>uproxy_core.ts<|end_file_name|><|fim▁begin|>/// <reference path='../../../third_party/freedom-typings/port-control.d.ts' /> import diagnose_nat = require('./diagnose-nat'); import globals = require('./globals'); import logging = require('../../../third_party/uproxy-lib/logging/logging'); import loggingTypes = require('../../../third_party/uproxy-lib/loggingprovider/loggingprovider.types'); import net = require('../../../third_party/uproxy-lib/net/net.types'); import remote_connection = require('./remote-connection'); import remote_instance = require('./remote-instance'); import social = require('../interfaces/social'); import social_network = require('./social'); import storage = globals.storage; import ui_connector = require('./ui_connector'); import uproxy_core_api = require('../interfaces/uproxy_core_api'); import user = require('./remote-user'); import version = require('../version/version'); import _ = require('lodash'); import ui = ui_connector.connector; // This is a global instance of RemoteConnection that is currently used for // either sharing or using a proxy through the copy+paste interface (i.e. // without an instance) export var copyPasteConnection :remote_connection.RemoteConnection = null; var log :logging.Log = new logging.Log('core'); log.info('Loading core', version.UPROXY_VERSION); // Note that the proxy runs extremely slowly in debug ('*:D') mode. export var loggingController = freedom['loggingcontroller'](); loggingController.setDefaultFilter( loggingTypes.Destination.console, loggingTypes.Level.warn); loggingController.setDefaultFilter( loggingTypes.Destination.buffered, loggingTypes.Level.debug); var portControl = freedom['portControl'](); /** * Primary uProxy backend. Handles which social networks one is connected to, * sends updates to the UI, and handles commands from the UI. 
*/ export class uProxyCore implements uproxy_core_api.CoreApi { private copyPasteSharingMessages_ :social.PeerMessage[] = []; private copyPasteGettingMessages_ :social.PeerMessage[] = []; // this should be set iff an update to the core is available private availableVersion_ :string = null; constructor() { log.debug('Preparing uProxy Core'); copyPasteConnection = new remote_connection.RemoteConnection((update :uproxy_core_api.Update, message?:any) => { // TODO send this update only when // update !== uproxy_core_api.Update.SIGNALLING_MESSAGE // (after v0.8.13) ui.update(update, message); if (update !== uproxy_core_api.Update.SIGNALLING_MESSAGE) { return; } var data :social.PeerMessage[]; switch (message.type) { case social.PeerMessageType.SIGNAL_FROM_CLIENT_PEER: data = this.copyPasteGettingMessages_; break; case social.PeerMessageType.SIGNAL_FROM_SERVER_PEER: data = this.copyPasteSharingMessages_; break; } data.push(message); ui.update(uproxy_core_api.Update.COPYPASTE_MESSAGE, { type: message.type, data: data }); }); } // sendInstanceHandshakeMessage = (clientId :string) => { // // TODO: Possibly implement this, or get rid of the possibility for // // UI-initiated instance handshakes. // } changeOption = (option :string) => { // TODO: implement options. } dismissNotification = (instancePath :social.InstancePath) => { // TODO: implement options. } private pendingNetworks_ :{[name :string] :social.Network} = {}; private portControlSupport_ = uproxy_core_api.PortControlSupport.PENDING; /** * Access various social networks using the Social API. 
*/ public login = (loginArgs :uproxy_core_api.LoginArgs) :Promise<void> => { var networkName = loginArgs.network; if (!(networkName in social_network.networks)) { log.warn('Network does not exist', networkName); return Promise.reject(new Error('Network does not exist (' + networkName + ')')); } var network = this.pendingNetworks_[networkName]; if (typeof network === 'undefined') { network = new social_network.FreedomNetwork(networkName); this.pendingNetworks_[networkName] = network; } // TODO: save the auto-login default return network.login(loginArgs.reconnect).then(() => { delete this.pendingNetworks_[networkName]; log.info('Successfully logged in to network', { network: networkName, userId: network.myInstance.userId }); }).catch((e) => { delete this.pendingNetworks_[networkName]; throw e; }); } /** * Log-out of |networkName|. * TODO: write a test for this. */ public logout = (networkInfo :social.SocialNetworkInfo) : Promise<void> => { var networkName = networkInfo.name; var userId = networkInfo.userId; var network = social_network.getNetwork(networkName, userId); if (null === network) { log.warn('Could not logout of network', networkName); return; } return network.logout().then(() => { log.info('Successfully logged out of network', networkName); }); // TODO: disable auto-login // store.saveMeToStorage(); } // onUpdate not needed in the real core. onUpdate = (update:uproxy_core_api.Update, handler:Function) => { throw "uproxy_core onUpdate not implemented."; } /** * Updates user's description of their current device. This applies to all * local instances for every network the user is currently logged onto. Those * local instances will then propogate their description update to all * instances. 
*/ public updateGlobalSettings = (newSettings :uproxy_core_api.GlobalSettings) => { newSettings.version = globals.STORAGE_VERSION; if (newSettings.stunServers.length === 0) { newSettings.stunServers = globals.DEFAULT_STUN_SERVERS; } globals.storage.save('globalSettings', newSettings) .catch((e) => { log.error('Could not save globalSettings to storage', e.stack); }); // Clear the existing servers and add in each new server. // Trying globalSettings = newSettings does not correctly update // pre-existing references to stunServers (e.g. from RemoteInstances). globals.settings.stunServers .splice(0, globals.settings.stunServers.length); for (var i = 0; i < newSettings.stunServers.length; ++i) { globals.settings.stunServers.push(newSettings.stunServers[i]); } if (newSettings.description != globals.settings.description) { globals.settings.description = newSettings.description; // Resend instance info to update description for logged in networks. for (var networkName in social_network.networks) { for (var userId in social_network.networks[networkName]) { social_network.networks[networkName][userId].resendInstanceHandshakes(); } } } globals.settings.hasSeenSharingEnabledScreen = newSettings.hasSeenSharingEnabledScreen; globals.settings.hasSeenWelcome = newSettings.hasSeenWelcome; globals.settings.allowNonUnicast = newSettings.allowNonUnicast; globals.settings.mode = newSettings.mode; globals.settings.statsReportingEnabled = newSettings.statsReportingEnabled; globals.settings.splashState = newSettings.splashState; globals.settings.consoleFilter = newSettings.consoleFilter; loggingController.setDefaultFilter( loggingTypes.Destination.console, globals.settings.consoleFilter); globals.settings.language = newSettings.language; globals.settings.force_message_version = newSettings.force_message_version; } public getFullState = () :Promise<uproxy_core_api.InitialState> => { return globals.loadSettings.then(() => { return { networkNames: Object.keys(social_network.networks), 
globalSettings: globals.settings, onlineNetworks: social_network.getOnlineNetworks(), availableVersion: this.availableVersion_, copyPasteState: { connectionState: copyPasteConnection.getCurrentState(), endpoint: copyPasteConnection.activeEndpoint, gettingMessages: this.copyPasteGettingMessages_, sharingMessages: this.copyPasteSharingMessages_ }, portControlSupport: this.portControlSupport_, }; }); } /** * Modifies the local consent value as the result of a local user action. * This is a distinct pathway from receiving consent bits over the wire, which * is handled directly inside the relevant social.Network. */ public modifyConsent = (command:uproxy_core_api.ConsentCommand) => { // Determine which Network, User, and Instance... var user = this.getUser(command.path); if (!user) { // Error msg emitted above. log.error('Cannot modify consent for non-existing user', command.path); return; } // Set the instance's new consent levels. It will take care of sending new // consent bits over the wire and re-syncing with the UI. user.modifyConsent(command.action); } public startCopyPasteGet = () : Promise<net.Endpoint> => { this.copyPasteGettingMessages_ = []; return copyPasteConnection.startGet(globals.effectiveMessageVersion()); } public stopCopyPasteGet = () :Promise<void> => { return copyPasteConnection.stopGet(); } public startCopyPasteShare = () => { this.copyPasteSharingMessages_ = []; copyPasteConnection.startShare(globals.effectiveMessageVersion()); } public stopCopyPasteShare = () :Promise<void> => { return copyPasteConnection.stopShare(); } public sendCopyPasteSignal = (signal :social.PeerMessage) => { copyPasteConnection.handleSignal(signal); } /** * Begin using a peer as a proxy server. * Starts SDP negotiations with a remote peer. Assumes |path| to the * RemoteInstance exists. 
*/ public start = (path :social.InstancePath) : Promise<net.Endpoint> => { var remote = this.getInstance(path); if (!remote) { log.error('Instance does not exist for proxying', path.instanceId); return Promise.reject(new Error('Instance does not exist for proxying (' + path.instanceId + ')')); } // Remember this instance as our proxy. Set this before start fulfills // in case the user decides to cancel the proxy before it begins. return remote.start(); } /** * Stop proxying with the current instance, if it exists. */ public stop = (path :social.InstancePath) => { var remote = this.getInstance(path); if (!remote) { log.error('Instance does not exist for proxying', path.instanceId); return Promise.reject(new Error('Instance does not exist for proxying (' + path.instanceId + ')')); } remote.stop(); // TODO: Handle revoked permissions notifications. } public handleManualNetworkInboundMessage = (command :social.HandleManualNetworkInboundMessageCommand) => { var manualNetwork :social_network.ManualNetwork = <social_network.ManualNetwork> social_network.getNetwork( social_network.MANUAL_NETWORK_ID, ''); if (!manualNetwork) { log.error('Manual network does not exist, discarding inbound message', command); return; } manualNetwork.receive(command.senderClientId, command.message); } /** * Obtain the RemoteInstance corresponding to an instance path. */ public getInstance = (path :social.InstancePath) :social.RemoteUserInstance => { var user = this.getUser(path); if (!user) { log.error('No user', path.userId); return; } return user.getInstance(path.instanceId); } public getUser = (path :social.UserPath) :social.RemoteUser => { var network = social_network.getNetwork(path.network.name, path.network.userId); if (!network) { log.error('No network', path.network.name); return; } return network.getUser(path.userId); } // If the user requests the NAT type while another NAT request is pending, // the then() block of doNatProvoking ends up being called twice. 
// We keep track of the timeout that resets the NAT type to make sure // there is at most one timeout at a time. private natResetTimeout_ :number; public getNatType = () :Promise<string> => { if (globals.natType === '') { // Function that returns a promise which fulfills // in a given time. var countdown = (time:number) : Promise<void> => { return new Promise<void>((F, R) => { setTimeout(F, time); }); } // Return the first Promise that fulfills in the 'race' // between a countdown and NAT provoking. // i.e., if NAT provoking takes longer than 30s, the countdown // will return first, and a time out message is returned. return Promise.race( [ countdown(30000).then(() => { return 'NAT classification timed out.'; }), diagnose_nat.doNatProvoking().then((natType:string) => { globals.natType = natType; // Store NAT type for five minutes. This way, if the user previews // their logs, and then submits them shortly after, we do not need // to determine the NAT type once for the preview, and once for // submission to our backend. // If we expect users to check NAT type frequently (e.g. if they // switch between networks while troubleshooting), then we might want // to remove caching. clearTimeout(this.natResetTimeout_); this.natResetTimeout_ = setTimeout(() => {globals.natType = '';}, 300000); return globals.natType; }) ]); } else { return Promise.resolve(globals.natType); } } // Probe for NAT-PMP, PCP, and UPnP support // Sets this.portControlSupport_ and sends update message to UI public refreshPortControlSupport = () :Promise<void> => { this.portControlSupport_ = uproxy_core_api.PortControlSupport.PENDING; ui.update(uproxy_core_api.Update.PORT_CONTROL_STATUS, uproxy_core_api.PortControlSupport.PENDING); return portControl.probeProtocolSupport().then( (probe:freedom_PortControl.ProtocolSupport) => { this.portControlSupport_ = (probe.natPmp || probe.pcp || probe.upnp) ? 
uproxy_core_api.PortControlSupport.TRUE : uproxy_core_api.PortControlSupport.FALSE; ui.update(uproxy_core_api.Update.PORT_CONTROL_STATUS, this.portControlSupport_); }); } // Probe the NAT type and support for port control protocols // Returns an object with the NAT configuration as keys public getNetworkInfoObj = () :Promise<uproxy_core_api.NetworkInfo> => { var natInfo :uproxy_core_api.NetworkInfo = { natType: undefined, pmpSupport: undefined, pcpSupport: undefined, upnpSupport: undefined }; return this.getNatType().then((natType:string) => { natInfo.natType = natType; return portControl.probeProtocolSupport().then( (probe:freedom_PortControl.ProtocolSupport) => { natInfo.pmpSupport = probe.natPmp; natInfo.pcpSupport = probe.pcp; natInfo.upnpSupport = probe.upnp; return natInfo; }).catch((err:Error) => { // Should only catch the error when getInternalIp() times out natInfo.errorMsg = 'Could not probe for port control protocols: ' + err.message; return natInfo; }); }); } // Returns a string of the NAT type and support for port control protocols public getNetworkInfo = () :Promise<string> => { return this.getNetworkInfoObj().then((natInfo:uproxy_core_api.NetworkInfo) => { var natInfoStr = 'NAT Type: ' + natInfo.natType + '\n'; if (natInfo.errorMsg) { natInfoStr += natInfo.errorMsg + '\n'; } else { natInfoStr += 'NAT-PMP: ' + (natInfo.pmpSupport ? 'Supported' : 'Not supported') + '\n'; natInfoStr += 'PCP: ' + (natInfo.pcpSupport ? 'Supported' : 'Not supported') + '\n'; natInfoStr += 'UPnP IGD: ' + (natInfo.upnpSupport ? 
'Supported' : 'Not supported') + '\n'; } return natInfoStr; }); } public getLogs = () :Promise<string> => { return loggingController.getLogs().then((rawLogs:string[]) => { var formattedLogsWithVersionInfo = 'Version: ' + JSON.stringify(version.UPROXY_VERSION) + '\n\n'; formattedLogsWithVersionInfo += this.formatLogs_(rawLogs); return formattedLogsWithVersionInfo; }); } public getLogsAndNetworkInfo = () :Promise<string> => { return Promise.all([this.getNetworkInfo(), this.getLogs()]) .then((natAndLogs) => { // natAndLogs is an array of returned values corresponding to the // array of Promises in Promise.all. return natAndLogs[0] + '\n' + natAndLogs[1]; }); } private formatLogs_ = (logs :string[]) :string => { // Searches through text for all JSON fields of the specified key, then // replaces the values with the prefix + a counter. // e.g. // jsonFieldReplace( // '{"name":"Alice"}...{\\"name\\":\\"Bob\\"}...Alice...Bob...',<|fim▁hole|> // '{"name":"NAME_1"}...{\\"name\\":\\"NAME_2\\"}...NAME_1...NAME_2...' var jsonFieldReplace = (text :string, key :string, prefix :string) : string => { // Allow for escaped JSON to be matched, e.g. {\"name\":\"Bob\"} var re = new RegExp('\\\\*"' + key + '\\\\*":\\\\*"([^"]+)"', 'g'); var matches :string[]; var uniqueValueSet :{[value :string] :Boolean} = {}; while (matches = re.exec(text)) { matches[1].replace(/\\+$/, ''); // Removing trailing \ uniqueValueSet[matches[1]] = true; // Add userId, name, etc to set. } var index = 1; for (var value in uniqueValueSet) { // Replace all occurances of value in text. 
var escapedRegex = new RegExp( // Escape all special regex characters, from // http://stackoverflow.com/questions/3446170/ value.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, "\\$&"), 'g'); text = text.replace(escapedRegex, prefix + index); ++index; } return text; } var text = logs.join('\n'); text = jsonFieldReplace(text, 'name', 'NAME_'); text = jsonFieldReplace(text, 'userId', 'USER_ID_'); text = jsonFieldReplace(text, 'imageData', 'IMAGE_DATA_'); text = jsonFieldReplace(text, 'url', 'URL_'); // Replace any emails that may have been missed when replacing userIds. // Email regex taken from regular-expressions.info text = text.replace(/\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b/ig, 'EMAIL_ADDRESS'); return text; } public pingUntilOnline = (pingUrl :string) : Promise<void> => { var ping = () : Promise<void> => { return new Promise<void>(function(fulfill, reject) { var xhr = new XMLHttpRequest(); xhr.open('GET', pingUrl); xhr.onload = function() { fulfill(); }; xhr.onerror = function(e) { reject(new Error('Ping failed')); }; xhr.send(); }); } return new Promise<void>((fulfill, reject) => { var checkIfOnline = () => { ping().then(() => { clearInterval(intervalId); fulfill(); }).catch((e) => { // Ping failed (may be because the internet is disconnected), // we will try again on the next interval. }); }; var intervalId = setInterval(checkIfOnline, 5000); checkIfOnline(); }); } public getVersion = () :Promise<{ version :string }> => { return Promise.resolve(version.UPROXY_VERSION); } public handleUpdate = (details :{version :string}) => { this.availableVersion_ = details.version; ui.update(uproxy_core_api.Update.CORE_UPDATE_AVAILABLE, details); } } // class uProxyCore<|fim▁end|>
// 'name', 'NAME_'); // will return:
<|file_name|>files_service.py<|end_file_name|><|fim▁begin|>import datetime import os import shutil import time from files_by_date.utils.logging_wrapper import get_logger, log_message from files_by_date.validators.argument_validator import ArgumentValidator logger = get_logger(name='files_service') class FilesService: def __init__(self): raise NotImplementedError @classmethod def gather_files(cls, parent_directory, files): for dir_name, subdir_list, file_list in os.walk(parent_directory): if file_list: files.extend( ['{dir_name}{os_sep}{file_name}'.format(dir_name=dir_name, os_sep=os.sep, file_name=file) for file in file_list]) # [f'{dir_name}{os.sep}{file}' for file in file_list] # 3.6 for subdir in subdir_list: files = cls.gather_files(subdir, files) return files @classmethod def group_files_by_modified_date(cls, files): grouped_files = {} for file in files: directory_tag = cls._get_directory_tag_for_file(file) file_group = grouped_files.get(directory_tag, list()) file_group.append(file) grouped_files[directory_tag] = file_group return grouped_files @classmethod def copy_files(cls, file_groups, target_dir, force_overwrite): if not os.path.exists(target_dir): os.makedirs(target_dir) # TODO: not covered total_count = Count() for group in file_groups: group_count = Count() # group_dir = f'{target_dir}{os.sep}{group}' # 3.6 group_dir = '{target_dir}{os_sep}{group}'.format(target_dir=target_dir, os_sep=os.sep, group=group) ArgumentValidator.validate_target_dir(group_dir) if not os.path.exists(group_dir): os.makedirs(group_dir) # log_message(f'Created directory: {group_dir}') # 3.6 log_message('Created directory: {group_dir}'.format(group_dir=group_dir)) # log_message(f'Copying {len(file_groups[group])} files to {group_dir}') # 3.6 log_message('Moving {group_size} files to {group_dir}'.format(group_size=len(file_groups[group]), group_dir=group_dir)) for file in file_groups[group]: # file_path = f'{group_dir}{os.sep}{os.path.basename(file)}' # 3.6 file_path = 
'{group_dir}{os_sep}{file_name}'.format(group_dir=group_dir, os_sep=os.sep, file_name=os.path.basename(file)) if force_overwrite and os.path.exists(file_path): os.remove(file_path) if not os.path.exists(file_path): shutil.copy2(file, group_dir) group_count.add_copied(count=1) else: group_count.add_skipped(count=1) # TODO: not covered total_count.add_files(count=len(file_groups[group])) total_count.add_copied(count=group_count.copied) total_count.add_skipped(count=group_count.skipped) # log_message(f'Copied {group_count.copied}, skipped {group_count.skipped}') # 3.6 log_message('Copied {local_copied_count}, skipped {local_skipped_count}'.format( local_copied_count=group_count.copied, local_skipped_count=group_count.skipped)) log_message( # f'Total files count {total_count.files}, total copied {total_count.copied}, total skipped {total_count.skipped}') # 3.6 'Total files count {total_files_count}, total copied {total_copied_count}, total skipped {total_skipped_count}'.format( total_files_count=total_count.files, total_copied_count=total_count.copied, total_skipped_count=total_count.skipped)) return total_count @staticmethod def _get_directory_tag_for_file(file): return datetime.datetime.strptime(time.ctime(os.path.getmtime(file)), "%a %b %d %H:%M:%S %Y").strftime('%Y%m') <|fim▁hole|> def __init__(self, *, files=0, copied=0, skipped=0): self.files = files self.copied = copied self.skipped = skipped def __str__(self): # return f'files={self.files}, copied={self.copied}, skipped={self.skipped}' # 3.6 return 'files={files}, copied={copied}, skipped={skipped}'.format(files=self.files, copied=self.copied, skipped=self.skipped) def add_files(self, *, count=1): self.files += count def add_copied(self, *, count=0): self.copied += count def add_skipped(self, *, count=0): self.skipped += count<|fim▁end|>
class Count:
<|file_name|>user.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- ''' gitolite-manager Author: Tim Henderson Contact: tim.tadh@gmail.com, tadh@case.edu Copyright: 2013 All Rights Reserved, see LICENSE ''' import urllib, re import cgi from logging import getLogger log = getLogger('gm:view:user') from pyramid.view import view_config from pyramid import httpexceptions as httpexc from pyramid.httpexceptions import HTTPFound, HTTPBadRequest from pyramid.response import Response from gitolite_manager import validate as v from gitolite_manager.models.session import Session from gitolite_manager.models.user import User from gitolite_manager.controllers import key_controller, repo_controller def get_user_name(user): email = user.email return email[:email.index('@')] def tvars(request, extras): session = request.environ['gm_session'] defaults = { 'SITENAME' : 'Key Czar', 'SITEURL' : request.application_url, 'request' : request, 'session' : session, 'get_user_name' : get_user_name, } defaults.update(extras) return defaults @view_config( route_name='user', request_method=['GET'], renderer='templates/user.html' ) def user(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] return tvars(request, { 'TITLE' : user + ' user', 'user_name' : user, 'keys_url': request.route_url('user/keys'), 'add_key_url': request.route_url('user/addkey'), 'partners_url': request.route_url('user/partners'), }) @view_config( route_name='user/keys', request_method=['GET'], renderer='templates/keys.html' ) def keys(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] return tvars(request, { 'TITLE' : 'keys for %s' % user, 'user_name' : user, }) @view_config( 
route_name='user/addkey', request_method=['GET'], renderer='templates/addkey.html' ) def addkey(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] return tvars(request, { 'TITLE' : 'add key for %s' % user, 'user_name' : user, }) addkey_schema = { 'key': v.type_checker(cgi.FieldStorage), 'csrf': v.type_checker(basestring) } @view_config( route_name='user/addkey', request_method=['POST'], renderer='templates/addkey.html' ) def addkey_post(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.route_url("root")) email = session.user.email user = email[:email.index('@')] err, post = v.validate_dictionary(dict(request.POST), addkey_schema) if err: return tvars(request, { 'TITLE' : 'add key for %s' % user, 'user_name' : user, 'errors': err, }) elif not session.valid_csrf(post['csrf'], request.route_url('user/addkey')): return HTTPFound(request.route_url('root')) else: try: key = post['key'].file.read() key_controller.add_key(db, session.user, key) except Exception, e: log.exception(e) return tvars(request, { 'TITLE' : 'add key for %s' % user, 'user_name' : user, 'errors': [e], }) return HTTPFound(request.route_url('user/keys')) rmkey_schema = { 'keyid': v.type_checker(basestring) & v.format_checker(re.compile(r'^[0-9]+$')), } @view_config( route_name='user/rmkey', request_method=['GET'], renderer='templates/rmkey.html'<|fim▁hole|>def rmkey(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] err, match = v.validate_dictionary(dict(request.matchdict), rmkey_schema) if err: return tvars(request, { 'TITLE' : 'remove key for %s' % user, 'user_name' : user, 'errors': err, }) try: 
key_controller.rm_key(db, session.user, int(match['keyid'])) except Exception, e: return tvars(request, { 'TITLE' : 'remove key for %s' % user, 'user_name' : user, 'errors': [str(e)], }) return HTTPFound(request.route_url('user/keys')) @view_config( route_name='user/partners', request_method=['GET'], renderer='templates/add-partner.html' ) def partners(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] return tvars(request, { 'TITLE' : 'partners for %s' % user, 'user_name' : user, }) add_partner_schema = { 'csrf': v.type_checker(basestring), 'case_id': v.type_checker(basestring) & v.format_checker(re.compile(r'^[a-z]{3}[0-9]*$')), 'repo_name': v.type_checker(basestring) & v.format_checker(re.compile(r'^[a-zA-Z][a-zA-Z0-9_-]*$')), } @view_config( route_name='user/add-partner', request_method=['POST'], renderer='templates/add-partner.html' ) def add_partners(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] err, post = v.validate_dictionary(dict(request.POST), add_partner_schema) if err: return tvars(request, { 'TITLE' : 'add partner for %s' % user, 'user_name' : user, 'errors': err, }) elif not session.valid_csrf(post['csrf'], request.route_url('user/add-partner')): return HTTPFound(request.route_url('root')) else: try: repo_controller.add_partner(db, session.user, post['case_id'], post['repo_name']) except Exception, e: return tvars(request, { 'TITLE' : 'add partner for %s' % user, 'user_name' : user, 'errors': [e], }) return HTTPFound(request.route_url('user/partners')) rm_partner_schema = { 'repo_id': v.type_checker(basestring) & v.format_checker(re.compile(r'^[0-9]+$')), } @view_config( route_name='user/rm-partner', request_method=['GET'], 
renderer='templates/rm-partner.html' ) def rm_partner(request): db = request.environ['db.session'] session = request.environ['gm_session'] if session.user is None: return HTTPFound(request.application_url) email = session.user.email user = email[:email.index('@')] err, match = v.validate_dictionary(dict(request.matchdict), rm_partner_schema) if err: return tvars(request, { 'TITLE' : 'remove partner for %s' % user, 'user_name' : user, 'errors': err, }) try: repo_controller.rm_partner(db, session.user, int(match['repo_id'])) except Exception, e: return tvars(request, { 'TITLE' : 'remove partner for %s' % user, 'user_name' : user, 'errors': [str(e)], }) return HTTPFound(request.route_url('user/partners'))<|fim▁end|>
)
<|file_name|>base.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*- """ sleekxmpp.plugins.base ~~~~~~~~~~~~~~~~~~~~~~ This module provides XMPP functionality that is specific to client connections. Part of SleekXMPP: The Sleek XMPP Library :copyright: (c) 2012 Nathanael C. Fritz :license: MIT, see LICENSE for more details """ import sys import copy import logging import threading if sys.version_info >= (3, 0): unicode = str log = logging.getLogger(__name__) #: Associate short string names of plugins with implementations. The #: plugin names are based on the spec used by the plugin, such as #: `'xep_0030'` for a plugin that implements XEP-0030. PLUGIN_REGISTRY = {} #: In order to do cascading plugin disabling, reverse dependencies #: must be tracked. PLUGIN_DEPENDENTS = {} #: Only allow one thread to manipulate the plugin registry at a time. REGISTRY_LOCK = threading.RLock() class PluginNotFound(Exception): """Raised if an unknown plugin is accessed.""" def register_plugin(impl, name=None): """Add a new plugin implementation to the registry. :param class impl: The plugin class. The implementation class must provide a :attr:`~BasePlugin.name` value that will be used as a short name for enabling and disabling the plugin. The name should be based on the specification used by the plugin. For example, a plugin implementing XEP-0030 would be named `'xep_0030'`. """ if name is None: name = impl.name with REGISTRY_LOCK: PLUGIN_REGISTRY[name] = impl if name not in PLUGIN_DEPENDENTS: PLUGIN_DEPENDENTS[name] = set() for dep in impl.dependencies: if dep not in PLUGIN_DEPENDENTS: PLUGIN_DEPENDENTS[dep] = set() PLUGIN_DEPENDENTS[dep].add(name) def load_plugin(name, module=None): """Find and import a plugin module so that it can be registered. This function is called to import plugins that have selected for enabling, but no matching registered plugin has been found. :param str name: The name of the plugin. 
It is expected that plugins are in packages matching their name, even though the plugin class name does not have to match. :param str module: The name of the base module to search for the plugin. """ try: if not module: try: module = 'sleekxmpp.plugins.%s' % name __import__(module) mod = sys.modules[module] except ImportError: module = 'sleekxmpp.features.%s' % name __import__(module) mod = sys.modules[module] elif isinstance(module, (str, unicode)): __import__(module) mod = sys.modules[module] else: mod = module # Add older style plugins to the registry. if hasattr(mod, name): plugin = getattr(mod, name) if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'): plugin.name = name # Mark the plugin as an older style plugin so # we can work around dependency issues. plugin.old_style = True register_plugin(plugin, name) except ImportError: log.exception("Unable to load plugin: %s", name) class PluginManager(object): def __init__(self, xmpp, config=None): #: We will track all enabled plugins in a set so that we #: can enable plugins in batches and pull in dependencies #: without problems. self._enabled = set() #: Maintain references to active plugins. self._plugins = {} self._plugin_lock = threading.RLock() #: Globally set default plugin configuration. This will #: be used for plugins that are auto-enabled through #: dependency loading. self.config = config if config else {} self.xmpp = xmpp def register(self, plugin, enable=True): """Register a new plugin, and optionally enable it. :param class plugin: The implementation class of the plugin to register. :param bool enable: If ``True``, immediately enable the plugin after registration. """ register_plugin(plugin) if enable: self.enable(plugin.name) def enable(self, name, config=None, enabled=None): """Enable a plugin, including any dependencies. :param string name: The short name of the plugin. :param dict config: Optional settings dictionary for configuring plugin behaviour. 
""" top_level = False if enabled is None: enabled = set() with self._plugin_lock: if name not in self._enabled: enabled.add(name) self._enabled.add(name) if not self.registered(name): load_plugin(name) plugin_class = PLUGIN_REGISTRY.get(name, None) if not plugin_class: raise PluginNotFound(name) if config is None: config = self.config.get(name, None) plugin = plugin_class(self.xmpp, config) self._plugins[name] = plugin for dep in plugin.dependencies: self.enable(dep, enabled=enabled) plugin._init()<|fim▁hole|> for name in enabled: if hasattr(self.plugins[name], 'old_style'): # Older style plugins require post_init() # to run just before stream processing begins, # so we don't call it here. pass self.plugins[name].post_init() def enable_all(self, names=None, config=None): """Enable all registered plugins. :param list names: A list of plugin names to enable. If none are provided, all registered plugins will be enabled. :param dict config: A dictionary mapping plugin names to configuration dictionaries, as used by :meth:`~PluginManager.enable`. """ names = names if names else PLUGIN_REGISTRY.keys() if config is None: config = {} for name in names: self.enable(name, config.get(name, {})) def enabled(self, name): """Check if a plugin has been enabled. :param string name: The name of the plugin to check. :return: boolean """ return name in self._enabled def registered(self, name): """Check if a plugin has been registered. :param string name: The name of the plugin to check. :return: boolean """ return name in PLUGIN_REGISTRY def disable(self, name, _disabled=None): """Disable a plugin, including any dependent upon it. :param string name: The name of the plugin to disable. :param set _disabled: Private set used to track the disabled status of plugins during the cascading process. 
""" if _disabled is None: _disabled = set() with self._plugin_lock: if name not in _disabled and name in self._enabled: _disabled.add(name) plugin = self._plugins.get(name, None) if plugin is None: raise PluginNotFound(name) for dep in PLUGIN_DEPENDENTS[name]: self.disable(dep, _disabled) plugin._end() if name in self._enabled: self._enabled.remove(name) del self._plugins[name] def __keys__(self): """Return the set of enabled plugins.""" return self._plugins.keys() def __getitem__(self, name): """ Allow plugins to be accessed through the manager as if it were a dictionary. """ plugin = self._plugins.get(name, None) if plugin is None: raise PluginNotFound(name) return plugin def __iter__(self): """Return an iterator over the set of enabled plugins.""" return self._plugins.__iter__() def __len__(self): """Return the number of enabled plugins.""" return len(self._plugins) class BasePlugin(object): #: A short name for the plugin based on the implemented specification. #: For example, a plugin for XEP-0030 would use `'xep_0030'`. name = '' #: A longer name for the plugin, describing its purpose. For example, #: a plugin for XEP-0030 would use `'Service Discovery'` as its #: description value. description = '' #: Some plugins may depend on others in order to function properly. #: Any plugin names included in :attr:`~BasePlugin.dependencies` will #: be initialized as needed if this plugin is enabled. dependencies = set() #: The basic, standard configuration for the plugin, which may #: be overridden when initializing the plugin. The configuration #: fields included here may be accessed directly as attributes of #: the plugin. For example, including the configuration field 'foo' #: would mean accessing `plugin.foo` returns the current value of #: `plugin.config['foo']`. 
default_config = {} def __init__(self, xmpp, config=None): self.xmpp = xmpp if self.xmpp: self.api = self.xmpp.api.wrap(self.name) #: A plugin's behaviour may be configurable, in which case those #: configuration settings will be provided as a dictionary. self.config = copy.copy(self.default_config) if config: self.config.update(config) def __getattr__(self, key): """Provide direct access to configuration fields. If the standard configuration includes the option `'foo'`, then accessing `self.foo` should be the same as `self.config['foo']`. """ if key in self.default_config: return self.config.get(key, None) else: return object.__getattribute__(self, key) def __setattr__(self, key, value): """Provide direct assignment to configuration fields. If the standard configuration includes the option `'foo'`, then assigning to `self.foo` should be the same as assigning to `self.config['foo']`. """ if key in self.default_config: self.config[key] = value else: super(BasePlugin, self).__setattr__(key, value) def _init(self): """Initialize plugin state, such as registering event handlers. Also sets up required event handlers. """ if self.xmpp is not None: self.xmpp.add_event_handler('session_bind', self.session_bind) if self.xmpp.session_bind_event.is_set(): self.session_bind(self.xmpp.boundjid.full) self.plugin_init() log.debug('Loaded Plugin: %s', self.description) def _end(self): """Cleanup plugin state, and prepare for plugin removal. Also removes required event handlers. """ if self.xmpp is not None: self.xmpp.del_event_handler('session_bind', self.session_bind) self.plugin_end() log.debug('Disabled Plugin: %s' % self.description) def plugin_init(self): """Initialize plugin state, such as registering event handlers.""" pass def plugin_end(self): """Cleanup plugin state, and prepare for plugin removal.""" pass def session_bind(self, jid): """Initialize plugin state based on the bound JID.""" pass def post_init(self): """Initialize any cross-plugin state. 
Only needed if the plugin has circular dependencies. """ pass base_plugin = BasePlugin<|fim▁end|>
if top_level:
<|file_name|>chained_compiled_file_system.py<|end_file_name|><|fim▁begin|># Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from compiled_file_system import CompiledFileSystem from file_system import FileNotFoundError class ChainedCompiledFileSystem(object): ''' A CompiledFileSystem implementation that fetches data from a chain of CompiledFileSystems that have different file systems and separate cache namespaces. The rules for the compiled file system chain are: - Versions are fetched from the first compiled file system's underlying file system. - Each compiled file system is read in the reverse order (the last one is read first). If the version matches, return the data. Otherwise, read from the previous compiled file system until the first one is read. It is used to chain compiled file systems whose underlying file systems are<|fim▁hole|> def __init__(self, factory_and_fs_chain): self._factory_and_fs_chain = factory_and_fs_chain def Create(self, populate_function, cls, category=None): return ChainedCompiledFileSystem( [(factory.Create(populate_function, cls, category), fs) for factory, fs in self._factory_and_fs_chain]) def __init__(self, compiled_fs_chain): assert len(compiled_fs_chain) > 0 self._compiled_fs_chain = compiled_fs_chain def GetFromFile(self, path, binary=False): # It's possible that a new file is added in the first compiled file system # and it doesn't exist in other compiled file systems. try: first_compiled_fs, first_file_system = self._compiled_fs_chain[0] # The first file system contains both files of a newer version and files # shared with other compiled file systems. We are going to try each # compiled file system in the reverse order and return the data when # version matches. 
Data cached in other compiled file system will be # reused whenever possible so that we don't need to recompile things that # are not changed across these file systems. version = first_file_system.Stat(path).version for compiled_fs, _ in reversed(self._compiled_fs_chain): if compiled_fs.StatFile(path) == version: return compiled_fs.GetFromFile(path, binary) except FileNotFoundError: pass # Try first operation again to generate the correct stack trace return first_compiled_fs.GetFromFile(path, binary) def GetFromFileListing(self, path): if not path.endswith('/'): path += '/' try: first_compiled_fs, first_file_system = self._compiled_fs_chain[0] version = first_file_system.Stat(path).version for compiled_fs, _ in reversed(self._compiled_fs_chain): if compiled_fs.StatFileListing(path) == version: return compiled_fs.GetFromFileListing(path) except FileNotFoundError: pass # Try first operation again to generate the correct stack trace return first_compiled_fs.GetFromFileListing(path)<|fim▁end|>
slightly different. This makes it possible to reuse cached compiled data in one of them without recompiling everything that is shared by them. ''' class Factory(CompiledFileSystem.Factory):
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys from distutils.command.config import config import guzzle_sphinx_theme import tomli from dunamai import Version root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) sys.path.insert(0, root) # -- Project information ----------------------------------------------------- # General project metadata is stored in pyproject.toml with open(os.path.join(root, "pyproject.toml"), "rb") as f: config = tomli.load(f) project_meta = config["tool"]["poetry"] print(project_meta) project = project_meta["name"] author = project_meta["authors"][0] description = project_meta["description"] url = project_meta["homepage"] title = project + " Documentation" _version = Version.from_git() # The full version, including alpha/beta/rc tags release = _version.serialize(metadata=False) # The short X.Y.Z version version = _version.base # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = "2.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ "sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_autodoc_typehints", "guzzle_sphinx_theme", "sphinxcontrib_dooble", ] # Include a separate entry for special methods, like __init__, where provided. autodoc_default_options = { "member-order": "bysource", "special-members": True, "exclude-members": "__dict__,__weakref__", } # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ".rst" # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_translator_class = "guzzle_sphinx_theme.HTMLTranslator" html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = "guzzle_sphinx_theme" html_title = title html_short_title = project + " " + version # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation.<|fim▁hole|># html_theme_options = {} html_theme_options = {"projectlink": url} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = project + "doc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [(master_doc, project + ".tex", title, author, "manual")] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, project.lower(), title, [author], 1)] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, project, title, author, project, description, "Miscellaneous") ] # -- Extension configuration -------------------------------------------------<|fim▁end|>
#
<|file_name|>grp_roundcube.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # kate: space-indent on; indent-width 4; mixedindent off; indent-mode python; roundcube = [ {'name':'common', 'mainpackage':True, 'shortdesc':'Installs the latest version of roundcube', 'description':'', 'packages':['roundcube-plugins', 'roundcube-plugins-extra', 'roundcube-amacube']<|fim▁hole|> 'depends':['common'], 'packages':['roundcube-mysql'] }, {'name':'pgsql', 'shortdesc':'Installs roundcube using PostgreSQL database', 'description':'', 'depends':['common'], 'packages':['roundcube-pgsql'] }, {'name':'sqlite3', 'shortdesc':'Installs roundcube using SQLite3 database', 'description':'', 'depends':['common'], 'packages':['roundcube-sqlite3'] }, {'name':'none', 'shortdesc':'Uninstalls all versions of roundcube', 'description':'', 'packages':[], 'noconflicts':[] }, ]<|fim▁end|>
}, {'name':'mysql', 'shortdesc':'Installs roundcube using MySQL database', 'description':'',
<|file_name|>scheduler.rs<|end_file_name|><|fim▁begin|>use alloc::linked_list::LinkedList; use alloc::arc::Arc; use super::bottom_half; use super::bottom_half::BottomHalfManager; use super::task::{TID_BOTTOMHALFD, TID_SYSTEMIDLE}; use super::task::{Task, TaskContext, TaskPriority, TaskStatus}; use kernel::kget; use memory::MemoryManager; const THREAD_QUANTUM: usize = 10; /// Scheduler for the kernel. Manages scheduling of tasks and timers pub struct Scheduler { inactive_tasks: LinkedList<Task>, active_task: Option<Task>, task_count: u32, last_resched: usize, need_resched: bool, bh_manager: Arc<BottomHalfManager>, } impl Scheduler { /// Creates a new scheduler /// /// The currently active task is created along with a single, currently `WAITING`, task of /// priority `IRQ`. pub fn new(memory_manager: &mut MemoryManager) -> Scheduler { let mut inactive_tasks = LinkedList::new(); // Create the kernel bottom_half IRQ processing thread let stack = memory_manager.allocate_stack(); inactive_tasks.push_front(Task::new( TID_BOTTOMHALFD, stack, bottom_half::execute, TaskPriority::IRQ, TaskStatus::WAITING,<|fim▁hole|> Scheduler { inactive_tasks: inactive_tasks, active_task: Some(Task::default(TID_SYSTEMIDLE)), task_count: 2, last_resched: 0, need_resched: false, bh_manager: Arc::new(BottomHalfManager::new()), } } /// Create a new task to be scheduled. pub fn new_task(&mut self, memory_manager: &mut MemoryManager, func: fn()) { let stack = memory_manager.allocate_stack(); self.inactive_tasks.push_front(Task::new( self.task_count, stack, func, TaskPriority::NORMAL, TaskStatus::READY, )); self.task_count += 1; } /// Schedule the next task. /// /// Choses the next task with status != `TaskStatus::COMPLETED` and switches its context with /// that of the currently active task. 
pub fn schedule(&mut self, active_ctx: &mut TaskContext) { // Optimization - return early if nothing to do if self.inactive_tasks.len() == 0 { return; } // First look for active high priority tasks first, if none of these exist then look for // normal priority tasks. There is guaranteed to always be at least one NORMAL priority // task so it is safe to call unwrap. let new_task = match self.next_task(TaskPriority::IRQ) { Some(t) => t, None => { if let Some(ref t) = self.active_task { // The active task is an interrupt handler that hasn't yet completed, let it // run to completion. if t.get_priority() == TaskPriority::IRQ && t.get_status() == TaskStatus::READY { return; } } // Theres definitely nothing higer priority! self.next_task(TaskPriority::NORMAL).unwrap() } }; let mut old_task = self.active_task.take().unwrap(); // Swap the contexts // Copy the active context to save it old_task.set_context(active_ctx); *active_ctx = *new_task.get_context(); // Update the schedulers internal references and store the initial task back into the // inactive_tasks list if it is not yet finished. By not restoring COMPLETED tasks here // we force cleanup of COMPLETED tasks. self.active_task = Some(new_task); if old_task.get_status() != TaskStatus::COMPLETED { self.inactive_tasks.push_back(old_task); } // Update the last_resched time self.update_last_resched(); } /// Get a mutable reference to the current active task. pub fn get_active_task_mut(&mut self) -> Option<&mut Task> { self.active_task.as_mut() } /// Returns true if a reschedule is needed /// /// Returns true if the last reschedule was over `THREAD_QUANTUM` cpu ticks ago. 
pub fn need_resched(&self) -> bool { if self.need_resched { return true; } let clock = unsafe { &mut *kget().clock.get() }; let now = clock.now(); (now - self.last_resched) > THREAD_QUANTUM } /// Returns an Arc pointer to the bh_fifo pub fn bh_manager(&self) -> Arc<BottomHalfManager> { self.bh_manager.clone() } /// Set the status of task with `id` pub fn set_task_status(&mut self, id: u32, status: TaskStatus) { if let Some(ref mut t) = self.active_task { if t.id() == id { t.set_status(status); return; } } for t in self.inactive_tasks.iter_mut() { if t.id() == id { t.set_status(status); return; } } } /// Set the internal 'need_resched' flag to true pub fn set_need_resched(&mut self) { self.need_resched = true; } /// Update `last_resched` to now and reset the `need_resched` flag fn update_last_resched(&mut self) { let clock = unsafe { &mut *kget().clock.get() }; self.last_resched = clock.now(); self.need_resched = false; } /// Find the next task with priority matching `priority` fn next_task(&mut self, priority: TaskPriority) -> Option<Task> { let mut i = 0; let mut found = false; for ref t in self.inactive_tasks.iter() { if t.get_priority() != priority || t.get_status() != TaskStatus::READY { // On to the next, this is not suitable i += 1; } else { found = true; break; } } if found { // Split inactive_tasks, remove the task we found, then re-merge the two lists let mut remainder = self.inactive_tasks.split_off(i); let next_task = remainder.pop_front(); // Merge the lists loop { match remainder.pop_front() { Some(t) => self.inactive_tasks.push_back(t), None => break, } } next_task } else { None } } }<|fim▁end|>
));
<|file_name|>github-electron-renderer-tests.ts<|end_file_name|><|fim▁begin|> import { ipcRenderer, remote, webFrame, clipboard, crashReporter, nativeImage, screen, shell } from 'electron'; import * as fs from 'fs'; // In renderer process (web page). // https://github.com/atom/electron/blob/master/docs/api/ipc-renderer.md console.log(ipcRenderer.sendSync('synchronous-message', 'ping')); // prints "pong" ipcRenderer.on('asynchronous-reply', (event: Electron.IpcRendererEvent, arg: any) => { console.log(arg); // prints "pong" event.sender.send('another-message', 'Hello World!'); }); ipcRenderer.send('asynchronous-message', 'ping'); // remote // https://github.com/atom/electron/blob/master/docs/api/remote.md var BrowserWindow = remote.BrowserWindow; var win = new BrowserWindow({ width: 800, height: 600 }); win.loadURL('https://github.com'); remote.getCurrentWindow().on('close', () => { // blabla... }); remote.getCurrentWindow().capturePage(buf => { fs.writeFile('/tmp/screenshot.png', buf, err => { console.log(err); }); }); remote.getCurrentWebContents().print(); remote.getCurrentWindow().capturePage(buf => { remote.require('fs').writeFile('/tmp/screenshot.png', buf, (err: Error) => { console.log(err); }); }); // web-frame // https://github.com/atom/electron/blob/master/docs/api/web-frame.md webFrame.setZoomFactor(2); console.log(webFrame.getZoomFactor()); webFrame.setZoomLevel(200); console.log(webFrame.getZoomLevel()); webFrame.setVisualZoomLevelLimits(50, 200); webFrame.setLayoutZoomLevelLimits(50, 200); webFrame.setSpellCheckProvider('en-US', true, { spellCheck: text => { return !(require('spellchecker').isMisspelled(text)); } }); webFrame.registerURLSchemeAsSecure('app'); webFrame.registerURLSchemeAsBypassingCSP('app'); webFrame.registerURLSchemeAsPrivileged('app'); webFrame.registerURLSchemeAsPrivileged('app', { secure: true, supportFetchAPI: true, }); webFrame.insertText('text'); webFrame.executeJavaScript('JSON.stringify({})', false, (result) => { 
console.log(result); }).then((result: string) => console.log('OK:' + result)); console.log(webFrame.getResourceUsage()); webFrame.clearCache(); // clipboard // https://github.com/atom/electron/blob/master/docs/api/clipboard.md clipboard.writeText('Example String'); clipboard.writeText('Example String', 'selection'); console.log(clipboard.readText('selection')); console.log(clipboard.availableFormats()); clipboard.clear(); clipboard.write({ html: '<html></html>', text: 'Hello World!', image: clipboard.readImage() }); // crash-reporter // https://github.com/atom/electron/blob/master/docs/api/crash-reporter.md crashReporter.start({ productName: 'YourName', companyName: 'YourCompany', submitURL: 'https://your-domain.com/url-to-submit', autoSubmit: true }); // desktopCapturer // https://github.com/atom/electron/blob/master/docs/api/desktop-capturer.md var desktopCapturer = require('electron').desktopCapturer; desktopCapturer.getSources({types: ['window', 'screen']}, function(error, sources) { if (error) throw error; for (var i = 0; i < sources.length; ++i) { if (sources[i].name == "Electron") { (navigator as any).webkitGetUserMedia({ audio: false, video: { mandatory: { chromeMediaSource: 'desktop', chromeMediaSourceId: sources[i].id, minWidth: 1280, maxWidth: 1280, minHeight: 720, maxHeight: 720 } } }, gotStream, getUserMediaError); return; } } }); function gotStream(stream: any) { (document.querySelector('video') as HTMLVideoElement).src = URL.createObjectURL(stream); }<|fim▁hole|> console.log('getUserMediaError', error); } // File object // https://github.com/atom/electron/blob/master/docs/api/file-object.md /* <div id="holder"> Drag your file here </div> */ var holder = document.getElementById('holder'); holder.ondragover = function () { return false; }; holder.ondragleave = holder.ondragend = function () { return false; }; holder.ondrop = function (e) { e.preventDefault(); var file = e.dataTransfer.files[0]; console.log('File you dragged here is', file.path); return 
false; }; // nativeImage // https://github.com/atom/electron/blob/master/docs/api/native-image.md var Tray = remote.Tray; var appIcon2 = new Tray('/Users/somebody/images/icon.png'); var window2 = new BrowserWindow({ icon: '/Users/somebody/images/window.png' }); var image = clipboard.readImage(); var appIcon3 = new Tray(image); var appIcon4 = new Tray('/Users/somebody/images/icon.png'); // https://github.com/electron/electron/blob/master/docs/api/process.md // preload.js var _setImmediate = setImmediate; var _clearImmediate = clearImmediate; process.once('loaded', function() { global.setImmediate = _setImmediate; global.clearImmediate = _clearImmediate; }); // screen // https://github.com/atom/electron/blob/master/docs/api/screen.md var app = remote.app; var mainWindow: Electron.BrowserWindow = null; app.on('ready', () => { var size = screen.getPrimaryDisplay().workAreaSize; mainWindow = new BrowserWindow({ width: size.width, height: size.height }); }); app.on('ready', () => { var displays = screen.getAllDisplays(); var externalDisplay: any = null; for (var i in displays) { if (displays[i].bounds.x > 0 || displays[i].bounds.y > 0) { externalDisplay = displays[i]; break; } } if (externalDisplay) { mainWindow = new BrowserWindow({ x: externalDisplay.bounds.x + 50, y: externalDisplay.bounds.y + 50, }); } }); // shell // https://github.com/atom/electron/blob/master/docs/api/shell.md shell.openExternal('https://github.com'); // <webview> // https://github.com/atom/electron/blob/master/docs/api/web-view-tag.md var webview = document.createElement('webview'); webview.loadURL('https://github.com'); webview.addEventListener('console-message', function(e) { console.log('Guest page logged a message:', e.message); }); webview.addEventListener('found-in-page', function(e) { if (e.result.finalUpdate) { webview.stopFindInPage("keepSelection"); } }); var requestId = webview.findInPage("test"); webview.addEventListener('new-window', function(e) { 
require('electron').shell.openExternal(e.url); }); webview.addEventListener('close', function() { webview.src = 'about:blank'; }); // In embedder page. webview.addEventListener('ipc-message', function(event) { console.log(event.channel); // Prints "pong" }); webview.send('ping'); webview.capturePage((image) => { console.log(image); }); // In guest page. ipcRenderer.on('ping', function() { ipcRenderer.sendToHost('pong'); });<|fim▁end|>
function getUserMediaError(error: Error) {
<|file_name|>OpenloadCo.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from module.plugins.internal.XFSAccount import XFSAccount class OpenloadCo(XFSAccount): __name__ = "OpenloadCo" __type__ = "account" __version__ = "0.02" __status__ = "testing" __description__ = """Openload.co account plugin""" __license__ = "GPLv3"<|fim▁hole|> __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] PLUGIN_DOMAIN = "openload.co"<|fim▁end|>
<|file_name|>core.py<|end_file_name|><|fim▁begin|>""" Handling signals of the `core` app """ from django.dispatch import receiver from core import signals from reader import actions @receiver(signals.app_link_ready)<|fim▁hole|><|fim▁end|>
def app_link_ready(sender, **kwargs): actions.create_app_link()
<|file_name|>local_file_sync_context_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/sync_file_system/local/local_file_sync_context.h" #include <vector> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/file_util.h" #include "base/files/file_path.h" #include "base/message_loop/message_loop.h" #include "base/stl_util.h" #include "chrome/browser/sync_file_system/local/canned_syncable_file_system.h" #include "chrome/browser/sync_file_system/local/local_file_change_tracker.h" #include "chrome/browser/sync_file_system/local/sync_file_system_backend.h" #include "chrome/browser/sync_file_system/sync_file_metadata.h" #include "chrome/browser/sync_file_system/sync_status_code.h" #include "chrome/browser/sync_file_system/syncable_file_system_util.h" #include "content/public/browser/browser_thread.h" #include "content/public/test/mock_blob_url_request_context.h" #include "content/public/test/test_browser_thread_bundle.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/leveldatabase/src/helpers/memenv/memenv.h" #include "third_party/leveldatabase/src/include/leveldb/env.h" #include "webkit/browser/fileapi/file_system_context.h" #include "webkit/browser/fileapi/file_system_operation_runner.h" #include "webkit/browser/fileapi/isolated_context.h" #include "webkit/common/blob/scoped_file.h" #define FPL FILE_PATH_LITERAL using content::BrowserThread; using fileapi::FileSystemContext; using fileapi::FileSystemURL; using fileapi::FileSystemURLSet; // This tests LocalFileSyncContext behavior in multi-thread / // multi-file-system-context environment. // Basic combined tests (single-thread / single-file-system-context) // that involve LocalFileSyncContext are also in // syncable_file_system_unittests.cc. 
namespace sync_file_system { namespace { const char kOrigin1[] = "http://example.com"; const char kOrigin2[] = "http://chromium.org"; } class LocalFileSyncContextTest : public testing::Test { protected: LocalFileSyncContextTest() : thread_bundle_( content::TestBrowserThreadBundle::REAL_FILE_THREAD | content::TestBrowserThreadBundle::REAL_IO_THREAD), status_(SYNC_FILE_ERROR_FAILED), file_error_(base::File::FILE_ERROR_FAILED), async_modify_finished_(false), has_inflight_prepare_for_sync_(false) {} virtual void SetUp() OVERRIDE { RegisterSyncableFileSystem(); ASSERT_TRUE(dir_.CreateUniqueTempDir()); in_memory_env_.reset(leveldb::NewMemEnv(leveldb::Env::Default())); ui_task_runner_ = base::MessageLoop::current()->message_loop_proxy(); io_task_runner_ = BrowserThread::GetMessageLoopProxyForThread( BrowserThread::IO); file_task_runner_ = BrowserThread::GetMessageLoopProxyForThread( BrowserThread::IO); } virtual void TearDown() OVERRIDE { RevokeSyncableFileSystem(); } void StartPrepareForSync(FileSystemContext* file_system_context, const FileSystemURL& url, LocalFileSyncContext::SyncMode sync_mode, SyncFileMetadata* metadata, FileChangeList* changes, webkit_blob::ScopedFile* snapshot) { ASSERT_TRUE(changes != NULL); ASSERT_FALSE(has_inflight_prepare_for_sync_); status_ = SYNC_STATUS_UNKNOWN; has_inflight_prepare_for_sync_ = true; sync_context_->PrepareForSync( file_system_context, url, sync_mode, base::Bind(&LocalFileSyncContextTest::DidPrepareForSync, base::Unretained(this), metadata, changes, snapshot)); } SyncStatusCode PrepareForSync(FileSystemContext* file_system_context, const FileSystemURL& url, LocalFileSyncContext::SyncMode sync_mode, SyncFileMetadata* metadata, FileChangeList* changes, webkit_blob::ScopedFile* snapshot) { StartPrepareForSync(file_system_context, url, sync_mode, metadata, changes, snapshot); base::MessageLoop::current()->Run(); return status_; } base::Closure GetPrepareForSyncClosure( FileSystemContext* file_system_context, const FileSystemURL& 
url, LocalFileSyncContext::SyncMode sync_mode, SyncFileMetadata* metadata, FileChangeList* changes, webkit_blob::ScopedFile* snapshot) { return base::Bind(&LocalFileSyncContextTest::StartPrepareForSync, base::Unretained(this), base::Unretained(file_system_context), url, sync_mode, metadata, changes, snapshot); } void DidPrepareForSync(SyncFileMetadata* metadata_out, FileChangeList* changes_out, webkit_blob::ScopedFile* snapshot_out, SyncStatusCode status, const LocalFileSyncInfo& sync_file_info, webkit_blob::ScopedFile snapshot) { ASSERT_TRUE(ui_task_runner_->RunsTasksOnCurrentThread()); has_inflight_prepare_for_sync_ = false; status_ = status; *metadata_out = sync_file_info.metadata; *changes_out = sync_file_info.changes; if (snapshot_out) *snapshot_out = snapshot.Pass(); base::MessageLoop::current()->Quit(); } SyncStatusCode ApplyRemoteChange(FileSystemContext* file_system_context, const FileChange& change, const base::FilePath& local_path, const FileSystemURL& url, SyncFileType expected_file_type) { SCOPED_TRACE(testing::Message() << "ApplyChange for " << url.DebugString()); // First we should call PrepareForSync to disable writing. 
SyncFileMetadata metadata; FileChangeList changes; EXPECT_EQ(SYNC_STATUS_OK, PrepareForSync(file_system_context, url, LocalFileSyncContext::SYNC_EXCLUSIVE, &metadata, &changes, NULL)); EXPECT_EQ(expected_file_type, metadata.file_type); status_ = SYNC_STATUS_UNKNOWN; sync_context_->ApplyRemoteChange( file_system_context, change, local_path, url, base::Bind(&LocalFileSyncContextTest::DidApplyRemoteChange, base::Unretained(this), make_scoped_refptr(file_system_context), url)); base::MessageLoop::current()->Run(); return status_; } void DidApplyRemoteChange(FileSystemContext* file_system_context,<|fim▁hole|> status_ = status; sync_context_->FinalizeExclusiveSync( file_system_context, url, status == SYNC_STATUS_OK /* clear_local_changes */, base::MessageLoop::QuitClosure()); } void StartModifyFileOnIOThread(CannedSyncableFileSystem* file_system, const FileSystemURL& url) { ASSERT_TRUE(file_system != NULL); if (!io_task_runner_->RunsTasksOnCurrentThread()) { async_modify_finished_ = false; ASSERT_TRUE(ui_task_runner_->RunsTasksOnCurrentThread()); io_task_runner_->PostTask( FROM_HERE, base::Bind(&LocalFileSyncContextTest::StartModifyFileOnIOThread, base::Unretained(this), file_system, url)); return; } ASSERT_TRUE(io_task_runner_->RunsTasksOnCurrentThread()); file_error_ = base::File::FILE_ERROR_FAILED; file_system->operation_runner()->Truncate( url, 1, base::Bind(&LocalFileSyncContextTest::DidModifyFile, base::Unretained(this))); } base::File::Error WaitUntilModifyFileIsDone() { while (!async_modify_finished_) base::MessageLoop::current()->RunUntilIdle(); return file_error_; } void DidModifyFile(base::File::Error error) { if (!ui_task_runner_->RunsTasksOnCurrentThread()) { ASSERT_TRUE(io_task_runner_->RunsTasksOnCurrentThread()); ui_task_runner_->PostTask( FROM_HERE, base::Bind(&LocalFileSyncContextTest::DidModifyFile, base::Unretained(this), error)); return; } ASSERT_TRUE(ui_task_runner_->RunsTasksOnCurrentThread()); file_error_ = error; async_modify_finished_ = true; } 
void SimulateFinishSync(FileSystemContext* file_system_context, const FileSystemURL& url, SyncStatusCode status, LocalFileSyncContext::SyncMode sync_mode) { if (sync_mode == LocalFileSyncContext::SYNC_SNAPSHOT) { sync_context_->FinalizeSnapshotSync( file_system_context, url, status, base::Bind(&base::DoNothing)); } else { sync_context_->FinalizeExclusiveSync( file_system_context, url, status == SYNC_STATUS_OK /* clear_local_changes */, base::Bind(&base::DoNothing)); } } void PrepareForSync_Basic(LocalFileSyncContext::SyncMode sync_mode, SyncStatusCode simulate_sync_finish_status) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext( sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const FileSystemURL kFile(file_system.URL("file")); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kFile)); SyncFileMetadata metadata; FileChangeList changes; EXPECT_EQ(SYNC_STATUS_OK, PrepareForSync(file_system.file_system_context(), kFile, sync_mode, &metadata, &changes, NULL)); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); // We should see the same set of changes. file_system.GetChangesForURLInTracker(kFile, &changes); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); SimulateFinishSync(file_system.file_system_context(), kFile, simulate_sync_finish_status, sync_mode); file_system.GetChangesForURLInTracker(kFile, &changes); if (simulate_sync_finish_status == SYNC_STATUS_OK) { // The change's cleared. 
EXPECT_TRUE(changes.empty()); } else { EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); } sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system.TearDown(); } void PrepareForSync_WriteDuringSync( LocalFileSyncContext::SyncMode sync_mode) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext( sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const FileSystemURL kFile(file_system.URL("file")); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kFile)); SyncFileMetadata metadata; FileChangeList changes; webkit_blob::ScopedFile snapshot; EXPECT_EQ(SYNC_STATUS_OK, PrepareForSync(file_system.file_system_context(), kFile, sync_mode, &metadata, &changes, &snapshot)); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); EXPECT_EQ(sync_mode == LocalFileSyncContext::SYNC_SNAPSHOT, !snapshot.path().empty()); // Tracker keeps same set of changes. file_system.GetChangesForURLInTracker(kFile, &changes); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); StartModifyFileOnIOThread(&file_system, kFile); if (sync_mode == LocalFileSyncContext::SYNC_SNAPSHOT) { // Write should succeed. 
EXPECT_EQ(base::File::FILE_OK, WaitUntilModifyFileIsDone()); } else { base::MessageLoop::current()->RunUntilIdle(); EXPECT_FALSE(async_modify_finished_); } SimulateFinishSync(file_system.file_system_context(), kFile, SYNC_STATUS_OK, sync_mode); EXPECT_EQ(base::File::FILE_OK, WaitUntilModifyFileIsDone()); // Sync succeeded, but the other change that was made during or // after sync is recorded. file_system.GetChangesForURLInTracker(kFile, &changes); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system.TearDown(); } base::ScopedTempDir dir_; scoped_ptr<leveldb::Env> in_memory_env_; // These need to remain until the very end. content::TestBrowserThreadBundle thread_bundle_; scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_; scoped_refptr<base::SingleThreadTaskRunner> file_task_runner_; scoped_refptr<LocalFileSyncContext> sync_context_; SyncStatusCode status_; base::File::Error file_error_; bool async_modify_finished_; bool has_inflight_prepare_for_sync_; }; TEST_F(LocalFileSyncContextTest, ConstructAndDestruct) { sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); sync_context_->ShutdownOnUIThread(); } TEST_F(LocalFileSyncContextTest, InitializeFileSystemContext) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); // Initializes file_system using |sync_context_|. 
EXPECT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); // Make sure everything's set up for file_system to be able to handle // syncable file system operations. EXPECT_TRUE(file_system.backend()->sync_context() != NULL); EXPECT_TRUE(file_system.backend()->change_tracker() != NULL); EXPECT_EQ(sync_context_.get(), file_system.backend()->sync_context()); // Calling MaybeInitialize for the same context multiple times must be ok. EXPECT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); EXPECT_EQ(sync_context_.get(), file_system.backend()->sync_context()); // Opens the file_system, perform some operation and see if the change tracker // correctly captures the change. EXPECT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const FileSystemURL kURL(file_system.URL("foo")); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kURL)); FileSystemURLSet urls; file_system.GetChangedURLsInTracker(&urls); ASSERT_EQ(1U, urls.size()); EXPECT_TRUE(ContainsKey(urls, kURL)); // Finishing the test. sync_context_->ShutdownOnUIThread(); file_system.TearDown(); } TEST_F(LocalFileSyncContextTest, MultipleFileSystemContexts) { CannedSyncableFileSystem file_system1(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); CannedSyncableFileSystem file_system2(GURL(kOrigin2), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system1.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); file_system2.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); // Initializes file_system1 and file_system2. 
EXPECT_EQ(SYNC_STATUS_OK, file_system1.MaybeInitializeFileSystemContext(sync_context_.get())); EXPECT_EQ(SYNC_STATUS_OK, file_system2.MaybeInitializeFileSystemContext(sync_context_.get())); EXPECT_EQ(base::File::FILE_OK, file_system1.OpenFileSystem()); EXPECT_EQ(base::File::FILE_OK, file_system2.OpenFileSystem()); const FileSystemURL kURL1(file_system1.URL("foo")); const FileSystemURL kURL2(file_system2.URL("bar")); // Creates a file in file_system1. EXPECT_EQ(base::File::FILE_OK, file_system1.CreateFile(kURL1)); // file_system1's tracker must have recorded the change. FileSystemURLSet urls; file_system1.GetChangedURLsInTracker(&urls); ASSERT_EQ(1U, urls.size()); EXPECT_TRUE(ContainsKey(urls, kURL1)); // file_system1's tracker must have no change. urls.clear(); file_system2.GetChangedURLsInTracker(&urls); ASSERT_TRUE(urls.empty()); // Creates a directory in file_system2. EXPECT_EQ(base::File::FILE_OK, file_system2.CreateDirectory(kURL2)); // file_system1's tracker must have the change for kURL1 as before. urls.clear(); file_system1.GetChangedURLsInTracker(&urls); ASSERT_EQ(1U, urls.size()); EXPECT_TRUE(ContainsKey(urls, kURL1)); // file_system2's tracker now must have the change for kURL2. 
urls.clear(); file_system2.GetChangedURLsInTracker(&urls); ASSERT_EQ(1U, urls.size()); EXPECT_TRUE(ContainsKey(urls, kURL2)); SyncFileMetadata metadata; FileChangeList changes; EXPECT_EQ(SYNC_STATUS_OK, PrepareForSync(file_system1.file_system_context(), kURL1, LocalFileSyncContext::SYNC_EXCLUSIVE, &metadata, &changes, NULL)); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); EXPECT_EQ(SYNC_FILE_TYPE_FILE, metadata.file_type); EXPECT_EQ(0, metadata.size); changes.clear(); EXPECT_EQ(SYNC_STATUS_OK, PrepareForSync(file_system2.file_system_context(), kURL2, LocalFileSyncContext::SYNC_EXCLUSIVE, &metadata, &changes, NULL)); EXPECT_EQ(1U, changes.size()); EXPECT_FALSE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); EXPECT_EQ(SYNC_FILE_TYPE_DIRECTORY, metadata.file_type); EXPECT_EQ(0, metadata.size); sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system1.TearDown(); file_system2.TearDown(); } TEST_F(LocalFileSyncContextTest, PrepareSync_SyncSuccess_Exclusive) { PrepareForSync_Basic(LocalFileSyncContext::SYNC_EXCLUSIVE, SYNC_STATUS_OK); } TEST_F(LocalFileSyncContextTest, PrepareSync_SyncSuccess_Snapshot) { PrepareForSync_Basic(LocalFileSyncContext::SYNC_SNAPSHOT, SYNC_STATUS_OK); } TEST_F(LocalFileSyncContextTest, PrepareSync_SyncFailure_Exclusive) { PrepareForSync_Basic(LocalFileSyncContext::SYNC_EXCLUSIVE, SYNC_STATUS_FAILED); } TEST_F(LocalFileSyncContextTest, PrepareSync_SyncFailure_Snapshot) { PrepareForSync_Basic(LocalFileSyncContext::SYNC_SNAPSHOT, SYNC_STATUS_FAILED); } TEST_F(LocalFileSyncContextTest, PrepareSync_WriteDuringSync_Exclusive) { PrepareForSync_WriteDuringSync(LocalFileSyncContext::SYNC_EXCLUSIVE); } TEST_F(LocalFileSyncContextTest, PrepareSync_WriteDuringSync_Snapshot) { PrepareForSync_WriteDuringSync(LocalFileSyncContext::SYNC_SNAPSHOT); } // LocalFileSyncContextTest.PrepareSyncWhileWriting is flaky on android. 
// http://crbug.com/239793 // It is also flaky on the TSAN v2 bots, and hangs other bots. // http://crbug.com/305905. TEST_F(LocalFileSyncContextTest, DISABLED_PrepareSyncWhileWriting) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); EXPECT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); EXPECT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const FileSystemURL kURL1(file_system.URL("foo")); // Creates a file in file_system. EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kURL1)); // Kick file write on IO thread. StartModifyFileOnIOThread(&file_system, kURL1); // Until the operation finishes PrepareForSync should return BUSY error. SyncFileMetadata metadata; metadata.file_type = SYNC_FILE_TYPE_UNKNOWN; FileChangeList changes; EXPECT_EQ(SYNC_STATUS_FILE_BUSY, PrepareForSync(file_system.file_system_context(), kURL1, LocalFileSyncContext::SYNC_EXCLUSIVE, &metadata, &changes, NULL)); EXPECT_EQ(SYNC_FILE_TYPE_FILE, metadata.file_type); // Register PrepareForSync method to be invoked when kURL1 becomes // syncable. (Actually this may be done after all operations are done // on IO thread in this test.) metadata.file_type = SYNC_FILE_TYPE_UNKNOWN; changes.clear(); sync_context_->RegisterURLForWaitingSync( kURL1, GetPrepareForSyncClosure(file_system.file_system_context(), kURL1, LocalFileSyncContext::SYNC_EXCLUSIVE, &metadata, &changes, NULL)); // Wait for the completion. EXPECT_EQ(base::File::FILE_OK, WaitUntilModifyFileIsDone()); // The PrepareForSync must have been started; wait until DidPrepareForSync // is done. base::MessageLoop::current()->Run(); ASSERT_FALSE(has_inflight_prepare_for_sync_); // Now PrepareForSync should have run and returned OK. 
EXPECT_EQ(SYNC_STATUS_OK, status_); EXPECT_EQ(1U, changes.size()); EXPECT_TRUE(changes.list().back().IsFile()); EXPECT_TRUE(changes.list().back().IsAddOrUpdate()); EXPECT_EQ(SYNC_FILE_TYPE_FILE, metadata.file_type); EXPECT_EQ(1, metadata.size); sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system.TearDown(); } TEST_F(LocalFileSyncContextTest, ApplyRemoteChangeForDeletion) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); // Record the initial usage (likely 0). int64 initial_usage = -1; int64 quota = -1; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&initial_usage, &quota)); // Create a file and directory in the file_system. const FileSystemURL kFile(file_system.URL("file")); const FileSystemURL kDir(file_system.URL("dir")); const FileSystemURL kChild(file_system.URL("dir/child")); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kFile)); EXPECT_EQ(base::File::FILE_OK, file_system.CreateDirectory(kDir)); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kChild)); // file_system's change tracker must have recorded the creation. FileSystemURLSet urls; file_system.GetChangedURLsInTracker(&urls); ASSERT_EQ(3U, urls.size()); ASSERT_TRUE(ContainsKey(urls, kFile)); ASSERT_TRUE(ContainsKey(urls, kDir)); ASSERT_TRUE(ContainsKey(urls, kChild)); for (FileSystemURLSet::iterator iter = urls.begin(); iter != urls.end(); ++iter) { file_system.ClearChangeForURLInTracker(*iter); } // At this point the usage must be greater than the initial usage. 
int64 new_usage = -1; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_GT(new_usage, initial_usage); // Now let's apply remote deletion changes. FileChange change(FileChange::FILE_CHANGE_DELETE, SYNC_FILE_TYPE_FILE); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, base::FilePath(), kFile, SYNC_FILE_TYPE_FILE)); // The implementation doesn't check file type for deletion, and it must be ok // even if we don't know if the deletion change was for a file or a directory. change = FileChange(FileChange::FILE_CHANGE_DELETE, SYNC_FILE_TYPE_UNKNOWN); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, base::FilePath(), kDir, SYNC_FILE_TYPE_DIRECTORY)); // Check the directory/files are deleted successfully. EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kFile)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.DirectoryExists(kDir)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kChild)); // The changes applied by ApplyRemoteChange should not be recorded in // the change tracker. urls.clear(); file_system.GetChangedURLsInTracker(&urls); EXPECT_TRUE(urls.empty()); // The quota usage data must have reflected the deletion. 
EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_EQ(new_usage, initial_usage); sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system.TearDown(); } TEST_F(LocalFileSyncContextTest, ApplyRemoteChangeForDeletion_ForRoot) { CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); // Record the initial usage (likely 0). int64 initial_usage = -1; int64 quota = -1; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&initial_usage, &quota)); // Create a file and directory in the file_system. const FileSystemURL kFile(file_system.URL("file")); const FileSystemURL kDir(file_system.URL("dir")); const FileSystemURL kChild(file_system.URL("dir/child")); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kFile)); EXPECT_EQ(base::File::FILE_OK, file_system.CreateDirectory(kDir)); EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kChild)); // At this point the usage must be greater than the initial usage. int64 new_usage = -1; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_GT(new_usage, initial_usage); const FileSystemURL kRoot(file_system.URL("")); // Now let's apply remote deletion changes for the root. FileChange change(FileChange::FILE_CHANGE_DELETE, SYNC_FILE_TYPE_DIRECTORY); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, base::FilePath(), kRoot, SYNC_FILE_TYPE_DIRECTORY)); // Check the directory/files are deleted successfully. 
EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kFile)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.DirectoryExists(kDir)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kChild)); // All changes made for the previous creation must have been also reset. FileSystemURLSet urls; file_system.GetChangedURLsInTracker(&urls); EXPECT_TRUE(urls.empty()); // The quota usage data must have reflected the deletion. EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_EQ(new_usage, initial_usage); sync_context_->ShutdownOnUIThread(); sync_context_ = NULL; file_system.TearDown(); } TEST_F(LocalFileSyncContextTest, ApplyRemoteChangeForAddOrUpdate) { base::ScopedTempDir temp_dir; ASSERT_TRUE(temp_dir.CreateUniqueTempDir()); CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const FileSystemURL kFile1(file_system.URL("file1")); const FileSystemURL kFile2(file_system.URL("file2")); const FileSystemURL kDir(file_system.URL("dir")); const char kTestFileData0[] = "0123456789"; const char kTestFileData1[] = "Lorem ipsum!"; const char kTestFileData2[] = "This is sample test data."; // Create kFile1 and populate it with kTestFileData0. EXPECT_EQ(base::File::FILE_OK, file_system.CreateFile(kFile1)); EXPECT_EQ(static_cast<int64>(arraysize(kTestFileData0) - 1), file_system.WriteString(kFile1, kTestFileData0)); // kFile2 and kDir are not there yet. 
EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kFile2)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.DirectoryExists(kDir)); // file_system's change tracker must have recorded the creation. FileSystemURLSet urls; file_system.GetChangedURLsInTracker(&urls); ASSERT_EQ(1U, urls.size()); EXPECT_TRUE(ContainsKey(urls, kFile1)); file_system.ClearChangeForURLInTracker(*urls.begin()); // Prepare temporary files which represent the remote file data. const base::FilePath kFilePath1(temp_dir.path().Append(FPL("file1"))); const base::FilePath kFilePath2(temp_dir.path().Append(FPL("file2"))); ASSERT_EQ(static_cast<int>(arraysize(kTestFileData1) - 1), base::WriteFile(kFilePath1, kTestFileData1, arraysize(kTestFileData1) - 1)); ASSERT_EQ(static_cast<int>(arraysize(kTestFileData2) - 1), base::WriteFile(kFilePath2, kTestFileData2, arraysize(kTestFileData2) - 1)); // Record the usage. int64 usage = -1, new_usage = -1; int64 quota = -1; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&usage, &quota)); // Here in the local filesystem we have: // * kFile1 with kTestFileData0 // // In the remote side let's assume we have: // * kFile1 with kTestFileData1 // * kFile2 with kTestFileData2 // * kDir // // By calling ApplyChange's: // * kFile1 will be updated to have kTestFileData1 // * kFile2 will be created // * kDir will be created // Apply the remote change to kFile1 (which will update the file). FileChange change(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_FILE); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, kFilePath1, kFile1, SYNC_FILE_TYPE_FILE)); // Check if the usage has been increased by (kTestFileData1 - kTestFileData0). 
const int updated_size = arraysize(kTestFileData1) - arraysize(kTestFileData0); EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_EQ(updated_size, new_usage - usage); // Apply remote changes to kFile2 and kDir (should create a file and // directory respectively). // They are non-existent yet so their expected file type (the last // parameter of ApplyRemoteChange) are // SYNC_FILE_TYPE_UNKNOWN. change = FileChange(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_FILE); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, kFilePath2, kFile2, SYNC_FILE_TYPE_UNKNOWN)); change = FileChange(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_DIRECTORY); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, base::FilePath(), kDir, SYNC_FILE_TYPE_UNKNOWN)); // Calling ApplyRemoteChange with different file type should be handled as // overwrite. change = FileChange(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_FILE); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, kFilePath1, kDir, SYNC_FILE_TYPE_DIRECTORY)); EXPECT_EQ(base::File::FILE_OK, file_system.FileExists(kDir)); change = FileChange(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_DIRECTORY); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, kFilePath1, kDir, SYNC_FILE_TYPE_FILE)); // Creating a file/directory must have increased the usage more than // the size of kTestFileData2. new_usage = usage; EXPECT_EQ(quota::kQuotaStatusOk, file_system.GetUsageAndQuota(&new_usage, &quota)); EXPECT_GT(new_usage, static_cast<int64>(usage + arraysize(kTestFileData2) - 1)); // The changes applied by ApplyRemoteChange should not be recorded in // the change tracker. urls.clear(); file_system.GetChangedURLsInTracker(&urls); EXPECT_TRUE(urls.empty()); // Make sure all three files/directory exist. 
EXPECT_EQ(base::File::FILE_OK, file_system.FileExists(kFile1)); EXPECT_EQ(base::File::FILE_OK, file_system.FileExists(kFile2)); EXPECT_EQ(base::File::FILE_OK, file_system.DirectoryExists(kDir)); sync_context_->ShutdownOnUIThread(); file_system.TearDown(); } TEST_F(LocalFileSyncContextTest, ApplyRemoteChangeForAddOrUpdate_NoParent) { base::ScopedTempDir temp_dir; ASSERT_TRUE(temp_dir.CreateUniqueTempDir()); CannedSyncableFileSystem file_system(GURL(kOrigin1), in_memory_env_.get(), io_task_runner_.get(), file_task_runner_.get()); file_system.SetUp(CannedSyncableFileSystem::QUOTA_ENABLED); sync_context_ = new LocalFileSyncContext( dir_.path(), in_memory_env_.get(), ui_task_runner_.get(), io_task_runner_.get()); ASSERT_EQ(SYNC_STATUS_OK, file_system.MaybeInitializeFileSystemContext(sync_context_.get())); ASSERT_EQ(base::File::FILE_OK, file_system.OpenFileSystem()); const char kTestFileData[] = "Lorem ipsum!"; const FileSystemURL kDir(file_system.URL("dir")); const FileSystemURL kFile(file_system.URL("dir/file")); // Either kDir or kFile not exist yet. EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kDir)); EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file_system.FileExists(kFile)); // Prepare a temporary file which represents remote file data. const base::FilePath kFilePath(temp_dir.path().Append(FPL("file"))); ASSERT_EQ(static_cast<int>(arraysize(kTestFileData) - 1), base::WriteFile(kFilePath, kTestFileData, arraysize(kTestFileData) - 1)); // Calling ApplyChange's with kFilePath should create // kFile along with kDir. FileChange change(FileChange::FILE_CHANGE_ADD_OR_UPDATE, SYNC_FILE_TYPE_FILE); EXPECT_EQ(SYNC_STATUS_OK, ApplyRemoteChange(file_system.file_system_context(), change, kFilePath, kFile, SYNC_FILE_TYPE_UNKNOWN)); // The changes applied by ApplyRemoteChange should not be recorded in // the change tracker. 
FileSystemURLSet urls; urls.clear(); file_system.GetChangedURLsInTracker(&urls); EXPECT_TRUE(urls.empty()); // Make sure kDir and kFile are created by ApplyRemoteChange. EXPECT_EQ(base::File::FILE_OK, file_system.FileExists(kFile)); EXPECT_EQ(base::File::FILE_OK, file_system.DirectoryExists(kDir)); sync_context_->ShutdownOnUIThread(); file_system.TearDown(); } } // namespace sync_file_system<|fim▁end|>
const FileSystemURL& url, SyncStatusCode status) {
<|file_name|>Operations.java<|end_file_name|><|fim▁begin|>package com.amaze.filemanager.filesystem; import android.content.Context; import android.os.AsyncTask; import android.os.Build; import android.support.annotation.NonNull; import android.support.v4.provider.DocumentFile; import com.amaze.filemanager.exceptions.RootNotPermittedException; import com.amaze.filemanager.utils.DataUtils; import com.amaze.filemanager.utils.cloud.CloudUtil; import com.amaze.filemanager.utils.Logger; import com.amaze.filemanager.utils.MainActivityHelper; import com.amaze.filemanager.utils.OTGUtil; import com.amaze.filemanager.utils.OpenMode; import com.amaze.filemanager.utils.RootUtils; import com.cloudrail.si.interfaces.CloudStorage; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.net.MalformedURLException; import jcifs.smb.SmbException; import jcifs.smb.SmbFile; /** * Created by arpitkh996 on 13-01-2016, modified by Emmanuel Messulam<emmanuelbendavid@gmail.com> */ public class Operations { // reserved characters by OS, shall not be allowed in file names private static final String FOREWARD_SLASH = "/"; private static final String BACKWARD_SLASH = "\\"; private static final String COLON = ":"; private static final String ASTERISK = "*"; private static final String QUESTION_MARK = "?"; private static final String QUOTE = "\""; private static final String GREATER_THAN = ">"; private static final String LESS_THAN = "<"; private static final String FAT = "FAT"; private DataUtils dataUtils = DataUtils.getInstance(); public interface ErrorCallBack { /** * Callback fired when file being created in process already exists * * @param file */ void exists(HFile file); /** * Callback fired when creating new file/directory and required storage access framework permission * to access SD Card is not available * * @param file */ void launchSAF(HFile file); /** * Callback fired when renaming file and required storage access 
framework permission to access * SD Card is not available * * @param file * @param file1 */ void launchSAF(HFile file, HFile file1); /** * Callback fired when we're done processing the operation * * @param hFile * @param b defines whether operation was successful */ void done(HFile hFile, boolean b); /** * Callback fired when an invalid file name is found. * * @param file */ void invalidName(HFile file); } public static void mkdir(@NonNull final HFile file, final Context context, final boolean rootMode, @NonNull final ErrorCallBack errorCallBack) { new AsyncTask<Void, Void, Void>() { private DataUtils dataUtils = DataUtils.getInstance(); @Override protected Void doInBackground(Void... params) { // checking whether filename is valid or a recursive call possible if (MainActivityHelper.isNewDirectoryRecursive(file) || !Operations.isFileNameValid(file.getName(context))) { errorCallBack.invalidName(file); return null; } if (file.exists()) { errorCallBack.exists(file); return null; } if (file.isSmb()) { try { file.getSmbFile(2000).mkdirs(); } catch (SmbException e) { Logger.log(e, file.getPath(), context); errorCallBack.done(file, false); return null; } errorCallBack.done(file, file.exists()); return null; } else if (file.isOtgFile()) { // first check whether new directory already exists DocumentFile directoryToCreate = OTGUtil.getDocumentFile(file.getPath(), context, false); if (directoryToCreate != null) errorCallBack.exists(file); DocumentFile parentDirectory = OTGUtil.getDocumentFile(file.getParent(), context, false); if (parentDirectory.isDirectory()) { parentDirectory.createDirectory(file.getName(context)); errorCallBack.done(file, true); } else errorCallBack.done(file, false); return null; } else if (file.isDropBoxFile()) { CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX); try { cloudStorageDropbox.createFolder(CloudUtil.stripPath(OpenMode.DROPBOX, file.getPath())); errorCallBack.done(file, true); } catch (Exception e) { 
e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isBoxFile()) { CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX); try { cloudStorageBox.createFolder(CloudUtil.stripPath(OpenMode.BOX, file.getPath())); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isOneDriveFile()) { CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE); try { cloudStorageOneDrive.createFolder(CloudUtil.stripPath(OpenMode.ONEDRIVE, file.getPath())); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isGoogleDriveFile()) { CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE); try { cloudStorageGdrive.createFolder(CloudUtil.stripPath(OpenMode.GDRIVE, file.getPath())); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else { if (file.isLocal() || file.isRoot()) { int mode = checkFolder(new File(file.getParent()), context); if (mode == 2) { errorCallBack.launchSAF(file); return null; } if (mode == 1 || mode == 0) FileUtil.mkdir(file.getFile(), context); if (!file.exists() && rootMode) { file.setMode(OpenMode.ROOT); if (file.exists()) errorCallBack.exists(file); try { RootUtils.mkDir(file.getParent(context), file.getName(context)); } catch (RootNotPermittedException e) { Logger.log(e, file.getPath(), context); } errorCallBack.done(file, file.exists()); return null; } errorCallBack.done(file, file.exists()); return null; } errorCallBack.done(file, file.exists()); } return null; } }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR); } public static void mkfile(@NonNull final HFile file, final Context context, final boolean rootMode, @NonNull final ErrorCallBack errorCallBack) { new AsyncTask<Void, Void, Void>() { private DataUtils dataUtils = DataUtils.getInstance(); @Override protected Void 
doInBackground(Void... params) { // check whether filename is valid or not if (!Operations.isFileNameValid(file.getName(context))) { errorCallBack.invalidName(file); return null; } if (file.exists()) { errorCallBack.exists(file); return null; } if (file.isSmb()) { try { file.getSmbFile(2000).createNewFile(); } catch (SmbException e) { Logger.log(e, file.getPath(), context); errorCallBack.done(file, false); return null; } errorCallBack.done(file, file.exists()); return null; } else if (file.isDropBoxFile()) { CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX); try { byte[] tempBytes = new byte[0]; ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(tempBytes); cloudStorageDropbox.upload(CloudUtil.stripPath(OpenMode.DROPBOX, file.getPath()), byteArrayInputStream, 0l, true); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isBoxFile()) { CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX); try { byte[] tempBytes = new byte[0]; ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(tempBytes); cloudStorageBox.upload(CloudUtil.stripPath(OpenMode.BOX, file.getPath()), byteArrayInputStream, 0l, true); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isOneDriveFile()) { CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE); try { byte[] tempBytes = new byte[0]; ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(tempBytes); cloudStorageOneDrive.upload(CloudUtil.stripPath(OpenMode.ONEDRIVE, file.getPath()), byteArrayInputStream, 0l, true); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isGoogleDriveFile()) { CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE); try { byte[] tempBytes = new byte[0]; 
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(tempBytes);<|fim▁hole|> byteArrayInputStream, 0l, true); errorCallBack.done(file, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(file, false); } } else if (file.isOtgFile()) { // first check whether new file already exists DocumentFile fileToCreate = OTGUtil.getDocumentFile(file.getPath(), context, false); if (fileToCreate != null) errorCallBack.exists(file); DocumentFile parentDirectory = OTGUtil.getDocumentFile(file.getParent(), context, false); if (parentDirectory.isDirectory()) { parentDirectory.createFile(file.getName(context).substring(file.getName().lastIndexOf(".")), file.getName(context)); errorCallBack.done(file, true); } else errorCallBack.done(file, false); return null; } else { if (file.isLocal() || file.isRoot()) { int mode = checkFolder(new File(file.getParent()), context); if (mode == 2) { errorCallBack.launchSAF(file); return null; } if (mode == 1 || mode == 0) try { FileUtil.mkfile(file.getFile(), context); } catch (IOException e) { } if (!file.exists() && rootMode) { file.setMode(OpenMode.ROOT); if (file.exists()) errorCallBack.exists(file); try { RootUtils.mkFile(file.getPath()); } catch (RootNotPermittedException e) { Logger.log(e, file.getPath(), context); } errorCallBack.done(file, file.exists()); return null; } errorCallBack.done(file, file.exists()); return null; } errorCallBack.done(file, file.exists()); } return null; } }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR); } public static void rename(final HFile oldFile, final HFile newFile, final boolean rootMode, final Context context, final ErrorCallBack errorCallBack) { new AsyncTask<Void, Void, Void>() { private DataUtils dataUtils = DataUtils.getInstance(); @Override protected Void doInBackground(Void... 
params) { // check whether file names for new file are valid or recursion occurs if (MainActivityHelper.isNewDirectoryRecursive(newFile) || !Operations.isFileNameValid(newFile.getName(context))) { errorCallBack.invalidName(newFile); return null; } if (newFile.exists()) { errorCallBack.exists(newFile); return null; } if (oldFile.isSmb()) { try { SmbFile smbFile = new SmbFile(oldFile.getPath()); SmbFile smbFile1 = new SmbFile(newFile.getPath()); if (smbFile1.exists()) { errorCallBack.exists(newFile); return null; } smbFile.renameTo(smbFile1); if (!smbFile.exists() && smbFile1.exists()) errorCallBack.done(newFile, true); } catch (MalformedURLException e) { e.printStackTrace(); } catch (SmbException e) { e.printStackTrace(); } return null; } else if (oldFile.isDropBoxFile()) { CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX); try { cloudStorageDropbox.move(CloudUtil.stripPath(OpenMode.DROPBOX, oldFile.getPath()), CloudUtil.stripPath(OpenMode.DROPBOX, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(newFile, false); } } else if (oldFile.isBoxFile()) { CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX); try { cloudStorageBox.move(CloudUtil.stripPath(OpenMode.BOX, oldFile.getPath()), CloudUtil.stripPath(OpenMode.BOX, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(newFile, false); } } else if (oldFile.isOneDriveFile()) { CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE); try { cloudStorageOneDrive.move(CloudUtil.stripPath(OpenMode.ONEDRIVE, oldFile.getPath()), CloudUtil.stripPath(OpenMode.ONEDRIVE, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(newFile, false); } } else if (oldFile.isGoogleDriveFile()) { CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE); try { 
cloudStorageGdrive.move(CloudUtil.stripPath(OpenMode.GDRIVE, oldFile.getPath()), CloudUtil.stripPath(OpenMode.GDRIVE, newFile.getPath())); errorCallBack.done(newFile, true); } catch (Exception e) { e.printStackTrace(); errorCallBack.done(newFile, false); } } else if (oldFile.isOtgFile()) { DocumentFile oldDocumentFile = OTGUtil.getDocumentFile(oldFile.getPath(), context, false); DocumentFile newDocumentFile = OTGUtil.getDocumentFile(newFile.getPath(), context, false); if (newDocumentFile != null) { errorCallBack.exists(newFile); return null; } errorCallBack.done(newFile, oldDocumentFile.renameTo(newFile.getName(context))); return null; } else { File file = new File(oldFile.getPath()); File file1 = new File(newFile.getPath()); switch (oldFile.getMode()) { case FILE: int mode = checkFolder(file.getParentFile(), context); if (mode == 2) { errorCallBack.launchSAF(oldFile, newFile); } else if (mode == 1 || mode == 0) { try { FileUtil.renameFolder(file, file1, context); } catch (RootNotPermittedException e) { e.printStackTrace(); } boolean a = !file.exists() && file1.exists(); if (!a && rootMode) { try { RootUtils.rename(file.getPath(), file1.getPath()); } catch (Exception e) { Logger.log(e, oldFile.getPath() + "\n" + newFile.getPath(), context); } oldFile.setMode(OpenMode.ROOT); newFile.setMode(OpenMode.ROOT); a = !file.exists() && file1.exists(); } errorCallBack.done(newFile, a); return null; } break; case ROOT: try { RootUtils.rename(file.getPath(), file1.getPath()); } catch (Exception e) { Logger.log(e, oldFile.getPath() + "\n" + newFile.getPath(), context); } newFile.setMode(OpenMode.ROOT); errorCallBack.done(newFile, true); break; } } return null; } }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR); } private static int checkFolder(final File folder, Context context) { boolean lol = Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP; if (lol) { boolean ext = FileUtil.isOnExtSdCard(folder, context); if (ext) { if (!folder.exists() || !folder.isDirectory()) { 
return 0; } // On Android 5, trigger storage access framework. if (!FileUtil.isWritableNormalOrSaf(folder, context)) { return 2; } return 1; } } else if (Build.VERSION.SDK_INT == 19) { // Assume that Kitkat workaround works if (FileUtil.isOnExtSdCard(folder, context)) return 1; } // file not on external sd card if (FileUtil.isWritable(new File(folder, "DummyFile"))) { return 1; } else { return 0; } } /** * Well, we wouldn't want to copy when the target is inside the source * otherwise it'll end into a loop * * @param sourceFile * @param targetFile * @return true when copy loop is possible */ public static boolean isCopyLoopPossible(BaseFile sourceFile, HFile targetFile) { return targetFile.getPath().contains(sourceFile.getPath()); } /** * Validates file name * special reserved characters shall not be allowed in the file names on FAT filesystems * * @param fileName the filename, not the full path! * @return boolean if the file name is valid or invalid */ public static boolean isFileNameValid(String fileName) { //String fileName = builder.substring(builder.lastIndexOf("/")+1, builder.length()); // TODO: check file name validation only for FAT filesystems return !(fileName.contains(ASTERISK) || fileName.contains(BACKWARD_SLASH) || fileName.contains(COLON) || fileName.contains(FOREWARD_SLASH) || fileName.contains(GREATER_THAN) || fileName.contains(LESS_THAN) || fileName.contains(QUESTION_MARK) || fileName.contains(QUOTE)); } private static boolean isFileSystemFAT(String mountPoint) { String[] args = new String[]{"/bin/bash", "-c", "df -DO_NOT_REPLACE | awk '{print $1,$2,$NF}' | grep \"^" + mountPoint + "\""}; try { Process proc = new ProcessBuilder(args).start(); OutputStream outputStream = proc.getOutputStream(); String buffer = null; outputStream.write(buffer.getBytes()); return buffer != null && buffer.contains(FAT); } catch (IOException e) { e.printStackTrace(); // process interrupted, returning true, as a word of cation return true; } } }<|fim▁end|>
cloudStorageGdrive.upload(CloudUtil.stripPath(OpenMode.GDRIVE, file.getPath()),
<|file_name|>main.module.ts<|end_file_name|><|fim▁begin|>import * as akala from '@akala/core' import * as ac from '@akala/commands' import * as web from '@domojs/theme-default' const deviceMetaContainer: ac.Metadata.Container = require('../../device-commands.json') const deviceTypeMetaContainer: ac.Metadata.Container = require('../../devicetype-commands.json') <|fim▁hole|> return resolveUrl('/api/devices/' + url); }) mdule.register('container', ac.proxy(deviceMetaContainer, new ac.Processors.HttpClient(mdule as any))); web.bootstrap.addDependency(mdule); export default mdule; export const mdule2 = akala.module('@domojs/devices/type', '@domojs/theme-default'); mdule2.register('$resolveUrl', (url: string) => { return resolveUrl('/api/devices/types/' + url); }) mdule2.register('container', ac.proxy(deviceTypeMetaContainer, new ac.Processors.HttpClient(mdule2 as any))); web.bootstrap.addDependency(mdule2);<|fim▁end|>
export const mdule = akala.module('@domojs/devices', '@domojs/devices/type'); const resolveUrl = mdule.resolve('$resolveUrl'); mdule.register('$resolveUrl', (url: string) => {
<|file_name|>rd2wgs84_test.go<|end_file_name|><|fim▁begin|>package rd2wgs84 import ( "testing" ) var parseTests = []struct {<|fim▁hole|>} func TestConvert(t *testing.T) { for i, tt := range parseTests { wgs := Convert(tt.in.X, tt.in.Y) if wgs.Latitude != tt.out.Latitude || wgs.Longitude != tt.out.Longitude { t.Errorf("%d. Convert(%f, %f) => %+v returned, expected %+v", i, tt.in.X, tt.in.Y, wgs, tt.out) } } }<|fim▁end|>
in RD out *WGS84 }{ {RD{163835.370083, 446830.763585}, &WGS84{52.00977421758342, 5.515894213047998}},
<|file_name|>logistic.py<|end_file_name|><|fim▁begin|>""" Logistic Regression """ # Author: Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <f@bianp.net> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Manoj Kumar <manojkumarsivaraj334@gmail.com> import numbers import warnings import numpy as np from scipy import optimize, sparse from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator from ..feature_selection.from_model import _LearntSelectorMixin from ..preprocessing import LabelEncoder from ..svm.base import BaseLibLinear from ..utils import check_array, check_consistent_length, compute_class_weight from ..utils.extmath import log_logistic, safe_sparse_dot from ..utils.optimize import newton_cg from ..utils.validation import as_float_array, DataConversionWarning from ..utils.fixes import expit from ..externals.joblib import Parallel, delayed from ..cross_validation import _check_cv from ..externals import six from ..metrics import SCORERS # .. some helper functions for logistic_regression_path .. def _intercept_dot(w, X, y): """Computes y * np.dot(X, w). It takes into consideration if the intercept should be fit or not. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. """ c = 0. if w.size == X.shape[1] + 1: c = w[-1] w = w[:-1] z = safe_sparse_dot(X, w) + c return w, c, y * z def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None): """Computes the logistic loss and gradient. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. 
sample_weight : ndarray, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- out : float Logistic loss. grad : ndarray, shape (n_features,) or (n_features + 1,) Logistic gradient. """ _, n_features = X.shape grad = np.empty_like(w) w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w) z = expit(yz) z0 = sample_weight * (z - 1) * y grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w # Case where we fit the intercept. if grad.shape[0] > n_features: grad[-1] = z0.sum() return out, grad def _logistic_loss(w, X, y, alpha, sample_weight=None): """Computes the logistic loss. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : ndarray, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- out : float Logistic loss. """ w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w) return out def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None): """Computes the logistic loss, gradient and the Hessian. Parameters ---------- w : ndarray, shape (n_features,) or (n_features + 1,) Coefficient vector. X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray, shape (n_samples,) Array of labels. 
alpha : float Regularization parameter. alpha is equal to 1 / C. sample_weight : ndarray, shape (n_samples,) optional Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- out : float Logistic loss. grad : ndarray, shape (n_features,) or (n_features + 1,) Logistic gradient. Hs : callable Function that takes the gradient as a parameter and returns the matrix product of the Hessian and gradient. """ n_samples, n_features = X.shape grad = np.empty_like(w) fit_intercept = grad.shape[0] > n_features w, c, yz = _intercept_dot(w, X, y) if sample_weight is None: sample_weight = np.ones(y.shape[0]) # Logistic loss is the negative of the log of the logistic function. out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w) z = expit(yz) z0 = sample_weight * (z - 1) * y grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w # Case where we fit the intercept. if fit_intercept: grad[-1] = z0.sum() # The mat-vec product of the Hessian d = sample_weight * z * (1 - z) if sparse.issparse(X): dX = safe_sparse_dot(sparse.dia_matrix((d, 0), shape=(n_samples, n_samples)), X) else: # Precompute as much as possible dX = d[:, np.newaxis] * X if fit_intercept: # Calculate the double derivative with respect to intercept # In the case of sparse matrices this returns a matrix object. dd_intercept = np.squeeze(np.array(dX.sum(axis=0))) def Hs(s): ret = np.empty_like(s) ret[:n_features] = X.T.dot(dX.dot(s[:n_features])) ret[:n_features] += alpha * s[:n_features] # For the fit intercept case. 
if fit_intercept: ret[:n_features] += s[-1] * dd_intercept ret[-1] = dd_intercept.dot(s[:n_features]) ret[-1] += d.sum() * s[-1] return ret return out, grad, Hs def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100, tol=1e-4, verbose=0, solver='lbfgs', coef=None, copy=True, class_weight=None, dual=False, penalty='l2', intercept_scaling=1.): """Compute a Logistic Regression model for a list of regularization parameters. This is an implementation that uses the result of the previous model to speed up computations along the set of solutions, making it faster than sequentially calling LogisticRegression for the different parameters. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) Input data, target values. Cs : int | array-like, shape (n_cs,) List of values for the regularization parameter or integer specifying the number of regularization parameters that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. fit_intercept : bool Whether to fit an intercept for the model. In this case the shape of the returned array is (n_cs, n_features + 1). max_iter : int Maximum number of iterations for the solver. tol : float Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. verbose : int Print convergence message if True. solver : {'lbfgs', 'newton-cg', 'liblinear'} Numerical solver to use. coef : array-like, shape (n_features,), default None Initialization value for coefficients of logistic regression. copy : bool, default True Whether or not to produce a copy of the data. 
Setting this to True will be useful in cases, when logistic_regression_path is called repeatedly with the same data, as y is modified along the path. class_weight : {dict, 'auto'}, optional Over-/undersamples the samples of each class according to the given weights. If not given, all classes are supposed to have weight one. The 'auto' mode selects weights inversely proportional to class frequencies in the training set. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The newton-cg and lbfgs solvers support only l2 penalties. intercept_scaling : float, default 1. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. Cs : ndarray Grid of Cs used for cross-validation. Notes ----- You might get slighly different results with the solver liblinear than with the others since this uses LIBLINEAR which penalizes the intercept. 
""" if isinstance(Cs, numbers.Integral): Cs = np.logspace(-4, 4, Cs) X = check_array(X, accept_sparse='csc', dtype=np.float64) y = check_array(y, ensure_2d=False, copy=copy) check_consistent_length(X, y) n_classes = np.unique(y) if pos_class is None: if (n_classes.size > 2): raise ValueError('To fit OvA, use the pos_class argument') # np.unique(y) gives labels in sorted order. pos_class = n_classes[1] # If class_weights is a dict (provided by the user), the weights # are assigned to the original labels. If it is "auto", then # the class_weights are assigned after masking the labels with a OvA. sample_weight = np.ones(X.shape[0]) le = LabelEncoder() if isinstance(class_weight, dict): if solver == "liblinear": if n_classes.size == 2: # Reconstruct the weights with keys 1 and -1 temp = {} temp[1] = class_weight[pos_class] temp[-1] = class_weight[n_classes[0]] class_weight = temp.copy() else: raise ValueError("In LogisticRegressionCV the liblinear " "solver cannot handle multiclass with " "class_weight of type dict. Use the lbfgs, " "newton-cg solvers or set " "class_weight='auto'") else: class_weight_ = compute_class_weight(class_weight, n_classes, y) sample_weight = class_weight_[le.fit_transform(y)] mask = (y == pos_class) y[mask] = 1 y[~mask] = -1 # To take care of object dtypes y = as_float_array(y, copy=False) if class_weight == "auto": class_weight_ = compute_class_weight(class_weight, [-1, 1], y) sample_weight = class_weight_[le.fit_transform(y)] if fit_intercept: w0 = np.zeros(X.shape[1] + 1) else: w0 = np.zeros(X.shape[1]) if coef is not None: # it must work both giving the bias term and not if not coef.size in (X.shape[1], w0.size): raise ValueError('Initialization coef is not of correct shape') w0[:coef.size] = coef coefs = list() for C in Cs: if solver == 'lbfgs': func = _logistic_loss_and_grad try: out = optimize.fmin_l_bfgs_b( func, w0, fprime=None, args=(X, y, 1. 
/ C, sample_weight), iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter) except TypeError: # old scipy doesn't have maxiter out = optimize.fmin_l_bfgs_b( func, w0, fprime=None, args=(X, y, 1. / C, sample_weight), iprint=(verbose > 0) - 1, pgtol=tol) w0 = out[0] if out[2]["warnflag"] == 1: warnings.warn("lbfgs failed to converge. Increase the number " "of iterations.") elif solver == 'newton-cg': grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1] w0 = newton_cg(_logistic_loss_grad_hess, _logistic_loss, grad, w0, args=(X, y, 1. / C, sample_weight), maxiter=max_iter, tol=tol) elif solver == 'liblinear': lr = LogisticRegression(C=C, fit_intercept=fit_intercept, tol=tol, class_weight=class_weight, dual=dual, penalty=penalty, intercept_scaling=intercept_scaling) lr.fit(X, y) if fit_intercept: w0 = np.concatenate([lr.coef_.ravel(), lr.intercept_]) else: w0 = lr.coef_.ravel() else: raise ValueError("solver must be one of {'liblinear', 'lbfgs', " "'newton-cg'}, got '%s' instead" % solver) coefs.append(w0) return coefs, np.array(Cs) # helper function for LogisticCV def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10, scoring=None, fit_intercept=False, max_iter=100, tol=1e-4, class_weight=None, verbose=0, solver='lbfgs', penalty='l2', dual=False, copy=True, intercept_scaling=1.): """Computes scores across logistic_regression_path Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target labels. train : list of indices The indices of the train set. test : list of indices The indices of the test set. pos_class : int, None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. Cs : list of floats | int Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. 
If not provided, then a fixed set of values for Cs are used. scoring : callable For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is accuracy_score. fit_intercept : bool If False, then the bias term is set to zero. Else the last term of each coef_ gives us the intercept. max_iter : int Maximum number of iterations for the solver. tol : float Tolerance for stopping criteria. class_weight : {dict, 'auto'}, optional Over-/undersamples the samples of each class according to the given weights. If not given, all classes are supposed to have weight one. The 'auto' mode selects weights inversely proportional to class frequencies in the training set. verbose : int Amount of verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear'} Decides which solver to use. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The newton-cg and lbfgs solvers support only l2 penalties. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. intercept_scaling : float, default 1. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. Returns ------- coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. 
Cs : ndarray Grid of Cs used for cross-validation. scores : ndarray, shape (n_cs,) Scores obtained for each Cs. """ log_reg = LogisticRegression(fit_intercept=fit_intercept) log_reg._enc = LabelEncoder() log_reg._enc.fit_transform([-1, 1]) X_train = X[train] X_test = X[test] y_train = y[train] y_test = y[test] if pos_class is not None: mask = (y_test == pos_class) y_test[mask] = 1 y_test[~mask] = -1 # To deal with object dtypes, we need to convert into an array of floats. y_test = as_float_array(y_test, copy=False) coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, class_weight=class_weight, copy=copy, pos_class=pos_class, tol=tol, verbose=verbose, dual=dual, penalty=penalty, intercept_scaling=intercept_scaling) scores = list() if isinstance(scoring, six.string_types): scoring = SCORERS[scoring] for w in coefs: if fit_intercept: log_reg.coef_ = w[np.newaxis, :-1] log_reg.intercept_ = w[-1] else: log_reg.coef_ = w[np.newaxis, :] log_reg.intercept_ = 0. if scoring is None: scores.append(log_reg.score(X_test, y_test)) else: scores.append(scoring(log_reg, X_test, y_test)) return coefs, Cs, np.array(scores) class LogisticRegression(BaseLibLinear, LinearClassifierMixin, _LearntSelectorMixin, SparseCoefMixin): """Logistic Regression (aka logit, MaxEnt) classifier. In the multiclass case, the training algorithm uses a one-vs.-all (OvA) scheme, rather than the "true" multinomial LR. This class implements regularized logistic regression using the `liblinear` library, newton-cg and lbfgs solvers. It can handle both dense and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit floats for optimal performance; any other input format will be converted (and copied). The newton-cg and lbfgs solvers support only L2 regularization with primal formulation. The liblinear solver supports both L1 and L2 regularization, with a dual formulation only for the L2 penalty. 
Parameters ---------- penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The newton-cg and lbfgs solvers support only l2 penalties. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. C : float, optional (default=1.0) Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. fit_intercept : bool, default: True Specifies if a constant (a.k.a. bias or intercept) should be added the decision function. intercept_scaling : float, default: 1 when self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. class_weight : {dict, 'auto'}, optional Over-/undersamples the samples of each class according to the given weights. If not given, all classes are supposed to have weight one. The 'auto' mode selects weights inversely proportional to class frequencies in the training set. max_iter : int Useful only for the newton-cg and lbfgs solvers. Maximum number of iterations taken for the solvers to converge. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. solver : {'newton-cg', 'lbfgs', 'liblinear'} Algorithm to use in the optimization problem. tol : float, optional Tolerance for stopping criteria. Attributes ---------- coef_ : array, shape (n_classes, n_features) Coefficient of the features in the decision function. 
intercept_ : array, shape (n_classes,) Intercept (a.k.a. bias) added to the decision function. If `fit_intercept` is set to False, the intercept is set to zero. n_iter_ : int Maximum of the actual number of iterations across all classes. Valid only for the liblinear solver. See also -------- SGDClassifier : incrementally trained logistic regression (when given the parameter ``loss="log"``). sklearn.svm.LinearSVC : learns SVM models using the same algorithm. Notes ----- The underlying C implementation uses a random number generator to select features when fitting the model. It is thus not uncommon, to have slightly different results for the same input data. If that happens, try with a smaller tol parameter. References: LIBLINEAR -- A Library for Large Linear Classification http://www.csie.ntu.edu.tw/~cjlin/liblinear/ Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent methods for logistic regression and maximum entropy models. Machine Learning 85(1-2):41-75. http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf """ def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='liblinear', max_iter=100): super(LogisticRegression, self).__init__( penalty=penalty, dual=dual, loss='lr', tol=tol, C=C, fit_intercept=fit_intercept, intercept_scaling=intercept_scaling, class_weight=class_weight, random_state=random_state, solver=solver, max_iter=max_iter) def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by the label of classes. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ return self._predict_proba_lr(X) def predict_log_proba(self, X): """Log of probability estimates. 
The returned estimates for all classes are ordered by the label of classes. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in ``self.classes_``. """ return np.log(self.predict_proba(X)) class LogisticRegressionCV(LogisticRegression, BaseEstimator, LinearClassifierMixin, _LearntSelectorMixin): """Logistic Regression CV (aka logit, MaxEnt) classifier. This class implements logistic regression using liblinear, newton-cg or LBFGS optimizer. The newton-cg and lbfgs solvers support only L2 regularization with primal formulation. The liblinear solver supports both L1 and L2 regularization, with a dual formulation only for the L2 penalty. For the grid of Cs values (that are set by default to be ten values in a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is selected by the cross-validator StratifiedKFold, but it can be changed using the cv parameter. In the case of newton-cg and lbfgs solvers, we warm start along the path i.e guess the initial coefficients of the present fit to be the coefficients got after convergence in the previous fit, so in general it is supposed to be faster. For a multiclass problem, the hyperparameters for each class are computed using the best scores got by doing a one-vs-rest in parallel across all folds and classes. Hence this is not the true multinomial loss. Parameters ---------- Cs : list of floats | int Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. Like in support vector machines, smaller values specify stronger regularization. fit_intercept : bool, default: True Specifies if a constant (a.k.a. bias or intercept) should be added the decision function. 
class_weight : {dict, 'auto'}, optional Over-/undersamples the samples of each class according to the given weights. If not given, all classes are supposed to have weight one. The 'auto' mode selects weights inversely proportional to class<|fim▁hole|> cv : integer or cross-validation generator The default cross-validation generator used is Stratified K-Folds. If an integer is provided, then it is the number of folds used. See the module :mod:`sklearn.cross_validation` module for the list of possible cross-validation objects. penalty : str, 'l1' or 'l2' Used to specify the norm used in the penalization. The newton-cg and lbfgs solvers support only l2 penalties. dual : bool Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. scoring : callabale Scoring function to use as cross-validation criteria. For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is accuracy_score. solver : {'newton-cg', 'lbfgs', 'liblinear'} Algorithm to use in the optimization problem. tol : float, optional Tolerance for stopping criteria. max_iter : int, optional Maximum number of iterations of the optimization algorithm. class_weight : {dict, 'auto'}, optional Over-/undersamples the samples of each class according to the given weights. If not given, all classes are supposed to have weight one. The 'auto' mode selects weights inversely proportional to class frequencies in the training set. n_jobs : int, optional Number of CPU cores used during the cross-validation loop. If given a value of -1, all cores are used. verbose : bool | int Amount of verbosity. refit : bool If set to True, the scores are averaged across all folds, and the coefs and the C that corresponds to the best score is taken, and a final refit is done using these parameters. Otherwise the coefs, intercepts and C that correspond to the best scores across folds are averaged. 
intercept_scaling : float, default 1. This parameter is useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. Attributes ---------- coef_ : array, shape (1, n_features) or (n_classes, n_features) Coefficient of the features in the decision function. `coef_` is of shape (1, n_features) when the given problem is binary. `coef_` is readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape (1,) or (n_classes,) Intercept (a.k.a. bias) added to the decision function. It is available only when parameter intercept is set to True and is of shape(1,) when the problem is binary. Cs_ : array Array of C i.e. inverse of regularization parameter values used for cross-validation. coefs_paths_ : array, shape (n_folds, len(Cs_), n_features) or (n_folds, len(Cs_), n_features + 1) dict with classes as the keys, and the path of coefficients obtained during cross-validating across each fold and then across each Cs after doing an OvA for the corresponding class. Each dict value has shape (n_folds, len(Cs_), n_features) or (n_folds, len(Cs_), n_features + 1) depending on whether the intercept is fit or not. scores_ : dict dict with classes as the keys, and the values as the grid of scores obtained during cross-validating each fold, after doing an OvA for the corresponding class. Each dict value has shape (n_folds, len(Cs)) C_ : array, shape (n_classes,) or (n_classes - 1,) Array of C that maps to the best scores across every class. 
If refit is set to False, then for each class, the best C is the average of the C's that correspond to the best scores for each fold. See also -------- LogisticRegression """ def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False, penalty='l2', scoring=None, solver='lbfgs', tol=1e-4, max_iter=100, class_weight=None, n_jobs=1, verbose=False, refit=True, intercept_scaling=1.): self.Cs = Cs self.fit_intercept = fit_intercept self.cv = cv self.dual = dual self.penalty = penalty self.scoring = scoring self.tol = tol self.max_iter = max_iter self.class_weight = class_weight self.n_jobs = n_jobs self.verbose = verbose self.solver = solver self.refit = refit self.intercept_scaling = 1. def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target vector relative to X. Returns ------- self : object Returns self. """ if self.solver != 'liblinear': if self.penalty != 'l2': raise ValueError("newton-cg and lbfgs solvers support only " "l2 penalties.") if self.dual: raise ValueError("newton-cg and lbfgs solvers support only " "the primal form.") X = check_array(X, accept_sparse='csc', dtype=np.float64) y = check_array(y, ensure_2d=False) if y.ndim == 2 and y.shape[1] == 1: warnings.warn( "A column-vector y was passed when a 1d array was" " expected. 
Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning ) y = np.ravel(y) check_consistent_length(X, y) # init cross-validation generator cv = _check_cv(self.cv, X, y, classifier=True) folds = list(cv) self._enc = LabelEncoder() self._enc.fit(y) labels = self.classes_ n_classes = len(labels) if n_classes < 2: raise ValueError("Number of classes have to be greater than one.") if n_classes == 2: # OvA in case of binary problems is as good as fitting # the higher label n_classes = 1 labels = labels[1:] if self.class_weight and not(isinstance(self.class_weight, dict) or self.class_weight == 'auto'): raise ValueError("class_weight provided should be a " "dict or 'auto'") path_func = delayed(_log_reg_scoring_path) fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( path_func(X, y, train, test, pos_class=label, Cs=self.Cs, fit_intercept=self.fit_intercept, penalty=self.penalty, dual=self.dual, solver=self.solver, max_iter=self.max_iter, tol=self.tol, class_weight=self.class_weight, verbose=max(0, self.verbose - 1), scoring=self.scoring, intercept_scaling=self.intercept_scaling) for label in labels for train, test in folds) coefs_paths, Cs, scores = zip(*fold_coefs_) self.Cs_ = Cs[0] coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds), len(self.Cs_), -1)) self.coefs_paths_ = dict(zip(labels, coefs_paths)) scores = np.reshape(scores, (n_classes, len(folds), -1)) self.scores_ = dict(zip(labels, scores)) self.C_ = list() self.coef_ = list() self.intercept_ = list() for label in labels: scores = self.scores_[label] coefs_paths = self.coefs_paths_[label] if self.refit: best_index = scores.sum(axis=0).argmax() C_ = self.Cs_[best_index] self.C_.append(C_) coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) w, _ = logistic_regression_path( X, y, pos_class=label, Cs=[C_], solver=self.solver, fit_intercept=self.fit_intercept, coef=coef_init, max_iter=self.max_iter, tol=self.tol, 
class_weight=self.class_weight, verbose=max(0, self.verbose - 1)) w = w[0] else: # Take the best scores across every fold and the average of all # coefficients corresponding to the best scores. best_indices = np.argmax(scores, axis=1) w = np.mean([ coefs_paths[i][best_indices[i]] for i in range(len(folds)) ], axis=0) self.C_.append(np.mean(self.Cs_[best_indices])) if self.fit_intercept: self.coef_.append(w[:-1]) self.intercept_.append(w[-1]) else: self.coef_.append(w) self.intercept_.append(0.) self.C_ = np.asarray(self.C_) self.coef_ = np.asarray(self.coef_) self.intercept_ = np.asarray(self.intercept_) return self<|fim▁end|>
frequencies in the training set.
<|file_name|>SearchResultsEntity_viewer.graphql.ts<|end_file_name|><|fim▁begin|>/* tslint:disable */ /* eslint-disable */ // @ts-nocheck import { ReaderFragment } from "relay-runtime"; import { FragmentRefs } from "relay-runtime"; export type SearchResultsEntity_viewer = { readonly searchConnection: { readonly pageInfo: { readonly hasNextPage: boolean; readonly endCursor: string | null; }; readonly pageCursors: { readonly " $fragmentRefs": FragmentRefs<"Pagination_pageCursors">; }; readonly edges: ReadonlyArray<{<|fim▁hole|> readonly description?: string | null; readonly displayLabel?: string | null; readonly href?: string | null; readonly internalID?: string; readonly imageUrl?: string | null; readonly displayType?: string | null; } | null; } | null> | null; } | null; readonly " $refType": "SearchResultsEntity_viewer"; }; export type SearchResultsEntity_viewer$data = SearchResultsEntity_viewer; export type SearchResultsEntity_viewer$key = { readonly " $data"?: SearchResultsEntity_viewer$data; readonly " $fragmentRefs": FragmentRefs<"SearchResultsEntity_viewer">; }; const node: ReaderFragment = { "argumentDefinitions": [ { "defaultValue": null, "kind": "LocalArgument", "name": "after" }, { "defaultValue": null, "kind": "LocalArgument", "name": "before" }, { "defaultValue": null, "kind": "LocalArgument", "name": "entities" }, { "defaultValue": 10, "kind": "LocalArgument", "name": "first" }, { "defaultValue": null, "kind": "LocalArgument", "name": "last" }, { "defaultValue": null, "kind": "LocalArgument", "name": "page" }, { "defaultValue": "", "kind": "LocalArgument", "name": "term" } ], "kind": "Fragment", "metadata": null, "name": "SearchResultsEntity_viewer", "selections": [ { "alias": null, "args": [ { "kind": "Variable", "name": "after", "variableName": "after" }, { "kind": "Variable", "name": "before", "variableName": "before" }, { "kind": "Variable", "name": "entities", "variableName": "entities" }, { "kind": "Variable", "name": "first", "variableName": 
"first" }, { "kind": "Variable", "name": "last", "variableName": "last" }, { "kind": "Variable", "name": "page", "variableName": "page" }, { "kind": "Variable", "name": "query", "variableName": "term" } ], "concreteType": "SearchableConnection", "kind": "LinkedField", "name": "searchConnection", "plural": false, "selections": [ { "alias": null, "args": null, "concreteType": "PageInfo", "kind": "LinkedField", "name": "pageInfo", "plural": false, "selections": [ { "alias": null, "args": null, "kind": "ScalarField", "name": "hasNextPage", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "endCursor", "storageKey": null } ], "storageKey": null }, { "alias": null, "args": null, "concreteType": "PageCursors", "kind": "LinkedField", "name": "pageCursors", "plural": false, "selections": [ { "args": null, "kind": "FragmentSpread", "name": "Pagination_pageCursors" } ], "storageKey": null }, { "alias": null, "args": null, "concreteType": "SearchableEdge", "kind": "LinkedField", "name": "edges", "plural": true, "selections": [ { "alias": null, "args": null, "concreteType": null, "kind": "LinkedField", "name": "node", "plural": false, "selections": [ { "kind": "InlineFragment", "selections": [ { "alias": null, "args": null, "kind": "ScalarField", "name": "description", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "displayLabel", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "href", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "internalID", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "imageUrl", "storageKey": null }, { "alias": null, "args": null, "kind": "ScalarField", "name": "displayType", "storageKey": null } ], "type": "SearchableItem", "abstractKey": null } ], "storageKey": null } ], "storageKey": null } ], "storageKey": null } ], "type": "Viewer", "abstractKey": null }; (node as 
any).hash = '5815db449614a1ba927017f63ab148bf'; export default node;<|fim▁end|>
readonly node: {
<|file_name|>location.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::codegen::Bindings::LocationBinding::LocationMethods; use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowBinding::WindowMethods; use crate::dom::bindings::error::{Error, ErrorResult, Fallible}; use crate::dom::bindings::inheritance::Castable; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::{Dom, DomRoot}; use crate::dom::bindings::str::USVString; use crate::dom::document::Document; use crate::dom::globalscope::GlobalScope; use crate::dom::urlhelper::UrlHelper; use crate::dom::window::Window; use dom_struct::dom_struct; use net_traits::request::Referrer; use script_traits::{HistoryEntryReplacement, LoadData, LoadOrigin}; use servo_url::{MutableOrigin, ServoUrl}; #[dom_struct] pub struct Location { reflector_: Reflector, window: Dom<Window>, } impl Location { fn new_inherited(window: &Window) -> Location { Location { reflector_: Reflector::new(), window: Dom::from_ref(window), } } pub fn new(window: &Window) -> DomRoot<Location> { reflect_dom_object(Box::new(Location::new_inherited(window)), window) } /// https://html.spec.whatwg.org/multipage/#location-object-navigate fn navigate( &self, url: ServoUrl, referrer: Referrer, replacement_flag: HistoryEntryReplacement, reload_triggered: bool, ) { let document = self.window.Document(); let referrer_policy = document.get_referrer_policy(); let pipeline_id = self.window.upcast::<GlobalScope>().pipeline_id(); let load_data = LoadData::new( LoadOrigin::Script(document.origin().immutable().clone()), url, Some(pipeline_id), Some(referrer), referrer_policy, ); // TODO: rethrow exceptions, set exceptions enabled flag. 
self.window .load_url(replacement_flag, reload_triggered, load_data); } fn get_url(&self) -> ServoUrl { self.window.get_url() } fn check_same_origin_domain(&self) -> ErrorResult { let this_document = self.window.Document(); if self .entry_document() .origin() .same_origin_domain(this_document.origin()) { Ok(()) } else { Err(Error::Security) } } fn entry_document(&self) -> DomRoot<Document> { GlobalScope::entry().as_window().Document() } // https://html.spec.whatwg.org/multipage/#dom-location-reload pub fn reload_without_origin_check(&self) { let url = self.get_url(); let referrer = Referrer::ReferrerUrl(url.clone()); self.navigate(url, referrer, HistoryEntryReplacement::Enabled, true); } #[allow(dead_code)] pub fn origin(&self) -> &MutableOrigin { self.window.origin() } } impl LocationMethods for Location { // https://html.spec.whatwg.org/multipage/#dom-location-assign fn Assign(&self, url: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not same // origin-domain with the entry settings object's origin, then throw a // "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Parse url relative to the entry settings object. If that failed, // throw a "SyntaxError" DOMException. let base_url = self.entry_document().url(); let url = match base_url.join(&url.0) { Ok(url) => url, Err(_) => return Err(Error::Syntax), }; // Step 4: Location-object navigate to the resulting URL record. 
let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(url, referrer, HistoryEntryReplacement::Disabled, false); } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-reload fn Reload(&self) -> ErrorResult { self.check_same_origin_domain()?; let url = self.get_url(); let referrer = Referrer::ReferrerUrl(url.clone()); self.navigate(url, referrer, HistoryEntryReplacement::Enabled, true); Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-replace fn Replace(&self, url: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: Parse url relative to the entry settings object. If that failed, // throw a "SyntaxError" DOMException. let base_url = self.entry_document().url(); let url = match base_url.join(&url.0) { Ok(url) => url, Err(_) => return Err(Error::Syntax), }; // Step 3: Location-object navigate to the resulting URL record with // the replacement flag set. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(url, referrer, HistoryEntryReplacement::Enabled, false); } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-hash fn GetHash(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Hash(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-hash fn SetHash(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: Let input be the given value with a single leading "#" removed, if any. // Step 5: Set copyURL's fragment to the empty string. 
// Step 6: Basic URL parse input, with copyURL as url and fragment state as // state override. copy_url.as_mut_url().set_fragment(match value.0.as_str() { "" => Some("#"), _ if value.0.starts_with('#') => Some(&value.0[1..]), _ => Some(&value.0), }); // Step 7: Location-object-setter navigate to copyURL. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-host fn GetHost(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Host(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-host fn SetHost(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: If copyURL's cannot-be-a-base-URL flag is set, terminate these steps. if !copy_url.cannot_be_a_base() { // Step 5: Basic URL parse the given value, with copyURL as url and host state // as state override. let _ = copy_url.as_mut_url().set_host(Some(&value.0)); // Step 6: Location-object-setter navigate to copyURL. 
let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-origin fn GetOrigin(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Origin(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-hostname fn GetHostname(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Hostname(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-hostname fn SetHostname(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: If copyURL's cannot-be-a-base-URL flag is set, terminate these steps. if !copy_url.cannot_be_a_base() { // Step 5: Basic URL parse the given value, with copyURL as url and hostname // state as state override. let _ = copy_url.as_mut_url().set_host(Some(&value.0)); // Step 6: Location-object-setter navigate to copyURL. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-href fn GetHref(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Href(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-href fn SetHref(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. 
if self.window.has_document() { // Note: no call to self.check_same_origin_domain() // Step 2: Parse the given value relative to the entry settings object. // If that failed, throw a TypeError exception. let base_url = self.entry_document().url(); let url = match base_url.join(&value.0) { Ok(url) => url, Err(e) => return Err(Error::Type(format!("Couldn't parse URL: {}", e))), }; // Step 3: Location-object-setter navigate to the resulting URL record. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(url, referrer, HistoryEntryReplacement::Disabled, false); } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-pathname fn GetPathname(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Pathname(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-pathname fn SetPathname(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return.<|fim▁hole|> self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: If copyURL's cannot-be-a-base-URL flag is set, terminate these steps. if !copy_url.cannot_be_a_base() { // Step 5: Set copyURL's path to the empty list. // Step 6: Basic URL parse the given value, with copyURL as url and path // start state as state override. copy_url.as_mut_url().set_path(&value.0); // Step 7: Location-object-setter navigate to copyURL. 
let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-port fn GetPort(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Port(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-port fn SetPort(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: If copyURL cannot have a username/password/port, then return. // https://url.spec.whatwg.org/#cannot-have-a-username-password-port if copy_url.host().is_some() && !copy_url.cannot_be_a_base() && copy_url.scheme() != "file" { // Step 5: If the given value is the empty string, then set copyURL's // port to null. // Step 6: Otherwise, basic URL parse the given value, with copyURL as url // and port state as state override. let _ = url::quirks::set_port(copy_url.as_mut_url(), &value.0); // Step 7: Location-object-setter navigate to copyURL. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-protocol fn GetProtocol(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Protocol(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-protocol fn SetProtocol(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. 
if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: Let possibleFailure be the result of basic URL parsing the given // value, followed by ":", with copyURL as url and scheme start state as // state override. let scheme = match value.0.find(':') { Some(position) => &value.0[..position], None => &value.0, }; if let Err(_) = copy_url.as_mut_url().set_scheme(scheme) { // Step 5: If possibleFailure is failure, then throw a "SyntaxError" DOMException. return Err(Error::Syntax); } // Step 6: If copyURL's scheme is not an HTTP(S) scheme, then terminate these steps. if copy_url.scheme().eq_ignore_ascii_case("http") || copy_url.scheme().eq_ignore_ascii_case("https") { // Step 7: Location-object-setter navigate to copyURL. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } } Ok(()) } // https://html.spec.whatwg.org/multipage/#dom-location-search fn GetSearch(&self) -> Fallible<USVString> { self.check_same_origin_domain()?; Ok(UrlHelper::Search(&self.get_url())) } // https://html.spec.whatwg.org/multipage/#dom-location-search fn SetSearch(&self, value: USVString) -> ErrorResult { // Step 1: If this Location object's relevant Document is null, then return. if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException. self.check_same_origin_domain()?; // Step 3: Let copyURL be a copy of this Location object's url. let mut copy_url = self.get_url(); // Step 4: If the given value is the empty string, set copyURL's query to null. 
// Step 5: Otherwise, run these substeps: // 1. Let input be the given value with a single leading "?" removed, if any. // 2. Set copyURL's query to the empty string. // 3. Basic URL parse input, with copyURL as url and query state as state // override, and the relevant Document's document's character encoding as // encoding override. copy_url.as_mut_url().set_query(match value.0.as_str() { "" => None, _ if value.0.starts_with('?') => Some(&value.0[1..]), _ => Some(&value.0), }); // Step 6: Location-object-setter navigate to copyURL. let referrer = Referrer::ReferrerUrl(self.get_url()); self.navigate(copy_url, referrer, HistoryEntryReplacement::Disabled, false); } Ok(()) } }<|fim▁end|>
if self.window.has_document() { // Step 2: If this Location object's relevant Document's origin is not // same origin-domain with the entry settings object's origin, then // throw a "SecurityError" DOMException.
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django import forms from django.contrib.auth.models import User from django.forms import ModelForm from django.db import models # Create your models here. #EDICION DE MODELO USER User.add_to_class('usuario_sico', models.CharField(max_length=10, null=False, blank=False)) User.add_to_class('contrasenia_sico', models.CharField(max_length=10, null=False, blank=False)) #User.add_to_class('amigos', models.ManyToManyField('self', symmetrical=True, blank=True)) #FORMULARIOS class SignUpForm(ModelForm): class Meta:<|fim▁hole|> widgets = { 'password': forms.PasswordInput(), 'contrasenia_sico': forms.PasswordInput(), }<|fim▁end|>
model = User fields = ['username', 'password', 'email', 'first_name', 'last_name', 'usuario_sico', 'contrasenia_sico']
<|file_name|>viewport.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use cssparser::Parser; use euclid::size::TypedSize2D; use media_queries::CSSErrorReporterTest; use servo_config::prefs::{PREFS, PrefValue}; use servo_url::ServoUrl; use style::media_queries::{Device, MediaType}; use style::parser::{Parse, ParserContext}; use style::shared_lock::SharedRwLock; use style::stylesheets::{Stylesheet, Origin}; use style::values::specified::LengthOrPercentageOrAuto::{self, Auto}; use style::values::specified::NoCalcLength::{self, ViewportPercentage}; use style::values::specified::ViewportPercentageLength::Vw; use style::viewport::*; use style_traits::PinchZoomFactor; use style_traits::viewport::*; macro_rules! stylesheet { ($css:expr, $origin:ident, $error_reporter:expr) => { stylesheet!($css, $origin, $error_reporter, SharedRwLock::new()) }; ($css:expr, $origin:ident, $error_reporter:expr, $shared_lock:expr) => { Box::new(Stylesheet::from_str( $css, ServoUrl::parse("http://localhost").unwrap(), Origin::$origin, Default::default(), $shared_lock, None, &$error_reporter )) } } fn test_viewport_rule<F>(css: &str, device: &Device, callback: F) where F: Fn(&Vec<ViewportDescriptorDeclaration>, &str) { PREFS.set("layout.viewport.enabled", PrefValue::Boolean(true)); let stylesheet = stylesheet!(css, Author, CSSErrorReporterTest); let mut rule_count = 0; stylesheet.effective_viewport_rules(&device, &stylesheet.shared_lock.read(), |rule| { rule_count += 1; callback(&rule.declarations, css); }); assert!(rule_count > 0); } fn test_meta_viewport<F>(meta: &str, callback: F) where F: Fn(&Vec<ViewportDescriptorDeclaration>, &str) { if let Some(mut rule) = ViewportRule::from_meta(meta) { // from_meta uses a hash-map to collect the declarations, so we need to // sort them in a stable order for the tests 
rule.declarations.sort_by(|a, b| { let a = a.descriptor.discriminant_value(); let b = b.descriptor.discriminant_value(); a.cmp(&b) }); callback(&rule.declarations, meta); } else { panic!("no @viewport rule for {}", meta); } } macro_rules! assert_decl_len { ($declarations:ident == 1) => { assert!($declarations.len() == 1, "expected 1 declaration; have {}: {:?})", $declarations.len(), $declarations) }; ($declarations:ident == $len:expr) => { assert!($declarations.len() == $len, "expected {} declarations; have {}: {:?})", $len, $declarations.len(), $declarations) } } macro_rules! viewport_length { ($value:expr, px) => { ViewportLength::Specified(LengthOrPercentageOrAuto::Length(NoCalcLength::from_px($value))) }; ($value:expr, vw) => { ViewportLength::Specified(LengthOrPercentageOrAuto::Length(ViewportPercentage(Vw($value)))) } } #[test] fn empty_viewport_rule() { let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); test_viewport_rule("@viewport {}", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 0); }); } macro_rules! 
assert_decl_eq { ($d:expr, $origin:ident, $expected:ident: $value:expr) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == false, "descriptor should not be !important"); }}; ($d:expr, $origin:ident, $expected:ident: $value:expr, !important) => {{ assert_eq!($d.origin, Origin::$origin); assert_eq!($d.descriptor, ViewportDescriptor::$expected($value)); assert!($d.important == true, "descriptor should be !important"); }}; } #[test] fn simple_viewport_rules() { let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); test_viewport_rule("@viewport { width: auto; height: auto;\ zoom: auto; min-zoom: 0; max-zoom: 200%;\ user-zoom: zoom; orientation: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 9); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[2], Author, MinHeight: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[3], Author, MaxHeight: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[4], Author, Zoom: Zoom::Auto); assert_decl_eq!(&declarations[5], Author, MinZoom: Zoom::Number(0.)); assert_decl_eq!(&declarations[6], Author, MaxZoom: Zoom::Percentage(2.)); assert_decl_eq!(&declarations[7], Author, UserZoom: UserZoom::Zoom); assert_decl_eq!(&declarations[8], Author, Orientation: Orientation::Auto); }); test_viewport_rule("@viewport { min-width: 200px; max-width: auto;\ min-height: 200px; max-height: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 4); assert_decl_eq!(&declarations[0], Author, MinWidth: viewport_length!(200., px)); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[2], Author, MinHeight: viewport_length!(200., px)); 
assert_decl_eq!(&declarations[3], Author, MaxHeight: ViewportLength::Specified(Auto)); }); } #[test] fn simple_meta_viewport_contents() { test_meta_viewport("width=500, height=600", |declarations, meta| { println!("{}", meta); assert_decl_len!(declarations == 4); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[1], Author, MaxWidth: viewport_length!(500., px)); assert_decl_eq!(&declarations[2], Author, MinHeight: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[3], Author, MaxHeight: viewport_length!(600., px)); }); test_meta_viewport("initial-scale=1.0", |declarations, meta| { println!("{}", meta); assert_decl_len!(declarations == 3); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[2], Author, Zoom: Zoom::Number(1.)); }); test_meta_viewport("initial-scale=2.0, height=device-width", |declarations, meta| { println!("{}", meta); assert_decl_len!(declarations == 5); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[2], Author, MinHeight: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[3], Author, MaxHeight: viewport_length!(100., vw)); assert_decl_eq!(&declarations[4], Author, Zoom: Zoom::Number(2.)); }); test_meta_viewport("width=480, initial-scale=2.0, user-scalable=1", |declarations, meta| { println!("{}", meta); assert_decl_len!(declarations == 4); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::ExtendToZoom); assert_decl_eq!(&declarations[1], Author, MaxWidth: viewport_length!(480., px)); assert_decl_eq!(&declarations[2], Author, Zoom: Zoom::Number(2.)); assert_decl_eq!(&declarations[3], Author, UserZoom: UserZoom::Zoom); }); } #[test] fn 
cascading_within_viewport_rule() { let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); <|fim▁hole|> test_viewport_rule("@viewport { min-width: 200px; min-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto)); }); // !important order of appearance test_viewport_rule("@viewport { min-width: 200px !important; min-width: auto !important; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto), !important); }); // !important vs normal test_viewport_rule("@viewport { min-width: auto !important; min-width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 1); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto), !important); }); // normal longhands vs normal shorthand test_viewport_rule("@viewport { min-width: 200px; max-width: 200px; width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto)); }); // normal shorthand vs normal longhands test_viewport_rule("@viewport { width: 200px; min-width: auto; max-width: auto; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto)); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto)); }); // one !important longhand vs normal shorthand test_viewport_rule("@viewport { min-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); 
assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto), !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: viewport_length!(200., px)); }); // both !important longhands vs normal shorthand test_viewport_rule("@viewport { min-width: auto !important; max-width: auto !important; width: 200px; }", &device, |declarations, css| { println!("{}", css); assert_decl_len!(declarations == 2); assert_decl_eq!(&declarations[0], Author, MinWidth: ViewportLength::Specified(Auto), !important); assert_decl_eq!(&declarations[1], Author, MaxWidth: ViewportLength::Specified(Auto), !important); }); } #[test] fn multiple_stylesheets_cascading() { PREFS.set("layout.viewport.enabled", PrefValue::Boolean(true)); let device = Device::new(MediaType::Screen, TypedSize2D::new(800., 600.)); let error_reporter = CSSErrorReporterTest; let shared_lock = SharedRwLock::new(); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px; min-height: 100px; zoom: 1; }", UserAgent, error_reporter, shared_lock.clone()), stylesheet!("@viewport { min-width: 200px; min-height: 200px; }", User, error_reporter, shared_lock.clone()), stylesheet!("@viewport { min-width: 300px; }", Author, error_reporter, shared_lock.clone()) ]; let declarations = Cascade::from_stylesheets(&stylesheets, &shared_lock.read(), &device).finish(); assert_decl_len!(declarations == 3); assert_decl_eq!(&declarations[0], UserAgent, Zoom: Zoom::Number(1.)); assert_decl_eq!(&declarations[1], User, MinHeight: viewport_length!(200., px)); assert_decl_eq!(&declarations[2], Author, MinWidth: viewport_length!(300., px)); let stylesheets = vec![ stylesheet!("@viewport { min-width: 100px !important; }", UserAgent, error_reporter, shared_lock.clone()), stylesheet!("@viewport { min-width: 200px !important; min-height: 200px !important; }", User, error_reporter, shared_lock.clone()), stylesheet!("@viewport { min-width: 300px !important; min-height: 300px !important; zoom: 3 !important; }", Author, 
error_reporter, shared_lock.clone()) ]; let declarations = Cascade::from_stylesheets(&stylesheets, &shared_lock.read(), &device).finish(); assert_decl_len!(declarations == 3); assert_decl_eq!(&declarations[0], UserAgent, MinWidth: viewport_length!(100., px), !important); assert_decl_eq!(&declarations[1], User, MinHeight: viewport_length!(200., px), !important); assert_decl_eq!(&declarations[2], Author, Zoom: Zoom::Number(3.), !important); } #[test] fn constrain_viewport() { let url = ServoUrl::parse("http://localhost").unwrap(); let reporter = CSSErrorReporterTest; let context = ParserContext::new(Origin::Author, &url, &reporter); macro_rules! from_css { ($css:expr) => { &ViewportRule::parse(&context, &mut Parser::new($css)).unwrap() } } let initial_viewport = TypedSize2D::new(800., 600.); let device = Device::new(MediaType::Screen, initial_viewport); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!("")), None); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: PinchZoomFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!("width: 320px auto")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: PinchZoomFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!("width: 800px; height: 600px;\ zoom: 1;\ user-zoom: zoom;\ orientation: auto;")), Some(ViewportConstraints { size: initial_viewport, initial_zoom: PinchZoomFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); let initial_viewport = TypedSize2D::new(200., 150.); let device = Device::new(MediaType::Screen, initial_viewport); assert_eq!(ViewportConstraints::maybe_new(&device, from_css!("width: 320px 
auto")), Some(ViewportConstraints { size: TypedSize2D::new(320., 240.), initial_zoom: PinchZoomFactor::new(1.), min_zoom: None, max_zoom: None, user_zoom: UserZoom::Zoom, orientation: Orientation::Auto })); }<|fim▁end|>
// normal order of appearance
<|file_name|>verify.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(globs, phase, macro_rules)] extern crate syntax; extern crate rustc; #[phase(link)] extern crate regex; #[phase(link, plugin)] extern crate log; #[phase(plugin)] extern crate regex_macros; use std::collections::HashMap; use std::io::File; use syntax::parse; use syntax::parse::lexer; use rustc::driver::{session, config}; use syntax::ast; use syntax::ast::Name; use syntax::parse::token::*; use syntax::parse::lexer::TokenAndSpan; fn parse_token_list(file: &str) -> HashMap<String, Token> { fn id() -> Token { IDENT(ast::Ident { name: Name(0), ctxt: 0, }, false) } let mut res = HashMap::new(); res.insert("-1".to_string(), EOF); for line in file.split('\n') { let eq = match line.trim().rfind('=') { Some(val) => val, None => continue }; let val = line.slice_to(eq); let num = line.slice_from(eq + 1); let tok = match val { "SHR" => BINOP(SHR), "DOLLAR" => DOLLAR, "LT" => LT, "STAR" => BINOP(STAR), "FLOAT_SUFFIX" => id(), "INT_SUFFIX" => id(), "SHL" => BINOP(SHL), "LBRACE" => LBRACE, "RARROW" => RARROW, "LIT_STR" => LIT_STR(Name(0)), "DOTDOT" => DOTDOT, "MOD_SEP" => MOD_SEP, "DOTDOTDOT" => DOTDOTDOT, "NOT" => NOT, "AND" => BINOP(AND), "LPAREN" => LPAREN, "ANDAND" => ANDAND, "AT" => AT, "LBRACKET" => LBRACKET, "LIT_STR_RAW" => LIT_STR_RAW(Name(0), 0), "RPAREN" => RPAREN, "SLASH" => BINOP(SLASH), "COMMA" => COMMA, "LIFETIME" => LIFETIME(ast::Ident { name: Name(0), ctxt: 0 }), "CARET" => BINOP(CARET), "TILDE" => TILDE, "IDENT" => id(), "PLUS" => BINOP(PLUS), 
"LIT_CHAR" => LIT_CHAR(Name(0)), "LIT_BYTE" => LIT_BYTE(Name(0)), "EQ" => EQ, "RBRACKET" => RBRACKET, "COMMENT" => COMMENT, "DOC_COMMENT" => DOC_COMMENT(Name(0)), "DOT" => DOT, "EQEQ" => EQEQ, "NE" => NE, "GE" => GE, "PERCENT" => BINOP(PERCENT),<|fim▁hole|> "BINOP" => BINOP(PLUS), "POUND" => POUND, "OROR" => OROR, "LIT_INTEGER" => LIT_INTEGER(Name(0)), "BINOPEQ" => BINOPEQ(PLUS), "LIT_FLOAT" => LIT_FLOAT(Name(0)), "WHITESPACE" => WS, "UNDERSCORE" => UNDERSCORE, "MINUS" => BINOP(MINUS), "SEMI" => SEMI, "COLON" => COLON, "FAT_ARROW" => FAT_ARROW, "OR" => BINOP(OR), "GT" => GT, "LE" => LE, "LIT_BINARY" => LIT_BINARY(Name(0)), "LIT_BINARY_RAW" => LIT_BINARY_RAW(Name(0), 0), _ => continue }; res.insert(num.to_string(), tok); } debug!("Token map: {}", res); res } fn str_to_binop(s: &str) -> BinOp { match s { "+" => PLUS, "/" => SLASH, "-" => MINUS, "*" => STAR, "%" => PERCENT, "^" => CARET, "&" => AND, "|" => OR, "<<" => SHL, ">>" => SHR, _ => fail!("Bad binop str `{}`", s) } } /// Assuming a string/binary literal, strip out the leading/trailing /// hashes and surrounding quotes/raw/binary prefix. fn fix(mut lit: &str) -> ast::Name { if lit.char_at(0) == 'r' { if lit.char_at(1) == 'b' { lit = lit.slice_from(2) } else { lit = lit.slice_from(1); } } else if lit.char_at(0) == 'b' { lit = lit.slice_from(1); } let leading_hashes = count(lit); // +1/-1 to adjust for single quotes parse::token::intern(lit.slice(leading_hashes + 1, lit.len() - leading_hashes - 1)) } /// Assuming a char/byte literal, strip the 'b' prefix and the single quotes. 
fn fixchar(mut lit: &str) -> ast::Name { if lit.char_at(0) == 'b' { lit = lit.slice_from(1); } parse::token::intern(lit.slice(1, lit.len() - 1)) } fn count(lit: &str) -> uint { lit.chars().take_while(|c| *c == '#').count() } fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan { let re = regex!( r"\[@(?P<seq>\d+),(?P<start>\d+):(?P<end>\d+)='(?P<content>.+?)',<(?P<toknum>-?\d+)>,\d+:\d+]" ); let m = re.captures(s).expect(format!("The regex didn't match {}", s).as_slice()); let start = m.name("start"); let end = m.name("end"); let toknum = m.name("toknum"); let content = m.name("content"); let proto_tok = tokens.find_equiv(&toknum).expect(format!("didn't find token {} in the map", toknum).as_slice()); let nm = parse::token::intern(content); debug!("What we got: content (`{}`), proto: {}", content, proto_tok); let real_tok = match *proto_tok { BINOP(..) => BINOP(str_to_binop(content)), BINOPEQ(..) => BINOPEQ(str_to_binop(content.slice_to(content.len() - 1))), LIT_STR(..) => LIT_STR(fix(content)), LIT_STR_RAW(..) => LIT_STR_RAW(fix(content), count(content)), LIT_CHAR(..) => LIT_CHAR(fixchar(content)), LIT_BYTE(..) => LIT_BYTE(fixchar(content)), DOC_COMMENT(..) => DOC_COMMENT(nm), LIT_INTEGER(..) => LIT_INTEGER(nm), LIT_FLOAT(..) => LIT_FLOAT(nm), LIT_BINARY(..) => LIT_BINARY(nm), LIT_BINARY_RAW(..) => LIT_BINARY_RAW(fix(content), count(content)), IDENT(..) => IDENT(ast::Ident { name: nm, ctxt: 0 }, true), LIFETIME(..) 
=> LIFETIME(ast::Ident { name: nm, ctxt: 0 }), ref t => t.clone() }; let offset = if real_tok == EOF { 1 } else { 0 }; let sp = syntax::codemap::Span { lo: syntax::codemap::BytePos(from_str::<u32>(start).unwrap() - offset), hi: syntax::codemap::BytePos(from_str::<u32>(end).unwrap() + 1), expn_info: None }; TokenAndSpan { tok: real_tok, sp: sp } } fn tok_cmp(a: &Token, b: &Token) -> bool { match a { &IDENT(id, _) => match b { &IDENT(id2, _) => id == id2, _ => false }, _ => a == b } } fn main() { fn next(r: &mut lexer::StringReader) -> TokenAndSpan { use syntax::parse::lexer::Reader; r.next_token() } let args = std::os::args(); let mut token_file = File::open(&Path::new(args.get(2).as_slice())); let token_map = parse_token_list(token_file.read_to_string().unwrap().as_slice()); let mut stdin = std::io::stdin(); let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(), &token_map)); let code = File::open(&Path::new(args.get(1).as_slice())).unwrap().read_to_string().unwrap(); let options = config::basic_options(); let session = session::build_session(options, None, syntax::diagnostics::registry::Registry::new([])); let filemap = parse::string_to_filemap(&session.parse_sess, code, String::from_str("<n/a>")); let mut lexer = lexer::StringReader::new(session.diagnostic(), filemap); for antlr_tok in antlr_tokens { let rustc_tok = next(&mut lexer); if rustc_tok.tok == EOF && antlr_tok.tok == EOF { continue } assert!(rustc_tok.sp == antlr_tok.sp, "{} and {} have different spans", rustc_tok, antlr_tok); macro_rules! 
matches ( ( $($x:pat),+ ) => ( match rustc_tok.tok { $($x => match antlr_tok.tok { $x => { if !tok_cmp(&rustc_tok.tok, &antlr_tok.tok) { // FIXME #15677: needs more robust escaping in // antlr warn!("Different names for {} and {}", rustc_tok, antlr_tok); } } _ => fail!("{} is not {}", antlr_tok, rustc_tok) },)* ref c => assert!(c == &antlr_tok.tok, "{} is not {}", rustc_tok, antlr_tok) } ) ) matches!(LIT_BYTE(..), LIT_CHAR(..), LIT_INTEGER(..), LIT_FLOAT(..), LIT_STR(..), LIT_STR_RAW(..), LIT_BINARY(..), LIT_BINARY_RAW(..), IDENT(..), LIFETIME(..), INTERPOLATED(..), DOC_COMMENT(..), SHEBANG(..) ); } }<|fim▁end|>
"RBRACE" => RBRACE,
<|file_name|>qPrefDisplay.cpp<|end_file_name|><|fim▁begin|>// SPDX-License-Identifier: GPL-2.0 #include "core/subsurface-string.h" #include "qPrefDisplay.h" #include "qPrefPrivate.h" #include <QApplication> #include <QFont> static const QString group = QStringLiteral("Display"); QPointF qPrefDisplay::st_tooltip_position; static const QPointF st_tooltip_position_default = QPointF(0,0); QString qPrefDisplay::st_lastDir; static const QString st_lastDir_default = ""; QString qPrefDisplay::st_theme; static const QString st_theme_default = "Blue"; QString qPrefDisplay::st_userSurvey; static const QString st_userSurvey_default = ""; QByteArray qPrefDisplay::st_mainSplitter; static const QByteArray st_mainSplitter_default = ""; QByteArray qPrefDisplay::st_topSplitter; static const QByteArray st_topSplitter_default = ""; QByteArray qPrefDisplay::st_bottomSplitter; static const QByteArray st_bottomSplitter_default = ""; bool qPrefDisplay::st_maximized; static bool st_maximized_default = false; QByteArray qPrefDisplay::st_geometry; static const QByteArray st_geometry_default = 0; QByteArray qPrefDisplay::st_windowState; static const QByteArray st_windowState_default = 0; int qPrefDisplay::st_lastState; static int st_lastState_default = false; qPrefDisplay::qPrefDisplay(QObject *parent) : QObject(parent) { } qPrefDisplay *qPrefDisplay::instance() { static qPrefDisplay *self = new qPrefDisplay; return self; } void qPrefDisplay::loadSync(bool doSync) { disk_animation_speed(doSync); disk_divelist_font(doSync); disk_font_size(doSync); disk_mobile_scale(doSync); disk_display_invalid_dives(doSync); disk_show_developer(doSync); if (!doSync) { load_tooltip_position(); load_theme(); load_userSurvey(); load_mainSplitter(); load_topSplitter(); load_bottomSplitter(); load_maximized(); load_geometry(); load_windowState(); load_lastState(); } } void qPrefDisplay::set_divelist_font(const QString &value) { QString newValue = value; if (value.contains(",")) newValue = 
value.left(value.indexOf(",")); if (newValue != prefs.divelist_font && !subsurface_ignore_font(qPrintable(newValue))) { qPrefPrivate::copy_txt(&prefs.divelist_font, value); disk_divelist_font(true); qApp->setFont(QFont(newValue)); emit instance()->divelist_fontChanged(value); } } void qPrefDisplay::disk_divelist_font(bool doSync) { if (doSync) qPrefPrivate::propSetValue(keyFromGroupAndName(group, "divelist_font"), prefs.divelist_font, default_prefs.divelist_font); else setCorrectFont(); } void qPrefDisplay::set_font_size(double value)<|fim▁hole|> if (!IS_FP_SAME(value, prefs.font_size)) { prefs.font_size = value; disk_font_size(true); QFont defaultFont = qApp->font(); defaultFont.setPointSizeF(prefs.font_size * prefs.mobile_scale); qApp->setFont(defaultFont); emit instance()->font_sizeChanged(value); } } void qPrefDisplay::disk_font_size(bool doSync) { // inverted logic compared to the other disk_xxx functions if (!doSync) setCorrectFont(); #if !defined(SUBSURFACE_MOBILE) // we never want to save the font_size to disk - we always want to grab that from the system default else qPrefPrivate::propSetValue(keyFromGroupAndName(group, "font_size"), prefs.font_size, default_prefs.font_size); #endif } void qPrefDisplay::set_mobile_scale(double value) { if (!IS_FP_SAME(value, prefs.mobile_scale)) { prefs.mobile_scale = value; disk_mobile_scale(true); QFont defaultFont = qApp->font(); defaultFont.setPointSizeF(prefs.font_size * prefs.mobile_scale); qApp->setFont(defaultFont); emit instance()->mobile_scaleChanged(value); emit instance()->font_sizeChanged(value); } } void qPrefDisplay::disk_mobile_scale(bool doSync) { if (doSync) { qPrefPrivate::propSetValue(keyFromGroupAndName(group, "mobile_scale"), prefs.mobile_scale, default_prefs.mobile_scale); } else { prefs.mobile_scale = qPrefPrivate::propValue(keyFromGroupAndName(group, "mobile_scale"), default_prefs.mobile_scale).toDouble(); setCorrectFont(); } } //JAN static const QString group = QStringLiteral("Animations"); 
HANDLE_PREFERENCE_INT(Display, "animation_speed", animation_speed); HANDLE_PREFERENCE_BOOL(Display, "displayinvalid", display_invalid_dives); HANDLE_PREFERENCE_BOOL(Display, "show_developer", show_developer); void qPrefDisplay::setCorrectFont() { // get the font from the settings or our defaults // respect the system default font size if none is explicitly set QFont defaultFont = qPrefPrivate::propValue(keyFromGroupAndName(group, "divelist_font"), prefs.divelist_font).value<QFont>(); if (IS_FP_SAME(system_divelist_default_font_size, -1.0)) { prefs.font_size = qApp->font().pointSizeF(); system_divelist_default_font_size = prefs.font_size; // this way we don't save it on exit } prefs.font_size = qPrefPrivate::propValue(keyFromGroupAndName(group, "font_size"), prefs.font_size).toFloat(); // painful effort to ignore previous default fonts on Windows - ridiculous QString fontName = defaultFont.toString(); if (fontName.contains(",")) fontName = fontName.left(fontName.indexOf(",")); if (subsurface_ignore_font(qPrintable(fontName))) { defaultFont = QFont(prefs.divelist_font); } else { free((void *)prefs.divelist_font); prefs.divelist_font = copy_qstring(fontName); } defaultFont.setPointSizeF(prefs.font_size * prefs.mobile_scale); qApp->setFont(defaultFont); prefs.display_invalid_dives = qPrefPrivate::propValue(keyFromGroupAndName(group, "displayinvalid"), default_prefs.display_invalid_dives).toBool(); } HANDLE_PROP_QSTRING(Display, "FileDialog/LastDir", lastDir); HANDLE_PROP_QSTRING(Display, "Theme/currentTheme", theme); HANDLE_PROP_QPOINTF(Display, "ProfileMap/tooltip_position", tooltip_position); HANDLE_PROP_QSTRING(Display, "UserSurvey/SurveyDone", userSurvey); HANDLE_PROP_QBYTEARRAY(Display, "MainWindow/mainSplitter", mainSplitter); HANDLE_PROP_QBYTEARRAY(Display, "MainWindow/topSplitter", topSplitter); HANDLE_PROP_QBYTEARRAY(Display, "MainWindow/bottomSplitter", bottomSplitter); HANDLE_PROP_BOOL(Display, "MainWindow/maximized", maximized); 
HANDLE_PROP_QBYTEARRAY(Display, "MainWindow/geometry", geometry); HANDLE_PROP_QBYTEARRAY(Display, "MainWindow/windowState", windowState); HANDLE_PROP_INT(Display, "MainWindow/lastState", lastState);<|fim▁end|>
{
<|file_name|>unwind-resource.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast use std::task; struct complainer { tx: Sender<bool>, } impl Drop for complainer { fn drop(&mut self) { println!("About to send!"); self.tx.send(true); println!("Sent!"); } } fn complainer(tx: Sender<bool>) -> complainer { println!("Hello!"); complainer { tx: tx } } fn f(tx: Sender<bool>) { let _tx = complainer(tx); fail!(); } pub fn main() { let (tx, rx) = channel(); task::spawn(proc() f(tx.clone())); println!("hiiiiiiiii"); assert!(rx.recv()); }<|fim▁end|>
<|file_name|>it.js<|end_file_name|><|fim▁begin|>// IT lang variables tinyMCE.addToLang('',{ bold_desc : 'Grassetto (Ctrl+B)', italic_desc : 'Corsivo (Ctrl+I)', underline_desc : 'Sottolineato (Ctrl+U)', striketrough_desc : 'Barrato', justifyleft_desc : 'Allinea a sinistra', justifycenter_desc : 'Allinea al centro', justifyright_desc : 'Allinea a destra', justifyfull_desc : 'Giustifica', bullist_desc : 'Elenco puntato', numlist_desc : 'Elenco numerato', outdent_desc : 'Riduci rientro', indent_desc : 'Aumenta rientro', undo_desc : 'Annulla (Ctrl+Z)', redo_desc : 'Ripeti (Ctrl+Y)', <|fim▁hole|>image_desc : 'Inserisci o modifica immagine', cleanup_desc : 'Pulisci il codice HTML', focus_alert : 'Fare clic su un\' istanza dell\'editor prima di eseguire questo comando', edit_confirm : 'Vuoi usare l\'editor visuale in quest\'area di testo?', insert_link_title : 'Inserisci o modifica link', insert : 'Inserisci', update : 'Modifica', cancel : 'Annulla', insert_link_url : 'URL del collegamento', insert_link_target : 'Destinazione', insert_link_target_same : 'Apri il link nella stessa finestra', insert_link_target_blank : 'Apri il link in una nuova finestra', insert_image_title : 'Inserisci o modifica immagine', insert_image_src : 'URL dell\'immagine', insert_image_alt : 'Descrizione', help_desc : 'Aiuto', bold_img : "bold.gif", italic_img : "italic.gif", underline_img : "underline.gif", clipboard_msg : 'Le operazioni di taglia, copia e incolla non sono disponibili in Firefox. Vuoi ricevere ulteriori informazioni al riguardo?', popup_blocked : 'Un blocco popup sta impedendo l\'utilizzo di alcune funzionalit&agrave;. Dovresti disabilitare il blocco per questo sito.', insert_image_delta_width : 50, insert_link_delta_width : 75 });<|fim▁end|>
link_desc : 'Inserisci o modifica link', unlink_desc : 'Elimina link',
<|file_name|>category-form.component.ts<|end_file_name|><|fim▁begin|>import { Component, Input, OnInit } from '@angular/core'; import { FormBuilder, Validators } from '@angular/forms'; import { Response } from '@angular/http'; import { CustomValidators } from 'ng2-validation'; import { MdDialogRef } from '@angular/material'; import { AbstractForm } from '../form/abstract-form'; import { CategoryModel } from '../category/category.model'; import { CategoryService } from '../category/category.service'; import { UserModel } from '../user/user.model';<|fim▁hole|>@Component({ selector: 'app-category-form', templateUrl: './category-form.component.html', styleUrls: ['./category-form.component.scss'] }) export class CategoryFormComponent extends AbstractForm implements OnInit { @Input() user: UserModel; @Input() category: CategoryModel; constructor( private formBuilder: FormBuilder, private categoryService: CategoryService, private dialogRef: MdDialogRef<CategoryFormComponent> ) { super(); } ngOnInit(): void { this.form = this.formBuilder.group({ name: [this.category.name, Validators.required], description: [this.category.description], rate: [this.category.rate, [ Validators.required, CustomValidators.number, CustomValidators.min(5) ]] }); } onSubmit(): void { super.onSubmit(); this.saveCategory(new CategoryModel( this.category.id, this.form.value.name, this.form.value.description, this.form.value.rate, null )); } private saveCategory(category: CategoryModel): void { this.categoryService.saveCategory(this.user, category) .do((category) => category.tasks = this.category.tasks) .subscribe( (category: CategoryModel) => { this.dialogRef.close(category); }, (response: Response) => { let errors; switch (response.status) { case 400: errors = response.json().errors; break; default: errors = { errors: [`${response.statusText ? response.statusText : 'Unknown Error'}.`] }; } this.setFormErrors(errors); } ); } }<|fim▁end|>
<|file_name|>SprintMapper.cpp<|end_file_name|><|fim▁begin|>/******************************************************************************** ** ** Copyright (C) 2016-2021 Pavel Pavlov. ** ** ** This file is part of SprintTimer. ** ** SprintTimer is free software: you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation, either version 3 of the License, or ** (at your option) any later version. ** ** SprintTimer is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with SprintTimer. If not, see <http://www.gnu.org/licenses/>. ** *********************************************************************************/ #include "core/use_cases/SprintMapper.h" namespace sprint_timer::use_cases { SprintDTO makeDTO(const entities::Sprint& sprint) { const auto& tagsEnt = sprint.tags(); std::vector<std::string> tags(tagsEnt.size()); std::transform(cbegin(tagsEnt), cend(tagsEnt),<|fim▁hole|> return SprintDTO{sprint.uuid(), sprint.taskUuid(), sprint.name(), tags, sprint.timeSpan()}; } entities::Sprint fromDTO(const SprintDTO& dto) { const auto& tagStr = dto.tags; std::list<entities::Tag> tags(tagStr.size()); std::transform(cbegin(tagStr), cend(tagStr), begin(tags), [](const auto& elem) { return entities::Tag{elem}; }); return entities::Sprint{ dto.taskName, dto.timeRange, tags, dto.uuid, dto.taskUuid}; } std::vector<SprintDTO> makeDTOs(const std::vector<entities::Sprint>& sprints) { std::vector<SprintDTO> res; res.reserve(sprints.size()); makeDTOs(cbegin(sprints), cend(sprints), std::back_inserter(res)); return res; } std::vector<entities::Sprint> fromDTOs(const std::vector<SprintDTO>& dtos) { std::vector<entities::Sprint> 
res; res.reserve(dtos.size()); fromDTOs(cbegin(dtos), cend(dtos), std::back_inserter(res)); return res; } } // namespace sprint_timer::use_cases<|fim▁end|>
begin(tags), [](const auto& elem) { return elem.name(); });
<|file_name|>lib.js<|end_file_name|><|fim▁begin|>//Scroll View function scrollHandler(view, hscrollbar, vscrollbar){ this.view= view; this.view.addEventListener('DOMMouseScroll', this, false); this.view.addEventListener('mousewheel', this, false); this.hListeners= []; this.vListeners= []; if (hscrollbar != undefined){ this.hscrollbar= hscrollbar; this.hscrollbar.addEventListener('scroll', this, false); } if (vscrollbar != undefined){ this.vscrollbar= vscrollbar; this.vscrollbar.addEventListener('scroll', this, false); } }; scrollHandler.prototype.addListener= function(axis, listener){ if (axis == 1 || axis == 'x' || axis == 'h') this.hListeners.push(listener); if (axis == 2 || axis == 'y' || axis == 'v') this.vListeners.push(listener); }; scrollHandler.prototype.handleEvent= function(event){ switch(event.type) { case 'scroll': if (this.hscrollbar != undefined) { view.style.left= -this.hscrollbar.scrollLeft + "px"; for(var listener=0; listener < this.hListeners.length; listener++){ this.hListeners[listener].OnHScroll(event.target.id); } } if (this.vscrollbar != undefined) { view.style.top= -this.vscrollbar.scrollTop + "px"; for(var listener=0; listener < this.vListeners.length; listener++){ this.vListeners[listener].OnVScroll(event.target.id); } } break; case 'DOMMouseScroll': // Firefox: event.axis= 1-x,2-y; event.detail= ticks. this.scrollWheel(event.axis, event.detail*4); event.preventDefault(); break; case 'mousewheel': <|fim▁hole|> break; } }; scrollHandler.prototype.scrollWheel= function(axis, delta){ switch (axis){ case 1: // X axis if (this.hscrollbar != undefined) { this.hscrollbar.scrollLeft= this.hscrollbar.scrollLeft + delta; //TODO: Test for limits. 0 < scrollLeft < scrollsizewidth } break; case 2: // Y axis if (this.vscrollbar != undefined) { this.vscrollbar.scrollTop= this.vscrollbar.scrollTop + delta; //TODO: Test for limits. 
0 < scrollTop < scrollsizeheight } break; } }; // Resize function resizeHandler(handgrip, listener){ this.dragging= false; this.dragginId= ''; this.listener= listener; this.initialPoint= {x:0, y:0}; handgrip.addEventListener('mousedown', this, false); handgrip.addEventListener('dragstart', this, false); handgrip.addEventListener('selectstart', this, false); window.addEventListener('mouseup', this, false); }; resizeHandler.prototype.handleEvent= function(event){ switch(event.type) { case 'dragstart': case 'selectstart': event.preventDefault(); break; case 'mousedown': this.dragging= true; this.initialPoint.x= event.clientX; this.initialPoint.y= event.clientY; this.draggingId= event.target.id; document.getElementsByTagName('body')[0].style.cursor= 'ew-resize'; //document.childNodes[0].style.cursor= 'ew-resize'; //'se-resize'; event.preventDefault(); break; case 'mouseup': if (this.dragging) { document.getElementsByTagName('body')[0].style.cursor= 'auto'; //document.childNodes[0].style.cursor= 'auto'; this.dragging= false; //TODO: En lugar de lanzar un evento, puede redimensionar la división padre. this.listener.OnResize(this.draggingId , event.clientX - this.initialPoint.x , event.clientY - this.initialPoint.y); event.preventDefault(); } break; } }; function bas_copyAttributes(src, dst){ for (var attr in src) dst[attr]= src[attr]; };<|fim▁end|>
// event.wheelDelta= ticks*(-120); event.wheelDeltaX= ticksX*(-120); event.wheelDeltaY= ticksY*(-120); this.scrollWheel(event.wheelDeltaX != 0? 1 : 2, -event.wheelDelta/15); event.preventDefault();
<|file_name|>device_id_fetcher.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/renderer_host/pepper/device_id_fetcher.h" #include "base/file_util.h" #include "base/prefs/pref_service.h" #include "base/strings/string_number_conversions.h" #include "chrome/browser/profiles/profile.h" #include "chrome/common/pref_names.h" #if defined(OS_CHROMEOS) #include "chromeos/cryptohome/cryptohome_library.h" #endif #include "components/user_prefs/pref_registry_syncable.h" #include "content/public/browser/browser_context.h" #include "content/public/browser/browser_ppapi_host.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/render_process_host.h" #include "crypto/encryptor.h" #include "crypto/random.h" #include "crypto/sha2.h" #if defined(ENABLE_RLZ) #include "rlz/lib/machine_id.h" #endif using content::BrowserPpapiHost; using content::BrowserThread; using content::RenderProcessHost; namespace chrome { namespace { const char kDRMIdentifierFile[] = "Pepper DRM ID.0"; const uint32_t kSaltLength = 32; void GetMachineIDAsync(const DeviceIDFetcher::IDCallback& callback) { std::string result; #if defined(OS_WIN) && defined(ENABLE_RLZ) rlz_lib::GetMachineId(&result); #elif defined(OS_CHROMEOS) result = chromeos::CryptohomeLibrary::Get()->GetSystemSalt(); if (result.empty()) { // cryptohome must not be running; re-request after a delay. const int64 kRequestSystemSaltDelayMs = 500; base::MessageLoop::current()->PostDelayedTask( FROM_HERE, base::Bind(&GetMachineIDAsync, callback), base::TimeDelta::FromMilliseconds(kRequestSystemSaltDelayMs)); return; } #else // Not implemented for other platforms. 
NOTREACHED(); #endif callback.Run(result); } } // namespace DeviceIDFetcher::DeviceIDFetcher(int render_process_id) : in_progress_(false), render_process_id_(render_process_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); } DeviceIDFetcher::~DeviceIDFetcher() { } bool DeviceIDFetcher::Start(const IDCallback& callback) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); if (in_progress_) return false; in_progress_ = true; callback_ = callback; BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind(&DeviceIDFetcher::CheckPrefsOnUIThread, this)); return true; } // static void DeviceIDFetcher::RegisterProfilePrefs( user_prefs::PrefRegistrySyncable* prefs) { prefs->RegisterBooleanPref(prefs::kEnableDRM, true, user_prefs::PrefRegistrySyncable::UNSYNCABLE_PREF); prefs->RegisterStringPref( prefs::kDRMSalt, "", user_prefs::PrefRegistrySyncable::UNSYNCABLE_PREF); } // static base::FilePath DeviceIDFetcher::GetLegacyDeviceIDPath( const base::FilePath& profile_path) { return profile_path.AppendASCII(kDRMIdentifierFile); } void DeviceIDFetcher::CheckPrefsOnUIThread() { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); Profile* profile = NULL; RenderProcessHost* render_process_host = RenderProcessHost::FromID(render_process_id_); if (render_process_host && render_process_host->GetBrowserContext()) { profile = Profile::FromBrowserContext( render_process_host->GetBrowserContext()); } if (!profile || profile->IsOffTheRecord() || !profile->GetPrefs()->GetBoolean(prefs::kEnableDRM)) { RunCallbackOnIOThread(std::string()); return; } // Check if the salt pref is set. If it isn't, set it. std::string salt = profile->GetPrefs()->GetString(prefs::kDRMSalt); if (salt.empty()) { uint8_t salt_bytes[kSaltLength]; crypto::RandBytes(salt_bytes, arraysize(salt_bytes)); // Since it will be stored in a string pref, convert it to hex. 
salt = base::HexEncode(salt_bytes, arraysize(salt_bytes)); profile->GetPrefs()->SetString(prefs::kDRMSalt, salt); } #if defined(OS_CHROMEOS) // Try the legacy path first for ChromeOS. We pass the new salt in as well // in case the legacy id doesn't exist. BrowserThread::PostBlockingPoolTask(<|fim▁hole|> base::Bind(&DeviceIDFetcher::LegacyComputeOnBlockingPool, this, profile->GetPath(), salt)); #else // Get the machine ID and call ComputeOnUIThread with salt + machine_id. GetMachineIDAsync(base::Bind(&DeviceIDFetcher::ComputeOnUIThread, this, salt)); #endif } void DeviceIDFetcher::ComputeOnUIThread(const std::string& salt, const std::string& machine_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); if (machine_id.empty()) { LOG(ERROR) << "Empty machine id"; RunCallbackOnIOThread(std::string()); return; } // Build the identifier as follows: // SHA256(machine-id||service||SHA256(machine-id||service||salt)) std::vector<uint8> salt_bytes; if (!base::HexStringToBytes(salt, &salt_bytes)) salt_bytes.clear(); if (salt_bytes.size() != kSaltLength) { LOG(ERROR) << "Unexpected salt bytes length: " << salt_bytes.size(); RunCallbackOnIOThread(std::string()); return; } char id_buf[256 / 8]; // 256-bits for SHA256 std::string input = machine_id; input.append(kDRMIdentifierFile); input.append(salt_bytes.begin(), salt_bytes.end()); crypto::SHA256HashString(input, &id_buf, sizeof(id_buf)); std::string id = StringToLowerASCII( base::HexEncode(reinterpret_cast<const void*>(id_buf), sizeof(id_buf))); input = machine_id; input.append(kDRMIdentifierFile); input.append(id); crypto::SHA256HashString(input, &id_buf, sizeof(id_buf)); id = StringToLowerASCII(base::HexEncode( reinterpret_cast<const void*>(id_buf), sizeof(id_buf))); RunCallbackOnIOThread(id); } // TODO(raymes): This is temporary code to migrate ChromeOS devices to the new // scheme for generating device IDs. Delete this once we are sure most ChromeOS // devices have been migrated. 
void DeviceIDFetcher::LegacyComputeOnBlockingPool( const base::FilePath& profile_path, const std::string& salt) { std::string id; // First check if the legacy device ID file exists on ChromeOS. If it does, we // should just return that. base::FilePath id_path = GetLegacyDeviceIDPath(profile_path); if (base::PathExists(id_path)) { if (base::ReadFileToString(id_path, &id) && !id.empty()) { RunCallbackOnIOThread(id); return; } } // If we didn't find an ID, get the machine ID and call the new code path to // generate an ID. BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind(&GetMachineIDAsync, base::Bind(&DeviceIDFetcher::ComputeOnUIThread, this, salt))); } void DeviceIDFetcher::RunCallbackOnIOThread(const std::string& id) { if (!BrowserThread::CurrentlyOn(BrowserThread::IO)) { BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&DeviceIDFetcher::RunCallbackOnIOThread, this, id)); return; } in_progress_ = false; callback_.Run(id); } } // namespace chrome<|fim▁end|>
FROM_HERE,
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>//#include <math.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include "inference.hpp" #include "lbfgs.h" //#include <boost/random.hpp> // lbfgsfloatval_t evaluate(void *instance, // const double * x, // double *g, // const int n, // const lbfgsfloatval_t step) { // Inference * inference = // static_cast<Inference *>(instance); // double SL = inference->evaluate(n, x, g); // //printf("%.10f, %.10f, %.10f, %.10f\n", SL, x[0], x[1], x[2]); // //printf("%.10f, %.10f, %.10f, %.10f\n", SL, g[0], g[1], g[2]); // //getchar(); // return SL; // } // int progress(void *instance, // const double *s, // const double *g, // const double SL, // const lbfgsfloatval_t xnorm, // const lbfgsfloatval_t gnorm, // const lbfgsfloatval_t step, // int n, // int k, // int ls) { // //if (k%10 == 0) { // // printf("Iteration %d: ",k); // // printf("Object function = %16.15f ", SL); // // printf(" = %16.15f step = %16.15f\n", gnorm, step); // //} // return 0; // } int main() { lbfgs_parameter_t param; lbfgs_parameter_init(&param); double * x; double SL; Inference inference; inference.init(); int nsample = 1000; double E1, E2, DeltaE; int spot_index; //boost::mt19937 gen; //gen.seed(time(0)); //boost::uniform_int<> real(1, 999); //boost::uniform_01<boost::hellekalek1995> runif(gen); //boost::uniform_int<> runif_int(0, inference.L-1); // int N = 3; // x = new double[N]; // if(x==NULL) // { // std::cout<<"Allocating storage FAILED!"<< "\n"; // return -1; // } for ( int nstep=0; nstep < nsample; nstep++ ) { printf("Iteration: %d\n", nstep); //swipe the frame and optimize the parameter for each bright spot. 
// for (int i=0; i<inference.L; i++) // { // if (inference.frame.E[i] == 1){ // inference.active_spot_index = i; // // for (int i=0; i<N; i++) // // { // // x[i] = 1.0; // // } // //param.m = 10; // //param.epsilon = 1e-5; // // param.max_iterations = 20000; // // param.linesearch = LBFGS_LINESEARCH_BACKTRACKING_WOLFE; // //printf("here1\n");<|fim▁hole|> // //int status = lbfgs(N,x,&SL,evaluate,progress,&inference,&param); // // if (status == 0) // // { // // //printf("L-BFGS optimization terminated with status code = %d, lambda=%f\n",status, x[N-1]); // // } // // else // // { // // printf("L-BFGS optimization terminated with status code = %d, lambda=%f\n",status, x[N-1]); // // getchar(); // // } // } // } //randomly choose spot and set it bright or dark. //int spot_id = 5; //for (int i=0; i<20; i++) for (int i=0; i<120; i++) { for (int j=0; j<108; j++) { //int spot_id = runif_int(gen); spot_index = i*108+j; // printf("check spot %d: (dark: %.10lf ||| bright: %.10lf)\n", spot_id, // inference.get_dark_mlogp(spot_id), // inference.get_bright_mlogp(spot_id)); //getchar(); DeltaE = inference.FlipEnergyDiff(spot_index); if (DeltaE < 0) { inference.frame.E[spot_index] = -inference.frame.E[spot_index]; inference.frame.A[spot_index] = inference.lbfgs_x[0]; inference.frame.B[spot_index] = inference.lbfgs_x[1]; inference.frame.phi[spot_index] = inference.lbfgs_x[2]; inference.UpdateMu(); } else { //for now we do nothing } //inference.TuningSpot(1); //E1 = inference.get_dark_mlogp(spot_id); //E2 = inference.get_bright_mlogp(spot_id); // if (E1 - E2 <= 5.0) // { // inference.frame.E[spot_id] = 0; // } // else // { // inference.frame.E[spot_id] = 1; // } printf("check spot %d: DeltaE: %.10lf \n", spot_index, DeltaE); } } if ( nstep%1 == 0 ) { inference.output_result(); inference.output_evidence(); } } inference.output_result(); return 0; }<|fim▁end|>
<|file_name|>player.ts<|end_file_name|><|fim▁begin|>import { Suspect } from '../card/suspect/suspect'; <|fim▁hole|>export interface Player { id: string; name: string; order: number; characterId: number; character?: Suspect; cardIds: number[]; }<|fim▁end|>
<|file_name|>_mod_clickMenu.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- #---------------------------------------------------------------------------- # Menu for quickly adding waypoints when on move #---------------------------------------------------------------------------- # Copyright 2007-2008, Oliver White # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #--------------------------------------------------------------------------- from modules.base_module import RanaModule import cairo from time import time from math import pi def getModule(*args, **kwargs): return ClickMenu(*args, **kwargs) class ClickMenu(RanaModule): """Overlay info on the map""" def __init__(self, *args, **kwargs): RanaModule.__init__(self, *args, **kwargs) self.lastWaypoint = "(none)" self.lastWaypointAddTime = 0 self.messageLingerTime = 2 def handleMessage(self, message, messageType, args): if message == "addWaypoint": m = self.m.get("waypoints", None) if m is not None: self.lastWaypoint = m.newWaypoint() self.lastWaypointAddTime = time() def drawMapOverlay(self, cr): """Draw an overlay on top of the map, showing various information about position etc.""" # waypoins will be done in another way, so this is disabled for the time being # (x,y,w,h) = self.get('viewport') # # dt = time() - self.lastWaypointAddTime # if(dt > 0 and dt < self.messageLingerTime): # self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, 
w*0.3) # else:<|fim▁hole|> def drawNewWaypoint(self, cr, x, y, size): text = self.lastWaypoint cr.set_font_size(200) extents = cr.text_extents(text) (w, h) = (extents[2], extents[3]) cr.set_source_rgb(0, 0, 0.5) cr.arc(x, y, size, 0, 2 * pi) cr.fill() x1 = x - 0.5 * w y1 = y + 0.5 * h border = 20 cr.set_source_rgb(1, 1, 1) cr.move_to(x1, y1) cr.show_text(text) cr.fill()<|fim▁end|>
# m = self.m.get('clickHandler', None) # if(m != None): # m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint")
<|file_name|>gregorian.js.uncompressed.js<|end_file_name|><|fim▁begin|>define( "dojo/cldr/nls/nb/gregorian", //begin v1.x content { "dateFormatItem-Ehm": "E h.mm a", "days-standAlone-short": [ "sø.", "ma.", "ti.", "on.", "to.", "fr.", "lø." ], "months-format-narrow": [ "J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D" ], "field-second-relative+0": "nå", "quarters-standAlone-narrow": [ "1", "2", "3", "4" ], "field-weekday": "Ukedag", "dateFormatItem-yQQQ": "QQQ y", "dateFormatItem-yMEd": "E d.MM.y", "field-wed-relative+0": "onsdag denne uken", "dateFormatItem-GyMMMEd": "E d. MMM y G", "dateFormatItem-MMMEd": "E d. MMM", "field-wed-relative+1": "onsdag neste uke", "eraNarrow": [ "f.Kr.", "fvt.", "e.Kr.", "vt" ], "dateFormatItem-yMM": "MM.y", "field-tue-relative+-1": "tirsdag sist uke", "days-format-short": [ "sø.", "ma.", "ti.", "on.", "to.", "fr.", "lø." ], "dateFormat-long": "d. MMMM y", "field-fri-relative+-1": "fredag sist uke", "field-wed-relative+-1": "onsdag sist uke", "months-format-wide": [ "januar", "februar", "mars", "april", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember" ], "dateTimeFormat-medium": "{1}, {0}", "dayPeriods-format-wide-pm": "p.m.", "dateFormat-full": "EEEE d. MMMM y", "field-thu-relative+-1": "torsdag sist uke", "dateFormatItem-Md": "d.M.", "dayPeriods-format-abbr-am": "a.m.", "dateFormatItem-yMd": "d.M.y", "dateFormatItem-yM": "M.y", "field-era": "Tidsalder", "months-standAlone-wide": [ "januar", "februar", "mars", "april", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember" ], "timeFormat-short": "HH.mm", "quarters-format-wide": [ "1. kvartal", "2. kvartal", "3. kvartal", "4. 
kvartal" ], "timeFormat-long": "HH.mm.ss z", "dateFormatItem-yMMM": "MMM y", "dateFormatItem-yQQQQ": "QQQQ y", "field-year": "År", "dateFormatItem-MMdd": "d.M.", "field-hour": "Time", "months-format-abbr": [ "jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des." ], "field-sat-relative+0": "lørdag denne uken", "field-sat-relative+1": "lørdag neste uke", "timeFormat-full": "HH.mm.ss zzzz", "field-day-relative+0": "i dag", "field-day-relative+1": "i morgen", "field-thu-relative+0": "torsdag denne uken", "dateFormatItem-GyMMMd": "d. MMM y G", "field-day-relative+2": "i overmorgen", "field-thu-relative+1": "torsdag neste uke", "dateFormatItem-H": "HH", "months-standAlone-abbr": [ "jan", "feb", "mar", "apr", "mai", "jun", "jul", "aug", "sep", "okt", "nov", "des" ], "quarters-format-abbr": [ "K1", "K2", "K3", "K4" ], "quarters-standAlone-wide": [ "1. kvartal", "2. kvartal", "3. kvartal", "4. kvartal" ], "dateFormatItem-Gy": "y G", "dateFormatItem-M": "L.", "days-standAlone-wide": [ "søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag" ], "timeFormat-medium": "HH.mm.ss", "field-sun-relative+0": "søndag denne uken", "dateFormatItem-Hm": "HH.mm", "quarters-standAlone-abbr": [ "K1", "K2", "K3", "K4" ], "field-sun-relative+1": "søndag neste uke", "eraAbbr": [ "f.Kr.", "e.Kr." ], "field-minute": "Minutt", "field-dayperiod": "AM/PM", "days-standAlone-abbr": [ "sø.", "ma.", "ti.", "on.", "to.", "fr.", "lø." ], "dateFormatItem-d": "d.", "dateFormatItem-ms": "mm.ss", "quarters-format-narrow": [ "1", "2", "3", "4" ], "field-day-relative+-1": "i går", "dateFormatItem-h": "h a", "dateTimeFormat-long": "{1} 'kl.' {0}", "dayPeriods-format-narrow-am": "a", "field-day-relative+-2": "i forgårs", "dateFormatItem-MMMd": "d. 
MMM", "dateFormatItem-MEd": "E d.M", "dateTimeFormat-full": "{1} {0}", "field-fri-relative+0": "fredag denne uken", "dateFormatItem-yMMMM": "MMMM y", "field-fri-relative+1": "fredag neste uke", "field-day": "Dag", "days-format-wide": [ "søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag" ], "field-zone": "Tidssone", "dateFormatItem-y": "y", "months-standAlone-narrow": [ "J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D" ], "field-year-relative+-1": "i fjor", "field-month-relative+-1": "forrige måned", "dateFormatItem-hm": "h.mm a", "dayPeriods-format-abbr-pm": "p.m.", "days-format-abbr": [ "søn.", "man.", "tir.", "ons.", "tor.", "fre.", "lør." ], "eraNames": [ "f.Kr.", "e.Kr." ], "dateFormatItem-yMMMd": "d. MMM y", "days-format-narrow": [ "S", "M", "T", "O", "T", "F", "L" ], "days-standAlone-narrow": [ "S", "M", "T", "O", "T", "F", "L" ], "dateFormatItem-MMM": "LLL", "field-month": "Måned", "field-tue-relative+0": "tirsdag denne uken", "field-tue-relative+1": "tirsdag neste uke", "dayPeriods-format-wide-am": "a.m.", "dateFormatItem-EHm": "E HH.mm", "field-mon-relative+0": "mandag denne uken", "field-mon-relative+1": "mandag neste uke", "dateFormat-short": "dd.MM.y", "dateFormatItem-EHms": "E HH.mm.ss",<|fim▁hole|> "dateFormatItem-Ehms": "E h.mm.ss a", "field-second": "Sekund", "field-sat-relative+-1": "lørdag sist uke", "dateFormatItem-yMMMEd": "E d. MMM y", "field-sun-relative+-1": "søndag sist uke", "field-month-relative+0": "denne måneden", "field-month-relative+1": "neste måned", "dateFormatItem-Ed": "E d.", "dateTimeFormats-appendItem-Timezone": "{0} {1}", "field-week": "Uke", "dateFormat-medium": "d. 
MMM y", "field-year-relative+0": "i år", "field-week-relative+-1": "forrige uke", "field-year-relative+1": "neste år", "dayPeriods-format-narrow-pm": "p", "dateTimeFormat-short": "{1}, {0}", "dateFormatItem-Hms": "HH.mm.ss", "dateFormatItem-hms": "h.mm.ss a", "dateFormatItem-GyMMM": "MMM y G", "field-mon-relative+-1": "mandag sist uke", "field-week-relative+0": "denne uken", "field-week-relative+1": "neste uke" } //end v1.x content );<|fim▁end|>
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>"""Constants for CLIFun.""" import click <|fim▁hole|>from .classes import Context CONTEXT_SETTINGS = dict(auto_envvar_prefix='PLAY') PASS_CONTEXT = click.make_pass_decorator(Context, ensure=True)<|fim▁end|>
<|file_name|>db.rs<|end_file_name|><|fim▁begin|>use std::{borrow::Cow, cell::RefCell, fmt::Write as FmtWrite}; use futures::{Future, FutureExt}; use nanorand::{WyRand, RNG}; use ntex::util::{Bytes, BytesMut}; use smallvec::SmallVec; use tokio_postgres::types::ToSql; use tokio_postgres::{connect, Client, Statement}; use yarte::{ywrite_html, Serialize}; #[cfg(target_os = "macos")] use serde_json as simd_json; use crate::utils::Writer; #[derive(Copy, Clone, Serialize, Debug, serde::Serialize)] pub struct World { pub id: i32, pub randomnumber: i32, } #[derive(serde::Serialize, Debug)] pub struct Fortune { pub id: i32, pub message: Cow<'static, str>, }<|fim▁hole|>pub struct PgConnection { cl: Client, fortune: Statement, world: Statement, rng: RefCell<WyRand>, updates: Vec<Statement>, } impl PgConnection { pub async fn connect(db_url: &str) -> PgConnection { let (cl, conn) = connect(db_url) .await .expect("can not connect to postgresql"); ntex::rt::spawn(conn.map(|_| ())); let fortune = cl.prepare("SELECT * FROM fortune").await.unwrap(); let mut updates = Vec::new(); for num in 1..=500u16 { let mut pl: u16 = 1; let mut q = String::new(); q.push_str("UPDATE world SET randomnumber = CASE id "); for _ in 1..=num { let _ = write!(&mut q, "when ${} then ${} ", pl, pl + 1); pl += 2; } q.push_str("ELSE randomnumber END WHERE id IN ("); for _ in 1..=num { let _ = write!(&mut q, "${},", pl); pl += 1; } q.pop(); q.push(')'); updates.push(cl.prepare(&q).await.unwrap()); } let world = cl.prepare("SELECT * FROM world WHERE id=$1").await.unwrap(); PgConnection { cl, fortune, world, updates, rng: RefCell::new(WyRand::new()), } } } impl PgConnection { pub fn get_world(&self) -> impl Future<Output = Bytes> { let random_id = (self.rng.borrow_mut().generate::<u32>() % 10_000 + 1) as i32; self.cl.query(&self.world, &[&random_id]).map(|rows| { let rows = rows.unwrap(); let mut body = BytesMut::new(); simd_json::to_writer( Writer(&mut body), &World { id: rows[0].get(0), randomnumber: 
rows[0].get(1), }, ) .unwrap(); body.freeze() }) } pub fn get_worlds(&self, num: u16) -> impl Future<Output = Vec<World>> { let mut futs = Vec::with_capacity(num as usize); let mut rng = self.rng.borrow_mut(); for _ in 0..num { let w_id = (rng.generate::<u32>() % 10_000 + 1) as i32; futs.push(self.cl.query(&self.world, &[&w_id])); } async move { let mut worlds: Vec<World> = Vec::with_capacity(num as usize); for q in futs { let rows = q.await.unwrap(); worlds.push(World { id: rows[0].get(0), randomnumber: rows[0].get(1), }) } worlds } } pub fn update(&self, num: u16) -> impl Future<Output = Vec<World>> { let mut futs = Vec::with_capacity(num as usize); let mut rng = self.rng.borrow_mut(); for _ in 0..num { let id = (rng.generate::<u32>() % 10_000 + 1) as i32; let w_id = (rng.generate::<u32>() % 10_000 + 1) as i32; futs.push(self.cl.query(&self.world, &[&w_id]).map(move |res| { let rows = res.unwrap(); World { id: rows[0].get(0), randomnumber: id, } })); } let cl = self.cl.clone(); let st = self.updates[(num as usize) - 1].clone(); async move { let mut worlds: Vec<World> = Vec::with_capacity(num as usize); for q in futs { worlds.push(q.await); } let mut params: Vec<&dyn ToSql> = Vec::with_capacity(num as usize * 3); for w in &worlds { params.push(&w.id); params.push(&w.randomnumber); } for w in &worlds { params.push(&w.id); } let _ = cl .query(&st, &params) .await .map_err(|e| log::error!("{:?}", e)); worlds } } pub fn tell_fortune(&self) -> impl Future<Output = Bytes> { let fut = self.cl.query_raw(&self.fortune, &[]); async move { let rows = fut.await.unwrap(); let mut fortunes: SmallVec<[_; 32]> = smallvec::smallvec![Fortune { id: 0, message: Cow::Borrowed("Additional fortune added at request time."), }]; for row in rows { fortunes.push(Fortune { id: row.get(0), message: Cow::Owned(row.get(1)), }); } fortunes.sort_by(|it, next| it.message.cmp(&next.message)); let mut buf = Vec::with_capacity(2048); ywrite_html!(buf, "{{> fortune }}"); Bytes::from(buf) } } 
}<|fim▁end|>
/// Postgres interface
<|file_name|>platform_matcher.py<|end_file_name|><|fim▁begin|># ============================================================================= # Copyright (c) 2016, Cisco Systems, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. 
# ============================================================================= import re UNKNOWN = "unknown" PLATFORM_ASR9K_P = 'asr9k_p' PLATFORM_ASR9K_PX = "asr9k_px" PLATFORM_CRS_P = "crs_p" PLATFORM_CRS_PX = "crs_px" PLATFORM_NCS6K = "ncs6k" PLATFORM_NCS6K_SYSADMIN = "ncs6k_sysadmin" PLATFORM_TYPE_UNKNOWN = -1 # IOS XR PLATFORM_TYPE_ASR9K_PX_SMU = 0 PLATFORM_TYPE_ASR9K_PX_SP = 1 PLATFORM_TYPE_ASR9K_P_SMU = 2 PLATFORM_TYPE_ASR9K_P_PACKAGE = 3 PLATFORM_TYPE_ASR9K_PX_PACKAGE = 4 PLATFORM_TYPE_CRS_PX_SMU = 5 PLATFORM_TYPE_CRS_P_SMU = 6 PLATFORM_TYPE_CRS_PX_PACKAGE = 7 PLATFORM_TYPE_CRS_P_PACKAGE = 8 PLATFORM_TYPE_ASR9K_PX_TAR = 13 """ Match NCS6K_SMU before NS6K_PACKAGE so a SMU won't be treated as a package as they have a very similar format. In addition, the long string (ncs6k-sysadmin) is matched first. """ PLATFORM_TYPE_NCS6K_SYSADMIN_SMU = 9; PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE = 10; PLATFORM_TYPE_NCS6K_SMU = 11; PLATFORM_TYPE_NCS6K_PACKAGE = 12; pattern_list = {} # disk0:asr9k-mini-p-4.2.1 pattern = re.compile("\\S*asr9k-\\S*-p(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_ASR9K_P_PACKAGE] = pattern # disk0:asr9k-p-4.2.3.CSCtz89449 pattern = re.compile("\\S*asr9k-p(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_ASR9K_P_SMU] = pattern # disk0:asr9k-mini-px-4.2.1 pattern = re.compile("\\S*asr9k-\\S*-px(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_ASR9K_PX_PACKAGE] = pattern # disk0:asr9k-px-4.2.3.CSCtz89449 pattern = re.compile("\\S*asr9k-px(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_ASR9K_PX_SMU] = pattern # ASR9K-iosxr-px-k9-5.3.0.tar or ASR9K-iosxr-px-5.3.1-bridge_smus.tar pattern = re.compile("\\S*ASR9K-iosxr-px\\S*(-\\d+\\.\\d+\\.\\d+)\\S*\\.tar") pattern_list[PLATFORM_TYPE_ASR9K_PX_TAR] = pattern # disk0:asr9k-px-4.3.2.sp-1.0.0 or asr9k-px-4.3.2.k9-sp-1.0.0 pattern = re.compile("\\S*asr9k-px(-\\d+\\.\\d+\\.\\d+\\.)\\S*sp\\S*") pattern_list[PLATFORM_TYPE_ASR9K_PX_SP] = pattern # disk0:hfr-mini-px-4.2.1 
pattern = re.compile("\\S*hfr-\\S*-px(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_CRS_PX_PACKAGE] = pattern # disk0:hfr-px-4.2.3.CSCtz89449 pattern = re.compile("\\S*hfr-px(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_CRS_PX_SMU] = pattern # disk0:hfr-p-4.2.3.CSCtz89449 pattern = re.compile("\\S*hfr-p(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_CRS_P_SMU] = pattern # disk0:hfr-mini-p-4.2.1 pattern = re.compile("\\S*hfr-\\S*-p(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_CRS_P_PACKAGE] = pattern # ncs6k-5.0.1.CSCul51055-0.0.2.i pattern = re.compile("\\S*ncs6k(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_NCS6K_SMU] = pattern # ncs6k-mcast-5.0.1 pattern = re.compile("\\S*ncs6k-\\S*(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_NCS6K_PACKAGE] = pattern # ncs6k-sysadmin-5.0.0.CSCul30161 pattern = re.compile("\\S*ncs6k-sysadmin(-\\d+\\.\\d+\\.\\d+\\.)CSC\\S*") pattern_list[PLATFORM_TYPE_NCS6K_SYSADMIN_SMU] = pattern # ncs6k-sysadmin-mcast-5.0.1 pattern = re.compile("\\S*ncs6k-sysadmin-\\S*(-\\d+\\.\\d+\\.\\d+)\\S*") pattern_list[PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE] = pattern def get_IOSXR_release(name): matches = re.findall("\d+\.\d+\.\d+", name) if matches: return matches[0] return UNKNOWN def get_NCS6K_release(name): """ Example, input: ncs6k-xr-5.0.1 ncs6k-5.0.1.CSCul51055-0.0.2.i ncs6k-sysadmin-xr-5.0.1 ncs6k-sysadmin-5.0.1.CSCul51055-0.0.2.i ASR9K-iosxr-px-k9-5.0.1.tar ASR9K-iosxr-px-5.0.1-bridge_smus.tar output: 5.0.1 """ matches = re.findall("\d+\.\d+\.\d+", name) if matches: return matches[0] return UNKNOWN def get_platform_type(name): for platform_type in pattern_list: pattern = pattern_list[platform_type] if pattern.match(name): return platform_type return PLATFORM_TYPE_UNKNOWN def get_platform(name): """ Returns the platform based on the pattern type. 
ASR9K-PX, CRS-PX, NCS6K <|fim▁hole|> platform_type = get_platform_type(name) if platform_type == PLATFORM_TYPE_ASR9K_P_SMU or \ platform_type == PLATFORM_TYPE_ASR9K_P_PACKAGE: return PLATFORM_ASR9K_P elif platform_type == PLATFORM_TYPE_ASR9K_PX_PACKAGE or \ platform_type == PLATFORM_TYPE_ASR9K_PX_SMU or \ platform_type == PLATFORM_TYPE_ASR9K_PX_SP or \ platform_type == PLATFORM_TYPE_ASR9K_PX_TAR: return PLATFORM_ASR9K_PX elif platform_type == PLATFORM_TYPE_CRS_PX_SMU or \ platform_type == PLATFORM_TYPE_CRS_PX_PACKAGE: return PLATFORM_CRS_PX elif platform_type == PLATFORM_TYPE_CRS_P_SMU or \ platform_type == PLATFORM_TYPE_CRS_P_PACKAGE: return PLATFORM_CRS_P elif platform_type == PLATFORM_TYPE_NCS6K_SMU or \ platform_type == PLATFORM_TYPE_NCS6K_PACKAGE: return PLATFORM_NCS6K elif platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_SMU or \ platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE: return PLATFORM_NCS6K_SYSADMIN else: return UNKNOWN def get_release(name): platform_type = get_platform_type(name) if platform_type == PLATFORM_TYPE_ASR9K_P_SMU or \ platform_type == PLATFORM_TYPE_ASR9K_P_PACKAGE or \ platform_type == PLATFORM_TYPE_CRS_P_SMU or \ platform_type == PLATFORM_TYPE_CRS_P_PACKAGE or \ platform_type == PLATFORM_TYPE_ASR9K_PX_PACKAGE or \ platform_type == PLATFORM_TYPE_ASR9K_PX_SMU or \ platform_type == PLATFORM_TYPE_ASR9K_PX_SP or \ platform_type == PLATFORM_TYPE_CRS_PX_SMU or \ platform_type == PLATFORM_TYPE_CRS_PX_PACKAGE or \ platform_type == PLATFORM_TYPE_ASR9K_PX_TAR: return get_IOSXR_release(name) elif platform_type == PLATFORM_TYPE_NCS6K_SMU or \ platform_type == PLATFORM_TYPE_NCS6K_PACKAGE or \ platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_SMU or \ platform_type == PLATFORM_TYPE_NCS6K_SYSADMIN_PACKAGE: return get_NCS6K_release(name) else: return UNKNOWN; if __name__ == '__main__': names = [] names.append('ASR9K-iosxr-px-k9-5.3.1.tar') names.append('ASR9K-iosxr-px-5.3.1-bridge_smus.tar') names.append('asr9k-px-5.3.1.CSCuv00898.pie') 
names.append('ASR9K-iosxr-px-k9-5.1.3.tar') names.append('asr9k-px-5.1.3.CSCuw01943.pie') names.append('ASR9K-iosxr-px-k9-5.3.0.tar') names.append('ASR9K-iosxr-px-5.3.0-turboboot.tar') names.append('ASR9K-iosxr-px-5.30.0.tar') names.append('asr9k-px-5.2.2.sp1.pie') for name in names: print name print(get_platform(name), get_release(name)) print<|fim▁end|>
"""
<|file_name|>machine_windows.go<|end_file_name|><|fim▁begin|>package main import ( "os" "fmt" "path/filepath" // "launchpad.net/juju-core/agent" "launchpad.net/juju-core/worker" // "launchpad.net/juju-core/worker/authenticationworker" "launchpad.net/juju-core/worker/charmrevisionworker" "launchpad.net/juju-core/worker/deployer" "launchpad.net/juju-core/worker/firewaller" workerlogger "launchpad.net/juju-core/worker/logger" // "launchpad.net/juju-core/worker/machineenvironmentworker" "launchpad.net/juju-core/worker/machiner" // "launchpad.net/juju-core/worker/rsyslog" "launchpad.net/juju-core/worker/upgrader" "launchpad.net/juju-core/state/api/params" // "launchpad.net/juju-core/provider" "launchpad.net/juju-core/worker/provisioner" "launchpad.net/juju-core/utils" ) func (a *MachineAgent) initAgent() error { if err := os.Remove(jujuRun); err != nil && !os.IsNotExist(err) { return err } jujud := filepath.Join(a.Conf.dataDir, "tools", a.Tag(), "jujud.exe") return utils.Symlink(jujud, jujuRun) } // APIWorker returns a Worker that connects to the API and starts any // workers that need an API connection. // // If a state worker is necessary, APIWorker calls ensureStateWorker. func (a *MachineAgent) APIWorker(ensureStateWorker func()) (worker.Worker, error) { agentConfig := a.Conf.config st, entity, err := openAPIState(agentConfig, a) if err != nil { return nil, err } reportOpenedAPI(st) for _, job := range entity.Jobs() { if job.NeedsState() { ensureStateWorker() break }<|fim▁hole|> for _, job := range entity.Jobs() { if job == params.JobManageEnviron { rsyslogMode = rsyslog.RsyslogModeAccumulate break } } */ runner := newRunner(connectionIsFatal(st), moreImportant) // Run the upgrader and the upgrade-steps worker without waiting for the upgrade steps to complete. 
runner.StartWorker("upgrader", func() (worker.Worker, error) { return upgrader.NewUpgrader(st.Upgrader(), agentConfig), nil }) runner.StartWorker("upgrade-steps", func() (worker.Worker, error) { return a.upgradeWorker(st, entity.Jobs()), nil }) // All other workers must wait for the upgrade steps to complete before starting. a.startWorkerAfterUpgrade(runner, "machiner", func() (worker.Worker, error) { return machiner.NewMachiner(st.Machiner(), agentConfig), nil }) a.startWorkerAfterUpgrade(runner, "logger", func() (worker.Worker, error) { return workerlogger.NewLogger(st.Logger(), agentConfig), nil }) // TODO: gsamfira: Port machineenvironmentworker to windows. Proxy settings can be written // in the registry /* a.startWorkerAfterUpgrade(runner, "machineenvironmentworker", func() (worker.Worker, error) { return machineenvironmentworker.NewMachineEnvironmentWorker(st.Environment(), agentConfig), nil }) */ // gsamfira: No syslog support on windows (yet) /* a.startWorkerAfterUpgrade(runner, "rsyslog", func() (worker.Worker, error) { return newRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslogMode) }) */ // If not a local provider bootstrap machine, start the worker to manage SSH keys. // TODO: gsamfira: This will need to be ported at a later time to setup x509 keys for // WinRm /* providerType := agentConfig.Value(agent.ProviderType) if providerType != provider.Local || a.MachineId != bootstrapMachineId { a.startWorkerAfterUpgrade(runner, "authenticationworker", func() (worker.Worker, error) { return authenticationworker.NewWorker(st.KeyUpdater(), agentConfig), nil }) } */ // Perform the operations needed to set up hosting for containers. 
if err := a.setupContainerSupport(runner, st, entity); err != nil { return nil, fmt.Errorf("setting up container support: %v", err) } for _, job := range entity.Jobs() { switch job { case params.JobHostUnits: a.startWorkerAfterUpgrade(runner, "deployer", func() (worker.Worker, error) { apiDeployer := st.Deployer() context := newDeployContext(apiDeployer, agentConfig) return deployer.NewDeployer(apiDeployer, context), nil }) case params.JobManageEnviron: a.startWorkerAfterUpgrade(runner, "environ-provisioner", func() (worker.Worker, error) { return provisioner.NewEnvironProvisioner(st.Provisioner(), agentConfig), nil }) // TODO(axw) 2013-09-24 bug #1229506 // Make another job to enable the firewaller. Not all environments // are capable of managing ports centrally. a.startWorkerAfterUpgrade(runner, "firewaller", func() (worker.Worker, error) { return firewaller.NewFirewaller(st.Firewaller()) }) a.startWorkerAfterUpgrade(runner, "charm-revision-updater", func() (worker.Worker, error) { return charmrevisionworker.NewRevisionUpdateWorker(st.CharmRevisionUpdater()), nil }) case params.JobManageStateDeprecated: // Legacy environments may set this, but we ignore it. default: // TODO(dimitern): Once all workers moved over to using // the API, report "unknown job type" here. } } return newCloseWorker(runner, st), nil // Note: a worker.Runner is itself a worker.Worker. }<|fim▁end|>
} /* rsyslogMode := rsyslog.RsyslogModeForwarding