file_name large_stringlengths 4 69 | prefix large_stringlengths 0 26.7k | suffix large_stringlengths 0 24.8k | middle large_stringlengths 0 2.12k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
glue.rs | let llty = sizing_type_of(ccx, typ);
// `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
tcx.types.i8
} else {
t
}
}
_ => t
}
}
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type!= t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc);
}
}
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if!bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs)
})
}
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
parent_id: ast::DefId,
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
if!substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, ccx.tcx().mk_nil())
}
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
debug!("trans_struct_drop t: {}", t);
// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs);
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
} else {
let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
};
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
t, bcx.val_to_string(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
t, bcx.val_to_string(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
return (size, align);
}
match t.sty {
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
}
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = Select(bcx,
ICmp(bcx,
llvm::IntUGT,
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `size + ((size & (align-1))? align : 0)`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) &!align`
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align)
}
ty::TyTrait(..) => {
// info points to the vtable and the second entry in the vtable is the
// dynamic size of the object.
let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = GEPi(bcx, info, &[1]);
let align_ptr = GEPi(bcx, info, &[2]);
(Load(bcx, size_ptr), Load(bcx, align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); | let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
C_uint(bcx.ccx(), unit_align)) | random_line_split | |
glue.rs | trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
} else {
bcx
}
}
pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Ty<'tcx> {
let tcx = ccx.tcx();
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
if!type_is_sized(tcx, t) {
return t
}
// FIXME (#22815): note that type_needs_drop conservatively
// approximates in some cases and may say a type expression
// requires drop glue when it actually does not.
//
// (In this case it is not clear whether any harm is done, i.e.
// erroneously returning `t` in some cases where we could have
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
if!type_needs_drop(tcx, t) {
return tcx.types.i8;
}
match t.sty {
ty::TyBox(typ) if!type_needs_drop(tcx, typ)
&& type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
// `Box<ZeroSizeType>` does not allocate.
if llsize_of_alloc(ccx, llty) == 0 {
tcx.types.i8
} else {
t
}
}
_ => t
}
}
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false, None)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool,
drop_hint: Option<cleanup::DropHintValue>)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
let _icx = push_ctxt("drop_ty");
let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
} else {
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx, t);
let ptr = if glue_type!= t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
} else {
v
};
match drop_hint {
Some(drop_hint) => {
let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
let moved_val =
C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc);
cx
})
}
None => {
// No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc);
}
}
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
skip_dtor: bool)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef |
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
/// The normal path; runs the dtor, and then recurs on the contents
Ty(Ty<'tcx>),
/// Skips the dtor, if any, for ty; drops the contents directly.
/// Note that the dtor is only skipped at the most *shallow*
/// level, namely, an `impl Drop for Ty` itself. So, for example,
/// if Ty is Newtype(S) then only the Drop impl for for Newtype
/// itself will be skipped, while the Drop impl for S, if any,
/// will be invoked.
TyContents(Ty<'tcx>),
}
impl<'tcx> DropGlueKind<'tcx> {
fn ty(&self) -> Ty<'tcx> {
match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
}
fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
{
match *self {
DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
}
}
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
g: DropGlueKind<'tcx>) -> ValueRef {
debug!("make drop glue for {:?}", g);
let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
debug!("drop glue type {:?}", g);
match ccx.drop_glues().borrow().get(&g) {
Some(&glue) => return glue,
_ => { }
}
let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) {
type_of(ccx, t).ptr_to()
} else {
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
};
let llfnty = Type::glue_fn(ccx, llty);
// To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn;
};
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
update_linkage(ccx, llfn, None, OriginalTranslation);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
// passed by value.
//
// llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
let bcx = make_drop_glue(bcx, llrawptr0, g);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn
}
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
struct_data: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx> {
assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
let repr = adt::represent_type(bcx.ccx(), t);
let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
let bcx = if!bcx.ccx().check_drop_flag_for_sanity() {
bcx
} else {
let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
let drop_flag_neither_initialized_nor_cleared =
And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None);
cx
})
};
let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
with_cond(bcx, drop_flag_dtor_needed, |cx| {
trans_struct_drop(cx, t, struct_data, dtor_did, class_did, substs)
})
}
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: ast::DefId,
parent_id: ast::DefId,
substs: &Substs<'tcx>)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
let did = inline::maybe_instantiate_inline(ccx, did);
if!substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
let substs = ccx.tcx().mk_substs(Substs::erased(substs.types.clone()));
let (val, _, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx();
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let class_ty = tcx.lookup_item_type(parent_id).ty.subst(tcx, substs);
let llty = type_of_dtor(ccx, class_ty);
foreign::get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), &name[..], llvm::CCallConv,
llty, ccx.tcx().mk_nil())
}
}
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef,
dtor_did: ast::DefId,
class_did: ast::DefId,
substs: &subst::Substs<'tcx>)
-> Block<'blk, 'tcx>
{
debug!("trans_struct_drop t: {}", t);
// Find and call the actual destructor
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, class_did, substs);
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
assert_eq!(params.len(), if type_is_sized(bcx.tcx(), t) { 1 } else { 2 });
// Be sure to put the contents into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor panics.
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
let glue_type = get_drop_glue_type(bcx.ccx(), t);
let dtor_ty = bcx.tcx().mk_ctor_fn(class_did, &[glue_type], bcx.tcx().mk_nil());
let (_, bcx) = if type_is_sized(bcx.tcx(), t) {
invoke(bcx, dtor_addr, &[v0], dtor_ty, DebugLoc::None)
} else {
let args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_len(bcx, v0))];
invoke(bcx, dtor_addr, &args, dtor_ty, DebugLoc::None)
};
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
t, bcx.val_to_string(info));
if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
t, bcx.val_to_string(info), size, align);
let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align);
return (size, align);
}
match t.sty {
ty::TyStruct(def, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_uint(ccx, sized_size);
let sized_align = C_uint(ccx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before
// summing it with `sized_size`. (Note that since #26403
// is unfixed, we do not yet add the necessary padding
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc);
// Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any
// padding because drop flags do not have any alignment
// constraints.)
if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
}
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = Select(bcx,
ICmp(bcx,
llvm::IntUGT,
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `size + ((size & (align-1))? align : 0)`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) &!align`
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align)
| {
get_drop_glue_core(ccx, DropGlueKind::Ty(t))
} | identifier_body |
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock:'static {
/// The block header type;
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient:?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext:?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn note_rewards( | _direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> { Ok(()) }
} | &self,
_live: &mut Self::LiveBlock, | random_line_split |
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock:'static {
/// The block header type;
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient:?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext:?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn | (
&self,
_live: &mut Self::LiveBlock,
_direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> { Ok(()) }
}
| note_rewards | identifier_name |
lib.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generalization of a state machine for a consensus engine.
//! This will define traits for the header, block, and state of a blockchain.
extern crate ethcore_util as util;
extern crate ethcore_bigint as bigint;
use bigint::hash::H256;
use bigint::prelude::U256;
use util::Address;
/// A header. This contains important metadata about the block, as well as a
/// "seal" that indicates validity to a consensus engine.
pub trait Header {
/// Cryptographic hash of the header, excluding the seal.
fn bare_hash(&self) -> H256;
/// Cryptographic hash of the header, including the seal.
fn hash(&self) -> H256;
/// Get a reference to the seal fields.
fn seal(&self) -> &[Vec<u8>];
/// The author of the header.
fn author(&self) -> &Address;
/// The number of the header.
fn number(&self) -> u64;
}
/// a header with an associated score (difficulty in PoW terms)
pub trait ScoredHeader: Header {
/// Get the score of this header.
fn score(&self) -> &U256;
/// Set the score of this header.
fn set_score(&mut self, score: U256);
}
/// A "live" block is one which is in the process of the transition.
/// The state of this block can be mutated by arbitrary rules of the
/// state transition function.
pub trait LiveBlock:'static {
/// The block header type;
type Header: Header;
/// Get a reference to the header.
fn header(&self) -> &Self::Header;
/// Get a reference to the uncle headers. If the block type doesn't
/// support uncles, return the empty slice.
fn uncles(&self) -> &[Self::Header];
}
/// Trait for blocks which have a transaction type.
pub trait Transactions: LiveBlock {
/// The transaction type.
type Transaction;
/// Get a reference to the transactions in this block.
fn transactions(&self) -> &[Self::Transaction];
}
/// Generalization of types surrounding blockchain-suitable state machines.
pub trait Machine: for<'a> LocalizedMachine<'a> {
/// The block header type.
type Header: Header;
/// The live block type.
type LiveBlock: LiveBlock<Header=Self::Header>;
/// A handle to a blockchain client for this machine.
type EngineClient:?Sized;
/// A description of needed auxiliary data.
type AuxiliaryRequest;
/// Errors which can occur when querying or interacting with the machine.
type Error;
}
/// Machine-related types localized to a specific lifetime.
// TODO: this is a workaround for a lack of associated type constructors in the language.
pub trait LocalizedMachine<'a>: Sync + Send {
/// Definition of auxiliary data associated to a specific block.
type AuxiliaryData: 'a;
/// A context providing access to the state in a controlled capacity.
/// Generally also provides verifiable proofs.
type StateContext:?Sized + 'a;
}
/// A state machine that uses balances.
pub trait WithBalances: Machine {
/// Get the balance, in base units, associated with an account.
/// Extracts data from the live block.
fn balance(&self, live: &Self::LiveBlock, address: &Address) -> Result<U256, Self::Error>;
/// Increment the balance of an account in the state of the live block.
fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
fn note_rewards(
&self,
_live: &mut Self::LiveBlock,
_direct: &[(Address, U256)],
_indirect: &[(Address, U256)],
) -> Result<(), Self::Error> |
}
| { Ok(()) } | identifier_body |
mod.rs | //! Traits and datastructures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) |
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisions should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
///
///
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereference to the types above.
///
/// The trace must be constructable from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
///
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for reference counted batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
| {
cursor
} | conditional_block |
mod.rs | //! Traits and datastructures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisions should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
///
///
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereference to the types above.
///
/// The trace must be constructable from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
///
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for reference counted batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches. | pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
} | random_line_split | |
mod.rs | //! Traits and datastructures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisions should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
///
///
fn | (&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereference to the types above.
///
/// The trace must be constructable from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] { self.description().lower() }
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
///
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for reference counted batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
| read_upper | identifier_name |
mod.rs | //! Traits and datastructures representing a collection trace.
//!
//! A collection trace is a set of updates of the form `(key, val, time, diff)`, which determine the contents
//! of a collection at given times by accumulating updates whose time field is less or equal to the target field.
//!
//! The `Trace` trait describes those types and methods that a data structure must implement to be viewed as a
//! collection trace. This trait allows operator implementations to be generic with respect to the type of trace,
//! and allows various data structures to be interpretable as multiple different types of trace.
pub mod cursor;
pub mod description;
pub mod implementations;
pub mod layers;
pub mod wrappers;
use timely::progress::Antichain;
use timely::progress::Timestamp;
// use ::difference::Semigroup;
pub use self::cursor::Cursor;
pub use self::description::Description;
// The traces and batch and cursors want the flexibility to appear as if they manage certain types of keys and
// values and such, while perhaps using other representations, I'm thinking mostly of wrappers around the keys
// and vals that change the `Ord` implementation, or stash hash codes, or the like.
//
// This complicates what requirements we make so that the trace is still usable by someone who knows only about
// the base key and value types. For example, the complex types should likely dereference to the simpler types,
// so that the user can make sense of the result as if they were given references to the simpler types. At the
// same time, the collection should be formable from base types (perhaps we need an `Into` or `From` constraint)
// and we should, somehow, be able to take a reference to the simple types to compare against the more complex
// types. This second one is also like an `Into` or `From` constraint, except that we start with a reference and
// really don't need anything more complex than a reference, but we can't form an owned copy of the complex type
// without cloning it.
//
// We could just start by cloning things. Worry about wrapping references later on.
/// A trace whose contents may be read.
///
/// This is a restricted interface to the more general `Trace` trait, which extends this trait with further methods
/// to update the contents of the trace. These methods are used to examine the contents, and to update the reader's
/// capabilities (which may release restrictions on the mutations to the underlying trace and cause work to happen).
pub trait TraceReader {
/// Key by which updates are indexed.
type Key;
/// Values associated with keys.
type Val;
/// Timestamps associated with updates
type Time;
/// Associated update.
type R;
/// The type of an immutable collection of updates.
type Batch: BatchReader<Self::Key, Self::Val, Self::Time, Self::R>+Clone+'static;
/// The type used to enumerate the collections contents.
type Cursor: Cursor<Self::Key, Self::Val, Self::Time, Self::R>;
/// Provides a cursor over updates contained in the trace.
fn cursor(&mut self) -> (Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage) {
if let Some(cursor) = self.cursor_through(&[]) {
cursor
}
else {
panic!("unable to acquire complete cursor for trace; is it closed?");
}
}
/// Acquires a cursor to the restriction of the collection's contents to updates at times not greater or
/// equal to an element of `upper`.
///
/// This method is expected to work if called with an `upper` that (i) was an observed bound in batches from
/// the trace, and (ii) the trace has not been advanced beyond `upper`. Practically, the implementation should
/// be expected to look for a "clean cut" using `upper`, and if it finds such a cut can return a cursor. This
/// should allow `upper` such as `&[]` as used by `self.cursor()`, though it is difficult to imagine other uses.
fn cursor_through(&mut self, upper: &[Self::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Self::Key, Self::Val, Self::Time, Self::R>>::Storage)>;
/// Advances the frontier of times the collection must be correctly accumulable through.
///
/// Practically, this allows the trace to advance times in updates it maintains as long as the advanced times
/// still compare equivalently to any times greater or equal to some element of `frontier`. Times not greater
/// or equal to some element of `frontier` may no longer correctly accumulate, so do not advance a trace unless
/// you are quite sure you no longer require the distinction.
fn advance_by(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which all time comparisions should be accurate.
///
/// Times that are not greater or equal to some element of the advance frontier may accumulate inaccurately as
/// the trace may have lost the ability to distinguish between such times. Accumulations are only guaranteed to
/// be accurate from the frontier onwards.
fn advance_frontier(&mut self) -> &[Self::Time];
/// Advances the frontier that may be used in `cursor_through`.
///
/// Practically, this allows the trace to merge batches whose upper frontier comes before `frontier`. The trace
/// is likely to be annoyed or confused if you use a frontier other than one observed as an upper bound of an
/// actual batch. This doesn't seem likely to be a problem, but get in touch if it is.
///
/// Calling `distinguish_since(&[])` indicates that all batches may be merged at any point, which essentially
/// disables the use of `cursor_through` with any parameter other than `&[]`, which is the behavior of `cursor`.
fn distinguish_since(&mut self, frontier: &[Self::Time]);
/// Reports the frontier from which the collection may be subsetted.
///
/// The semantics are less elegant here, but the underlying trace will not merge batches in advance of this
/// frontier, which ensures that operators can extract the subset of the trace at batch boundaries from this
/// frontier onward. These boundaries may be used in `cursor_through`, whereas boundaries not in advance of
/// this frontier are not guaranteed to return a cursor.
fn distinguish_frontier(&mut self) -> &[Self::Time];
/// Maps logic across the non-empty sequence of batches in the trace.
///
/// This is currently used only to extract historical data to prime late-starting operators who want to reproduce
/// the stream of batches moving past the trace. It could also be a fine basis for a default implementation of the
/// cursor methods, as they (by default) just move through batches accumulating cursors into a cursor list.
fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F);
/// Reads the upper frontier of committed times.
///
///
fn read_upper(&mut self, target: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
target.clear();
target.insert(Default::default());
self.map_batches(|batch| {
target.clear();
for time in batch.upper().iter().cloned() {
target.insert(time);
}
});
}
/// Advances `upper` by any empty batches.
///
/// An empty batch whose `batch.lower` bound equals the current
/// contents of `upper` will advance `upper` to `batch.upper`.
/// Taken across all batches, this should advance `upper` across
/// empty batch regions.
fn advance_upper(&mut self, upper: &mut Antichain<Self::Time>)
where
Self::Time: Timestamp,
{
self.map_batches(|batch| {
if batch.is_empty() && batch.lower() == upper.elements() {
upper.clear();
upper.extend(batch.upper().iter().cloned());
}
});
}
}
/// An append-only collection of `(key, val, time, diff)` tuples.
///
/// The trace must pretend to look like a collection of `(Key, Val, Time, isize)` tuples, but is permitted
/// to introduce new types `KeyRef`, `ValRef`, and `TimeRef` which can be dereference to the types above.
///
/// The trace must be constructable from, and navigable by the `Key`, `Val`, `Time` types, but does not need
/// to return them.
pub trait Trace : TraceReader
where <Self as TraceReader>::Batch: Batch<Self::Key, Self::Val, Self::Time, Self::R> {
/// Allocates a new empty trace.
fn new(
info: ::timely::dataflow::operators::generic::OperatorInfo,
logging: Option<::logging::Logger>,
activator: Option<timely::scheduling::activate::Activator>,
) -> Self;
/// Exert merge effort, even without updates.
fn exert(&mut self, effort: &mut isize);
/// Introduces a batch of updates to the trace.
///
/// Batches describe the time intervals they contain, and they should be added to the trace in contiguous
/// intervals. If a batch arrives with a lower bound that does not equal the upper bound of the most recent
/// addition, the trace will add an empty batch. It is an error to then try to populate that region of time.
///
/// This restriction could be relaxed, especially if we discover ways in which batch interval order could
/// commute. For now, the trace should complain, to the extent that it cares about contiguous intervals.
fn insert(&mut self, batch: Self::Batch);
/// Introduces an empty batch concluding the trace.
///
/// This method should be logically equivalent to introducing an empty batch whose lower frontier equals
/// the upper frontier of the most recently introduced batch, and whose upper frontier is empty.
fn close(&mut self);
}
/// A batch of updates whose contents may be read.
///
/// This is a restricted interface to batches of updates, which support the reading of the batch's contents,
/// but do not expose ways to construct the batches. This trait is appropriate for views of the batch, and is
/// especially useful for views derived from other sources in ways that prevent the construction of batches
/// from the type of data in the view (for example, filtered views, or views with extended time coordinates).
pub trait BatchReader<K, V, T, R> where Self: ::std::marker::Sized
{
/// The type used to enumerate the batch's contents.
type Cursor: Cursor<K, V, T, R, Storage=Self>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor;
/// The number of updates in the batch.
fn len(&self) -> usize;
/// True if the batch is empty.
fn is_empty(&self) -> bool { self.len() == 0 }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T>;
/// All times in the batch are greater or equal to an element of `lower`.
fn lower(&self) -> &[T] |
/// All times in the batch are not greater or equal to any element of `upper`.
fn upper(&self) -> &[T] { self.description().upper() }
}
/// An immutable collection of updates.
pub trait Batch<K, V, T, R> : BatchReader<K, V, T, R> where Self: ::std::marker::Sized {
/// A type used to assemble batches from disordered updates.
type Batcher: Batcher<K, V, T, R, Self>;
/// A type used to assemble batches from ordered update sequences.
type Builder: Builder<K, V, T, R, Self>;
/// A type used to progressively merge batches.
type Merger: Merger<K, V, T, R, Self>;
/// Initiates the merging of consecutive batches.
///
/// The result of this method can be exercised to eventually produce the same result
/// that a call to `self.merge(other)` would produce, but it can be done in a measured
/// fashion. This can help to avoid latency spikes where a large merge needs to happen.
fn begin_merge(&self, other: &Self) -> Self::Merger {
Self::Merger::new(self, other)
}
///
fn empty(lower: &[T], upper: &[T], since: &[T]) -> Self {
<Self::Builder>::new().done(lower, upper, since)
}
}
/// Functionality for collecting and batching updates.
pub trait Batcher<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates a new empty batcher.
fn new() -> Self;
/// Adds an unordered batch of elements to the batcher.
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>);
/// Returns all updates not greater or equal to an element of `upper`.
fn seal(&mut self, upper: &[T]) -> Output;
/// Returns the lower envelope of contained update times.
fn frontier(&mut self) -> &[T];
}
/// Functionality for building batches from ordered update sequences.
pub trait Builder<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Allocates an empty builder.
fn new() -> Self;
/// Allocates an empty builder with some capacity.
fn with_capacity(cap: usize) -> Self;
/// Adds an element to the batch.
fn push(&mut self, element: (K, V, T, R));
/// Adds an ordered sequence of elements to the batch.
fn extend<I: Iterator<Item=(K,V,T,R)>>(&mut self, iter: I) {
for item in iter { self.push(item); }
}
/// Completes building and returns the batch.
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Output;
}
/// Represents a merge in progress.
pub trait Merger<K, V, T, R, Output: Batch<K, V, T, R>> {
/// Creates a new merger to merge the supplied batches.
fn new(source1: &Output, source2: &Output) -> Self;
/// Perform some amount of work, decrementing `fuel`.
///
/// If `fuel` is non-zero after the call, the merging is complete and
/// one should call `done` to extract the merged results.
fn work(&mut self, source1: &Output, source2: &Output, frontier: &Option<Vec<T>>, fuel: &mut isize);
/// Extracts merged results.
///
/// This method should only be called after `work` has been called and
/// has not brought `fuel` to zero. Otherwise, the merge is still in
/// progress.
fn done(self) -> Output;
}
/// Blanket implementations for reference counted batches.
pub mod rc_blanket_impls {
use std::rc::Rc;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>> BatchReader<K,V,T,R> for Rc<B> {
/// The type used to enumerate the batch's contents.
type Cursor = RcBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
RcBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct RcBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> RcBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
RcBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for RcBatchCursor<K, V, T, R, B> {
type Storage = Rc<B>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>> Batch<K, V, T, R> for Rc<B> {
type Batcher = RcBatcher<K, V, T, R, B>;
type Builder = RcBuilder<K, V, T, R, B>;
type Merger = RcMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct RcBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>> Batcher<K, V, T, R, Rc<B>> for RcBatcher<K,V,T,R,B> {
fn new() -> Self { RcBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Rc<B> { Rc::new(self.batcher.seal(upper)) }
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct RcBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>> Builder<K, V, T, R, Rc<B>> for RcBuilder<K,V,T,R,B> {
fn new() -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { RcBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Rc<B> { Rc::new(self.builder.done(lower, upper, since)) }
}
/// Wrapper type for merging reference counted batches.
pub struct RcMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>> Merger<K, V, T, R, Rc<B>> for RcMerger<K,V,T,R,B> {
fn new(source1: &Rc<B>, source2: &Rc<B>) -> Self { RcMerger { merger: B::begin_merge(source1, source2) } }
fn work(&mut self, source1: &Rc<B>, source2: &Rc<B>, frontier: &Option<Vec<T>>, fuel: &mut isize) { self.merger.work(source1, source2, frontier, fuel) }
fn done(self) -> Rc<B> { Rc::new(self.merger.done()) }
}
}
/// Blanket implementations for reference counted batches.
pub mod abomonated_blanket_impls {
extern crate abomonation;
use abomonation::{Abomonation, measure};
use abomonation::abomonated::Abomonated;
use super::{Batch, BatchReader, Batcher, Builder, Merger, Cursor, Description};
impl<K, V, T, R, B: BatchReader<K,V,T,R>+Abomonation> BatchReader<K,V,T,R> for Abomonated<B, Vec<u8>> {
/// The type used to enumerate the batch's contents.
type Cursor = AbomonatedBatchCursor<K, V, T, R, B>;
/// Acquires a cursor to the batch's contents.
fn cursor(&self) -> Self::Cursor {
AbomonatedBatchCursor::new((&**self).cursor())
}
/// The number of updates in the batch.
fn len(&self) -> usize { (&**self).len() }
/// Describes the times of the updates in the batch.
fn description(&self) -> &Description<T> { (&**self).description() }
}
/// Wrapper to provide cursor to nested scope.
pub struct AbomonatedBatchCursor<K, V, T, R, B: BatchReader<K, V, T, R>> {
phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursor: B::Cursor,
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>> AbomonatedBatchCursor<K, V, T, R, B> {
fn new(cursor: B::Cursor) -> Self {
AbomonatedBatchCursor {
cursor,
phantom: ::std::marker::PhantomData,
}
}
}
impl<K, V, T, R, B: BatchReader<K, V, T, R>+Abomonation> Cursor<K, V, T, R> for AbomonatedBatchCursor<K, V, T, R, B> {
type Storage = Abomonated<B, Vec<u8>>;
#[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
#[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }
#[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
#[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }
#[inline]
fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, logic: L) {
self.cursor.map_times(storage, logic)
}
#[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
#[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }
#[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
#[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }
#[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
#[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}
/// An immutable collection of updates.
impl<K,V,T,R,B: Batch<K,V,T,R>+Abomonation> Batch<K, V, T, R> for Abomonated<B, Vec<u8>> {
type Batcher = AbomonatedBatcher<K, V, T, R, B>;
type Builder = AbomonatedBuilder<K, V, T, R, B>;
type Merger = AbomonatedMerger<K, V, T, R, B>;
}
/// Wrapper type for batching reference counted batches.
pub struct AbomonatedBatcher<K,V,T,R,B:Batch<K,V,T,R>> { batcher: B::Batcher }
/// Functionality for collecting and batching updates.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Batcher<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBatcher<K,V,T,R,B> {
fn new() -> Self { AbomonatedBatcher { batcher: <B::Batcher as Batcher<K,V,T,R,B>>::new() } }
fn push_batch(&mut self, batch: &mut Vec<((K, V), T, R)>) { self.batcher.push_batch(batch) }
fn seal(&mut self, upper: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.batcher.seal(upper);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
fn frontier(&mut self) -> &[T] { self.batcher.frontier() }
}
/// Wrapper type for building reference counted batches.
pub struct AbomonatedBuilder<K,V,T,R,B:Batch<K,V,T,R>> { builder: B::Builder }
/// Functionality for building batches from ordered update sequences.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Builder<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedBuilder<K,V,T,R,B> {
fn new() -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::new() } }
fn with_capacity(cap: usize) -> Self { AbomonatedBuilder { builder: <B::Builder as Builder<K,V,T,R,B>>::with_capacity(cap) } }
fn push(&mut self, element: (K, V, T, R)) { self.builder.push(element) }
fn done(self, lower: &[T], upper: &[T], since: &[T]) -> Abomonated<B, Vec<u8>> {
let batch = self.builder.done(lower, upper, since);
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
/// Wrapper type for merging reference counted batches.
pub struct AbomonatedMerger<K,V,T,R,B:Batch<K,V,T,R>> { merger: B::Merger }
/// Represents a merge in progress.
impl<K,V,T,R,B:Batch<K,V,T,R>+Abomonation> Merger<K, V, T, R, Abomonated<B,Vec<u8>>> for AbomonatedMerger<K,V,T,R,B> {
fn new(source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>) -> Self {
AbomonatedMerger { merger: B::begin_merge(source1, source2) }
}
fn work(&mut self, source1: &Abomonated<B,Vec<u8>>, source2: &Abomonated<B,Vec<u8>>, frontier: &Option<Vec<T>>, fuel: &mut isize) {
self.merger.work(source1, source2, frontier, fuel)
}
fn done(self) -> Abomonated<B, Vec<u8>> {
let batch = self.merger.done();
let mut bytes = Vec::with_capacity(measure(&batch));
unsafe { abomonation::encode(&batch, &mut bytes).unwrap() };
unsafe { Abomonated::<B,_>::new(bytes).unwrap() }
}
}
}
| { self.description().lower() } | identifier_body |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <derekchiang93@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn | (args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION);
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if!matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator);
}
} else {
println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
}
| uumain | identifier_name |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <derekchiang93@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION);
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if!matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator); | println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
} | }
} else { | random_line_split |
dirname.rs | #![crate_name = "uu_dirname"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Derek Chiang <derekchiang93@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
use std::path::Path;
static NAME: &'static str = "dirname";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 |
print!("{}", opts.usage(&msg));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let separator = if matches.opt_present("zero") {"\0"} else {"\n"};
if!matches.free.is_empty() {
for path in &matches.free {
let p = Path::new(path);
match p.parent() {
Some(d) => {
if d.components().next() == None {
print!(".")
} else {
print!("{}", d.to_string_lossy());
}
}
None => {
if p.is_absolute() {
print!("/");
} else {
print!(".");
}
}
}
print!("{}", separator);
}
} else {
println!("{0}: missing operand", NAME);
println!("Try '{0} --help' for more information.", NAME);
return 1;
}
0
}
| {
let mut opts = getopts::Options::new();
opts.optflag("z", "zero", "separate output with NUL rather than newline");
opts.optflag("", "help", "display this help and exit");
opts.optflag("", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("Invalid options\n{}", f)
};
if matches.opt_present("help") {
let msg = format!("{0} {1} - strip last component from file name
Usage:
{0} [OPTION] NAME...
Output each NAME with its last non-slash component and trailing slashes
removed; if NAME contains no /'s, output '.' (meaning the current
directory).", NAME, VERSION); | identifier_body |
error.rs | use common::document::ContainerDocument;
use snafu::Snafu;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Failed to parse {} document: {}", target_type, source))]
Deserialization {
target_type: &'static str,
source: serde_json::Error,
},
#[snafu(display("Document Retrieval Error: {}", source))]
DocumentRetrievalError { source: Box<dyn std::error::Error> },
#[snafu(display("Index Creation Error: {}", source))]
IndexCreation { source: Box<dyn std::error::Error> },
#[snafu(display("Index Publication Error: {}", source))]
IndexPublication { source: Box<dyn std::error::Error> },
#[snafu(display("Index Optimization Error: {}", source))]
IndexOptimization { source: Box<dyn std::error::Error> },
#[snafu(display("Storage Connection Error: {}", source))]
StorageConnection { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Insertion Error: {}", source))]
DocumentStreamInsertion { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Update Error: {}", source))]
DocumentStreamUpdate { source: Box<dyn std::error::Error> },
#[snafu(display("Expected Index: {}", index))]
ExpectedIndex { index: String },
#[snafu(display("Configuration Error: {}", source))]
Configuration { source: config::ConfigError },
#[snafu(display("Status Error: {}", source))]
Status { source: Box<dyn std::error::Error> },
#[snafu(display("Backend Configuration Error: {}", source))]
BackendConfiguration { source: Box<dyn std::error::Error> },
}
impl Error {
pub fn | <T: ContainerDocument>(err: serde_json::Error) -> Self {
Self::Deserialization {
target_type: T::static_doc_type(),
source: err,
}
}
}
| from_deserialization | identifier_name |
error.rs | use common::document::ContainerDocument;
use snafu::Snafu;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Failed to parse {} document: {}", target_type, source))]
Deserialization {
target_type: &'static str,
source: serde_json::Error,
},
#[snafu(display("Document Retrieval Error: {}", source))]
DocumentRetrievalError { source: Box<dyn std::error::Error> },
#[snafu(display("Index Creation Error: {}", source))]
IndexCreation { source: Box<dyn std::error::Error> },
#[snafu(display("Index Publication Error: {}", source))]
IndexPublication { source: Box<dyn std::error::Error> },
#[snafu(display("Index Optimization Error: {}", source))]
IndexOptimization { source: Box<dyn std::error::Error> },
#[snafu(display("Storage Connection Error: {}", source))]
StorageConnection { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Insertion Error: {}", source))]
DocumentStreamInsertion { source: Box<dyn std::error::Error> },
#[snafu(display("Document Stream Update Error: {}", source))]
DocumentStreamUpdate { source: Box<dyn std::error::Error> },
#[snafu(display("Expected Index: {}", index))]
ExpectedIndex { index: String }, | #[snafu(display("Status Error: {}", source))]
Status { source: Box<dyn std::error::Error> },
#[snafu(display("Backend Configuration Error: {}", source))]
BackendConfiguration { source: Box<dyn std::error::Error> },
}
impl Error {
pub fn from_deserialization<T: ContainerDocument>(err: serde_json::Error) -> Self {
Self::Deserialization {
target_type: T::static_doc_type(),
source: err,
}
}
} |
#[snafu(display("Configuration Error: {}", source))]
Configuration { source: config::ConfigError },
| random_line_split |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
/// | /// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn is_integer(self) -> bool {
true
}
} | /// # Examples
/// ``` | random_line_split |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn | (self) -> bool {
true
}
}
| is_integer | identifier_name |
is_integer.rs | use integer::Integer;
use malachite_base::num::conversion::traits::IsInteger;
impl<'a> IsInteger for &'a Integer {
/// Determines whether an `Integer` is an integer. It always returns `true`.
///
/// $f(x) = \textrm{true}$.
///
/// # Worst-case complexity
/// Constant time and additional memory.
///
/// # Examples
/// ```
/// extern crate malachite_base;
/// extern crate malachite_nz;
///
/// use malachite_base::num::basic::traits::{NegativeOne, One, Zero};
/// use malachite_base::num::conversion::traits::IsInteger;
/// use malachite_nz::integer::Integer;
///
/// assert_eq!(Integer::ZERO.is_integer(), true);
/// assert_eq!(Integer::ONE.is_integer(), true);
/// assert_eq!(Integer::from(100).is_integer(), true);
/// assert_eq!(Integer::NEGATIVE_ONE.is_integer(), true);
/// assert_eq!(Integer::from(-100).is_integer(), true);
/// ```
#[inline]
fn is_integer(self) -> bool |
}
| {
true
} | identifier_body |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <mdsteele@alum.mit.edu> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if!state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> | self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value!= 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() { | identifier_body |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <mdsteele@alum.mit.edu> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if!state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
|
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value!= 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| {
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
} | conditional_block |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <mdsteele@alum.mit.edu> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if!state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn | (&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() {
if kind == 0 {
self.show_next = value!= 0;
} else if kind == 1 {
if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= //
| undo | identifier_name |
view.rs | // +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <mdsteele@alum.mit.edu> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
use super::scenes;
use crate::elements::memory::{MemoryGridView, NextShapeView, FLIP_SLOWDOWN};
use crate::elements::{
FadeStyle, ProgressBar, PuzzleCmd, PuzzleCore, PuzzleView,
};
use crate::gui::{Action, Canvas, Element, Event, Rect, Resources, Sound};
use crate::modes::SOLVED_INFO_TEXT;
use crate::save::{Direction, Game, PuzzleState, ServesState};
// ========================================================================= //
const REMOVE_DELAY: i32 = FLIP_SLOWDOWN * 5 + 20;
const REMOVE_SOUND_AT: i32 = 20 + FLIP_SLOWDOWN * 2;
// ========================================================================= //
pub struct View {
core: PuzzleCore<()>,
grid: MemoryGridView,
next: NextShapeView,
progress: ProgressBar,
progress_adjust: u32,
remove_countdown: i32,
show_next: bool,
}
impl View {
pub fn new(
resources: &mut Resources,
visible: Rect,
state: &ServesState,
) -> View {
let mut core = {
let fade = (FadeStyle::LeftToRight, FadeStyle::LeftToRight);
let intro = scenes::compile_intro_scene(resources);
let outro = scenes::compile_outro_scene(resources);
PuzzleCore::new(resources, visible, state, fade, intro, outro)
};
core.add_extra_scene(scenes::compile_argony_midscene(resources));
core.add_extra_scene(scenes::compile_mezure_midscene(resources));
View {
core,
grid: MemoryGridView::new(
resources,
"memory/serves",
(256, 176),
state.grid(),
),
next: NextShapeView::new(resources, "memory/serves", (96, 208)),
progress: ProgressBar::new(
(104, 176),
Direction::East,
80,
(191, 191, 0),
),
progress_adjust: 0,
remove_countdown: 0,
show_next: false,
}
}
}
impl Element<Game, PuzzleCmd> for View {
fn draw(&self, game: &Game, canvas: &mut Canvas) {
let state = &game.if_memory_serves;
self.core.draw_back_layer(canvas);
if!state.is_solved() {
let value = state.current_step() as u32 + self.progress_adjust;
let maximum = state.total_num_steps() as u32;
self.progress.draw(value, maximum, canvas);
}
self.grid.draw(state.grid(), canvas);
self.core.draw_middle_layer(canvas);
if self.show_next {
self.next.draw(&state.next_shape(), canvas);
}
self.core.draw_front_layer(canvas, state);
}
fn handle_event(
&mut self,
event: &Event,
game: &mut Game,
) -> Action<PuzzleCmd> {
let state = &mut game.if_memory_serves;
let mut action = self.core.handle_event(event, state);
if event == &Event::ClockTick && self.remove_countdown > 0 {
self.remove_countdown -= 1;
if self.remove_countdown == REMOVE_SOUND_AT {
let symbol = self.grid.flip_symbol();
let sound = if state.can_remove_symbol(symbol) {
self.progress_adjust = 1;
Sound::mid_puzzle_chime()
} else {
Sound::talk_annoyed_hi()
};
action.merge(Action::redraw().and_play_sound(sound));
}
if self.remove_countdown == 0 {
self.progress_adjust = 0;
state.remove_symbol(self.grid.flip_symbol());
self.grid.clear_flip();
if state.is_solved() {
self.core.begin_outro_scene();
action = action.and_return(PuzzleCmd::Save);
}
action.also_redraw();
}
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction =
self.next.handle_event(event, &mut state.next_shape());
if let Some(&pt) = subaction.value() {
let (col, row) = self.grid.coords_for_point(pt);
if let Some(symbol) = state.try_place_shape(col, row) {
action.also_play_sound(Sound::device_drop());
self.grid.place_symbol(symbol);
}
}
action.merge(subaction.but_no_value());
}
if (!action.should_stop() && self.remove_countdown == 0)
|| event == &Event::ClockTick
{
let subaction = self.grid.handle_event(event, state.grid_mut());
if let Some(&symbol) = subaction.value() {
action.also_play_sound(Sound::device_rotate());
self.grid.reveal_symbol(symbol);
self.remove_countdown = REMOVE_DELAY;
}
action.merge(subaction.but_no_value());
}
if!action.should_stop() {
self.core.begin_character_scene_on_click(event);
}
action
}
}
impl PuzzleView for View {
fn info_text(&self, game: &Game) -> &'static str {
if game.if_memory_serves.is_solved() {
SOLVED_INFO_TEXT
} else {
INFO_BOX_TEXT
}
}
fn undo(&mut self, _: &mut Game) {}
fn redo(&mut self, _: &mut Game) {}
fn reset(&mut self, game: &mut Game) {
self.core.clear_undo_redo();
game.if_memory_serves.reset();
}
fn solve(&mut self, game: &mut Game) {
game.if_memory_serves.solve();
self.core.begin_outro_scene();
}
fn drain_queue(&mut self) {
for (kind, value) in self.core.drain_queue() { | if value >= 0 && (value as usize) < LETTERS.len() {
let (col, row, letter) = LETTERS[value as usize];
self.grid.add_letter(col, row, letter);
}
}
}
}
}
// ========================================================================= //
#[cfg_attr(rustfmt, rustfmt_skip)]
const LETTERS: &[(i32, i32, char)] = &[
(1, 0, 'I'), (1, 1, 'N'), (1, 2, 'T'), (1, 3, 'E'),
(3, 0, 'C'), (3, 1, 'O'), (3, 2, 'N'), (3, 3, 'S'),
(5, 0, 'I'), (5, 1, 'N'), (5, 2, 'D'), (5, 3, 'E'),
];
const INFO_BOX_TEXT: &str = "\
Your goal is to place (and later remove) each group of tiles on
the grid.
When a group of tiles appears on the left, use $M{your finger}{the mouse} to
drag it onto the grid on the right. The tiles will then flip over;
the backs of the tiles will be green.
Tiles will eventually turn from green to gray; once all tiles
with a given symbol are gray, they may be safely removed.
You can remove a group of tiles at any time by $M{tapp}{click}ing any of
the tiles on the grid that had that symbol. However, if you
accidentally remove a tile that's still green, you will have to
start over.
$M{Tap}{Click} on a character in the scene to hear their words of wisdom.";
// ========================================================================= // | if kind == 0 {
self.show_next = value != 0;
} else if kind == 1 { | random_line_split |
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. | //
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct Srgb<T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> {
Srgb { r: r, g: g, b: b }
}
} | // You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0 | random_line_split |
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct Srgb<T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> |
}
| {
Srgb { r: r, g: g, b: b }
} | identifier_body |
srgb.rs | // Copyright 2013 The color-rs developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct | <T> { pub r: T, pub g: T, pub b: T }
impl<T> Srgb<T> {
#[inline]
pub fn new(r: T, g: T, b: T) -> Srgb<T> {
Srgb { r: r, g: g, b: b }
}
}
| Srgb | identifier_name |
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
/// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur.
// TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn | (&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance {
result = Some((isect, obj));
distance = isect.distance;
}
}
}
}
result
}
}
| intersect | identifier_name |
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
| // TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn intersect(&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance {
result = Some((isect, obj));
distance = isect.distance;
}
}
}
}
result
}
} | /// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur. | random_line_split |
scene.rs | // Robigo Luculenta -- Proof of concept spectral path tracer in Rust
// Copyright (C) 2014-2015 Ruud van Asseldonk
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use camera::Camera;
use intersection::Intersection;
use object::Object;
use ray::Ray;
/// A collection of objects.
pub struct Scene {
/// All the renderable objects in the scene.
pub objects: Vec<Object>,
/// A function that returns the camera through which the scene
/// will be seen. The function takes one parameter, the time (in
/// the range 0.0 - 1.0), which will be sampled randomly to create
/// effects like motion blur and zoom blur.
// TODO: apparently there is no such thing as an immutable closure
// any more, but I'd prefer to be able to use a pure function here,
// which might be a closure.
pub get_camera_at_time: fn (f32) -> Camera
}
impl Scene {
/// Intersects the specified ray with the scene.
pub fn intersect(&self, ray: &Ray) -> Option<(Intersection, &Object)> {
// Assume Nothing is found, and that Nothing is Very Far Away (tm).
let mut result = None;
let mut distance = 1.0e12f32;
// Then intersect all surfaces.
for obj in &self.objects {
match obj.surface.intersect(ray) {
None => { },
Some(isect) => {
// If there is an intersection, and if it is nearer than a
// previous one, use it.
if isect.distance < distance |
}
}
}
result
}
}
| {
result = Some((isect, obj));
distance = isect.distance;
} | conditional_block |
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| {
{
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn | <'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
}
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
}
| annotations | identifier_name |
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| {
{
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> |
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
}
| {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
} | identifier_body |
annotateable.rs | //
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <mail@beyermatthias.de> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use toml::Value;
use libimagstore::store::Entry;
use libimagstore::store::FileLockEntry;
use libimagstore::store::Store;
use libimagstore::storeid::IntoStoreId;
use libimagstore::storeid::StoreIdIterator;
use libimagentrylink::internal::InternalLinker;
use libimagentryutil::isa::Is;
use libimagentryutil::isa::IsKindHeaderPathProvider;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use error::Result;
use error::AnnotationErrorKind as AEK;
use error::AnnotationError as AE;
use error::ResultExt;
use iter::*;
pub trait Annotateable {
fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>>;
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>>;
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>>;
fn is_annotation(&self) -> Result<bool>;
}
provide_kindflag_path!(IsAnnotation, "annotation.is_annotation");
impl Annotateable for Entry {
/// Annotate an entry, returns the new entry which is used to annotate | {
let _ = anno.set_isflag::<IsAnnotation>()?;
let _ = anno
.get_header_mut()
.insert("annotation.name", Value::String(String::from(ann_name)))?;
}
Ok(anno)
})
.and_then(|mut anno| {
anno.add_internal_link(self)
.chain_err(|| AEK::LinkingError)
.map(|_| anno)
})
}
/// Checks the current entry for all annotations and removes the one where the name is
/// `ann_name`, which is then returned
fn denotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<Option<FileLockEntry<'a>>> {
for annotation in self.annotations(store)? {
let mut anno = annotation?;
let name = match anno.get_header().read("annotation.name")? {
None => continue,
Some(val) => match *val {
Value::String(ref name) => name.clone(),
_ => return Err(AE::from_kind(AEK::HeaderTypeError)),
},
};
if name == ann_name {
let _ = self.remove_internal_link(&mut anno)?;
return Ok(Some(anno));
}
}
Ok(None)
}
/// Get all annotations of an entry
fn annotations<'a>(&self, store: &'a Store) -> Result<AnnotationIter<'a>> {
self.get_internal_links()
.map_err(From::from)
.map(|iter| StoreIdIterator::new(Box::new(iter.map(|e| e.get_store_id().clone()))))
.map(|i| AnnotationIter::new(i, store))
}
fn is_annotation(&self) -> Result<bool> {
self.is::<IsAnnotation>().map_err(From::from)
}
} | fn annotate<'a>(&mut self, store: &'a Store, ann_name: &str) -> Result<FileLockEntry<'a>> {
use module_path::ModuleEntryPath;
store.retrieve(ModuleEntryPath::new(ann_name).into_storeid()?)
.map_err(From::from)
.and_then(|mut anno| { | random_line_split |
textdecoder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::TextDecoderBinding;
use dom::bindings::codegen::Bindings::TextDecoderBinding::TextDecoderMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::str::USVString;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::Encoding;
use encoding::types::{EncodingRef, DecoderTrap};
use encoding::label::encoding_from_whatwg_label;
use js::jsapi::{JSContext, JSObject};
use js::jsapi::JS_GetObjectAsArrayBufferView;
use std::borrow::ToOwned;
use std::ptr;
use std::slice;
#[dom_struct]
pub struct TextDecoder {
reflector_: Reflector,
encoding: EncodingRef,
fatal: bool,
}
impl TextDecoder {
fn new_inherited(encoding: EncodingRef, fatal: bool) -> TextDecoder {
TextDecoder {
reflector_: Reflector::new(),
encoding: encoding,
fatal: fatal,
}
}
fn make_range_error() -> Fallible<Root<TextDecoder>> {
Err(Error::Range("The given encoding is not supported.".to_owned()))
}
pub fn new(global: GlobalRef, encoding: EncodingRef, fatal: bool) -> Root<TextDecoder> {
reflect_dom_object(box TextDecoder::new_inherited(encoding, fatal),
global,
TextDecoderBinding::Wrap)
}
/// https://encoding.spec.whatwg.org/#dom-textdecoder
pub fn Constructor(global: GlobalRef,
label: DOMString,
options: &TextDecoderBinding::TextDecoderOptions)
-> Fallible<Root<TextDecoder>> {
let encoding = match encoding_from_whatwg_label(&label) {
None => return TextDecoder::make_range_error(),
Some(enc) => enc
};
// The rust-encoding crate has WHATWG compatibility, so we are
// guaranteed to have a whatwg_name because we successfully got
// the encoding from encoding_from_whatwg_label.
// Use match + panic! instead of unwrap for better error message
match encoding.whatwg_name() {
None => panic!("Label {} fits valid encoding without valid name", label),
Some("replacement") => return TextDecoder::make_range_error(),
_ => ()
};
Ok(TextDecoder::new(global, encoding, options.fatal))
}
}
impl<'a> TextDecoderMethods for &'a TextDecoder {
fn Encoding(self) -> DOMString {
self.encoding.whatwg_name().unwrap().to_owned()
}
fn Fatal(self) -> bool |
#[allow(unsafe_code)]
fn Decode(self, _cx: *mut JSContext, input: Option<*mut JSObject>)
-> Fallible<USVString> {
let input = match input {
Some(input) => input,
None => return Ok(USVString("".to_owned())),
};
let mut length = 0;
let mut data = ptr::null_mut();
if unsafe { JS_GetObjectAsArrayBufferView(input, &mut length, &mut data).is_null() } {
return Err(Error::Type("Argument to TextDecoder.decode is not an ArrayBufferView".to_owned()));
}
let buffer = unsafe {
slice::from_raw_parts(data as *const _, length as usize)
};
let trap = if self.fatal {
DecoderTrap::Strict
} else {
DecoderTrap::Replace
};
match self.encoding.decode(buffer, trap) {
Ok(s) => Ok(USVString(s)),
Err(_) => Err(Error::Type("Decoding failed".to_owned())),
}
}
}
| {
self.fatal
} | identifier_body |
textdecoder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::TextDecoderBinding;
use dom::bindings::codegen::Bindings::TextDecoderBinding::TextDecoderMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::str::USVString;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::Encoding;
use encoding::types::{EncodingRef, DecoderTrap};
use encoding::label::encoding_from_whatwg_label;
use js::jsapi::{JSContext, JSObject};
use js::jsapi::JS_GetObjectAsArrayBufferView;
use std::borrow::ToOwned;
use std::ptr;
use std::slice;
#[dom_struct]
pub struct TextDecoder {
reflector_: Reflector,
encoding: EncodingRef,
fatal: bool,
}
impl TextDecoder {
fn | (encoding: EncodingRef, fatal: bool) -> TextDecoder {
TextDecoder {
reflector_: Reflector::new(),
encoding: encoding,
fatal: fatal,
}
}
fn make_range_error() -> Fallible<Root<TextDecoder>> {
Err(Error::Range("The given encoding is not supported.".to_owned()))
}
pub fn new(global: GlobalRef, encoding: EncodingRef, fatal: bool) -> Root<TextDecoder> {
reflect_dom_object(box TextDecoder::new_inherited(encoding, fatal),
global,
TextDecoderBinding::Wrap)
}
/// https://encoding.spec.whatwg.org/#dom-textdecoder
pub fn Constructor(global: GlobalRef,
label: DOMString,
options: &TextDecoderBinding::TextDecoderOptions)
-> Fallible<Root<TextDecoder>> {
let encoding = match encoding_from_whatwg_label(&label) {
None => return TextDecoder::make_range_error(),
Some(enc) => enc
};
// The rust-encoding crate has WHATWG compatibility, so we are
// guaranteed to have a whatwg_name because we successfully got
// the encoding from encoding_from_whatwg_label.
// Use match + panic! instead of unwrap for better error message
match encoding.whatwg_name() {
None => panic!("Label {} fits valid encoding without valid name", label),
Some("replacement") => return TextDecoder::make_range_error(),
_ => ()
};
Ok(TextDecoder::new(global, encoding, options.fatal))
}
}
impl<'a> TextDecoderMethods for &'a TextDecoder {
fn Encoding(self) -> DOMString {
self.encoding.whatwg_name().unwrap().to_owned()
}
fn Fatal(self) -> bool {
self.fatal
}
#[allow(unsafe_code)]
fn Decode(self, _cx: *mut JSContext, input: Option<*mut JSObject>)
-> Fallible<USVString> {
let input = match input {
Some(input) => input,
None => return Ok(USVString("".to_owned())),
};
let mut length = 0;
let mut data = ptr::null_mut();
if unsafe { JS_GetObjectAsArrayBufferView(input, &mut length, &mut data).is_null() } {
return Err(Error::Type("Argument to TextDecoder.decode is not an ArrayBufferView".to_owned()));
}
let buffer = unsafe {
slice::from_raw_parts(data as *const _, length as usize)
};
let trap = if self.fatal {
DecoderTrap::Strict
} else {
DecoderTrap::Replace
};
match self.encoding.decode(buffer, trap) {
Ok(s) => Ok(USVString(s)),
Err(_) => Err(Error::Type("Decoding failed".to_owned())),
}
}
}
| new_inherited | identifier_name |
textdecoder.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::TextDecoderBinding;
use dom::bindings::codegen::Bindings::TextDecoderBinding::TextDecoderMethods;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::str::USVString;
use dom::bindings::trace::JSTraceable;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::Encoding;
use encoding::types::{EncodingRef, DecoderTrap};
use encoding::label::encoding_from_whatwg_label;
use js::jsapi::{JSContext, JSObject};
use js::jsapi::JS_GetObjectAsArrayBufferView;
use std::borrow::ToOwned;
use std::ptr;
use std::slice;
#[dom_struct]
pub struct TextDecoder {
reflector_: Reflector,
encoding: EncodingRef,
fatal: bool,
}
impl TextDecoder {
fn new_inherited(encoding: EncodingRef, fatal: bool) -> TextDecoder {
TextDecoder {
reflector_: Reflector::new(),
encoding: encoding,
fatal: fatal,
}
}
fn make_range_error() -> Fallible<Root<TextDecoder>> {
Err(Error::Range("The given encoding is not supported.".to_owned()))
}
pub fn new(global: GlobalRef, encoding: EncodingRef, fatal: bool) -> Root<TextDecoder> {
reflect_dom_object(box TextDecoder::new_inherited(encoding, fatal),
global,
TextDecoderBinding::Wrap)
}
/// https://encoding.spec.whatwg.org/#dom-textdecoder
pub fn Constructor(global: GlobalRef,
label: DOMString,
options: &TextDecoderBinding::TextDecoderOptions)
-> Fallible<Root<TextDecoder>> {
let encoding = match encoding_from_whatwg_label(&label) {
None => return TextDecoder::make_range_error(),
Some(enc) => enc
};
// The rust-encoding crate has WHATWG compatibility, so we are
// guaranteed to have a whatwg_name because we successfully got
// the encoding from encoding_from_whatwg_label.
// Use match + panic! instead of unwrap for better error message
match encoding.whatwg_name() {
None => panic!("Label {} fits valid encoding without valid name", label),
Some("replacement") => return TextDecoder::make_range_error(),
_ => ()
};
Ok(TextDecoder::new(global, encoding, options.fatal))
}
}
impl<'a> TextDecoderMethods for &'a TextDecoder {
fn Encoding(self) -> DOMString {
self.encoding.whatwg_name().unwrap().to_owned()
}
fn Fatal(self) -> bool {
self.fatal
}
#[allow(unsafe_code)]
fn Decode(self, _cx: *mut JSContext, input: Option<*mut JSObject>)
-> Fallible<USVString> {
let input = match input {
Some(input) => input,
None => return Ok(USVString("".to_owned())),
};
let mut length = 0;
let mut data = ptr::null_mut();
if unsafe { JS_GetObjectAsArrayBufferView(input, &mut length, &mut data).is_null() } {
return Err(Error::Type("Argument to TextDecoder.decode is not an ArrayBufferView".to_owned()));
}
let buffer = unsafe {
slice::from_raw_parts(data as *const _, length as usize)
};
let trap = if self.fatal {
DecoderTrap::Strict
} else {
DecoderTrap::Replace
}; | }
} | match self.encoding.decode(buffer, trap) {
Ok(s) => Ok(USVString(s)),
Err(_) => Err(Error::Type("Decoding failed".to_owned())),
} | random_line_split |
cap-clause-move.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
pub fn main() | {
let x = ~3;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let snd_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(snd_move(), y);
let x = ~4;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let lam_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(lam_move(), y);
} | identifier_body | |
cap-clause-move.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
pub fn | () {
let x = ~3;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let snd_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(snd_move(), y);
let x = ~4;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let lam_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(lam_move(), y);
}
| main | identifier_name |
cap-clause-move.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ptr;
pub fn main() {
let x = ~3;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let snd_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(snd_move(), y);
let x = ~4;
let y = ptr::to_unsafe_ptr(&(*x)) as uint;
let lam_move: ~fn() -> uint = || ptr::to_unsafe_ptr(&(*x)) as uint;
assert_eq!(lam_move(), y);
} | random_line_split | |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even thought that's not a
/// reasonable value for a droplet limit, neither is a negative number | pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn name<'a>() -> Cow<'a, str> { "account".into() }
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218 | pub droplet_limit: f64,
pub email: String, | random_line_split |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even thought that's not a
/// reasonable value for a droplet limit, neither is a negative number
pub droplet_limit: f64,
pub email: String,
pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn name<'a>() -> Cow<'a, str> |
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218
| { "account".into() } | identifier_body |
account.rs | // droplet_limit number The total number of droplets the user may have
// email string The email the user has registered for Digital
// Ocean with
// uuid string The universal identifier for this user
// email_verified boolean If true, the user has verified their account
// via email. False otherwise.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response::NotArray;
#[derive(Deserialize, Debug)]
pub struct Account {
/// droplet_limit is a "number" in json, which could be a float, even thought that's not a
/// reasonable value for a droplet limit, neither is a negative number
pub droplet_limit: f64,
pub email: String,
pub uuid: String,
pub email_verified: bool,
}
impl NotArray for Account {}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"Email: {}\n\
Droplet Limit: {:.0}\n\
UUID: {}\n\
E-Mail Verified: {}",
self.email,
self.droplet_limit,
self.uuid,
self.email_verified)
}
}
impl NamedResponse for Account {
fn | <'a>() -> Cow<'a, str> { "account".into() }
}
// TODO: Implement response headers:
// content-type: application/json; charset=utf-8
// status: 200 OK
// ratelimit-limit: 1200
// ratelimit-remaining: 1137
// ratelimit-reset: 1415984218
| name | identifier_name |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn new(script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) |
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_contoller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
}
| {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if !status.success() {
println!(
"ERROR: {} {} failed with error code {}",
self.script_path,
state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
} | identifier_body |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn | (script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if!status.success() {
println!(
"ERROR: {} {} failed with error code {}",
self.script_path,
state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
}
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_contoller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
}
| new | identifier_name |
state_script.rs | use crate::state::*;
use std;
use std::process::Command;
struct StateScript {
script_path: String,
shared_state: SharedState,
state_observer: StateObserver,
}
impl StateScript {
fn new(script_path: &str, shared_state: SharedState) -> StateScript {
let state_observer = shared_state.lock().add_observer();
StateScript {
script_path: String::from(script_path),
shared_state,
state_observer,
}
}
fn run_script(&self, state: StreamState, output: &str) {
let result = Command::new(&self.script_path)
.arg(state.as_str())
.arg(output)
.status();
match result {
Ok(status) => {
if!status.success() {
println!( | state.as_str(),
status.code().unwrap_or(0)
);
}
}
Err(e) => println!("ERROR: Failed to run {}: {}", self.script_path, e),
}
}
fn run(&mut self) {
let mut stream_state;
let mut output_name: String;
{
let state = self.shared_state.lock();
output_name = state.current_output().name.clone();
stream_state = state.state().stream_state;
};
self.run_script(stream_state, output_name.as_str());
loop {
match self.state_observer.recv() {
Ok(StateChange::SelectOutput { output }) => {
output_name = self.shared_state.lock().state().outputs[output]
.name
.clone();
}
Ok(StateChange::SetStreamState {
stream_state: new_stream_state,
}) => {
stream_state = new_stream_state;
}
Ok(_) => continue,
Err(_) => return,
};
self.run_script(stream_state, output_name.as_str());
}
}
}
pub fn start_state_script_contoller(script_path: &str, shared_state: SharedState) {
let mut c = StateScript::new(script_path, shared_state);
std::thread::spawn(move || {
c.run();
});
} | "ERROR: {} {} failed with error code {}",
self.script_path, | random_line_split |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn | (&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code!= 200 && code!= 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex()!= checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| repo | identifier_name |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> | debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code!= 200 && code!= 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex()!= checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut(); | identifier_body |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration. |
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else {
debug!("fast path failed, falling back to a git fetch");
}
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code!= 200 && code!= 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex()!= checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
} | ops::http_handle(self.config)?; | random_line_split |
remote.rs | use std::cell::{RefCell, Ref, Cell};
use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use curl::easy::{Easy, List};
use git2;
use hex::ToHex;
use serde_json;
use url::Url;
use core::{PackageId, SourceId};
use ops;
use sources::git;
use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK};
use util::network;
use util::{FileLock, Filesystem, LazyCell};
use util::{Config, Sha256, ToUrl};
use util::errors::{CargoErrorKind, CargoResult, CargoResultExt};
pub struct RemoteRegistry<'cfg> {
index_path: Filesystem,
cache_path: Filesystem,
source_id: SourceId,
config: &'cfg Config,
handle: LazyCell<RefCell<Easy>>,
tree: RefCell<Option<git2::Tree<'static>>>,
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
}
impl<'cfg> RemoteRegistry<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str)
-> RemoteRegistry<'cfg> {
RemoteRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id: source_id.clone(),
config: config,
tree: RefCell::new(None),
handle: LazyCell::new(),
repo: LazyCell::new(),
head: Cell::new(None),
}
}
fn easy(&self) -> CargoResult<&RefCell<Easy>> {
self.handle.get_or_try_init(|| {
ops::http_handle(self.config).map(RefCell::new)
})
}
fn repo(&self) -> CargoResult<&git2::Repository> {
self.repo.get_or_try_init(|| {
let path = self.index_path.clone().into_path_unlocked();
// Fast path without a lock
if let Ok(repo) = git2::Repository::open(&path) {
return Ok(repo)
}
// Ok, now we need to lock and try the whole thing over again.
let lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
match git2::Repository::open(&path) {
Ok(repo) => Ok(repo),
Err(_) => {
let _ = lock.remove_siblings();
// Note that we'd actually prefer to use a bare repository
// here as we're not actually going to check anything out.
// All versions of Cargo, though, share the same CARGO_HOME,
// so for compatibility with older Cargo which *does* do
// checkouts we make sure to initialize a new full
// repository (not a bare one).
//
// We should change this to `init_bare` whenever we feel
// like enough time has passed or if we change the directory
// that the folder is located in, such as by changing the
// hash at the end of the directory.
Ok(git2::Repository::init(&path)?)
}
}
})
}
fn head(&self) -> CargoResult<git2::Oid> {
if self.head.get().is_none() {
let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
self.head.set(Some(oid));
}
Ok(self.head.get().unwrap())
}
fn tree(&self) -> CargoResult<Ref<git2::Tree>> {
{
let tree = self.tree.borrow();
if tree.is_some() {
return Ok(Ref::map(tree, |s| s.as_ref().unwrap()))
}
}
let repo = self.repo()?;
let commit = repo.find_commit(self.head()?)?;
let tree = commit.tree()?;
// Unfortunately in libgit2 the tree objects look like they've got a
// reference to the repository object which means that a tree cannot
// outlive the repository that it came from. Here we want to cache this
// tree, though, so to accomplish this we transmute it to a static
// lifetime.
//
// Note that we don't actually hand out the static lifetime, instead we
// only return a scoped one from this function. Additionally the repo
// we loaded from (above) lives as long as this object
// (`RemoteRegistry`) so we then just need to ensure that the tree is
// destroyed first in the destructor, hence the destructor on
// `RemoteRegistry` below.
let tree = unsafe {
mem::transmute::<git2::Tree, git2::Tree<'static>>(tree)
};
*self.tree.borrow_mut() = Some(tree);
Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
}
}
impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn load(&self,
_root: &Path,
path: &Path,
data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> {
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
let repo = self.repo()?;
let tree = self.tree()?;
let entry = tree.get_path(path)?;
let object = entry.to_object(&repo)?;
let blob = match object.as_blob() {
Some(blob) => blob,
None => bail!("path `{}` is not a blob in the git repo", path.display()),
};
data(blob.content())
}
fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
self.repo()?; // create intermediate dirs and initialize the repo
let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
let mut config = None;
self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(&json)?);
Ok(())
})?;
Ok(config)
}
fn update_index(&mut self) -> CargoResult<()> {
// Ensure that we'll actually be able to acquire an HTTP handle later on
// once we start trying to download crates. This will weed out any
// problems with `.cargo/config` configuration related to HTTP.
//
// This way if there's a problem the error gets printed before we even
// hit the index, which may not actually read this configuration.
ops::http_handle(self.config)?;
let repo = self.repo()?;
let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK),
self.config,
"the registry index")?;
self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url()))?;
let mut needs_fetch = true;
if self.source_id.url().host_str() == Some("github.com") {
if let Ok(oid) = self.head() {
let mut handle = self.easy()?.borrow_mut();
debug!("attempting github fast path for {}",
self.source_id.url());
if github_up_to_date(&mut handle, self.source_id.url(), &oid) {
needs_fetch = false;
} else |
}
}
if needs_fetch {
// git fetch origin master
let url = self.source_id.url().to_string();
let refspec = "refs/heads/master:refs/remotes/origin/master";
git::fetch(&repo, &url, refspec, self.config).chain_err(|| {
format!("failed to fetch `{}`", url)
})?;
}
self.head.set(None);
*self.tree.borrow_mut() = None;
Ok(())
}
fn download(&mut self, pkg: &PackageId, checksum: &str)
-> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
let path = Path::new(&filename);
// Attempt to open an read-only copy first to avoid an exclusive write
// lock and also work with read-only filesystems. Note that we check the
// length of the file like below to handle interrupted downloads.
//
// If this fails then we fall through to the exclusive path where we may
// have to redownload the file.
if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) {
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
}
let mut dst = self.cache_path.open_rw(path, self.config, &filename)?;
let meta = dst.file().metadata()?;
if meta.len() > 0 {
return Ok(dst)
}
self.config.shell().status("Downloading", pkg)?;
let config = self.config()?.unwrap();
let mut url = config.dl.to_url()?;
url.path_segments_mut().unwrap()
.push(pkg.name())
.push(&pkg.version().to_string())
.push("download");
// TODO: don't download into memory, but ensure that if we ctrl-c a
// download we should resume either from the start or the middle
// on the next time
let url = url.to_string();
let mut handle = self.easy()?.borrow_mut();
handle.get(true)?;
handle.url(&url)?;
handle.follow_location(true)?;
let mut state = Sha256::new();
let mut body = Vec::new();
network::with_retry(self.config, || {
state = Sha256::new();
body = Vec::new();
{
let mut handle = handle.transfer();
handle.write_function(|buf| {
state.update(buf);
body.extend_from_slice(buf);
Ok(buf.len())
})?;
handle.perform()?;
}
let code = handle.response_code()?;
if code!= 200 && code!= 0 {
let url = handle.effective_url()?.unwrap_or(&url);
Err(CargoErrorKind::HttpNot200(code, url.to_string()).into())
} else {
Ok(())
}
})?;
// Verify what we just downloaded
if state.finish().to_hex()!= checksum {
bail!("failed to verify the checksum of `{}`", pkg)
}
dst.write_all(&body)?;
dst.seek(SeekFrom::Start(0))?;
Ok(dst)
}
}
impl<'cfg> Drop for RemoteRegistry<'cfg> {
fn drop(&mut self) {
// Just be sure to drop this before our other fields
self.tree.borrow_mut().take();
}
}
/// Updating the index is done pretty regularly so we want it to be as fast as
/// possible. For registries hosted on github (like the crates.io index) there's
/// a fast path available to use [1] to tell us that there's no updates to be
/// made.
///
/// This function will attempt to hit that fast path and verify that the `oid`
/// is actually the current `master` branch of the repository. If `true` is
/// returned then no update needs to be performed, but if `false` is returned
/// then the standard update logic still needs to happen.
///
/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
///
/// Note that this function should never cause an actual failure because it's
/// just a fast path. As a result all errors are ignored in this function and we
/// just return a `bool`. Any real errors will be reported through the normal
/// update path above.
fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool {
macro_rules! try {
($e:expr) => (match $e {
Some(e) => e,
None => return false,
})
}
// This expects github urls in the form `github.com/user/repo` and nothing
// else
let mut pieces = try!(url.path_segments());
let username = try!(pieces.next());
let repo = try!(pieces.next());
if pieces.next().is_some() {
return false
}
let url = format!("https://api.github.com/repos/{}/{}/commits/master",
username, repo);
try!(handle.get(true).ok());
try!(handle.url(&url).ok());
try!(handle.useragent("cargo").ok());
let mut headers = List::new();
try!(headers.append("Accept: application/vnd.github.3.sha").ok());
try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok());
try!(handle.http_headers(headers).ok());
try!(handle.perform().ok());
try!(handle.response_code().ok()) == 304
}
| {
debug!("fast path failed, falling back to a git fetch");
} | conditional_block |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
fn main() {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 |
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
//...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
//...
// END rustc.foo.Inline.after.mir
| {
let x = |_t, _q| _t;
x(q, q)
} | identifier_body |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
fn | () {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 {
let x = |_t, _q| _t;
x(q, q)
}
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
//...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
//...
// END rustc.foo.Inline.after.mir
| main | identifier_name |
inline-closure.rs | // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z span_free_formats
// Tests that MIR inliner can handle closure arguments. (#45894)
fn main() {
println!("{}", foo(0, 14));
}
fn foo<T: Copy>(_t: T, q: i32) -> i32 {
let x = |_t, _q| _t; | x(q, q)
}
// END RUST SOURCE
// START rustc.foo.Inline.after.mir
//...
// bb0: {
// ...
// _3 = [closure@NodeId(39)];
// ...
// _4 = &_3;
// ...
// _6 = _2;
// ...
// _7 = _2;
// _5 = (move _6, move _7);
// _8 = move (_5.0: i32);
// _9 = move (_5.1: i32);
// _0 = _8;
// ...
// return;
// }
//...
// END rustc.foo.Inline.after.mir | random_line_split | |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> |
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn make_totp_helper(secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
match make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs() ) {
Ok(d) => Ok(d),
Err(err) => return Err(err)
}
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283);
assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
}
| {
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
} | identifier_body |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> {
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
}
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn | (secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
match make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs() ) {
Ok(d) => Ok(d),
Err(err) => return Err(err)
}
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283);
assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
}
| make_totp_helper | identifier_name |
lib.rs | #![crate_name="otp"]
#![crate_type="lib"]
use std::time::{SystemTime, SystemTimeError};
use std::convert::TryInto;
use data_encoding::{BASE32_NOPAD, DecodeError};
use err_derive::Error;
use ring::hmac;
#[derive(Debug, Error)]
pub enum Error {
#[error(display="invalid time provided")]
InvalidTimeError(#[error(source)] SystemTimeError),
#[error(display="invalid digest provided: {:?}", _0)]
InvalidDigest(Vec<u8>),
#[error(display="invalid secret provided")]
InvalidSecret(#[error(source)] DecodeError)
}
/// Decodes a secret (given as an RFC4648 base32-encoded ASCII string)
/// into a byte string
fn decode_secret(secret: &str) -> Result<Vec<u8>, DecodeError> {
BASE32_NOPAD.decode(secret.as_bytes())
}
/// Calculates the HMAC digest for the given secret and counter.
fn calc_digest(decoded_secret: &[u8], counter: u64) -> hmac::Tag {
let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, decoded_secret);
hmac::sign(&key, &counter.to_be_bytes())
}
/// Encodes the HMAC digest into a 6-digit integer.
fn encode_digest(digest: &[u8]) -> Result<u32, Error> {
let offset = match digest.last() {
Some(x) => *x & 0xf,
None => return Err(Error::InvalidDigest(Vec::from(digest)))
} as usize;
let code_bytes: [u8; 4] = match digest[offset..offset+4].try_into() {
Ok(x) => x,
Err(_) => return Err(Error::InvalidDigest(Vec::from(digest)))
};
let code = u32::from_be_bytes(code_bytes);
Ok((code & 0x7fffffff) % 1_000_000)
}
/// Performs the [HMAC-based One-time Password Algorithm](http://en.wikipedia.org/wiki/HMAC-based_One-time_Password_Algorithm)
/// (HOTP) given an RFC4648 base32 encoded secret, and an integer counter.
pub fn make_hotp(secret: &str, counter: u64) -> Result<u32, Error> {
let decoded = decode_secret(secret)?;
encode_digest(calc_digest(decoded.as_slice(), counter).as_ref())
}
/// Helper function for `make_totp` to make it testable. Note that times
/// before Unix epoch are not supported.
fn make_totp_helper(secret: &str, time_step: u64, skew: i64, time: u64) -> Result<u32, Error> {
let counter = ((time as i64 + skew) as u64) / time_step;
make_hotp(secret, counter)
}
/// Performs the [Time-based One-time Password Algorithm](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
/// (TOTP) given an RFC4648 base32 encoded secret, the time step in seconds,
/// and a skew in seconds.
pub fn make_totp(secret: &str, time_step: u64, skew: i64) -> Result<u32, Error> {
let now = SystemTime::now();
let time_since_epoch = now.duration_since(SystemTime::UNIX_EPOCH)?;
match make_totp_helper(secret, time_step, skew, time_since_epoch.as_secs() ) {
Ok(d) => Ok(d),
Err(err) => return Err(err)
}
}
#[cfg(test)]
mod tests {
use super::{make_hotp, make_totp_helper};
#[test]
fn hotp() {
assert_eq!(make_hotp("BASE32SECRET3232", 0).unwrap(), 260182);
assert_eq!(make_hotp("BASE32SECRET3232", 1).unwrap(), 55283);
assert_eq!(make_hotp("BASE32SECRET3232", 1401).unwrap(), 316439);
}
#[test]
fn totp() {
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 0).unwrap(), 260182); | assert_eq!(make_totp_helper("BASE32SECRET3232", 1, -2, 1403).unwrap(), 316439);
}
} | assert_eq!(make_totp_helper("BASE32SECRET3232", 3600, 0, 7).unwrap(), 260182);
assert_eq!(make_totp_helper("BASE32SECRET3232", 30, 0, 35).unwrap(), 55283); | random_line_split |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <robert@octarineparrot.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which occurs is an implementation
// issue rather than actually defined - is it useful to provide these
// errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> { | let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len()!= 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32)!= 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs)!= 0 {
return ifaces;
}
let mut addr = addrs;
while!addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if!found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while!cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while!ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
} | random_line_split | |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <robert@octarineparrot.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn | (a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which occurs is an implementation
// issue rather than actually defined - is it useful to provide these
// errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len()!= 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32)!= 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs)!= 0 {
return ifaces;
}
let mut addr = addrs;
while!addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if!found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while!cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while!ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| new | identifier_name |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <robert@octarineparrot.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) {
(self.0, self.1, self.2, self.3, self.4, self.5)
}
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which occurs is an implementation
// issue rather than actually defined - is it useful to provide these
// errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len()!= 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32)!= 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET | else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs)!= 0 {
return ifaces;
}
let mut addr = addrs;
while!addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if!found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while!cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while!ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} | conditional_block |
util.rs | // Copyright (c) 2014, 2015 Robert Clipsham <robert@octarineparrot.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous utilities for low level networking
extern crate libc;
use packet::PrimitiveValues;
use std::ffi::CStr;
use std::fmt;
use std::str::{FromStr, from_utf8_unchecked};
use std::mem;
use std::u8;
use std::net::IpAddr;
#[cfg(not(windows))]
use internal;
/// A MAC address
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct MacAddr(pub u8, pub u8, pub u8, pub u8, pub u8, pub u8);
impl MacAddr {
/// Construct a new MacAddr
pub fn new(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8) -> MacAddr {
MacAddr(a, b, c, d, e, f)
}
}
impl PrimitiveValues for MacAddr {
type T = (u8, u8, u8, u8, u8, u8);
fn to_primitive_values(&self) -> (u8, u8, u8, u8, u8, u8) |
}
impl fmt::Display for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt,
"{:x}:{:x}:{:x}:{:x}:{:x}:{:x}",
self.0,
self.1,
self.2,
self.3,
self.4,
self.5)
}
}
impl fmt::Debug for MacAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self, fmt)
}
}
// FIXME Is this the right way to do this? Which occurs is an implementation
// issue rather than actually defined - is it useful to provide these
// errors, or would it be better to just give ()?
/// Represents an error which occurred whilst parsing a MAC address
#[derive(Copy, Debug, PartialEq, Eq, Clone)]
pub enum ParseMacAddrErr {
/// The MAC address has too many components, eg. 00:11:22:33:44:55:66
TooManyComponents,
/// The MAC address has too few components, eg. 00:11
TooFewComponents,
/// One of the components contains an invalid value, eg. 00:GG:22:33:44:55
InvalidComponent,
}
impl FromStr for MacAddr {
type Err = ParseMacAddrErr;
fn from_str(s: &str) -> Result<MacAddr, ParseMacAddrErr> {
let mut parts = [0u8; 6];
let splits = s.split(':');
let mut i = 0;
for split in splits {
if i == 6 {
return Err(ParseMacAddrErr::TooManyComponents);
}
match u8::from_str_radix(split, 16) {
Ok(b) if split.len()!= 0 => parts[i] = b,
_ => return Err(ParseMacAddrErr::InvalidComponent),
}
i += 1;
}
if i == 6 {
Ok(MacAddr(parts[0], parts[1], parts[2], parts[3], parts[4], parts[5]))
} else {
Err(ParseMacAddrErr::TooFewComponents)
}
}
}
#[test]
fn mac_addr_from_str() {
assert_eq!("00:00:00:00:00:00".parse(), Ok(MacAddr(0, 0, 0, 0, 0, 0)));
assert_eq!("ff:ff:ff:ff:ff:ff".parse(),
Ok(MacAddr(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)));
assert_eq!("12:34:56:78:90:ab".parse(),
Ok(MacAddr(0x12, 0x34, 0x56, 0x78, 0x90, 0xAB)));
assert_eq!("::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("0::::::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("::::0::".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooFewComponents));
assert_eq!("12:34:56:78:90:".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
assert_eq!("12:34:56:78:90:00:00".parse::<MacAddr>(),
Err(ParseMacAddrErr::TooManyComponents));
assert_eq!("xx:xx:xx:xx:xx:xx".parse::<MacAddr>(),
Err(ParseMacAddrErr::InvalidComponent));
}
/// Represents a network interface and its associated addresses
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NetworkInterface {
/// The name of the interface
pub name: String,
/// The interface index (operating system specific)
pub index: u32,
/// A MAC address for the interface
pub mac: Option<MacAddr>,
/// An IP addresses for the interface
pub ips: Option<Vec<IpAddr>>,
/// Operating system specific flags for the interface
pub flags: u32,
}
impl NetworkInterface {
/// Retrieve the MAC address associated with the interface
pub fn mac_address(&self) -> MacAddr {
self.mac.unwrap()
}
/// Is the interface a loopback interface?
pub fn is_loopback(&self) -> bool {
self.flags & (libc::IFF_LOOPBACK as u32)!= 0
}
}
#[cfg(target_os = "linux")]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == libc::AF_PACKET {
let sll: *const libc::sockaddr_ll = mem::transmute(sa);
let mac = MacAddr((*sll).sll_addr[0],
(*sll).sll_addr[1],
(*sll).sll_addr[2],
(*sll).sll_addr[3],
(*sll).sll_addr[4],
(*sll).sll_addr[5]);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
fn sockaddr_to_network_addr(sa: *const libc::sockaddr) -> (Option<MacAddr>, Option<IpAddr>) {
use bindings::bpf;
unsafe {
if sa.is_null() {
(None, None)
} else if (*sa).sa_family as libc::c_int == bpf::AF_LINK {
let sdl: *const bpf::sockaddr_dl = mem::transmute(sa);
let nlen = (*sdl).sdl_nlen as usize;
let mac = MacAddr((*sdl).sdl_data[nlen] as u8,
(*sdl).sdl_data[nlen + 1] as u8,
(*sdl).sdl_data[nlen + 2] as u8,
(*sdl).sdl_data[nlen + 3] as u8,
(*sdl).sdl_data[nlen + 4] as u8,
(*sdl).sdl_data[nlen + 5] as u8);
(Some(mac), None)
} else {
let addr = internal::sockaddr_to_addr(mem::transmute(sa),
mem::size_of::<libc::sockaddr_storage>());
match addr {
Ok(sa) => (None, Some(sa.ip())),
Err(_) => (None, None),
}
}
}
}
/// Get a list of available network interfaces for the current machine.
#[inline]
pub fn get_network_interfaces() -> Vec<NetworkInterface> {
get_network_interfaces_impl()
}
#[cfg(not(windows))]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use std::ffi::CString;
let mut ifaces: Vec<NetworkInterface> = Vec::new();
unsafe {
let mut addrs: *mut libc::ifaddrs = mem::uninitialized();
if libc::getifaddrs(&mut addrs)!= 0 {
return ifaces;
}
let mut addr = addrs;
while!addr.is_null() {
let c_str = (*addr).ifa_name as *const i8;
let bytes = CStr::from_ptr(c_str).to_bytes();
let name = from_utf8_unchecked(bytes).to_owned();
let (mac, ip) = sockaddr_to_network_addr((*addr).ifa_addr as *const libc::sockaddr);
let ni = NetworkInterface {
name: name.clone(),
index: 0,
mac: mac,
ips: ip.map(|ip| [ip].to_vec()),
flags: (*addr).ifa_flags,
};
let mut found: bool = false;
for iface in &mut ifaces {
if name == iface.name {
merge(iface, &ni);
found = true;
}
}
if!found {
ifaces.push(ni);
}
addr = (*addr).ifa_next;
}
libc::freeifaddrs(addrs);
for iface in &mut ifaces {
let name = CString::new(iface.name.as_bytes());
iface.index = libc::if_nametoindex(name.unwrap().as_ptr());
}
return ifaces;
}
fn merge(old: &mut NetworkInterface, new: &NetworkInterface) {
old.mac = match new.mac {
None => old.mac,
_ => new.mac,
};
match (&mut old.ips, &new.ips) {
(&mut Some(ref mut old_ips), &Some(ref new_ips)) => old_ips.push_all(&new_ips[..]),
(&mut ref mut old_ips @ None, &Some(ref new_ips)) => *old_ips = Some(new_ips.clone()),
_ => {}
};
old.flags = old.flags | new.flags;
}
}
#[cfg(windows)]
fn get_network_interfaces_impl() -> Vec<NetworkInterface> {
use bindings::winpcap;
let mut adapters_size = 0u32;
unsafe {
let mut tmp: winpcap::IP_ADAPTER_INFO = mem::zeroed();
// FIXME [windows] This only gets IPv4 addresses - should use
// GetAdaptersAddresses
winpcap::GetAdaptersInfo(&mut tmp, &mut adapters_size);
}
let vec_size = adapters_size / mem::size_of::<winpcap::IP_ADAPTER_INFO>() as u32;
let mut adapters = Vec::with_capacity(vec_size as usize);
// FIXME [windows] Check return code
unsafe {
winpcap::GetAdaptersInfo(adapters.as_mut_ptr(), &mut adapters_size);
}
// Create a complete list of NetworkInterfaces for the machine
let mut cursor = adapters.as_mut_ptr();
let mut all_ifaces = Vec::with_capacity(vec_size as usize);
while!cursor.is_null() {
let mac = unsafe {
MacAddr((*cursor).Address[0],
(*cursor).Address[1],
(*cursor).Address[2],
(*cursor).Address[3],
(*cursor).Address[4],
(*cursor).Address[5])
};
let mut ip_cursor = unsafe { &mut (*cursor).IpAddressList as winpcap::PIP_ADDR_STRING };
let mut ips: Vec<IpAddr> = Vec::new();
while!ip_cursor.is_null() {
let ip_str_ptr = unsafe { &(*ip_cursor) }.IpAddress.String.as_ptr() as *const i8;
let bytes = unsafe { CStr::from_ptr(ip_str_ptr).to_bytes() };
let ip_str = unsafe { from_utf8_unchecked(bytes).to_owned() };
ips.push(ip_str.parse().unwrap());
ip_cursor = unsafe { (*ip_cursor).Next };
}
unsafe {
let name_str_ptr = (*cursor).AdapterName.as_ptr() as *const i8;
let bytes = CStr::from_ptr(name_str_ptr).to_bytes();
let name_str = from_utf8_unchecked(bytes).to_owned();
all_ifaces.push(NetworkInterface {
name: name_str,
index: (*cursor).Index,
mac: Some(mac),
ips: Some(ips),
// flags: (*cursor).Type, // FIXME [windows]
flags: 0,
});
cursor = (*cursor).Next;
}
}
let mut buf = [0u8; 4096];
let mut buflen = buf.len() as u32;
// Gets list of supported adapters in form:
// adapter1\0adapter2\0\0desc1\0desc2\0\0
if unsafe { winpcap::PacketGetAdapterNames(buf.as_mut_ptr() as *mut i8, &mut buflen) } == 0 {
// FIXME [windows] Should allocate a buffer big enough and try again
// - size should be buf.len() + buflen (buflen is overwritten)
panic!("FIXME [windows] unable to get interface list");
}
let buf_str = unsafe { from_utf8_unchecked(&buf) };
let iface_names = buf_str.split("\0\0").next();
let mut vec = Vec::new();
// Return only supported adapters
match iface_names {
Some(iface_names) => {
for iface in iface_names.split('\0') {
let name = iface.to_owned();
let next = all_ifaces.iter().filter(|x| name[..].ends_with(&x.name[..])).next();
if next.is_some() {
let mut iface = next.unwrap().clone();
iface.name = name;
vec.push(iface);
}
}
}
None => (),
};
vec
}
| {
(self.0, self.1, self.2, self.3, self.4, self.5)
} | identifier_body |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn | () -> Self {
Self {}
}
}
}
| new | identifier_name |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self |
}
}
| {
Self {}
} | identifier_body |
forget_room.rs | //! `POST /_matrix/client/*/rooms/{roomId}/forget`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidforget
use ruma_common::{api::ruma_api, RoomId};
ruma_api! {
metadata: {
description: "Forget a room.",
method: POST,
name: "forget_room",
r0_path: "/_matrix/client/r0/rooms/:room_id/forget",
stable_path: "/_matrix/client/v3/rooms/:room_id/forget",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The room to forget.
#[ruma_api(path)]
pub room_id: &'a RoomId,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self { room_id }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
} | } | random_line_split | |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright wtfsckgh@gmail.com
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn | (bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c!= Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| decode | identifier_name |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright wtfsckgh@gmail.com
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c!= Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap(); | }
}
assert_eq!(constant_offsets, expected_constant_offsets);
} | decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr)); | random_line_split |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright wtfsckgh@gmail.com
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> |
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c != Ordering::Equal {
c
} else {
a.kind.cmp(&b.kind)
}
});
vec
} | identifier_body |
mod.rs | // SPDX-License-Identifier: MIT
// Copyright wtfsckgh@gmail.com
// Copyright iced contributors
use super::super::test_utils::*;
use super::super::*;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::u32;
mod br8_16;
mod br8_32;
mod br8_64;
mod call_16;
mod call_32;
mod call_64;
mod ip_rel_64;
mod jcc_16;
mod jcc_32;
mod jcc_64;
mod jmp_16;
mod jmp_32;
mod jmp_64;
mod misc;
mod xbegin_16;
mod xbegin_32;
mod xbegin_64;
const DECODER_OPTIONS: u32 = 0; // DecoderOptions
fn decode(bitness: u32, rip: u64, data: &[u8], options: u32) -> Vec<Instruction> {
let mut decoder = create_decoder(bitness, data, options).0;
decoder.set_ip(rip);
decoder.into_iter().collect()
}
fn sort(mut vec: Vec<RelocInfo>) -> Vec<RelocInfo> {
vec.sort_unstable_by(|a, b| {
let c = a.address.cmp(&b.address);
if c!= Ordering::Equal | else {
a.kind.cmp(&b.kind)
}
});
vec
}
#[allow(clippy::too_many_arguments)]
fn encode_test(
bitness: u32, orig_rip: u64, original_data: &[u8], new_rip: u64, new_data: &[u8], mut options: u32, decoder_options: u32,
expected_instruction_offsets: &[u32], expected_reloc_infos: &[RelocInfo],
) {
let orig_instrs = decode(bitness, orig_rip, original_data, decoder_options);
options |=
BlockEncoderOptions::RETURN_RELOC_INFOS | BlockEncoderOptions::RETURN_NEW_INSTRUCTION_OFFSETS | BlockEncoderOptions::RETURN_CONSTANT_OFFSETS;
let result = BlockEncoder::encode(bitness, InstructionBlock::new(&orig_instrs, new_rip), options).unwrap();
let encoded_bytes = result.code_buffer;
assert_eq!(&encoded_bytes[..], new_data);
assert_eq!(result.rip, new_rip);
let reloc_infos = result.reloc_infos;
let new_instruction_offsets = result.new_instruction_offsets;
let constant_offsets = result.constant_offsets;
assert_eq!(new_instruction_offsets.len(), orig_instrs.len());
assert_eq!(constant_offsets.len(), orig_instrs.len());
assert_eq!(sort(reloc_infos), sort(expected_reloc_infos.to_vec()));
assert_eq!(&new_instruction_offsets[..], expected_instruction_offsets);
let mut expected_constant_offsets = Vec::with_capacity(constant_offsets.len());
let mut decoder = create_decoder(bitness, &encoded_bytes, decoder_options).0;
let mut instr = Instruction::default();
for &offset in &new_instruction_offsets {
if offset == u32::MAX {
expected_constant_offsets.push(ConstantOffsets::default());
} else {
decoder.try_set_position(offset as usize).unwrap();
decoder.set_ip(new_rip.wrapping_add(offset as u64));
decoder.decode_out(&mut instr);
expected_constant_offsets.push(decoder.get_constant_offsets(&instr));
}
}
assert_eq!(constant_offsets, expected_constant_offsets);
}
| {
c
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if!tts.is_empty() |
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
//... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
cx.span_fatal(sp, "forged_ident takes no arguments");
} | conditional_block |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> { | }
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if!tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
//... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {} | if !tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i)) | random_line_split |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> |
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if!tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
//... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
} | identifier_body |
macro_crate_test.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![feature(globs, plugin_registrar, macro_rules, quote)]
extern crate syntax;
extern crate rustc;
use syntax::ast::{TokenTree, Item, MetaItem};
use syntax::codemap::Span;
use syntax::ext::base::*;
use syntax::parse::token;
use syntax::parse;
use syntax::ptr::P;
use rustc::plugin::Registry;
#[macro_export]
macro_rules! exported_macro (() => (2i))
macro_rules! unexported_macro (() => (3i))
#[plugin_registrar]
pub fn | (reg: &mut Registry) {
reg.register_macro("make_a_1", expand_make_a_1);
reg.register_macro("forged_ident", expand_forged_ident);
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
if!tts.is_empty() {
cx.span_fatal(sp, "make_a_1 takes no arguments");
}
MacExpr::new(quote_expr!(cx, 1i))
}
// See Issue #15750
fn expand_identity(cx: &mut ExtCtxt, _span: Span, tts: &[TokenTree])
-> Box<MacResult+'static> {
// Parse an expression and emit it unchanged.
let mut parser = parse::new_parser_from_tts(cx.parse_sess(),
cx.cfg(), tts.to_vec());
let expr = parser.parse_expr();
MacExpr::new(quote_expr!(&mut *cx, $expr))
}
fn expand_into_foo(cx: &mut ExtCtxt, sp: Span, attr: &MetaItem, it: P<Item>)
-> P<Item> {
P(Item {
attrs: it.attrs.clone(),
..(*quote_item!(cx, enum Foo { Bar, Baz }).unwrap()).clone()
})
}
fn expand_forged_ident(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult+'static> {
use syntax::ext::quote::rt::*;
if!tts.is_empty() {
cx.span_fatal(sp, "forged_ident takes no arguments");
}
// Most of this is modelled after the expansion of the `quote_expr!`
// macro...
let parse_sess = cx.parse_sess();
let cfg = cx.cfg();
//... except this is where we inject a forged identifier,
// and deliberately do not call `cx.parse_tts_with_hygiene`
// (because we are testing that this will be *rejected*
// by the default parser).
let expr = {
let tt = cx.parse_tts("\x00name_2,ctxt_0\x00".to_string());
let mut parser = new_parser_from_tts(parse_sess, cfg, tt);
parser.parse_expr()
};
MacExpr::new(expr)
}
pub fn foo() {}
| plugin_registrar | identifier_name |
shadows.rs | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
#pragma rs_fp_relaxed
static double shadowFilterMap[] = {
-0.00591, 0.0001,
1.16488, 0.01668,
-0.18027, -0.06791,
-0.12625, 0.09001,
0.15065, -0.03897
};
static double poly[] = {
0., 0.,
0., 0.,
0.
};
static const int ABITS = 4;
static const int HSCALE = 256;
static const int k1=255 << ABITS;
static const int k2=HSCALE << ABITS;
static double fastevalPoly(double *poly,int n, double x){
double f =x;
double sum = poly[0]+poly[1]*f;
int i;
for (i = 2; i < n; i++) {
f*=x;
sum += poly[i]*f;
}
return sum;
}
static ushort3 rgb2hsv( uchar4 rgb)
{
int iMin,iMax,chroma;
int ri = rgb.r;
int gi = rgb.g;
int bi = rgb.b;
short rv,rs,rh;
if (ri > gi) {
iMax = max (ri, bi);
iMin = min (gi, bi);
} else {
iMax = max (gi, bi);
iMin = min (ri, bi);
}
chroma = iMax - iMin;
// set value
rv = (short)( iMax << ABITS);
// set saturation
if (rv == 0)
rs = 0;
else
rs = (short)((k1*chroma)/iMax);
// set hue
if (rs == 0)
rh = 0;
else {
if ( ri == iMax ) {
rh = (short)( (k2*(6*chroma+gi - bi))/(6*chroma));
if (rh >= k2) rh -= k2;
} else if (gi == iMax)
rh = (short)( (k2*(2*chroma+bi - ri ))/(6*chroma));
else // (bi == iMax )
rh = (short)( (k2*(4*chroma+ri - gi ))/(6*chroma));
}
ushort3 out;
out.x = rv;
out.y = rs;
out.z = rh;
return out;
}
static uchar4 hsv2rgb(ushort3 hsv)
{
int ABITS = 4;
int HSCALE = 256;
int m;
int H,X,ih,is,iv;
int k1=255<<ABITS;
int k2=HSCALE<<ABITS;
int k3=1<<(ABITS-1);
int rr=0;
int rg=0;
int rb=0;
short cv = hsv.x;
short cs = hsv.y;
short ch = hsv.z;
// set chroma and min component value m
//chroma = ( cv * cs )/k1;
//m = cv - chroma;
m = ((int)cv*(k1 - (int)cs ))/k1;
// chroma == 0 <-> cs == 0 --> m=cv
if (cs == 0) {
rb = ( rg = ( rr =( cv >> ABITS) ));
} else {
ih=(int)ch;
is=(int)cs;
iv=(int)cv;
H = (6*ih)/k2;
X = ((iv*is)/k2)*(k2- abs(6*ih- 2*(H>>1)*k2 - k2)) ;
// removing additional bits --> unit8
X=( (X+iv*(k1 - is ))/k1 + k3 ) >> ABITS;
m=m >> ABITS;
// ( chroma + m ) --> cv ;
cv=(short) (cv >> ABITS);
switch (H) {
case 0:
rr = cv;
rg = X;
rb = m;
break;
case 1:
rr = X;
rg = cv;
rb = m;
break;
case 2:
rr = m;
rg = cv;
rb = X;
break;
case 3:
rr = m;
rg = X;
rb = cv;
break;
case 4:
rr = X;
rg = m;
rb = cv;
break;
case 5:
rr = cv;
rg = m ;
rb = X;
break;
}
}
uchar4 rgb;
rgb.r = rr;
rgb.g = rg;
rgb.b = rb;
return rgb;
}
void prepareShadows(float scale) {
double s = (scale>=0)?scale:scale/5;
for (int i = 0; i < 5; i++) {
poly[i] = fastevalPoly(shadowFilterMap+i*2,2, s); | }
void shadowsKernel(const uchar4 *in, uchar4 *out) {
ushort3 hsv = rgb2hsv(*in);
double v = (fastevalPoly(poly,5,hsv.x/4080.)*4080);
if (v>4080) v = 4080;
hsv.x = (unsigned short) ((v>0)?v:0);
*out = hsv2rgb(hsv);
} | } | random_line_split |
shadows.rs | /*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ip.rsh"
#pragma rs_fp_relaxed
static double shadowFilterMap[] = {
-0.00591, 0.0001,
1.16488, 0.01668,
-0.18027, -0.06791,
-0.12625, 0.09001,
0.15065, -0.03897
};
static double poly[] = {
0., 0.,
0., 0.,
0.
};
static const int ABITS = 4;
static const int HSCALE = 256;
static const int k1=255 << ABITS;
static const int k2=HSCALE << ABITS;
static double fastevalPoly(double *poly,int n, double x){
double f =x;
double sum = poly[0]+poly[1]*f;
int i;
for (i = 2; i < n; i++) {
f*=x;
sum += poly[i]*f;
}
return sum;
}
static ushort3 rgb2hsv( uchar4 rgb)
{
int iMin,iMax,chroma;
int ri = rgb.r;
int gi = rgb.g;
int bi = rgb.b;
short rv,rs,rh;
if (ri > gi) {
iMax = max (ri, bi);
iMin = min (gi, bi);
} else {
iMax = max (gi, bi);
iMin = min (ri, bi);
}
chroma = iMax - iMin;
// set value
rv = (short)( iMax << ABITS);
// set saturation
if (rv == 0)
rs = 0;
else
rs = (short)((k1*chroma)/iMax);
// set hue
if (rs == 0)
rh = 0;
else {
if ( ri == iMax ) {
rh = (short)( (k2*(6*chroma+gi - bi))/(6*chroma));
if (rh >= k2) rh -= k2;
} else if (gi == iMax)
rh = (short)( (k2*(2*chroma+bi - ri ))/(6*chroma));
else // (bi == iMax )
rh = (short)( (k2*(4*chroma+ri - gi ))/(6*chroma));
}
ushort3 out;
out.x = rv;
out.y = rs;
out.z = rh;
return out;
}
static uchar4 hsv2rgb(ushort3 hsv)
{
int ABITS = 4;
int HSCALE = 256;
int m;
int H,X,ih,is,iv;
int k1=255<<ABITS;
int k2=HSCALE<<ABITS;
int k3=1<<(ABITS-1);
int rr=0;
int rg=0;
int rb=0;
short cv = hsv.x;
short cs = hsv.y;
short ch = hsv.z;
// set chroma and min component value m
//chroma = ( cv * cs )/k1;
//m = cv - chroma;
m = ((int)cv*(k1 - (int)cs ))/k1;
// chroma == 0 <-> cs == 0 --> m=cv
if (cs == 0) {
rb = ( rg = ( rr =( cv >> ABITS) ));
} else | case 1:
rr = X;
rg = cv;
rb = m;
break;
case 2:
rr = m;
rg = cv;
rb = X;
break;
case 3:
rr = m;
rg = X;
rb = cv;
break;
case 4:
rr = X;
rg = m;
rb = cv;
break;
case 5:
rr = cv;
rg = m ;
rb = X;
break;
}
}
uchar4 rgb;
rgb.r = rr;
rgb.g = rg;
rgb.b = rb;
return rgb;
}
void prepareShadows(float scale) {
double s = (scale>=0)?scale:scale/5;
for (int i = 0; i < 5; i++) {
poly[i] = fastevalPoly(shadowFilterMap+i*2,2, s);
}
}
void shadowsKernel(const uchar4 *in, uchar4 *out) {
ushort3 hsv = rgb2hsv(*in);
double v = (fastevalPoly(poly,5,hsv.x/4080.)*4080);
if (v>4080) v = 4080;
hsv.x = (unsigned short) ((v>0)?v:0);
*out = hsv2rgb(hsv);
}
| {
ih=(int)ch;
is=(int)cs;
iv=(int)cv;
H = (6*ih)/k2;
X = ((iv*is)/k2)*(k2- abs(6*ih- 2*(H>>1)*k2 - k2)) ;
// removing additional bits --> unit8
X=( (X+iv*(k1 - is ))/k1 + k3 ) >> ABITS;
m=m >> ABITS;
// ( chroma + m ) --> cv ;
cv=(short) (cv >> ABITS);
switch (H) {
case 0:
rr = cv;
rg = X;
rb = m;
break; | conditional_block |
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn main() | },
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
}
| {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => {
index += 1;
match chars[index] {
'x' => {
index += 3;
chars_in_memory += 1; | identifier_body |
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn | () {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => {
index += 1;
match chars[index] {
'x' => {
index += 3;
chars_in_memory += 1;
},
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
}
| main | identifier_name |
main.rs | #![feature(iter_arith)]
use std::io::Read;
use std::fs::File;
fn main() {
let mut f = File::open("../input.txt").unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
let s = s;
let answer: i32 = s.lines().map(|x| {
let char_num = x.len() as i32;
let mut chars_in_memory = 0;
let mut index = 1;
let chars = x.chars().collect::<Vec<_>>();
while index < x.len() - 1 {
match chars[index] {
'\\' => { | index += 3;
chars_in_memory += 1;
},
_ => {
index += 1;
chars_in_memory += 1;
}
}
},
_ => {
chars_in_memory += 1;
index += 1;
}
}
}
char_num - chars_in_memory
}).sum();
println!("The answer is {} :D", answer);
} | index += 1;
match chars[index] {
'x' => { | random_line_split |
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one |
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
}
| {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
} | conditional_block |
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() | {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
} | identifier_body | |
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| {
if *secs >= one {
let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn | () {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
}
| hours_and_seconds | identifier_name |
convert_seconds_to_compound_duration.rs | // http://rosettacode.org/wiki/Convert_seconds_to_compound_duration
fn seconds_to_compound(secs: u32) -> String {
let part = |comps: &mut String, c: &str, one: u32, secs: &mut u32| { | let div = *secs / one;
comps.push_str(&(div.to_string() + c));
*secs -= one * div;
if *secs > 0 {
comps.push_str(", ");
}
}
};
let mut secs = secs;
let mut comps = String::new();
part(&mut comps, " wk", 60 * 60 * 24 * 7, &mut secs);
part(&mut comps, " d", 60 * 60 * 24, &mut secs);
part(&mut comps, " hr", 60 * 60, &mut secs);
part(&mut comps, " min", 60, &mut secs);
part(&mut comps, " sec", 1, &mut secs);
comps
}
#[test]
fn hours_and_seconds() {
assert_eq!(seconds_to_compound(7259), "2 hr, 59 sec");
}
#[test]
fn one_day() {
assert_eq!(seconds_to_compound(86400), "1 d");
}
#[test]
fn six_million_seconds() {
assert_eq!(seconds_to_compound(6000000), "9 wk, 6 d, 10 hr, 40 min");
}
fn main() {
println!("7,259 seconds = {}", seconds_to_compound(7259));
println!("86,400 seconds = {}", seconds_to_compound(86400));
println!("6,000,000 seconds = {}", seconds_to_compound(6000000));
} | if *secs >= one { | random_line_split |
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct PowerManager {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
pub unsafe fn set_cpu_and_bus_blocks(&mut self) {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL); | }
} | random_line_split | |
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct | {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
pub unsafe fn set_cpu_and_bus_blocks(&mut self) {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL);
}
}
| PowerManager | identifier_name |
pm.rs | use core::marker::PhantomData;
use volatile::*;
#[repr(C, packed)]
pub struct PowerManager {
pub control: ReadWrite<u8>, // Offset: 0x00 (R/W 8) Control
pub sleep: ReadWrite<u8>, // Offset: 0x01 (R/W 8) Sleep Mode
pub external_control: ReadWrite<u8>, // Offset: 0x02 (R/W 8) External Reset Controller
reserved_1: [u8; 5],
pub cpu_select: ReadWrite<u8>, // Offset: 0x08 (R/W 8) CPU Clock Select
pub apba_select: ReadWrite<u8>, // Offset: 0x09 (R/W 8) APBA Clock Select
pub apbb_select: ReadWrite<u8>, // Offset: 0x0A (R/W 8) APBB Clock Select
pub apbc_select: ReadWrite<u8>, // Offset: 0x0B (R/W 8) APBC Clock Select
reserved_2: [u8; 8],
pub ahb_mask: ReadWrite<u32>, // Offset: 0x14 (R/W 32) AHB Mask
pub apba_mask: ReadWrite<u32>, // Offset: 0x18 (R/W 32) APBA Mask
pub apbb_mask: ReadWrite<u32>, // Offset: 0x1C (R/W 32) APBB Mask
pub apbc_mask: ReadWrite<u32>, // Offset: 0x20 (R/W 32) APBC Mask
reserved_3: [u8; 10],
pub interrupt_clear: ReadWrite<u8>, // Offset: 0x34 (R/W 8) Interrupt Enable Clear
pub interrupt_set: ReadWrite<u8>, // Offset: 0x35 (R/W 8) Interrupt Enable Set
pub interrupt_flag: ReadWrite<u8>, // Offset: 0x36 (R/W 8) Interrupt Flag Status and Clear
reserved_4: [u8; 1],
pub reset_cause: ReadOnly<u8>, // Offset: 0x38 (R/ 8) Reset Cause
private: PhantomData<()>, // This type cannot be constructed.
}
pub const PM_APBAMASK_GCLK: u32 = 1 << 3;
impl PowerManager {
pub unsafe fn set_cpu_and_bus_blocks(&mut self) |
}
| {
// Now that all system clocks are configured, we can set CPU and APBx BUS clocks.
// There[sic] values are normally the one present after Reset.
let PM_CPUSEL_CPUDIV_POS = 0; // (PM_CPUSEL) CPU Prescaler Selection
let PM_CPUSEL_CPUDIV_DIV1_VAL = 0x0; // (PM_CPUSEL) Divide by 1
let PM_CPUSEL_CPUDIV_DIV1 = PM_CPUSEL_CPUDIV_DIV1_VAL << PM_CPUSEL_CPUDIV_POS;
let PM_APBASEL_APBADIV_DIV1_VAL = 0x0; // (PM_APBASEL) Divide by 1
let PM_APBBSEL_APBBDIV_DIV1_VAL = 0x0; // (PM_APBBSEL) Divide by 1
let PM_APBCSEL_APBCDIV_DIV1_VAL = 0x0; // (PM_APBCSEL) Divide by 1
self.cpu_select.write(PM_CPUSEL_CPUDIV_DIV1);
self.apba_select.write(PM_APBASEL_APBADIV_DIV1_VAL);
self.apbb_select.write(PM_APBBSEL_APBBDIV_DIV1_VAL);
self.apbc_select.write(PM_APBCSEL_APBCDIV_DIV1_VAL);
} | identifier_body |
write.rs | use std::fmt;
use std::io;
pub trait AnyWrite {
type wstr:?Sized;
type Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error>;
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error>;
}
impl<'a> AnyWrite for fmt::Write + 'a {
type wstr = str;
type Error = fmt::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
fmt::Write::write_fmt(self, fmt)
}
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error> {
fmt::Write::write_str(self, s)
}
}
impl<'a> AnyWrite for io::Write + 'a {
type wstr = [u8];
type Error = io::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
io::Write::write_fmt(self, fmt)
}
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error> |
}
| {
io::Write::write_all(self, s)
} | identifier_body |
write.rs | use std::fmt;
use std::io;
pub trait AnyWrite {
type wstr:?Sized;
type Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error>;
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error>;
}
impl<'a> AnyWrite for fmt::Write + 'a {
type wstr = str;
type Error = fmt::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
fmt::Write::write_fmt(self, fmt)
}
fn | (&mut self, s: &Self::wstr) -> Result<(), Self::Error> {
fmt::Write::write_str(self, s)
}
}
impl<'a> AnyWrite for io::Write + 'a {
type wstr = [u8];
type Error = io::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
io::Write::write_fmt(self, fmt)
}
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error> {
io::Write::write_all(self, s)
}
}
| write_str | identifier_name |
write.rs | use std::fmt;
use std::io;
pub trait AnyWrite {
type wstr:?Sized;
type Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error>;
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error>;
}
impl<'a> AnyWrite for fmt::Write + 'a {
type wstr = str;
type Error = fmt::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
fmt::Write::write_fmt(self, fmt)
}
fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error> {
fmt::Write::write_str(self, s)
}
}
impl<'a> AnyWrite for io::Write + 'a {
type wstr = [u8];
type Error = io::Error;
fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<(), Self::Error> {
io::Write::write_fmt(self, fmt)
} | fn write_str(&mut self, s: &Self::wstr) -> Result<(), Self::Error> {
io::Write::write_all(self, s)
}
} | random_line_split | |
scopemeasure_stopover_adds_an_extra_line_to_the_log_upon_each_call.rs | use libnewsboat::{
logger::{self, Level},
scopemeasure::ScopeMeasure,
};
use std::fs::File;
use std::io::{BufRead, BufReader, Result};
use std::path::Path;
use tempfile::TempDir;
fn file_lines_count(logfile: &Path) -> Result<usize> {
let file = File::open(logfile)?;
let reader = BufReader::new(file);
Ok(reader.lines().count())
}
#[test]
fn stopover_adds_an_extra_line_to_the_log_upon_each_call() | assert_eq!(file_lines_count(&logfile).unwrap(), calls + 1usize);
}
}
| {
for calls in &[1, 2, 5] {
let tmp = TempDir::new().unwrap();
let logfile = {
let mut logfile = tmp.path().to_owned();
logfile.push("example.log");
logfile
};
{
logger::get_instance().set_logfile(logfile.to_str().unwrap());
logger::get_instance().set_loglevel(Level::Debug);
let sm = ScopeMeasure::new(String::from("test"));
for i in 0..*calls {
sm.stopover(&format!("stopover No.{}", i));
}
}
// One line for each call to stopover(), plus one more for the call to drop() | identifier_body |
scopemeasure_stopover_adds_an_extra_line_to_the_log_upon_each_call.rs | use libnewsboat::{
logger::{self, Level},
scopemeasure::ScopeMeasure,
};
use std::fs::File;
use std::io::{BufRead, BufReader, Result};
use std::path::Path;
use tempfile::TempDir;
fn | (logfile: &Path) -> Result<usize> {
let file = File::open(logfile)?;
let reader = BufReader::new(file);
Ok(reader.lines().count())
}
#[test]
fn stopover_adds_an_extra_line_to_the_log_upon_each_call() {
for calls in &[1, 2, 5] {
let tmp = TempDir::new().unwrap();
let logfile = {
let mut logfile = tmp.path().to_owned();
logfile.push("example.log");
logfile
};
{
logger::get_instance().set_logfile(logfile.to_str().unwrap());
logger::get_instance().set_loglevel(Level::Debug);
let sm = ScopeMeasure::new(String::from("test"));
for i in 0..*calls {
sm.stopover(&format!("stopover No.{}", i));
}
}
// One line for each call to stopover(), plus one more for the call to drop()
assert_eq!(file_lines_count(&logfile).unwrap(), calls + 1usize);
}
}
| file_lines_count | identifier_name |
scopemeasure_stopover_adds_an_extra_line_to_the_log_upon_each_call.rs | use libnewsboat::{
logger::{self, Level},
scopemeasure::ScopeMeasure,
};
use std::fs::File;
use std::io::{BufRead, BufReader, Result};
use std::path::Path;
use tempfile::TempDir;
fn file_lines_count(logfile: &Path) -> Result<usize> {
let file = File::open(logfile)?;
let reader = BufReader::new(file);
Ok(reader.lines().count())
}
| for calls in &[1, 2, 5] {
let tmp = TempDir::new().unwrap();
let logfile = {
let mut logfile = tmp.path().to_owned();
logfile.push("example.log");
logfile
};
{
logger::get_instance().set_logfile(logfile.to_str().unwrap());
logger::get_instance().set_loglevel(Level::Debug);
let sm = ScopeMeasure::new(String::from("test"));
for i in 0..*calls {
sm.stopover(&format!("stopover No.{}", i));
}
}
// One line for each call to stopover(), plus one more for the call to drop()
assert_eq!(file_lines_count(&logfile).unwrap(), calls + 1usize);
}
} | #[test]
fn stopover_adds_an_extra_line_to_the_log_upon_each_call() { | random_line_split |
mod.rs | use std::time::{Instant, Duration};
use super::Figure;
/// This struct is responsible for logging the average frame duration to stdout
/// once a second.
pub struct FpsLog {
last_second: Instant,
avg_duration_ns: u64,
ticks: u64,
}
impl FpsLog {
pub fn new() -> FpsLog {
FpsLog {
last_second: Instant::now(), | /// Dump the frame time to std out
fn print(&self) {
let frame_time_ms = self.avg_duration_ns / 1000000;
println!("avg frame time: {}ns which is {}ms",
self.avg_duration_ns, frame_time_ms);
}
/// Reset internal state which is used to calculate frame duration
fn reset(&mut self) {
self.last_second = Instant::now();
self.avg_duration_ns = 0;
self.ticks = 0;
}
/// Update state for duration calculation
fn add_frame_duration(&mut self, duration: Duration) {
let scaled_avg = self.avg_duration_ns * self.ticks;
let frame_ns = duration.subsec_nanos() as u64;
self.ticks += 1;
self.avg_duration_ns = (scaled_avg + frame_ns) / self.ticks;
}
}
impl Figure for FpsLog {
fn update(&mut self, duration: Duration) {
if self.last_second.elapsed() > Duration::from_secs(1) {
self.print();
self.reset();
}
self.add_frame_duration(duration);
}
} | avg_duration_ns: 0,
ticks: 0
}
}
| random_line_split |
mod.rs | use std::time::{Instant, Duration};
use super::Figure;
/// This struct is responsible for logging the average frame duration to stdout
/// once a second.
pub struct FpsLog {
last_second: Instant,
avg_duration_ns: u64,
ticks: u64,
}
impl FpsLog {
pub fn new() -> FpsLog {
FpsLog {
last_second: Instant::now(),
avg_duration_ns: 0,
ticks: 0
}
}
/// Dump the frame time to std out
fn | (&self) {
let frame_time_ms = self.avg_duration_ns / 1000000;
println!("avg frame time: {}ns which is {}ms",
self.avg_duration_ns, frame_time_ms);
}
/// Reset internal state which is used to calculate frame duration
fn reset(&mut self) {
self.last_second = Instant::now();
self.avg_duration_ns = 0;
self.ticks = 0;
}
/// Update state for duration calculation
fn add_frame_duration(&mut self, duration: Duration) {
let scaled_avg = self.avg_duration_ns * self.ticks;
let frame_ns = duration.subsec_nanos() as u64;
self.ticks += 1;
self.avg_duration_ns = (scaled_avg + frame_ns) / self.ticks;
}
}
impl Figure for FpsLog {
fn update(&mut self, duration: Duration) {
if self.last_second.elapsed() > Duration::from_secs(1) {
self.print();
self.reset();
}
self.add_frame_duration(duration);
}
}
| print | identifier_name |
mod.rs | use std::time::{Instant, Duration};
use super::Figure;
/// This struct is responsible for logging the average frame duration to stdout
/// once a second.
pub struct FpsLog {
last_second: Instant,
avg_duration_ns: u64,
ticks: u64,
}
impl FpsLog {
pub fn new() -> FpsLog |
/// Dump the frame time to std out
fn print(&self) {
let frame_time_ms = self.avg_duration_ns / 1000000;
println!("avg frame time: {}ns which is {}ms",
self.avg_duration_ns, frame_time_ms);
}
/// Reset internal state which is used to calculate frame duration
fn reset(&mut self) {
self.last_second = Instant::now();
self.avg_duration_ns = 0;
self.ticks = 0;
}
/// Update state for duration calculation
fn add_frame_duration(&mut self, duration: Duration) {
let scaled_avg = self.avg_duration_ns * self.ticks;
let frame_ns = duration.subsec_nanos() as u64;
self.ticks += 1;
self.avg_duration_ns = (scaled_avg + frame_ns) / self.ticks;
}
}
impl Figure for FpsLog {
fn update(&mut self, duration: Duration) {
if self.last_second.elapsed() > Duration::from_secs(1) {
self.print();
self.reset();
}
self.add_frame_duration(duration);
}
}
| {
FpsLog {
last_second: Instant::now(),
avg_duration_ns: 0,
ticks: 0
}
} | identifier_body |
mod.rs | use std::time::{Instant, Duration};
use super::Figure;
/// This struct is responsible for logging the average frame duration to stdout
/// once a second.
pub struct FpsLog {
last_second: Instant,
avg_duration_ns: u64,
ticks: u64,
}
impl FpsLog {
pub fn new() -> FpsLog {
FpsLog {
last_second: Instant::now(),
avg_duration_ns: 0,
ticks: 0
}
}
/// Dump the frame time to std out
fn print(&self) {
let frame_time_ms = self.avg_duration_ns / 1000000;
println!("avg frame time: {}ns which is {}ms",
self.avg_duration_ns, frame_time_ms);
}
/// Reset internal state which is used to calculate frame duration
fn reset(&mut self) {
self.last_second = Instant::now();
self.avg_duration_ns = 0;
self.ticks = 0;
}
/// Update state for duration calculation
fn add_frame_duration(&mut self, duration: Duration) {
let scaled_avg = self.avg_duration_ns * self.ticks;
let frame_ns = duration.subsec_nanos() as u64;
self.ticks += 1;
self.avg_duration_ns = (scaled_avg + frame_ns) / self.ticks;
}
}
impl Figure for FpsLog {
fn update(&mut self, duration: Duration) {
if self.last_second.elapsed() > Duration::from_secs(1) |
self.add_frame_duration(duration);
}
}
| {
self.print();
self.reset();
} | conditional_block |
test.rs | #![crate_name = "test"]
#![feature(libc)]
#![feature(start)]
extern crate libc;
extern crate wx;
use libc::c_void;
use wx::_unsafe::*;
use wx::defs::*;
use wx::base::*;
use wx::core::*;
mod macros;
wxApp!(wx_main);
extern "C"
fn wx_main() |
fn make_frame() -> Frame {
let frame = Frame::new(&Window::null(), ID_ANY, "Hello, wxRust!", -1, -1, -1, -1, DEFAULT_FRAME_STYLE);
let menubar = make_menubar();
frame.setMenuBar(&menubar);
make_button(&frame);
frame
}
fn make_menubar() -> MenuBar {
let menubar = MenuBar::new(0);
let fileMenu = Menu::new("", 0);
let fileNew = MenuItem::newEx(ID_ANY, "New", "Create a new file.", 0, &Menu::null());
fileMenu.appendItem(&fileNew);
menubar.append(&fileMenu, "File");
menubar
}
extern "C"
fn MyButton_clicked(fun: *mut c_void, data: *mut c_void, evt: *mut c_void) {
if evt == 0 as *mut c_void {
// Comes here when the target widget is destroyed.
return;
}
println!("hello!");
let parent = Window::from(data);
let msgDlg = MessageDialog::new(&parent, "Pushed!!", "The Button", OK);
msgDlg.showModal();
}
fn make_button<T: WindowMethods>(parent: &T) -> Button {
let button = Button::new(parent, ID_ANY, "Push me!", 10, 10, 50, 30, 0);
let closure = Closure::new(MyButton_clicked as *mut c_void, parent.ptr());
unsafe {
button.connect(ID_ANY, ID_ANY, expEVT_COMMAND_BUTTON_CLICKED(), closure.ptr());
}
button
}
| {
let frame = make_frame();
frame.show();
frame.raise();
} | identifier_body |
test.rs | #![crate_name = "test"]
#![feature(libc)]
#![feature(start)]
extern crate libc;
extern crate wx;
use libc::c_void;
use wx::_unsafe::*;
use wx::defs::*;
use wx::base::*;
use wx::core::*;
mod macros;
wxApp!(wx_main);
extern "C"
fn wx_main() {
let frame = make_frame();
frame.show();
frame.raise();
}
fn make_frame() -> Frame {
let frame = Frame::new(&Window::null(), ID_ANY, "Hello, wxRust!", -1, -1, -1, -1, DEFAULT_FRAME_STYLE);
let menubar = make_menubar();
frame.setMenuBar(&menubar);
make_button(&frame);
frame
}
fn make_menubar() -> MenuBar {
let menubar = MenuBar::new(0);
let fileMenu = Menu::new("", 0);
let fileNew = MenuItem::newEx(ID_ANY, "New", "Create a new file.", 0, &Menu::null());
fileMenu.appendItem(&fileNew);
menubar.append(&fileMenu, "File");
menubar
}
extern "C"
fn MyButton_clicked(fun: *mut c_void, data: *mut c_void, evt: *mut c_void) {
if evt == 0 as *mut c_void |
println!("hello!");
let parent = Window::from(data);
let msgDlg = MessageDialog::new(&parent, "Pushed!!", "The Button", OK);
msgDlg.showModal();
}
fn make_button<T: WindowMethods>(parent: &T) -> Button {
let button = Button::new(parent, ID_ANY, "Push me!", 10, 10, 50, 30, 0);
let closure = Closure::new(MyButton_clicked as *mut c_void, parent.ptr());
unsafe {
button.connect(ID_ANY, ID_ANY, expEVT_COMMAND_BUTTON_CLICKED(), closure.ptr());
}
button
}
| {
// Comes here when the target widget is destroyed.
return;
} | conditional_block |
test.rs | #![crate_name = "test"]
#![feature(libc)]
#![feature(start)]
extern crate libc;
extern crate wx;
use libc::c_void;
use wx::_unsafe::*;
use wx::defs::*;
use wx::base::*;
use wx::core::*;
mod macros;
wxApp!(wx_main);
extern "C"
fn wx_main() {
let frame = make_frame();
frame.show();
frame.raise();
}
fn make_frame() -> Frame {
let frame = Frame::new(&Window::null(), ID_ANY, "Hello, wxRust!", -1, -1, -1, -1, DEFAULT_FRAME_STYLE);
let menubar = make_menubar();
frame.setMenuBar(&menubar);
make_button(&frame);
frame
}
fn make_menubar() -> MenuBar {
let menubar = MenuBar::new(0);
let fileMenu = Menu::new("", 0);
let fileNew = MenuItem::newEx(ID_ANY, "New", "Create a new file.", 0, &Menu::null());
fileMenu.appendItem(&fileNew);
menubar.append(&fileMenu, "File");
menubar
}
extern "C"
fn MyButton_clicked(fun: *mut c_void, data: *mut c_void, evt: *mut c_void) {
if evt == 0 as *mut c_void {
// Comes here when the target widget is destroyed.
return;
}
println!("hello!");
let parent = Window::from(data);
let msgDlg = MessageDialog::new(&parent, "Pushed!!", "The Button", OK);
msgDlg.showModal();
}
fn | <T: WindowMethods>(parent: &T) -> Button {
let button = Button::new(parent, ID_ANY, "Push me!", 10, 10, 50, 30, 0);
let closure = Closure::new(MyButton_clicked as *mut c_void, parent.ptr());
unsafe {
button.connect(ID_ANY, ID_ANY, expEVT_COMMAND_BUTTON_CLICKED(), closure.ptr());
}
button
}
| make_button | identifier_name |
test.rs | #![crate_name = "test"]
#![feature(libc)]
#![feature(start)]
extern crate libc;
extern crate wx;
use libc::c_void;
use wx::_unsafe::*;
use wx::defs::*;
use wx::base::*;
use wx::core::*;
mod macros;
wxApp!(wx_main);
extern "C"
fn wx_main() {
let frame = make_frame();
frame.show();
frame.raise();
}
fn make_frame() -> Frame {
let frame = Frame::new(&Window::null(), ID_ANY, "Hello, wxRust!", -1, -1, -1, -1, DEFAULT_FRAME_STYLE);
let menubar = make_menubar();
frame.setMenuBar(&menubar);
| }
fn make_menubar() -> MenuBar {
let menubar = MenuBar::new(0);
let fileMenu = Menu::new("", 0);
let fileNew = MenuItem::newEx(ID_ANY, "New", "Create a new file.", 0, &Menu::null());
fileMenu.appendItem(&fileNew);
menubar.append(&fileMenu, "File");
menubar
}
extern "C"
fn MyButton_clicked(fun: *mut c_void, data: *mut c_void, evt: *mut c_void) {
if evt == 0 as *mut c_void {
// Comes here when the target widget is destroyed.
return;
}
println!("hello!");
let parent = Window::from(data);
let msgDlg = MessageDialog::new(&parent, "Pushed!!", "The Button", OK);
msgDlg.showModal();
}
fn make_button<T: WindowMethods>(parent: &T) -> Button {
let button = Button::new(parent, ID_ANY, "Push me!", 10, 10, 50, 30, 0);
let closure = Closure::new(MyButton_clicked as *mut c_void, parent.ptr());
unsafe {
button.connect(ID_ANY, ID_ANY, expEVT_COMMAND_BUTTON_CLICKED(), closure.ptr());
}
button
} | make_button(&frame);
frame | random_line_split |
state.rs | use nalgebra::{Point2, Scalar, Vector2};
use std::collections::HashSet;
use std::hash::Hash;
use event::{ElementState, React};
/// An atomic state of an input element.
pub trait State: Copy + Eq {
// TODO: Use a default type (`Self`) here once that feature stabilizes.
/// Representation of a difference between states.
type Difference;
/// Gets the transition between a live and snapshot state. If no transition
/// has occurred, returns `None`.
fn transition(live: Self, snapshot: Self) -> Option<Self> {
if live == snapshot {
None
}
else {
Some(live)
}
}
}
impl State for bool {
type Difference = Self;
}
impl State for ElementState {
type Difference = Self;
}
impl<T> State for Point2<T>
where | }
/// An input element, such as a button, key, or position.
pub trait Element: Copy + Sized {
/// Representation of the state of the element.
type State: State;
}
/// A state with a composite representation. This is used for input elements
/// which have a cardinality greater than one. For example, a mouse may have
/// more than one button.
pub trait CompositeState<E>
where
E: Element,
{
// TODO: Use a default type (`E::State`) here once that feature stabilizes.
/// Representation of the composite state.
type Composite;
/// Gets the composite state.
fn composite(&self) -> &Self::Composite;
}
/// Provides a state for an input element.
pub trait InputState<E>
where
E: Element,
{
/// Gets the state of an input element.
fn state(&self, element: E) -> E::State;
}
// Blanket implementation for `InputState` for composite states represented by
// a `HashSet`, such as keys and buttons.
impl<E, T> InputState<E> for T
where
T: CompositeState<E, Composite = HashSet<E>>,
E: Element<State = ElementState> + Eq + Hash,
{
fn state(&self, element: E) -> E::State {
if self.composite().contains(&element) {
ElementState::Pressed
}
else {
ElementState::Released
}
}
}
/// Provides a transition state for an input element.
pub trait InputTransition<E>
where
E: Element,
{
/// Gets the transition state of an input element.
fn transition(&self, element: E) -> Option<E::State>;
}
impl<E, T> InputTransition<E> for T
where
T: Input,
T::State: InputState<E>,
E: Element,
{
fn transition(&self, element: E) -> Option<E::State> {
E::State::transition(self.live().state(element), self.snapshot().state(element))
}
}
/// Determines the difference in state for an input element.
pub trait InputDifference<E>
where
E: Element,
{
/// Iterable representation of differences in state.
type Difference: IntoIterator<Item = (E, <E::State as State>::Difference)>;
/// Gets the difference in state for an input element.
fn difference(&self) -> Self::Difference;
}
// Blanket implementation for `InputDifference` for composite states
// represented by a `HashSet`, such as keys and buttons.
impl<E, S, T> InputDifference<E> for T
where
T: Input,
T::State: CompositeState<E, Composite = HashSet<E>> + InputState<E>,
E: Element<State = S> + Eq + Hash,
S: State<Difference = S>,
{
type Difference = Vec<(E, <E::State as State>::Difference)>;
fn difference(&self) -> Self::Difference {
self.live()
.composite()
.symmetric_difference(self.snapshot().composite())
.map(|element| (*element, self.live().state(*element)))
.collect()
}
}
/// An input device with a live state and snapshot state. These are updated via
/// `React` and `Snapshot` and provide information about the live state and
/// changes based on the snapshot state.
pub trait Input: React + Snapshot {
/// Aggregate state for the input device.
type State;
/// Gets the live state.
fn live(&self) -> &Self::State;
// TODO: The term "snapshot" is ambiguous. Here, it refers to the snapshot
// of the state of an input device. In the `Snapshot` trait, it is
// used as a verb for the operation of taking a snapshot (copying the
// live state into the snapshot state). However, the `Input` trait is
// not exposed outside of this module, so this shouldn't affect
// client code.
/// Gets the snapshot state.
fn snapshot(&self) -> &Self::State;
}
/// Provides snapshotting for an input device. Input devices maintain a live
/// state and snapshot state, which are updated via `React` and this trait,
/// respectively.
pub trait Snapshot {
/// Snapshots the live state.
fn snapshot(&mut self);
} | T: Eq + Scalar,
{
type Difference = Vector2<T>; | random_line_split |
state.rs | use nalgebra::{Point2, Scalar, Vector2};
use std::collections::HashSet;
use std::hash::Hash;
use event::{ElementState, React};
/// An atomic state of an input element.
pub trait State: Copy + Eq {
// TODO: Use a default type (`Self`) here once that feature stabilizes.
/// Representation of a difference between states.
type Difference;
/// Gets the transition between a live and snapshot state. If no transition
/// has occurred, returns `None`.
fn transition(live: Self, snapshot: Self) -> Option<Self> |
}
impl State for bool {
type Difference = Self;
}
impl State for ElementState {
type Difference = Self;
}
impl<T> State for Point2<T>
where
T: Eq + Scalar,
{
type Difference = Vector2<T>;
}
/// An input element, such as a button, key, or position.
pub trait Element: Copy + Sized {
/// Representation of the state of the element.
type State: State;
}
/// A state with a composite representation. This is used for input elements
/// which have a cardinality greater than one. For example, a mouse may have
/// more than one button.
pub trait CompositeState<E>
where
E: Element,
{
// TODO: Use a default type (`E::State`) here once that feature stabilizes.
/// Representation of the composite state.
type Composite;
/// Gets the composite state.
fn composite(&self) -> &Self::Composite;
}
/// Provides a state for an input element.
pub trait InputState<E>
where
E: Element,
{
/// Gets the state of an input element.
fn state(&self, element: E) -> E::State;
}
// Blanket implementation for `InputState` for composite states represented by
// a `HashSet`, such as keys and buttons.
impl<E, T> InputState<E> for T
where
T: CompositeState<E, Composite = HashSet<E>>,
E: Element<State = ElementState> + Eq + Hash,
{
fn state(&self, element: E) -> E::State {
if self.composite().contains(&element) {
ElementState::Pressed
}
else {
ElementState::Released
}
}
}
/// Provides a transition state for an input element.
pub trait InputTransition<E>
where
E: Element,
{
/// Gets the transition state of an input element.
fn transition(&self, element: E) -> Option<E::State>;
}
impl<E, T> InputTransition<E> for T
where
T: Input,
T::State: InputState<E>,
E: Element,
{
fn transition(&self, element: E) -> Option<E::State> {
E::State::transition(self.live().state(element), self.snapshot().state(element))
}
}
/// Determines the difference in state for an input element.
pub trait InputDifference<E>
where
E: Element,
{
/// Iterable representation of differences in state.
type Difference: IntoIterator<Item = (E, <E::State as State>::Difference)>;
/// Gets the difference in state for an input element.
fn difference(&self) -> Self::Difference;
}
// Blanket implementation for `InputDifference` for composite states
// represented by a `HashSet`, such as keys and buttons.
impl<E, S, T> InputDifference<E> for T
where
T: Input,
T::State: CompositeState<E, Composite = HashSet<E>> + InputState<E>,
E: Element<State = S> + Eq + Hash,
S: State<Difference = S>,
{
type Difference = Vec<(E, <E::State as State>::Difference)>;
fn difference(&self) -> Self::Difference {
self.live()
.composite()
.symmetric_difference(self.snapshot().composite())
.map(|element| (*element, self.live().state(*element)))
.collect()
}
}
/// An input device with a live state and snapshot state. These are updated via
/// `React` and `Snapshot` and provide information about the live state and
/// changes based on the snapshot state.
pub trait Input: React + Snapshot {
/// Aggregate state for the input device.
type State;
/// Gets the live state.
fn live(&self) -> &Self::State;
// TODO: The term "snapshot" is ambiguous. Here, it refers to the snapshot
// of the state of an input device. In the `Snapshot` trait, it is
// used as a verb for the operation of taking a snapshot (copying the
// live state into the snapshot state). However, the `Input` trait is
// not exposed outside of this module, so this shouldn't affect
// client code.
/// Gets the snapshot state.
fn snapshot(&self) -> &Self::State;
}
/// Provides snapshotting for an input device. Input devices maintain a live
/// state and snapshot state, which are updated via `React` and this trait,
/// respectively.
pub trait Snapshot {
/// Snapshots the live state.
fn snapshot(&mut self);
}
| {
if live == snapshot {
None
}
else {
Some(live)
}
} | identifier_body |
state.rs | use nalgebra::{Point2, Scalar, Vector2};
use std::collections::HashSet;
use std::hash::Hash;
use event::{ElementState, React};
/// An atomic state of an input element.
pub trait State: Copy + Eq {
// TODO: Use a default type (`Self`) here once that feature stabilizes.
/// Representation of a difference between states.
type Difference;
/// Gets the transition between a live and snapshot state. If no transition
/// has occurred, returns `None`.
fn transition(live: Self, snapshot: Self) -> Option<Self> {
if live == snapshot {
None
}
else {
Some(live)
}
}
}
impl State for bool {
type Difference = Self;
}
impl State for ElementState {
type Difference = Self;
}
impl<T> State for Point2<T>
where
T: Eq + Scalar,
{
type Difference = Vector2<T>;
}
/// An input element, such as a button, key, or position.
pub trait Element: Copy + Sized {
/// Representation of the state of the element.
type State: State;
}
/// A state with a composite representation. This is used for input elements
/// which have a cardinality greater than one. For example, a mouse may have
/// more than one button.
pub trait CompositeState<E>
where
E: Element,
{
// TODO: Use a default type (`E::State`) here once that feature stabilizes.
/// Representation of the composite state.
type Composite;
/// Gets the composite state.
fn composite(&self) -> &Self::Composite;
}
/// Provides a state for an input element.
pub trait InputState<E>
where
E: Element,
{
/// Gets the state of an input element.
fn state(&self, element: E) -> E::State;
}
// Blanket implementation for `InputState` for composite states represented by
// a `HashSet`, such as keys and buttons.
impl<E, T> InputState<E> for T
where
T: CompositeState<E, Composite = HashSet<E>>,
E: Element<State = ElementState> + Eq + Hash,
{
fn state(&self, element: E) -> E::State {
if self.composite().contains(&element) |
else {
ElementState::Released
}
}
}
/// Provides a transition state for an input element.
pub trait InputTransition<E>
where
E: Element,
{
/// Gets the transition state of an input element.
fn transition(&self, element: E) -> Option<E::State>;
}
impl<E, T> InputTransition<E> for T
where
T: Input,
T::State: InputState<E>,
E: Element,
{
fn transition(&self, element: E) -> Option<E::State> {
E::State::transition(self.live().state(element), self.snapshot().state(element))
}
}
/// Determines the difference in state for an input element.
pub trait InputDifference<E>
where
E: Element,
{
/// Iterable representation of differences in state.
type Difference: IntoIterator<Item = (E, <E::State as State>::Difference)>;
/// Gets the difference in state for an input element.
fn difference(&self) -> Self::Difference;
}
// Blanket implementation for `InputDifference` for composite states
// represented by a `HashSet`, such as keys and buttons.
impl<E, S, T> InputDifference<E> for T
where
T: Input,
T::State: CompositeState<E, Composite = HashSet<E>> + InputState<E>,
E: Element<State = S> + Eq + Hash,
S: State<Difference = S>,
{
type Difference = Vec<(E, <E::State as State>::Difference)>;
fn difference(&self) -> Self::Difference {
self.live()
.composite()
.symmetric_difference(self.snapshot().composite())
.map(|element| (*element, self.live().state(*element)))
.collect()
}
}
/// An input device with a live state and snapshot state. These are updated via
/// `React` and `Snapshot` and provide information about the live state and
/// changes based on the snapshot state.
pub trait Input: React + Snapshot {
/// Aggregate state for the input device.
type State;
/// Gets the live state.
fn live(&self) -> &Self::State;
// TODO: The term "snapshot" is ambiguous. Here, it refers to the snapshot
// of the state of an input device. In the `Snapshot` trait, it is
// used as a verb for the operation of taking a snapshot (copying the
// live state into the snapshot state). However, the `Input` trait is
// not exposed outside of this module, so this shouldn't affect
// client code.
/// Gets the snapshot state.
fn snapshot(&self) -> &Self::State;
}
/// Provides snapshotting for an input device. Input devices maintain a live
/// state and snapshot state, which are updated via `React` and this trait,
/// respectively.
pub trait Snapshot {
/// Snapshots the live state.
fn snapshot(&mut self);
}
| {
ElementState::Pressed
} | conditional_block |
state.rs | use nalgebra::{Point2, Scalar, Vector2};
use std::collections::HashSet;
use std::hash::Hash;
use event::{ElementState, React};
/// An atomic state of an input element.
pub trait State: Copy + Eq {
// TODO: Use a default type (`Self`) here once that feature stabilizes.
/// Representation of a difference between states.
type Difference;
/// Gets the transition between a live and snapshot state. If no transition
/// has occurred, returns `None`.
fn transition(live: Self, snapshot: Self) -> Option<Self> {
if live == snapshot {
None
}
else {
Some(live)
}
}
}
impl State for bool {
type Difference = Self;
}
impl State for ElementState {
type Difference = Self;
}
impl<T> State for Point2<T>
where
T: Eq + Scalar,
{
type Difference = Vector2<T>;
}
/// An input element, such as a button, key, or position.
pub trait Element: Copy + Sized {
/// Representation of the state of the element.
type State: State;
}
/// A state with a composite representation. This is used for input elements
/// which have a cardinality greater than one. For example, a mouse may have
/// more than one button.
pub trait CompositeState<E>
where
E: Element,
{
// TODO: Use a default type (`E::State`) here once that feature stabilizes.
/// Representation of the composite state.
type Composite;
/// Gets the composite state.
fn composite(&self) -> &Self::Composite;
}
/// Provides a state for an input element.
pub trait InputState<E>
where
E: Element,
{
/// Gets the state of an input element.
fn state(&self, element: E) -> E::State;
}
// Blanket implementation for `InputState` for composite states represented by
// a `HashSet`, such as keys and buttons.
impl<E, T> InputState<E> for T
where
T: CompositeState<E, Composite = HashSet<E>>,
E: Element<State = ElementState> + Eq + Hash,
{
fn | (&self, element: E) -> E::State {
if self.composite().contains(&element) {
ElementState::Pressed
}
else {
ElementState::Released
}
}
}
/// Provides a transition state for an input element.
pub trait InputTransition<E>
where
E: Element,
{
/// Gets the transition state of an input element.
fn transition(&self, element: E) -> Option<E::State>;
}
impl<E, T> InputTransition<E> for T
where
T: Input,
T::State: InputState<E>,
E: Element,
{
fn transition(&self, element: E) -> Option<E::State> {
E::State::transition(self.live().state(element), self.snapshot().state(element))
}
}
/// Determines the difference in state for an input element.
pub trait InputDifference<E>
where
E: Element,
{
/// Iterable representation of differences in state.
type Difference: IntoIterator<Item = (E, <E::State as State>::Difference)>;
/// Gets the difference in state for an input element.
fn difference(&self) -> Self::Difference;
}
// Blanket implementation for `InputDifference` for composite states
// represented by a `HashSet`, such as keys and buttons.
impl<E, S, T> InputDifference<E> for T
where
T: Input,
T::State: CompositeState<E, Composite = HashSet<E>> + InputState<E>,
E: Element<State = S> + Eq + Hash,
S: State<Difference = S>,
{
type Difference = Vec<(E, <E::State as State>::Difference)>;
fn difference(&self) -> Self::Difference {
self.live()
.composite()
.symmetric_difference(self.snapshot().composite())
.map(|element| (*element, self.live().state(*element)))
.collect()
}
}
/// An input device with a live state and snapshot state. These are updated via
/// `React` and `Snapshot` and provide information about the live state and
/// changes based on the snapshot state.
pub trait Input: React + Snapshot {
/// Aggregate state for the input device.
type State;
/// Gets the live state.
fn live(&self) -> &Self::State;
// TODO: The term "snapshot" is ambiguous. Here, it refers to the snapshot
// of the state of an input device. In the `Snapshot` trait, it is
// used as a verb for the operation of taking a snapshot (copying the
// live state into the snapshot state). However, the `Input` trait is
// not exposed outside of this module, so this shouldn't affect
// client code.
/// Gets the snapshot state.
fn snapshot(&self) -> &Self::State;
}
/// Provides snapshotting for an input device. Input devices maintain a live
/// state and snapshot state, which are updated via `React` and this trait,
/// respectively.
pub trait Snapshot {
/// Snapshots the live state.
fn snapshot(&mut self);
}
| state | identifier_name |
dispnew.rs | //! Updating of data structures for redisplay.
use std::{cmp, ptr};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
eval::unbind_to,
frame::selected_frame,
frame::{LispFrameLiveOrSelected, LispFrameRef},
lisp::{ExternalPtr, LispObject},
lists::{LispConsCircularChecks, LispConsEndChecks},
remacs_sys::{
clear_current_matrices, detect_input_pending_run_timers, dtotimespec, fset_redisplay,
mark_window_display_accurate, putchar_unlocked, redisplay_preserve_echo_area, ring_bell,
specbind, swallow_events, timespec_add, timespec_sub, wait_reading_process_output,
},
remacs_sys::{
globals, noninteractive, redisplaying_p, Qnil, Qredisplay_dont_pause, Qt, Vframe_list,
WAIT_READING_MAX,
},
remacs_sys::{EmacsDouble, EmacsInt, Lisp_Glyph},
terminal::{clear_frame, update_begin, update_end},
threads::c_specpdl_index,
windows::{LispWindowOrSelected, LispWindowRef},
};
pub type LispGlyphRef = ExternalPtr<Lisp_Glyph>;
/// Pause, without updating display, for SECONDS seconds.
/// SECONDS may be a floating-point value, meaning that you can wait for a
/// fraction of a second. Optional second arg MILLISECONDS specifies an
/// additional wait period, in milliseconds; this is for backwards compatibility.
/// (Not all operating systems support waiting for a fraction of a second.)
#[lisp_fn(min = "1")]
pub fn sleep_for(seconds: EmacsDouble, milliseconds: Option<EmacsInt>) {
let duration = seconds + (milliseconds.unwrap_or(0) as f64 / 1000.0);
if duration > 0.0 {
let mut t = unsafe { dtotimespec(duration) };
let tend = unsafe { timespec_add(current_timespec(), t) };
while!t.tv_sec < 0 && (t.tv_sec > 0 || t.tv_nsec > 0) {
unsafe {
wait_reading_process_output(
cmp::min(t.tv_sec as i64, WAIT_READING_MAX),
t.tv_nsec as i32,
0,
true,
Qnil,
ptr::null_mut(),
0,
)
};
t = unsafe { timespec_sub(tend, current_timespec()) };
}
}
}
/**********************************************************************
Redrawing Frames
**********************************************************************/
/// Redraw frame FRAME.
#[no_mangle]
pub extern "C" fn redraw_frame(mut frame: LispFrameRef) {
unsafe {
// Error if FRAME has no glyphs.
debug_assert!(frame.glyphs_initialized_p());
update_begin(frame);
clear_frame(frame);
clear_current_matrices(frame.as_mut());
update_end(frame);
fset_redisplay(frame.as_mut());
// Mark all windows as inaccurate, so that every window will have
// its redisplay done.
mark_window_display_accurate(frame.root_window, false);
set_window_update_flags(frame.root_window.into(), true);
frame.set_garbaged(false);
} | #[lisp_fn(c_name = "redraw_frame", name = "redraw-frame", min = "0")]
pub fn redraw_frame_lisp(frame: LispFrameLiveOrSelected) {
redraw_frame(frame.into());
}
/// Clear and redisplay all visible frames.
#[lisp_fn]
pub fn redraw_display() {
for_each_frame!(frame => {
if frame.visible()!= 0 {
redraw_frame(frame);
}
});
}
/// Set WINDOW->must_be_updated_p to ON_P for all windows in
/// the window tree rooted at W.
// Make private once all C usages are ported in this file
#[no_mangle]
pub extern "C" fn set_window_update_flags(w: LispWindowRef, on_p: bool) {
let mut w = Some(w);
while let Some(mut win) = w {
if let Some(contents) = win.contents.as_window() {
set_window_update_flags(contents, on_p);
} else {
win.set_must_be_updated_p(on_p);
}
let next = win.next;
w = if next.is_nil() {
None
} else {
Some(next.into())
};
}
}
/***********************************************************************
Blinking cursor
***********************************************************************/
/// Set the cursor-visibility flag of WINDOW to SHOW.
/// WINDOW nil means use the selected window. SHOW non-nil means
/// show a cursor in WINDOW in the next redisplay. SHOW nil means
/// don't show a cursor.
#[lisp_fn]
pub fn internal_show_cursor(window: LispWindowOrSelected, show: bool) {
let mut win: LispWindowRef = window.into();
// Don't change cursor state while redisplaying. This could confuse
// output routines.
if!unsafe { redisplaying_p } {
win.set_cursor_off_p(!show)
}
}
/// Value is non-nil if next redisplay will display a cursor in WINDOW.
/// WINDOW nil or omitted means report on the selected window.
#[lisp_fn(min = "0")]
pub fn internal_show_cursor_p(window: LispWindowOrSelected) -> bool {
let win: LispWindowRef = window.into();
!win.cursor_off_p()
}
/// Return whether input is coming from the keyboard.
// Corresponds to the INTERACTIVE macro in commands.h.
pub fn is_interactive() -> bool {
unsafe { globals.Vexecuting_kbd_macro.is_nil() &&!noninteractive }
}
#[no_mangle]
pub extern "C" fn ding_internal(terminate_macro: bool) {
unsafe {
if noninteractive {
putchar_unlocked(0o7);
} else if terminate_macro &&!is_interactive() {
// Stop executing a keyboard macro.
user_error!("Keyboard macro terminated by a command ringing the bell");
} else {
ring_bell(selected_frame().as_mut())
}
}
}
/// Beep, or flash the screen.
/// Also, unless an argument is given,
/// terminate any keyboard macro currently executing.
#[lisp_fn(min = "0")]
pub fn ding(arg: LispObject) {
ding_internal(arg.is_nil())
}
/// Perform redisplay.
/// Optional arg FORCE, if non-nil, prevents redisplay from being
/// preempted by arriving input, even if `redisplay-dont-pause' is nil.
/// If `redisplay-dont-pause' is non-nil (the default), redisplay is never
/// preempted by arriving input, so FORCE does nothing.
///
/// Return t if redisplay was performed, nil if redisplay was preempted
/// immediately by pending input.
#[lisp_fn(min = "0")]
pub fn redisplay(force: LispObject) -> bool {
let force: bool = force.is_not_nil();
unsafe {
swallow_events(true);
let ret =
(detect_input_pending_run_timers(true) &&!force &&!globals.redisplay_dont_pause)
|| globals.Vexecuting_kbd_macro.is_not_nil();
if ret {
let count = c_specpdl_index();
if force &&!globals.redisplay_dont_pause {
specbind(Qredisplay_dont_pause, Qt);
}
redisplay_preserve_echo_area(2);
unbind_to(count, Qnil);
}
ret
}
}
include!(concat!(env!("OUT_DIR"), "/dispnew_exports.rs")); | }
/// Clear frame FRAME and output again what is supposed to appear on it.
/// If FRAME is omitted or nil, the selected frame is used. | random_line_split |
dispnew.rs | //! Updating of data structures for redisplay.
use std::{cmp, ptr};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
eval::unbind_to,
frame::selected_frame,
frame::{LispFrameLiveOrSelected, LispFrameRef},
lisp::{ExternalPtr, LispObject},
lists::{LispConsCircularChecks, LispConsEndChecks},
remacs_sys::{
clear_current_matrices, detect_input_pending_run_timers, dtotimespec, fset_redisplay,
mark_window_display_accurate, putchar_unlocked, redisplay_preserve_echo_area, ring_bell,
specbind, swallow_events, timespec_add, timespec_sub, wait_reading_process_output,
},
remacs_sys::{
globals, noninteractive, redisplaying_p, Qnil, Qredisplay_dont_pause, Qt, Vframe_list,
WAIT_READING_MAX,
},
remacs_sys::{EmacsDouble, EmacsInt, Lisp_Glyph},
terminal::{clear_frame, update_begin, update_end},
threads::c_specpdl_index,
windows::{LispWindowOrSelected, LispWindowRef},
};
pub type LispGlyphRef = ExternalPtr<Lisp_Glyph>;
/// Pause, without updating display, for SECONDS seconds.
/// SECONDS may be a floating-point value, meaning that you can wait for a
/// fraction of a second. Optional second arg MILLISECONDS specifies an
/// additional wait period, in milliseconds; this is for backwards compatibility.
/// (Not all operating systems support waiting for a fraction of a second.)
#[lisp_fn(min = "1")]
pub fn sleep_for(seconds: EmacsDouble, milliseconds: Option<EmacsInt>) {
let duration = seconds + (milliseconds.unwrap_or(0) as f64 / 1000.0);
if duration > 0.0 {
let mut t = unsafe { dtotimespec(duration) };
let tend = unsafe { timespec_add(current_timespec(), t) };
while!t.tv_sec < 0 && (t.tv_sec > 0 || t.tv_nsec > 0) {
unsafe {
wait_reading_process_output(
cmp::min(t.tv_sec as i64, WAIT_READING_MAX),
t.tv_nsec as i32,
0,
true,
Qnil,
ptr::null_mut(),
0,
)
};
t = unsafe { timespec_sub(tend, current_timespec()) };
}
}
}
/**********************************************************************
Redrawing Frames
**********************************************************************/
/// Redraw frame FRAME.
#[no_mangle]
pub extern "C" fn redraw_frame(mut frame: LispFrameRef) {
unsafe {
// Error if FRAME has no glyphs.
debug_assert!(frame.glyphs_initialized_p());
update_begin(frame);
clear_frame(frame);
clear_current_matrices(frame.as_mut());
update_end(frame);
fset_redisplay(frame.as_mut());
// Mark all windows as inaccurate, so that every window will have
// its redisplay done.
mark_window_display_accurate(frame.root_window, false);
set_window_update_flags(frame.root_window.into(), true);
frame.set_garbaged(false);
}
}
/// Clear frame FRAME and output again what is supposed to appear on it.
/// If FRAME is omitted or nil, the selected frame is used.
#[lisp_fn(c_name = "redraw_frame", name = "redraw-frame", min = "0")]
pub fn redraw_frame_lisp(frame: LispFrameLiveOrSelected) {
redraw_frame(frame.into());
}
/// Clear and redisplay all visible frames.
#[lisp_fn]
pub fn redraw_display() {
for_each_frame!(frame => {
if frame.visible()!= 0 {
redraw_frame(frame);
}
});
}
/// Set WINDOW->must_be_updated_p to ON_P for all windows in
/// the window tree rooted at W.
// Make private once all C usages are ported in this file
#[no_mangle]
pub extern "C" fn set_window_update_flags(w: LispWindowRef, on_p: bool) {
    // Walk W and its siblings iteratively (via the `next` chain),
    // recursing only to descend into subtrees.
    let mut w = Some(w);
    while let Some(mut win) = w {
        if let Some(contents) = win.contents.as_window() {
            // `contents` is itself a window, so `win` is an internal
            // (container) node: descend into its children.
            // NOTE(review): only leaf windows receive the flag below;
            // confirm interior windows are intentionally left untouched.
            set_window_update_flags(contents, on_p);
        } else {
            // Leaf window (contents is not a window): record the flag.
            win.set_must_be_updated_p(on_p);
        }
        // Advance to the next sibling; a nil `next` ends the chain.
        let next = win.next;
        w = if next.is_nil() {
            None
        } else {
            Some(next.into())
        };
    }
}
/***********************************************************************
Blinking cursor
***********************************************************************/
/// Set the cursor-visibility flag of WINDOW to SHOW.
/// WINDOW nil means use the selected window. SHOW non-nil means
/// show a cursor in WINDOW in the next redisplay. SHOW nil means
/// don't show a cursor.
#[lisp_fn]
pub fn internal_show_cursor(window: LispWindowOrSelected, show: bool) {
let mut win: LispWindowRef = window.into();
// Don't change cursor state while redisplaying. This could confuse
// output routines.
if!unsafe { redisplaying_p } {
win.set_cursor_off_p(!show)
}
}
/// Value is non-nil if next redisplay will display a cursor in WINDOW.
/// WINDOW nil or omitted means report on the selected window.
#[lisp_fn(min = "0")]
pub fn internal_show_cursor_p(window: LispWindowOrSelected) -> bool {
let win: LispWindowRef = window.into();
!win.cursor_off_p()
}
/// Return whether input is coming from the keyboard.
// Corresponds to the INTERACTIVE macro in commands.h.
pub fn is_interactive() -> bool {
unsafe { globals.Vexecuting_kbd_macro.is_nil() &&!noninteractive }
}
#[no_mangle]
pub extern "C" fn ding_internal(terminate_macro: bool) {
unsafe {
if noninteractive {
putchar_unlocked(0o7);
} else if terminate_macro &&!is_interactive() {
// Stop executing a keyboard macro.
user_error!("Keyboard macro terminated by a command ringing the bell");
} else {
ring_bell(selected_frame().as_mut())
}
}
}
/// Beep, or flash the screen.
/// Also, unless an argument is given,
/// terminate any keyboard macro currently executing.
#[lisp_fn(min = "0")]
pub fn ding(arg: LispObject) {
ding_internal(arg.is_nil())
}
/// Perform redisplay.
/// Optional arg FORCE, if non-nil, prevents redisplay from being
/// preempted by arriving input, even if `redisplay-dont-pause' is nil.
/// If `redisplay-dont-pause' is non-nil (the default), redisplay is never
/// preempted by arriving input, so FORCE does nothing.
///
/// Return t if redisplay was performed, nil if redisplay was preempted
/// immediately by pending input.
#[lisp_fn(min = "0")]
pub fn redisplay(force: LispObject) -> bool | }
ret
}
}
include!(concat!(env!("OUT_DIR"), "/dispnew_exports.rs"));
| {
let force: bool = force.is_not_nil();
unsafe {
swallow_events(true);
let ret =
(detect_input_pending_run_timers(true) && !force && !globals.redisplay_dont_pause)
|| globals.Vexecuting_kbd_macro.is_not_nil();
if ret {
let count = c_specpdl_index();
if force && !globals.redisplay_dont_pause {
specbind(Qredisplay_dont_pause, Qt);
}
redisplay_preserve_echo_area(2);
unbind_to(count, Qnil); | identifier_body |
dispnew.rs | //! Updating of data structures for redisplay.
use std::{cmp, ptr};
use remacs_lib::current_timespec;
use remacs_macros::lisp_fn;
use crate::{
eval::unbind_to,
frame::selected_frame,
frame::{LispFrameLiveOrSelected, LispFrameRef},
lisp::{ExternalPtr, LispObject},
lists::{LispConsCircularChecks, LispConsEndChecks},
remacs_sys::{
clear_current_matrices, detect_input_pending_run_timers, dtotimespec, fset_redisplay,
mark_window_display_accurate, putchar_unlocked, redisplay_preserve_echo_area, ring_bell,
specbind, swallow_events, timespec_add, timespec_sub, wait_reading_process_output,
},
remacs_sys::{
globals, noninteractive, redisplaying_p, Qnil, Qredisplay_dont_pause, Qt, Vframe_list,
WAIT_READING_MAX,
},
remacs_sys::{EmacsDouble, EmacsInt, Lisp_Glyph},
terminal::{clear_frame, update_begin, update_end},
threads::c_specpdl_index,
windows::{LispWindowOrSelected, LispWindowRef},
};
pub type LispGlyphRef = ExternalPtr<Lisp_Glyph>;
/// Pause, without updating display, for SECONDS seconds.
/// SECONDS may be a floating-point value, meaning that you can wait for a
/// fraction of a second. Optional second arg MILLISECONDS specifies an
/// additional wait period, in milliseconds; this is for backwards compatibility.
/// (Not all operating systems support waiting for a fraction of a second.)
#[lisp_fn(min = "1")]
pub fn sleep_for(seconds: EmacsDouble, milliseconds: Option<EmacsInt>) {
    // Total requested wait, in fractional seconds.
    let duration = seconds + (milliseconds.unwrap_or(0) as f64 / 1000.0);
    if duration > 0.0 {
        // `t` is the time still left to wait; `tend` the absolute deadline.
        let mut t = unsafe { dtotimespec(duration) };
        let tend = unsafe { timespec_add(current_timespec(), t) };
        // NOTE(review): `!t.tv_sec < 0` parses as `(!t.tv_sec) < 0`
        // (bitwise NOT, then compare), which for two's-complement i64 is
        // equivalent to `t.tv_sec >= 0` — i.e. the same as the presumably
        // intended `!(t.tv_sec < 0)`. Spacing looks mangled here; confirm
        // against upstream and add parentheses for clarity.
        while!t.tv_sec < 0 && (t.tv_sec > 0 || t.tv_nsec > 0) {
            unsafe {
                // Wait in chunks of at most WAIT_READING_MAX seconds,
                // continuing to service process output while sleeping.
                wait_reading_process_output(
                    cmp::min(t.tv_sec as i64, WAIT_READING_MAX),
                    t.tv_nsec as i32,
                    0,
                    true,
                    Qnil,
                    ptr::null_mut(),
                    0,
                )
            };
            // Recompute the remaining time from the fixed deadline so the
            // loop tolerates early wakeups from the wait call.
            t = unsafe { timespec_sub(tend, current_timespec()) };
        }
    }
}
/**********************************************************************
Redrawing Frames
**********************************************************************/
/// Redraw frame FRAME.
#[no_mangle]
pub extern "C" fn redraw_frame(mut frame: LispFrameRef) {
    unsafe {
        // Error if FRAME has no glyphs.
        debug_assert!(frame.glyphs_initialized_p());
        // Physically wipe the frame and discard its remembered contents,
        // bracketed by the terminal's update hooks.
        update_begin(frame);
        clear_frame(frame);
        clear_current_matrices(frame.as_mut());
        update_end(frame);
        // Request a full redisplay of this frame.
        fset_redisplay(frame.as_mut());
        // Mark all windows as inaccurate, so that every window will have
        // its redisplay done.
        mark_window_display_accurate(frame.root_window, false);
        set_window_update_flags(frame.root_window.into(), true);
        // The frame is no longer considered garbaged once queued above.
        frame.set_garbaged(false);
    }
}
/// Clear frame FRAME and output again what is supposed to appear on it.
/// If FRAME is omitted or nil, the selected frame is used.
#[lisp_fn(c_name = "redraw_frame", name = "redraw-frame", min = "0")]
pub fn redraw_frame_lisp(frame: LispFrameLiveOrSelected) {
redraw_frame(frame.into());
}
/// Clear and redisplay all visible frames.
#[lisp_fn]
pub fn redraw_display() {
    // Walk the global frame list (for_each_frame! iterates Vframe_list)
    // and redraw each frame whose visibility count is nonzero.
    for_each_frame!(frame => {
        if frame.visible()!= 0 {
            redraw_frame(frame);
        }
    });
}
/// Set WINDOW->must_be_updated_p to ON_P for all windows in
/// the window tree rooted at W.
// Make private once all C usages are ported in this file
#[no_mangle]
pub extern "C" fn | (w: LispWindowRef, on_p: bool) {
let mut w = Some(w);
while let Some(mut win) = w {
if let Some(contents) = win.contents.as_window() {
set_window_update_flags(contents, on_p);
} else {
win.set_must_be_updated_p(on_p);
}
let next = win.next;
w = if next.is_nil() {
None
} else {
Some(next.into())
};
}
}
/***********************************************************************
Blinking cursor
***********************************************************************/
/// Set the cursor-visibility flag of WINDOW to SHOW.
/// WINDOW nil means use the selected window. SHOW non-nil means
/// show a cursor in WINDOW in the next redisplay. SHOW nil means
/// don't show a cursor.
#[lisp_fn]
pub fn internal_show_cursor(window: LispWindowOrSelected, show: bool) {
    let mut win: LispWindowRef = window.into();
    // Leave cursor state alone while a redisplay is in progress; output
    // routines could otherwise be confused by the flag changing under them.
    let mid_redisplay = unsafe { redisplaying_p };
    if mid_redisplay {
        return;
    }
    win.set_cursor_off_p(!show);
}
/// Value is non-nil if next redisplay will display a cursor in WINDOW.
/// WINDOW nil or omitted means report on the selected window.
#[lisp_fn(min = "0")]
pub fn internal_show_cursor_p(window: LispWindowOrSelected) -> bool {
    let win: LispWindowRef = window.into();
    // The cursor will be shown exactly when the "cursor off" flag is clear.
    let cursor_off = win.cursor_off_p();
    !cursor_off
}
/// Return whether input is coming from the keyboard: the session is not
/// running in batch (noninteractive) mode and no keyboard macro is
/// currently being replayed.
// Corresponds to the INTERACTIVE macro in commands.h.
pub fn is_interactive() -> bool {
    // Both operands are plain reads of global state, so their order is
    // interchangeable; batch mode is tested first here.
    unsafe { !noninteractive && globals.Vexecuting_kbd_macro.is_nil() }
}
/// Sound the bell. In batch mode, write an ASCII BEL to stdout instead.
/// When TERMINATE_MACRO is true and a keyboard macro is currently
/// executing, signal a user error so the macro is stopped.
#[no_mangle]
pub extern "C" fn ding_internal(terminate_macro: bool) {
    unsafe {
        if noninteractive {
            // Batch session: no display to flash, so emit BEL (0o7).
            putchar_unlocked(0o7);
        } else if terminate_macro &&!is_interactive() {
            // Not batch, yet not interactive — by is_interactive's
            // definition a keyboard macro must be executing.
            // Stop executing a keyboard macro.
            user_error!("Keyboard macro terminated by a command ringing the bell");
        } else {
            ring_bell(selected_frame().as_mut())
        }
    }
}
/// Beep, or flash the screen.
/// Also, unless an argument is given,
/// terminate any keyboard macro currently executing.
#[lisp_fn(min = "0")]
pub fn ding(arg: LispObject) {
    // A nil (absent) ARG asks ding_internal to also terminate any
    // keyboard macro that is currently executing.
    let terminate_macro = arg.is_nil();
    ding_internal(terminate_macro)
}
/// Perform redisplay.
/// Optional arg FORCE, if non-nil, prevents redisplay from being
/// preempted by arriving input, even if `redisplay-dont-pause' is nil.
/// If `redisplay-dont-pause' is non-nil (the default), redisplay is never
/// preempted by arriving input, so FORCE does nothing.
///
/// Return t if redisplay was performed, nil if redisplay was preempted
/// immediately by pending input.
#[lisp_fn(min = "0")]
pub fn redisplay(force: LispObject) -> bool {
    let force: bool = force.is_not_nil();
    unsafe {
        swallow_events(true);
        // Redisplay is preempted when input is already pending (unless
        // FORCE or a non-nil `redisplay-dont-pause' overrides that), or
        // while a keyboard macro is executing.
        let preempted = (detect_input_pending_run_timers(true)
            && !force
            && !globals.redisplay_dont_pause)
            || globals.Vexecuting_kbd_macro.is_not_nil();
        if !preempted {
            let count = c_specpdl_index();
            if force && !globals.redisplay_dont_pause {
                // Temporarily bind `redisplay-dont-pause' to t so the
                // forced redisplay cannot be interrupted; unbind below.
                specbind(Qredisplay_dont_pause, Qt);
            }
            redisplay_preserve_echo_area(2);
            unbind_to(count, Qnil);
        }
        // BUG FIX: the condition was previously used with inverted sense —
        // redisplay ran (and t was returned) exactly when it should have
        // been preempted, and a plain idle `(redisplay)` did nothing.
        // Sense now matches the docstring and dispnew.c `Fredisplay`.
        !preempted
    }
}
include!(concat!(env!("OUT_DIR"), "/dispnew_exports.rs"));
| set_window_update_flags | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.