file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
graphs.rs | extern crate rand;
extern crate timely;
extern crate differential_dataflow;
use std::rc::Rc;
use rand::{Rng, SeedableRng, StdRng};
use timely::dataflow::*;
use differential_dataflow::input::Input;
use differential_dataflow::Collection;
use differential_dataflow::operators::*;
use differential_dataflow::trace::Trace;
use differential_dataflow::operators::arrange::ArrangeByKey;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::trace::implementations::spine_fueled::Spine;
type Node = usize;
use differential_dataflow::trace::implementations::ord::OrdValBatch;
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
// type GraphTrace<N> = Spine<usize, N, (), isize, Rc<GraphBatch<N>>>;
type GraphTrace = Spine<Node, Node, (), isize, Rc<OrdValBatch<Node, Node, (), isize>>>;
fn main() {
let nodes: usize = std::env::args().nth(1).unwrap().parse().unwrap();
let edges: usize = std::env::args().nth(2).unwrap().parse().unwrap();
// Our setting involves four read query types, and two updatable base relations.
//
// Q1: Point lookup: reads "state" associated with a node.
// Q2: One-hop lookup: reads "state" associated with neighbors of a node.
// Q3: Two-hop lookup: reads "state" associated with n-of-n's of a node.
// Q4: Shortest path: reports hop count between two query nodes.
//
// R1: "State": a pair of (node, T) for some type T that I don't currently know.
// R2: "Graph": pairs (node, node) indicating linkage between the two nodes.
timely::execute_from_args(std::env::args().skip(3), move |worker| {
| let (graph_input, graph) = scope.new_collection();
let graph_indexed = graph.arrange_by_key();
// let graph_indexed = graph.arrange_by_key();
(graph_input, graph_indexed.trace)
});
let seed: &[_] = &[1, 2, 3, index];
let mut rng1: StdRng = SeedableRng::from_seed(seed); // rng for edge additions
// let mut rng2: StdRng = SeedableRng::from_seed(seed); // rng for edge deletions
if index == 0 { println!("performing workload on random graph with {} nodes, {} edges:", nodes, edges); }
let worker_edges = edges/peers + if index < (edges % peers) { 1 } else { 0 };
for _ in 0 .. worker_edges {
graph.insert((rng1.gen_range(0, nodes) as Node, rng1.gen_range(0, nodes) as Node));
}
graph.close();
while worker.step() { }
if index == 0 { println!("{:?}\tgraph loaded", timer.elapsed()); }
// Phase 2: Reachability.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
reach(&mut trace, roots);
roots_input
});
if index == 0 { roots.insert(0); }
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\treach complete", timer.elapsed()); }
// Phase 3: Breadth-first distance labeling.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
bfs(&mut trace, roots);
roots_input
});
if index == 0 { roots.insert(0); }
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\tbfs complete", timer.elapsed()); }
}).unwrap();
}
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
use differential_dataflow::operators::arrange::TraceAgent;
type TraceHandle = TraceAgent<Node, Node, (), isize, GraphTrace>;
fn reach<G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, Node> {
let graph = graph.import(&roots.scope());
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
// let reach = inner.concat(&roots).distinct_total().arrange_by_self();
// graph.join_core(&reach, |_src,&dst,&()| Some(dst))
graph.join_core(&inner.arrange_by_self(), |_src,&dst,&()| Some(dst))
.concat(&roots)
.distinct_total()
})
}
fn bfs<G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, (Node, u32)> {
let graph = graph.import(&roots.scope());
let roots = roots.map(|r| (r,0));
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
graph.join_map(&inner, |_src,&dest,&dist| (dest, dist+1))
.concat(&roots)
.reduce(|_key, input, output| output.push((*input[0].0,1)))
})
}
// fn connected_components<G: Scope<Timestamp = ()>>(
// graph: &mut TraceHandle<Node>
// ) -> Collection<G, (Node, Node)> {
// // each edge (x,y) means that we need at least a label for the min of x and y.
// let nodes =
// graph
// .as_collection(|&k,&v| {
// let min = std::cmp::min(k,v);
// (min, min)
// })
// .consolidate();
// // each edge should exist in both directions.
// let edges = edges.map_in_place(|x| mem::swap(&mut x.0, &mut x.1))
// .concat(&edges);
// // don't actually use these labels, just grab the type
// nodes.filter(|_| false)
// .iterate(|inner| {
// let edges = edges.enter(&inner.scope());
// let nodes = nodes.enter_at(&inner.scope(), |r| 256 * (64 - r.1.leading_zeros() as u64));
// inner.join_map(&edges, |_k,l,d| (*d,*l))
// .concat(&nodes)
// .group(|_, s, t| { t.push((*s[0].0, 1)); } )
// })
// } | let index = worker.index();
let peers = worker.peers();
let timer = ::std::time::Instant::now();
let (mut graph, mut trace) = worker.dataflow(|scope| { | random_line_split |
graphs.rs | extern crate rand;
extern crate timely;
extern crate differential_dataflow;
use std::rc::Rc;
use rand::{Rng, SeedableRng, StdRng};
use timely::dataflow::*;
use differential_dataflow::input::Input;
use differential_dataflow::Collection;
use differential_dataflow::operators::*;
use differential_dataflow::trace::Trace;
use differential_dataflow::operators::arrange::ArrangeByKey;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::trace::implementations::spine_fueled::Spine;
type Node = usize;
use differential_dataflow::trace::implementations::ord::OrdValBatch;
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
// type GraphTrace<N> = Spine<usize, N, (), isize, Rc<GraphBatch<N>>>;
type GraphTrace = Spine<Node, Node, (), isize, Rc<OrdValBatch<Node, Node, (), isize>>>;
fn main() {
let nodes: usize = std::env::args().nth(1).unwrap().parse().unwrap();
let edges: usize = std::env::args().nth(2).unwrap().parse().unwrap();
// Our setting involves four read query types, and two updatable base relations.
//
// Q1: Point lookup: reads "state" associated with a node.
// Q2: One-hop lookup: reads "state" associated with neighbors of a node.
// Q3: Two-hop lookup: reads "state" associated with n-of-n's of a node.
// Q4: Shortest path: reports hop count between two query nodes.
//
// R1: "State": a pair of (node, T) for some type T that I don't currently know.
// R2: "Graph": pairs (node, node) indicating linkage between the two nodes.
timely::execute_from_args(std::env::args().skip(3), move |worker| {
let index = worker.index();
let peers = worker.peers();
let timer = ::std::time::Instant::now();
let (mut graph, mut trace) = worker.dataflow(|scope| {
let (graph_input, graph) = scope.new_collection();
let graph_indexed = graph.arrange_by_key();
// let graph_indexed = graph.arrange_by_key();
(graph_input, graph_indexed.trace)
});
let seed: &[_] = &[1, 2, 3, index];
let mut rng1: StdRng = SeedableRng::from_seed(seed); // rng for edge additions
// let mut rng2: StdRng = SeedableRng::from_seed(seed); // rng for edge deletions
if index == 0 { println!("performing workload on random graph with {} nodes, {} edges:", nodes, edges); }
let worker_edges = edges/peers + if index < (edges % peers) { 1 } else { 0 };
for _ in 0 .. worker_edges {
graph.insert((rng1.gen_range(0, nodes) as Node, rng1.gen_range(0, nodes) as Node));
}
graph.close();
while worker.step() { }
if index == 0 { println!("{:?}\tgraph loaded", timer.elapsed()); }
// Phase 2: Reachability.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
reach(&mut trace, roots);
roots_input
});
if index == 0 { roots.insert(0); }
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\treach complete", timer.elapsed()); }
// Phase 3: Breadth-first distance labeling.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
bfs(&mut trace, roots);
roots_input
});
if index == 0 { roots.insert(0); }
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\tbfs complete", timer.elapsed()); }
}).unwrap();
}
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
use differential_dataflow::operators::arrange::TraceAgent;
type TraceHandle = TraceAgent<Node, Node, (), isize, GraphTrace>;
fn | <G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, Node> {
let graph = graph.import(&roots.scope());
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
// let reach = inner.concat(&roots).distinct_total().arrange_by_self();
// graph.join_core(&reach, |_src,&dst,&()| Some(dst))
graph.join_core(&inner.arrange_by_self(), |_src,&dst,&()| Some(dst))
.concat(&roots)
.distinct_total()
})
}
fn bfs<G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, (Node, u32)> {
let graph = graph.import(&roots.scope());
let roots = roots.map(|r| (r,0));
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
graph.join_map(&inner, |_src,&dest,&dist| (dest, dist+1))
.concat(&roots)
.reduce(|_key, input, output| output.push((*input[0].0,1)))
})
}
// fn connected_components<G: Scope<Timestamp = ()>>(
// graph: &mut TraceHandle<Node>
// ) -> Collection<G, (Node, Node)> {
// // each edge (x,y) means that we need at least a label for the min of x and y.
// let nodes =
// graph
// .as_collection(|&k,&v| {
// let min = std::cmp::min(k,v);
// (min, min)
// })
// .consolidate();
// // each edge should exist in both directions.
// let edges = edges.map_in_place(|x| mem::swap(&mut x.0, &mut x.1))
// .concat(&edges);
// // don't actually use these labels, just grab the type
// nodes.filter(|_| false)
// .iterate(|inner| {
// let edges = edges.enter(&inner.scope());
// let nodes = nodes.enter_at(&inner.scope(), |r| 256 * (64 - r.1.leading_zeros() as u64));
// inner.join_map(&edges, |_k,l,d| (*d,*l))
// .concat(&nodes)
// .group(|_, s, t| { t.push((*s[0].0, 1)); } )
// })
// }
| reach | identifier_name |
graphs.rs | extern crate rand;
extern crate timely;
extern crate differential_dataflow;
use std::rc::Rc;
use rand::{Rng, SeedableRng, StdRng};
use timely::dataflow::*;
use differential_dataflow::input::Input;
use differential_dataflow::Collection;
use differential_dataflow::operators::*;
use differential_dataflow::trace::Trace;
use differential_dataflow::operators::arrange::ArrangeByKey;
use differential_dataflow::operators::arrange::ArrangeBySelf;
use differential_dataflow::trace::implementations::spine_fueled::Spine;
type Node = usize;
use differential_dataflow::trace::implementations::ord::OrdValBatch;
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
// type GraphTrace<N> = Spine<usize, N, (), isize, Rc<GraphBatch<N>>>;
type GraphTrace = Spine<Node, Node, (), isize, Rc<OrdValBatch<Node, Node, (), isize>>>;
fn main() {
let nodes: usize = std::env::args().nth(1).unwrap().parse().unwrap();
let edges: usize = std::env::args().nth(2).unwrap().parse().unwrap();
// Our setting involves four read query types, and two updatable base relations.
//
// Q1: Point lookup: reads "state" associated with a node.
// Q2: One-hop lookup: reads "state" associated with neighbors of a node.
// Q3: Two-hop lookup: reads "state" associated with n-of-n's of a node.
// Q4: Shortest path: reports hop count between two query nodes.
//
// R1: "State": a pair of (node, T) for some type T that I don't currently know.
// R2: "Graph": pairs (node, node) indicating linkage between the two nodes.
timely::execute_from_args(std::env::args().skip(3), move |worker| {
let index = worker.index();
let peers = worker.peers();
let timer = ::std::time::Instant::now();
let (mut graph, mut trace) = worker.dataflow(|scope| {
let (graph_input, graph) = scope.new_collection();
let graph_indexed = graph.arrange_by_key();
// let graph_indexed = graph.arrange_by_key();
(graph_input, graph_indexed.trace)
});
let seed: &[_] = &[1, 2, 3, index];
let mut rng1: StdRng = SeedableRng::from_seed(seed); // rng for edge additions
// let mut rng2: StdRng = SeedableRng::from_seed(seed); // rng for edge deletions
if index == 0 { println!("performing workload on random graph with {} nodes, {} edges:", nodes, edges); }
let worker_edges = edges/peers + if index < (edges % peers) { 1 } else { 0 };
for _ in 0 .. worker_edges {
graph.insert((rng1.gen_range(0, nodes) as Node, rng1.gen_range(0, nodes) as Node));
}
graph.close();
while worker.step() { }
if index == 0 { println!("{:?}\tgraph loaded", timer.elapsed()); }
// Phase 2: Reachability.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
reach(&mut trace, roots);
roots_input
});
if index == 0 { roots.insert(0); }
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\treach complete", timer.elapsed()); }
// Phase 3: Breadth-first distance labeling.
let mut roots = worker.dataflow(|scope| {
let (roots_input, roots) = scope.new_collection();
bfs(&mut trace, roots);
roots_input
});
if index == 0 |
roots.close();
while worker.step() { }
if index == 0 { println!("{:?}\tbfs complete", timer.elapsed()); }
}).unwrap();
}
// use differential_dataflow::trace::implementations::ord::OrdValSpine;
use differential_dataflow::operators::arrange::TraceAgent;
type TraceHandle = TraceAgent<Node, Node, (), isize, GraphTrace>;
fn reach<G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, Node> {
let graph = graph.import(&roots.scope());
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
// let reach = inner.concat(&roots).distinct_total().arrange_by_self();
// graph.join_core(&reach, |_src,&dst,&()| Some(dst))
graph.join_core(&inner.arrange_by_self(), |_src,&dst,&()| Some(dst))
.concat(&roots)
.distinct_total()
})
}
fn bfs<G: Scope<Timestamp = ()>> (
graph: &mut TraceHandle,
roots: Collection<G, Node>
) -> Collection<G, (Node, u32)> {
let graph = graph.import(&roots.scope());
let roots = roots.map(|r| (r,0));
roots.iterate(|inner| {
let graph = graph.enter(&inner.scope());
let roots = roots.enter(&inner.scope());
graph.join_map(&inner, |_src,&dest,&dist| (dest, dist+1))
.concat(&roots)
.reduce(|_key, input, output| output.push((*input[0].0,1)))
})
}
// fn connected_components<G: Scope<Timestamp = ()>>(
// graph: &mut TraceHandle<Node>
// ) -> Collection<G, (Node, Node)> {
// // each edge (x,y) means that we need at least a label for the min of x and y.
// let nodes =
// graph
// .as_collection(|&k,&v| {
// let min = std::cmp::min(k,v);
// (min, min)
// })
// .consolidate();
// // each edge should exist in both directions.
// let edges = edges.map_in_place(|x| mem::swap(&mut x.0, &mut x.1))
// .concat(&edges);
// // don't actually use these labels, just grab the type
// nodes.filter(|_| false)
// .iterate(|inner| {
// let edges = edges.enter(&inner.scope());
// let nodes = nodes.enter_at(&inner.scope(), |r| 256 * (64 - r.1.leading_zeros() as u64));
// inner.join_map(&edges, |_k,l,d| (*d,*l))
// .concat(&nodes)
// .group(|_, s, t| { t.push((*s[0].0, 1)); } )
// })
// }
| { roots.insert(0); } | conditional_block |
sha256.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module implements only the Sha256 function since that is all that is needed for internal
//! use. This implementation is not intended for external use or for any use where security is
//! important.
#![allow(deprecated)] // to_be32
use std::iter::{range_step, repeat};
use std::num::Int;
use std::slice::bytes::{MutableByteVector, copy_memory};
use serialize::hex::ToHex;
/// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian
/// format.
fn write_u32_be(dst: &mut[u8], input: u32) {
dst[0] = (input >> 24) as u8;
dst[1] = (input >> 16) as u8;
dst[2] = (input >> 8) as u8;
dst[3] = input as u8;
}
/// Read the value of a vector of bytes as a u32 value in big-endian format.
fn read_u32_be(input: &[u8]) -> u32 {
return
(input[0] as u32) << 24 |
(input[1] as u32) << 16 |
(input[2] as u32) << 8 |
(input[3] as u32);
}
/// Read a vector of bytes into a vector of u32s. The values are read in big-endian format.
fn read_u32v_be(dst: &mut[u32], input: &[u8]) {
assert!(dst.len() * 4 == input.len());
let mut pos = 0us;
for chunk in input.chunks(4) {
dst[pos] = read_u32_be(chunk);
pos += 1;
}
}
trait ToBits {
/// Convert the value in bytes to the number of bits, a tuple where the 1st item is the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero() {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input<F>(&mut self, input: &[u8], func: F) where
F: FnMut(&[u8]);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: usize,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8; 64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, size),
&input[0..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..(i + size)]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.slice_to_mut(input_remaining),
&input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[0..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0u8).take((self.output_bits()+7)/8).collect();
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
} |
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
}
read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0us, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48us, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::num::Int;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1us) / 2us;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog.".to_string(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
test_hash(&mut *sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
let total_size = 1000000;
let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: usize = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(buffer.slice_to(size));
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str.as_slice());
let expected_vec: Vec<u8> = expected.from_hex()
.unwrap()
.into_iter()
.collect();
assert_eq!(expected_vec, result_bytes);
}
#[test]
fn test_1million_random_sha256() {
let mut sh = Sha256::new();
test_digest_1million_random(
&mut sh,
64,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0");
}
}
#[cfg(test)]
mod bench {
extern crate test;
use self::test::Bencher;
use super::{Sha256, FixedBuffer, Digest};
#[bench]
pub fn sha256_10(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 10];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_1k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 1024];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_64k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 65536];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
} | random_line_split | |
sha256.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module implements only the Sha256 function since that is all that is needed for internal
//! use. This implementation is not intended for external use or for any use where security is
//! important.
#![allow(deprecated)] // to_be32
use std::iter::{range_step, repeat};
use std::num::Int;
use std::slice::bytes::{MutableByteVector, copy_memory};
use serialize::hex::ToHex;
/// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian
/// format.
fn write_u32_be(dst: &mut[u8], input: u32) {
dst[0] = (input >> 24) as u8;
dst[1] = (input >> 16) as u8;
dst[2] = (input >> 8) as u8;
dst[3] = input as u8;
}
/// Read the value of a vector of bytes as a u32 value in big-endian format.
fn read_u32_be(input: &[u8]) -> u32 {
return
(input[0] as u32) << 24 |
(input[1] as u32) << 16 |
(input[2] as u32) << 8 |
(input[3] as u32);
}
/// Read a vector of bytes into a vector of u32s. The values are read in big-endian format.
fn read_u32v_be(dst: &mut[u32], input: &[u8]) {
assert!(dst.len() * 4 == input.len());
let mut pos = 0us;
for chunk in input.chunks(4) {
dst[pos] = read_u32_be(chunk);
pos += 1;
}
}
trait ToBits {
/// Convert the value in bytes to the number of bits, a tuple where the 1st item is the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T |
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input<F>(&mut self, input: &[u8], func: F) where
F: FnMut(&[u8]);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: usize,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8; 64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, size),
&input[0..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..(i + size)]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.slice_to_mut(input_remaining),
&input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[0..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0u8).take((self.output_bits()+7)/8).collect();
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
}
read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0us, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48us, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::num::Int;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1us) / 2us;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog.".to_string(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
test_hash(&mut *sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
let total_size = 1000000;
let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: usize = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(buffer.slice_to(size));
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str.as_slice());
let expected_vec: Vec<u8> = expected.from_hex()
.unwrap()
.into_iter()
.collect();
assert_eq!(expected_vec, result_bytes);
}
#[test]
fn test_1million_random_sha256() {
let mut sh = Sha256::new();
test_digest_1million_random(
&mut sh,
64,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0");
}
}
#[cfg(test)]
mod bench {
extern crate test;
use self::test::Bencher;
use super::{Sha256, FixedBuffer, Digest};
#[bench]
pub fn sha256_10(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 10];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_1k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 1024];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_64k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 65536];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
}
| {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero() {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
} | identifier_body |
sha256.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module implements only the Sha256 function since that is all that is needed for internal
//! use. This implementation is not intended for external use or for any use where security is
//! important.
#![allow(deprecated)] // to_be32
use std::iter::{range_step, repeat};
use std::num::Int;
use std::slice::bytes::{MutableByteVector, copy_memory};
use serialize::hex::ToHex;
/// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian
/// format.
fn write_u32_be(dst: &mut[u8], input: u32) {
dst[0] = (input >> 24) as u8;
dst[1] = (input >> 16) as u8;
dst[2] = (input >> 8) as u8;
dst[3] = input as u8;
}
/// Read the value of a vector of bytes as a u32 value in big-endian format.
fn read_u32_be(input: &[u8]) -> u32 {
return
(input[0] as u32) << 24 |
(input[1] as u32) << 16 |
(input[2] as u32) << 8 |
(input[3] as u32);
}
/// Read a vector of bytes into a vector of u32s. The values are read in big-endian format.
fn read_u32v_be(dst: &mut[u32], input: &[u8]) {
assert!(dst.len() * 4 == input.len());
let mut pos = 0us;
for chunk in input.chunks(4) {
dst[pos] = read_u32_be(chunk);
pos += 1;
}
}
trait ToBits {
/// Convert the value in bytes to the number of bits, a tuple where the 1st item is the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero() {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directory or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input<F>(&mut self, input: &[u8], func: F) where
F: FnMut(&[u8]);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: usize,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8; 64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, size),
&input[0..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..(i + size)]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer.slice_to_mut(input_remaining),
&input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[0..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0u8).take((self.output_bits()+7)/8).collect();
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents that state of a digest computation for the SHA-2 512 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
}
read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0us, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48us, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::num::Int;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1us) / 2us;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog.".to_string(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
test_hash(&mut *sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str) {
let total_size = 1000000;
let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: usize = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(buffer.slice_to(size));
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str.as_slice());
let expected_vec: Vec<u8> = expected.from_hex()
.unwrap()
.into_iter()
.collect();
assert_eq!(expected_vec, result_bytes);
}
#[test]
fn | () {
let mut sh = Sha256::new();
test_digest_1million_random(
&mut sh,
64,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0");
}
}
#[cfg(test)]
mod bench {
extern crate test;
use self::test::Bencher;
use super::{Sha256, FixedBuffer, Digest};
#[bench]
pub fn sha256_10(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 10];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_1k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 1024];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
#[bench]
pub fn sha256_64k(b: &mut Bencher) {
let mut sh = Sha256::new();
let bytes = [1u8; 65536];
b.iter(|| {
sh.input(&bytes);
});
b.bytes = bytes.len() as u64;
}
}
| test_1million_random_sha256 | identifier_name |
__init__.py | from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
|
else:
dict = {}
node_data = {}
# get knowledge about
# TODO exception handling for erros
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
| logger.error("No subject to adquire knowledge about")
return | conditional_block |
__init__.py | from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
dict = {}
node_data = {}
# get knowledge about
# TODO exception handling for erros
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
|
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances | random_line_split | |
__init__.py | from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
dict = {}
node_data = {}
# get knowledge about
# TODO exception handling for erros
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict)
def | (self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
| adquire | identifier_name |
__init__.py | from os.path import abspath
import wptools
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_knowledge.services import KnowledgeBackend
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(abspath(__file__).split('/')[-2])
class WikidataService(KnowledgeBackend):
def __init__(self, config, emitter, name='wikidata'):
self.config = config
self.process = None
self.emitter = emitter
self.name = name
self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
def _adquire(self, message=None):
|
def adquire(self, subject):
logger.info('Call WikidataKnowledgeAdquire')
self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
def send_result(self, result = {}):
self.emitter.emit(Message("LILACS_result", {"data": result}))
def stop(self):
logger.info('WikidataKnowledge_Stop')
if self.process:
self.process.terminate()
self.process = None
def load_service(base_config, emitter):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'wikidata']
instances = [WikidataService(s[1], emitter, s[0]) for s in services]
return instances
| logger.info('WikidataKnowledge_Adquire')
subject = message.data["subject"]
if subject is None:
logger.error("No subject to adquire knowledge about")
return
else:
dict = {}
node_data = {}
# get knowledge about
# TODO exception handling for erros
try:
page = wptools.page(subject, silent=True, verbose=False).get_wikidata()
# parse for distant child of
node_data["description"] = page.description
# direct child of
node_data["what"] = page.what
# data fields
node_data["data"] = page.wikidata
# related to
# TODO parse and make cousin/child/parent
node_data["properties"] = page.props
# id info source
dict["wikidata"] = node_data
except:
logger.error("Could not parse wikidata for " + str(subject))
self.send_result(dict) | identifier_body |
test_apt.rs |
extern crate woko;
// use std::io::prelude::*;
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Read;
use std::path::Path;
//use std::collections::HashMap;
use woko::apt;
fn | () -> String {
let mut f = File::open(Path::new("tests/apt.out")).unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
return s;
}
#[test]
fn test_woko_apt() {
let apts = apt::parse_from_string(&read()).unwrap();
let l122 = apts.get(121).unwrap();
assert_eq!(132, apts.len());
assert_eq!("http://archive.ubuntu.com/ubuntu/pool/main/r/rename/rename_0.20-4_all.deb", l122.url);
assert_eq!("rename_0.20-4_all.deb", l122.name);
assert_eq!(12010, l122.size);
assert_eq!("MD5Sum", l122.sum_type);
assert_eq!("6cf1938ef51145a469ccef181a9304ce", l122.sum);
}
}
| read | identifier_name |
test_apt.rs | extern crate woko; | #[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Read;
use std::path::Path;
//use std::collections::HashMap;
use woko::apt;
fn read() -> String {
let mut f = File::open(Path::new("tests/apt.out")).unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
return s;
}
#[test]
fn test_woko_apt() {
let apts = apt::parse_from_string(&read()).unwrap();
let l122 = apts.get(121).unwrap();
assert_eq!(132, apts.len());
assert_eq!("http://archive.ubuntu.com/ubuntu/pool/main/r/rename/rename_0.20-4_all.deb", l122.url);
assert_eq!("rename_0.20-4_all.deb", l122.name);
assert_eq!(12010, l122.size);
assert_eq!("MD5Sum", l122.sum_type);
assert_eq!("6cf1938ef51145a469ccef181a9304ce", l122.sum);
}
} |
// use std::io::prelude::*;
| random_line_split |
test_apt.rs |
extern crate woko;
// use std::io::prelude::*;
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Read;
use std::path::Path;
//use std::collections::HashMap;
use woko::apt;
fn read() -> String {
let mut f = File::open(Path::new("tests/apt.out")).unwrap();
let mut s = String::new();
f.read_to_string(&mut s).ok();
return s;
}
#[test]
fn test_woko_apt() |
}
| {
let apts = apt::parse_from_string(&read()).unwrap();
let l122 = apts.get(121).unwrap();
assert_eq!(132, apts.len());
assert_eq!("http://archive.ubuntu.com/ubuntu/pool/main/r/rename/rename_0.20-4_all.deb", l122.url);
assert_eq!("rename_0.20-4_all.deb", l122.name);
assert_eq!(12010, l122.size);
assert_eq!("MD5Sum", l122.sum_type);
assert_eq!("6cf1938ef51145a469ccef181a9304ce", l122.sum);
} | identifier_body |
Ellipsis.tsx | import * as classNames from 'classnames';
import * as React from 'react';
import {
Bulma,
getActiveModifiers, getFocusedModifiers,
removeActiveModifiers, removeFocusedModifiers,
withHelpersModifiers,
} from './../../bulma';
import { combineModifiers, getHTMLProps } from './../../helpers';
export interface Ellipsis<T> extends Bulma.Active, Bulma.Focused, Bulma.Tag, React.HTMLProps<T> {
} | const className = classNames(
'pagination-ellipsis',
{
...combineModifiers(props, getActiveModifiers, getFocusedModifiers),
},
props.className,
);
const { children, ...HTMLProps } = getHTMLProps(props, removeActiveModifiers, removeFocusedModifiers);
return React.createElement(tag, { ...HTMLProps, className }, '\u2026');
}
const HOC = /*@__PURE__*/withHelpersModifiers(Ellipsis);
export default HOC; |
export function Ellipsis({ tag = 'span', ...props }: Ellipsis<HTMLElement>) { | random_line_split |
Ellipsis.tsx | import * as classNames from 'classnames';
import * as React from 'react';
import {
Bulma,
getActiveModifiers, getFocusedModifiers,
removeActiveModifiers, removeFocusedModifiers,
withHelpersModifiers,
} from './../../bulma';
import { combineModifiers, getHTMLProps } from './../../helpers';
export interface Ellipsis<T> extends Bulma.Active, Bulma.Focused, Bulma.Tag, React.HTMLProps<T> {
}
export function Ellipsis({ tag = 'span', ...props }: Ellipsis<HTMLElement>) |
const HOC = /*@__PURE__*/withHelpersModifiers(Ellipsis);
export default HOC;
| {
const className = classNames(
'pagination-ellipsis',
{
...combineModifiers(props, getActiveModifiers, getFocusedModifiers),
},
props.className,
);
const { children, ...HTMLProps } = getHTMLProps(props, removeActiveModifiers, removeFocusedModifiers);
return React.createElement(tag, { ...HTMLProps, className }, '\u2026');
} | identifier_body |
Ellipsis.tsx | import * as classNames from 'classnames';
import * as React from 'react';
import {
Bulma,
getActiveModifiers, getFocusedModifiers,
removeActiveModifiers, removeFocusedModifiers,
withHelpersModifiers,
} from './../../bulma';
import { combineModifiers, getHTMLProps } from './../../helpers';
export interface Ellipsis<T> extends Bulma.Active, Bulma.Focused, Bulma.Tag, React.HTMLProps<T> {
}
export function | ({ tag = 'span', ...props }: Ellipsis<HTMLElement>) {
const className = classNames(
'pagination-ellipsis',
{
...combineModifiers(props, getActiveModifiers, getFocusedModifiers),
},
props.className,
);
const { children, ...HTMLProps } = getHTMLProps(props, removeActiveModifiers, removeFocusedModifiers);
return React.createElement(tag, { ...HTMLProps, className }, '\u2026');
}
const HOC = /*@__PURE__*/withHelpersModifiers(Ellipsis);
export default HOC;
| Ellipsis | identifier_name |
pelicanconf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = u'Eric Carmichael'
SITENAME = u"Eric Carmichael's Nerdery"
SITEURL = os.environ.get("PELICAN_SITE_URL", "")
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en' | FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 2
# WITH_FUTURE_DATES = True
GITHUB_URL = 'http://github.com/ckcollab/'
THEME = "themes/mintheme"
PATH = "content"
PLUGINS = ["plugins.assets", "plugins.sitemap"]
MARKUP = (('rst', 'md', 'html'))
WEBASSETS = True
SITEMAP = {
"format": "xml",
"priorities": {
"articles": 1,
"pages": 1,
"indexes": 0
},
"changefreqs": {
"articles": "daily",
"pages": "daily",
"indexes": "daily",
}
}
STATIC_PATHS = [
'images',
'extra/robots.txt',
]
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
}
# Make the site display full articles instead of summaries by setting this to 0
# SUMMARY_MAX_LENGTH = 0 |
# Feed generation is usually not desired when developing | random_line_split |
to_toml.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use handlebars::{Handlebars, Helper, HelperDef, RenderContext, RenderError};
use toml;
use super::super::RenderResult;
#[derive(Clone, Copy)]
pub struct ToTomlHelper;
impl HelperDef for ToTomlHelper {
fn | (&self, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> RenderResult<()> {
let param = h.param(0)
.ok_or_else(|| RenderError::new("Expected 1 parameter for \"toToml\""))?
.value();
let bytes = toml::ser::to_vec(¶m)
.map_err(|e| RenderError::new(format!("Can't serialize parameter to TOML: {}", e)))?;
rc.writer.write_all(bytes.as_ref())?;
Ok(())
}
}
pub static TO_TOML: ToTomlHelper = ToTomlHelper;
| call | identifier_name |
to_toml.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use handlebars::{Handlebars, Helper, HelperDef, RenderContext, RenderError};
use toml;
use super::super::RenderResult;
#[derive(Clone, Copy)]
pub struct ToTomlHelper;
| .value();
let bytes = toml::ser::to_vec(¶m)
.map_err(|e| RenderError::new(format!("Can't serialize parameter to TOML: {}", e)))?;
rc.writer.write_all(bytes.as_ref())?;
Ok(())
}
}
pub static TO_TOML: ToTomlHelper = ToTomlHelper; | impl HelperDef for ToTomlHelper {
fn call(&self, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> RenderResult<()> {
let param = h.param(0)
.ok_or_else(|| RenderError::new("Expected 1 parameter for \"toToml\""))? | random_line_split |
plugin.py | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
| formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load() | random_line_split | |
plugin.py | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def | ():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| find_plugin_styles | identifier_name |
plugin.py | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
|
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load() | identifier_body |
plugin.py | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
|
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| return | conditional_block |
examples.js | angular.module('examples', [])
.factory('formPostData', ['$document', function($document) {
return function(url, fields) {
var form = angular.element('<form style="display: none;" method="post" action="' + url + '" target="_blank"></form>');
angular.forEach(fields, function(value, name) {
var input = angular.element('<input type="hidden" name="' + name + '">');
input.attr('value', value);
form.append(input);
});
$document.find('body').append(form);
form[0].submit();
form.remove();
};
}])
.factory('openPlunkr', ['formPostData', '$http', '$q', function(formPostData, $http, $q) {
return function(exampleFolder) {
var exampleName = 'AngularJS Example';
// Load the manifest for the example
$http.get(exampleFolder + '/manifest.json')
.then(function(response) {
return response.data;
})
.then(function(manifest) {
var filePromises = [];
// Build a pretty title for the Plunkr
var exampleNameParts = manifest.name.split('-');
exampleNameParts.unshift('AngularJS');
angular.forEach(exampleNameParts, function(part, index) {
exampleNameParts[index] = part.charAt(0).toUpperCase() + part.substr(1);
});
exampleName = exampleNameParts.join(' - ');
angular.forEach(manifest.files, function(filename) {
filePromises.push($http.get(exampleFolder + '/' + filename, { transformResponse: [] })
.then(function(response) {
// The manifests provide the production index file but Plunkr wants
// a straight index.html
if (filename === "index-production.html") |
return {
name: filename,
content: response.data
};
}));
});
return $q.all(filePromises);
})
.then(function(files) {
var postData = {};
angular.forEach(files, function(file) {
postData['files[' + file.name + ']'] = file.content;
});
postData['tags[0]'] = "angularjs";
postData['tags[1]'] = "example";
postData.private = true;
postData.description = exampleName;
formPostData('http://plnkr.co/edit/?p=preview', postData);
});
};
}]); | {
filename = "index.html"
} | conditional_block |
examples.js | angular.module('examples', [])
.factory('formPostData', ['$document', function($document) {
return function(url, fields) {
var form = angular.element('<form style="display: none;" method="post" action="' + url + '" target="_blank"></form>');
angular.forEach(fields, function(value, name) {
var input = angular.element('<input type="hidden" name="' + name + '">');
input.attr('value', value);
form.append(input);
});
$document.find('body').append(form);
form[0].submit();
form.remove();
};
}])
.factory('openPlunkr', ['formPostData', '$http', '$q', function(formPostData, $http, $q) {
return function(exampleFolder) {
var exampleName = 'AngularJS Example';
// Load the manifest for the example
$http.get(exampleFolder + '/manifest.json')
.then(function(response) {
return response.data;
})
.then(function(manifest) {
var filePromises = [];
// Build a pretty title for the Plunkr
var exampleNameParts = manifest.name.split('-');
exampleNameParts.unshift('AngularJS');
angular.forEach(exampleNameParts, function(part, index) {
exampleNameParts[index] = part.charAt(0).toUpperCase() + part.substr(1);
});
exampleName = exampleNameParts.join(' - ');
angular.forEach(manifest.files, function(filename) {
filePromises.push($http.get(exampleFolder + '/' + filename, { transformResponse: [] })
.then(function(response) {
// The manifests provide the production index file but Plunkr wants
// a straight index.html
if (filename === "index-production.html") {
filename = "index.html"
}
return {
name: filename,
content: response.data
}; | }));
});
return $q.all(filePromises);
})
.then(function(files) {
var postData = {};
angular.forEach(files, function(file) {
postData['files[' + file.name + ']'] = file.content;
});
postData['tags[0]'] = "angularjs";
postData['tags[1]'] = "example";
postData.private = true;
postData.description = exampleName;
formPostData('http://plnkr.co/edit/?p=preview', postData);
});
};
}]); | random_line_split | |
timer.rs | use libc::{uint32_t, c_void};
use std::mem;
use sys::timer as ll;
pub fn get_ticks() -> u32 {
unsafe { ll::SDL_GetTicks() }
}
pub fn get_performance_counter() -> u64 {
unsafe { ll::SDL_GetPerformanceCounter() }
}
pub fn get_performance_frequency() -> u64 {
unsafe { ll::SDL_GetPerformanceFrequency() }
}
pub fn delay(ms: u32) {
unsafe { ll::SDL_Delay(ms) }
}
pub type TimerCallback<'a> = Box<FnMut() -> u32+'a+Sync>;
#[unstable = "Unstable because of move to unboxed closures and `box` syntax"]
pub struct Timer<'a> {
callback: Option<Box<TimerCallback<'a>>>,
_delay: u32,
raw: ll::SDL_TimerID,
}
impl<'a> Timer<'a> {
/// Constructs a new timer using the boxed closure `callback`.
/// The timer is started immediately, it will be cancelled either:
/// * when the timer is dropped
/// * or when the callback returns a non-positive continuation interval
pub fn new(delay: u32, callback: TimerCallback<'a>) -> Timer<'a> {
unsafe {
let callback = Box::new(callback);
let timer_id = ll::SDL_AddTimer(delay,
Some(c_timer_callback),
mem::transmute_copy(&callback));
Timer {
callback: Some(callback),
_delay: delay,
raw: timer_id,
}
}
}
/// Returns the closure as a trait-object and cancels the timer
/// by consuming it...
pub fn into_inner(mut self) -> TimerCallback<'a> {
*self.callback.take().unwrap()
}
}
#[unsafe_destructor]
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
let ret = unsafe { ll::SDL_RemoveTimer(self.raw) };
if ret != 1 |
}
}
extern "C" fn c_timer_callback(_interval: u32, param: *const c_void) -> uint32_t {
unsafe {
let f: *const Box<Fn() -> u32> = mem::transmute(param);
(*f)() as uint32_t
}
}
#[cfg(test)] use std::sync::{StaticMutex, MUTEX_INIT};
#[cfg(test)] static TIMER_INIT_LOCK: StaticMutex = MUTEX_INIT;
#[test]
fn test_timer_runs_multiple_times() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
let _timer = Timer::new(20, Box::new(|| {
// increment up to 10 times (0 -> 9)
// tick again in 100ms after each increment
//
let mut num = timer_num.lock().unwrap();
if *num < 9 {
*num += 1;
20
} else { 0 }
}));
delay(250); // tick the timer at least 10 times w/ 200ms of "buffer"
let num = local_num.lock().unwrap(); // read the number back
assert_eq!(*num, 9); // it should have incremented at least 10 times...
}
#[test]
fn test_timer_runs_at_least_once() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_flag = Arc::new(Mutex::new(false));
let timer_flag = local_flag.clone();
let _timer = Timer::new(20, Box::new(|| {
let mut flag = timer_flag.lock().unwrap();
*flag = true; 0
}));
delay(50);
let flag = local_flag.lock().unwrap();
assert_eq!(*flag, true);
}
#[test]
fn test_timer_can_be_recreated() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
// run the timer once and reclaim its closure
let timer_1 = Timer::new(20, Box::new(move|| {
let mut num = timer_num.lock().unwrap();
*num += 1; // increment the number
0 // do not run timer again
}));
// reclaim closure after timer runs
delay(50);
let closure = timer_1.into_inner();
// create a second timer and increment again
let _timer_2 = Timer::new(20, closure);
delay(50);
// check that timer was incremented twice
let num = local_num.lock().unwrap();
assert_eq!(*num, 2);
}
| {
println!("error dropping timer {}, maybe already removed.", self.raw);
} | conditional_block |
timer.rs | use libc::{uint32_t, c_void};
use std::mem;
use sys::timer as ll;
pub fn get_ticks() -> u32 {
unsafe { ll::SDL_GetTicks() }
}
pub fn get_performance_counter() -> u64 {
unsafe { ll::SDL_GetPerformanceCounter() }
}
pub fn get_performance_frequency() -> u64 {
unsafe { ll::SDL_GetPerformanceFrequency() }
}
pub fn delay(ms: u32) {
unsafe { ll::SDL_Delay(ms) }
}
pub type TimerCallback<'a> = Box<FnMut() -> u32+'a+Sync>;
#[unstable = "Unstable because of move to unboxed closures and `box` syntax"]
pub struct Timer<'a> {
callback: Option<Box<TimerCallback<'a>>>,
_delay: u32,
raw: ll::SDL_TimerID,
}
impl<'a> Timer<'a> {
/// Constructs a new timer using the boxed closure `callback`.
/// The timer is started immediately, it will be cancelled either:
/// * when the timer is dropped
/// * or when the callback returns a non-positive continuation interval
pub fn new(delay: u32, callback: TimerCallback<'a>) -> Timer<'a> {
unsafe {
let callback = Box::new(callback);
let timer_id = ll::SDL_AddTimer(delay,
Some(c_timer_callback),
mem::transmute_copy(&callback));
Timer {
callback: Some(callback),
_delay: delay,
raw: timer_id,
}
}
}
/// Returns the closure as a trait-object and cancels the timer
/// by consuming it...
pub fn into_inner(mut self) -> TimerCallback<'a> {
*self.callback.take().unwrap()
}
}
#[unsafe_destructor]
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
let ret = unsafe { ll::SDL_RemoveTimer(self.raw) };
if ret != 1 {
println!("error dropping timer {}, maybe already removed.", self.raw);
}
}
}
extern "C" fn c_timer_callback(_interval: u32, param: *const c_void) -> uint32_t |
#[cfg(test)] use std::sync::{StaticMutex, MUTEX_INIT};
#[cfg(test)] static TIMER_INIT_LOCK: StaticMutex = MUTEX_INIT;
#[test]
fn test_timer_runs_multiple_times() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
let _timer = Timer::new(20, Box::new(|| {
// increment up to 10 times (0 -> 9)
// tick again in 100ms after each increment
//
let mut num = timer_num.lock().unwrap();
if *num < 9 {
*num += 1;
20
} else { 0 }
}));
delay(250); // tick the timer at least 10 times w/ 200ms of "buffer"
let num = local_num.lock().unwrap(); // read the number back
assert_eq!(*num, 9); // it should have incremented at least 10 times...
}
#[test]
fn test_timer_runs_at_least_once() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_flag = Arc::new(Mutex::new(false));
let timer_flag = local_flag.clone();
let _timer = Timer::new(20, Box::new(|| {
let mut flag = timer_flag.lock().unwrap();
*flag = true; 0
}));
delay(50);
let flag = local_flag.lock().unwrap();
assert_eq!(*flag, true);
}
#[test]
fn test_timer_can_be_recreated() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
// run the timer once and reclaim its closure
let timer_1 = Timer::new(20, Box::new(move|| {
let mut num = timer_num.lock().unwrap();
*num += 1; // increment the number
0 // do not run timer again
}));
// reclaim closure after timer runs
delay(50);
let closure = timer_1.into_inner();
// create a second timer and increment again
let _timer_2 = Timer::new(20, closure);
delay(50);
// check that timer was incremented twice
let num = local_num.lock().unwrap();
assert_eq!(*num, 2);
}
| {
unsafe {
let f: *const Box<Fn() -> u32> = mem::transmute(param);
(*f)() as uint32_t
}
} | identifier_body |
timer.rs | use libc::{uint32_t, c_void};
use std::mem;
use sys::timer as ll;
pub fn get_ticks() -> u32 {
unsafe { ll::SDL_GetTicks() }
}
pub fn get_performance_counter() -> u64 {
unsafe { ll::SDL_GetPerformanceCounter() }
}
pub fn get_performance_frequency() -> u64 {
unsafe { ll::SDL_GetPerformanceFrequency() }
}
pub fn delay(ms: u32) {
unsafe { ll::SDL_Delay(ms) }
}
pub type TimerCallback<'a> = Box<FnMut() -> u32+'a+Sync>;
#[unstable = "Unstable because of move to unboxed closures and `box` syntax"]
pub struct Timer<'a> {
callback: Option<Box<TimerCallback<'a>>>,
_delay: u32,
raw: ll::SDL_TimerID,
}
impl<'a> Timer<'a> {
/// Constructs a new timer using the boxed closure `callback`.
/// The timer is started immediately, it will be cancelled either:
/// * when the timer is dropped
/// * or when the callback returns a non-positive continuation interval
pub fn new(delay: u32, callback: TimerCallback<'a>) -> Timer<'a> {
unsafe {
let callback = Box::new(callback);
let timer_id = ll::SDL_AddTimer(delay,
Some(c_timer_callback),
mem::transmute_copy(&callback));
Timer {
callback: Some(callback),
_delay: delay,
raw: timer_id,
}
}
}
/// Returns the closure as a trait-object and cancels the timer
/// by consuming it...
pub fn into_inner(mut self) -> TimerCallback<'a> {
*self.callback.take().unwrap()
}
}
#[unsafe_destructor]
impl<'a> Drop for Timer<'a> {
fn | (&mut self) {
let ret = unsafe { ll::SDL_RemoveTimer(self.raw) };
if ret != 1 {
println!("error dropping timer {}, maybe already removed.", self.raw);
}
}
}
extern "C" fn c_timer_callback(_interval: u32, param: *const c_void) -> uint32_t {
unsafe {
let f: *const Box<Fn() -> u32> = mem::transmute(param);
(*f)() as uint32_t
}
}
#[cfg(test)] use std::sync::{StaticMutex, MUTEX_INIT};
#[cfg(test)] static TIMER_INIT_LOCK: StaticMutex = MUTEX_INIT;
#[test]
fn test_timer_runs_multiple_times() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
let _timer = Timer::new(20, Box::new(|| {
// increment up to 10 times (0 -> 9)
// tick again in 100ms after each increment
//
let mut num = timer_num.lock().unwrap();
if *num < 9 {
*num += 1;
20
} else { 0 }
}));
delay(250); // tick the timer at least 10 times w/ 200ms of "buffer"
let num = local_num.lock().unwrap(); // read the number back
assert_eq!(*num, 9); // it should have incremented at least 10 times...
}
#[test]
fn test_timer_runs_at_least_once() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_flag = Arc::new(Mutex::new(false));
let timer_flag = local_flag.clone();
let _timer = Timer::new(20, Box::new(|| {
let mut flag = timer_flag.lock().unwrap();
*flag = true; 0
}));
delay(50);
let flag = local_flag.lock().unwrap();
assert_eq!(*flag, true);
}
#[test]
fn test_timer_can_be_recreated() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
// run the timer once and reclaim its closure
let timer_1 = Timer::new(20, Box::new(move|| {
let mut num = timer_num.lock().unwrap();
*num += 1; // increment the number
0 // do not run timer again
}));
// reclaim closure after timer runs
delay(50);
let closure = timer_1.into_inner();
// create a second timer and increment again
let _timer_2 = Timer::new(20, closure);
delay(50);
// check that timer was incremented twice
let num = local_num.lock().unwrap();
assert_eq!(*num, 2);
}
| drop | identifier_name |
timer.rs | use libc::{uint32_t, c_void};
use std::mem;
use sys::timer as ll;
pub fn get_ticks() -> u32 {
unsafe { ll::SDL_GetTicks() }
}
pub fn get_performance_counter() -> u64 {
unsafe { ll::SDL_GetPerformanceCounter() }
}
pub fn get_performance_frequency() -> u64 {
unsafe { ll::SDL_GetPerformanceFrequency() }
}
pub fn delay(ms: u32) {
unsafe { ll::SDL_Delay(ms) }
}
pub type TimerCallback<'a> = Box<FnMut() -> u32+'a+Sync>;
#[unstable = "Unstable because of move to unboxed closures and `box` syntax"]
pub struct Timer<'a> {
callback: Option<Box<TimerCallback<'a>>>,
_delay: u32,
raw: ll::SDL_TimerID,
}
impl<'a> Timer<'a> {
/// Constructs a new timer using the boxed closure `callback`.
/// The timer is started immediately, it will be cancelled either:
/// * when the timer is dropped
/// * or when the callback returns a non-positive continuation interval
pub fn new(delay: u32, callback: TimerCallback<'a>) -> Timer<'a> {
unsafe {
let callback = Box::new(callback);
let timer_id = ll::SDL_AddTimer(delay,
Some(c_timer_callback),
mem::transmute_copy(&callback));
Timer {
callback: Some(callback),
_delay: delay,
raw: timer_id,
}
}
}
/// Returns the closure as a trait-object and cancels the timer
/// by consuming it...
pub fn into_inner(mut self) -> TimerCallback<'a> {
*self.callback.take().unwrap()
}
}
| impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
let ret = unsafe { ll::SDL_RemoveTimer(self.raw) };
if ret != 1 {
println!("error dropping timer {}, maybe already removed.", self.raw);
}
}
}
extern "C" fn c_timer_callback(_interval: u32, param: *const c_void) -> uint32_t {
unsafe {
let f: *const Box<Fn() -> u32> = mem::transmute(param);
(*f)() as uint32_t
}
}
#[cfg(test)] use std::sync::{StaticMutex, MUTEX_INIT};
#[cfg(test)] static TIMER_INIT_LOCK: StaticMutex = MUTEX_INIT;
#[test]
fn test_timer_runs_multiple_times() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
let _timer = Timer::new(20, Box::new(|| {
// increment up to 10 times (0 -> 9)
// tick again in 100ms after each increment
//
let mut num = timer_num.lock().unwrap();
if *num < 9 {
*num += 1;
20
} else { 0 }
}));
delay(250); // tick the timer at least 10 times w/ 200ms of "buffer"
let num = local_num.lock().unwrap(); // read the number back
assert_eq!(*num, 9); // it should have incremented at least 10 times...
}
#[test]
fn test_timer_runs_at_least_once() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_flag = Arc::new(Mutex::new(false));
let timer_flag = local_flag.clone();
let _timer = Timer::new(20, Box::new(|| {
let mut flag = timer_flag.lock().unwrap();
*flag = true; 0
}));
delay(50);
let flag = local_flag.lock().unwrap();
assert_eq!(*flag, true);
}
#[test]
fn test_timer_can_be_recreated() {
use std::sync::{Arc, Mutex};
let _running = TIMER_INIT_LOCK.lock().unwrap();
::sdl::init(::sdl::INIT_TIMER).unwrap();
let local_num = Arc::new(Mutex::new(0));
let timer_num = local_num.clone();
// run the timer once and reclaim its closure
let timer_1 = Timer::new(20, Box::new(move|| {
let mut num = timer_num.lock().unwrap();
*num += 1; // increment the number
0 // do not run timer again
}));
// reclaim closure after timer runs
delay(50);
let closure = timer_1.into_inner();
// create a second timer and increment again
let _timer_2 = Timer::new(20, closure);
delay(50);
// check that timer was incremented twice
let num = local_num.lock().unwrap();
assert_eq!(*num, 2);
} | #[unsafe_destructor] | random_line_split |
copy-all-static-files.ts | import chalk from 'chalk';
import fs from 'fs-extra';
import path from 'path';
import { logger } from '@storybook/node-logger';
import { parseStaticDir } from './server-statics';
export async function | (staticDirs: any[] | undefined, outputDir: string) {
if (staticDirs && staticDirs.length > 0) {
await Promise.all(
staticDirs.map(async (dir) => {
try {
const { staticDir, staticPath, targetDir } = await parseStaticDir(dir);
const targetPath = path.join(outputDir, targetDir);
logger.info(chalk`=> Copying static files: {cyan ${staticDir}} => {cyan ${targetDir}}`);
// Storybook's own files should not be overwritten, so we skip such files if we find them
const skipPaths = ['index.html', 'iframe.html'].map((f) => path.join(targetPath, f));
await fs.copy(staticPath, targetPath, {
dereference: true,
preserveTimestamps: true,
filter: (_, dest) => !skipPaths.includes(dest),
});
} catch (e) {
logger.error(e.message);
process.exit(-1);
}
})
);
}
}
| copyAllStaticFiles | identifier_name |
copy-all-static-files.ts | import chalk from 'chalk';
import fs from 'fs-extra';
import path from 'path';
import { logger } from '@storybook/node-logger';
import { parseStaticDir } from './server-statics';
export async function copyAllStaticFiles(staticDirs: any[] | undefined, outputDir: string) | {
if (staticDirs && staticDirs.length > 0) {
await Promise.all(
staticDirs.map(async (dir) => {
try {
const { staticDir, staticPath, targetDir } = await parseStaticDir(dir);
const targetPath = path.join(outputDir, targetDir);
logger.info(chalk`=> Copying static files: {cyan ${staticDir}} => {cyan ${targetDir}}`);
// Storybook's own files should not be overwritten, so we skip such files if we find them
const skipPaths = ['index.html', 'iframe.html'].map((f) => path.join(targetPath, f));
await fs.copy(staticPath, targetPath, {
dereference: true,
preserveTimestamps: true,
filter: (_, dest) => !skipPaths.includes(dest),
});
} catch (e) {
logger.error(e.message);
process.exit(-1);
}
})
);
}
} | identifier_body | |
copy-all-static-files.ts | import chalk from 'chalk';
import fs from 'fs-extra';
import path from 'path';
import { logger } from '@storybook/node-logger';
import { parseStaticDir } from './server-statics';
export async function copyAllStaticFiles(staticDirs: any[] | undefined, outputDir: string) {
if (staticDirs && staticDirs.length > 0) |
}
| {
await Promise.all(
staticDirs.map(async (dir) => {
try {
const { staticDir, staticPath, targetDir } = await parseStaticDir(dir);
const targetPath = path.join(outputDir, targetDir);
logger.info(chalk`=> Copying static files: {cyan ${staticDir}} => {cyan ${targetDir}}`);
// Storybook's own files should not be overwritten, so we skip such files if we find them
const skipPaths = ['index.html', 'iframe.html'].map((f) => path.join(targetPath, f));
await fs.copy(staticPath, targetPath, {
dereference: true,
preserveTimestamps: true,
filter: (_, dest) => !skipPaths.includes(dest),
});
} catch (e) {
logger.error(e.message);
process.exit(-1);
}
})
);
} | conditional_block |
copy-all-static-files.ts | import chalk from 'chalk';
import fs from 'fs-extra';
import path from 'path';
import { logger } from '@storybook/node-logger';
import { parseStaticDir } from './server-statics';
export async function copyAllStaticFiles(staticDirs: any[] | undefined, outputDir: string) {
if (staticDirs && staticDirs.length > 0) {
await Promise.all(
staticDirs.map(async (dir) => {
try {
const { staticDir, staticPath, targetDir } = await parseStaticDir(dir);
const targetPath = path.join(outputDir, targetDir);
logger.info(chalk`=> Copying static files: {cyan ${staticDir}} => {cyan ${targetDir}}`);
// Storybook's own files should not be overwritten, so we skip such files if we find them
const skipPaths = ['index.html', 'iframe.html'].map((f) => path.join(targetPath, f));
await fs.copy(staticPath, targetPath, {
dereference: true,
preserveTimestamps: true, | } catch (e) {
logger.error(e.message);
process.exit(-1);
}
})
);
}
} | filter: (_, dest) => !skipPaths.includes(dest),
}); | random_line_split |
webserver.py | # -*- coding: utf-8 -*-
from flask import Flask
from flask import Flask,jsonify, request, Response, session,g,redirect, url_for,abort, render_template, flash
from islem import *
from bot import *
import sys
import time
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
toxbot = tox_factory(ProfileHelper.open_profile("tox_save.tox"))
sonek=str(toxbot.self_get_address())[0:2]
karsi_dosyalar="gelen_cevaplar"+sonek
komut_dosyasi="gelen_komutlar"+sonek
@app.route('/')
def indeks():
| @app.route('/toxfs', methods = ['GET','POST'])
def toxfs():
# localhost:2061
#if request.method == 'GET':
islem=Islem()
islem.fno = request.args.get('fno')
islem.tip = request.args.get('tip')
islem.mesaj = request.args.get('mesaj')
islem.komut="---"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
return "komut icra edildi."
#else:
#return '''<html>
#paremetreyle gönderin</html>'''
@app.route('/toxsys', methods = ['GET','POST'])
def toxsys():
dosyalar_html=""
# localhost:2061
#if request.method == 'GET':
islem=Islem()
if 'fno' in request.args and 'dosya' not in request.args:
islem.fno = request.args.get('fno')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@100@dlist"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
cevap_geldi=False
dosya_bek_bas = datetime.datetime.now()
#6sn bekle cevap icin
t_end = time.time() + 6
while not cevap_geldi :
if os.path.exists(karsi_dosyalar):
time.sleep(1)
cevaplar=open(karsi_dosyalar,"r").read()
cevaplar=cevaplar.split("\n")
for dosya in cevaplar:
dosyalar_html+="<tr><td><a href=/toxsys?fno="+str(islem.fno)+"&dosya="+dosya+">"+dosya+"</td><td></tr>"
os.remove(karsi_dosyalar)
cevap_geldi=True
return '''<html>
<h3>dosyalar</h3>
<table border=1>
'''+dosyalar_html+'''
</tr>
<a href="./">anasayfa</a>
</html>'''
dosya_bek_son = datetime.datetime.now()
krono=dosya_bek_son-dosya_bek_bas
if krono.total_seconds() > 6 :
break
else:
print "dlist sonucu bekleniyor.",krono.total_seconds()
if 'fno' in request.args and 'dosya' in request.args:
islem.fno = request.args.get('fno')
dosya = request.args.get('dosya')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@102@"+dosya
islem.dosyala(komut_dosyasi)
cevap_geldi=False
while not cevap_geldi:
time.sleep(0.5)
#md5sum kontrol
if os.path.exists(karsi_dosyalar):
cevap=open(karsi_dosyalar,"r").read()
if cevap =="dosya_inme_tamam":
cevap_geldi=True
os.remove(karsi_dosyalar)
return "dosya geldi statikte"
else:
return redirect(url_for('indeks'))
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=2061)
| arkadaslar=""
for num in toxbot.self_get_friend_list():
arkadaslar+="<tr><td><a href=/toxsys?fno="+str(num)+">"+str(num)+"</td><td>"+toxbot.friend_get_name(num)+"</td><td>"+str(toxbot.friend_get_status_message(num))+"</td><td>"+str(toxbot.friend_get_public_key(num))+"</td></tr>"
return '''<html>
<h2>Tox Yönetim Sayfası</h2>
<table border=1>
<tr><td>no</td><td>isim</td><td>publickey</td></tr>
<tr><td>-1</td><td>'''+toxbot.self_get_name()+'''</td><td>'''+toxbot.self_get_status_message()+'''</td><td>'''+toxbot.self_get_address()+'''</td></tr>
'''+arkadaslar+'''
</tr></table>
<a href="/toxfs">toxfs</a>
</html>'''
| identifier_body |
webserver.py | # -*- coding: utf-8 -*-
from flask import Flask
from flask import Flask,jsonify, request, Response, session,g,redirect, url_for,abort, render_template, flash
from islem import *
from bot import *
import sys
import time
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
toxbot = tox_factory(ProfileHelper.open_profile("tox_save.tox"))
sonek=str(toxbot.self_get_address())[0:2]
karsi_dosyalar="gelen_cevaplar"+sonek
komut_dosyasi="gelen_komutlar"+sonek
@app.route('/')
def indeks():
arkadaslar=""
for num in toxbot.self_get_friend_list():
arkadaslar+="<tr><td><a href=/toxsys?fno="+str(num)+">"+str(num)+"</td><td>"+toxbot.friend_get_name(num)+"</td><td>"+str(toxbot.friend_get_status_message(num))+"</td><td>"+str(toxbot.friend_get_public_key(num))+"</td></tr>"
return '''<html>
<h2>Tox Yönetim Sayfası</h2>
<table border=1>
<tr><td>no</td><td>isim</td><td>publickey</td></tr>
<tr><td>-1</td><td>'''+toxbot.self_get_name()+'''</td><td>'''+toxbot.self_get_status_message()+'''</td><td>'''+toxbot.self_get_address()+'''</td></tr>
'''+arkadaslar+'''
</tr></table>
<a href="/toxfs">toxfs</a>
</html>'''
@app.route('/toxfs', methods = ['GET','POST'])
def toxfs():
# localhost:2061
#if request.method == 'GET':
islem=Islem()
islem.fno = request.args.get('fno')
islem.tip = request.args.get('tip')
islem.mesaj = request.args.get('mesaj')
islem.komut="---"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
return "komut icra edildi."
#else:
#return '''<html>
#paremetreyle gönderin</html>'''
@app.route('/toxsys', methods = ['GET','POST'])
def tox |
dosyalar_html=""
# localhost:2061
#if request.method == 'GET':
islem=Islem()
if 'fno' in request.args and 'dosya' not in request.args:
islem.fno = request.args.get('fno')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@100@dlist"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
cevap_geldi=False
dosya_bek_bas = datetime.datetime.now()
#6sn bekle cevap icin
t_end = time.time() + 6
while not cevap_geldi :
if os.path.exists(karsi_dosyalar):
time.sleep(1)
cevaplar=open(karsi_dosyalar,"r").read()
cevaplar=cevaplar.split("\n")
for dosya in cevaplar:
dosyalar_html+="<tr><td><a href=/toxsys?fno="+str(islem.fno)+"&dosya="+dosya+">"+dosya+"</td><td></tr>"
os.remove(karsi_dosyalar)
cevap_geldi=True
return '''<html>
<h3>dosyalar</h3>
<table border=1>
'''+dosyalar_html+'''
</tr>
<a href="./">anasayfa</a>
</html>'''
dosya_bek_son = datetime.datetime.now()
krono=dosya_bek_son-dosya_bek_bas
if krono.total_seconds() > 6 :
break
else:
print "dlist sonucu bekleniyor.",krono.total_seconds()
if 'fno' in request.args and 'dosya' in request.args:
islem.fno = request.args.get('fno')
dosya = request.args.get('dosya')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@102@"+dosya
islem.dosyala(komut_dosyasi)
cevap_geldi=False
while not cevap_geldi:
time.sleep(0.5)
#md5sum kontrol
if os.path.exists(karsi_dosyalar):
cevap=open(karsi_dosyalar,"r").read()
if cevap =="dosya_inme_tamam":
cevap_geldi=True
os.remove(karsi_dosyalar)
return "dosya geldi statikte"
else:
return redirect(url_for('indeks'))
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=2061)
| sys(): | identifier_name |
webserver.py | # -*- coding: utf-8 -*-
from flask import Flask
from flask import Flask,jsonify, request, Response, session,g,redirect, url_for,abort, render_template, flash
from islem import *
from bot import *
import sys
import time
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
toxbot = tox_factory(ProfileHelper.open_profile("tox_save.tox"))
sonek=str(toxbot.self_get_address())[0:2]
karsi_dosyalar="gelen_cevaplar"+sonek
komut_dosyasi="gelen_komutlar"+sonek
@app.route('/')
def indeks():
arkadaslar=""
for num in toxbot.self_get_friend_list():
arkadaslar+="<tr><td><a href=/toxsys?fno="+str(num)+">"+str(num)+"</td><td>"+toxbot.friend_get_name(num)+"</td><td>"+str(toxbot.friend_get_status_message(num))+"</td><td>"+str(toxbot.friend_get_public_key(num))+"</td></tr>"
return '''<html>
<h2>Tox Yönetim Sayfası</h2>
<table border=1>
<tr><td>no</td><td>isim</td><td>publickey</td></tr>
<tr><td>-1</td><td>'''+toxbot.self_get_name()+'''</td><td>'''+toxbot.self_get_status_message()+'''</td><td>'''+toxbot.self_get_address()+'''</td></tr>
'''+arkadaslar+'''
</tr></table>
<a href="/toxfs">toxfs</a>
</html>'''
@app.route('/toxfs', methods = ['GET','POST'])
def toxfs():
# localhost:2061
#if request.method == 'GET':
islem=Islem()
islem.fno = request.args.get('fno')
islem.tip = request.args.get('tip')
islem.mesaj = request.args.get('mesaj')
islem.komut="---"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
return "komut icra edildi."
#else:
#return '''<html>
#paremetreyle gönderin</html>'''
@app.route('/toxsys', methods = ['GET','POST'])
def toxsys():
dosyalar_html=""
# localhost:2061
#if request.method == 'GET':
islem=Islem()
if 'fno' in request.args and 'dosya' not in request.args:
islem.fno = request.args.get('fno')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@100@dlist"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
cevap_geldi=False
dosya_bek_bas = datetime.datetime.now()
#6sn bekle cevap icin
t_end = time.time() + 6
while not cevap_geldi :
if os.path.exists(karsi_dosyalar):
time.sleep(1)
cevaplar=open(karsi_dosyalar,"r").read()
cevaplar=cevaplar.split("\n")
for dosya in cevaplar:
dosyalar_html+="<tr><td><a href=/toxsys?fno="+str(islem.fno)+"&dosya="+dosya+">"+dosya+"</td><td></tr>"
os.remove(karsi_dosyalar)
cevap_geldi=True
return '''<html>
<h3>dosyalar</h3>
<table border=1>
'''+dosyalar_html+'''
</tr>
<a href="./">anasayfa</a>
</html>'''
dosya_bek_son = datetime.datetime.now()
krono=dosya_bek_son-dosya_bek_bas
if krono.total_seconds() > 6 :
bre | else:
print "dlist sonucu bekleniyor.",krono.total_seconds()
if 'fno' in request.args and 'dosya' in request.args:
islem.fno = request.args.get('fno')
dosya = request.args.get('dosya')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@102@"+dosya
islem.dosyala(komut_dosyasi)
cevap_geldi=False
while not cevap_geldi:
time.sleep(0.5)
#md5sum kontrol
if os.path.exists(karsi_dosyalar):
cevap=open(karsi_dosyalar,"r").read()
if cevap =="dosya_inme_tamam":
cevap_geldi=True
os.remove(karsi_dosyalar)
return "dosya geldi statikte"
else:
return redirect(url_for('indeks'))
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=2061)
| ak
| conditional_block |
webserver.py | # -*- coding: utf-8 -*-
from flask import Flask
from flask import Flask,jsonify, request, Response, session,g,redirect, url_for,abort, render_template, flash
from islem import *
from bot import *
import sys
import time
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
toxbot = tox_factory(ProfileHelper.open_profile("tox_save.tox"))
sonek=str(toxbot.self_get_address())[0:2]
karsi_dosyalar="gelen_cevaplar"+sonek
komut_dosyasi="gelen_komutlar"+sonek
@app.route('/')
def indeks():
arkadaslar=""
for num in toxbot.self_get_friend_list():
arkadaslar+="<tr><td><a href=/toxsys?fno="+str(num)+">"+str(num)+"</td><td>"+toxbot.friend_get_name(num)+"</td><td>"+str(toxbot.friend_get_status_message(num))+"</td><td>"+str(toxbot.friend_get_public_key(num))+"</td></tr>"
return '''<html>
<h2>Tox Yönetim Sayfası</h2>
<table border=1>
<tr><td>no</td><td>isim</td><td>publickey</td></tr>
<tr><td>-1</td><td>'''+toxbot.self_get_name()+'''</td><td>'''+toxbot.self_get_status_message()+'''</td><td>'''+toxbot.self_get_address()+'''</td></tr>
'''+arkadaslar+'''
</tr></table>
<a href="/toxfs">toxfs</a>
</html>'''
@app.route('/toxfs', methods = ['GET','POST'])
def toxfs():
# localhost:2061
#if request.method == 'GET':
islem=Islem()
islem.fno = request.args.get('fno')
islem.tip = request.args.get('tip')
islem.mesaj = request.args.get('mesaj')
islem.komut="---"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
return "komut icra edildi."
#else:
#return '''<html>
#paremetreyle gönderin</html>'''
@app.route('/toxsys', methods = ['GET','POST'])
def toxsys():
dosyalar_html=""
# localhost:2061
#if request.method == 'GET':
islem=Islem()
if 'fno' in request.args and 'dosya' not in request.args:
islem.fno = request.args.get('fno')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@100@dlist"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
cevap_geldi=False
dosya_bek_bas = datetime.datetime.now()
#6sn bekle cevap icin
t_end = time.time() + 6
while not cevap_geldi : | for dosya in cevaplar:
dosyalar_html+="<tr><td><a href=/toxsys?fno="+str(islem.fno)+"&dosya="+dosya+">"+dosya+"</td><td></tr>"
os.remove(karsi_dosyalar)
cevap_geldi=True
return '''<html>
<h3>dosyalar</h3>
<table border=1>
'''+dosyalar_html+'''
</tr>
<a href="./">anasayfa</a>
</html>'''
dosya_bek_son = datetime.datetime.now()
krono=dosya_bek_son-dosya_bek_bas
if krono.total_seconds() > 6 :
break
else:
print "dlist sonucu bekleniyor.",krono.total_seconds()
if 'fno' in request.args and 'dosya' in request.args:
islem.fno = request.args.get('fno')
dosya = request.args.get('dosya')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@102@"+dosya
islem.dosyala(komut_dosyasi)
cevap_geldi=False
while not cevap_geldi:
time.sleep(0.5)
#md5sum kontrol
if os.path.exists(karsi_dosyalar):
cevap=open(karsi_dosyalar,"r").read()
if cevap =="dosya_inme_tamam":
cevap_geldi=True
os.remove(karsi_dosyalar)
return "dosya geldi statikte"
else:
return redirect(url_for('indeks'))
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=2061) | if os.path.exists(karsi_dosyalar):
time.sleep(1)
cevaplar=open(karsi_dosyalar,"r").read()
cevaplar=cevaplar.split("\n") | random_line_split |
knockoutExtenders.js | define(function () {
return { registerExtenders: registerExtenders };
function registerExtenders() {
registerDateBinding();
registerMoneyExtension();
}
function re | ) {
ko.bindingHandlers.dateString = {
//Credit to Ryan Rahlf http://stackoverflow.com/questions/17001303/date-formatting-issues-with-knockout-and-syncing-to-breeze-js-entityaspect-modif
init: function (element, valueAccessor) {
//attach an event handler to our dom element to handle user input
element.onchange = function () {
var value = valueAccessor();//get our observable
//set our observable to the parsed date from the input
value(moment(element.value).toDate());
};
},
update: function (element, valueAccessor, allBindingsAccessor, viewModel) {
var value = valueAccessor();
var valueUnwrapped = ko.utils.unwrapObservable(value);
if (valueUnwrapped) {
element.value = moment(valueUnwrapped).format('L');
}
}
};
}
function registerMoneyExtension() {
//Credit to Josh Bush http://freshbrewedcode.com/joshbush/2011/12/27/knockout-js-observable-extensions/
var format = function (value) {
toks = value.toFixed(2).replace('-', '').split('.');
var display = '$' + $.map(toks[0].split('').reverse(), function (elm, i) {
return [(i % 3 === 0 && i > 0 ? ',' : ''), elm];
}).reverse().join('') + '.' + toks[1];
return value < 0 ? '(' + display + ')' : display;
};
ko.subscribable.fn.money = function () {
var target = this;
var writeTarget = function (value) {
target(parseFloat(value.replace(/[^0-9.-]/g, '')));
};
var result = ko.computed({
read: function () {
return target();
},
write: writeTarget
});
result.formatted = ko.computed({
read: function () {
return format(target());
},
write: writeTarget
});
return result;
};
}
});
| gisterDateBinding ( | identifier_name |
knockoutExtenders.js | define(function () {
return { registerExtenders: registerExtenders };
function registerExtenders() {
registerDateBinding();
registerMoneyExtension();
}
function registerDateBinding () {
ko.bindingHandlers.dateString = {
//Credit to Ryan Rahlf http://stackoverflow.com/questions/17001303/date-formatting-issues-with-knockout-and-syncing-to-breeze-js-entityaspect-modif
init: function (element, valueAccessor) {
//attach an event handler to our dom element to handle user input
element.onchange = function () {
var value = valueAccessor();//get our observable
//set our observable to the parsed date from the input
value(moment(element.value).toDate());
};
},
update: function (element, valueAccessor, allBindingsAccessor, viewModel) {
var value = valueAccessor();
var valueUnwrapped = ko.utils.unwrapObservable(value);
if (valueUnwrapped) {
| }
};
}
function registerMoneyExtension() {
//Credit to Josh Bush http://freshbrewedcode.com/joshbush/2011/12/27/knockout-js-observable-extensions/
var format = function (value) {
toks = value.toFixed(2).replace('-', '').split('.');
var display = '$' + $.map(toks[0].split('').reverse(), function (elm, i) {
return [(i % 3 === 0 && i > 0 ? ',' : ''), elm];
}).reverse().join('') + '.' + toks[1];
return value < 0 ? '(' + display + ')' : display;
};
ko.subscribable.fn.money = function () {
var target = this;
var writeTarget = function (value) {
target(parseFloat(value.replace(/[^0-9.-]/g, '')));
};
var result = ko.computed({
read: function () {
return target();
},
write: writeTarget
});
result.formatted = ko.computed({
read: function () {
return format(target());
},
write: writeTarget
});
return result;
};
}
});
| element.value = moment(valueUnwrapped).format('L');
}
| conditional_block |
knockoutExtenders.js | define(function () {
return { registerExtenders: registerExtenders };
| registerDateBinding();
registerMoneyExtension();
}
function registerDateBinding () {
ko.bindingHandlers.dateString = {
//Credit to Ryan Rahlf http://stackoverflow.com/questions/17001303/date-formatting-issues-with-knockout-and-syncing-to-breeze-js-entityaspect-modif
init: function (element, valueAccessor) {
//attach an event handler to our dom element to handle user input
element.onchange = function () {
var value = valueAccessor();//get our observable
//set our observable to the parsed date from the input
value(moment(element.value).toDate());
};
},
update: function (element, valueAccessor, allBindingsAccessor, viewModel) {
var value = valueAccessor();
var valueUnwrapped = ko.utils.unwrapObservable(value);
if (valueUnwrapped) {
element.value = moment(valueUnwrapped).format('L');
}
}
};
}
function registerMoneyExtension() {
//Credit to Josh Bush http://freshbrewedcode.com/joshbush/2011/12/27/knockout-js-observable-extensions/
var format = function (value) {
toks = value.toFixed(2).replace('-', '').split('.');
var display = '$' + $.map(toks[0].split('').reverse(), function (elm, i) {
return [(i % 3 === 0 && i > 0 ? ',' : ''), elm];
}).reverse().join('') + '.' + toks[1];
return value < 0 ? '(' + display + ')' : display;
};
ko.subscribable.fn.money = function () {
var target = this;
var writeTarget = function (value) {
target(parseFloat(value.replace(/[^0-9.-]/g, '')));
};
var result = ko.computed({
read: function () {
return target();
},
write: writeTarget
});
result.formatted = ko.computed({
read: function () {
return format(target());
},
write: writeTarget
});
return result;
};
}
}); | function registerExtenders() { | random_line_split |
knockoutExtenders.js | define(function () {
return { registerExtenders: registerExtenders };
function registerExtenders() {
registerDateBinding();
registerMoneyExtension();
}
function registerDateBinding () {
ko.bindingHandlers.dateString = {
//Credit to Ryan Rahlf http://stackoverflow.com/questions/17001303/date-formatting-issues-with-knockout-and-syncing-to-breeze-js-entityaspect-modif
init: function (element, valueAccessor) {
//attach an event handler to our dom element to handle user input
element.onchange = function () {
var value = valueAccessor();//get our observable
//set our observable to the parsed date from the input
value(moment(element.value).toDate());
};
},
update: function (element, valueAccessor, allBindingsAccessor, viewModel) {
var value = valueAccessor();
var valueUnwrapped = ko.utils.unwrapObservable(value);
if (valueUnwrapped) {
element.value = moment(valueUnwrapped).format('L');
}
}
};
}
function registerMoneyExtension() {
|
});
| //Credit to Josh Bush http://freshbrewedcode.com/joshbush/2011/12/27/knockout-js-observable-extensions/
var format = function (value) {
toks = value.toFixed(2).replace('-', '').split('.');
var display = '$' + $.map(toks[0].split('').reverse(), function (elm, i) {
return [(i % 3 === 0 && i > 0 ? ',' : ''), elm];
}).reverse().join('') + '.' + toks[1];
return value < 0 ? '(' + display + ')' : display;
};
ko.subscribable.fn.money = function () {
var target = this;
var writeTarget = function (value) {
target(parseFloat(value.replace(/[^0-9.-]/g, '')));
};
var result = ko.computed({
read: function () {
return target();
},
write: writeTarget
});
result.formatted = ko.computed({
read: function () {
return format(target());
},
write: writeTarget
});
return result;
};
}
| identifier_body |
by-move-pattern-binding.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum E {
Foo,
Bar(String)
}
struct | {
x: E
}
fn f(x: String) {}
fn main() {
let s = S { x: E::Bar("hello".to_string()) };
match &s.x {
&E::Foo => {}
&E::Bar(identifier) => f(identifier.clone()) //~ ERROR cannot move
};
match &s.x {
&E::Foo => {}
&E::Bar(ref identifier) => println!("{}", *identifier)
};
}
| S | identifier_name |
by-move-pattern-binding.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| }
struct S {
x: E
}
fn f(x: String) {}
fn main() {
let s = S { x: E::Bar("hello".to_string()) };
match &s.x {
&E::Foo => {}
&E::Bar(identifier) => f(identifier.clone()) //~ ERROR cannot move
};
match &s.x {
&E::Foo => {}
&E::Bar(ref identifier) => println!("{}", *identifier)
};
} | enum E {
Foo,
Bar(String) | random_line_split |
by-move-pattern-binding.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum E {
Foo,
Bar(String)
}
struct S {
x: E
}
fn f(x: String) |
fn main() {
let s = S { x: E::Bar("hello".to_string()) };
match &s.x {
&E::Foo => {}
&E::Bar(identifier) => f(identifier.clone()) //~ ERROR cannot move
};
match &s.x {
&E::Foo => {}
&E::Bar(ref identifier) => println!("{}", *identifier)
};
}
| {} | identifier_body |
karma.conf.js | // Karma configuration
// Generated on Wed May 13 2015 17:38:34 GMT-0400 (EDT)
module.exports = function(config) { |
browsers: ['Firefox', 'PhantomJS'],
frameworks: ['mocha', 'requirejs'],
files: [
'node_modules/mocha/mocha.js',
'node_modules/mocha/mocha.css',
'node_modules/chai/chai.js',
{pattern: 'bower_components/**/*.js', included: false},
{pattern: 'bower_components/**/*.css', included: false},
{pattern: 'lib/**/*.js', included: false},
{pattern: 'lib/**/*.spec.js', included: false},
'test-main.js',
'test/app.js'
],
exclude: [],
preprocessors: {},
reporters: ['progress'],
client: {
mocha: {
reporter: 'html'
}
},
port: 9876,
colors: true,
logLevel: config.LOG_INFO,
autoWatch: true,
singleRun: true
});
}; | config.set({
basePath: '', | random_line_split |
question_cli.py | #!/usr/bin/env python
#
# License: MIT
#
from __future__ import absolute_import, division, print_function
##############################################################################
# Imports
##############################################################################
import os
import sys
import argparse
import ros1_pytemplate
import logging.config
logging.config.dictConfig(
{
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)s:%(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'ros1_pytemplate.log',
'maxBytes': 1024,
'backupCount': 3,
'formatter': 'verbose'
},
},
'loggers': {
'ros1_template': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'question': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
}
}
)
def show_description():
return "ros template test script"
def show_usage(cmd=None):
cmd = os.path.relpath(sys.argv[0], os.getcwd()) if cmd is None else cmd
return "{0} [-h|--help] [--version]".format(cmd)
def show_epilog():
return "never enough testing"
##############################################################################
# Main
##############################################################################
if __name__ == '__main__':
# Ref : https://docs.python.org/2/library/argparse
parser = argparse.ArgumentParser(description=show_description(),
usage=show_usage(),
epilog=show_epilog(),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--version", action='store_true', help="display the version number and exits.")
parsed_known_args, unknown_args = parser.parse_known_args(sys.argv[1:])
if parsed_known_args.version:
|
logger = logging.getLogger("question")
answer = ros1_pytemplate.Answer(6)
logger.info(answer.retrieve())
| print("ROS1 pytemplate version " + ros1_pytemplate.__version__ +
"\n from " + ros1_pytemplate.__file__)
sys.exit(0) | conditional_block |
question_cli.py | #!/usr/bin/env python
#
# License: MIT
#
from __future__ import absolute_import, division, print_function
##############################################################################
# Imports
##############################################################################
import os
import sys
import argparse
import ros1_pytemplate
import logging.config
logging.config.dictConfig(
{
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)s:%(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'ros1_pytemplate.log',
'maxBytes': 1024,
'backupCount': 3,
'formatter': 'verbose'
},
},
'loggers': {
'ros1_template': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'question': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
}
}
)
def show_description():
return "ros template test script"
def | (cmd=None):
cmd = os.path.relpath(sys.argv[0], os.getcwd()) if cmd is None else cmd
return "{0} [-h|--help] [--version]".format(cmd)
def show_epilog():
return "never enough testing"
##############################################################################
# Main
##############################################################################
if __name__ == '__main__':
# Ref : https://docs.python.org/2/library/argparse
parser = argparse.ArgumentParser(description=show_description(),
usage=show_usage(),
epilog=show_epilog(),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--version", action='store_true', help="display the version number and exits.")
parsed_known_args, unknown_args = parser.parse_known_args(sys.argv[1:])
if parsed_known_args.version:
print("ROS1 pytemplate version " + ros1_pytemplate.__version__ +
"\n from " + ros1_pytemplate.__file__)
sys.exit(0)
logger = logging.getLogger("question")
answer = ros1_pytemplate.Answer(6)
logger.info(answer.retrieve())
| show_usage | identifier_name |
question_cli.py | #!/usr/bin/env python
#
# License: MIT
#
from __future__ import absolute_import, division, print_function
##############################################################################
# Imports
##############################################################################
import os
import sys
import argparse
import ros1_pytemplate
import logging.config
logging.config.dictConfig(
{
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)s:%(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'ros1_pytemplate.log',
'maxBytes': 1024,
'backupCount': 3,
'formatter': 'verbose'
},
},
'loggers': {
'ros1_template': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'question': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
}
}
)
def show_description():
return "ros template test script"
def show_usage(cmd=None):
cmd = os.path.relpath(sys.argv[0], os.getcwd()) if cmd is None else cmd
return "{0} [-h|--help] [--version]".format(cmd)
def show_epilog():
return "never enough testing"
##############################################################################
# Main | usage=show_usage(),
epilog=show_epilog(),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--version", action='store_true', help="display the version number and exits.")
parsed_known_args, unknown_args = parser.parse_known_args(sys.argv[1:])
if parsed_known_args.version:
print("ROS1 pytemplate version " + ros1_pytemplate.__version__ +
"\n from " + ros1_pytemplate.__file__)
sys.exit(0)
logger = logging.getLogger("question")
answer = ros1_pytemplate.Answer(6)
logger.info(answer.retrieve()) | ##############################################################################
if __name__ == '__main__':
# Ref : https://docs.python.org/2/library/argparse
parser = argparse.ArgumentParser(description=show_description(), | random_line_split |
question_cli.py | #!/usr/bin/env python
#
# License: MIT
#
from __future__ import absolute_import, division, print_function
##############################################################################
# Imports
##############################################################################
import os
import sys
import argparse
import ros1_pytemplate
import logging.config
logging.config.dictConfig(
{
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)s:%(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
'stream': 'ext://sys.stdout',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'ros1_pytemplate.log',
'maxBytes': 1024,
'backupCount': 3,
'formatter': 'verbose'
},
},
'loggers': {
'ros1_template': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'question': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
}
}
)
def show_description():
return "ros template test script"
def show_usage(cmd=None):
cmd = os.path.relpath(sys.argv[0], os.getcwd()) if cmd is None else cmd
return "{0} [-h|--help] [--version]".format(cmd)
def show_epilog():
|
##############################################################################
# Main
##############################################################################
if __name__ == '__main__':
# Ref : https://docs.python.org/2/library/argparse
parser = argparse.ArgumentParser(description=show_description(),
usage=show_usage(),
epilog=show_epilog(),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--version", action='store_true', help="display the version number and exits.")
parsed_known_args, unknown_args = parser.parse_known_args(sys.argv[1:])
if parsed_known_args.version:
print("ROS1 pytemplate version " + ros1_pytemplate.__version__ +
"\n from " + ros1_pytemplate.__file__)
sys.exit(0)
logger = logging.getLogger("question")
answer = ros1_pytemplate.Answer(6)
logger.info(answer.retrieve())
| return "never enough testing" | identifier_body |
evenly_discretized.py | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
"""
Evenly discretized MFD is defined as a precalculated histogram.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram. | :param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
MODIFICATIONS = set(('set_mfd',))
_slots_ = 'min_mag bin_width occurrence_rates'.split()
def __init__(self, min_mag, bin_width, occurrence_rates):
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
def check_constraints(self):
"""
Checks the following constraints:
* Bin width is positive.
* Occurrence rates list is not empty.
* Each number in occurrence rates list is non-negative.
* Minimum magnitude is positive.
"""
if not self.bin_width > 0:
raise ValueError('bin width must be positive')
if not self.occurrence_rates:
raise ValueError('at least one bin must be specified')
if not all(value >= 0 for value in self.occurrence_rates):
raise ValueError('all occurrence rates must not be negative')
if not any(value > 0 for value in self.occurrence_rates):
raise ValueError('at least one occurrence rate must be positive')
if not self.min_mag >= 0:
raise ValueError('minimum magnitude must be non-negative')
def get_annual_occurrence_rates(self):
"""
Returns the predefined annual occurrence rates.
"""
return [(self.min_mag + i * self.bin_width, occurrence_rate)
for i, occurrence_rate in enumerate(self.occurrence_rates)]
def get_min_max_mag(self):
"""
Returns the minumun and maximum magnitudes
"""
return self.min_mag, self.min_mag + self. bin_width * (
len(self.occurrence_rates) - 1)
def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
"""
Applies absolute modification of the MFD from the ``min_mag``,
``bin_width`` and ``occurrence_rates`` modification.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints() | :param bin_width:
A positive float value -- the width of a single histogram bin. | random_line_split |
evenly_discretized.py | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
| """
Evenly discretized MFD is defined as a precalculated histogram.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
MODIFICATIONS = set(('set_mfd',))
_slots_ = 'min_mag bin_width occurrence_rates'.split()
def __init__(self, min_mag, bin_width, occurrence_rates):
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
def check_constraints(self):
"""
Checks the following constraints:
* Bin width is positive.
* Occurrence rates list is not empty.
* Each number in occurrence rates list is non-negative.
* Minimum magnitude is positive.
"""
if not self.bin_width > 0:
raise ValueError('bin width must be positive')
if not self.occurrence_rates:
raise ValueError('at least one bin must be specified')
if not all(value >= 0 for value in self.occurrence_rates):
raise ValueError('all occurrence rates must not be negative')
if not any(value > 0 for value in self.occurrence_rates):
raise ValueError('at least one occurrence rate must be positive')
if not self.min_mag >= 0:
raise ValueError('minimum magnitude must be non-negative')
def get_annual_occurrence_rates(self):
"""
Returns the predefined annual occurrence rates.
"""
return [(self.min_mag + i * self.bin_width, occurrence_rate)
for i, occurrence_rate in enumerate(self.occurrence_rates)]
def get_min_max_mag(self):
"""
Returns the minumun and maximum magnitudes
"""
return self.min_mag, self.min_mag + self. bin_width * (
len(self.occurrence_rates) - 1)
def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
"""
Applies absolute modification of the MFD from the ``min_mag``,
``bin_width`` and ``occurrence_rates`` modification.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints() | identifier_body | |
evenly_discretized.py | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
"""
Evenly discretized MFD is defined as a precalculated histogram.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
MODIFICATIONS = set(('set_mfd',))
_slots_ = 'min_mag bin_width occurrence_rates'.split()
def __init__(self, min_mag, bin_width, occurrence_rates):
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
def check_constraints(self):
"""
Checks the following constraints:
* Bin width is positive.
* Occurrence rates list is not empty.
* Each number in occurrence rates list is non-negative.
* Minimum magnitude is positive.
"""
if not self.bin_width > 0:
raise ValueError('bin width must be positive')
if not self.occurrence_rates:
raise ValueError('at least one bin must be specified')
if not all(value >= 0 for value in self.occurrence_rates):
|
if not any(value > 0 for value in self.occurrence_rates):
raise ValueError('at least one occurrence rate must be positive')
if not self.min_mag >= 0:
raise ValueError('minimum magnitude must be non-negative')
def get_annual_occurrence_rates(self):
"""
Returns the predefined annual occurrence rates.
"""
return [(self.min_mag + i * self.bin_width, occurrence_rate)
for i, occurrence_rate in enumerate(self.occurrence_rates)]
def get_min_max_mag(self):
"""
Returns the minumun and maximum magnitudes
"""
return self.min_mag, self.min_mag + self. bin_width * (
len(self.occurrence_rates) - 1)
def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
"""
Applies absolute modification of the MFD from the ``min_mag``,
``bin_width`` and ``occurrence_rates`` modification.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
| raise ValueError('all occurrence rates must not be negative') | conditional_block |
evenly_discretized.py | # The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
"""
Evenly discretized MFD is defined as a precalculated histogram.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
MODIFICATIONS = set(('set_mfd',))
_slots_ = 'min_mag bin_width occurrence_rates'.split()
def | (self, min_mag, bin_width, occurrence_rates):
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
def check_constraints(self):
"""
Checks the following constraints:
* Bin width is positive.
* Occurrence rates list is not empty.
* Each number in occurrence rates list is non-negative.
* Minimum magnitude is positive.
"""
if not self.bin_width > 0:
raise ValueError('bin width must be positive')
if not self.occurrence_rates:
raise ValueError('at least one bin must be specified')
if not all(value >= 0 for value in self.occurrence_rates):
raise ValueError('all occurrence rates must not be negative')
if not any(value > 0 for value in self.occurrence_rates):
raise ValueError('at least one occurrence rate must be positive')
if not self.min_mag >= 0:
raise ValueError('minimum magnitude must be non-negative')
def get_annual_occurrence_rates(self):
"""
Returns the predefined annual occurrence rates.
"""
return [(self.min_mag + i * self.bin_width, occurrence_rate)
for i, occurrence_rate in enumerate(self.occurrence_rates)]
def get_min_max_mag(self):
"""
Returns the minumun and maximum magnitudes
"""
return self.min_mag, self.min_mag + self. bin_width * (
len(self.occurrence_rates) - 1)
def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
"""
Applies absolute modification of the MFD from the ``min_mag``,
``bin_width`` and ``occurrence_rates`` modification.
:param min_mag:
Positive float value representing the middle point of the first
bin in the histogram.
:param bin_width:
A positive float value -- the width of a single histogram bin.
:param occurrence_rates:
The list of non-negative float values representing the actual
annual occurrence rates. The resulting histogram has as many bins
as this list length.
"""
self.min_mag = min_mag
self.bin_width = bin_width
self.occurrence_rates = occurrence_rates
self.check_constraints()
| __init__ | identifier_name |
TotemTracker.tsx | import Analyzer, { Options, SELECTED_PLAYER, SELECTED_PLAYER_PET } from 'parser/core/Analyzer';
import Events, { CastEvent, DeathEvent, FightEndEvent, SummonEvent } from 'parser/core/Events';
import Combatants from 'parser/shared/modules/Combatants';
import * as SPELLS from '../../SPELLS';
import {
AllTotemsFilter,
GetTotemElement,
TotemDurations,
TotemElements,
TotemElementsList,
TOTEMS_BY_ELEMENT,
} from '../../totemConstants';
export interface TotemEventTracker {
[TotemElements.Fire]: TotemEvent[];
[TotemElements.Water]: TotemEvent[];
[TotemElements.Earth]: TotemEvent[];
[TotemElements.Air]: TotemEvent[];
}
export interface TotemEvent {
totemSpellId: number;
totemName: string; // This is just to make debugging easier
summonedAt: number;
dismissedAt?: number;
dismissReason?: string;
duration?: number;
targetID?: number;
damageDone: number;
healingDone: number;
manaRestored: number;
spellsGrounded?: any;
}
class TotemTracker extends Analyzer {
static dependencies = {
combatants: Combatants,
};
protected combatants!: Combatants;
public totemElementEvents: TotemEventTracker = {
[TotemElements.Fire]: [],
[TotemElements.Water]: [],
[TotemElements.Earth]: [],
[TotemElements.Air]: [],
};
public totemEvents(totemSpellId: number) {
const events: TotemEvent[] = [];
const totemElement = GetTotemElement(totemSpellId);
if (totemElement === null) {
return events;
}
return this.totemElementEvents[totemElement].filter(
(e: TotemEvent) => e.totemSpellId === totemSpellId,
);
}
activeTotem(element: TotemElements) {
if (this.totemElementEvents[element].length === 0) {
return null;
}
const lastTotemSummoned = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
if (lastTotemSummoned.dismissedAt) {
return null;
}
return lastTotemSummoned;
}
totalTotemUptime(totemIdOrElement: TotemElements | number) {
if (Number.isInteger(totemIdOrElement)) {
const totemId = totemIdOrElement as number;
if (this.totemEvents(totemId).length === 0) {
return 0;
}
return this.totemEvents(totemId)
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
const totemElement = totemIdOrElement as TotemElements;
if (this.totemElementEvents[totemElement].length === 0) {
return 0;
}
return this.totemElementEvents[totemElement]
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
totemUptimePercentage(totemIdOrElement: TotemElements | number): number {
return this.totalTotemUptime(totemIdOrElement) / this.owner.fightDuration;
}
// Duration is hard to get perfect, but we can do a few things to make the number we get not look so outlandish.
markTotemAsDismissed(element: TotemElements, timestamp: number, reason = '') {
if (this.totemElementEvents[element].length === 0) {
return;
}
if (this.totemElementEvents[element][this.totemElementEvents[element].length - 1].dismissedAt) {
return;
}
const totemEvent: TotemEvent = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
const possibleDuration: number = timestamp - totemEvent.summonedAt;
const maxDuration: number = (TotemDurations as any)[totemEvent.totemSpellId] as number;
const duration = Math.min(possibleDuration, maxDuration);
totemEvent.dismissedAt = timestamp;
totemEvent.dismissReason = reason;
totemEvent.duration = duration;
}
markAllTotemsDismissed(timestamp: number, reason = '') {
for (const element of TotemElementsList) {
this.markTotemAsDismissed(element, timestamp, reason);
}
}
allTotemUptimePercentage() {
return (
(this.totemUptimePercentage(TotemElements.Fire) +
this.totemUptimePercentage(TotemElements.Water) +
this.totemUptimePercentage(TotemElements.Earth) +
this.totemUptimePercentage(TotemElements.Air)) /
4
);
}
// Returns the ID of the totem that has the highest uptime for each element.
primaryTotemUsed(element: TotemElements) {
let primaryTotemId = TOTEMS_BY_ELEMENT[element][0];
if (TOTEMS_BY_ELEMENT[element].length === 0) {
return primaryTotemId;
}
for (const totemId of TOTEMS_BY_ELEMENT[element]) {
if (this.totalTotemUptime(totemId) > this.totalTotemUptime(primaryTotemId)) {
primaryTotemId = totemId;
}
}
return primaryTotemId;
}
constructor(options: Options) |
totemSummoned(event: SummonEvent) {
const totemSpellId = event.ability.guid;
const totemName = event.ability.name;
const totemElement = GetTotemElement(totemSpellId);
if (!totemElement) {
return;
}
this.markTotemAsDismissed(totemElement, event.timestamp, event.type);
this.totemElementEvents[totemElement].push({
totemSpellId,
totemName,
summonedAt: event.timestamp,
targetID: event.targetID || event.target?.id,
damageDone: 0,
healingDone: 0,
manaRestored: 0,
});
}
getTotemElementByTargetId(targetId: number) {
for (const element of TotemElementsList) {
if (this.activeTotem(element as TotemElements)?.targetID === targetId) {
return element;
}
}
return null;
}
totemDeathEvent(event: any) {
const targetId = event?.targetID || event.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement) {
this.markTotemAsDismissed(targetTotemelement as TotemElements, event.timestamp, event.type);
}
}
// Used to track what spells are absorbed by grounding totem.
totemCastEvent(event: any) {
const targetId = event?.targetID || event?.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement !== TotemElements.Air) {
return;
}
if (this.activeTotem(TotemElements.Air)?.totemSpellId !== SPELLS.GROUNDING_TOTEM) {
return;
}
this.totemElementEvents[TotemElements.Air][
this.totemElementEvents[TotemElements.Air].length - 1
].spellsGrounded = event.ability;
this.markTotemAsDismissed(TotemElements.Air, event.timestamp, event.type);
}
totemPurgeEvent(event: FightEndEvent | DeathEvent | CastEvent) {
this.markAllTotemsDismissed(event.timestamp, event.type);
}
}
export default TotemTracker;
| {
super(options);
this.addEventListener(
Events.summon.by(SELECTED_PLAYER).spell(AllTotemsFilter()),
this.totemSummoned,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER_PET), this.totemDeathEvent);
this.addEventListener(Events.cast, this.totemCastEvent);
this.addEventListener(
Events.cast.by(SELECTED_PLAYER).spell({ id: SPELLS.TOTEMIC_CALL }),
this.totemPurgeEvent,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER), this.totemPurgeEvent);
this.addEventListener(Events.fightend, this.totemPurgeEvent);
} | identifier_body |
TotemTracker.tsx | import Analyzer, { Options, SELECTED_PLAYER, SELECTED_PLAYER_PET } from 'parser/core/Analyzer';
import Events, { CastEvent, DeathEvent, FightEndEvent, SummonEvent } from 'parser/core/Events';
import Combatants from 'parser/shared/modules/Combatants';
import * as SPELLS from '../../SPELLS';
import {
AllTotemsFilter,
GetTotemElement,
TotemDurations,
TotemElements,
TotemElementsList,
TOTEMS_BY_ELEMENT,
} from '../../totemConstants';
export interface TotemEventTracker {
[TotemElements.Fire]: TotemEvent[];
[TotemElements.Water]: TotemEvent[];
[TotemElements.Earth]: TotemEvent[];
[TotemElements.Air]: TotemEvent[];
}
export interface TotemEvent {
totemSpellId: number;
totemName: string; // This is just to make debugging easier
summonedAt: number;
dismissedAt?: number;
dismissReason?: string;
duration?: number;
targetID?: number;
damageDone: number;
healingDone: number;
manaRestored: number;
spellsGrounded?: any;
}
class TotemTracker extends Analyzer {
static dependencies = {
combatants: Combatants,
};
protected combatants!: Combatants;
public totemElementEvents: TotemEventTracker = {
[TotemElements.Fire]: [],
[TotemElements.Water]: [],
[TotemElements.Earth]: [],
[TotemElements.Air]: [],
};
public totemEvents(totemSpellId: number) {
const events: TotemEvent[] = [];
const totemElement = GetTotemElement(totemSpellId);
if (totemElement === null) {
return events;
}
return this.totemElementEvents[totemElement].filter(
(e: TotemEvent) => e.totemSpellId === totemSpellId,
);
}
activeTotem(element: TotemElements) {
if (this.totemElementEvents[element].length === 0) {
return null;
}
const lastTotemSummoned = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
if (lastTotemSummoned.dismissedAt) {
return null;
}
return lastTotemSummoned;
}
| (totemIdOrElement: TotemElements | number) {
if (Number.isInteger(totemIdOrElement)) {
const totemId = totemIdOrElement as number;
if (this.totemEvents(totemId).length === 0) {
return 0;
}
return this.totemEvents(totemId)
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
const totemElement = totemIdOrElement as TotemElements;
if (this.totemElementEvents[totemElement].length === 0) {
return 0;
}
return this.totemElementEvents[totemElement]
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
totemUptimePercentage(totemIdOrElement: TotemElements | number): number {
return this.totalTotemUptime(totemIdOrElement) / this.owner.fightDuration;
}
// Duration is hard to get perfect, but we can do a few things to make the number we get not look so outlandish.
markTotemAsDismissed(element: TotemElements, timestamp: number, reason = '') {
if (this.totemElementEvents[element].length === 0) {
return;
}
if (this.totemElementEvents[element][this.totemElementEvents[element].length - 1].dismissedAt) {
return;
}
const totemEvent: TotemEvent = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
const possibleDuration: number = timestamp - totemEvent.summonedAt;
const maxDuration: number = (TotemDurations as any)[totemEvent.totemSpellId] as number;
const duration = Math.min(possibleDuration, maxDuration);
totemEvent.dismissedAt = timestamp;
totemEvent.dismissReason = reason;
totemEvent.duration = duration;
}
markAllTotemsDismissed(timestamp: number, reason = '') {
for (const element of TotemElementsList) {
this.markTotemAsDismissed(element, timestamp, reason);
}
}
allTotemUptimePercentage() {
return (
(this.totemUptimePercentage(TotemElements.Fire) +
this.totemUptimePercentage(TotemElements.Water) +
this.totemUptimePercentage(TotemElements.Earth) +
this.totemUptimePercentage(TotemElements.Air)) /
4
);
}
// Returns the ID of the totem that has the highest uptime for each element.
primaryTotemUsed(element: TotemElements) {
let primaryTotemId = TOTEMS_BY_ELEMENT[element][0];
if (TOTEMS_BY_ELEMENT[element].length === 0) {
return primaryTotemId;
}
for (const totemId of TOTEMS_BY_ELEMENT[element]) {
if (this.totalTotemUptime(totemId) > this.totalTotemUptime(primaryTotemId)) {
primaryTotemId = totemId;
}
}
return primaryTotemId;
}
constructor(options: Options) {
super(options);
this.addEventListener(
Events.summon.by(SELECTED_PLAYER).spell(AllTotemsFilter()),
this.totemSummoned,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER_PET), this.totemDeathEvent);
this.addEventListener(Events.cast, this.totemCastEvent);
this.addEventListener(
Events.cast.by(SELECTED_PLAYER).spell({ id: SPELLS.TOTEMIC_CALL }),
this.totemPurgeEvent,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER), this.totemPurgeEvent);
this.addEventListener(Events.fightend, this.totemPurgeEvent);
}
totemSummoned(event: SummonEvent) {
const totemSpellId = event.ability.guid;
const totemName = event.ability.name;
const totemElement = GetTotemElement(totemSpellId);
if (!totemElement) {
return;
}
this.markTotemAsDismissed(totemElement, event.timestamp, event.type);
this.totemElementEvents[totemElement].push({
totemSpellId,
totemName,
summonedAt: event.timestamp,
targetID: event.targetID || event.target?.id,
damageDone: 0,
healingDone: 0,
manaRestored: 0,
});
}
getTotemElementByTargetId(targetId: number) {
for (const element of TotemElementsList) {
if (this.activeTotem(element as TotemElements)?.targetID === targetId) {
return element;
}
}
return null;
}
totemDeathEvent(event: any) {
const targetId = event?.targetID || event.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement) {
this.markTotemAsDismissed(targetTotemelement as TotemElements, event.timestamp, event.type);
}
}
// Used to track what spells are absorbed by grounding totem.
totemCastEvent(event: any) {
const targetId = event?.targetID || event?.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement !== TotemElements.Air) {
return;
}
if (this.activeTotem(TotemElements.Air)?.totemSpellId !== SPELLS.GROUNDING_TOTEM) {
return;
}
this.totemElementEvents[TotemElements.Air][
this.totemElementEvents[TotemElements.Air].length - 1
].spellsGrounded = event.ability;
this.markTotemAsDismissed(TotemElements.Air, event.timestamp, event.type);
}
totemPurgeEvent(event: FightEndEvent | DeathEvent | CastEvent) {
this.markAllTotemsDismissed(event.timestamp, event.type);
}
}
export default TotemTracker;
| totalTotemUptime | identifier_name |
TotemTracker.tsx | import Analyzer, { Options, SELECTED_PLAYER, SELECTED_PLAYER_PET } from 'parser/core/Analyzer';
import Events, { CastEvent, DeathEvent, FightEndEvent, SummonEvent } from 'parser/core/Events';
import Combatants from 'parser/shared/modules/Combatants';
import * as SPELLS from '../../SPELLS';
import {
AllTotemsFilter,
GetTotemElement,
TotemDurations,
TotemElements,
TotemElementsList,
TOTEMS_BY_ELEMENT,
} from '../../totemConstants';
export interface TotemEventTracker {
[TotemElements.Fire]: TotemEvent[];
[TotemElements.Water]: TotemEvent[];
[TotemElements.Earth]: TotemEvent[];
[TotemElements.Air]: TotemEvent[];
}
export interface TotemEvent {
totemSpellId: number;
totemName: string; // This is just to make debugging easier
summonedAt: number;
dismissedAt?: number;
dismissReason?: string;
duration?: number;
targetID?: number;
damageDone: number;
healingDone: number;
manaRestored: number;
spellsGrounded?: any;
}
class TotemTracker extends Analyzer {
static dependencies = {
combatants: Combatants,
};
protected combatants!: Combatants;
public totemElementEvents: TotemEventTracker = {
[TotemElements.Fire]: [],
[TotemElements.Water]: [],
[TotemElements.Earth]: [],
[TotemElements.Air]: [],
};
public totemEvents(totemSpellId: number) {
const events: TotemEvent[] = [];
const totemElement = GetTotemElement(totemSpellId);
if (totemElement === null) {
return events;
}
return this.totemElementEvents[totemElement].filter(
(e: TotemEvent) => e.totemSpellId === totemSpellId,
);
}
activeTotem(element: TotemElements) {
if (this.totemElementEvents[element].length === 0) {
return null;
}
const lastTotemSummoned = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
if (lastTotemSummoned.dismissedAt) {
return null;
}
return lastTotemSummoned;
}
totalTotemUptime(totemIdOrElement: TotemElements | number) {
if (Number.isInteger(totemIdOrElement)) {
const totemId = totemIdOrElement as number;
if (this.totemEvents(totemId).length === 0) {
return 0;
}
return this.totemEvents(totemId)
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
const totemElement = totemIdOrElement as TotemElements;
if (this.totemElementEvents[totemElement].length === 0) {
return 0;
}
return this.totemElementEvents[totemElement]
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
totemUptimePercentage(totemIdOrElement: TotemElements | number): number {
return this.totalTotemUptime(totemIdOrElement) / this.owner.fightDuration;
}
// Duration is hard to get perfect, but we can do a few things to make the number we get not look so outlandish.
markTotemAsDismissed(element: TotemElements, timestamp: number, reason = '') {
if (this.totemElementEvents[element].length === 0) {
return;
}
if (this.totemElementEvents[element][this.totemElementEvents[element].length - 1].dismissedAt) {
return;
}
const totemEvent: TotemEvent = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
const possibleDuration: number = timestamp - totemEvent.summonedAt;
const maxDuration: number = (TotemDurations as any)[totemEvent.totemSpellId] as number;
const duration = Math.min(possibleDuration, maxDuration);
totemEvent.dismissedAt = timestamp;
totemEvent.dismissReason = reason;
totemEvent.duration = duration;
}
markAllTotemsDismissed(timestamp: number, reason = '') {
for (const element of TotemElementsList) {
this.markTotemAsDismissed(element, timestamp, reason);
}
}
allTotemUptimePercentage() {
return (
(this.totemUptimePercentage(TotemElements.Fire) +
this.totemUptimePercentage(TotemElements.Water) +
this.totemUptimePercentage(TotemElements.Earth) +
this.totemUptimePercentage(TotemElements.Air)) /
4
);
}
// Returns the ID of the totem that has the highest uptime for each element.
primaryTotemUsed(element: TotemElements) {
let primaryTotemId = TOTEMS_BY_ELEMENT[element][0];
if (TOTEMS_BY_ELEMENT[element].length === 0) {
return primaryTotemId;
}
for (const totemId of TOTEMS_BY_ELEMENT[element]) {
if (this.totalTotemUptime(totemId) > this.totalTotemUptime(primaryTotemId)) {
primaryTotemId = totemId;
}
}
return primaryTotemId;
}
| Events.summon.by(SELECTED_PLAYER).spell(AllTotemsFilter()),
this.totemSummoned,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER_PET), this.totemDeathEvent);
this.addEventListener(Events.cast, this.totemCastEvent);
this.addEventListener(
Events.cast.by(SELECTED_PLAYER).spell({ id: SPELLS.TOTEMIC_CALL }),
this.totemPurgeEvent,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER), this.totemPurgeEvent);
this.addEventListener(Events.fightend, this.totemPurgeEvent);
}
totemSummoned(event: SummonEvent) {
const totemSpellId = event.ability.guid;
const totemName = event.ability.name;
const totemElement = GetTotemElement(totemSpellId);
if (!totemElement) {
return;
}
this.markTotemAsDismissed(totemElement, event.timestamp, event.type);
this.totemElementEvents[totemElement].push({
totemSpellId,
totemName,
summonedAt: event.timestamp,
targetID: event.targetID || event.target?.id,
damageDone: 0,
healingDone: 0,
manaRestored: 0,
});
}
getTotemElementByTargetId(targetId: number) {
for (const element of TotemElementsList) {
if (this.activeTotem(element as TotemElements)?.targetID === targetId) {
return element;
}
}
return null;
}
totemDeathEvent(event: any) {
const targetId = event?.targetID || event.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement) {
this.markTotemAsDismissed(targetTotemelement as TotemElements, event.timestamp, event.type);
}
}
// Used to track what spells are absorbed by grounding totem.
totemCastEvent(event: any) {
const targetId = event?.targetID || event?.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement !== TotemElements.Air) {
return;
}
if (this.activeTotem(TotemElements.Air)?.totemSpellId !== SPELLS.GROUNDING_TOTEM) {
return;
}
this.totemElementEvents[TotemElements.Air][
this.totemElementEvents[TotemElements.Air].length - 1
].spellsGrounded = event.ability;
this.markTotemAsDismissed(TotemElements.Air, event.timestamp, event.type);
}
totemPurgeEvent(event: FightEndEvent | DeathEvent | CastEvent) {
this.markAllTotemsDismissed(event.timestamp, event.type);
}
}
export default TotemTracker; | constructor(options: Options) {
super(options);
this.addEventListener( | random_line_split |
TotemTracker.tsx | import Analyzer, { Options, SELECTED_PLAYER, SELECTED_PLAYER_PET } from 'parser/core/Analyzer';
import Events, { CastEvent, DeathEvent, FightEndEvent, SummonEvent } from 'parser/core/Events';
import Combatants from 'parser/shared/modules/Combatants';
import * as SPELLS from '../../SPELLS';
import {
AllTotemsFilter,
GetTotemElement,
TotemDurations,
TotemElements,
TotemElementsList,
TOTEMS_BY_ELEMENT,
} from '../../totemConstants';
export interface TotemEventTracker {
[TotemElements.Fire]: TotemEvent[];
[TotemElements.Water]: TotemEvent[];
[TotemElements.Earth]: TotemEvent[];
[TotemElements.Air]: TotemEvent[];
}
export interface TotemEvent {
totemSpellId: number;
totemName: string; // This is just to make debugging easier
summonedAt: number;
dismissedAt?: number;
dismissReason?: string;
duration?: number;
targetID?: number;
damageDone: number;
healingDone: number;
manaRestored: number;
spellsGrounded?: any;
}
class TotemTracker extends Analyzer {
static dependencies = {
combatants: Combatants,
};
protected combatants!: Combatants;
public totemElementEvents: TotemEventTracker = {
[TotemElements.Fire]: [],
[TotemElements.Water]: [],
[TotemElements.Earth]: [],
[TotemElements.Air]: [],
};
public totemEvents(totemSpellId: number) {
const events: TotemEvent[] = [];
const totemElement = GetTotemElement(totemSpellId);
if (totemElement === null) {
return events;
}
return this.totemElementEvents[totemElement].filter(
(e: TotemEvent) => e.totemSpellId === totemSpellId,
);
}
activeTotem(element: TotemElements) {
if (this.totemElementEvents[element].length === 0) {
return null;
}
const lastTotemSummoned = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
if (lastTotemSummoned.dismissedAt) {
return null;
}
return lastTotemSummoned;
}
totalTotemUptime(totemIdOrElement: TotemElements | number) {
if (Number.isInteger(totemIdOrElement)) {
const totemId = totemIdOrElement as number;
if (this.totemEvents(totemId).length === 0) {
return 0;
}
return this.totemEvents(totemId)
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
const totemElement = totemIdOrElement as TotemElements;
if (this.totemElementEvents[totemElement].length === 0) {
return 0;
}
return this.totemElementEvents[totemElement]
.map((event) => event.duration || 0)
.reduce((a: number, b: number) => a + b);
}
totemUptimePercentage(totemIdOrElement: TotemElements | number): number {
return this.totalTotemUptime(totemIdOrElement) / this.owner.fightDuration;
}
// Duration is hard to get perfect, but we can do a few things to make the number we get not look so outlandish.
markTotemAsDismissed(element: TotemElements, timestamp: number, reason = '') {
if (this.totemElementEvents[element].length === 0) {
return;
}
if (this.totemElementEvents[element][this.totemElementEvents[element].length - 1].dismissedAt) |
const totemEvent: TotemEvent = this.totemElementEvents[element][
this.totemElementEvents[element].length - 1
];
const possibleDuration: number = timestamp - totemEvent.summonedAt;
const maxDuration: number = (TotemDurations as any)[totemEvent.totemSpellId] as number;
const duration = Math.min(possibleDuration, maxDuration);
totemEvent.dismissedAt = timestamp;
totemEvent.dismissReason = reason;
totemEvent.duration = duration;
}
markAllTotemsDismissed(timestamp: number, reason = '') {
for (const element of TotemElementsList) {
this.markTotemAsDismissed(element, timestamp, reason);
}
}
allTotemUptimePercentage() {
return (
(this.totemUptimePercentage(TotemElements.Fire) +
this.totemUptimePercentage(TotemElements.Water) +
this.totemUptimePercentage(TotemElements.Earth) +
this.totemUptimePercentage(TotemElements.Air)) /
4
);
}
// Returns the ID of the totem that has the highest uptime for each element.
primaryTotemUsed(element: TotemElements) {
let primaryTotemId = TOTEMS_BY_ELEMENT[element][0];
if (TOTEMS_BY_ELEMENT[element].length === 0) {
return primaryTotemId;
}
for (const totemId of TOTEMS_BY_ELEMENT[element]) {
if (this.totalTotemUptime(totemId) > this.totalTotemUptime(primaryTotemId)) {
primaryTotemId = totemId;
}
}
return primaryTotemId;
}
constructor(options: Options) {
super(options);
this.addEventListener(
Events.summon.by(SELECTED_PLAYER).spell(AllTotemsFilter()),
this.totemSummoned,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER_PET), this.totemDeathEvent);
this.addEventListener(Events.cast, this.totemCastEvent);
this.addEventListener(
Events.cast.by(SELECTED_PLAYER).spell({ id: SPELLS.TOTEMIC_CALL }),
this.totemPurgeEvent,
);
this.addEventListener(Events.death.to(SELECTED_PLAYER), this.totemPurgeEvent);
this.addEventListener(Events.fightend, this.totemPurgeEvent);
}
totemSummoned(event: SummonEvent) {
const totemSpellId = event.ability.guid;
const totemName = event.ability.name;
const totemElement = GetTotemElement(totemSpellId);
if (!totemElement) {
return;
}
this.markTotemAsDismissed(totemElement, event.timestamp, event.type);
this.totemElementEvents[totemElement].push({
totemSpellId,
totemName,
summonedAt: event.timestamp,
targetID: event.targetID || event.target?.id,
damageDone: 0,
healingDone: 0,
manaRestored: 0,
});
}
getTotemElementByTargetId(targetId: number) {
for (const element of TotemElementsList) {
if (this.activeTotem(element as TotemElements)?.targetID === targetId) {
return element;
}
}
return null;
}
totemDeathEvent(event: any) {
const targetId = event?.targetID || event.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement) {
this.markTotemAsDismissed(targetTotemelement as TotemElements, event.timestamp, event.type);
}
}
// Used to track what spells are absorbed by grounding totem.
totemCastEvent(event: any) {
const targetId = event?.targetID || event?.target?.id;
const targetTotemelement = this.getTotemElementByTargetId(targetId);
if (targetTotemelement !== TotemElements.Air) {
return;
}
if (this.activeTotem(TotemElements.Air)?.totemSpellId !== SPELLS.GROUNDING_TOTEM) {
return;
}
this.totemElementEvents[TotemElements.Air][
this.totemElementEvents[TotemElements.Air].length - 1
].spellsGrounded = event.ability;
this.markTotemAsDismissed(TotemElements.Air, event.timestamp, event.type);
}
totemPurgeEvent(event: FightEndEvent | DeathEvent | CastEvent) {
this.markAllTotemsDismissed(event.timestamp, event.type);
}
}
export default TotemTracker;
| {
return;
} | conditional_block |
reducer_test.ts | import { fakeState } from "../../__test_support__/fake_state";
import { overwrite, refreshStart, refreshOK, refreshNO } from "../../api/crud";
import {
SpecialStatus,
TaggedSequence,
TaggedDevice,
ResourceName,
TaggedResource,
TaggedTool,
} from "farmbot";
import { buildResourceIndex } from "../../__test_support__/resource_index_builder";
import { GeneralizedError } from "../actions";
import { Actions } from "../../constants";
import { fakeResource } from "../../__test_support__/fake_resource";
import { resourceReducer } from "../reducer";
import { findByUuid } from "../reducer_support";
import { EditResourceParams } from "../../api/interfaces";
import { fakeFolder } from "../../__test_support__/fake_state/resources";
describe("resource reducer", () => {
it("marks resources as DIRTY when reducing OVERWRITE_RESOURCE", () => {
const state = fakeState().resources;
const uuid = Object.keys(state.index.byKind.Sequence)[0];
const sequence = state.index.references[uuid] as TaggedSequence;
expect(sequence).toBeTruthy();
expect(sequence.kind).toBe("Sequence");
const next = resourceReducer(state, overwrite(sequence, {
kind: "sequence",
name: "wow",
folder_id: undefined,
args: { version: -0, locals: { kind: "scope_declaration", args: {} } },
body: [],
color: "red"
}));
const seq2 = next.index.references[uuid] as TaggedSequence;
expect(seq2.specialStatus).toBe(SpecialStatus.DIRTY);
});
it("marks resources as SAVING when reducing REFRESH_RESOURCE_START", () => {
const state = fakeState().resources;
const uuid = Object.keys(state.index.byKind.Device)[0];
const device = state.index.references[uuid] as TaggedDevice;
expect(device).toBeTruthy();
expect(device.kind).toBe("Device");
const afterStart = resourceReducer(state, refreshStart(device.uuid));
const dev2 = afterStart.index.references[uuid] as TaggedDevice;
expect(dev2.specialStatus).toBe(SpecialStatus.SAVING);
// SCENARIO: REFRESH_START ===> REFRESH_OK
const afterOk = resourceReducer(afterStart, refreshOK(device));
const dev3 = afterOk.index.references[uuid] as TaggedDevice;
expect(dev3.specialStatus).toBe(SpecialStatus.SAVED);
const payl: GeneralizedError = {
err: "X",
uuid: dev3.uuid,
statusBeforeError: SpecialStatus.DIRTY
};
// SCENARIO: REFRESH_START ===> REFRESH_NO
const afterNo =
resourceReducer(afterStart, refreshNO(payl));
const dev4 = afterNo.index.references[uuid] as TaggedDevice;
expect(dev4.specialStatus).toBe(SpecialStatus.SAVED);
});
const TEST_RESOURCE_NAMES: TaggedResource["kind"][] = ["Crop", "Device",
"FarmEvent", "FarmwareInstallation", "FbosConfig",
"FirmwareConfig", "Log", "Peripheral", "PinBinding", "PlantTemplate",
"Point", "Regimen", "SavedGarden", "Sensor"];
it("EDITs a _RESOURCE", () => {
const startingState = fakeState().resources;
const { index } = startingState;
const uuid = Object.keys(index.byKind.Tool)[0];
const update: Partial<TaggedTool["body"]> = { name: "after" };
const payload: EditResourceParams = {
uuid,
update,
specialStatus: SpecialStatus.SAVED
};
const action = { type: Actions.EDIT_RESOURCE, payload };
const newState = resourceReducer(startingState, action);
const oldTool = index.references[uuid] as TaggedTool;
const newTool = newState.index.references[uuid] as TaggedTool;
expect(oldTool.body.name).not.toEqual("after");
expect(newTool.body.name).toEqual("after");
});
| const startingState = fakeState().resources;
const uuid = Object.keys(startingState.index.byKind.Tool)[0];
const action = {
type: Actions._RESOURCE_NO,
payload: { uuid, err: "Whatever", statusBeforeError: SpecialStatus.DIRTY }
};
const newState = resourceReducer(startingState, action);
const tool = newState.index.references[uuid] as TaggedTool;
expect(tool.specialStatus).toBe(SpecialStatus.DIRTY);
});
it("covers destroy resource branches", () => {
const testResourceDestroy = (kind: ResourceName) => {
const state = fakeState().resources;
const resource = fakeResource(kind as TaggedResource["kind"], {});
const action = {
type: Actions.DESTROY_RESOURCE_OK,
payload: resource
};
const newState = resourceReducer(state, action);
expect(newState.index.references[resource.uuid]).toEqual(undefined);
};
TEST_RESOURCE_NAMES
.concat(["Image", "SensorReading"])
.map((kind: ResourceName) => testResourceDestroy(kind));
});
it("toggles folder open state", () => {
const folder = fakeFolder();
folder.body.id = 1;
const startingState = buildResourceIndex([folder]);
(startingState.index.sequenceFolders.localMetaAttributes[1].open as unknown)
= undefined;
const action = { type: Actions.FOLDER_TOGGLE, payload: { id: 1 } };
const newState = resourceReducer(startingState, action);
expect(newState.index.sequenceFolders.localMetaAttributes[1].open)
.toEqual(false);
});
});
describe("findByUuid", () => {
it("crashes on bad UUIDs", () => {
expect(() => findByUuid(buildResourceIndex().index, "Nope!")).toThrow();
});
}); | it("handles resource failures", () => { | random_line_split |
ConvexPolygon.d.ts | import { Color } from '../Drawing/Color';
import { BoundingBox } from './BoundingBox';
import { CollisionContact } from './CollisionContact';
import { CollisionShape } from './CollisionShape';
import { Vector, Line, Ray, Projection } from '../Algebra';
import { Collider } from './Collider';
export interface ConvexPolygonOptions {
/**
* Pixel offset relative to a collider's position
*/
offset?: Vector;
/**
* Points in the polygon in order around the perimeter in local coordinates
*/
points: Vector[];
/**
* Whether points are specified in clockwise or counter clockwise order, default counter-clockwise
*/
clockwiseWinding?: boolean;
/**
* Collider to associate optionally with this shape
*/
collider?: Collider;
}
/**
* Polygon collision shape for detecting collisions
*
* Example:
* [[include:BoxAndPolygonShape.md]]
*/
export declare class ConvexPolygon implements CollisionShape {
offset: Vector;
points: Vector[];
/**
* Collider associated with this shape
*/
collider?: Collider;
private _transformedPoints;
private _axes;
private _sides;
constructor(options: ConvexPolygonOptions);
/**
* Returns a clone of this ConvexPolygon, not associated with any collider
*/
clone(): ConvexPolygon;
get worldPos(): Vector;
/**
* Get the center of the collision shape in world coordinates
*/
get center(): Vector;
/**
* Calculates the underlying transformation from the body relative space to world space
*/
private _calculateTransformation;
/**
* Gets the points that make up the polygon in world space, from actor relative space (if specified)
*/
getTransformedPoints(): Vector[];
/**
* Gets the sides of the polygon in world space
*/
getSides(): Line[];
recalc(): void;
/**
* Tests if a point is contained in this collision shape in world space
*/
contains(point: Vector): boolean;
getClosestLineBetween(shape: CollisionShape): Line;
/**
* Returns a collision contact if the 2 collision shapes collide, otherwise collide will
* return null.
* @param shape
*/
collide(shape: CollisionShape): CollisionContact;
/**
* Find the point on the shape furthest in the direction specified
*/
getFurthestPoint(direction: Vector): Vector;
/**
* Finds the closes face to the point using perpendicular distance
* @param point point to test against polygon
*/
getClosestFace(point: Vector): {
distance: Vector;
face: Line;
};
/**
* Get the axis aligned bounding box for the polygon shape in world coordinates
*/
get bounds(): BoundingBox;
/** | * Get the axis aligned bounding box for the polygon shape in local coordinates
*/
get localBounds(): BoundingBox;
/**
* Get the moment of inertia for an arbitrary polygon
* https://en.wikipedia.org/wiki/List_of_moments_of_inertia
*/
get inertia(): number;
/**
* Casts a ray into the polygon and returns a vector representing the point of contact (in world space) or null if no collision.
*/
rayCast(ray: Ray, max?: number): Vector;
/**
* Get the axis associated with the convex polygon
*/
get axes(): Vector[];
/**
* Perform Separating Axis test against another polygon, returns null if no overlap in polys
* Reference http://www.dyn4j.org/2010/01/sat/
*/
testSeparatingAxisTheorem(other: ConvexPolygon): Vector;
/**
* Project the edges of the polygon along a specified axis
*/
project(axis: Vector): Projection;
draw(ctx: CanvasRenderingContext2D, color?: Color, pos?: Vector): void;
debugDraw(ctx: CanvasRenderingContext2D, color?: Color): void;
} | random_line_split | |
ConvexPolygon.d.ts | import { Color } from '../Drawing/Color';
import { BoundingBox } from './BoundingBox';
import { CollisionContact } from './CollisionContact';
import { CollisionShape } from './CollisionShape';
import { Vector, Line, Ray, Projection } from '../Algebra';
import { Collider } from './Collider';
export interface ConvexPolygonOptions {
/**
* Pixel offset relative to a collider's position
*/
offset?: Vector;
/**
* Points in the polygon in order around the perimeter in local coordinates
*/
points: Vector[];
/**
* Whether points are specified in clockwise or counter clockwise order, default counter-clockwise
*/
clockwiseWinding?: boolean;
/**
* Collider to associate optionally with this shape
*/
collider?: Collider;
}
/**
* Polygon collision shape for detecting collisions
*
* Example:
* [[include:BoxAndPolygonShape.md]]
*/
export declare class | implements CollisionShape {
offset: Vector;
points: Vector[];
/**
* Collider associated with this shape
*/
collider?: Collider;
private _transformedPoints;
private _axes;
private _sides;
constructor(options: ConvexPolygonOptions);
/**
* Returns a clone of this ConvexPolygon, not associated with any collider
*/
clone(): ConvexPolygon;
get worldPos(): Vector;
/**
* Get the center of the collision shape in world coordinates
*/
get center(): Vector;
/**
* Calculates the underlying transformation from the body relative space to world space
*/
private _calculateTransformation;
/**
* Gets the points that make up the polygon in world space, from actor relative space (if specified)
*/
getTransformedPoints(): Vector[];
/**
* Gets the sides of the polygon in world space
*/
getSides(): Line[];
recalc(): void;
/**
* Tests if a point is contained in this collision shape in world space
*/
contains(point: Vector): boolean;
getClosestLineBetween(shape: CollisionShape): Line;
/**
* Returns a collision contact if the 2 collision shapes collide, otherwise collide will
* return null.
* @param shape
*/
collide(shape: CollisionShape): CollisionContact;
/**
* Find the point on the shape furthest in the direction specified
*/
getFurthestPoint(direction: Vector): Vector;
/**
* Finds the closes face to the point using perpendicular distance
* @param point point to test against polygon
*/
getClosestFace(point: Vector): {
distance: Vector;
face: Line;
};
/**
* Get the axis aligned bounding box for the polygon shape in world coordinates
*/
get bounds(): BoundingBox;
/**
* Get the axis aligned bounding box for the polygon shape in local coordinates
*/
get localBounds(): BoundingBox;
/**
* Get the moment of inertia for an arbitrary polygon
* https://en.wikipedia.org/wiki/List_of_moments_of_inertia
*/
get inertia(): number;
/**
* Casts a ray into the polygon and returns a vector representing the point of contact (in world space) or null if no collision.
*/
rayCast(ray: Ray, max?: number): Vector;
/**
* Get the axis associated with the convex polygon
*/
get axes(): Vector[];
/**
* Perform Separating Axis test against another polygon, returns null if no overlap in polys
* Reference http://www.dyn4j.org/2010/01/sat/
*/
testSeparatingAxisTheorem(other: ConvexPolygon): Vector;
/**
* Project the edges of the polygon along a specified axis
*/
project(axis: Vector): Projection;
draw(ctx: CanvasRenderingContext2D, color?: Color, pos?: Vector): void;
debugDraw(ctx: CanvasRenderingContext2D, color?: Color): void;
}
| ConvexPolygon | identifier_name |
hgid.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#[cfg(any(test, feature = "for-tests"))]
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::io::{self};
#[cfg(any(test, feature = "for-tests"))]
use rand::RngCore;
use sha1::Digest;
use sha1::Sha1;
use crate::hash::AbstractHashType;
use crate::hash::HashTypeInfo;
use crate::parents::Parents;
/// A 20-byte identifier, often a hash. Nodes are used to uniquely identify
/// commits, file versions, and many other things.
///
///
/// # Serde Serialization
///
/// The `serde_with` module allows customization on `HgId` serialization:
/// - `#[serde(with = "types::serde_with::hgid::bytes")]` (current default)
/// - `#[serde(with = "types::serde_with::hgid::hex")]`
/// - `#[serde(with = "types::serde_with::hgid::tuple")]`
///
/// Using them can change the size or the type of serialization result:
///
/// | lib \ serde_with | hgid::tuple | hgid::bytes | hgid::hex |
/// |------------------|---------------------|--------------|-----------|
/// | mincode | 20 bytes | 21 bytes | 41 bytes |
/// | cbor | 21 to 41 bytes [1] | 21 bytes | 42 bytes |
/// | json | 41 to 81+ bytes [1] | invalid [2] | 42 bytes |
/// | python | Tuple[int] | bytes | str |
///
/// In general,
/// - `hgid::tuple` only works best for `mincode`.
/// - `hgid::bytes` works best for cbor, python.
/// - `hgid::hex` is useful for `json`, or other text-only formats.
///
/// Compatibility note:
/// - `hgid::tuple` cannot decode `hgid::bytes` or `hgid::hex` data.
/// - `hgid::hex` can decode `hgid::bytes` data, or vice-versa. They share a
/// same `deserialize` implementation.
/// - `hgid::hex` or `hgid::bytes` might be able to decode `hgid::tuple` data,
/// depending on how tuples are serialized. For example, mincode
/// does not add framing for tuples, so `hgid::bytes` cannot decode
/// `hgid::tuple` data; cbor adds framing for tuples, so `hgid::bytes`
/// can decode `hgid::tuple` data.
///
/// [1]: Depends on actual data of `HgId`.
/// [2]: JSON only supports utf-8 data.
pub type HgId = AbstractHashType<HgIdTypeInfo, 20>;
pub struct HgIdTypeInfo;
impl HashTypeInfo for HgIdTypeInfo {
const HASH_TYPE_NAME: &'static str = "HgId";
}
/// The nullid (0x00) is used throughout Mercurial to represent "None".
/// (For example, a commit will have a nullid p2, if it has no second parent).
pub const NULL_ID: HgId = HgId::from_byte_array([0; HgId::len()]);
/// The hard-coded 'working copy parent' Mercurial id.
pub const WDIR_ID: HgId = HgId::from_byte_array([0xff; HgId::len()]);
impl HgId {
pub fn null_id() -> &'static Self {
&NULL_ID
}
pub fn is_null(&self) -> bool {
self == &NULL_ID
}
pub const fn wdir_id() -> &'static Self {
&WDIR_ID
}
pub fn is_wdir(&self) -> bool {
self == &WDIR_ID
}
pub fn from_content(data: &[u8], parents: Parents) -> Self {
// Parents must be hashed in sorted order.
let (p1, p2) = match parents.into_nodes() {
(p1, p2) if p1 > p2 => (p2, p1),
(p1, p2) => (p1, p2),
};
let mut hasher = Sha1::new();
hasher.input(p1.as_ref());
hasher.input(p2.as_ref());
hasher.input(data);
let hash: [u8; 20] = hasher.result().into();
HgId::from_byte_array(hash)
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random(rng: &mut dyn RngCore) -> Self {
let mut bytes = [0; HgId::len()];
rng.fill_bytes(&mut bytes);
loop {
let hgid = HgId::from(&bytes);
if !hgid.is_null() {
return hgid;
}
}
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random_distinct(rng: &mut dyn RngCore, count: usize) -> Vec<Self> {
let mut nodes = Vec::new();
let mut nodeset = HashSet::new();
while nodes.len() < count {
let hgid = HgId::random(rng);
if !nodeset.contains(&hgid) {
nodeset.insert(hgid.clone());
nodes.push(hgid);
}
}
nodes
}
}
impl<'a> From<&'a [u8; HgId::len()]> for HgId {
fn from(bytes: &[u8; HgId::len()]) -> HgId {
HgId::from_byte_array(bytes.clone())
}
}
pub trait WriteHgIdExt {
/// Write a ``HgId`` directly to a stream.
///
/// # Examples
///
/// ```
/// use types::hgid::{HgId, WriteHgIdExt};
/// let mut v = vec![];
///
/// let n = HgId::null_id();
/// v.write_hgid(&n).expect("writing a hgid to a vec should work");
///
/// assert_eq!(v, vec![0; HgId::len()]);
/// ```
fn write_hgid(&mut self, value: &HgId) -> io::Result<()>;
}
impl<W: Write + ?Sized> WriteHgIdExt for W {
fn write_hgid(&mut self, value: &HgId) -> io::Result<()> {
self.write_all(value.as_ref())
}
}
pub trait ReadHgIdExt {
/// Read a ``HgId`` directly from a stream.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use types::hgid::{HgId, ReadHgIdExt};
/// let mut v = vec![0; HgId::len()];
/// let mut c = Cursor::new(v);
///
/// let n = c.read_hgid().expect("reading a hgid from a vec should work");
///
/// assert_eq!(&n, HgId::null_id());
/// ```
fn read_hgid(&mut self) -> io::Result<HgId>;
}
impl<R: Read + ?Sized> ReadHgIdExt for R {
fn read_hgid(&mut self) -> io::Result<HgId> {
let mut bytes = [0; HgId::len()];
self.read_exact(&mut bytes)?;
Ok(HgId::from_byte_array(bytes))
}
}
#[cfg(any(test, feature = "for-tests"))]
pub mod mocks {
use super::HgId;
pub const ONES: HgId = HgId::from_byte_array([0x11; HgId::len()]);
pub const TWOS: HgId = HgId::from_byte_array([0x22; HgId::len()]);
pub const THREES: HgId = HgId::from_byte_array([0x33; HgId::len()]);
pub const FOURS: HgId = HgId::from_byte_array([0x44; HgId::len()]);
pub const FIVES: HgId = HgId::from_byte_array([0x55; HgId::len()]);
pub const SIXES: HgId = HgId::from_byte_array([0x66; HgId::len()]);
pub const SEVENS: HgId = HgId::from_byte_array([0x77; HgId::len()]);
pub const EIGHTS: HgId = HgId::from_byte_array([0x88; HgId::len()]);
pub const NINES: HgId = HgId::from_byte_array([0x99; HgId::len()]);
pub const AS: HgId = HgId::from_byte_array([0xAA; HgId::len()]);
pub const BS: HgId = HgId::from_byte_array([0xAB; HgId::len()]);
pub const CS: HgId = HgId::from_byte_array([0xCC; HgId::len()]);
pub const DS: HgId = HgId::from_byte_array([0xDD; HgId::len()]);
pub const ES: HgId = HgId::from_byte_array([0xEE; HgId::len()]);
pub const FS: HgId = HgId::from_byte_array([0xFF; HgId::len()]);
}
#[cfg(test)]
mod tests {
use quickcheck::quickcheck;
use serde::Deserialize;
use serde::Serialize;
use super::*;
#[test]
fn | () {
HgId::from_slice(&[0u8; 25]).expect_err("bad slice length");
}
#[test]
fn test_serde_with_using_cbor() {
// Note: this test is for CBOR. Other serializers like mincode
// or Thrift would have different backwards compatibility!
use serde_cbor::de::from_slice as decode;
use serde_cbor::ser::to_vec as encode;
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Orig(#[serde(with = "crate::serde_with::hgid::tuple")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Bytes(#[serde(with = "crate::serde_with::hgid::bytes")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Hex(#[serde(with = "crate::serde_with::hgid::hex")] HgId);
let id: HgId = mocks::CS;
let orig = Orig(id);
let bytes = Bytes(id);
let hex = Hex(id);
let cbor_orig = encode(&orig).unwrap();
let cbor_bytes = encode(&bytes).unwrap();
let cbor_hex = encode(&hex).unwrap();
assert_eq!(cbor_orig.len(), 41);
assert_eq!(cbor_bytes.len(), 21);
assert_eq!(cbor_hex.len(), 42);
// Orig cannot decode bytes or hex.
assert_eq!(decode::<Orig>(&cbor_orig).unwrap().0, id);
decode::<Orig>(&cbor_bytes).unwrap_err();
decode::<Orig>(&cbor_hex).unwrap_err();
// Bytes can decode all 3 formats.
assert_eq!(decode::<Bytes>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_hex).unwrap().0, id);
// Hex can decode all 3 formats.
assert_eq!(decode::<Hex>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_hex).unwrap().0, id);
}
quickcheck! {
fn test_from_slice(hgid: HgId) -> bool {
hgid == HgId::from_slice(hgid.as_ref()).expect("from_slice")
}
}
}
| test_incorrect_length | identifier_name |
hgid.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#[cfg(any(test, feature = "for-tests"))]
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::io::{self};
#[cfg(any(test, feature = "for-tests"))]
use rand::RngCore;
use sha1::Digest;
use sha1::Sha1;
use crate::hash::AbstractHashType;
use crate::hash::HashTypeInfo;
use crate::parents::Parents;
/// A 20-byte identifier, often a hash. Nodes are used to uniquely identify
/// commits, file versions, and many other things.
///
///
/// # Serde Serialization
///
/// The `serde_with` module allows customization on `HgId` serialization:
/// - `#[serde(with = "types::serde_with::hgid::bytes")]` (current default)
/// - `#[serde(with = "types::serde_with::hgid::hex")]`
/// - `#[serde(with = "types::serde_with::hgid::tuple")]`
///
/// Using them can change the size or the type of serialization result:
///
/// | lib \ serde_with | hgid::tuple | hgid::bytes | hgid::hex |
/// |------------------|---------------------|--------------|-----------|
/// | mincode | 20 bytes | 21 bytes | 41 bytes |
/// | cbor | 21 to 41 bytes [1] | 21 bytes | 42 bytes |
/// | json | 41 to 81+ bytes [1] | invalid [2] | 42 bytes |
/// | python | Tuple[int] | bytes | str |
///
/// In general,
/// - `hgid::tuple` only works best for `mincode`.
/// - `hgid::bytes` works best for cbor, python.
/// - `hgid::hex` is useful for `json`, or other text-only formats.
///
/// Compatibility note:
/// - `hgid::tuple` cannot decode `hgid::bytes` or `hgid::hex` data.
/// - `hgid::hex` can decode `hgid::bytes` data, or vice-versa. They share a
/// same `deserialize` implementation.
/// - `hgid::hex` or `hgid::bytes` might be able to decode `hgid::tuple` data,
/// depending on how tuples are serialized. For example, mincode
/// does not add framing for tuples, so `hgid::bytes` cannot decode
/// `hgid::tuple` data; cbor adds framing for tuples, so `hgid::bytes`
/// can decode `hgid::tuple` data.
///
/// [1]: Depends on actual data of `HgId`.
/// [2]: JSON only supports utf-8 data.
pub type HgId = AbstractHashType<HgIdTypeInfo, 20>;
pub struct HgIdTypeInfo;
impl HashTypeInfo for HgIdTypeInfo {
const HASH_TYPE_NAME: &'static str = "HgId";
}
/// The nullid (0x00) is used throughout Mercurial to represent "None".
/// (For example, a commit will have a nullid p2, if it has no second parent).
pub const NULL_ID: HgId = HgId::from_byte_array([0; HgId::len()]);
/// The hard-coded 'working copy parent' Mercurial id.
pub const WDIR_ID: HgId = HgId::from_byte_array([0xff; HgId::len()]);
impl HgId {
pub fn null_id() -> &'static Self {
&NULL_ID
}
pub fn is_null(&self) -> bool {
self == &NULL_ID
}
pub const fn wdir_id() -> &'static Self {
&WDIR_ID
}
pub fn is_wdir(&self) -> bool {
self == &WDIR_ID
}
pub fn from_content(data: &[u8], parents: Parents) -> Self |
#[cfg(any(test, feature = "for-tests"))]
pub fn random(rng: &mut dyn RngCore) -> Self {
let mut bytes = [0; HgId::len()];
rng.fill_bytes(&mut bytes);
loop {
let hgid = HgId::from(&bytes);
if !hgid.is_null() {
return hgid;
}
}
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random_distinct(rng: &mut dyn RngCore, count: usize) -> Vec<Self> {
let mut nodes = Vec::new();
let mut nodeset = HashSet::new();
while nodes.len() < count {
let hgid = HgId::random(rng);
if !nodeset.contains(&hgid) {
nodeset.insert(hgid.clone());
nodes.push(hgid);
}
}
nodes
}
}
impl<'a> From<&'a [u8; HgId::len()]> for HgId {
fn from(bytes: &[u8; HgId::len()]) -> HgId {
HgId::from_byte_array(bytes.clone())
}
}
pub trait WriteHgIdExt {
/// Write a ``HgId`` directly to a stream.
///
/// # Examples
///
/// ```
/// use types::hgid::{HgId, WriteHgIdExt};
/// let mut v = vec![];
///
/// let n = HgId::null_id();
/// v.write_hgid(&n).expect("writing a hgid to a vec should work");
///
/// assert_eq!(v, vec![0; HgId::len()]);
/// ```
fn write_hgid(&mut self, value: &HgId) -> io::Result<()>;
}
impl<W: Write + ?Sized> WriteHgIdExt for W {
fn write_hgid(&mut self, value: &HgId) -> io::Result<()> {
self.write_all(value.as_ref())
}
}
pub trait ReadHgIdExt {
/// Read a ``HgId`` directly from a stream.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use types::hgid::{HgId, ReadHgIdExt};
/// let mut v = vec![0; HgId::len()];
/// let mut c = Cursor::new(v);
///
/// let n = c.read_hgid().expect("reading a hgid from a vec should work");
///
/// assert_eq!(&n, HgId::null_id());
/// ```
fn read_hgid(&mut self) -> io::Result<HgId>;
}
impl<R: Read + ?Sized> ReadHgIdExt for R {
fn read_hgid(&mut self) -> io::Result<HgId> {
let mut bytes = [0; HgId::len()];
self.read_exact(&mut bytes)?;
Ok(HgId::from_byte_array(bytes))
}
}
#[cfg(any(test, feature = "for-tests"))]
pub mod mocks {
use super::HgId;
pub const ONES: HgId = HgId::from_byte_array([0x11; HgId::len()]);
pub const TWOS: HgId = HgId::from_byte_array([0x22; HgId::len()]);
pub const THREES: HgId = HgId::from_byte_array([0x33; HgId::len()]);
pub const FOURS: HgId = HgId::from_byte_array([0x44; HgId::len()]);
pub const FIVES: HgId = HgId::from_byte_array([0x55; HgId::len()]);
pub const SIXES: HgId = HgId::from_byte_array([0x66; HgId::len()]);
pub const SEVENS: HgId = HgId::from_byte_array([0x77; HgId::len()]);
pub const EIGHTS: HgId = HgId::from_byte_array([0x88; HgId::len()]);
pub const NINES: HgId = HgId::from_byte_array([0x99; HgId::len()]);
pub const AS: HgId = HgId::from_byte_array([0xAA; HgId::len()]);
pub const BS: HgId = HgId::from_byte_array([0xAB; HgId::len()]);
pub const CS: HgId = HgId::from_byte_array([0xCC; HgId::len()]);
pub const DS: HgId = HgId::from_byte_array([0xDD; HgId::len()]);
pub const ES: HgId = HgId::from_byte_array([0xEE; HgId::len()]);
pub const FS: HgId = HgId::from_byte_array([0xFF; HgId::len()]);
}
#[cfg(test)]
mod tests {
use quickcheck::quickcheck;
use serde::Deserialize;
use serde::Serialize;
use super::*;
#[test]
fn test_incorrect_length() {
HgId::from_slice(&[0u8; 25]).expect_err("bad slice length");
}
#[test]
fn test_serde_with_using_cbor() {
// Note: this test is for CBOR. Other serializers like mincode
// or Thrift would have different backwards compatibility!
use serde_cbor::de::from_slice as decode;
use serde_cbor::ser::to_vec as encode;
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Orig(#[serde(with = "crate::serde_with::hgid::tuple")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Bytes(#[serde(with = "crate::serde_with::hgid::bytes")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Hex(#[serde(with = "crate::serde_with::hgid::hex")] HgId);
let id: HgId = mocks::CS;
let orig = Orig(id);
let bytes = Bytes(id);
let hex = Hex(id);
let cbor_orig = encode(&orig).unwrap();
let cbor_bytes = encode(&bytes).unwrap();
let cbor_hex = encode(&hex).unwrap();
assert_eq!(cbor_orig.len(), 41);
assert_eq!(cbor_bytes.len(), 21);
assert_eq!(cbor_hex.len(), 42);
// Orig cannot decode bytes or hex.
assert_eq!(decode::<Orig>(&cbor_orig).unwrap().0, id);
decode::<Orig>(&cbor_bytes).unwrap_err();
decode::<Orig>(&cbor_hex).unwrap_err();
// Bytes can decode all 3 formats.
assert_eq!(decode::<Bytes>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_hex).unwrap().0, id);
// Hex can decode all 3 formats.
assert_eq!(decode::<Hex>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_hex).unwrap().0, id);
}
quickcheck! {
fn test_from_slice(hgid: HgId) -> bool {
hgid == HgId::from_slice(hgid.as_ref()).expect("from_slice")
}
}
}
| {
// Parents must be hashed in sorted order.
let (p1, p2) = match parents.into_nodes() {
(p1, p2) if p1 > p2 => (p2, p1),
(p1, p2) => (p1, p2),
};
let mut hasher = Sha1::new();
hasher.input(p1.as_ref());
hasher.input(p2.as_ref());
hasher.input(data);
let hash: [u8; 20] = hasher.result().into();
HgId::from_byte_array(hash)
} | identifier_body |
hgid.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#[cfg(any(test, feature = "for-tests"))]
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::io::{self};
#[cfg(any(test, feature = "for-tests"))]
use rand::RngCore;
use sha1::Digest;
use sha1::Sha1;
use crate::hash::AbstractHashType;
use crate::hash::HashTypeInfo;
use crate::parents::Parents;
/// A 20-byte identifier, often a hash. Nodes are used to uniquely identify
/// commits, file versions, and many other things.
///
///
/// # Serde Serialization
///
/// The `serde_with` module allows customization on `HgId` serialization:
/// - `#[serde(with = "types::serde_with::hgid::bytes")]` (current default)
/// - `#[serde(with = "types::serde_with::hgid::hex")]`
/// - `#[serde(with = "types::serde_with::hgid::tuple")]`
///
/// Using them can change the size or the type of serialization result:
///
/// | lib \ serde_with | hgid::tuple | hgid::bytes | hgid::hex |
/// |------------------|---------------------|--------------|-----------|
/// | mincode | 20 bytes | 21 bytes | 41 bytes |
/// | cbor | 21 to 41 bytes [1] | 21 bytes | 42 bytes |
/// | json | 41 to 81+ bytes [1] | invalid [2] | 42 bytes |
/// | python | Tuple[int] | bytes | str |
///
/// In general,
/// - `hgid::tuple` only works best for `mincode`.
/// - `hgid::bytes` works best for cbor, python.
/// - `hgid::hex` is useful for `json`, or other text-only formats.
///
/// Compatibility note:
/// - `hgid::tuple` cannot decode `hgid::bytes` or `hgid::hex` data.
/// - `hgid::hex` can decode `hgid::bytes` data, or vice-versa. They share a
/// same `deserialize` implementation.
/// - `hgid::hex` or `hgid::bytes` might be able to decode `hgid::tuple` data,
/// depending on how tuples are serialized. For example, mincode
/// does not add framing for tuples, so `hgid::bytes` cannot decode
/// `hgid::tuple` data; cbor adds framing for tuples, so `hgid::bytes`
/// can decode `hgid::tuple` data.
///
/// [1]: Depends on actual data of `HgId`.
/// [2]: JSON only supports utf-8 data.
pub type HgId = AbstractHashType<HgIdTypeInfo, 20>;
pub struct HgIdTypeInfo;
impl HashTypeInfo for HgIdTypeInfo {
const HASH_TYPE_NAME: &'static str = "HgId";
}
/// The nullid (0x00) is used throughout Mercurial to represent "None".
/// (For example, a commit will have a nullid p2, if it has no second parent).
pub const NULL_ID: HgId = HgId::from_byte_array([0; HgId::len()]);
/// The hard-coded 'working copy parent' Mercurial id.
pub const WDIR_ID: HgId = HgId::from_byte_array([0xff; HgId::len()]);
impl HgId {
pub fn null_id() -> &'static Self {
&NULL_ID
}
pub fn is_null(&self) -> bool {
self == &NULL_ID
}
pub const fn wdir_id() -> &'static Self {
&WDIR_ID
}
pub fn is_wdir(&self) -> bool {
self == &WDIR_ID
}
pub fn from_content(data: &[u8], parents: Parents) -> Self {
// Parents must be hashed in sorted order.
let (p1, p2) = match parents.into_nodes() {
(p1, p2) if p1 > p2 => (p2, p1),
(p1, p2) => (p1, p2),
};
let mut hasher = Sha1::new();
hasher.input(p1.as_ref());
hasher.input(p2.as_ref());
hasher.input(data);
let hash: [u8; 20] = hasher.result().into();
HgId::from_byte_array(hash)
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random(rng: &mut dyn RngCore) -> Self {
let mut bytes = [0; HgId::len()];
rng.fill_bytes(&mut bytes);
loop {
let hgid = HgId::from(&bytes);
if !hgid.is_null() {
return hgid;
}
}
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random_distinct(rng: &mut dyn RngCore, count: usize) -> Vec<Self> {
let mut nodes = Vec::new();
let mut nodeset = HashSet::new();
while nodes.len() < count {
let hgid = HgId::random(rng);
if !nodeset.contains(&hgid) |
}
nodes
}
}
impl<'a> From<&'a [u8; HgId::len()]> for HgId {
fn from(bytes: &[u8; HgId::len()]) -> HgId {
HgId::from_byte_array(bytes.clone())
}
}
pub trait WriteHgIdExt {
/// Write a ``HgId`` directly to a stream.
///
/// # Examples
///
/// ```
/// use types::hgid::{HgId, WriteHgIdExt};
/// let mut v = vec![];
///
/// let n = HgId::null_id();
/// v.write_hgid(&n).expect("writing a hgid to a vec should work");
///
/// assert_eq!(v, vec![0; HgId::len()]);
/// ```
fn write_hgid(&mut self, value: &HgId) -> io::Result<()>;
}
impl<W: Write + ?Sized> WriteHgIdExt for W {
fn write_hgid(&mut self, value: &HgId) -> io::Result<()> {
self.write_all(value.as_ref())
}
}
pub trait ReadHgIdExt {
/// Read a ``HgId`` directly from a stream.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use types::hgid::{HgId, ReadHgIdExt};
/// let mut v = vec![0; HgId::len()];
/// let mut c = Cursor::new(v);
///
/// let n = c.read_hgid().expect("reading a hgid from a vec should work");
///
/// assert_eq!(&n, HgId::null_id());
/// ```
fn read_hgid(&mut self) -> io::Result<HgId>;
}
impl<R: Read + ?Sized> ReadHgIdExt for R {
fn read_hgid(&mut self) -> io::Result<HgId> {
let mut bytes = [0; HgId::len()];
self.read_exact(&mut bytes)?;
Ok(HgId::from_byte_array(bytes))
}
}
#[cfg(any(test, feature = "for-tests"))]
pub mod mocks {
use super::HgId;
pub const ONES: HgId = HgId::from_byte_array([0x11; HgId::len()]);
pub const TWOS: HgId = HgId::from_byte_array([0x22; HgId::len()]);
pub const THREES: HgId = HgId::from_byte_array([0x33; HgId::len()]);
pub const FOURS: HgId = HgId::from_byte_array([0x44; HgId::len()]);
pub const FIVES: HgId = HgId::from_byte_array([0x55; HgId::len()]);
pub const SIXES: HgId = HgId::from_byte_array([0x66; HgId::len()]);
pub const SEVENS: HgId = HgId::from_byte_array([0x77; HgId::len()]);
pub const EIGHTS: HgId = HgId::from_byte_array([0x88; HgId::len()]);
pub const NINES: HgId = HgId::from_byte_array([0x99; HgId::len()]);
pub const AS: HgId = HgId::from_byte_array([0xAA; HgId::len()]);
pub const BS: HgId = HgId::from_byte_array([0xAB; HgId::len()]);
pub const CS: HgId = HgId::from_byte_array([0xCC; HgId::len()]);
pub const DS: HgId = HgId::from_byte_array([0xDD; HgId::len()]);
pub const ES: HgId = HgId::from_byte_array([0xEE; HgId::len()]);
pub const FS: HgId = HgId::from_byte_array([0xFF; HgId::len()]);
}
#[cfg(test)]
mod tests {
use quickcheck::quickcheck;
use serde::Deserialize;
use serde::Serialize;
use super::*;
#[test]
fn test_incorrect_length() {
HgId::from_slice(&[0u8; 25]).expect_err("bad slice length");
}
#[test]
fn test_serde_with_using_cbor() {
// Note: this test is for CBOR. Other serializers like mincode
// or Thrift would have different backwards compatibility!
use serde_cbor::de::from_slice as decode;
use serde_cbor::ser::to_vec as encode;
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Orig(#[serde(with = "crate::serde_with::hgid::tuple")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Bytes(#[serde(with = "crate::serde_with::hgid::bytes")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Hex(#[serde(with = "crate::serde_with::hgid::hex")] HgId);
let id: HgId = mocks::CS;
let orig = Orig(id);
let bytes = Bytes(id);
let hex = Hex(id);
let cbor_orig = encode(&orig).unwrap();
let cbor_bytes = encode(&bytes).unwrap();
let cbor_hex = encode(&hex).unwrap();
assert_eq!(cbor_orig.len(), 41);
assert_eq!(cbor_bytes.len(), 21);
assert_eq!(cbor_hex.len(), 42);
// Orig cannot decode bytes or hex.
assert_eq!(decode::<Orig>(&cbor_orig).unwrap().0, id);
decode::<Orig>(&cbor_bytes).unwrap_err();
decode::<Orig>(&cbor_hex).unwrap_err();
// Bytes can decode all 3 formats.
assert_eq!(decode::<Bytes>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_hex).unwrap().0, id);
// Hex can decode all 3 formats.
assert_eq!(decode::<Hex>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_hex).unwrap().0, id);
}
quickcheck! {
fn test_from_slice(hgid: HgId) -> bool {
hgid == HgId::from_slice(hgid.as_ref()).expect("from_slice")
}
}
}
| {
nodeset.insert(hgid.clone());
nodes.push(hgid);
} | conditional_block |
hgid.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#[cfg(any(test, feature = "for-tests"))]
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::io::{self};
#[cfg(any(test, feature = "for-tests"))]
use rand::RngCore;
use sha1::Digest;
use sha1::Sha1;
use crate::hash::AbstractHashType;
use crate::hash::HashTypeInfo;
use crate::parents::Parents;
/// A 20-byte identifier, often a hash. Nodes are used to uniquely identify
/// commits, file versions, and many other things.
///
///
/// # Serde Serialization
///
/// The `serde_with` module allows customization on `HgId` serialization:
/// - `#[serde(with = "types::serde_with::hgid::bytes")]` (current default)
/// - `#[serde(with = "types::serde_with::hgid::hex")]`
/// - `#[serde(with = "types::serde_with::hgid::tuple")]`
///
/// Using them can change the size or the type of serialization result:
///
/// | lib \ serde_with | hgid::tuple | hgid::bytes | hgid::hex |
/// |------------------|---------------------|--------------|-----------|
/// | mincode | 20 bytes | 21 bytes | 41 bytes |
/// | cbor | 21 to 41 bytes [1] | 21 bytes | 42 bytes |
/// | json | 41 to 81+ bytes [1] | invalid [2] | 42 bytes |
/// | python | Tuple[int] | bytes | str |
///
/// In general,
/// - `hgid::tuple` only works best for `mincode`.
/// - `hgid::bytes` works best for cbor, python.
/// - `hgid::hex` is useful for `json`, or other text-only formats.
///
/// Compatibility note:
/// - `hgid::tuple` cannot decode `hgid::bytes` or `hgid::hex` data.
/// - `hgid::hex` can decode `hgid::bytes` data, or vice-versa. They share a
/// same `deserialize` implementation.
/// - `hgid::hex` or `hgid::bytes` might be able to decode `hgid::tuple` data,
/// depending on how tuples are serialized. For example, mincode
/// does not add framing for tuples, so `hgid::bytes` cannot decode
/// `hgid::tuple` data; cbor adds framing for tuples, so `hgid::bytes`
/// can decode `hgid::tuple` data.
///
/// [1]: Depends on actual data of `HgId`.
/// [2]: JSON only supports utf-8 data.
pub type HgId = AbstractHashType<HgIdTypeInfo, 20>;
pub struct HgIdTypeInfo;
impl HashTypeInfo for HgIdTypeInfo {
const HASH_TYPE_NAME: &'static str = "HgId";
}
/// The nullid (0x00) is used throughout Mercurial to represent "None".
/// (For example, a commit will have a nullid p2, if it has no second parent).
pub const NULL_ID: HgId = HgId::from_byte_array([0; HgId::len()]);
/// The hard-coded 'working copy parent' Mercurial id.
pub const WDIR_ID: HgId = HgId::from_byte_array([0xff; HgId::len()]);
impl HgId {
pub fn null_id() -> &'static Self {
&NULL_ID
}
pub fn is_null(&self) -> bool {
self == &NULL_ID
}
pub const fn wdir_id() -> &'static Self {
&WDIR_ID
}
pub fn is_wdir(&self) -> bool {
self == &WDIR_ID
}
pub fn from_content(data: &[u8], parents: Parents) -> Self {
// Parents must be hashed in sorted order.
let (p1, p2) = match parents.into_nodes() {
(p1, p2) if p1 > p2 => (p2, p1),
(p1, p2) => (p1, p2),
};
let mut hasher = Sha1::new();
hasher.input(p1.as_ref());
hasher.input(p2.as_ref());
hasher.input(data);
let hash: [u8; 20] = hasher.result().into();
HgId::from_byte_array(hash)
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random(rng: &mut dyn RngCore) -> Self {
let mut bytes = [0; HgId::len()];
rng.fill_bytes(&mut bytes);
loop {
let hgid = HgId::from(&bytes);
if !hgid.is_null() {
return hgid;
}
}
}
#[cfg(any(test, feature = "for-tests"))]
pub fn random_distinct(rng: &mut dyn RngCore, count: usize) -> Vec<Self> {
let mut nodes = Vec::new();
let mut nodeset = HashSet::new();
while nodes.len() < count {
let hgid = HgId::random(rng);
if !nodeset.contains(&hgid) {
nodeset.insert(hgid.clone());
nodes.push(hgid);
}
}
nodes
}
}
impl<'a> From<&'a [u8; HgId::len()]> for HgId {
fn from(bytes: &[u8; HgId::len()]) -> HgId {
HgId::from_byte_array(bytes.clone())
}
}
pub trait WriteHgIdExt {
/// Write a ``HgId`` directly to a stream.
///
/// # Examples
///
/// ```
/// use types::hgid::{HgId, WriteHgIdExt};
/// let mut v = vec![];
///
/// let n = HgId::null_id();
/// v.write_hgid(&n).expect("writing a hgid to a vec should work");
///
/// assert_eq!(v, vec![0; HgId::len()]);
/// ```
fn write_hgid(&mut self, value: &HgId) -> io::Result<()>;
}
impl<W: Write + ?Sized> WriteHgIdExt for W {
fn write_hgid(&mut self, value: &HgId) -> io::Result<()> {
self.write_all(value.as_ref())
}
}
pub trait ReadHgIdExt {
/// Read a ``HgId`` directly from a stream.
///
/// # Examples
///
/// ```
/// use std::io::Cursor;
/// use types::hgid::{HgId, ReadHgIdExt};
/// let mut v = vec![0; HgId::len()];
/// let mut c = Cursor::new(v);
///
/// let n = c.read_hgid().expect("reading a hgid from a vec should work");
///
/// assert_eq!(&n, HgId::null_id());
/// ```
fn read_hgid(&mut self) -> io::Result<HgId>;
}
impl<R: Read + ?Sized> ReadHgIdExt for R {
fn read_hgid(&mut self) -> io::Result<HgId> {
let mut bytes = [0; HgId::len()];
self.read_exact(&mut bytes)?;
Ok(HgId::from_byte_array(bytes))
}
}
#[cfg(any(test, feature = "for-tests"))]
pub mod mocks {
use super::HgId;
pub const ONES: HgId = HgId::from_byte_array([0x11; HgId::len()]);
pub const TWOS: HgId = HgId::from_byte_array([0x22; HgId::len()]);
pub const THREES: HgId = HgId::from_byte_array([0x33; HgId::len()]);
pub const FOURS: HgId = HgId::from_byte_array([0x44; HgId::len()]);
pub const FIVES: HgId = HgId::from_byte_array([0x55; HgId::len()]);
pub const SIXES: HgId = HgId::from_byte_array([0x66; HgId::len()]);
pub const SEVENS: HgId = HgId::from_byte_array([0x77; HgId::len()]);
pub const EIGHTS: HgId = HgId::from_byte_array([0x88; HgId::len()]);
pub const NINES: HgId = HgId::from_byte_array([0x99; HgId::len()]);
pub const AS: HgId = HgId::from_byte_array([0xAA; HgId::len()]);
pub const BS: HgId = HgId::from_byte_array([0xAB; HgId::len()]);
pub const CS: HgId = HgId::from_byte_array([0xCC; HgId::len()]);
pub const DS: HgId = HgId::from_byte_array([0xDD; HgId::len()]);
pub const ES: HgId = HgId::from_byte_array([0xEE; HgId::len()]);
pub const FS: HgId = HgId::from_byte_array([0xFF; HgId::len()]);
}
#[cfg(test)] |
use super::*;
#[test]
fn test_incorrect_length() {
HgId::from_slice(&[0u8; 25]).expect_err("bad slice length");
}
#[test]
fn test_serde_with_using_cbor() {
// Note: this test is for CBOR. Other serializers like mincode
// or Thrift would have different backwards compatibility!
use serde_cbor::de::from_slice as decode;
use serde_cbor::ser::to_vec as encode;
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Orig(#[serde(with = "crate::serde_with::hgid::tuple")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Bytes(#[serde(with = "crate::serde_with::hgid::bytes")] HgId);
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
struct Hex(#[serde(with = "crate::serde_with::hgid::hex")] HgId);
let id: HgId = mocks::CS;
let orig = Orig(id);
let bytes = Bytes(id);
let hex = Hex(id);
let cbor_orig = encode(&orig).unwrap();
let cbor_bytes = encode(&bytes).unwrap();
let cbor_hex = encode(&hex).unwrap();
assert_eq!(cbor_orig.len(), 41);
assert_eq!(cbor_bytes.len(), 21);
assert_eq!(cbor_hex.len(), 42);
// Orig cannot decode bytes or hex.
assert_eq!(decode::<Orig>(&cbor_orig).unwrap().0, id);
decode::<Orig>(&cbor_bytes).unwrap_err();
decode::<Orig>(&cbor_hex).unwrap_err();
// Bytes can decode all 3 formats.
assert_eq!(decode::<Bytes>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Bytes>(&cbor_hex).unwrap().0, id);
// Hex can decode all 3 formats.
assert_eq!(decode::<Hex>(&cbor_orig).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_bytes).unwrap().0, id);
assert_eq!(decode::<Hex>(&cbor_hex).unwrap().0, id);
}
quickcheck! {
fn test_from_slice(hgid: HgId) -> bool {
hgid == HgId::from_slice(hgid.as_ref()).expect("from_slice")
}
}
} | mod tests {
use quickcheck::quickcheck;
use serde::Deserialize;
use serde::Serialize; | random_line_split |
plot_string_subst_bar.py | # Plotting performance of string_subst_.py scripts
# bar chart of relative comparison with variances as error bars
import numpy as np
import matplotlib.pyplot as plt
| scripts = ['string_subst_1.py', 'string_subst_2.py', 'string_subst_3.py']
x_pos = np.arange(len(scripts))
plt.bar(x_pos, performance, yerr=variance, align='center', alpha=0.5)
plt.xticks(x_pos, scripts)
plt.axhline(y=1, linestyle='--', color='black')
plt.ylim([0,12])
plt.ylabel('rel. performance gain')
plt.title('String substitution - Speed improvements')
#plt.show()
plt.savefig('PNGs/string_subst_bar.png') | performance = [10.3882388499416,1,10.3212281215746]
variance = [0.790435196936213,0,0.827207394592818] | random_line_split |
_tools.py | # Copyright (c) 2009,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""A collection of general purpose tools for reading files."""
from __future__ import print_function
import bz2
from collections import namedtuple
import gzip
import logging
from struct import Struct
import zlib
from ..units import UndefinedUnitError, units
log = logging.getLogger(__name__)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
bytearray_to_buff = buffer
except NameError:
bytearray_to_buff = memoryview
def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
if hasattr(filename, 'read'):
return filename
if filename.endswith('.bz2'):
return bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
return gzip.GzipFile(filename, 'rb')
else:
return open(filename, 'rb')
class UnitLinker(object):
r"""Wrap a :class:`metpy.io.cdm.Variable` and handle units.
Converts any attached unit attribute to a class:`pint.Unit`. It also handles converting
data returns to be instances of class:`pint.Quantity` rather than bare (unit-less) arrays.
"""
def __init__(self, var):
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
"""
self._var = var
try:
self._unit = units(self._var.units)
except (AttributeError, UndefinedUnitError):
self._unit = None
def __getitem__(self, ind):
"""Get data from the underlying variable and add units."""
ret = self._var[ind]
return ret if self._unit is None else ret * self._unit
def __getattr__(self, item):
"""Forward all attribute access onto underlying variable."""
return getattr(self._var, item)
@property
def units(self):
"""Access the units from the underlying variable as a :class:`pint.Quantity`."""
return self._unit
@units.setter
def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
self._unit = val
else:
self._unit = units(val)
class NamedStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields."""
def __init__(self, info, prefmt='', tuple_name=None):
"""Initialize the NamedStruct."""
if tuple_name is None:
tuple_name = 'NamedStruct'
names, fmts = zip(*info)
self.converters = {}
conv_off = 0
for ind, i in enumerate(info):
if len(i) > 2:
self.converters[ind - conv_off] = i[-1]
elif not i[0]: # Skip items with no name
conv_off += 1
self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
super(NamedStruct, self).__init__(prefmt + ''.join(f for f in fmts if f))
def _create(self, items):
if self.converters:
items = list(items)
for ind, conv in self.converters.items():
items[ind] = conv(items[ind])
if len(items) < len(self._tuple._fields):
items.extend([None] * (len(self._tuple._fields) - len(items)))
return self.make_tuple(*items)
def make_tuple(self, *args, **kwargs):
"""Construct the underlying tuple from values."""
return self._tuple(*args, **kwargs)
def | (self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(NamedStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Read bytes from a buffer and return as a namedtuple."""
return self._create(super(NamedStruct, self).unpack_from(buff, offset))
def unpack_file(self, fobj):
"""Unpack the next bytes from a file object."""
return self.unpack(fobj.read(self.size))
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields using dictionary access."""
def __init__(self, info, prefmt=''):
"""Initialize the DictStruct."""
names, formats = zip(*info)
# Remove empty names
self._names = [n for n in names if n]
super(DictStruct, self).__init__(prefmt + ''.join(f for f in formats if f))
def _create(self, items):
return dict(zip(self._names, items))
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(DictStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Unpack the next bytes from a file object."""
return self._create(super(DictStruct, self).unpack_from(buff, offset))
class Enum(object):
"""Map values to specific strings."""
def __init__(self, *args, **kwargs):
"""Initialize the mapping."""
# Assign values for args in order starting at 0
self.val_map = {ind: a for ind, a in enumerate(args)}
# Invert the kwargs dict so that we can map from value to name
self.val_map.update(zip(kwargs.values(), kwargs.keys()))
def __call__(self, val):
"""Map an integer to the string representation."""
return self.val_map.get(val, 'Unknown ({})'.format(val))
class Bits(object):
"""Breaks an integer into a specified number of True/False bits."""
def __init__(self, num_bits):
"""Initialize the number of bits."""
self._bits = range(num_bits)
def __call__(self, val):
"""Convert the integer to the list of True/False values."""
return [bool((val >> i) & 0x1) for i in self._bits]
class BitField(object):
"""Convert an integer to a string for each bit."""
def __init__(self, *names):
"""Initialize the list of named bits."""
self._names = names
def __call__(self, val):
"""Return a list with a string for each True bit in the integer."""
if not val:
return None
bits = []
for n in self._names:
if val & 0x1:
bits.append(n)
val >>= 1
if not val:
break
# Return whole list if empty or multiple items, otherwise just single item
return bits[0] if len(bits) == 1 else bits
class Array(object):
"""Use a Struct as a callable to unpack a bunch of bytes as a list."""
def __init__(self, fmt):
"""Initialize the Struct unpacker."""
self._struct = Struct(fmt)
def __call__(self, buf):
"""Perform the actual unpacking."""
return list(self._struct.unpack(buf))
class IOBuffer(object):
"""Holds bytes from a buffer to simplify parsing and random access."""
def __init__(self, source):
"""Initialize the IOBuffer with the source data."""
self._data = bytearray(source)
self._offset = 0
self.clear_marks()
@classmethod
def fromfile(cls, fobj):
"""Initialize the IOBuffer with the contents of the file object."""
return cls(fobj.read())
def set_mark(self):
"""Mark the current location and return its id so that the buffer can return later."""
self._bookmarks.append(self._offset)
return len(self._bookmarks) - 1
def jump_to(self, mark, offset=0):
"""Jump to a previously set mark."""
self._offset = self._bookmarks[mark] + offset
def offset_from(self, mark):
"""Calculate the current offset relative to a marked location."""
return self._offset - self._bookmarks[mark]
def clear_marks(self):
"""Clear all marked locations."""
self._bookmarks = []
def splice(self, mark, newdata):
"""Replace the data after the marked location with the specified data."""
self.jump_to(mark)
self._data = self._data[:self._offset] + bytearray(newdata)
def read_struct(self, struct_class):
"""Parse and return a structure from the current buffer offset."""
struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
self.skip(struct_class.size)
return struct
def read_func(self, func, num_bytes=None):
"""Parse data from the current buffer offset using a function."""
# only advance if func succeeds
res = func(self.get_next(num_bytes))
self.skip(num_bytes)
return res
def read_ascii(self, num_bytes=None):
"""Return the specified bytes as ascii-formatted text."""
return self.read(num_bytes).decode('ascii')
def read_binary(self, num, item_type='B'):
"""Parse the current buffer offset as the specified code."""
if 'B' in item_type:
return self.read(num)
if item_type[0] in ('@', '=', '<', '>', '!'):
order = item_type[0]
item_type = item_type[1:]
else:
order = '@'
return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))
def read_int(self, code):
"""Parse the current buffer offset as the specified integer code."""
return self.read_struct(Struct(code))[0]
def read(self, num_bytes=None):
"""Read and return the specified bytes from the buffer."""
res = self.get_next(num_bytes)
self.skip(len(res))
return res
def get_next(self, num_bytes=None):
"""Get the next bytes in the buffer without modifying the offset."""
if num_bytes is None:
return self._data[self._offset:]
else:
return self._data[self._offset:self._offset + num_bytes]
def skip(self, num_bytes):
"""Jump the ahead the specified bytes in the buffer."""
if num_bytes is None:
self._offset = len(self._data)
else:
self._offset += num_bytes
def check_remains(self, num_bytes):
"""Check that the number of bytes specified remains in the buffer."""
return len(self._data[self._offset:]) == num_bytes
def truncate(self, num_bytes):
"""Remove the specified number of bytes from the end of the buffer."""
self._data = self._data[:-num_bytes]
def at_end(self):
"""Return whether the buffer has reached the end of data."""
return self._offset >= len(self._data)
def __getitem__(self, item):
"""Return the data at the specified location."""
return self._data[item]
def __str__(self):
"""Return a string representation of the IOBuffer."""
return 'Size: {} Offset: {}'.format(len(self._data), self._offset)
def __len__(self):
"""Return the amount of data in the buffer."""
return len(self._data)
def zlib_decompress_all_frames(data):
"""Decompress all frames of zlib-compressed bytes.
Repeatedly tries to decompress `data` until all data are decompressed, or decompression
fails. This will skip over bytes that are not compressed with zlib.
Parameters
----------
data : bytearray or bytes
Binary data compressed using zlib.
Returns
-------
bytearray
All decompressed bytes
"""
frames = bytearray()
data = bytes(data)
while data:
decomp = zlib.decompressobj()
try:
frames.extend(decomp.decompress(data))
data = decomp.unused_data
except zlib.error:
frames.extend(data)
break
return frames
def bits_to_code(val):
"""Convert the number of bits to the proper code for unpacking."""
if val == 8:
return 'B'
elif val == 16:
return 'H'
else:
log.warning('Unsupported bit size: %s. Returning "B"', val)
return 'B'
# For debugging
def hexdump(buf, num_bytes, offset=0, width=32):
"""Perform a hexudmp of the buffer.
Returns the hexdump as a canonically-formatted string.
"""
ind = offset
end = offset + num_bytes
lines = []
while ind < end:
chunk = buf[ind:ind + width]
actual_width = len(chunk)
hexfmt = '{:02X}'
blocksize = 4
blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
# Need to get any partial lines
num_left = actual_width % blocksize # noqa: S001 Fix false alarm
if num_left:
blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
hexoutput = ' '.join(blocks)
printable = tuple(chunk)
lines.append(' '.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))),
str(ind - offset).ljust(len(str(end))),
''.join(chr(c) if 31 < c < 128 else '.' for c in chunk))))
ind += width
return '\n'.join(lines)
| unpack | identifier_name |
_tools.py | # Copyright (c) 2009,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""A collection of general purpose tools for reading files."""
from __future__ import print_function
import bz2
from collections import namedtuple
import gzip
import logging
from struct import Struct
import zlib
from ..units import UndefinedUnitError, units
log = logging.getLogger(__name__)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
bytearray_to_buff = buffer
except NameError:
bytearray_to_buff = memoryview
def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
if hasattr(filename, 'read'):
return filename
if filename.endswith('.bz2'):
return bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
return gzip.GzipFile(filename, 'rb')
else:
return open(filename, 'rb')
class UnitLinker(object):
r"""Wrap a :class:`metpy.io.cdm.Variable` and handle units.
Converts any attached unit attribute to a class:`pint.Unit`. It also handles converting
data returns to be instances of class:`pint.Quantity` rather than bare (unit-less) arrays.
"""
def __init__(self, var):
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
"""
self._var = var
try:
self._unit = units(self._var.units)
except (AttributeError, UndefinedUnitError):
self._unit = None
def __getitem__(self, ind):
"""Get data from the underlying variable and add units."""
ret = self._var[ind]
return ret if self._unit is None else ret * self._unit
def __getattr__(self, item):
"""Forward all attribute access onto underlying variable."""
return getattr(self._var, item)
@property
def units(self):
"""Access the units from the underlying variable as a :class:`pint.Quantity`."""
return self._unit
@units.setter
def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
self._unit = val
else:
self._unit = units(val)
class NamedStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields."""
def __init__(self, info, prefmt='', tuple_name=None):
"""Initialize the NamedStruct."""
if tuple_name is None:
tuple_name = 'NamedStruct'
names, fmts = zip(*info)
self.converters = {}
conv_off = 0
for ind, i in enumerate(info):
if len(i) > 2:
self.converters[ind - conv_off] = i[-1]
elif not i[0]: # Skip items with no name
conv_off += 1
self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
super(NamedStruct, self).__init__(prefmt + ''.join(f for f in fmts if f))
def _create(self, items):
if self.converters:
items = list(items)
for ind, conv in self.converters.items():
items[ind] = conv(items[ind])
if len(items) < len(self._tuple._fields):
items.extend([None] * (len(self._tuple._fields) - len(items)))
return self.make_tuple(*items)
def make_tuple(self, *args, **kwargs):
"""Construct the underlying tuple from values."""
return self._tuple(*args, **kwargs)
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(NamedStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Read bytes from a buffer and return as a namedtuple."""
return self._create(super(NamedStruct, self).unpack_from(buff, offset))
def unpack_file(self, fobj):
"""Unpack the next bytes from a file object."""
return self.unpack(fobj.read(self.size))
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
"""Parse bytes using :class:`Struct` but provide named fields using dictionary access."""
def __init__(self, info, prefmt=''):
"""Initialize the DictStruct."""
names, formats = zip(*info)
# Remove empty names
self._names = [n for n in names if n]
super(DictStruct, self).__init__(prefmt + ''.join(f for f in formats if f))
def _create(self, items):
return dict(zip(self._names, items))
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super(DictStruct, self).unpack(s))
def unpack_from(self, buff, offset=0):
"""Unpack the next bytes from a file object."""
return self._create(super(DictStruct, self).unpack_from(buff, offset))
class Enum(object):
"""Map values to specific strings."""
def __init__(self, *args, **kwargs):
"""Initialize the mapping."""
# Assign values for args in order starting at 0
self.val_map = {ind: a for ind, a in enumerate(args)}
# Invert the kwargs dict so that we can map from value to name
self.val_map.update(zip(kwargs.values(), kwargs.keys()))
def __call__(self, val):
"""Map an integer to the string representation."""
return self.val_map.get(val, 'Unknown ({})'.format(val))
class Bits(object):
"""Breaks an integer into a specified number of True/False bits."""
def __init__(self, num_bits):
"""Initialize the number of bits."""
self._bits = range(num_bits)
def __call__(self, val):
"""Convert the integer to the list of True/False values."""
return [bool((val >> i) & 0x1) for i in self._bits]
class BitField(object):
"""Convert an integer to a string for each bit."""
def __init__(self, *names):
"""Initialize the list of named bits."""
self._names = names
def __call__(self, val):
"""Return a list with a string for each True bit in the integer."""
if not val:
return None
bits = []
for n in self._names:
if val & 0x1:
bits.append(n)
val >>= 1
if not val:
break
# Return whole list if empty or multiple items, otherwise just single item
return bits[0] if len(bits) == 1 else bits
class Array(object):
"""Use a Struct as a callable to unpack a bunch of bytes as a list."""
def __init__(self, fmt):
"""Initialize the Struct unpacker."""
self._struct = Struct(fmt)
def __call__(self, buf):
"""Perform the actual unpacking."""
return list(self._struct.unpack(buf))
class IOBuffer(object):
"""Holds bytes from a buffer to simplify parsing and random access."""
def __init__(self, source):
"""Initialize the IOBuffer with the source data."""
self._data = bytearray(source)
self._offset = 0
self.clear_marks()
@classmethod
def fromfile(cls, fobj):
"""Initialize the IOBuffer with the contents of the file object."""
return cls(fobj.read())
def set_mark(self):
"""Mark the current location and return its id so that the buffer can return later."""
self._bookmarks.append(self._offset)
return len(self._bookmarks) - 1
def jump_to(self, mark, offset=0):
"""Jump to a previously set mark."""
self._offset = self._bookmarks[mark] + offset
def offset_from(self, mark):
"""Calculate the current offset relative to a marked location."""
return self._offset - self._bookmarks[mark]
def clear_marks(self):
"""Clear all marked locations."""
self._bookmarks = []
def splice(self, mark, newdata):
|
def read_struct(self, struct_class):
"""Parse and return a structure from the current buffer offset."""
struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
self.skip(struct_class.size)
return struct
def read_func(self, func, num_bytes=None):
"""Parse data from the current buffer offset using a function."""
# only advance if func succeeds
res = func(self.get_next(num_bytes))
self.skip(num_bytes)
return res
def read_ascii(self, num_bytes=None):
"""Return the specified bytes as ascii-formatted text."""
return self.read(num_bytes).decode('ascii')
def read_binary(self, num, item_type='B'):
"""Parse the current buffer offset as the specified code."""
if 'B' in item_type:
return self.read(num)
if item_type[0] in ('@', '=', '<', '>', '!'):
order = item_type[0]
item_type = item_type[1:]
else:
order = '@'
return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))
def read_int(self, code):
"""Parse the current buffer offset as the specified integer code."""
return self.read_struct(Struct(code))[0]
def read(self, num_bytes=None):
"""Read and return the specified bytes from the buffer."""
res = self.get_next(num_bytes)
self.skip(len(res))
return res
def get_next(self, num_bytes=None):
"""Get the next bytes in the buffer without modifying the offset."""
if num_bytes is None:
return self._data[self._offset:]
else:
return self._data[self._offset:self._offset + num_bytes]
def skip(self, num_bytes):
"""Jump the ahead the specified bytes in the buffer."""
if num_bytes is None:
self._offset = len(self._data)
else:
self._offset += num_bytes
def check_remains(self, num_bytes):
"""Check that the number of bytes specified remains in the buffer."""
return len(self._data[self._offset:]) == num_bytes
def truncate(self, num_bytes):
"""Remove the specified number of bytes from the end of the buffer."""
self._data = self._data[:-num_bytes]
def at_end(self):
"""Return whether the buffer has reached the end of data."""
return self._offset >= len(self._data)
def __getitem__(self, item):
"""Return the data at the specified location."""
return self._data[item]
def __str__(self):
"""Return a string representation of the IOBuffer."""
return 'Size: {} Offset: {}'.format(len(self._data), self._offset)
def __len__(self):
"""Return the amount of data in the buffer."""
return len(self._data)
def zlib_decompress_all_frames(data):
"""Decompress all frames of zlib-compressed bytes.
Repeatedly tries to decompress `data` until all data are decompressed, or decompression
fails. This will skip over bytes that are not compressed with zlib.
Parameters
----------
data : bytearray or bytes
Binary data compressed using zlib.
Returns
-------
bytearray
All decompressed bytes
"""
frames = bytearray()
data = bytes(data)
while data:
decomp = zlib.decompressobj()
try:
frames.extend(decomp.decompress(data))
data = decomp.unused_data
except zlib.error:
frames.extend(data)
break
return frames
def bits_to_code(val):
"""Convert the number of bits to the proper code for unpacking."""
if val == 8:
return 'B'
elif val == 16:
return 'H'
else:
log.warning('Unsupported bit size: %s. Returning "B"', val)
return 'B'
# For debugging
def hexdump(buf, num_bytes, offset=0, width=32):
"""Perform a hexudmp of the buffer.
Returns the hexdump as a canonically-formatted string.
"""
ind = offset
end = offset + num_bytes
lines = []
while ind < end:
chunk = buf[ind:ind + width]
actual_width = len(chunk)
hexfmt = '{:02X}'
blocksize = 4
blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
# Need to get any partial lines
num_left = actual_width % blocksize # noqa: S001 Fix false alarm
if num_left:
blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
hexoutput = ' '.join(blocks)
printable = tuple(chunk)
lines.append(' '.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))),
str(ind - offset).ljust(len(str(end))),
''.join(chr(c) if 31 < c < 128 else '.' for c in chunk))))
ind += width
return '\n'.join(lines)
| """Replace the data after the marked location with the specified data."""
self.jump_to(mark)
self._data = self._data[:self._offset] + bytearray(newdata) | identifier_body |
_tools.py | # Copyright (c) 2009,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""A collection of general purpose tools for reading files.""" | from collections import namedtuple
import gzip
import logging
from struct import Struct
import zlib
from ..units import UndefinedUnitError, units
log = logging.getLogger(__name__)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
bytearray_to_buff = buffer
except NameError:
bytearray_to_buff = memoryview
def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
if hasattr(filename, 'read'):
return filename
if filename.endswith('.bz2'):
return bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
return gzip.GzipFile(filename, 'rb')
else:
return open(filename, 'rb')
class UnitLinker(object):
r"""Wrap a :class:`metpy.io.cdm.Variable` and handle units.
Converts any attached unit attribute to a class:`pint.Unit`. It also handles converting
data returns to be instances of class:`pint.Quantity` rather than bare (unit-less) arrays.
"""
def __init__(self, var):
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
"""
self._var = var
try:
self._unit = units(self._var.units)
except (AttributeError, UndefinedUnitError):
self._unit = None
def __getitem__(self, ind):
"""Get data from the underlying variable and add units."""
ret = self._var[ind]
return ret if self._unit is None else ret * self._unit
def __getattr__(self, item):
"""Forward all attribute access onto underlying variable."""
return getattr(self._var, item)
@property
def units(self):
"""Access the units from the underlying variable as a :class:`pint.Quantity`."""
return self._unit
@units.setter
def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
self._unit = val
else:
self._unit = units(val)
class NamedStruct(Struct):
    """Parse bytes with :class:`Struct` but expose the results as named fields."""

    def __init__(self, info, prefmt='', tuple_name=None):
        """Build the combined format string and the backing namedtuple.

        Parameters
        ----------
        info : sequence of tuples
            Entries of the form ``(name, format)`` or ``(name, format, converter)``.
            Entries with an empty name contribute to the format string but
            produce no named field.
        prefmt : str, optional
            Prefix prepended to the combined format (e.g. a byte-order flag).
        tuple_name : str, optional
            Name for the generated namedtuple type; defaults to 'NamedStruct'.
        """
        if tuple_name is None:
            tuple_name = 'NamedStruct'
        names, fmts = zip(*info)
        # Converters are keyed by position in the unpacked value sequence.
        # Entries without a name are assumed to produce no unpacked value, so
        # their count is subtracted from the positions that follow them.
        self.converters = {}
        unnamed = 0
        for position, entry in enumerate(info):
            if len(entry) > 2:
                self.converters[position - unnamed] = entry[-1]
            elif not entry[0]:
                unnamed += 1
        self._tuple = namedtuple(tuple_name, ' '.join(filter(None, names)))
        super(NamedStruct, self).__init__(prefmt + ''.join(filter(None, fmts)))

    def _create(self, items):
        # Apply converters (if any) and pad missing trailing values with None.
        if not self.converters:
            return self.make_tuple(*items)
        values = list(items)
        for position, converter in self.converters.items():
            values[position] = converter(values[position])
        shortfall = len(self._tuple._fields) - len(values)
        if shortfall > 0:
            values.extend([None] * shortfall)
        return self.make_tuple(*values)

    def make_tuple(self, *args, **kwargs):
        """Construct the underlying namedtuple from values."""
        return self._tuple(*args, **kwargs)

    def unpack(self, s):
        """Parse bytes and return a namedtuple."""
        return self._create(Struct.unpack(self, s))

    def unpack_from(self, buff, offset=0):
        """Read bytes from a buffer at ``offset`` and return a namedtuple."""
        return self._create(Struct.unpack_from(self, buff, offset))

    def unpack_file(self, fobj):
        """Unpack the next bytes from a file object."""
        return self.unpack(fobj.read(self.size))
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
    """Parse bytes with :class:`Struct` but expose the fields via dictionary access.

    Exists for records with more than 255 fields, where namedtuple-based
    access hits CPython's argument-count limit.
    """

    def __init__(self, info, prefmt=''):
        """Build the combined format string and remember the non-empty names.

        ``info`` holds ``(name, format)`` pairs; entries with an empty name
        contribute to the format string but yield no dictionary key.
        """
        names, formats = zip(*info)
        # Drop empty names so they never become dictionary keys
        self._names = list(filter(None, names))
        super(DictStruct, self).__init__(prefmt + ''.join(filter(None, formats)))

    def _create(self, items):
        # Pair each named field with its unpacked value
        return {key: value for key, value in zip(self._names, items)}

    def unpack(self, s):
        """Parse bytes and return a dict mapping field name to value."""
        return self._create(Struct.unpack(self, s))

    def unpack_from(self, buff, offset=0):
        """Unpack from a buffer starting at ``offset`` and return a dict."""
        return self._create(Struct.unpack_from(self, buff, offset))
class Enum(object):
    """Map values to specific strings."""

    def __init__(self, *args, **kwargs):
        """Build the value-to-name mapping.

        Positional arguments are assigned values 0, 1, ... in order; keyword
        arguments map an explicit value to its name.
        """
        self.val_map = {}
        for value, name in enumerate(args):
            self.val_map[value] = name
        for name, value in kwargs.items():
            self.val_map[value] = name

    def __call__(self, val):
        """Return the string for ``val``, or a placeholder for unknown values."""
        try:
            return self.val_map[val]
        except KeyError:
            return 'Unknown ({})'.format(val)
class Bits(object):
    """Break an integer into a fixed number of True/False bit flags."""

    def __init__(self, num_bits):
        """Remember how many low-order bits to extract."""
        self._bits = range(num_bits)

    def __call__(self, val):
        """Return a list of booleans, least-significant bit first."""
        flags = []
        for shift in self._bits:
            flags.append(bool((val >> shift) & 0x1))
        return flags
class BitField(object):
    """Convert an integer to the name of each set bit."""

    def __init__(self, *names):
        """Store the bit names, least-significant bit first."""
        self._names = names

    def __call__(self, val):
        """Return the name for each True bit in ``val``.

        Returns ``None`` for a zero value, a bare string when exactly one
        named bit is set, and a (possibly empty) list otherwise.
        """
        if not val:
            return None
        matched = []
        for name in self._names:
            if val & 0x1:
                matched.append(name)
            val >>= 1
            # Stop early once no set bits remain
            if not val:
                break
        if len(matched) == 1:
            return matched[0]
        return matched
class Array(object):
    """Wrap a :class:`Struct` so a byte buffer unpacks directly to a list."""

    def __init__(self, fmt):
        """Compile the struct format string."""
        self._struct = Struct(fmt)

    def __call__(self, buf):
        """Unpack ``buf`` (which must match the format size) into a list."""
        return [item for item in self._struct.unpack(buf)]
class IOBuffer(object):
    """Hold bytes from a buffer to simplify parsing and random access."""

    def __init__(self, source):
        """Initialize the IOBuffer with the source data.

        Parameters
        ----------
        source : bytes-like
            The data to wrap for parsing.
        """
        self._data = bytearray(source)
        self._offset = 0
        self.clear_marks()

    @classmethod
    def fromfile(cls, fobj):
        """Initialize the IOBuffer with the contents of the file object."""
        return cls(fobj.read())

    def set_mark(self):
        """Mark the current location and return its id so that the buffer can return later."""
        self._bookmarks.append(self._offset)
        return len(self._bookmarks) - 1

    def jump_to(self, mark, offset=0):
        """Jump to a previously set mark, optionally with an additional offset."""
        self._offset = self._bookmarks[mark] + offset

    def offset_from(self, mark):
        """Calculate the current offset relative to a marked location."""
        return self._offset - self._bookmarks[mark]

    def clear_marks(self):
        """Clear all marked locations."""
        self._bookmarks = []

    def splice(self, mark, newdata):
        """Replace the data after the marked location with the specified data.

        Leaves the current offset at the marked location.
        """
        self.jump_to(mark)
        self._data = self._data[:self._offset] + bytearray(newdata)

    def read_struct(self, struct_class):
        """Parse and return a structure from the current buffer offset."""
        struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
        self.skip(struct_class.size)
        return struct

    def read_func(self, func, num_bytes=None):
        """Parse data from the current buffer offset using a function."""
        # Only advance if func succeeds; an exception propagates before skip()
        res = func(self.get_next(num_bytes))
        self.skip(num_bytes)
        return res

    def read_ascii(self, num_bytes=None):
        """Return the specified bytes as ascii-formatted text."""
        return self.read(num_bytes).decode('ascii')

    def read_binary(self, num, item_type='B'):
        """Parse the current buffer offset as ``num`` items of the specified code."""
        # Raw bytes need no Struct round-trip
        if 'B' in item_type:
            return self.read(num)
        if item_type[0] in ('@', '=', '<', '>', '!'):
            order = item_type[0]
            item_type = item_type[1:]
        else:
            order = '@'
        return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))

    def read_int(self, code):
        """Parse the current buffer offset as the specified integer code."""
        return self.read_struct(Struct(code))[0]

    def read(self, num_bytes=None):
        """Read and return the specified bytes (or the remainder) from the buffer."""
        res = self.get_next(num_bytes)
        self.skip(len(res))
        return res

    def get_next(self, num_bytes=None):
        """Get the next bytes in the buffer without modifying the offset."""
        if num_bytes is None:
            return self._data[self._offset:]
        else:
            return self._data[self._offset:self._offset + num_bytes]

    def skip(self, num_bytes):
        """Jump ahead the specified bytes in the buffer; ``None`` skips to the end."""
        if num_bytes is None:
            self._offset = len(self._data)
        else:
            self._offset += num_bytes

    def check_remains(self, num_bytes):
        """Check that exactly the specified number of bytes remains in the buffer."""
        return len(self._data[self._offset:]) == num_bytes

    def truncate(self, num_bytes):
        """Remove the specified number of bytes from the end of the buffer."""
        # Guard against num_bytes == 0: self._data[:-0] is self._data[:0],
        # which would wipe the entire buffer instead of being a no-op.
        if num_bytes:
            self._data = self._data[:-num_bytes]

    def at_end(self):
        """Return whether the buffer has reached the end of data."""
        return self._offset >= len(self._data)

    def __getitem__(self, item):
        """Return the data at the specified location."""
        return self._data[item]

    def __str__(self):
        """Return a string representation of the IOBuffer."""
        return 'Size: {} Offset: {}'.format(len(self._data), self._offset)

    def __len__(self):
        """Return the amount of data in the buffer."""
        return len(self._data)
def zlib_decompress_all_frames(data):
    """Decompress all frames of zlib-compressed bytes.

    Repeatedly tries to decompress ``data`` until all data are decompressed or
    decompression fails; bytes that are not zlib-compressed are passed through
    untouched.

    Parameters
    ----------
    data : bytearray or bytes
        Binary data compressed using zlib.

    Returns
    -------
    bytearray
        All decompressed bytes
    """
    result = bytearray()
    remaining = bytes(data)
    while remaining:
        # Fresh decompressor per frame; unused_data holds whatever follows
        # the end of the current frame.
        decompressor = zlib.decompressobj()
        try:
            result.extend(decompressor.decompress(remaining))
        except zlib.error:
            # Not a zlib frame: append the remaining bytes as-is and stop.
            result.extend(remaining)
            break
        remaining = decompressor.unused_data
    return result
def bits_to_code(val):
    """Convert the number of bits to the proper struct code for unpacking."""
    if val == 8:
        return 'B'
    if val == 16:
        return 'H'
    # Unknown size: warn and fall back to single bytes rather than failing.
    log.warning('Unsupported bit size: %s. Returning "B"', val)
    return 'B'
# For debugging
def hexdump(buf, num_bytes, offset=0, width=32):
"""Perform a hexudmp of the buffer.
Returns the hexdump as a canonically-formatted string.
"""
ind = offset
end = offset + num_bytes
lines = []
while ind < end:
chunk = buf[ind:ind + width]
actual_width = len(chunk)
hexfmt = '{:02X}'
blocksize = 4
blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
# Need to get any partial lines
num_left = actual_width % blocksize # noqa: S001 Fix false alarm
if num_left:
blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
hexoutput = ' '.join(blocks)
printable = tuple(chunk)
lines.append(' '.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))),
str(ind - offset).ljust(len(str(end))),
''.join(chr(c) if 31 < c < 128 else '.' for c in chunk))))
ind += width
return '\n'.join(lines) |
from __future__ import print_function
import bz2 | random_line_split |
_tools.py | # Copyright (c) 2009,2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""A collection of general purpose tools for reading files."""
from __future__ import print_function
import bz2
from collections import namedtuple
import gzip
import logging
from struct import Struct
import zlib
from ..units import UndefinedUnitError, units
log = logging.getLogger(__name__)
# This works around problems on early Python 2.7 where Struct.unpack_from() can't handle
# being given a bytearray; use memoryview on Python 3, since calling bytearray again isn't
# cheap.
try:
bytearray_to_buff = buffer
except NameError:
bytearray_to_buff = memoryview
def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
if hasattr(filename, 'read'):
return filename
if filename.endswith('.bz2'):
return bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
return gzip.GzipFile(filename, 'rb')
else:
return open(filename, 'rb')
class UnitLinker(object):
r"""Wrap a :class:`metpy.io.cdm.Variable` and handle units.
Converts any attached unit attribute to a class:`pint.Unit`. It also handles converting
data returns to be instances of class:`pint.Quantity` rather than bare (unit-less) arrays.
"""
def __init__(self, var):
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
"""
self._var = var
try:
self._unit = units(self._var.units)
except (AttributeError, UndefinedUnitError):
self._unit = None
def __getitem__(self, ind):
"""Get data from the underlying variable and add units."""
ret = self._var[ind]
return ret if self._unit is None else ret * self._unit
def __getattr__(self, item):
"""Forward all attribute access onto underlying variable."""
return getattr(self._var, item)
@property
def units(self):
"""Access the units from the underlying variable as a :class:`pint.Quantity`."""
return self._unit
@units.setter
def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
|
else:
self._unit = units(val)
class NamedStruct(Struct):
    """Parse bytes using :class:`Struct` but provide named fields."""
    def __init__(self, info, prefmt='', tuple_name=None):
        """Initialize the NamedStruct.

        Parameters
        ----------
        info : sequence of tuples
            Entries of the form ``(name, format)`` or ``(name, format, converter)``;
            an entry with an empty name contributes to the format string but
            produces no named field.
        prefmt : str, optional
            Prefix for the combined struct format (e.g. byte order).
        tuple_name : str, optional
            Name for the generated namedtuple; defaults to 'NamedStruct'.
        """
        if tuple_name is None:
            tuple_name = 'NamedStruct'
        names, fmts = zip(*info)
        self.converters = {}
        conv_off = 0
        # Converters are keyed by position in the unpacked values; unnamed
        # entries are offset out (presumably pad codes that yield no unpacked
        # value — TODO confirm against callers).
        for ind, i in enumerate(info):
            if len(i) > 2:
                self.converters[ind - conv_off] = i[-1]
            elif not i[0]:  # Skip items with no name
                conv_off += 1
        self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
        super(NamedStruct, self).__init__(prefmt + ''.join(f for f in fmts if f))
    def _create(self, items):
        # Apply converters, then pad trailing missing values with None so the
        # namedtuple can still be built.
        if self.converters:
            items = list(items)
            for ind, conv in self.converters.items():
                items[ind] = conv(items[ind])
            if len(items) < len(self._tuple._fields):
                items.extend([None] * (len(self._tuple._fields) - len(items)))
        return self.make_tuple(*items)
    def make_tuple(self, *args, **kwargs):
        """Construct the underlying namedtuple from values."""
        return self._tuple(*args, **kwargs)
    def unpack(self, s):
        """Parse bytes and return a namedtuple."""
        return self._create(super(NamedStruct, self).unpack(s))
    def unpack_from(self, buff, offset=0):
        """Read bytes from a buffer and return as a namedtuple."""
        return self._create(super(NamedStruct, self).unpack_from(buff, offset))
    def unpack_file(self, fobj):
        """Unpack the next bytes from a file object."""
        return self.unpack(fobj.read(self.size))
# This works around times when we have more than 255 items and can't use
# NamedStruct. This is a CPython limit for arguments.
class DictStruct(Struct):
    """Parse bytes using :class:`Struct` but provide named fields using dictionary access."""
    def __init__(self, info, prefmt=''):
        """Initialize the DictStruct.

        ``info`` holds ``(name, format)`` pairs; entries with an empty name
        contribute to the format string but produce no dictionary key
        (presumably pad codes that yield no unpacked value — TODO confirm).
        """
        names, formats = zip(*info)
        # Remove empty names
        self._names = [n for n in names if n]
        super(DictStruct, self).__init__(prefmt + ''.join(f for f in formats if f))
    def _create(self, items):
        # Pair each named field with its unpacked value
        return dict(zip(self._names, items))
    def unpack(self, s):
        """Parse bytes and return a dict mapping field name to value."""
        return self._create(super(DictStruct, self).unpack(s))
    def unpack_from(self, buff, offset=0):
        """Unpack from a buffer starting at ``offset`` and return a dict."""
        return self._create(super(DictStruct, self).unpack_from(buff, offset))
class Enum(object):
    """Map values to specific strings."""
    def __init__(self, *args, **kwargs):
        """Initialize the mapping.

        Positional arguments are assigned values 0, 1, ... in order; keyword
        arguments map an explicit value to its name.
        """
        # Assign values for args in order starting at 0
        self.val_map = {ind: a for ind, a in enumerate(args)}
        # Invert the kwargs dict so that we can map from value to name
        self.val_map.update(zip(kwargs.values(), kwargs.keys()))
    def __call__(self, val):
        """Map an integer to the string representation; unknown values get a placeholder."""
        return self.val_map.get(val, 'Unknown ({})'.format(val))
class Bits(object):
    """Breaks an integer into a specified number of True/False bits."""
    def __init__(self, num_bits):
        """Initialize the number of low-order bits to extract."""
        self._bits = range(num_bits)
    def __call__(self, val):
        """Convert the integer to the list of True/False values.

        The result is ordered least-significant bit first.
        """
        return [bool((val >> i) & 0x1) for i in self._bits]
class BitField(object):
    """Convert an integer to a string for each bit."""
    def __init__(self, *names):
        """Initialize the list of named bits, least-significant bit first."""
        self._names = names
    def __call__(self, val):
        """Return a list with a string for each True bit in the integer.

        Returns ``None`` for a zero value, a bare string when exactly one
        named bit is set, and a list otherwise (possibly empty when only
        bits beyond the named ones are set).
        """
        if not val:
            return None
        bits = []
        for n in self._names:
            if val & 0x1:
                bits.append(n)
            val >>= 1
            # Stop early once no set bits remain
            if not val:
                break
        # Return whole list if empty or multiple items, otherwise just single item
        return bits[0] if len(bits) == 1 else bits
class Array(object):
    """Use a Struct as a callable to unpack a bunch of bytes as a list."""
    def __init__(self, fmt):
        """Initialize the Struct unpacker with the given format string."""
        self._struct = Struct(fmt)
    def __call__(self, buf):
        """Perform the actual unpacking; ``buf`` must match the format size."""
        return list(self._struct.unpack(buf))
class IOBuffer(object):
    """Holds bytes from a buffer to simplify parsing and random access."""
    def __init__(self, source):
        """Initialize the IOBuffer with the source data (any bytes-like object)."""
        self._data = bytearray(source)
        self._offset = 0
        self.clear_marks()
    @classmethod
    def fromfile(cls, fobj):
        """Initialize the IOBuffer with the contents of the file object."""
        return cls(fobj.read())
    def set_mark(self):
        """Mark the current location and return its id so that the buffer can return later."""
        self._bookmarks.append(self._offset)
        return len(self._bookmarks) - 1
    def jump_to(self, mark, offset=0):
        """Jump to a previously set mark, optionally with an additional offset."""
        self._offset = self._bookmarks[mark] + offset
    def offset_from(self, mark):
        """Calculate the current offset relative to a marked location."""
        return self._offset - self._bookmarks[mark]
    def clear_marks(self):
        """Clear all marked locations."""
        self._bookmarks = []
    def splice(self, mark, newdata):
        """Replace the data after the marked location with the specified data.

        Leaves the current offset at the marked location.
        """
        self.jump_to(mark)
        self._data = self._data[:self._offset] + bytearray(newdata)
    def read_struct(self, struct_class):
        """Parse and return a structure from the current buffer offset."""
        struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)
        self.skip(struct_class.size)
        return struct
    def read_func(self, func, num_bytes=None):
        """Parse data from the current buffer offset using a function."""
        # only advance if func succeeds (an exception propagates before skip)
        res = func(self.get_next(num_bytes))
        self.skip(num_bytes)
        return res
    def read_ascii(self, num_bytes=None):
        """Return the specified bytes as ascii-formatted text."""
        return self.read(num_bytes).decode('ascii')
    def read_binary(self, num, item_type='B'):
        """Parse the current buffer offset as ``num`` items of the specified code."""
        # Raw bytes need no Struct round-trip
        if 'B' in item_type:
            return self.read(num)
        if item_type[0] in ('@', '=', '<', '>', '!'):
            order = item_type[0]
            item_type = item_type[1:]
        else:
            order = '@'
        return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))
    def read_int(self, code):
        """Parse the current buffer offset as the specified integer code."""
        return self.read_struct(Struct(code))[0]
    def read(self, num_bytes=None):
        """Read and return the specified bytes (or the remainder) from the buffer."""
        res = self.get_next(num_bytes)
        self.skip(len(res))
        return res
    def get_next(self, num_bytes=None):
        """Get the next bytes in the buffer without modifying the offset."""
        if num_bytes is None:
            return self._data[self._offset:]
        else:
            return self._data[self._offset:self._offset + num_bytes]
    def skip(self, num_bytes):
        """Jump ahead the specified bytes in the buffer; ``None`` skips to the end."""
        if num_bytes is None:
            self._offset = len(self._data)
        else:
            self._offset += num_bytes
    def check_remains(self, num_bytes):
        """Check that exactly the specified number of bytes remains in the buffer."""
        return len(self._data[self._offset:]) == num_bytes
    def truncate(self, num_bytes):
        """Remove the specified number of bytes from the end of the buffer."""
        # NOTE(review): truncate(0) empties the whole buffer because
        # self._data[:-0] == self._data[:0] — likely unintended; confirm callers
        # never pass 0.
        self._data = self._data[:-num_bytes]
    def at_end(self):
        """Return whether the buffer has reached the end of data."""
        return self._offset >= len(self._data)
    def __getitem__(self, item):
        """Return the data at the specified location."""
        return self._data[item]
    def __str__(self):
        """Return a string representation of the IOBuffer."""
        return 'Size: {} Offset: {}'.format(len(self._data), self._offset)
    def __len__(self):
        """Return the amount of data in the buffer."""
        return len(self._data)
def zlib_decompress_all_frames(data):
    """Decompress all frames of zlib-compressed bytes.

    Repeatedly tries to decompress `data` until all data are decompressed, or decompression
    fails. Bytes that are not zlib-compressed are appended to the output unchanged.

    Parameters
    ----------
    data : bytearray or bytes
        Binary data compressed using zlib.

    Returns
    -------
    bytearray
        All decompressed bytes
    """
    frames = bytearray()
    data = bytes(data)
    while data:
        # A fresh decompressor per frame; unused_data holds whatever follows
        # the end of the current frame.
        decomp = zlib.decompressobj()
        try:
            frames.extend(decomp.decompress(data))
            data = decomp.unused_data
        except zlib.error:
            # Not a zlib frame: pass the remaining bytes through untouched.
            frames.extend(data)
            break
    return frames
def bits_to_code(val):
    """Convert the number of bits to the proper struct code for unpacking."""
    if val == 8:
        return 'B'
    elif val == 16:
        return 'H'
    else:
        # Unknown size: warn and fall back to single bytes rather than failing.
        log.warning('Unsupported bit size: %s. Returning "B"', val)
        return 'B'
# For debugging
def hexdump(buf, num_bytes, offset=0, width=32):
    """Perform a hexdump of the buffer.

    Returns the hexdump as a canonically-formatted string: per row, the hex
    bytes grouped in blocks of four, the absolute index, the index relative
    to ``offset``, and a printable-ASCII rendering of the chunk.
    """
    ind = offset
    end = offset + num_bytes
    lines = []
    while ind < end:
        chunk = buf[ind:ind + width]
        actual_width = len(chunk)
        hexfmt = '{:02X}'
        blocksize = 4
        # Format placeholders for the full four-byte blocks of this row
        blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
        # Need to get any partial lines
        num_left = actual_width % blocksize  # noqa: S001 Fix false alarm
        if num_left:
            blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
        # Pad the rest of the row with '--' placeholders so columns align
        blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
        hexoutput = ' '.join(blocks)
        printable = tuple(chunk)
        lines.append(' '.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))),
                               str(ind - offset).ljust(len(str(end))),
                               ''.join(chr(c) if 31 < c < 128 else '.' for c in chunk))))
        ind += width
    return '\n'.join(lines)
| self._unit = val | conditional_block |
types.d.ts | /// <reference types="node" />
import { IRouter, Request, Response } from 'express';
import { Readable } from 'stream';
export declare type PathParams = string | RegExp | Array<string | RegExp>;
// Cache-Control response options; field names mirror the corresponding HTTP
// directives (noStore -> no-store, sMaxAge -> s-maxage, etc.). Age fields
// accept a number (presumably seconds) or a string — TODO confirm the
// string format against the consumer.
export declare type CacheOptions = {
    private?: boolean;
    public?: boolean;
    noStore?: boolean;
    noCache?: boolean;
    noTransform?: boolean;
    proxyRevalidate?: boolean;
    mustRevalidate?: boolean;
    staleIfError?: number | string;
    staleWhileRevalidate?: number | string;
    maxAge?: number | string;
    sMaxAge?: number | string;
};
export declare type MethodKeys = 'find' | 'create' | 'findById' | 'replaceById' | 'updateById' | 'deleteById';
export declare type MethodVerbs = 'get' | 'post' | 'put' | 'patch' | 'delete';
export declare type Methods = {
[key: string]: {
method: MethodVerbs;
instance: boolean;
successCode?: number;
};
};
export declare type ResourceRoot = {
hidden?: boolean;
path: PathParams;
method: string;
instance: boolean;
swagger?: SwaggerConfig;
isAuthorized?: EndpointIsAuthorized;
execute: EndpointExecute;
format?: EndpointFormat;
cache?: EndpointCache;
http: EndpointHTTP;
endpoint: ResourceRoot;
successCode?: number;
hierarchy: string;
};
export declare type Resource = {
[key: string]: ResourceRoot;
};
export declare type Resources = {
[key: string]: any;
};
export declare type Handler = (args: ResourceRoot) => void;
export declare type walkResourceArgs = {
base?: string;
name: string;
resource: ResourceRoot;
hierarchy?: string;
handler: Handler;
};
export declare type getPathArgs = {
resource: string;
endpoint?: string;
instance: boolean;
};
export declare type Paths = {
[key: string]: {
[key in MethodVerbs]?: SwaggerConfig;
};
};
export declare type MetaRoot = {
path?: PathParams;
method?: MethodVerbs;
instance?: boolean;
[Key: string]: any;
};
export declare type Meta = {
[key: string]: Meta | MetaRoot;
};
export declare type getSwaggerArgs = {
swagger: Swagger;
base?: string;
resources: Resources;
};
export declare type Swagger = {
swagger?: string;
info?: {
title?: string;
version?: string;
termsOfService?: string;
contact?: {
name?: string;
url?: string;
};
description?: string;
};
basePath?: string;
schemes?: string[];
paths?: {
[key: string]: {
[key: string]: SwaggerConfig;
};
};
};
export declare type SwaggerConfig = {
consumes?: string[];
produces?: string[];
parameters?: {
name: string;
in: string;
required: boolean;
type: string;
}[] | undefined;
responses?: Responses;
operationId?: string;
summary?: string;
description?: string;
};
export declare type Trace = {
start: (name: string) => Trace;
end: () => Trace;
};
export declare type SutroArgs = {
base?: string;
resources: Resources;
swagger?: Swagger;
pre?: (resource: ResourceRoot, req: Request, res: Response) => void;
post?: (resource: ResourceRoot, req: Request, res: Response, err?: any) => void;
augmentContext?: (context: SutroRequest, req: Request, resource: ResourceRoot) => Promise<SutroRequest> | SutroRequest; | formatResults?: (context: SutroRequest, req: Request, resource: ResourceRoot, rawData: any) => void;
trace?: Trace;
};
export interface SutroRouter extends IRouter {
swagger?: Swagger;
meta?: Meta;
base?: string;
}
export declare type ResponseStatusKeys = 'default' | '200' | '201' | '204' | '401' | '404' | '500';
export declare type Responses = {
[key in ResponseStatusKeys]?: {
description: string;
};
};
export interface ExpressRequest extends Request {
timedout: boolean;
user?: any;
session?: any;
}
export interface SutroRequest {
ip: Request['ip'];
url: Request['url'];
protocol: Request['protocol'];
method: Request['method'];
subdomains: Request['subdomains'];
path: Request['path'];
headers: Request['headers'];
cookies: Request['cookies'];
user?: any;
data?: any;
options: Request['query'];
session?: any;
noResponse?: boolean;
onFinish?: (fn: (req: Request, res: Response) => void) => void;
withRaw?: (fn: (req: Request, res: Response) => void) => void;
_req: ExpressRequest;
_res: Response;
[key: string]: any;
}
export interface SutroStream extends Readable {
contentType?: string;
}
export declare type EndpointIsAuthorized = (opt: SutroRequest) => Promise<boolean> | boolean;
export declare type EndpointExecute = (opt: SutroRequest) => Promise<any> | any;
export declare type EndpointFormat = (opt: SutroRequest, rawData: any) => Promise<any> | any;
export declare type EndpointCache = {
header?: CacheOptions | (() => CacheOptions);
key?: () => string;
get?: (opt: SutroRequest | string, key: string) => Promise<any> | any;
set?: (opt: SutroRequest | string, data: any, key: string) => Promise<any> | any;
};
export declare type EndpointHTTP = {
method: MethodVerbs;
instance: boolean;
}; | random_line_split | |
matcher.rs | #![feature(test)]
extern crate rff;
extern crate test;
use test::Bencher;
use rff::matcher::matches;
/// Benchmarks a single fuzzy match of a short query against a file path.
#[bench]
fn bench_matches(b: &mut Bencher) {
    b.iter(|| matches("amor", "app/models/order.rb"))
}
/// Benchmarks matching with non-ASCII input ("ß" against "WEIẞ").
#[bench]
fn bench_matches_utf8(b: &mut Bencher) {
    b.iter(|| matches("ß", "WEIẞ"))
}
/// Benchmarks an ASCII query against a candidate containing a non-ASCII char.
#[bench]
fn bench_matches_mixed(b: &mut Bencher) {
    b.iter(|| matches("abc", "abØ"))
}
#[bench]
fn benc | &mut Bencher) {
b.iter(|| matches("app/models", "app/models/order.rb"))
}
/// Benchmarks matching when query and candidate differ in letter case.
#[bench]
fn bench_matches_mixed_case(b: &mut Bencher) {
    b.iter(|| matches("AMOr", "App/Models/Order.rb"))
}
/// Benchmarks one query against several candidates (a mix of hits and misses),
/// approximating a filter pass over a small file list.
#[bench]
fn bench_matches_multiple(b: &mut Bencher) {
    b.iter(|| {
        matches("amor", "app/models/order.rb");
        matches("amor", "spec/models/order_spec.rb");
        matches("amor", "other_garbage.rb");
        matches("amor", "Gemfile");
        matches("amor", "node_modules/test/a/thing.js");
        matches("amor", "vendor/bundle/ruby/gem.rb")
    })
}
/// Benchmarks the exact-match and case-insensitive-equal fast paths.
#[bench]
fn bench_matches_eq(b: &mut Bencher) {
    b.iter(|| {
        matches("Gemfile", "Gemfile");
        matches("gemfile", "Gemfile")
    })
}
| h_matches_more_specific(b: | identifier_name |
matcher.rs | #![feature(test)]
extern crate rff;
extern crate test;
use test::Bencher;
use rff::matcher::matches;
#[bench]
fn bench_matches(b: &mut Bencher) {
b.iter(|| matches("amor", "app/models/order.rb"))
} | fn bench_matches_utf8(b: &mut Bencher) {
b.iter(|| matches("ß", "WEIẞ"))
}
#[bench]
fn bench_matches_mixed(b: &mut Bencher) {
b.iter(|| matches("abc", "abØ"))
}
#[bench]
fn bench_matches_more_specific(b: &mut Bencher) {
b.iter(|| matches("app/models", "app/models/order.rb"))
}
#[bench]
fn bench_matches_mixed_case(b: &mut Bencher) {
b.iter(|| matches("AMOr", "App/Models/Order.rb"))
}
#[bench]
fn bench_matches_multiple(b: &mut Bencher) {
b.iter(|| {
matches("amor", "app/models/order.rb");
matches("amor", "spec/models/order_spec.rb");
matches("amor", "other_garbage.rb");
matches("amor", "Gemfile");
matches("amor", "node_modules/test/a/thing.js");
matches("amor", "vendor/bundle/ruby/gem.rb")
})
}
#[bench]
fn bench_matches_eq(b: &mut Bencher) {
b.iter(|| {
matches("Gemfile", "Gemfile");
matches("gemfile", "Gemfile")
})
} |
#[bench] | random_line_split |
matcher.rs | #![feature(test)]
extern crate rff;
extern crate test;
use test::Bencher;
use rff::matcher::matches;
#[bench]
fn bench_matches(b: &mut Bencher) {
b.iter(|| matches("amor", "app/models/order.rb"))
}
#[bench]
fn bench_matches_utf8(b: &mut Bencher) {
b.iter(|| matches("ß", "WEIẞ"))
}
#[bench]
fn bench_matches_mixed(b: &mut Bencher) {
b.iter(|| matches("abc", "abØ"))
}
#[bench]
fn bench_matches_more_specific(b: &mut Bencher) {
b.iter(|| matches("app/models", "app/models/order.rb"))
}
#[bench]
fn bench_matches_mixed_case(b: &mut Bencher) {
b.iter(|| matches("AMOr", "App/Models/Order.rb"))
}
#[bench]
fn bench_matches_multiple(b: &mut Bencher) {
| bench]
fn bench_matches_eq(b: &mut Bencher) {
b.iter(|| {
matches("Gemfile", "Gemfile");
matches("gemfile", "Gemfile")
})
}
| b.iter(|| {
matches("amor", "app/models/order.rb");
matches("amor", "spec/models/order_spec.rb");
matches("amor", "other_garbage.rb");
matches("amor", "Gemfile");
matches("amor", "node_modules/test/a/thing.js");
matches("amor", "vendor/bundle/ruby/gem.rb")
})
}
#[ | identifier_body |
translate-provider.js | 'use strict';
var storageKey = 'VN_TRANSLATE';
// ReSharper disable once InconsistentNaming
/**
 * Wraps the angular-translate services and persists language configuration.
 *
 * @param {Object} $translate - The angular-translate service.
 * @param {Object} $translatePartialLoader - Loader for partial translation tables.
 * @param {Object} storage - Key/value storage used to persist configuration.
 * @param {Object} options - Defaults (region, lang, country) merged with any stored config.
 * @param {boolean} disableTranslations - When true, translation parts are not loaded.
 */
function Translate($translate, $translatePartialLoader, storage, options, disableTranslations) {
    this.$translate = $translate;
    this.$translatePartialLoader = $translatePartialLoader;
    this.storage = storage;
    this.disableTranslations = disableTranslations;
    // Previously stored configuration wins over the passed-in defaults
    this.configure(angular.extend(options, this.getConfig()));
    // Expose the loader's addPart directly for single-part registration
    this.addPart = $translatePartialLoader.addPart;
}
/**
 * Reads the persisted configuration, overlaying the language that
 * angular-translate stored under NG_TRANSLATE_LANG_KEY when present.
 *
 * @returns {Object} The stored configuration (possibly empty).
 */
Translate.prototype.getConfig = function() {
    var storage = this.storage;
    var config = JSON.parse(storage.get(storageKey)) || {};
    var lang = storage.get('NG_TRANSLATE_LANG_KEY');
    // NOTE(review): the comparison with the string 'undefined' suggests the
    // storage backend can return that literal string — confirm.
    if (!this.disableTranslations && lang && lang !== 'undefined') {
        config.lang = lang;
    }
    return config;
};
/**
 * Merges the given config into the stored one, persists the result, and
 * switches the active translation language.
 *
 * @param {Object} config - Partial configuration, e.g. {lang: 'en'}.
 */
Translate.prototype.configure = function(config) {
    config = angular.extend(this.getConfig(), config);
    this.storage.set(storageKey, JSON.stringify(config));
    this.$translate.use(config.lang);
};
/**
 * Registers each argument as a translation part and refreshes the tables.
 *
 * @returns {boolean|Promise} true when translations are disabled; otherwise
 *     the promise from $translate.refresh(). Note the mixed return type —
 *     callers treating the result as a promise must handle the boolean case.
 */
Translate.prototype.addParts = function() {
    if (this.disableTranslations) {
        return true;
    }
    var loader = this.$translatePartialLoader;
    angular.forEach(arguments, function(part) {
        loader.addPart(part);
    });
    return this.$translate.refresh();
};
/**
 * Angular provider that configures and builds the Translate service.
 *
 * @param {Object} $translateProvider - angular-translate's provider.
 */
function TranslateProvider($translateProvider) {
    this.$translateProvider = $translateProvider;
    // Alias so configure() can set the preferred language directly
    this.setPreferredLanguage = $translateProvider.preferredLanguage;
}
/**
 * Provider factory: builds the Translate service instance from the options
 * set via configure(). Uses array-style annotation for minification safety.
 */
TranslateProvider.prototype.$get = [
    '$translate', '$translatePartialLoader', 'storage',
    function($translate, $translatePartialLoader, storage) {
        var options = this.options;
        return new Translate($translate, $translatePartialLoader, storage, {
            region: options.region,
            lang: options.lang,
            country: options.country
        }, options.disableTranslations);
    }
];
/**
 * Configures the provider, falling back to US-English defaults, and
 * initializes the underlying translate provider unless translations are
 * disabled.
 *
 * @param {Object} options - {region, lang, country, disableTranslations}.
 */
TranslateProvider.prototype.configure = function(options) {
    options = angular.extend({ region: 'us', lang: 'en', country: 'us' }, options);
    if (options.lang) {
        this.setPreferredLanguage(options.lang);
    }
    this.options = options;
    if (!options.disableTranslations) {
        this.initTranslateProvider(options.lang);
    }
};
TranslateProvider.prototype.initTranslateProvider = function(lang) {
var $translateProvider = this.$translateProvider;
$translateProvider.useLoader('$translatePartialLoader', { | urlTemplate: '/translations/{part}/{lang}.json'
});
if (lang === 'en') {
$translateProvider.useMessageFormatInterpolation();
}
$translateProvider.useMissingTranslationHandlerLog();
$translateProvider.useLocalStorage();
};
angular.module('Volusion.toolboxCommon')
.provider('translate', ['$translateProvider', TranslateProvider]); | random_line_split | |
translate-provider.js | 'use strict';
var storageKey = 'VN_TRANSLATE';
// ReSharper disable once InconsistentNaming
function | ($translate, $translatePartialLoader, storage, options, disableTranslations) {
this.$translate = $translate;
this.$translatePartialLoader = $translatePartialLoader;
this.storage = storage;
this.disableTranslations = disableTranslations;
this.configure(angular.extend(options, this.getConfig()));
this.addPart = $translatePartialLoader.addPart;
}
Translate.prototype.getConfig = function() {
var storage = this.storage;
var config = JSON.parse(storage.get(storageKey)) || {};
var lang = storage.get('NG_TRANSLATE_LANG_KEY');
if (!this.disableTranslations && lang && lang !== 'undefined') {
config.lang = lang;
}
return config;
};
Translate.prototype.configure = function(config) {
config = angular.extend(this.getConfig(), config);
this.storage.set(storageKey, JSON.stringify(config));
this.$translate.use(config.lang);
};
Translate.prototype.addParts = function() {
if (this.disableTranslations) {
return true;
}
var loader = this.$translatePartialLoader;
angular.forEach(arguments, function(part) {
loader.addPart(part);
});
return this.$translate.refresh();
};
function TranslateProvider($translateProvider) {
this.$translateProvider = $translateProvider;
this.setPreferredLanguage = $translateProvider.preferredLanguage;
}
TranslateProvider.prototype.$get = [
'$translate', '$translatePartialLoader', 'storage',
function($translate, $translatePartialLoader, storage) {
var options = this.options;
return new Translate($translate, $translatePartialLoader, storage, {
region: options.region,
lang: options.lang,
country: options.country
}, options.disableTranslations);
}
];
TranslateProvider.prototype.configure = function(options) {
options = angular.extend({ region: 'us', lang: 'en', country: 'us' }, options);
if (options.lang) {
this.setPreferredLanguage(options.lang);
}
this.options = options;
if (!options.disableTranslations) {
this.initTranslateProvider(options.lang);
}
};
TranslateProvider.prototype.initTranslateProvider = function(lang) {
var $translateProvider = this.$translateProvider;
$translateProvider.useLoader('$translatePartialLoader', {
urlTemplate: '/translations/{part}/{lang}.json'
});
if (lang === 'en') {
$translateProvider.useMessageFormatInterpolation();
}
$translateProvider.useMissingTranslationHandlerLog();
$translateProvider.useLocalStorage();
};
angular.module('Volusion.toolboxCommon')
.provider('translate', ['$translateProvider', TranslateProvider]);
| Translate | identifier_name |
translate-provider.js | 'use strict';
var storageKey = 'VN_TRANSLATE';
// ReSharper disable once InconsistentNaming
function Translate($translate, $translatePartialLoader, storage, options, disableTranslations) {
this.$translate = $translate;
this.$translatePartialLoader = $translatePartialLoader;
this.storage = storage;
this.disableTranslations = disableTranslations;
this.configure(angular.extend(options, this.getConfig()));
this.addPart = $translatePartialLoader.addPart;
}
Translate.prototype.getConfig = function() {
var storage = this.storage;
var config = JSON.parse(storage.get(storageKey)) || {};
var lang = storage.get('NG_TRANSLATE_LANG_KEY');
if (!this.disableTranslations && lang && lang !== 'undefined') {
config.lang = lang;
}
return config;
};
Translate.prototype.configure = function(config) {
config = angular.extend(this.getConfig(), config);
this.storage.set(storageKey, JSON.stringify(config));
this.$translate.use(config.lang);
};
Translate.prototype.addParts = function() {
if (this.disableTranslations) |
var loader = this.$translatePartialLoader;
angular.forEach(arguments, function(part) {
loader.addPart(part);
});
return this.$translate.refresh();
};
function TranslateProvider($translateProvider) {
this.$translateProvider = $translateProvider;
this.setPreferredLanguage = $translateProvider.preferredLanguage;
}
TranslateProvider.prototype.$get = [
'$translate', '$translatePartialLoader', 'storage',
function($translate, $translatePartialLoader, storage) {
var options = this.options;
return new Translate($translate, $translatePartialLoader, storage, {
region: options.region,
lang: options.lang,
country: options.country
}, options.disableTranslations);
}
];
TranslateProvider.prototype.configure = function(options) {
options = angular.extend({ region: 'us', lang: 'en', country: 'us' }, options);
if (options.lang) {
this.setPreferredLanguage(options.lang);
}
this.options = options;
if (!options.disableTranslations) {
this.initTranslateProvider(options.lang);
}
};
TranslateProvider.prototype.initTranslateProvider = function(lang) {
var $translateProvider = this.$translateProvider;
$translateProvider.useLoader('$translatePartialLoader', {
urlTemplate: '/translations/{part}/{lang}.json'
});
if (lang === 'en') {
$translateProvider.useMessageFormatInterpolation();
}
$translateProvider.useMissingTranslationHandlerLog();
$translateProvider.useLocalStorage();
};
angular.module('Volusion.toolboxCommon')
.provider('translate', ['$translateProvider', TranslateProvider]);
| {
return true;
} | conditional_block |
translate-provider.js | 'use strict';
var storageKey = 'VN_TRANSLATE';
// ReSharper disable once InconsistentNaming
function Translate($translate, $translatePartialLoader, storage, options, disableTranslations) {
this.$translate = $translate;
this.$translatePartialLoader = $translatePartialLoader;
this.storage = storage;
this.disableTranslations = disableTranslations;
this.configure(angular.extend(options, this.getConfig()));
this.addPart = $translatePartialLoader.addPart;
}
Translate.prototype.getConfig = function() {
var storage = this.storage;
var config = JSON.parse(storage.get(storageKey)) || {};
var lang = storage.get('NG_TRANSLATE_LANG_KEY');
if (!this.disableTranslations && lang && lang !== 'undefined') {
config.lang = lang;
}
return config;
};
Translate.prototype.configure = function(config) {
config = angular.extend(this.getConfig(), config);
this.storage.set(storageKey, JSON.stringify(config));
this.$translate.use(config.lang);
};
Translate.prototype.addParts = function() {
if (this.disableTranslations) {
return true;
}
var loader = this.$translatePartialLoader;
angular.forEach(arguments, function(part) {
loader.addPart(part);
});
return this.$translate.refresh();
};
function TranslateProvider($translateProvider) |
TranslateProvider.prototype.$get = [
'$translate', '$translatePartialLoader', 'storage',
function($translate, $translatePartialLoader, storage) {
var options = this.options;
return new Translate($translate, $translatePartialLoader, storage, {
region: options.region,
lang: options.lang,
country: options.country
}, options.disableTranslations);
}
];
TranslateProvider.prototype.configure = function(options) {
options = angular.extend({ region: 'us', lang: 'en', country: 'us' }, options);
if (options.lang) {
this.setPreferredLanguage(options.lang);
}
this.options = options;
if (!options.disableTranslations) {
this.initTranslateProvider(options.lang);
}
};
TranslateProvider.prototype.initTranslateProvider = function(lang) {
var $translateProvider = this.$translateProvider;
$translateProvider.useLoader('$translatePartialLoader', {
urlTemplate: '/translations/{part}/{lang}.json'
});
if (lang === 'en') {
$translateProvider.useMessageFormatInterpolation();
}
$translateProvider.useMissingTranslationHandlerLog();
$translateProvider.useLocalStorage();
};
angular.module('Volusion.toolboxCommon')
.provider('translate', ['$translateProvider', TranslateProvider]);
| {
this.$translateProvider = $translateProvider;
this.setPreferredLanguage = $translateProvider.preferredLanguage;
} | identifier_body |
Xml.js | /**
* @author Ed Spencer
* @class Ext.data.reader.Xml
* @extends Ext.data.reader.Reader
*
* <p>The XML Reader is used by a Proxy to read a server response that is sent back in XML format. This usually
* happens as a result of loading a Store - for example we might create something like this:</p>
*
<pre><code>
Ext.define('User', {
extend: 'Ext.data.Model',
fields: ['id', 'name', 'email']
});
var store = Ext.create('Ext.data.Store', {
model: 'User',
proxy: {
type: 'ajax',
url : 'users.xml',
reader: {
type: 'xml',
record: 'user'
}
}
});
</code></pre>
*
* <p>The example above creates a 'User' model. Models are explained in the {@link Ext.data.Model Model} docs if you're
* not already familiar with them.</p>
*
* <p>We created the simplest type of XML Reader possible by simply telling our {@link Ext.data.Store Store}'s
* {@link Ext.data.proxy.Proxy Proxy} that we want a XML Reader. The Store automatically passes the configured model to the
* Store, so it is as if we passed this instead:
*
<pre><code>
reader: {
type : 'xml',
model: 'User',
record: 'user'
}
</code></pre>
*
* <p>The reader we set up is ready to read data from our server - at the moment it will accept a response like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</code></pre>
*
* <p>The XML Reader uses the configured {@link #record} option to pull out the data for each record - in this case we
* set record to 'user', so each <user> above will be converted into a User model.</p>
*
* <p><u>Reading other XML formats</u></p>
*
* <p>If you already have your XML format defined and it doesn't look quite like what we have above, you can usually
* pass XmlReader a couple of configuration options to make it parse your format. For example, we can use the
* {@link #root} configuration to parse data that comes back like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</code></pre>
*
* <p>To parse this we just pass in a {@link #root} configuration that matches the 'users' above:</p>
*
<pre><code>
reader: {
type : 'xml',
root : 'users',
record: 'user'
}
</code></pre>
*
* <p>Note that XmlReader doesn't care whether your {@link #root} and {@link #record} elements are nested deep inside
* a larger structure, so a response like this will still work:
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<deeply>
<nested>
<xml>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</xml>
</nested>
</deeply>
</code></pre>
*
* <p><u>Response metadata</u></p>
*
* <p>The server can return additional data in its response, such as the {@link #totalProperty total number of records}
* and the {@link #successProperty success status of the response}. These are typically included in the XML response
* like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<total>100</total>
<success>true</success>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</code></pre>
*
* <p>If these properties are present in the XML response they can be parsed out by the XmlReader and used by the
* Store that loaded it. We can set up the names of these properties by specifying a final pair of configuration
* options:</p>
*
<pre><code>
reader: {
type: 'xml',
root: 'users',
totalProperty : 'total',
successProperty: 'success'
}
</code></pre>
*
* <p>These final options are not necessary to make the Reader work, but can be useful when the server needs to report
* an error or if it needs to indicate that there is a lot of data available of which only a subset is currently being
* returned.</p>
*
* <p><u>Response format</u></p>
*
* <p><b>Note:</b> in order for the browser to parse a returned XML document, the Content-Type header in the HTTP
* response must be set to "text/xml" or "application/xml". This is very important - the XmlReader will not
* work correctly otherwise.</p>
*/
Ext.define('Ext.data.reader.Xml', {
extend: 'Ext.data.reader.Reader',
alternateClassName: 'Ext.data.XmlReader',
alias : 'reader.xml',
/**
* @cfg {String} record The DomQuery path to the repeated element which contains record information.
*/
/**
* @private
* Creates a function to return some particular key of data from a response. The totalProperty and
* successProperty are treated as special cases for type casting, everything else is just a simple selector.
* @param {String} key
* @return {Function}
*/
createAccessor: function(expr) {
var me = this;
if (Ext.isEmpty(expr)) {
return Ext.emptyFn;
}
if (Ext.isFunction(expr)) {
return expr;
}
return function(root) {
return me.getNodeValue(Ext.DomQuery.selectNode(expr, root));
};
},
getNodeValue: function(node) {
if (node && node.firstChild) {
return node.firstChild.nodeValue;
}
return undefined;
},
//inherit docs
getResponseData: function(response) {
var xml = response.responseXML;
//<debug>
if (!xml) {
Ext.Error.raise({ | response: response,
msg: 'XML data not found in the response'
});
}
//</debug>
return xml;
},
/**
* Normalizes the data object
* @param {Object} data The raw data object
* @return {Object} Returns the documentElement property of the data object if present, or the same object if not
*/
getData: function(data) {
return data.documentElement || data;
},
/**
* @private
* Given an XML object, returns the Element that represents the root as configured by the Reader's meta data
* @param {Object} data The XML data object
* @return {XMLElement} The root node element
*/
getRoot: function(data) {
var nodeName = data.nodeName,
root = this.root;
if (!root || (nodeName && nodeName == root)) {
return data;
} else if (Ext.DomQuery.isXml(data)) {
// This fix ensures we have XML data
// Related to TreeStore calling getRoot with the root node, which isn't XML
// Probably should be resolved in TreeStore at some point
return Ext.DomQuery.selectNode(root, data);
}
},
/**
* @private
* We're just preparing the data for the superclass by pulling out the record nodes we want
* @param {XMLElement} root The XML root node
* @return {Ext.data.Model[]} The records
*/
extractData: function(root) {
var recordName = this.record;
//<debug>
if (!recordName) {
Ext.Error.raise('Record is a required parameter');
}
//</debug>
if (recordName != root.nodeName) {
root = Ext.DomQuery.select(recordName, root);
} else {
root = [root];
}
return this.callParent([root]);
},
/**
* @private
* See Ext.data.reader.Reader's getAssociatedDataRoot docs
* @param {Object} data The raw data object
* @param {String} associationName The name of the association to get data for (uses associationKey if present)
* @return {XMLElement} The root
*/
getAssociatedDataRoot: function(data, associationName) {
return Ext.DomQuery.select(associationName, data)[0];
},
/**
* Parses an XML document and returns a ResultSet containing the model instances
* @param {Object} doc Parsed XML document
* @return {Ext.data.ResultSet} The parsed result set
*/
readRecords: function(doc) {
//it's possible that we get passed an array here by associations. Make sure we strip that out (see Ext.data.reader.Reader#readAssociated)
if (Ext.isArray(doc)) {
doc = doc[0];
}
/**
* DEPRECATED - will be removed in Ext JS 5.0. This is just a copy of this.rawData - use that instead
* @property xmlData
* @type Object
*/
this.xmlData = doc;
return this.callParent([doc]);
}
}); | random_line_split | |
Xml.js | /**
* @author Ed Spencer
* @class Ext.data.reader.Xml
* @extends Ext.data.reader.Reader
*
* <p>The XML Reader is used by a Proxy to read a server response that is sent back in XML format. This usually
* happens as a result of loading a Store - for example we might create something like this:</p>
*
<pre><code>
Ext.define('User', {
extend: 'Ext.data.Model',
fields: ['id', 'name', 'email']
});
var store = Ext.create('Ext.data.Store', {
model: 'User',
proxy: {
type: 'ajax',
url : 'users.xml',
reader: {
type: 'xml',
record: 'user'
}
}
});
</code></pre>
*
* <p>The example above creates a 'User' model. Models are explained in the {@link Ext.data.Model Model} docs if you're
* not already familiar with them.</p>
*
* <p>We created the simplest type of XML Reader possible by simply telling our {@link Ext.data.Store Store}'s
* {@link Ext.data.proxy.Proxy Proxy} that we want a XML Reader. The Store automatically passes the configured model to the
* Store, so it is as if we passed this instead:
*
<pre><code>
reader: {
type : 'xml',
model: 'User',
record: 'user'
}
</code></pre>
*
* <p>The reader we set up is ready to read data from our server - at the moment it will accept a response like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</code></pre>
*
* <p>The XML Reader uses the configured {@link #record} option to pull out the data for each record - in this case we
* set record to 'user', so each <user> above will be converted into a User model.</p>
*
* <p><u>Reading other XML formats</u></p>
*
* <p>If you already have your XML format defined and it doesn't look quite like what we have above, you can usually
* pass XmlReader a couple of configuration options to make it parse your format. For example, we can use the
* {@link #root} configuration to parse data that comes back like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</code></pre>
*
* <p>To parse this we just pass in a {@link #root} configuration that matches the 'users' above:</p>
*
<pre><code>
reader: {
type : 'xml',
root : 'users',
record: 'user'
}
</code></pre>
*
* <p>Note that XmlReader doesn't care whether your {@link #root} and {@link #record} elements are nested deep inside
* a larger structure, so a response like this will still work:
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<deeply>
<nested>
<xml>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</xml>
</nested>
</deeply>
</code></pre>
*
* <p><u>Response metadata</u></p>
*
* <p>The server can return additional data in its response, such as the {@link #totalProperty total number of records}
* and the {@link #successProperty success status of the response}. These are typically included in the XML response
* like this:</p>
*
<pre><code>
<?xml version="1.0" encoding="UTF-8"?>
<total>100</total>
<success>true</success>
<users>
<user>
<id>1</id>
<name>Ed Spencer</name>
<email>ed@sencha.com</email>
</user>
<user>
<id>2</id>
<name>Abe Elias</name>
<email>abe@sencha.com</email>
</user>
</users>
</code></pre>
*
* <p>If these properties are present in the XML response they can be parsed out by the XmlReader and used by the
* Store that loaded it. We can set up the names of these properties by specifying a final pair of configuration
* options:</p>
*
<pre><code>
reader: {
type: 'xml',
root: 'users',
totalProperty : 'total',
successProperty: 'success'
}
</code></pre>
*
* <p>These final options are not necessary to make the Reader work, but can be useful when the server needs to report
* an error or if it needs to indicate that there is a lot of data available of which only a subset is currently being
* returned.</p>
*
* <p><u>Response format</u></p>
*
* <p><b>Note:</b> in order for the browser to parse a returned XML document, the Content-Type header in the HTTP
* response must be set to "text/xml" or "application/xml". This is very important - the XmlReader will not
* work correctly otherwise.</p>
*/
Ext.define('Ext.data.reader.Xml', {
extend: 'Ext.data.reader.Reader',
alternateClassName: 'Ext.data.XmlReader',
alias : 'reader.xml',
/**
* @cfg {String} record The DomQuery path to the repeated element which contains record information.
*/
/**
* @private
* Creates a function to return some particular key of data from a response. The totalProperty and
* successProperty are treated as special cases for type casting, everything else is just a simple selector.
* @param {String} key
* @return {Function}
*/
createAccessor: function(expr) {
var me = this;
if (Ext.isEmpty(expr)) {
return Ext.emptyFn;
}
if (Ext.isFunction(expr)) {
return expr;
}
return function(root) {
return me.getNodeValue(Ext.DomQuery.selectNode(expr, root));
};
},
getNodeValue: function(node) {
if (node && node.firstChild) {
return node.firstChild.nodeValue;
}
return undefined;
},
//inherit docs
getResponseData: function(response) {
var xml = response.responseXML;
//<debug>
if (!xml) {
Ext.Error.raise({
response: response,
msg: 'XML data not found in the response'
});
}
//</debug>
return xml;
},
/**
* Normalizes the data object
* @param {Object} data The raw data object
* @return {Object} Returns the documentElement property of the data object if present, or the same object if not
*/
getData: function(data) {
return data.documentElement || data;
},
/**
* @private
* Given an XML object, returns the Element that represents the root as configured by the Reader's meta data
* @param {Object} data The XML data object
* @return {XMLElement} The root node element
*/
getRoot: function(data) {
var nodeName = data.nodeName,
root = this.root;
if (!root || (nodeName && nodeName == root)) {
return data;
} else if (Ext.DomQuery.isXml(data)) |
},
/**
* @private
* We're just preparing the data for the superclass by pulling out the record nodes we want
* @param {XMLElement} root The XML root node
* @return {Ext.data.Model[]} The records
*/
extractData: function(root) {
var recordName = this.record;
//<debug>
if (!recordName) {
Ext.Error.raise('Record is a required parameter');
}
//</debug>
if (recordName != root.nodeName) {
root = Ext.DomQuery.select(recordName, root);
} else {
root = [root];
}
return this.callParent([root]);
},
/**
* @private
* See Ext.data.reader.Reader's getAssociatedDataRoot docs
* @param {Object} data The raw data object
* @param {String} associationName The name of the association to get data for (uses associationKey if present)
* @return {XMLElement} The root
*/
getAssociatedDataRoot: function(data, associationName) {
return Ext.DomQuery.select(associationName, data)[0];
},
/**
* Parses an XML document and returns a ResultSet containing the model instances
* @param {Object} doc Parsed XML document
* @return {Ext.data.ResultSet} The parsed result set
*/
readRecords: function(doc) {
//it's possible that we get passed an array here by associations. Make sure we strip that out (see Ext.data.reader.Reader#readAssociated)
if (Ext.isArray(doc)) {
doc = doc[0];
}
/**
* DEPRECATED - will be removed in Ext JS 5.0. This is just a copy of this.rawData - use that instead
* @property xmlData
* @type Object
*/
this.xmlData = doc;
return this.callParent([doc]);
}
}); | {
// This fix ensures we have XML data
// Related to TreeStore calling getRoot with the root node, which isn't XML
// Probably should be resolved in TreeStore at some point
return Ext.DomQuery.selectNode(root, data);
} | conditional_block |
complaint.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { environment } from '../../environments/environment'
import { ComplaintService } from '../Services/complaint.service'
import { UserService } from '../Services/user.service'
import { Component, ElementRef, OnInit, ViewChild } from '@angular/core'
import { FormControl, Validators } from '@angular/forms'
import { FileUploader } from 'ng2-file-upload'
import { dom, library } from '@fortawesome/fontawesome-svg-core'
import { faBomb } from '@fortawesome/free-solid-svg-icons'
import { FormSubmitService } from '../Services/form-submit.service'
import { TranslateService } from '@ngx-translate/core'
library.add(faBomb)
dom.watch()
@Component({
selector: 'app-complaint',
templateUrl: './complaint.component.html',
styleUrls: ['./complaint.component.scss']
})
export class | implements OnInit {
public customerControl: FormControl = new FormControl({ value: '', disabled: true }, [])
public messageControl: FormControl = new FormControl('', [Validators.required, Validators.maxLength(160)])
@ViewChild('fileControl', { static: true }) fileControl!: ElementRef // For controlling the DOM Element for file input.
public fileUploadError: any = undefined // For controlling error handling related to file input.
public uploader: FileUploader = new FileUploader({
url: environment.hostServer + '/file-upload',
authToken: `Bearer ${localStorage.getItem('token')}`,
allowedMimeType: ['application/pdf', 'application/xml', 'text/xml', 'application/zip', 'application/x-zip-compressed', 'multipart/x-zip'],
maxFileSize: 100000
})
public userEmail: any = undefined
public complaint: any = undefined
public confirmation: any
constructor (private readonly userService: UserService, private readonly complaintService: ComplaintService, private readonly formSubmitService: FormSubmitService, private readonly translate: TranslateService) { }
ngOnInit () {
this.initComplaint()
this.uploader.onWhenAddingFileFailed = (item, filter) => {
this.fileUploadError = filter
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw new Error(`Error due to : ${filter.name}`)
}
this.uploader.onAfterAddingFile = () => {
this.fileUploadError = undefined
}
this.uploader.onSuccessItem = () => {
this.saveComplaint()
this.uploader.clearQueue()
}
this.formSubmitService.attachEnterKeyHandler('complaint-form', 'submitButton', () => this.save())
}
initComplaint () {
this.userService.whoAmI().subscribe((user: any) => {
this.complaint = {}
this.complaint.UserId = user.id
this.userEmail = user.email
this.customerControl.setValue(this.userEmail)
}, (err) => {
this.complaint = undefined
console.log(err)
})
}
save () {
if (this.uploader.queue[0]) {
this.uploader.queue[0].upload()
this.fileControl.nativeElement.value = null
} else {
this.saveComplaint()
}
}
saveComplaint () {
this.complaint.message = this.messageControl.value
this.complaintService.save(this.complaint).subscribe((savedComplaint: any) => {
this.translate.get('CUSTOMER_SUPPORT_COMPLAINT_REPLY', { ref: savedComplaint.id }).subscribe((customerSupportReply) => {
this.confirmation = customerSupportReply
}, (translationId) => {
this.confirmation = translationId
})
this.initComplaint()
this.resetForm()
this.fileUploadError = undefined
}, (error) => error)
}
resetForm () {
this.messageControl.setValue('')
this.messageControl.markAsUntouched()
this.messageControl.markAsPristine()
this.fileControl.nativeElement.value = null
}
}
| ComplaintComponent | identifier_name |
complaint.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { environment } from '../../environments/environment'
import { ComplaintService } from '../Services/complaint.service'
import { UserService } from '../Services/user.service'
import { Component, ElementRef, OnInit, ViewChild } from '@angular/core'
import { FormControl, Validators } from '@angular/forms'
import { FileUploader } from 'ng2-file-upload'
import { dom, library } from '@fortawesome/fontawesome-svg-core'
import { faBomb } from '@fortawesome/free-solid-svg-icons'
import { FormSubmitService } from '../Services/form-submit.service'
import { TranslateService } from '@ngx-translate/core'
library.add(faBomb)
dom.watch()
@Component({
selector: 'app-complaint',
templateUrl: './complaint.component.html',
styleUrls: ['./complaint.component.scss']
})
export class ComplaintComponent implements OnInit {
public customerControl: FormControl = new FormControl({ value: '', disabled: true }, [])
public messageControl: FormControl = new FormControl('', [Validators.required, Validators.maxLength(160)])
@ViewChild('fileControl', { static: true }) fileControl!: ElementRef // For controlling the DOM Element for file input.
public fileUploadError: any = undefined // For controlling error handling related to file input.
public uploader: FileUploader = new FileUploader({
url: environment.hostServer + '/file-upload',
authToken: `Bearer ${localStorage.getItem('token')}`,
allowedMimeType: ['application/pdf', 'application/xml', 'text/xml', 'application/zip', 'application/x-zip-compressed', 'multipart/x-zip'],
maxFileSize: 100000
})
public userEmail: any = undefined
public complaint: any = undefined
public confirmation: any
constructor (private readonly userService: UserService, private readonly complaintService: ComplaintService, private readonly formSubmitService: FormSubmitService, private readonly translate: TranslateService) { }
ngOnInit () {
this.initComplaint()
this.uploader.onWhenAddingFileFailed = (item, filter) => {
this.fileUploadError = filter
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw new Error(`Error due to : ${filter.name}`)
}
this.uploader.onAfterAddingFile = () => {
this.fileUploadError = undefined
}
this.uploader.onSuccessItem = () => {
this.saveComplaint()
this.uploader.clearQueue()
}
this.formSubmitService.attachEnterKeyHandler('complaint-form', 'submitButton', () => this.save())
}
initComplaint () {
this.userService.whoAmI().subscribe((user: any) => {
this.complaint = {}
this.complaint.UserId = user.id
this.userEmail = user.email
this.customerControl.setValue(this.userEmail)
}, (err) => {
this.complaint = undefined | save () {
if (this.uploader.queue[0]) {
this.uploader.queue[0].upload()
this.fileControl.nativeElement.value = null
} else {
this.saveComplaint()
}
}
saveComplaint () {
this.complaint.message = this.messageControl.value
this.complaintService.save(this.complaint).subscribe((savedComplaint: any) => {
this.translate.get('CUSTOMER_SUPPORT_COMPLAINT_REPLY', { ref: savedComplaint.id }).subscribe((customerSupportReply) => {
this.confirmation = customerSupportReply
}, (translationId) => {
this.confirmation = translationId
})
this.initComplaint()
this.resetForm()
this.fileUploadError = undefined
}, (error) => error)
}
resetForm () {
this.messageControl.setValue('')
this.messageControl.markAsUntouched()
this.messageControl.markAsPristine()
this.fileControl.nativeElement.value = null
}
} | console.log(err)
})
}
| random_line_split |
complaint.component.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { environment } from '../../environments/environment'
import { ComplaintService } from '../Services/complaint.service'
import { UserService } from '../Services/user.service'
import { Component, ElementRef, OnInit, ViewChild } from '@angular/core'
import { FormControl, Validators } from '@angular/forms'
import { FileUploader } from 'ng2-file-upload'
import { dom, library } from '@fortawesome/fontawesome-svg-core'
import { faBomb } from '@fortawesome/free-solid-svg-icons'
import { FormSubmitService } from '../Services/form-submit.service'
import { TranslateService } from '@ngx-translate/core'
library.add(faBomb)
dom.watch()
@Component({
selector: 'app-complaint',
templateUrl: './complaint.component.html',
styleUrls: ['./complaint.component.scss']
})
export class ComplaintComponent implements OnInit {
public customerControl: FormControl = new FormControl({ value: '', disabled: true }, [])
public messageControl: FormControl = new FormControl('', [Validators.required, Validators.maxLength(160)])
@ViewChild('fileControl', { static: true }) fileControl!: ElementRef // For controlling the DOM Element for file input.
public fileUploadError: any = undefined // For controlling error handling related to file input.
public uploader: FileUploader = new FileUploader({
url: environment.hostServer + '/file-upload',
authToken: `Bearer ${localStorage.getItem('token')}`,
allowedMimeType: ['application/pdf', 'application/xml', 'text/xml', 'application/zip', 'application/x-zip-compressed', 'multipart/x-zip'],
maxFileSize: 100000
})
public userEmail: any = undefined
public complaint: any = undefined
public confirmation: any
constructor (private readonly userService: UserService, private readonly complaintService: ComplaintService, private readonly formSubmitService: FormSubmitService, private readonly translate: TranslateService) { }
ngOnInit () {
this.initComplaint()
this.uploader.onWhenAddingFileFailed = (item, filter) => {
this.fileUploadError = filter
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
throw new Error(`Error due to : ${filter.name}`)
}
this.uploader.onAfterAddingFile = () => {
this.fileUploadError = undefined
}
this.uploader.onSuccessItem = () => {
this.saveComplaint()
this.uploader.clearQueue()
}
this.formSubmitService.attachEnterKeyHandler('complaint-form', 'submitButton', () => this.save())
}
initComplaint () {
this.userService.whoAmI().subscribe((user: any) => {
this.complaint = {}
this.complaint.UserId = user.id
this.userEmail = user.email
this.customerControl.setValue(this.userEmail)
}, (err) => {
this.complaint = undefined
console.log(err)
})
}
save () {
if (this.uploader.queue[0]) {
this.uploader.queue[0].upload()
this.fileControl.nativeElement.value = null
} else |
}
saveComplaint () {
this.complaint.message = this.messageControl.value
this.complaintService.save(this.complaint).subscribe((savedComplaint: any) => {
this.translate.get('CUSTOMER_SUPPORT_COMPLAINT_REPLY', { ref: savedComplaint.id }).subscribe((customerSupportReply) => {
this.confirmation = customerSupportReply
}, (translationId) => {
this.confirmation = translationId
})
this.initComplaint()
this.resetForm()
this.fileUploadError = undefined
}, (error) => error)
}
resetForm () {
this.messageControl.setValue('')
this.messageControl.markAsUntouched()
this.messageControl.markAsPristine()
this.fileControl.nativeElement.value = null
}
}
| {
this.saveComplaint()
} | conditional_block |
main.rs | /* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations ───────────────┐
// Std Library Option ──────────────────┘
// An integer division that doesn't `panic!`
fn checked_division(dividend: i32, divisor: i32) -> Option<i32> {
if di | // Failure is represented as the `None` variant
None
} else {
// Result is wrapped in a `Some` variant
Some(dividend / divisor)
}
}
// This function handles a division that may not succeed
fn try_division(dividend: i32, divisor: i32) {
// `Option` values can be pattern matched, just like other enums
match checked_division(dividend, divisor) {
None => println!("{} / {} failed!", dividend, divisor),
Some(quotient) => {
println!("{} / {} = {}", dividend, divisor, quotient)
},
}
}
fn main() {
try_division(4, 2);
try_division(1, 0);
// Binding `None` to a variable needs to be type annotated
let none: Option<i32> = None;
let _equivalent_none = None::<i32>;
let optional_float = Some(0f32);
// Unwrapping a `Some` variant will extract the value wrapped.
println!("{:?} unwraps to {:?}", optional_float, optional_float.unwrap());
// Unwrapping a `None` variant will `panic!`
println!("{:?} unwraps to {:?}", none, none.unwrap());
}
| visor == 0 {
| identifier_name |
main.rs | /* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations ───────────────┐
// Std Library Option ──────────────────┘
// An integer division that doesn't `panic!`
fn checked_division(dividend: i32, divisor: i32) -> Option<i32> {
if divisor == 0 {
// Failure is represented as the `None` variant
None
} else {
// Result is wrapped in a `Some` variant
Some(dividend / divisor)
}
}
// This function handles a division that may not succeed
fn try_division(dividend: i32, divisor: i32) {
// `Option` values can be pattern matched, just like other enums
match checked_division(dividend, divisor) {
None => println!("{} / {} failed!", dividend, divisor),
Some(quotient) => {
println!("{} / {} = {}", dividend, divisor, quotient)
| // Binding `None` to a variable needs to be type annotated
let none: Option<i32> = None;
let _equivalent_none = None::<i32>;
let optional_float = Some(0f32);
// Unwrapping a `Some` variant will extract the value wrapped.
println!("{:?} unwraps to {:?}", optional_float, optional_float.unwrap());
// Unwrapping a `None` variant will `panic!`
println!("{:?} unwraps to {:?}", none, none.unwrap());
}
| },
}
}
fn main() {
try_division(4, 2);
try_division(1, 0);
| conditional_block |
main.rs | /* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations ───────────────┐
// Std Library Option ──────────────────┘
// An integer division that doesn't `panic!`
fn checked_division(dividend: i32, divisor: i32) -> Option<i32> {
if divisor == 0 {
// Failure is represented as the `None` variant
None
} else { | }
}
// This function handles a division that may not succeed
fn try_division(dividend: i32, divisor: i32) {
// `Option` values can be pattern matched, just like other enums
match checked_division(dividend, divisor) {
None => println!("{} / {} failed!", dividend, divisor),
Some(quotient) => {
println!("{} / {} = {}", dividend, divisor, quotient)
},
}
}
fn main() {
try_division(4, 2);
try_division(1, 0);
// Binding `None` to a variable needs to be type annotated
let none: Option<i32> = None;
let _equivalent_none = None::<i32>;
let optional_float = Some(0f32);
// Unwrapping a `Some` variant will extract the value wrapped.
println!("{:?} unwraps to {:?}", optional_float, optional_float.unwrap());
// Unwrapping a `None` variant will `panic!`
println!("{:?} unwraps to {:?}", none, none.unwrap());
} | // Result is wrapped in a `Some` variant
Some(dividend / divisor) | random_line_split |
main.rs | /* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations ───────────────┐
// Std Library Option ──────────────────┘
// An integer division that doesn't `panic!`
fn checked_division(dividend: i32, divisor: i32) -> Option<i32> {
if divisor == 0 {
// Failure is represented as the `None` variant
None
} else {
// Result is wrapped in a `Some` variant
Some(dividend / divisor)
}
}
// This function handles a division that may not succeed
fn try_division(dividend: i32, divisor: i32) {
// `Option` values can be pattern matched, just like other enums
| nding `None` to a variable needs to be type annotated
let none: Option<i32> = None;
let _equivalent_none = None::<i32>;
let optional_float = Some(0f32);
// Unwrapping a `Some` variant will extract the value wrapped.
println!("{:?} unwraps to {:?}", optional_float, optional_float.unwrap());
// Unwrapping a `None` variant will `panic!`
println!("{:?} unwraps to {:?}", none, none.unwrap());
}
| match checked_division(dividend, divisor) {
None => println!("{} / {} failed!", dividend, divisor),
Some(quotient) => {
println!("{} / {} = {}", dividend, divisor, quotient)
},
}
}
fn main() {
try_division(4, 2);
try_division(1, 0);
// Bi | identifier_body |
microphone-outline.js | import React from 'react'
import Icon from 'react-icon-base'
const TiMicrophoneOutline = props => (
<Icon viewBox="0 0 40 40" {...props}>
<g><path d="m20 26.7c-3.7 0-6.7-3-6.7-6.7v-10c0-3.7 3-6.7 6.7-6.7s6.7 3 6.7 6.7v10c0 3.7-3 6.7-6.7 6.7z m0-20c-1.8 0-3.3 1.5-3.3 3.3v10c0 1.8 1.5 3.3 3.3 3.3s3.3-1.5 3.3-3.3v-10c0-1.8-1.5-3.3-3.3-3.3z m11.7 13.3v-3.3c0-1-0.8-1.7-1.7-1.7s-1.7 0.7-1.7 1.7v3.3c0 4.6-3.7 8.3-8.3 8.3s-8.3-3.7-8.3-8.3v-3.3c0-1-0.8-1.7-1.7-1.7s-1.7 0.7-1.7 1.7v3.3c0 5.9 4.4 10.7 10 11.5v1.8h-5c-0.9 0-1.6 0.8-1.6 1.7s0.7 1.7 1.6 1.7h13.4c0.9 0 1.6-0.8 1.6-1.7s-0.7-1.7-1.6-1.7h-5v-1.8c5.6-0.8 10-5.6 10-11.5z"/></g>
</Icon> | )
export default TiMicrophoneOutline | random_line_split | |
regions-early-bound-used-in-bound-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a> {
fn get(&self) -> &'a int;
}
struct Box<'a> {
t: &'a int
}
impl<'a> Copy for Box<'a> {}
impl<'a> GetRef<'a> for Box<'a> {
fn get(&self) -> &'a int {
self.t
}
}
impl<'a> Box<'a> {
fn add<'b,G:GetRef<'b>>(&self, g2: G) -> int |
}
pub fn main() {
let b1 = Box { t: &3 };
assert_eq!(b1.add(b1), 6);
}
| {
*self.t + *g2.get()
} | identifier_body |
regions-early-bound-used-in-bound-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a> {
fn get(&self) -> &'a int;
}
struct Box<'a> {
t: &'a int
}
impl<'a> Copy for Box<'a> {}
impl<'a> GetRef<'a> for Box<'a> {
fn get(&self) -> &'a int {
self.t
}
}
impl<'a> Box<'a> {
fn add<'b,G:GetRef<'b>>(&self, g2: G) -> int {
*self.t + *g2.get()
}
}
pub fn main() {
let b1 = Box { t: &3 };
assert_eq!(b1.add(b1), 6);
} | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | random_line_split |
regions-early-bound-used-in-bound-method.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that you can use a fn lifetime parameter as part of
// the value for a type parameter in a bound.
trait GetRef<'a> {
fn get(&self) -> &'a int;
}
struct | <'a> {
t: &'a int
}
impl<'a> Copy for Box<'a> {}
impl<'a> GetRef<'a> for Box<'a> {
fn get(&self) -> &'a int {
self.t
}
}
impl<'a> Box<'a> {
fn add<'b,G:GetRef<'b>>(&self, g2: G) -> int {
*self.t + *g2.get()
}
}
pub fn main() {
let b1 = Box { t: &3 };
assert_eq!(b1.add(b1), 6);
}
| Box | identifier_name |
lib.rs | //! # r2d2-mysql
//! MySQL support for the r2d2 connection pool (Rust) . see [`r2d2`](http://github.com/sfackler/r2d2.git) .
//!
//! #### Install
//! Just include another `[dependencies.*]` section into your Cargo.toml:
//!
//! ```toml
//! [dependencies.r2d2_mysql]
//! git = "https://github.com/outersky/r2d2-mysql"
//! version="*"
//! ```
//! #### Sample
//!
//! ```
//! extern crate mysql;
//! extern crate r2d2_mysql;
//! extern crate r2d2;
//!
//! use std::env;
//! use std::sync::Arc;
//! use std::thread;
//! use mysql::{Opts,OptsBuilder};
//! use mysql::prelude::Queryable;
//! use r2d2_mysql::MysqlConnectionManager;
//!
//! fn main() {
//! let url = env::var("DATABASE_URL").unwrap();
//! let opts = Opts::from_url(&url).unwrap();
//! let builder = OptsBuilder::from_opts(opts);
//! let manager = MysqlConnectionManager::new(builder);
//! let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
//!
//! let mut tasks = vec![];
//!
//! for _ in 0..3 {
//! let pool = pool.clone();
//! let th = thread::spawn(move || {
//! let mut conn = pool.get()
//! .map_err(|err| {
//! println!(
//! "get connection from pool error in line:{} ! error: {:?}",
//! line!(),
//! err
//! )
//! })
//! .unwrap();
//! let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
//! println!("execute query error in line:{} ! error: {:?}", line!(), err)
//! });
//! });
//! tasks.push(th);
//! }
//!
//! for th in tasks {
//! let _ = th.join();
//! }
//! }
//! ```
//!
#![doc(html_root_url = "http://outersky.github.io/r2d2-mysql/doc/v0.2.0/r2d2_mysql/")]
#![crate_name = "r2d2_mysql"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
pub extern crate mysql;
pub extern crate r2d2;
pub mod pool;
pub use pool::MysqlConnectionManager;
#[cfg(test)]
mod test {
use mysql::{Opts, OptsBuilder};
use mysql::prelude::Queryable;
use r2d2;
use std::env;
use std::sync::Arc;
use std::thread;
use super::MysqlConnectionManager;
#[test]
fn | () {
let url = env::var("DATABASE_URL").unwrap();
let opts = Opts::from_url(&url).unwrap();
let builder = OptsBuilder::from_opts(opts);
let manager = MysqlConnectionManager::new(builder);
let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
let mut tasks = vec![];
for _ in 0..3 {
let pool = pool.clone();
let th = thread::spawn(move || {
let mut conn = pool.get()
.map_err(|err| {
println!(
"get connection from pool error in line:{} ! error: {:?}",
line!(),
err
)
})
.unwrap();
let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
println!("execute query error in line:{} ! error: {:?}", line!(), err)
});
});
tasks.push(th);
}
for th in tasks {
let _ = th.join();
}
}
}
| query_pool | identifier_name |
lib.rs | //! # r2d2-mysql
//! MySQL support for the r2d2 connection pool (Rust) . see [`r2d2`](http://github.com/sfackler/r2d2.git) .
//!
//! #### Install
//! Just include another `[dependencies.*]` section into your Cargo.toml:
//!
//! ```toml
//! [dependencies.r2d2_mysql]
//! git = "https://github.com/outersky/r2d2-mysql"
//! version="*"
//! ```
//! #### Sample
//!
//! ```
//! extern crate mysql;
//! extern crate r2d2_mysql;
//! extern crate r2d2;
//!
//! use std::env;
//! use std::sync::Arc;
//! use std::thread;
//! use mysql::{Opts,OptsBuilder};
//! use mysql::prelude::Queryable;
//! use r2d2_mysql::MysqlConnectionManager;
//!
//! fn main() {
//! let url = env::var("DATABASE_URL").unwrap();
//! let opts = Opts::from_url(&url).unwrap();
//! let builder = OptsBuilder::from_opts(opts);
//! let manager = MysqlConnectionManager::new(builder);
//! let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
//!
//! let mut tasks = vec![];
//!
//! for _ in 0..3 {
//! let pool = pool.clone();
//! let th = thread::spawn(move || {
//! let mut conn = pool.get()
//! .map_err(|err| {
//! println!(
//! "get connection from pool error in line:{} ! error: {:?}",
//! line!(),
//! err
//! )
//! })
//! .unwrap();
//! let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
//! println!("execute query error in line:{} ! error: {:?}", line!(), err)
//! });
//! });
//! tasks.push(th);
//! }
//!
//! for th in tasks {
//! let _ = th.join();
//! }
//! }
//! ```
//!
#![doc(html_root_url = "http://outersky.github.io/r2d2-mysql/doc/v0.2.0/r2d2_mysql/")]
#![crate_name = "r2d2_mysql"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
pub extern crate mysql;
pub extern crate r2d2;
pub mod pool;
pub use pool::MysqlConnectionManager;
#[cfg(test)]
mod test {
use mysql::{Opts, OptsBuilder};
use mysql::prelude::Queryable;
use r2d2;
use std::env;
use std::sync::Arc;
use std::thread;
use super::MysqlConnectionManager;
#[test]
fn query_pool() |
}
| {
let url = env::var("DATABASE_URL").unwrap();
let opts = Opts::from_url(&url).unwrap();
let builder = OptsBuilder::from_opts(opts);
let manager = MysqlConnectionManager::new(builder);
let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
let mut tasks = vec![];
for _ in 0..3 {
let pool = pool.clone();
let th = thread::spawn(move || {
let mut conn = pool.get()
.map_err(|err| {
println!(
"get connection from pool error in line:{} ! error: {:?}",
line!(),
err
)
})
.unwrap();
let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
println!("execute query error in line:{} ! error: {:?}", line!(), err)
});
});
tasks.push(th);
}
for th in tasks {
let _ = th.join();
}
} | identifier_body |
lib.rs | //! # r2d2-mysql
//! MySQL support for the r2d2 connection pool (Rust) . see [`r2d2`](http://github.com/sfackler/r2d2.git) .
//!
//! #### Install
//! Just include another `[dependencies.*]` section into your Cargo.toml:
//!
//! ```toml
//! [dependencies.r2d2_mysql]
//! git = "https://github.com/outersky/r2d2-mysql"
//! version="*"
//! ```
//! #### Sample
//!
//! ```
//! extern crate mysql;
//! extern crate r2d2_mysql;
//! extern crate r2d2;
//!
//! use std::env;
//! use std::sync::Arc;
//! use std::thread;
//! use mysql::{Opts,OptsBuilder};
//! use mysql::prelude::Queryable;
//! use r2d2_mysql::MysqlConnectionManager;
//!
//! fn main() {
//! let url = env::var("DATABASE_URL").unwrap();
//! let opts = Opts::from_url(&url).unwrap();
//! let builder = OptsBuilder::from_opts(opts);
//! let manager = MysqlConnectionManager::new(builder);
//! let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
//!
//! let mut tasks = vec![];
//!
//! for _ in 0..3 {
//! let pool = pool.clone();
//! let th = thread::spawn(move || {
//! let mut conn = pool.get()
//! .map_err(|err| {
//! println!(
//! "get connection from pool error in line:{} ! error: {:?}",
//! line!(),
//! err
//! )
//! })
//! .unwrap();
//! let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
//! println!("execute query error in line:{} ! error: {:?}", line!(), err)
//! });
//! });
//! tasks.push(th);
//! }
//!
//! for th in tasks {
//! let _ = th.join();
//! }
//! } | //! ```
//!
#![doc(html_root_url = "http://outersky.github.io/r2d2-mysql/doc/v0.2.0/r2d2_mysql/")]
#![crate_name = "r2d2_mysql"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
pub extern crate mysql;
pub extern crate r2d2;
pub mod pool;
pub use pool::MysqlConnectionManager;
#[cfg(test)]
mod test {
use mysql::{Opts, OptsBuilder};
use mysql::prelude::Queryable;
use r2d2;
use std::env;
use std::sync::Arc;
use std::thread;
use super::MysqlConnectionManager;
#[test]
fn query_pool() {
let url = env::var("DATABASE_URL").unwrap();
let opts = Opts::from_url(&url).unwrap();
let builder = OptsBuilder::from_opts(opts);
let manager = MysqlConnectionManager::new(builder);
let pool = Arc::new(r2d2::Pool::builder().max_size(4).build(manager).unwrap());
let mut tasks = vec![];
for _ in 0..3 {
let pool = pool.clone();
let th = thread::spawn(move || {
let mut conn = pool.get()
.map_err(|err| {
println!(
"get connection from pool error in line:{} ! error: {:?}",
line!(),
err
)
})
.unwrap();
let _ = conn.query("SELECT version()").map(|_: Vec<String>| ()).map_err(|err| {
println!("execute query error in line:{} ! error: {:?}", line!(), err)
});
});
tasks.push(th);
}
for th in tasks {
let _ = th.join();
}
}
} | random_line_split | |
fi.py | # -*- coding: utf-8 -*-
# $Id: fi.py 7119 2011-09-02 13:00:23Z milde $
# Author: Asko Soukka <asko.soukka@iki.fi>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'huomio': 'attention',
'varo': 'caution',
'code (translation required)': 'code',
'vaara': 'danger',
'virhe': 'error',
'vihje': 'hint',
't\u00e4rke\u00e4\u00e4': 'important',
'huomautus': 'note',
'neuvo': 'tip',
'varoitus': 'warning',
'kehotus': 'admonition',
'sivupalkki': 'sidebar',
'aihe': 'topic',
'rivi': 'line-block',
'tasalevyinen': 'parsed-literal',
'ohje': 'rubric',
'epigraafi': 'epigraph',
'kohokohdat': 'highlights',
'lainaus': 'pull-quote',
'taulukko': 'table',
'csv-taulukko': 'csv-table',
'list-table (translation required)': 'list-table',
'compound (translation required)': 'compound',
'container (translation required)': 'container',
#u'kysymykset': u'questions',
'meta': 'meta',
'math (translation required)': 'math',
#u'kuvakartta': u'imagemap',
'kuva': 'image',
'kaavio': 'figure',
'sis\u00e4llyt\u00e4': 'include',
'raaka': 'raw',
'korvaa': 'replace',
'unicode': 'unicode',
'p\u00e4iv\u00e4ys': 'date',
'luokka': 'class',
'rooli': 'role',
'default-role (translation required)': 'default-role',
'title (translation required)': 'title',
'sis\u00e4llys': 'contents',
'kappale': 'sectnum',
'header (translation required)': 'header',
'footer (translation required)': 'footer',
#u'alaviitteet': u'footnotes',
#u'viitaukset': u'citations',
'target-notes (translation required)': 'target-notes'}
"""Finnish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'lyhennys': 'abbreviation',
'akronyymi': 'acronym', | 'alaindeksi': 'subscript',
'indeksi': 'subscript',
'yl\u00e4indeksi': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'korostus': 'emphasis',
'vahvistus': 'strong',
'tasalevyinen': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'kohde': 'target',
'uri-reference (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Finnish role names to canonical role names for interpreted text.
""" | 'kirjainsana': 'acronym',
'code (translation required)': 'code',
'hakemisto': 'index',
'luettelo': 'index', | random_line_split |
build.rs | #[cfg(feature = "with-syntex")]
mod inner {
extern crate syntex;
extern crate syntex_syntax as syntax;
use std::env;
use std::path::Path;
use self::syntax::codemap::Span;
use self::syntax::ext::base::{self, ExtCtxt};
use self::syntax::tokenstream::TokenTree;
pub fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
let mut registry = syntex::Registry::new(); | ($macro_name: ident, $name: ident) => {
fn $name<'cx>(
cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[TokenTree],
) -> Box<base::MacResult + 'cx> {
syntax::ext::quote::$name(cx, sp, tts)
}
registry.add_macro(stringify!($macro_name), $name);
}
}
register_quote_macro!(quote_ty, expand_quote_ty);
register_quote_macro!(quote_item, expand_quote_item);
register_quote_macro!(quote_tokens, expand_quote_tokens);
register_quote_macro!(quote_expr, expand_quote_expr);
let src = Path::new("src/lib.in.rs");
let dst = Path::new(&out_dir).join("lib.rs");
registry.expand("", &src, &dst).unwrap();
}
}
#[cfg(not(feature = "with-syntex"))]
mod inner {
pub fn main() {}
}
fn main() {
inner::main();
} |
macro_rules! register_quote_macro { | random_line_split |
build.rs | #[cfg(feature = "with-syntex")]
mod inner {
extern crate syntex;
extern crate syntex_syntax as syntax;
use std::env;
use std::path::Path;
use self::syntax::codemap::Span;
use self::syntax::ext::base::{self, ExtCtxt};
use self::syntax::tokenstream::TokenTree;
pub fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
let mut registry = syntex::Registry::new();
macro_rules! register_quote_macro {
($macro_name: ident, $name: ident) => {
fn $name<'cx>(
cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[TokenTree],
) -> Box<base::MacResult + 'cx> {
syntax::ext::quote::$name(cx, sp, tts)
}
registry.add_macro(stringify!($macro_name), $name);
}
}
register_quote_macro!(quote_ty, expand_quote_ty);
register_quote_macro!(quote_item, expand_quote_item);
register_quote_macro!(quote_tokens, expand_quote_tokens);
register_quote_macro!(quote_expr, expand_quote_expr);
let src = Path::new("src/lib.in.rs");
let dst = Path::new(&out_dir).join("lib.rs");
registry.expand("", &src, &dst).unwrap();
}
}
#[cfg(not(feature = "with-syntex"))]
mod inner {
pub fn main() |
}
fn main() {
inner::main();
}
| {} | identifier_body |
build.rs | #[cfg(feature = "with-syntex")]
mod inner {
extern crate syntex;
extern crate syntex_syntax as syntax;
use std::env;
use std::path::Path;
use self::syntax::codemap::Span;
use self::syntax::ext::base::{self, ExtCtxt};
use self::syntax::tokenstream::TokenTree;
pub fn | () {
let out_dir = env::var_os("OUT_DIR").unwrap();
let mut registry = syntex::Registry::new();
macro_rules! register_quote_macro {
($macro_name: ident, $name: ident) => {
fn $name<'cx>(
cx: &'cx mut ExtCtxt,
sp: Span,
tts: &[TokenTree],
) -> Box<base::MacResult + 'cx> {
syntax::ext::quote::$name(cx, sp, tts)
}
registry.add_macro(stringify!($macro_name), $name);
}
}
register_quote_macro!(quote_ty, expand_quote_ty);
register_quote_macro!(quote_item, expand_quote_item);
register_quote_macro!(quote_tokens, expand_quote_tokens);
register_quote_macro!(quote_expr, expand_quote_expr);
let src = Path::new("src/lib.in.rs");
let dst = Path::new(&out_dir).join("lib.rs");
registry.expand("", &src, &dst).unwrap();
}
}
#[cfg(not(feature = "with-syntex"))]
mod inner {
pub fn main() {}
}
fn main() {
inner::main();
}
| main | identifier_name |
AddForm.tsx | import { randomUUID } from 'node:crypto';
import { Fragment, useState, type ReactElement, type Dispatch as D, type SetStateAction as S, type MouseEvent } from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Button, Form, Modal, Input } from 'antd';
import type { FormInstance } from 'antd/es/form';
import type { Store } from 'antd/es/form/interface';
import style from './addForm.sass';
import { IDBSaveBilibiliLiveList } from '../reducers/live';
/* 添加一个直播间 */
function AddForm(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [form]: [FormInstance] = Form.useForm(); | let formValue: Store;
try {
formValue = await form.validateFields();
} catch (err) {
return console.error(err);
}
dispatch(IDBSaveBilibiliLiveList({
data: {
...formValue,
id: randomUUID()
}
}));
setVisible(false);
}
// 关闭窗口后重置表单
function handleAddModalClose(): void {
form.resetFields();
}
// 打开弹出层
function handleOpenAddModalClick(event: MouseEvent<HTMLButtonElement>): void {
setVisible(true);
}
// 关闭弹出层
function handleCloseAddModalClick(event: MouseEvent<HTMLButtonElement>): void {
setVisible(false);
}
return (
<Fragment>
<Button type="primary" onClick={ handleOpenAddModalClick }>添加直播间信息</Button>
<Modal title="添加B站直播间信息"
visible={ visible }
width={ 500 }
afterClose={ handleAddModalClose }
onOk={ handleAddRoomIdClick }
onCancel={ handleCloseAddModalClick }
>
<Form className="h-[120px]" form={ form } labelCol={{ span: 5 }} wrapperCol={{ span: 19 }}>
<Form.Item name="description"
label="直播间说明"
rules={ [{ required: true, message: '请填写直播间说明', whitespace: true }] }
>
<Input />
</Form.Item>
<Form.Item name="roomId"
label="直播间ID"
rules={ [{ required: true, message: '请填写直播间ID', whitespace: true }] }
>
<Input />
</Form.Item>
<p className={ style.tips }>直播间ID支持配置短ID。</p>
</Form>
</Modal>
</Fragment>
);
}
export default AddForm; | const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false);
// 添加一个直播间
async function handleAddRoomIdClick(event: MouseEvent<HTMLButtonElement>): Promise<void> { | random_line_split |
AddForm.tsx | import { randomUUID } from 'node:crypto';
import { Fragment, useState, type ReactElement, type Dispatch as D, type SetStateAction as S, type MouseEvent } from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Button, Form, Modal, Input } from 'antd';
import type { FormInstance } from 'antd/es/form';
import type { Store } from 'antd/es/form/interface';
import style from './addForm.sass';
import { IDBSaveBilibiliLiveList } from '../reducers/live';
/* 添加一个直播间 */
function AddForm(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [form]: [FormInstance] = Form.useForm();
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false);
// 添加一个直播间
async function handleAddRoomIdClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
let formValue: Store;
try {
formValue = await form.validateFields();
} catch (err) {
return console.error(err);
}
dispatch(IDBSaveBilibiliLiveList({
data: {
...formValue,
id: randomUUID()
}
}));
setVisible(false);
}
// 关闭窗口后重置表单
function handleAddModalClose(): void {
form.resetFields();
}
// 打开弹出层
function handleOpenAddModalClick(event: MouseEvent<HTMLButtonElement>): void {
setVisible(true);
}
// 关闭弹出层
function handleCloseAddModalClick(event: MouseEvent<HTMLButtonElement>): vo | e);
}
return (
<Fragment>
<Button type="primary" onClick={ handleOpenAddModalClick }>添加直播间信息</Button>
<Modal title="添加B站直播间信息"
visible={ visible }
width={ 500 }
afterClose={ handleAddModalClose }
onOk={ handleAddRoomIdClick }
onCancel={ handleCloseAddModalClick }
>
<Form className="h-[120px]" form={ form } labelCol={{ span: 5 }} wrapperCol={{ span: 19 }}>
<Form.Item name="description"
label="直播间说明"
rules={ [{ required: true, message: '请填写直播间说明', whitespace: true }] }
>
<Input />
</Form.Item>
<Form.Item name="roomId"
label="直播间ID"
rules={ [{ required: true, message: '请填写直播间ID', whitespace: true }] }
>
<Input />
</Form.Item>
<p className={ style.tips }>直播间ID支持配置短ID。</p>
</Form>
</Modal>
</Fragment>
);
}
export default AddForm; | id {
setVisible(fals | identifier_name |
AddForm.tsx | import { randomUUID } from 'node:crypto';
import { Fragment, useState, type ReactElement, type Dispatch as D, type SetStateAction as S, type MouseEvent } from 'react';
import { useDispatch } from 'react-redux';
import type { Dispatch } from '@reduxjs/toolkit';
import { Button, Form, Modal, Input } from 'antd';
import type { FormInstance } from 'antd/es/form';
import type { Store } from 'antd/es/form/interface';
import style from './addForm.sass';
import { IDBSaveBilibiliLiveList } from '../reducers/live';
/* 添加一个直播间 */
function AddForm(props: {}): ReactElement {
const dispatch: Dispatch = useDispatch();
const [form]: [FormInstance] = Form.useForm();
const [visible, setVisible]: [boolean, D<S<boolean>>] = useState(false);
// 添加一个直播间
async function handleAddRoomIdClick(event: MouseEvent<HTMLButtonElement>): Promise<void> {
let formValue: Store;
try {
formValue = await form.validateFields();
} catch (err) {
return console.error(err);
}
dispatch(IDBSaveBilibiliLiveList({
data: {
...formValue,
id: randomUUID()
}
}));
setVisible(false);
}
// 关闭窗口后重置表单
function handleAddModalClose(): void {
form.resetFields();
}
// 打开弹出层
fu | k(event: MouseEvent<HTMLButtonElement>): void {
setVisible(true);
}
// 关闭弹出层
function handleCloseAddModalClick(event: MouseEvent<HTMLButtonElement>): void {
setVisible(false);
}
return (
<Fragment>
<Button type="primary" onClick={ handleOpenAddModalClick }>添加直播间信息</Button>
<Modal title="添加B站直播间信息"
visible={ visible }
width={ 500 }
afterClose={ handleAddModalClose }
onOk={ handleAddRoomIdClick }
onCancel={ handleCloseAddModalClick }
>
<Form className="h-[120px]" form={ form } labelCol={{ span: 5 }} wrapperCol={{ span: 19 }}>
<Form.Item name="description"
label="直播间说明"
rules={ [{ required: true, message: '请填写直播间说明', whitespace: true }] }
>
<Input />
</Form.Item>
<Form.Item name="roomId"
label="直播间ID"
rules={ [{ required: true, message: '请填写直播间ID', whitespace: true }] }
>
<Input />
</Form.Item>
<p className={ style.tips }>直播间ID支持配置短ID。</p>
</Form>
</Modal>
</Fragment>
);
}
export default AddForm; | nction handleOpenAddModalClic | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.