file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
|---|---|---|---|---|
controlPanel.go | package main
import (
"fmt"
"golang.org/x/exp/shiny/gesture"
"golang.org/x/exp/shiny/iconvg"
"golang.org/x/exp/shiny/materialdesign/icons"
"golang.org/x/exp/shiny/screen"
"golang.org/x/exp/shiny/unit"
"golang.org/x/exp/shiny/widget"
"golang.org/x/exp/shiny/widget/node"
"golang.org/x/exp/shiny/widget/theme"
"golang.org/x/image/colornames"
"golang.org/x/image/math/f64"
"golang.org/x/mobile/event/lifecycle"
"golang.org/x/mobile/event/mouse"
"golang.org/x/mobile/event/paint"
"golang.org/x/mobile/event/size"
"image"
"image/draw"
"log"
"os"
)
type ControlPanel struct {
s screen.Screen
root *widget.Sheet
world *State
w screen.Window
r *RenderState
}
type panelUpdate struct {
}
type Icon struct {
node.LeafEmbed
icon []byte
z iconvg.Rasterizer
}
func NewIcon(icon []byte) *Icon {
w := &Icon{
icon: icon,
}
w.Wrapper = w
return w
}
func (w *Icon) Measure(t *theme.Theme, widthHint, heightHint int) {
px := t.Pixels(unit.Ems(2)).Ceil()
w.MeasuredSize = image.Point{X: px, Y: px}
}
func (w *Icon) PaintBase(ctx *node.PaintBaseContext, origin image.Point) error {
w.Marks.UnmarkNeedsPaintBase()
w.z.SetDstImage(ctx.Dst, w.Rect.Add(origin), draw.Over)
return iconvg.Decode(&w.z, w.icon, nil)
}
type Ticker struct {
node.ShellEmbed
tick func() string
label *widget.Label
}
func (p *ControlPanel) NewTicker(text string, tick func() string) *Ticker {
w := &Ticker{
tick: tick,
}
w.Wrapper = w
flow := widget.NewFlow(widget.AxisHorizontal)
flow.Insert(widget.NewSizer(unit.Ems(0.5), unit.Value{}, nil), nil)
flow.Insert(widget.NewLabel(fmt.Sprintf("%-30s", text)), nil)
flow.Insert(widget.NewSizer(unit.Ems(0.5), unit.Value{}, nil), nil)
w.label = widget.NewLabel(fmt.Sprintf("%30s", ""))
flow.Insert(w.label, nil)
uniform := widget.NewUniform(theme.StaticColor(colornames.Aqua), widget.NewPadder(widget.AxisBoth, unit.Ems(0.5), flow))
padding := widget.NewPadder(widget.AxisBoth, unit.Ems(0.5), uniform)
w.Insert(padding, nil)
go func() {
for {
newString := w.tick()
w.label.Text = fmt.Sprintf("%-30s", newString)
w.label.Mark(node.MarkNeedsPaintBase)
p.w.Send(panelUpdate{})
}
}()
return w
}
type Button struct {
node.ShellEmbed
icon []byte
onClick func()
z iconvg.Rasterizer
uniform *widget.Uniform
label *widget.Label
pressed bool
}
func (p *ControlPanel) NewButton(text string, icon []byte, toggle bool, onClick func() string) *Button {
w := &Button{
icon: icon,
}
fn := func() {
w.pressed = !w.pressed
w.label.Text = fmt.Sprintf("%-30s", onClick())
w.label.Mark(node.MarkNeedsPaintBase)
if w.pressed || !toggle {
w.uniform.ThemeColor = theme.StaticColor(colornames.Lightgreen)
} else {
w.uniform.ThemeColor = theme.StaticColor(colornames.Lightsalmon)
}
w.uniform.Mark(node.MarkNeedsPaintBase)
p.w.Send(panelUpdate{})
}
w.onClick = fn
w.Wrapper = w
flow := widget.NewFlow(widget.AxisHorizontal)
flow.Insert(widget.NewSizer(unit.Ems(0.5), unit.Value{}, nil), nil)
w.label = widget.NewLabel(fmt.Sprintf("%-30s", text))
flow.Insert(w.label, nil)
flow.Insert(widget.NewSizer(unit.Ems(0.5), unit.Value{}, nil), nil)
flow.Insert(NewIcon(icon), nil)
w.uniform = widget.NewUniform(theme.StaticColor(colornames.Lightsalmon), flow)
padding := widget.NewPadder(widget.AxisBoth, unit.Ems(0.5), w.uniform)
w.Insert(padding, nil)
return w
}
func (w *Button) OnInputEvent(e interface{}, origin image.Point) node.EventHandled {
switch e := e.(type) {
case gesture.Event:
if e.Type != gesture.TypeTap {
break
}
if w.onClick != nil {
w.uniform.ThemeColor = theme.StaticColor(colornames.Orange)
w.uniform.Mark(node.MarkNeedsPaintBase)
go w.onClick()
}
return node.Handled
}
return node.NotHandled
}
func (p *ControlPanel) start(r *RenderState) {
p.world = r.world
p.s = r.s
p.r = r
controls := widget.NewFlow(widget.AxisVertical)
tickers := widget.NewFlow(widget.AxisVertical)
p.root = widget.NewSheet(
widget.NewUniform(theme.StaticColor(colornames.White),
widget.NewPadder(widget.AxisBoth, unit.Ems(1), widget.NewFlow(widget.AxisHorizontal, controls, widget.NewSizer(unit.Ems(1), unit.Value{}, nil), tickers))))
controls.Insert(p.NewGenerateFlowFieldsButton(), nil)
controls.Insert(p.NewStartSimulationButton(), nil)
controls.Insert(p.NewHighlightActiveButton(), nil)
controls.Insert(p.NewExitButton(), nil)
controls.Insert(p.NewSaveFlowFieldsButton(), nil)
controls.Insert(p.NewLoadFlowFieldsButton(), nil)
controls.Insert(p.NewCloseAllButton(), nil)
tickers.Insert(p.NewTicker("Total People:", func() string { return fmt.Sprintf("%d", <-p.world.peopleCurrentChan) }), nil)
tickers.Insert(p.NewTicker("Total People Added:", func() string { return fmt.Sprintf("%d", <-p.world.peopleAddedChan) }), nil)
tickers.Insert(p.NewTicker("Simulation Time:", func() string { return (<-p.world.simulationTimeChan).String() }), nil)
tickers.Insert(p.NewTicker("Current Active People:", func() string { return fmt.Sprintf("%d", <-p.world.currentSendersChan) }), nil)
tickers.Insert(p.NewNetworkTickers(), nil)
for i := range p.world.scenario.Destinations {
dest := &p.world.scenario.Destinations[i]
button := p.NewButton(fmt.Sprintf("Close %s", dest.Name), icons.NavigationClose, true, func() string {
if dest.isClosed() {
dest.Open()
return fmt.Sprintf("Close %s", dest.Name)
} else {
dest.Close()
return fmt.Sprintf("Reopen %s", dest.Name)
}
})
controls.Insert(button, nil)
}
newtheme := theme.Theme{}
p.root.Measure(&newtheme, -1, -1)
go func() {
//widget.RunWindow(p.s, p.root, nil)
err := p.RunWindow(&widget.RunWindowOptions{
NewWindowOptions: screen.NewWindowOptions{
Title: "Simulation control",
Width: p.root.MeasuredSize.X,
Height: p.root.MeasuredSize.Y,
},
Theme: newtheme})
if err != nil {
log.Fatalln("error: ", err)
}
}()
}
func (p *ControlPanel) NewGenerateFlowFieldsButton() *Button {
pressed := false
return p.NewButton("Generate Flow Fields", icons.MapsMap, false, func() string {
if pressed {
return "Flow Fields Generated"
}
pressed = true
log.Println("Generate Flow Fields")
InitFlowFields()
for _, dest := range p.world.scenario.Destinations {
log.Println("Flow field for", dest.Name, "starting")
err := p.world.GenerateFlowField(dest.ID)
if err != nil {
log.Fatal("cannot make flow field for", dest)
}
log.Println("Flow field for", dest.Name, "done")
}
log.Println("Flow fields done")
return "Flow Fields Generated"
})
}
func (p *ControlPanel) NewLoadFlowFieldsButton() *Button {
pressed := false
return p.NewButton("Load Flow Fields From File", icons.FileFileDownload, false, func() string {
if pressed {
return "Flow Fields Loaded"
}
pressed = true
log.Println("Load Flow Fields")
for _, dest := range p.world.scenario.Destinations {
log.Println("Flow field for", dest.Name, "saving")
err := p.world.LoadFlowField(dest.ID)
if err != nil {
log.Println("error loading flow field", err)
}
}
log.Println("Loading Flow Fields done")
return "Flow Fields Loaded"
})
}
func (p *ControlPanel) NewSaveFlowFieldsButton() *Button {
return p.NewButton("Save Flow Fields", icons.ContentSave, false, func() string {
log.Println("Save Flow Fields")
for _, dest := range p.world.scenario.Destinations {
log.Println("Flow field for", dest.Name, "saving")
err := p.world.SaveFlowField(dest.ID)
if err != nil {
log.Println("error saving flow field", err)
return "Retry Save Flow Fields"
}
}
log.Println("Saving Flow Fields done")
return "Flow Fields Saved"
})
}
func (p *ControlPanel) NewStartSimulationButton() *Button {
pressed := false
return p.NewButton("Run Simulation", icons.ActionBuild, true, func() string {
if pressed {
pressed = false
log.Println("Pausing Simulation")
p.world.playPauseChan <- true
return "Play Simulation"
}
pressed = true
log.Println("Starting Simulation")
p.world.playPauseChan <- true
return "Pause Simulation"
})
}
func (p *ControlPanel) | () *Button {
return p.NewButton("Highlight Active AI", icons.ActionFavorite, true, func() string {
p.world.highlightActive = !p.world.highlightActive
p.r.w.Send(UpdateEvent{p.world})
return "Highlight Active AI"
})
}
func (p *ControlPanel) NewExitButton() *Button {
clicks := 3
return p.NewButton(fmt.Sprintf("Exit - click %d time(s)", clicks), icons.ActionExitToApp, false, func() string {
clicks--
if clicks < 1 {
os.Exit(0)
}
return fmt.Sprintf("Exit - click %d time(s)", clicks)
})
}
func (p *ControlPanel) NewCloseAllButton() *Button {
open := true
button := p.NewButton("Close all", icons.NavigationClose, true, func() string {
for i := 0; i < len(p.world.scenario.Destinations); i++ {
dest := &p.world.scenario.Destinations[i]
if dest.ID == p.world.scenario.Exit.ID {
continue
}
if open {
dest.Close()
} else {
dest.Open()
}
}
open = !open
if open {
return "Close all"
}
return "Open all"
})
return button
}
func (p *ControlPanel) NewNetworkTickers() node.Node {
vf := widget.NewFlow(widget.AxisVertical)
vf.Insert(p.NewTicker("Total updates:", func() string { return fmt.Sprintf("%d", <-p.world.totalSendsChan) }), nil)
queued := 0
queueTicker := p.NewTicker("Queued Updates:", func() string {
i := <-networkStats.queuedUpdates
queued += i
return fmt.Sprintf("%d", queued)
})
vf.Insert(queueTicker, nil)
running := 0
runningTicker := p.NewTicker("Running Updates:", func() string {
i := <-networkStats.runningUpdates
if i {
running++
} else {
running--
}
return fmt.Sprintf("%d", running)
})
vf.Insert(runningTicker, nil)
return vf
}
// Slightly modified from widget.RunWindow
func (p *ControlPanel) RunWindow(opts *widget.RunWindowOptions) error {
var (
nwo *screen.NewWindowOptions
t *theme.Theme
)
if opts != nil {
nwo = &opts.NewWindowOptions
t = &opts.Theme
}
var err error
p.w, err = p.s.NewWindow(nwo)
if err != nil {
return err
}
defer p.w.Release()
paintPending := false
gef := gesture.EventFilter{EventDeque: p.w}
for {
e := p.w.NextEvent()
if e = gef.Filter(e); e == nil {
continue
}
switch e := e.(type) {
case lifecycle.Event:
p.root.OnLifecycleEvent(e)
if e.To == lifecycle.StageDead {
return nil
}
case gesture.Event, mouse.Event:
p.root.OnInputEvent(e, image.Point{})
case paint.Event:
ctx := &node.PaintContext{
Theme: t,
Screen: p.s,
Drawer: p.w,
Src2Dst: f64.Aff3{
1, 0, 0,
0, 1, 0,
},
}
if err := p.root.Paint(ctx, image.Point{}); err != nil {
return err
}
p.w.Publish()
paintPending = false
case size.Event:
if dpi := float64(e.PixelsPerPt) * unit.PointsPerInch; dpi != t.GetDPI() {
newT := new(theme.Theme)
if t != nil {
*newT = *t
}
newT.DPI = dpi
t = newT
}
windowSize := e.Size()
p.root.Measure(t, windowSize.X, windowSize.Y)
p.root.Wrappee().Rect = e.Bounds()
p.root.Layout(t)
// TODO: call Mark(node.MarkNeedsPaint)?
case panelUpdate:
case error:
return e
}
if !paintPending && p.root.Wrappee().Marks.NeedsPaint() {
paintPending = true
p.w.Send(paint.Event{})
}
}
}
| NewHighlightActiveButton | identifier_name |
timer.rs | use std::fmt;
use std::mem;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll};
use std::time::Instant;
use std::future::Future;
use super::AtomicWaker;
use super::{global, ArcList, Heap, HeapTimer, Node, Slot};
/// A "timer heap" used to power separately owned instances of `Delay`.
///
/// This timer is implemented as a priority-queue-based heap. Each `Timer`
/// contains a few primary methods with which to drive it:
///
/// * `next_event` indicates how long the ambient system needs to sleep until it
/// invokes further processing on a `Timer`
/// * `advance_to` is what actually fires timers on the `Timer`, and should be
/// called essentially every iteration of the event loop, or when the time
/// specified by `next_event` has elapsed.
/// * The `Future` implementation for `Timer` is used to process incoming timer
/// updates and requests. This is used to schedule new timeouts, update
/// existing ones, or delete existing timeouts. The `Future` implementation
/// will never resolve, but it'll schedule notifications of when to wake up
/// and process more messages.
///
/// Note that if you're using this crate you probably don't need to use a
/// `Timer` as there is a global one already available for you, running on a
/// helper thread. If this isn't desirable, though, then the
/// `TimerHandle::set_as_global_fallback` method can be used instead!
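///
/// # Example
///
/// A minimal sketch (illustrative only, hence marked `ignore`) of driving a
/// `Timer` by hand with the methods described above:
///
/// ```ignore
/// let mut timer = Timer::new();
/// loop {
///     // Sleep until the earliest scheduled timeout, if there is one.
///     if let Some(at) = timer.next_event() {
///         std::thread::sleep(at.saturating_duration_since(std::time::Instant::now()));
///     }
///     // Fire everything that is due at or before now. New timeouts are
///     // ingested by polling the `Future` impl, which is not shown here.
///     timer.advance();
/// }
/// ```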
pub struct Timer {
inner: Arc<Inner>,
timer_heap: Heap<HeapTimer>,
}
/// A handle to a `Timer` which is used to create instances of a `Delay`.
#[derive(Clone)]
pub struct TimerHandle {
pub(crate) inner: Weak<Inner>,
}
pub(crate) struct Inner {
/// List of updates the `Timer` needs to process
pub(crate) list: ArcList<ScheduledTimer>,
/// The blocked `Timer` task, notified whenever the `list` above is updated.
pub(crate) waker: AtomicWaker,
}
/// Shared state between the `Timer` and a `Delay`.
pub(crate) struct ScheduledTimer {
pub(crate) waker: AtomicWaker,
// The lowest bit here is whether the timer has fired or not, the second
// lowest bit is whether the timer has been invalidated, and all the other
// bits are the "generation" of the timer which is reset during the `reset`
// function. Only timers for a matching generation are fired.
pub(crate) state: AtomicUsize,
pub(crate) inner: Weak<Inner>,
pub(crate) at: Mutex<Option<Instant>>,
// TODO: this is only accessed by the timer thread, should have a more
// lightweight protection than a `Mutex`
pub(crate) slot: Mutex<Option<Slot>>,
}
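// Illustrative only (an added sketch, not part of the original source): how a
// `state` word loaded from the field above would be unpacked, following the
// bit layout described in the comment.
//
//     let fired       = state & 0b01 != 0;
//     let invalidated = state & 0b10 != 0;
//     let generation  = state >> 2;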
impl Timer {
/// Creates a new timer heap ready to create new timers.
pub fn new() -> Timer {
Timer {
inner: Arc::new(Inner {
list: ArcList::new(),
waker: AtomicWaker::new(),
}),
timer_heap: Heap::new(),
}
}
/// Returns a handle to this timer heap, used to create new timeouts.
pub fn handle(&self) -> TimerHandle {
TimerHandle {
inner: Arc::downgrade(&self.inner),
}
}
/// Returns the time at which this timer next needs to be invoked with
/// `advance_to`.
///
/// Event loops or threads typically want to sleep until the specified
/// instant.
pub fn next_event(&self) -> Option<Instant> {
self.timer_heap.peek().map(|t| t.at)
}
/// Process any timers which are supposed to fire at or before the current
/// instant.
///
/// This method is equivalent to `self.advance_to(Instant::now())`.
pub fn advance(&mut self) {
self.advance_to(Instant::now())
}
/// Process any timers which are supposed to fire at or before the specified `now`.
///
/// This method should be called on `Timer` periodically to advance the
/// internal state and process any pending timers which need to fire.
pub fn advance_to(&mut self, now: Instant) {
loop {
match self.timer_heap.peek() {
Some(head) if head.at <= now => {}
Some(_) => break,
None => break,
};
// Flag the timer as fired and then notify its task, if any, that's
// blocked.
let heap_timer = self.timer_heap.pop().unwrap();
*heap_timer.node.slot.lock().unwrap() = None;
let bits = heap_timer.gen << 2;
match heap_timer
.node
.state
.compare_exchange(bits, bits | 0b01, SeqCst, SeqCst)
{
Ok(_) => heap_timer.node.waker.wake(),
Err(_b) => {}
}
}
}
/// Either updates the given timer node's existing heap entry to fire at
/// `at`, or pushes a new heap entry for it that fires at `at`.
fn update_or_add(&mut self, at: Instant, node: Arc<Node<ScheduledTimer>>) {
// TODO: avoid remove + push and instead just do one sift of the heap?
// In theory we could update it in place and then do the percolation
// as necessary
let gen = node.state.load(SeqCst) >> 2;
let mut slot = node.slot.lock().unwrap();
if let Some(heap_slot) = slot.take() {
self.timer_heap.remove(heap_slot);
}
*slot = Some(self.timer_heap.push(HeapTimer {
at,
gen,
node: node.clone(),
}));
}
fn remove(&mut self, node: Arc<Node<ScheduledTimer>>) {
// If this node is still around and it's still got a registered timer,
// then we jettison it from the timer heap.
let mut slot = node.slot.lock().unwrap();
let heap_slot = match slot.take() {
Some(slot) => slot,
None => return,
};
self.timer_heap.remove(heap_slot);
}
fn invalidate(&mut self, node: Arc<Node<ScheduledTimer>>) {
node.state.fetch_or(0b10, SeqCst);
node.waker.wake();
}
}
impl Future for Timer {
type Output = ();
fn | (mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).waker.register(cx.waker());
let mut list = self.inner.list.take();
while let Some(node) = list.pop() {
let at = *node.at.lock().unwrap();
match at {
Some(at) => self.update_or_add(at, node),
None => self.remove(node),
}
}
Poll::Pending
}
}
impl Drop for Timer {
fn drop(&mut self) {
// Seal off our list to prevent any more updates from getting pushed on.
// Any timer which sees an error from the push will immediately become
// inert.
let mut list = self.inner.list.take_and_seal();
// Now that we'll never receive another timer, drain the list of all
// updates and also drain our heap of all active timers, invalidating
// everything.
while let Some(t) = list.pop() {
self.invalidate(t);
}
while let Some(t) = self.timer_heap.pop() {
self.invalidate(t.node);
}
}
}
impl fmt::Debug for Timer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("Timer").field("heap", &"...").finish()
}
}
impl Default for Timer {
fn default() -> Self {
Self::new()
}
}
static HANDLE_FALLBACK: AtomicUsize = AtomicUsize::new(0);
/// Error returned from `TimerHandle::set_as_global_fallback`.
#[derive(Clone, Debug)]
struct SetDefaultError(());
impl TimerHandle {
/// Configures this timer handle to be the one returned by
/// `TimerHandle::default`.
///
/// By default a global thread is initialized on the first call to
/// `TimerHandle::default`. This first call can happen transitively through
/// `Delay::new`. If, however, that hasn't happened yet then the global
/// default timer handle can be configured through this method.
///
/// This method can be used to prevent the global helper thread from
/// spawning. If this method is successful then the global helper thread
/// will never get spun up.
///
/// On success this timer handle will have installed itself globally to be
/// used as the return value for `TimerHandle::default` unless otherwise
/// specified.
///
/// # Errors
///
/// If another thread has already called `set_as_global_fallback` or this
/// thread otherwise loses a race to call this method then it will fail
/// returning an error. Once a call to `set_as_global_fallback` is
/// successful then no future calls may succeed.
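///
/// # Example
///
/// A minimal sketch (illustrative only, hence `ignore`) of installing a
/// custom timer as the global fallback before any `Delay` is created:
///
/// ```ignore
/// let timer = Timer::new();
/// timer
///     .handle()
///     .set_as_global_fallback()
///     .expect("global fallback already configured");
/// // `timer` must still be driven (polled and advanced) somewhere for
/// // delays created against the fallback handle to make progress.
/// ```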
fn set_as_global_fallback(self) -> Result<(), SetDefaultError> {
unsafe {
let val = self.into_usize();
match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) {
Ok(_) => Ok(()),
Err(_) => {
drop(TimerHandle::from_usize(val));
Err(SetDefaultError(()))
}
}
}
}
fn into_usize(self) -> usize {
unsafe { mem::transmute::<Weak<Inner>, usize>(self.inner) }
}
unsafe fn from_usize(val: usize) -> TimerHandle {
let inner = mem::transmute::<usize, Weak<Inner>>(val);
TimerHandle { inner }
}
}
impl Default for TimerHandle {
fn default() -> TimerHandle {
let mut fallback = HANDLE_FALLBACK.load(SeqCst);
// If the fallback hasn't been previously initialized then let's spin
// up a helper thread and try to initialize with that. If we can't
// actually create a helper thread then we'll just return a "defunct"
// handle which will return errors whenever timer objects attempt to
// associate with it.
if fallback == 0 {
let helper = match global::HelperThread::new() {
Ok(helper) => helper,
Err(_) => return TimerHandle { inner: Weak::new() },
};
// If we successfully set ourselves as the actual fallback then we
// want to `forget` the helper thread to ensure that it persists
// globally. If we fail to set ourselves as the fallback that means
// that someone was racing with this call to
// `TimerHandle::default`. They ended up winning so we'll destroy
// our helper thread (which shuts down the thread) and reload the
// fallback.
if helper.handle().set_as_global_fallback().is_ok() {
let ret = helper.handle();
helper.forget();
return ret;
}
fallback = HANDLE_FALLBACK.load(SeqCst);
}
// At this point our fallback handle global was configured so we use
// its value to reify a handle, clone it, and then forget our reified
// handle as we don't actually have an owning reference to it.
assert!(fallback != 0);
unsafe {
let handle = TimerHandle::from_usize(fallback);
let ret = handle.clone();
let _ = handle.into_usize();
ret
}
}
}
impl fmt::Debug for TimerHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("TimerHandle")
.field("inner", &"...")
.finish()
}
}
| poll | identifier_name |
timer.rs | use std::fmt;
use std::mem;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll};
use std::time::Instant;
use std::future::Future;
use super::AtomicWaker;
use super::{global, ArcList, Heap, HeapTimer, Node, Slot};
/// A "timer heap" used to power separately owned instances of `Delay`.
///
/// This timer is implemented as a priority-queue-based heap. Each `Timer`
/// contains a few primary methods with which to drive it:
///
/// * `next_event` indicates how long the ambient system needs to sleep until it
/// invokes further processing on a `Timer`
/// * `advance_to` is what actually fires timers on the `Timer`, and should be
/// called essentially every iteration of the event loop, or when the time
/// specified by `next_event` has elapsed.
/// * The `Future` implementation for `Timer` is used to process incoming timer
/// updates and requests. This is used to schedule new timeouts, update
/// existing ones, or delete existing timeouts. The `Future` implementation
/// will never resolve, but it'll schedule notifications of when to wake up
/// and process more messages.
///
/// Note that if you're using this crate you probably don't need to use a
/// `Timer` as there is a global one already available for you, running on a
/// helper thread. If this isn't desirable, though, then the
/// `TimerHandle::set_as_global_fallback` method can be used instead!
pub struct Timer {
inner: Arc<Inner>,
timer_heap: Heap<HeapTimer>,
}
/// A handle to a `Timer` which is used to create instances of a `Delay`.
#[derive(Clone)]
pub struct TimerHandle {
pub(crate) inner: Weak<Inner>,
}
pub(crate) struct Inner {
/// List of updates the `Timer` needs to process
pub(crate) list: ArcList<ScheduledTimer>,
/// The blocked `Timer` task, notified whenever the `list` above is updated.
pub(crate) waker: AtomicWaker,
}
/// Shared state between the `Timer` and a `Delay`.
pub(crate) struct ScheduledTimer {
pub(crate) waker: AtomicWaker,
// The lowest bit here is whether the timer has fired or not, the second
// lowest bit is whether the timer has been invalidated, and all the other
// bits are the "generation" of the timer which is reset during the `reset`
// function. Only timers for a matching generation are fired.
pub(crate) state: AtomicUsize,
pub(crate) inner: Weak<Inner>,
pub(crate) at: Mutex<Option<Instant>>,
// TODO: this is only accessed by the timer thread, should have a more
// lightweight protection than a `Mutex`
pub(crate) slot: Mutex<Option<Slot>>,
}
impl Timer {
/// Creates a new timer heap ready to create new timers.
pub fn new() -> Timer {
Timer {
inner: Arc::new(Inner {
list: ArcList::new(),
waker: AtomicWaker::new(),
}),
timer_heap: Heap::new(),
}
}
/// Returns a handle to this timer heap, used to create new timeouts.
pub fn handle(&self) -> TimerHandle {
TimerHandle {
inner: Arc::downgrade(&self.inner),
}
}
/// Returns the time at which this timer next needs to be invoked with
/// `advance_to`.
///
/// Event loops or threads typically want to sleep until the specified
/// instant.
pub fn next_event(&self) -> Option<Instant> {
self.timer_heap.peek().map(|t| t.at)
}
/// Process any timers which are supposed to fire at or before the current
/// instant.
///
/// This method is equivalent to `self.advance_to(Instant::now())`.
pub fn advance(&mut self) {
self.advance_to(Instant::now())
}
/// Process any timers which are supposed to fire at or before the specified `now`.
///
/// This method should be called on `Timer` periodically to advance the
/// internal state and process any pending timers which need to fire.
pub fn advance_to(&mut self, now: Instant) {
loop {
match self.timer_heap.peek() {
Some(head) if head.at <= now => {}
Some(_) => break,
None => break,
};
// Flag the timer as fired and then notify its task, if any, that's
// blocked.
let heap_timer = self.timer_heap.pop().unwrap();
*heap_timer.node.slot.lock().unwrap() = None;
let bits = heap_timer.gen << 2;
match heap_timer
.node
.state
.compare_exchange(bits, bits | 0b01, SeqCst, SeqCst)
{
Ok(_) => heap_timer.node.waker.wake(),
Err(_b) => {}
}
}
}
/// Either updates the given timer node's existing heap entry to fire at
/// `at`, or pushes a new heap entry for it that fires at `at`.
fn update_or_add(&mut self, at: Instant, node: Arc<Node<ScheduledTimer>>) {
// TODO: avoid remove + push and instead just do one sift of the heap?
// In theory we could update it in place and then do the percolation
// as necessary
let gen = node.state.load(SeqCst) >> 2;
let mut slot = node.slot.lock().unwrap();
if let Some(heap_slot) = slot.take() {
self.timer_heap.remove(heap_slot);
}
*slot = Some(self.timer_heap.push(HeapTimer {
at,
gen,
node: node.clone(),
}));
}
fn remove(&mut self, node: Arc<Node<ScheduledTimer>>) {
// If this node is still around and it's still got a registered timer,
// then we jettison it from the timer heap.
let mut slot = node.slot.lock().unwrap();
let heap_slot = match slot.take() {
Some(slot) => slot,
None => return,
};
self.timer_heap.remove(heap_slot);
}
fn invalidate(&mut self, node: Arc<Node<ScheduledTimer>>) {
node.state.fetch_or(0b10, SeqCst);
node.waker.wake();
}
}
impl Future for Timer {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).waker.register(cx.waker());
let mut list = self.inner.list.take();
while let Some(node) = list.pop() {
let at = *node.at.lock().unwrap();
match at {
Some(at) => self.update_or_add(at, node),
None => self.remove(node),
}
}
Poll::Pending
}
}
impl Drop for Timer {
fn drop(&mut self) {
// Seal off our list to prevent any more updates from getting pushed on.
// Any timer which sees an error from the push will immediately become
// inert.
let mut list = self.inner.list.take_and_seal();
// Now that we'll never receive another timer, drain the list of all
// updates and also drain our heap of all active timers, invalidating
// everything.
while let Some(t) = list.pop() {
self.invalidate(t);
}
while let Some(t) = self.timer_heap.pop() {
self.invalidate(t.node);
}
}
}
impl fmt::Debug for Timer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("Timer").field("heap", &"...").finish()
}
}
impl Default for Timer {
fn default() -> Self {
Self::new()
}
}
static HANDLE_FALLBACK: AtomicUsize = AtomicUsize::new(0);
/// Error returned from `TimerHandle::set_as_global_fallback`.
#[derive(Clone, Debug)]
struct SetDefaultError(());
impl TimerHandle {
/// Configures this timer handle to be the one returned by
/// `TimerHandle::default`.
///
/// By default a global thread is initialized on the first call to
/// `TimerHandle::default`. This first call can happen transitively through
/// `Delay::new`. If, however, that hasn't happened yet then the global
/// default timer handle can be configured through this method.
///
/// This method can be used to prevent the global helper thread from
/// spawning. If this method is successful then the global helper thread
/// will never get spun up.
///
/// On success this timer handle will have installed itself globally to be
/// used as the return value for `TimerHandle::default` unless otherwise
/// specified.
///
/// # Errors
///
/// If another thread has already called `set_as_global_fallback` or this
/// thread otherwise loses a race to call this method then it will fail
/// returning an error. Once a call to `set_as_global_fallback` is
/// successful then no future calls may succeed.
fn set_as_global_fallback(self) -> Result<(), SetDefaultError> {
unsafe {
let val = self.into_usize();
match HANDLE_FALLBACK.compare_exchange(0, val, SeqCst, SeqCst) {
Ok(_) => Ok(()),
Err(_) => {
drop(TimerHandle::from_usize(val));
Err(SetDefaultError(()))
}
}
}
}
fn into_usize(self) -> usize {
unsafe { mem::transmute::<Weak<Inner>, usize>(self.inner) }
}
unsafe fn from_usize(val: usize) -> TimerHandle {
let inner = mem::transmute::<usize, Weak<Inner>>(val);
TimerHandle { inner }
}
}
impl Default for TimerHandle {
fn default() -> TimerHandle {
let mut fallback = HANDLE_FALLBACK.load(SeqCst);
// If the fallback hasn't been previously initialized then let's spin
// up a helper thread and try to initialize with that. If we can't
// actually create a helper thread then we'll just return a "defunct"
// handle which will return errors whenever timer objects attempt to
// associate with it.
if fallback == 0 {
let helper = match global::HelperThread::new() {
Ok(helper) => helper,
Err(_) => return TimerHandle { inner: Weak::new() },
};
// If we successfully set ourselves as the actual fallback then we
// want to `forget` the helper thread to ensure that it persists
// globally. If we fail to set ourselves as the fallback that means
// that someone was racing with this call to
// `TimerHandle::default`. They ended up winning so we'll destroy
// our helper thread (which shuts down the thread) and reload the
// fallback.
if helper.handle().set_as_global_fallback().is_ok() {
let ret = helper.handle();
helper.forget();
return ret; | }
fallback = HANDLE_FALLBACK.load(SeqCst);
}
// At this point our fallback handle global was configured so we use
// its value to reify a handle, clone it, and then forget our reified
// handle as we don't actually have an owning reference to it.
assert!(fallback != 0);
unsafe {
let handle = TimerHandle::from_usize(fallback);
let ret = handle.clone();
let _ = handle.into_usize();
ret
}
}
}
impl fmt::Debug for TimerHandle {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("TimerHandle")
.field("inner", &"...")
.finish()
}
} | random_line_split | |
openapi_terraform_provider_doc_generator.go | package openapiterraformdocsgenerator
import (
"errors"
"fmt"
"github.com/dikhan/terraform-provider-openapi/v3/openapi"
"github.com/mitchellh/hashstructure"
"log"
"sort"
)
// TerraformProviderDocGenerator defines the struct that holds the configuration needed to be able to generate the documentation
type TerraformProviderDocGenerator struct {
// ProviderName defines the provider name
ProviderName string
// Hostname is the hostname of the Terraform registry that distributes the provider, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
// For in-house providers that you intend to distribute from a local filesystem directory, you can use an arbitrary hostname in a domain your organization controls. For example, if your corporate domain were example.com then you might choose
// to use terraform.example.com as your placeholder hostname, even if that hostname doesn't actually resolve in DNS.
Hostname string
// Namespace is an organizational namespace within the specified registry to be used for configuration purposes, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
Namespace string
// PluginVersionConstraint should contain the OpenAPI plugin version constraint, e.g. "~> 2.1.0". If not populated, the renderer
// will default to the ">= 2.1.0" OpenAPI provider version constraint
PluginVersionConstraint string
// SpecAnalyser analyses the swagger doc and provides helper methods to retrieve all the endpoints that can
// be used as Terraform resources.
SpecAnalyser openapi.SpecAnalyser
}
// NewTerraformProviderDocGenerator returns a TerraformProviderDocGenerator populated with the provider documentation which
// exposes methods to render the documentation in different formats (only HTML is supported at the moment)
func NewTerraformProviderDocGenerator(providerName, hostname, namespace, openAPIDocURL string) (TerraformProviderDocGenerator, error) {
analyser, err := openapi.CreateSpecAnalyser("v2", openAPIDocURL)
if err != nil {
return TerraformProviderDocGenerator{}, err
}
return TerraformProviderDocGenerator{
ProviderName: providerName,
Hostname: hostname,
Namespace: namespace,
SpecAnalyser: analyser,
}, nil
}
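// Illustrative usage sketch (added for clarity; the provider name, hostname,
// namespace and URL below are hypothetical):
//
//	g, err := NewTerraformProviderDocGenerator("myservice", "terraform.example.com", "myorg", "https://api.service.com/openapi.yaml")
//	if err != nil {
//		log.Fatal(err)
//	}
//	doc, err := g.GenerateDocumentation()
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = doc // the returned TerraformProviderDocumentation can then be rendered, e.g. to HTML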
// GenerateDocumentation creates a TerraformProviderDocumentation object populated based on the OpenAPIDocURL documentation
func (t TerraformProviderDocGenerator) GenerateDocumentation() (TerraformProviderDocumentation, error) {
if t.ProviderName == "" {
return TerraformProviderDocumentation{}, errors.New("provider name not provided")
}
if t.Hostname == "" {
return TerraformProviderDocumentation{}, errors.New("hostname not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.Namespace == "" {
return TerraformProviderDocumentation{}, errors.New("namespace not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.PluginVersionConstraint == "" {
log.Println("PluginVersionConstraint not provided, default value in the plugin's terraform required_providers rendered documentation will be version = \">= 2.1.0\"")
}
regions, err := getRegions(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
globalSecuritySchemes, securityDefinitions, err := getSecurity(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
headers := t.SpecAnalyser.GetAllHeaderParameters()
configRegions, configProperties := t.getRequiredProviderConfigurationProperties(regions, globalSecuritySchemes, securityDefinitions, headers)
r, err := t.SpecAnalyser.GetTerraformCompliantResources()
if err != nil {
return TerraformProviderDocumentation{}, err
}
resources, err := t.getProviderResources(r)
if err != nil {
return TerraformProviderDocumentation{}, err
}
// ignoring error from getDataSourceInstances because resource errors will be caught when looping through resources in getProviderResources
dataSourceInstances, _ := t.getDataSourceInstances(r)
compliantDataSources := t.SpecAnalyser.GetTerraformCompliantDataSources()
dataSourceFilters, err := t.getDataSourceFilters(compliantDataSources)
if err != nil {
return TerraformProviderDocumentation{}, err
}
sort.SliceStable(resources, func(i, j int) bool {
return resources[i].Name < resources[j].Name
})
sort.SliceStable(dataSourceInstances, func(i, j int) bool {
return dataSourceInstances[i].Name < dataSourceInstances[j].Name
})
sort.SliceStable(dataSourceFilters, func(i, j int) bool {
return dataSourceFilters[i].Name < dataSourceFilters[j].Name
})
return TerraformProviderDocumentation{
ProviderName: t.ProviderName,
ProviderInstallation: ProviderInstallation{
ProviderName: t.ProviderName,
Namespace: t.Namespace,
Hostname: t.Hostname,
PluginVersionConstraint: t.PluginVersionConstraint,
Example: fmt.Sprintf("$ export PROVIDER_NAME=%s && curl -fsSL https://raw.githubusercontent.com/dikhan/terraform-provider-openapi/master/scripts/install.sh | bash -s -- --provider-name $PROVIDER_NAME<br>"+
"[INFO] Downloading https://github.com/dikhan/terraform-provider-openapi/v3/releases/download/v3.0.0/terraform-provider-openapi_3.0.0_darwin_amd64.tar.gz in temporally folder /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh...<br>"+
"[INFO] Extracting terraform-provider-openapi from terraform-provider-openapi_0.29.4_darwin_amd64.tar.gz...<br>"+
"[INFO] Cleaning up tmp dir created for installation purposes: /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh<br>"+
"[INFO] Terraform provider 'terraform-provider-%s' successfully installed at: '~/.terraform.d/plugins'!", t.ProviderName, t.ProviderName),
Other: "You can then start running the Terraform provider:",
OtherCommand: fmt.Sprintf(`$ export OTF_VAR_%s_PLUGIN_CONFIGURATION_FILE="https://api.service.com/openapi.yaml"<br>`, t.ProviderName),
},
ProviderConfiguration: ProviderConfiguration{
ProviderName: t.ProviderName,
Regions: configRegions,
ConfigProperties: configProperties,
},
ProviderResources: ProviderResources{
ProviderName: t.ProviderName,
Resources: resources,
},
DataSources: DataSources{
ProviderName: t.ProviderName,
DataSources: dataSourceFilters,
DataSourceInstances: dataSourceInstances,
},
}, err
}
func getRegions(s openapi.SpecAnalyser) ([]string, error) {
backendConfig, err := s.GetAPIBackendConfiguration()
if err != nil {
return nil, err
}
if backendConfig != nil {
_, _, regions, err := backendConfig.IsMultiRegion()
if err != nil {
return nil, err
}
return regions, nil
}
return nil, nil
}
func getSecurity(s openapi.SpecAnalyser) (openapi.SpecSecuritySchemes, *openapi.SpecSecurityDefinitions, error) {
security := s.GetSecurity()
if security != nil {
globalSecuritySchemes, err := security.GetGlobalSecuritySchemes()
if err != nil {
return nil, nil, err
}
securityDefinitions, err := security.GetAPIKeySecurityDefinitions()
if err != nil {
return nil, nil, err
}
return globalSecuritySchemes, securityDefinitions, nil
}
return nil, nil, nil
}
func (t TerraformProviderDocGenerator) getDataSourceFilters(dataSourcesFilter []openapi.SpecResource) ([]DataSource, error) {
dataSources := []DataSource{}
for _, dataSource := range dataSourcesFilter {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSources = append(dataSources, DataSource{
Name: dataSource.GetResourceName(),
Properties: orderProps(props),
})
}
return dataSources, nil
}
func (t TerraformProviderDocGenerator) getDataSourceInstances(dataSourceInstances []openapi.SpecResource) ([]DataSource, error) {
dataSourcesInstance := []DataSource{}
for _, dataSource := range dataSourceInstances {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSourcesInstance = append(dataSourcesInstance, DataSource{
Name: fmt.Sprintf("%s_instance", dataSource.GetResourceName()),
Properties: orderProps(props),
})
}
return dataSourcesInstance, nil
}
func (t TerraformProviderDocGenerator) getProviderResources(resources []openapi.SpecResource) ([]Resource, error) {
r := []Resource{}
for _, resource := range resources {
if resource.ShouldIgnoreResource() {
continue
}
resourceSchema, err := resource.GetResourceSchema()
if err != nil {
return nil, err
}
props := []Property{}
requiredProps := []Property{}
optionalProps := []Property{}
for _, p := range resourceSchema.Properties {
prop := t.resourceSchemaToProperty(*p)
if prop.Required {
requiredProps = append(requiredProps, prop)
}
if !prop.Required {
optionalProps = append(optionalProps, prop)
}
}
props = append(props, orderProps(requiredProps)...)
props = append(props, orderProps(optionalProps)...)
parentInfo := resource.GetParentResourceInfo()
var parentProperties []string
if parentInfo != nil {
parentProperties = parentInfo.GetParentPropertiesNames()
}
r = append(r, Resource{
Name: resource.GetResourceName(),
Description: "",
Properties: props,
ParentProperties: parentProperties,
ArgumentsReference: ArgumentsReference{
Notes: []string{},
},
})
}
return r, nil
}
func (t TerraformProviderDocGenerator) resourceSchemaToProperty(specSchemaDefinitionProperty openapi.SpecSchemaDefinitionProperty) Property {
var schema []Property
if specSchemaDefinitionProperty.Type == openapi.TypeObject || specSchemaDefinitionProperty.ArrayItemsType == openapi.TypeObject {
if specSchemaDefinitionProperty.SpecSchemaDefinition != nil {
for _, p := range specSchemaDefinitionProperty.SpecSchemaDefinition.Properties |
}
}
return Property{
Name: specSchemaDefinitionProperty.GetTerraformCompliantPropertyName(),
Type: string(specSchemaDefinitionProperty.Type),
ArrayItemsType: string(specSchemaDefinitionProperty.ArrayItemsType),
Required: specSchemaDefinitionProperty.IsRequired(),
Computed: specSchemaDefinitionProperty.Computed,
IsOptionalComputed: specSchemaDefinitionProperty.IsOptionalComputed() || specSchemaDefinitionProperty.IsOptionalComputedWithDefault(),
IsSensitive: specSchemaDefinitionProperty.Sensitive,
IsParent: specSchemaDefinitionProperty.IsParentProperty,
Description: specSchemaDefinitionProperty.Description,
Default: specSchemaDefinitionProperty.Default,
Schema: orderProps(schema),
}
}
func (t TerraformProviderDocGenerator) getRequiredProviderConfigurationProperties(regions []string, globalSecuritySchemes openapi.SpecSecuritySchemes, securityDefinitions *openapi.SpecSecurityDefinitions, headers openapi.SpecHeaderParameters) ([]string, []Property) {
var configProps []Property
if securityDefinitions != nil {
for _, securityDefinition := range *securityDefinitions {
secDefName := securityDefinition.GetTerraformConfigurationName()
configProps = append(configProps, Property{
Name: secDefName,
Type: "string",
Required: false,
Description: "",
})
}
}
// Mark as required the properties that are set in the security schemes (they are mandatory)
if globalSecuritySchemes != nil {
for _, securityScheme := range globalSecuritySchemes {
for idx, configProp := range configProps {
if configProp.Name == securityScheme.GetTerraformConfigurationName() {
configProps[idx].Required = true
break
}
}
}
}
if headers != nil {
for _, header := range headers {
configProps = append(configProps, Property{
Name: header.GetHeaderTerraformConfigurationName(),
Type: "string",
Required: header.IsRequired,
Description: "",
})
}
}
return regions, configProps
}
func orderProps(props []Property) []Property {
sort.Slice(props, func(i, j int) bool {
hash1, _ := hashstructure.Hash(props[i], nil)
hash2, _ := hashstructure.Hash(props[j], nil)
return hash1 > hash2
})
return props
}
| {
schema = append(schema, t.resourceSchemaToProperty(*p))
} | conditional_block |
openapi_terraform_provider_doc_generator.go | package openapiterraformdocsgenerator
import (
"errors"
"fmt"
"github.com/dikhan/terraform-provider-openapi/v3/openapi"
"github.com/mitchellh/hashstructure"
"log"
"sort"
)
// TerraformProviderDocGenerator defines the struct that holds the configuration needed to be able to generate the documentation
type TerraformProviderDocGenerator struct {
// ProviderName defines the provider name
ProviderName string
// Hostname is the hostname of the Terraform registry that distributes the provider, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
// For in-house providers that you intend to distribute from a local filesystem directory, you can use an arbitrary hostname in a domain your organization controls. For example, if your corporate domain were example.com then you might choose
// to use terraform.example.com as your placeholder hostname, even if that hostname doesn't actually resolve in DNS.
Hostname string
// Namespace is an organizational namespace within the specified registry to be used for configuration purposes, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
Namespace string
// PluginVersionConstraint should contain the OpenAPI plugin version constraint, e.g. "~> 2.1.0". If not populated, the renderer
// will default to the ">= 2.1.0" OpenAPI provider version constraint
PluginVersionConstraint string
// SpecAnalyser analyses the swagger doc and provides helper methods to retrieve all the endpoints that can
// be used as Terraform resources.
SpecAnalyser openapi.SpecAnalyser
}
// NewTerraformProviderDocGenerator returns a TerraformProviderDocGenerator populated with the provider documentation which
// exposes methods to render the documentation in different formats (only HTML is supported at the moment)
func NewTerraformProviderDocGenerator(providerName, hostname, namespace, openAPIDocURL string) (TerraformProviderDocGenerator, error) {
analyser, err := openapi.CreateSpecAnalyser("v2", openAPIDocURL)
if err != nil {
return TerraformProviderDocGenerator{}, err
}
return TerraformProviderDocGenerator{
ProviderName: providerName,
Hostname: hostname,
Namespace: namespace,
SpecAnalyser: analyser,
}, nil
}
// GenerateDocumentation creates a TerraformProviderDocumentation object populated based on the OpenAPIDocURL documentation
func (t TerraformProviderDocGenerator) GenerateDocumentation() (TerraformProviderDocumentation, error) {
if t.ProviderName == "" {
return TerraformProviderDocumentation{}, errors.New("provider name not provided")
}
if t.Hostname == "" {
return TerraformProviderDocumentation{}, errors.New("hostname not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.Namespace == "" {
return TerraformProviderDocumentation{}, errors.New("namespace not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.PluginVersionConstraint == "" {
log.Println("PluginVersionConstraint not provided, default value in the plugin's terraform required_providers rendered documentation will be version = \">= 2.1.0\"")
}
regions, err := getRegions(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
globalSecuritySchemes, securityDefinitions, err := getSecurity(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
headers := t.SpecAnalyser.GetAllHeaderParameters()
configRegions, configProperties := t.getRequiredProviderConfigurationProperties(regions, globalSecuritySchemes, securityDefinitions, headers)
r, err := t.SpecAnalyser.GetTerraformCompliantResources()
if err != nil {
return TerraformProviderDocumentation{}, err
}
resources, err := t.getProviderResources(r)
if err != nil {
return TerraformProviderDocumentation{}, err
}
// ignoring error from getDataSourceInstances because resource errors will be caught when looping through resources in getProviderResources
dataSourceInstances, _ := t.getDataSourceInstances(r)
compliantDataSources := t.SpecAnalyser.GetTerraformCompliantDataSources()
dataSourceFilters, err := t.getDataSourceFilters(compliantDataSources)
if err != nil {
return TerraformProviderDocumentation{}, err
}
sort.SliceStable(resources, func(i, j int) bool {
return resources[i].Name < resources[j].Name
})
sort.SliceStable(dataSourceInstances, func(i, j int) bool {
return dataSourceInstances[i].Name < dataSourceInstances[j].Name
})
sort.SliceStable(dataSourceFilters, func(i, j int) bool {
return dataSourceFilters[i].Name < dataSourceFilters[j].Name
})
return TerraformProviderDocumentation{
ProviderName: t.ProviderName,
ProviderInstallation: ProviderInstallation{
ProviderName: t.ProviderName,
Namespace: t.Namespace,
Hostname: t.Hostname,
PluginVersionConstraint: t.PluginVersionConstraint,
Example: fmt.Sprintf("$ export PROVIDER_NAME=%s && curl -fsSL https://raw.githubusercontent.com/dikhan/terraform-provider-openapi/master/scripts/install.sh | bash -s -- --provider-name $PROVIDER_NAME<br>"+
"[INFO] Downloading https://github.com/dikhan/terraform-provider-openapi/v3/releases/download/v3.0.0/terraform-provider-openapi_3.0.0_darwin_amd64.tar.gz in temporally folder /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh...<br>"+
"[INFO] Extracting terraform-provider-openapi from terraform-provider-openapi_0.29.4_darwin_amd64.tar.gz...<br>"+
"[INFO] Cleaning up tmp dir created for installation purposes: /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh<br>"+
"[INFO] Terraform provider 'terraform-provider-%s' successfully installed at: '~/.terraform.d/plugins'!", t.ProviderName, t.ProviderName),
Other: "You can then start running the Terraform provider:",
OtherCommand: fmt.Sprintf(`$ export OTF_VAR_%s_PLUGIN_CONFIGURATION_FILE="https://api.service.com/openapi.yaml"<br>`, t.ProviderName),
},
ProviderConfiguration: ProviderConfiguration{
ProviderName: t.ProviderName,
Regions: configRegions,
ConfigProperties: configProperties,
},
ProviderResources: ProviderResources{
ProviderName: t.ProviderName,
Resources: resources,
},
DataSources: DataSources{
ProviderName: t.ProviderName,
DataSources: dataSourceFilters,
DataSourceInstances: dataSourceInstances,
},
}, err
}
func getRegions(s openapi.SpecAnalyser) ([]string, error) |
func getSecurity(s openapi.SpecAnalyser) (openapi.SpecSecuritySchemes, *openapi.SpecSecurityDefinitions, error) {
security := s.GetSecurity()
if security != nil {
globalSecuritySchemes, err := security.GetGlobalSecuritySchemes()
if err != nil {
return nil, nil, err
}
securityDefinitions, err := security.GetAPIKeySecurityDefinitions()
if err != nil {
return nil, nil, err
}
return globalSecuritySchemes, securityDefinitions, nil
}
return nil, nil, nil
}
func (t TerraformProviderDocGenerator) getDataSourceFilters(dataSourcesFilter []openapi.SpecResource) ([]DataSource, error) {
dataSources := []DataSource{}
for _, dataSource := range dataSourcesFilter {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSources = append(dataSources, DataSource{
Name: dataSource.GetResourceName(),
Properties: orderProps(props),
})
}
return dataSources, nil
}
func (t TerraformProviderDocGenerator) getDataSourceInstances(dataSourceInstances []openapi.SpecResource) ([]DataSource, error) {
dataSourcesInstance := []DataSource{}
for _, dataSource := range dataSourceInstances {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSourcesInstance = append(dataSourcesInstance, DataSource{
Name: fmt.Sprintf("%s_instance", dataSource.GetResourceName()),
Properties: orderProps(props),
})
}
return dataSourcesInstance, nil
}
func (t TerraformProviderDocGenerator) getProviderResources(resources []openapi.SpecResource) ([]Resource, error) {
r := []Resource{}
for _, resource := range resources {
if resource.ShouldIgnoreResource() {
continue
}
resourceSchema, err := resource.GetResourceSchema()
if err != nil {
return nil, err
}
props := []Property{}
requiredProps := []Property{}
optionalProps := []Property{}
for _, p := range resourceSchema.Properties {
prop := t.resourceSchemaToProperty(*p)
if prop.Required {
requiredProps = append(requiredProps, prop)
}
if !prop.Required {
optionalProps = append(optionalProps, prop)
}
}
props = append(props, orderProps(requiredProps)...)
props = append(props, orderProps(optionalProps)...)
parentInfo := resource.GetParentResourceInfo()
var parentProperties []string
if parentInfo != nil {
parentProperties = parentInfo.GetParentPropertiesNames()
}
r = append(r, Resource{
Name: resource.GetResourceName(),
Description: "",
Properties: props,
ParentProperties: parentProperties,
ArgumentsReference: ArgumentsReference{
Notes: []string{},
},
})
}
return r, nil
}
func (t TerraformProviderDocGenerator) resourceSchemaToProperty(specSchemaDefinitionProperty openapi.SpecSchemaDefinitionProperty) Property {
var schema []Property
if specSchemaDefinitionProperty.Type == openapi.TypeObject || specSchemaDefinitionProperty.ArrayItemsType == openapi.TypeObject {
if specSchemaDefinitionProperty.SpecSchemaDefinition != nil {
for _, p := range specSchemaDefinitionProperty.SpecSchemaDefinition.Properties {
schema = append(schema, t.resourceSchemaToProperty(*p))
}
}
}
return Property{
Name: specSchemaDefinitionProperty.GetTerraformCompliantPropertyName(),
Type: string(specSchemaDefinitionProperty.Type),
ArrayItemsType: string(specSchemaDefinitionProperty.ArrayItemsType),
Required: specSchemaDefinitionProperty.IsRequired(),
Computed: specSchemaDefinitionProperty.Computed,
IsOptionalComputed: specSchemaDefinitionProperty.IsOptionalComputed() || specSchemaDefinitionProperty.IsOptionalComputedWithDefault(),
IsSensitive: specSchemaDefinitionProperty.Sensitive,
IsParent: specSchemaDefinitionProperty.IsParentProperty,
Description: specSchemaDefinitionProperty.Description,
Default: specSchemaDefinitionProperty.Default,
Schema: orderProps(schema),
}
}
func (t TerraformProviderDocGenerator) getRequiredProviderConfigurationProperties(regions []string, globalSecuritySchemes openapi.SpecSecuritySchemes, securityDefinitions *openapi.SpecSecurityDefinitions, headers openapi.SpecHeaderParameters) ([]string, []Property) {
var configProps []Property
if securityDefinitions != nil {
for _, securityDefinition := range *securityDefinitions {
secDefName := securityDefinition.GetTerraformConfigurationName()
configProps = append(configProps, Property{
Name: secDefName,
Type: "string",
Required: false,
Description: "",
})
}
}
// Mark as required the properties that are set in the security schemes (they are mandatory)
if globalSecuritySchemes != nil {
for _, securityScheme := range globalSecuritySchemes {
for idx, configProp := range configProps {
if configProp.Name == securityScheme.GetTerraformConfigurationName() {
configProps[idx].Required = true
break
}
}
}
}
if headers != nil {
for _, header := range headers {
configProps = append(configProps, Property{
Name: header.GetHeaderTerraformConfigurationName(),
Type: "string",
Required: header.IsRequired,
Description: "",
})
}
}
return regions, configProps
}
func orderProps(props []Property) []Property {
sort.Slice(props, func(i, j int) bool {
hash1, _ := hashstructure.Hash(props[i], nil)
hash2, _ := hashstructure.Hash(props[j], nil)
return hash1 > hash2
})
return props
}
| {
backendConfig, err := s.GetAPIBackendConfiguration()
if err != nil {
return nil, err
}
if backendConfig != nil {
_, _, regions, err := backendConfig.IsMultiRegion()
if err != nil {
return nil, err
}
return regions, nil
}
return nil, nil
} | identifier_body |
openapi_terraform_provider_doc_generator.go | package openapiterraformdocsgenerator
import (
"errors"
"fmt"
"github.com/dikhan/terraform-provider-openapi/v3/openapi"
"github.com/mitchellh/hashstructure"
"log"
"sort"
)
// TerraformProviderDocGenerator defines the struct that holds the configuration needed to be able to generate the documentation
type TerraformProviderDocGenerator struct {
// ProviderName defines the provider name
ProviderName string
// Hostname is the hostname of the Terraform registry that distributes the provider, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
// For in-house providers that you intend to distribute from a local filesystem directory, you can use an arbitrary hostname in a domain your organization controls. For example, if your corporate domain were example.com then you might choose
// to use terraform.example.com as your placeholder hostname, even if that hostname doesn't actually resolve in DNS.
Hostname string
// Namespace is an organizational namespace within the specified registry to be used for configuration purposes, as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
Namespace string
// PluginVersionConstraint should contain the OpenAPI plugin version constraint, e.g. "~> 2.1.0". If not populated, the renderer
// will default to the ">= 2.1.0" OpenAPI provider version constraint
PluginVersionConstraint string
// SpecAnalyser analyses the swagger doc and provides helper methods to retrieve all the endpoints that can
// be used as Terraform resources.
SpecAnalyser openapi.SpecAnalyser
}
// NewTerraformProviderDocGenerator returns a TerraformProviderDocGenerator populated with the provider documentation which
// exposes methods to render the documentation in different formats (only HTML is supported at the moment)
func NewTerraformProviderDocGenerator(providerName, hostname, namespace, openAPIDocURL string) (TerraformProviderDocGenerator, error) {
analyser, err := openapi.CreateSpecAnalyser("v2", openAPIDocURL)
if err != nil {
return TerraformProviderDocGenerator{}, err
}
return TerraformProviderDocGenerator{
ProviderName: providerName,
Hostname: hostname,
Namespace: namespace,
SpecAnalyser: analyser,
}, nil
}
// GenerateDocumentation creates a TerraformProviderDocumentation object populated based on the OpenAPIDocURL documentation
func (t TerraformProviderDocGenerator) GenerateDocumentation() (TerraformProviderDocumentation, error) {
if t.ProviderName == "" {
return TerraformProviderDocumentation{}, errors.New("provider name not provided")
}
if t.Hostname == "" {
return TerraformProviderDocumentation{}, errors.New("hostname not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.Namespace == "" {
return TerraformProviderDocumentation{}, errors.New("namespace not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.PluginVersionConstraint == "" {
log.Println("PluginVersionConstraint not provided, default value in the plugin's terraform required_providers rendered documentation will be version = \">= 2.1.0\"")
}
regions, err := getRegions(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
globalSecuritySchemes, securityDefinitions, err := getSecurity(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
headers := t.SpecAnalyser.GetAllHeaderParameters()
configRegions, configProperties := t.getRequiredProviderConfigurationProperties(regions, globalSecuritySchemes, securityDefinitions, headers)
r, err := t.SpecAnalyser.GetTerraformCompliantResources()
if err != nil {
return TerraformProviderDocumentation{}, err
}
resources, err := t.getProviderResources(r)
if err != nil {
return TerraformProviderDocumentation{}, err
}
// ignoring the error from getDataSourceInstances because resource errors will already be caught when looping through resources in getProviderResources
dataSourceInstances, _ := t.getDataSourceInstances(r)
compliantDataSources := t.SpecAnalyser.GetTerraformCompliantDataSources()
dataSourceFilters, err := t.getDataSourceFilters(compliantDataSources)
if err != nil {
return TerraformProviderDocumentation{}, err
}
sort.SliceStable(resources, func(i, j int) bool {
return resources[i].Name < resources[j].Name
})
sort.SliceStable(dataSourceInstances, func(i, j int) bool {
return dataSourceInstances[i].Name < dataSourceInstances[j].Name
})
sort.SliceStable(dataSourceFilters, func(i, j int) bool {
return dataSourceFilters[i].Name < dataSourceFilters[j].Name
})
return TerraformProviderDocumentation{
ProviderName: t.ProviderName,
ProviderInstallation: ProviderInstallation{
ProviderName: t.ProviderName,
Namespace: t.Namespace,
Hostname: t.Hostname,
PluginVersionConstraint: t.PluginVersionConstraint,
Example: fmt.Sprintf("$ export PROVIDER_NAME=%s && curl -fsSL https://raw.githubusercontent.com/dikhan/terraform-provider-openapi/master/scripts/install.sh | bash -s -- --provider-name $PROVIDER_NAME<br>"+
"[INFO] Downloading https://github.com/dikhan/terraform-provider-openapi/v3/releases/download/v3.0.0/terraform-provider-openapi_3.0.0_darwin_amd64.tar.gz in temporally folder /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh...<br>"+
"[INFO] Extracting terraform-provider-openapi from terraform-provider-openapi_0.29.4_darwin_amd64.tar.gz...<br>"+
"[INFO] Cleaning up tmp dir created for installation purposes: /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh<br>"+
"[INFO] Terraform provider 'terraform-provider-%s' successfully installed at: '~/.terraform.d/plugins'!", t.ProviderName, t.ProviderName),
Other: "You can then start running the Terraform provider:",
OtherCommand: fmt.Sprintf(`$ export OTF_VAR_%s_PLUGIN_CONFIGURATION_FILE="https://api.service.com/openapi.yaml"<br>`, t.ProviderName),
},
ProviderConfiguration: ProviderConfiguration{
ProviderName: t.ProviderName,
Regions: configRegions,
ConfigProperties: configProperties,
},
ProviderResources: ProviderResources{
ProviderName: t.ProviderName,
Resources: resources,
},
DataSources: DataSources{
ProviderName: t.ProviderName,
DataSources: dataSourceFilters,
DataSourceInstances: dataSourceInstances,
},
}, err
}
func | (s openapi.SpecAnalyser) ([]string, error) {
backendConfig, err := s.GetAPIBackendConfiguration()
if err != nil {
return nil, err
}
if backendConfig != nil {
_, _, regions, err := backendConfig.IsMultiRegion()
if err != nil {
return nil, err
}
return regions, nil
}
return nil, nil
}
func getSecurity(s openapi.SpecAnalyser) (openapi.SpecSecuritySchemes, *openapi.SpecSecurityDefinitions, error) {
security := s.GetSecurity()
if security != nil {
globalSecuritySchemes, err := security.GetGlobalSecuritySchemes()
if err != nil {
return nil, nil, err
}
securityDefinitions, err := security.GetAPIKeySecurityDefinitions()
if err != nil {
return nil, nil, err
}
return globalSecuritySchemes, securityDefinitions, nil
}
return nil, nil, nil
}
func (t TerraformProviderDocGenerator) getDataSourceFilters(dataSourcesFilter []openapi.SpecResource) ([]DataSource, error) {
dataSources := []DataSource{}
for _, dataSource := range dataSourcesFilter {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSources = append(dataSources, DataSource{
Name: dataSource.GetResourceName(),
Properties: orderProps(props),
})
}
return dataSources, nil
}
func (t TerraformProviderDocGenerator) getDataSourceInstances(dataSourceInstances []openapi.SpecResource) ([]DataSource, error) {
dataSourcesInstance := []DataSource{}
for _, dataSource := range dataSourceInstances {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSourcesInstance = append(dataSourcesInstance, DataSource{
Name: fmt.Sprintf("%s_instance", dataSource.GetResourceName()),
Properties: orderProps(props),
})
}
return dataSourcesInstance, nil
}
func (t TerraformProviderDocGenerator) getProviderResources(resources []openapi.SpecResource) ([]Resource, error) {
r := []Resource{}
for _, resource := range resources {
if resource.ShouldIgnoreResource() {
continue
}
resourceSchema, err := resource.GetResourceSchema()
if err != nil {
return nil, err
}
props := []Property{}
requiredProps := []Property{}
optionalProps := []Property{}
for _, p := range resourceSchema.Properties {
prop := t.resourceSchemaToProperty(*p)
if prop.Required {
requiredProps = append(requiredProps, prop)
} else {
optionalProps = append(optionalProps, prop)
}
}
props = append(props, orderProps(requiredProps)...)
props = append(props, orderProps(optionalProps)...)
parentInfo := resource.GetParentResourceInfo()
var parentProperties []string
if parentInfo != nil {
parentProperties = parentInfo.GetParentPropertiesNames()
}
r = append(r, Resource{
Name: resource.GetResourceName(),
Description: "",
Properties: props,
ParentProperties: parentProperties,
ArgumentsReference: ArgumentsReference{
Notes: []string{},
},
})
}
return r, nil
}
func (t TerraformProviderDocGenerator) resourceSchemaToProperty(specSchemaDefinitionProperty openapi.SpecSchemaDefinitionProperty) Property {
var schema []Property
if specSchemaDefinitionProperty.Type == openapi.TypeObject || specSchemaDefinitionProperty.ArrayItemsType == openapi.TypeObject {
if specSchemaDefinitionProperty.SpecSchemaDefinition != nil {
for _, p := range specSchemaDefinitionProperty.SpecSchemaDefinition.Properties {
schema = append(schema, t.resourceSchemaToProperty(*p))
}
}
}
return Property{
Name: specSchemaDefinitionProperty.GetTerraformCompliantPropertyName(),
Type: string(specSchemaDefinitionProperty.Type),
ArrayItemsType: string(specSchemaDefinitionProperty.ArrayItemsType),
Required: specSchemaDefinitionProperty.IsRequired(),
Computed: specSchemaDefinitionProperty.Computed,
IsOptionalComputed: specSchemaDefinitionProperty.IsOptionalComputed() || specSchemaDefinitionProperty.IsOptionalComputedWithDefault(),
IsSensitive: specSchemaDefinitionProperty.Sensitive,
IsParent: specSchemaDefinitionProperty.IsParentProperty,
Description: specSchemaDefinitionProperty.Description,
Default: specSchemaDefinitionProperty.Default,
Schema: orderProps(schema),
}
}
func (t TerraformProviderDocGenerator) getRequiredProviderConfigurationProperties(regions []string, globalSecuritySchemes openapi.SpecSecuritySchemes, securityDefinitions *openapi.SpecSecurityDefinitions, headers openapi.SpecHeaderParameters) ([]string, []Property) {
var configProps []Property
if securityDefinitions != nil {
for _, securityDefinition := range *securityDefinitions {
secDefName := securityDefinition.GetTerraformConfigurationName()
configProps = append(configProps, Property{
Name: secDefName,
Type: "string",
Required: false,
Description: "",
})
}
}
// Mark as required the properties that are set in the security schemes (they are mandatory)
if globalSecuritySchemes != nil {
for _, securityScheme := range globalSecuritySchemes {
for idx, configProp := range configProps {
if configProp.Name == securityScheme.GetTerraformConfigurationName() {
configProps[idx].Required = true
break
}
}
}
}
if headers != nil {
for _, header := range headers {
configProps = append(configProps, Property{
Name: header.GetHeaderTerraformConfigurationName(),
Type: "string",
Required: header.IsRequired,
Description: "",
})
}
}
return regions, configProps
}
func orderProps(props []Property) []Property {
sort.Slice(props, func(i, j int) bool {
hash1, _ := hashstructure.Hash(props[i], nil)
hash2, _ := hashstructure.Hash(props[j], nil)
return hash1 > hash2
})
return props
}
| getRegions | identifier_name |
openapi_terraform_provider_doc_generator.go | package openapiterraformdocsgenerator
import (
"errors"
"fmt"
"github.com/dikhan/terraform-provider-openapi/v3/openapi"
"github.com/mitchellh/hashstructure"
"log"
"sort"
)
// TerraformProviderDocGenerator defines the struct that holds the configuration needed to be able to generate the documentation
type TerraformProviderDocGenerator struct {
// ProviderName defines the provider name
ProviderName string
// Hostname is the hostname of the Terraform registry that distributes the provider as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
// For in-house providers that you intend to distribute from a local filesystem directory, you can use an arbitrary hostname in a domain your organization controls. For example, if your corporate domain were example.com then you might choose
// to use terraform.example.com as your placeholder hostname, even if that hostname doesn't actually resolve in DNS.
Hostname string
// Namespace is an organizational namespace within the specified registry to be used for configuration purposes as documented in https://www.terraform.io/docs/language/providers/requirements.html#source-addresses
Namespace string
// PluginVersionConstraint should contain the OpenAPI plugin version constraint eg: "~> 2.1.0". If not populated the renderer
// will default to ">= 2.1.0" OpenAPI provider version
PluginVersionConstraint string
// SpecAnalyser analyses the swagger doc and provides helper methods to retrieve all the end points that can
// be used as terraform resources.
SpecAnalyser openapi.SpecAnalyser
}
// NewTerraformProviderDocGenerator returns a TerraformProviderDocGenerator populated with the provider documentation which
// exposes methods to render the documentation in different formats (only html supported at the moment)
func NewTerraformProviderDocGenerator(providerName, hostname, namespace, openAPIDocURL string) (TerraformProviderDocGenerator, error) {
analyser, err := openapi.CreateSpecAnalyser("v2", openAPIDocURL)
if err != nil {
return TerraformProviderDocGenerator{}, err
}
return TerraformProviderDocGenerator{
ProviderName: providerName,
Hostname: hostname,
Namespace: namespace,
SpecAnalyser: analyser,
}, nil
}
// GenerateDocumentation creates a TerraformProviderDocumentation object populated based on the OpenAPIDocURL documentation
func (t TerraformProviderDocGenerator) GenerateDocumentation() (TerraformProviderDocumentation, error) {
if t.ProviderName == "" {
return TerraformProviderDocumentation{}, errors.New("provider name not provided")
}
if t.Hostname == "" {
return TerraformProviderDocumentation{}, errors.New("hostname not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
} | return TerraformProviderDocumentation{}, errors.New("namespace not provided, this is required to be able to render the provider installation section containing the required_providers block with the source address configuration in the form of [<HOSTNAME>/]<NAMESPACE>/<TYPE>")
}
if t.PluginVersionConstraint == "" {
log.Println("PluginVersionConstraint not provided, default value in the plugin's terraform required_providers rendered documentation will be version = \">= 2.1.0\"")
}
regions, err := getRegions(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
globalSecuritySchemes, securityDefinitions, err := getSecurity(t.SpecAnalyser)
if err != nil {
return TerraformProviderDocumentation{}, err
}
headers := t.SpecAnalyser.GetAllHeaderParameters()
configRegions, configProperties := t.getRequiredProviderConfigurationProperties(regions, globalSecuritySchemes, securityDefinitions, headers)
r, err := t.SpecAnalyser.GetTerraformCompliantResources()
if err != nil {
return TerraformProviderDocumentation{}, err
}
resources, err := t.getProviderResources(r)
if err != nil {
return TerraformProviderDocumentation{}, err
}
// ignoring the error from getDataSourceInstances because resource errors will already be caught when looping through resources in getProviderResources
dataSourceInstances, _ := t.getDataSourceInstances(r)
compliantDataSources := t.SpecAnalyser.GetTerraformCompliantDataSources()
dataSourceFilters, err := t.getDataSourceFilters(compliantDataSources)
if err != nil {
return TerraformProviderDocumentation{}, err
}
sort.SliceStable(resources, func(i, j int) bool {
return resources[i].Name < resources[j].Name
})
sort.SliceStable(dataSourceInstances, func(i, j int) bool {
return dataSourceInstances[i].Name < dataSourceInstances[j].Name
})
sort.SliceStable(dataSourceFilters, func(i, j int) bool {
return dataSourceFilters[i].Name < dataSourceFilters[j].Name
})
return TerraformProviderDocumentation{
ProviderName: t.ProviderName,
ProviderInstallation: ProviderInstallation{
ProviderName: t.ProviderName,
Namespace: t.Namespace,
Hostname: t.Hostname,
PluginVersionConstraint: t.PluginVersionConstraint,
Example: fmt.Sprintf("$ export PROVIDER_NAME=%s && curl -fsSL https://raw.githubusercontent.com/dikhan/terraform-provider-openapi/master/scripts/install.sh | bash -s -- --provider-name $PROVIDER_NAME<br>"+
"[INFO] Downloading https://github.com/dikhan/terraform-provider-openapi/v3/releases/download/v3.0.0/terraform-provider-openapi_3.0.0_darwin_amd64.tar.gz in temporally folder /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh...<br>"+
"[INFO] Extracting terraform-provider-openapi from terraform-provider-openapi_0.29.4_darwin_amd64.tar.gz...<br>"+
"[INFO] Cleaning up tmp dir created for installation purposes: /var/folders/n_/1lrwb99s7f50xmn9jpmfnddh0000gp/T/tmp.Xv1AkIZh<br>"+
"[INFO] Terraform provider 'terraform-provider-%s' successfully installed at: '~/.terraform.d/plugins'!", t.ProviderName, t.ProviderName),
Other: "You can then start running the Terraform provider:",
OtherCommand: fmt.Sprintf(`$ export OTF_VAR_%s_PLUGIN_CONFIGURATION_FILE="https://api.service.com/openapi.yaml"<br>`, t.ProviderName),
},
ProviderConfiguration: ProviderConfiguration{
ProviderName: t.ProviderName,
Regions: configRegions,
ConfigProperties: configProperties,
},
ProviderResources: ProviderResources{
ProviderName: t.ProviderName,
Resources: resources,
},
DataSources: DataSources{
ProviderName: t.ProviderName,
DataSources: dataSourceFilters,
DataSourceInstances: dataSourceInstances,
},
}, err
}
func getRegions(s openapi.SpecAnalyser) ([]string, error) {
backendConfig, err := s.GetAPIBackendConfiguration()
if err != nil {
return nil, err
}
if backendConfig != nil {
_, _, regions, err := backendConfig.IsMultiRegion()
if err != nil {
return nil, err
}
return regions, nil
}
return nil, nil
}
func getSecurity(s openapi.SpecAnalyser) (openapi.SpecSecuritySchemes, *openapi.SpecSecurityDefinitions, error) {
security := s.GetSecurity()
if security != nil {
globalSecuritySchemes, err := security.GetGlobalSecuritySchemes()
if err != nil {
return nil, nil, err
}
securityDefinitions, err := security.GetAPIKeySecurityDefinitions()
if err != nil {
return nil, nil, err
}
return globalSecuritySchemes, securityDefinitions, nil
}
return nil, nil, nil
}
func (t TerraformProviderDocGenerator) getDataSourceFilters(dataSourcesFilter []openapi.SpecResource) ([]DataSource, error) {
dataSources := []DataSource{}
for _, dataSource := range dataSourcesFilter {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSources = append(dataSources, DataSource{
Name: dataSource.GetResourceName(),
Properties: orderProps(props),
})
}
return dataSources, nil
}
func (t TerraformProviderDocGenerator) getDataSourceInstances(dataSourceInstances []openapi.SpecResource) ([]DataSource, error) {
dataSourcesInstance := []DataSource{}
for _, dataSource := range dataSourceInstances {
s, err := dataSource.GetResourceSchema()
if err != nil {
return nil, err
}
dataSourceSchemaDefinition := s.ConvertToDataSourceSpecSchemaDefinition()
props := []Property{}
for _, p := range dataSourceSchemaDefinition.Properties {
prop := t.resourceSchemaToProperty(*p)
props = append(props, prop)
}
dataSourcesInstance = append(dataSourcesInstance, DataSource{
Name: fmt.Sprintf("%s_instance", dataSource.GetResourceName()),
Properties: orderProps(props),
})
}
return dataSourcesInstance, nil
}
func (t TerraformProviderDocGenerator) getProviderResources(resources []openapi.SpecResource) ([]Resource, error) {
r := []Resource{}
for _, resource := range resources {
if resource.ShouldIgnoreResource() {
continue
}
resourceSchema, err := resource.GetResourceSchema()
if err != nil {
return nil, err
}
props := []Property{}
requiredProps := []Property{}
optionalProps := []Property{}
for _, p := range resourceSchema.Properties {
prop := t.resourceSchemaToProperty(*p)
if prop.Required {
requiredProps = append(requiredProps, prop)
} else {
optionalProps = append(optionalProps, prop)
}
}
props = append(props, orderProps(requiredProps)...)
props = append(props, orderProps(optionalProps)...)
parentInfo := resource.GetParentResourceInfo()
var parentProperties []string
if parentInfo != nil {
parentProperties = parentInfo.GetParentPropertiesNames()
}
r = append(r, Resource{
Name: resource.GetResourceName(),
Description: "",
Properties: props,
ParentProperties: parentProperties,
ArgumentsReference: ArgumentsReference{
Notes: []string{},
},
})
}
return r, nil
}
func (t TerraformProviderDocGenerator) resourceSchemaToProperty(specSchemaDefinitionProperty openapi.SpecSchemaDefinitionProperty) Property {
var schema []Property
if specSchemaDefinitionProperty.Type == openapi.TypeObject || specSchemaDefinitionProperty.ArrayItemsType == openapi.TypeObject {
if specSchemaDefinitionProperty.SpecSchemaDefinition != nil {
for _, p := range specSchemaDefinitionProperty.SpecSchemaDefinition.Properties {
schema = append(schema, t.resourceSchemaToProperty(*p))
}
}
}
return Property{
Name: specSchemaDefinitionProperty.GetTerraformCompliantPropertyName(),
Type: string(specSchemaDefinitionProperty.Type),
ArrayItemsType: string(specSchemaDefinitionProperty.ArrayItemsType),
Required: specSchemaDefinitionProperty.IsRequired(),
Computed: specSchemaDefinitionProperty.Computed,
IsOptionalComputed: specSchemaDefinitionProperty.IsOptionalComputed() || specSchemaDefinitionProperty.IsOptionalComputedWithDefault(),
IsSensitive: specSchemaDefinitionProperty.Sensitive,
IsParent: specSchemaDefinitionProperty.IsParentProperty,
Description: specSchemaDefinitionProperty.Description,
Default: specSchemaDefinitionProperty.Default,
Schema: orderProps(schema),
}
}
func (t TerraformProviderDocGenerator) getRequiredProviderConfigurationProperties(regions []string, globalSecuritySchemes openapi.SpecSecuritySchemes, securityDefinitions *openapi.SpecSecurityDefinitions, headers openapi.SpecHeaderParameters) ([]string, []Property) {
var configProps []Property
if securityDefinitions != nil {
for _, securityDefinition := range *securityDefinitions {
secDefName := securityDefinition.GetTerraformConfigurationName()
configProps = append(configProps, Property{
Name: secDefName,
Type: "string",
Required: false,
Description: "",
})
}
}
// Mark as required the properties that are set in the security schemes (they are mandatory)
if globalSecuritySchemes != nil {
for _, securityScheme := range globalSecuritySchemes {
for idx, configProp := range configProps {
if configProp.Name == securityScheme.GetTerraformConfigurationName() {
configProps[idx].Required = true
break
}
}
}
}
if headers != nil {
for _, header := range headers {
configProps = append(configProps, Property{
Name: header.GetHeaderTerraformConfigurationName(),
Type: "string",
Required: header.IsRequired,
Description: "",
})
}
}
return regions, configProps
}
func orderProps(props []Property) []Property {
sort.Slice(props, func(i, j int) bool {
hash1, _ := hashstructure.Hash(props[i], nil)
hash2, _ := hashstructure.Hash(props[j], nil)
return hash1 > hash2
})
return props
} | if t.Namespace == "" { | random_line_split |
Teach.js | // Teaching controller: zsd, student, topic-teach
Ext.define('Youngshine.controller.Teach', {
extend: 'Ext.app.Controller',
config: {
refs: {
course: 'course',
//zsd: 'zsd',
//student: 'student',
topic: 'topic',
topicshow: 'topic-show',
pdf: 'pdf-file'
},
control: {
course: {
//select: 'zsdSelect', //itemtap
itemtap: 'courseItemtap',
//itemswipe: 'courseItemswipe'
},
topic: {
fetch: 'topicFetch', // fetch adaptive test questions
photos: 'topicPhotos', // teaching-process photos for this student & knowledge point
pdf: 'topicPDF',
itemtap: 'topicItemtap',
back: 'topicBack',
},
topicshow: {
back: 'topicshowBack',
del: 'topicshowDelete',
done: 'topicshowDone' // grading
},
'pdf-file': {
back: 'pdfBack'
},
topicteachphotos: {
back: 'topicteachphotosBack',
del: 'topicteachphotosDelete', // delete a single photo
},
}
},
// One-to-many class sessions: roll call from the phone
showCourse: function(teacherID){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// data to preload
var obj = {
"teacherID": teacherID
}
var store = Ext.getStore('Course');
store.getProxy().setUrl(this.getApplication().dataUrl +
'readOne2nCourseList.php?data='+JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
if (success){
console.log(records);
//me.showSearch();
Ext.Viewport.setMasked(false);
me.course = Ext.create('Youngshine.view.teach.Course')
//me.course.down('toolbar').setTitle(localStorage.teacherName+'老师的上课列表')
Ext.Viewport.add(me.course);
Ext.Viewport.setActiveItem(me.course);
/*
// all sessions must be ended before a new class can start
Ext.Array.each(records, function(record) {
console.log(record.data)
if(record.data.endTime < '1901-01-01'){
me.course.down('button[action=addnew]').setDisabled(true)
return false
}
}); */
}else{
Ext.toast('服务请求失败',3000); // toast 1000
};
}
});
},
// jump here after login: the one-to-many teacher's class timetable
showKcb: function(teacherID){
var me = this;
console.log(teacherID)
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// data to preload
var obj = {
"teacherID": teacherID
}
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'readTeacher.php',
callbackKey: 'callback',
//timeout: 14000,
//params: obj, // ajax, not jsonp
params:{
data: JSON.stringify(obj)
},
success: function(result){
console.log(result.data)
Ext.Viewport.setMasked(false);
if (result.success){
// only one record [0]; split its class-time list
var timely_list = result.data.timely_list_one2n.split(',')
timely_list = Ext.Array.sort(timely_list)
console.log(timely_list)
//Ext.getStore('Kcb').setData(timely_list)
var store = Ext.getStore('Kcb')
Ext.Array.each(timely_list, function(timely, index, countriesItSelf) {
//arrTimely.push(timely )
store.add({"timely":timely})
});
console.log(store.data)
/*
var arrTimely = []
Ext.Array.each(timely_list, function(timely, index, countriesItSelf) {
arrTimely.push(timely )
});
arrTimely = Ext.Array.sort()
console.log(arrTimely)
*/
me.kcb = Ext.create('Youngshine.view.teach.Kcb')
Ext.Viewport.add(me.kcb);
Ext.Viewport.setActiveItem(me.kcb);
}
},
});
},
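// For illustration, assuming timely_list_one2n holds comma-separated slots
// (hypothetical format), the split above behaves like:
// "Mon 08:00,Wed 10:00,Fri 14:00".split(',') -> ["Mon 08:00","Wed 10:00","Fri 14:00"]
// and each entry is added to the Kcb store as {"timely": "Mon 08:00"} etc.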
// if 'end class' is tapped
courseItemtap: function( list, index, target, record, e, eOpts ) {
var me = this; console.log(record)
me.topic = Ext.create('Youngshine.view.teach.Topic')
me.topic.setParentRecord(record);
//me.topic.down('toolbar').setTitle(record.data.studentName)
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// 预先加载的数据
var obj = {
"courseNo" : record.data.courseNo, //当前课时的练习题集
"subjectID": record.data.subjectID, //学科,题库不同学科不同表table
}
var store = Ext.getStore('Topic');
store.getProxy().setUrl(me.getApplication().dataUrl +
'readTopicListByCourse.php?data=' + JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
if (success){
console.log(records);
Ext.Viewport.setMasked(false);
Ext.Viewport.add(me.topic); //build
Ext.Viewport.setActiveItem(me.topic);
var btnTest = me.topic.down('button[action=test]'),
btnPhoto = me.topic.down('button[action=photo]')
console.log(btnPhoto)
btnTest.setHidden(records.length<10 ? true : false)
//btnPhoto.setHidden(records.length<1 ? true : false)
}else{
Ext.toast('出错',3000);
};
}
});
},
topicBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.course)
Ext.Viewport.remove(me.topic,true)
},
// PDFs are stored in Tencent Cloud COS
topicPDF: function(rec){
console.log(rec);
var me = this;
var file = 'http://teach1to1-10060757.file.myqcloud.com/teachPDF/';
if(rec.data.subjectID==1){
file = '../PDF/sx/'
}else if(rec.data.subjectID==2){
file += 'wl/'
}else if(rec.data.subjectID==3){ // chemistry (hx); the original duplicate ==2 test made this branch unreachable
file += 'hx/'
}
file += rec.data.PDF
console.log(file)
me.pdf = Ext.create('Youngshine.view.teach.PdfFile')
me.pdf.down('pdfpanel').setSrc(file); // pdf file in zsd table
Ext.Viewport.add(me.pdf)
Ext.Viewport.setActiveItem(me.pdf);
},
pdfBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.topic)
Ext.Viewport.remove(me.pdf,true)
},
// back to student selection; the store stays the same; rec is the parent course
topicPhotos: function(rec,oldView){
var me = this;
me.studyphotos = Ext.create('Youngshine.view.teach.Topic-teach-photos')
//me.studyphotos.setOldView(oldView); // oldView当前父view
me.studyphotos.setRecord(rec); // record
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
var obj = {
"studentstudyID": rec.data.studentstudyID, //zsd & student
}
var store = Ext.getStore('Study-photos');
store.getProxy().setUrl(me.getApplication().dataUrl +
'readStudyPhotosList.php?data='+JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
console.log(records);
Ext.Viewport.setMasked(false);
if (success){
Ext.Viewport.add(me.studyphotos); //build
Ext.Viewport.setActiveItem(me.studyphotos);
}else{
Ext.toast('服务请求失败',3000);
};
}
});
},
// fetch adaptive questions for this student by difficulty level, and add the records to store: topic-teach
topicFetch: function(obj){
var me = this;
console.log(obj);
Ext.Viewport.setMasked({xtype:'loadmask',message:'添加自适应题目'});
// adaptive question selection: fetch the first group of questions (3,4,5) from the student's level; later groups follow the grading level of completed questions
// fetch the records, save them straight into the topic-teach table, then reload the store
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'createTopicteach.php',
callbackKey: 'callback',
timeout: 9000,
params:{
data: JSON.stringify(obj)
/* data: '{"level":"' + level +
'","zsdID":"' + zsdID +
'","studentstudyID":"' + studentstudyID + '"}' */
},
success: function(result){ // server request succeeded
Ext.Viewport.setMasked(false);
if (result.success){ // the response reports success
//console.log(result.data)
// added directly to the backend table ghjy_topic-teach, newest on top
Ext.getStore('Topic').load()
//store.add(result.data).. store.insert()
//console.log(store.data)
}else{
Ext.toast(result.message,3000);
}
},
});
},
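// Example payload for createTopicteach.php (hypothetical values, matching the
// commented-out shape inside topicFetch above):
// { "level": "3", "zsdID": "1024", "studentstudyID": "88" }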
topicItemtap: function(list,index,item,record,e){
var me = this;
if(e.target.className == 'answer'){
//this.topicteach.hide(); //remove(); used when navigating back
me.topicshow = Ext.create('Youngshine.view.teach.TopicShow');
me.topicshow.setParentRecord(record); // just passes the topic id along
Ext.Viewport.add(me.topicshow) //build
Ext.Viewport.setActiveItem(me.topicshow)
}else{
this.overlay = Ext.Viewport.add({
xtype: 'panel',
modal: true,
hideOnMaskTap: true,
centered: true,
width: 550, height:550,
scrollable: true,
//layout: 'vbox',
items: [{
xtype: 'toolbar',
docked: 'top',
ui: 'light',
title: '题目',
},{
xtype: 'panel',
html: record.data.content,
itemId: 'topicContent',
styleHtmlContent: true
}],
})
this.overlay.show()
}
},
topicshowBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.topic)
Ext.Viewport.remove(me.topicshow,true)
},
topicshowDelete: function(record,oldView){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在删除'});
Ext.data.JsonP.request({
// Delete the server-side record: better to flag it than really delete? Or disallow deleting expired and scheduled ones?
// Otherwise a deleted question may reappear the next time questions are added
url: me.getApplication().dataUrl + 'deleteOne2nTopic.php',
callbackKey: 'callback',
params:{
data: '{"one2ntopicID":' + record.data.one2ntopicID + '}'
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
// after the server-side delete succeeds, remove the record from the client store so the list view updates
Ext.getStore('Topic').remove(record); //.removeAt(i);
Ext.Viewport.setActiveItem(me.topic);
Ext.Viewport.remove(me.topicshow,true); // close this view
}else{
Ext.toast(result.message,3000);
}
},
failure: function(){
Ext.Viewport.setMasked(false); //unmask
Ext.toast('服务请求失败',3000);
}
});
},
// save & refresh 单个题目show.js
topicshowDone: function(done,fullDone,record,view){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在评分'});
var obj = {
"done": done,
"one2ntopicID": record.data.one2ntopicID
}
console.log(obj)
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'updateOne2nTopic.php',
callbackKey: 'callback',
params:{
data: JSON.stringify(obj)
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
//update local data with the grading result: model.set, setRecord/updateRecord
//var model = record.data ????????
record.set('done',done)
record.set('fullDone',fullDone)
}else{
Ext.toast(result.message,3000); // error modal
}
}
});
},
| },
// delete a teaching photo
topicteachphotosDelete: function(rec){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在删除'});
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'deleteStudyPhotos.php',
callbackKey: 'callback',
params:{
data: '{"studyphotoID":' + rec.data.studyphotoID + '}'
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
Ext.getStore('Study-photos').remove(rec);
}else{
Ext.toast(result.message,3000);
}
},
failure: function(){
Ext.Viewport.setMasked(false);
Ext.toast('服务请求失败');
}
});
},
/* if the user is logged in, the controller's launch loads the related stores */
launch: function(){
this.callParent(arguments);
},
init: function(){
this.callParent(arguments);
console.log('teach controller init');
}
}); | topicteachphotosBack: function(){
var me = this
Ext.Viewport.setActiveItem(me.topicteach)
Ext.Viewport.remove(me.topicteachphotos,true) | random_line_split |
Teach.js | // Teaching controller: zsd, student, topic-teach
Ext.define('Youngshine.controller.Teach', {
extend: 'Ext.app.Controller',
config: {
refs: {
course: 'course',
//zsd: 'zsd',
//student: 'student',
topic: 'topic',
topicshow: 'topic-show',
pdf: 'pdf-file'
},
control: {
course: {
//select: 'zsdSelect', //itemtap
itemtap: 'courseItemtap',
//itemswipe: 'courseItemswipe'
},
topic: {
fetch: 'topicFetch', // fetch adaptive test questions
photos: 'topicPhotos', // teaching-process photos for this student & knowledge point
pdf: 'topicPDF',
itemtap: 'topicItemtap',
back: 'topicBack',
},
topicshow: {
back: 'topicshowBack',
del: 'topicshowDelete',
done: 'topicshowDone' // grading
},
'pdf-file': {
back: 'pdfBack'
},
topicteachphotos: {
back: 'topicteachphotosBack',
del: 'topicteachphotosDelete', // delete a single photo
},
}
},
// One-to-many class sessions: roll call from the phone
showCourse: function(teacherID){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// data to preload
var obj = {
"teacherID": teacherID
}
var store = Ext.getStore('Course');
store.getProxy().setUrl(this.getApplication().dataUrl +
'readOne2nCourseList.php?data='+JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
if (success){
console.log(records);
//me.showSearch();
Ext.Viewport.setMasked(false);
me.course = Ext.create('Youngshine.view.teach.Course')
//me.course.down('toolbar').setTitle(localStorage.teacherName+'老师的上课列表')
Ext.Viewport.add(me.course);
Ext.Viewport.setActiveItem(me.course);
/*
// all sessions must be ended before a new class can start
Ext.Array.each(records, function(record) {
console.log(record.data)
if(record.data.endTime < '1901-01-01'){
me.course.down('button[action=addnew]').setDisabled(true)
return false
}
}); */
}else{
Ext.toast('服务请求失败',3000); // toast 1000
};
}
});
},
// jump here after login: the one-to-many teacher's class timetable
showKcb: function(teacherID){
var me = this;
console.log(teacherID)
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// data to preload
var obj = {
"teacherID": teacherID
}
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'readTeacher.php',
callbackKey: 'callback',
//timeout: 14000,
//params: obj, // ajax, not jsonp
params:{
data: JSON.stringify(obj)
},
success: function(result){
console.log(result.data)
Ext.Viewport.setMasked(false);
if (result.success){
// only one record [0]; split its class-time list
var timely_list = result.data.timely_list_one2n.split(',')
timely_list = Ext.Array.sort(timely_list)
console.log(timely_list)
//Ext.getStore('Kcb').setData(timely_list)
var store = Ext.getStore('Kcb')
Ext.Array.each(timely_list, function(timely, index, countriesItSelf) {
//arrTimely.push(timely )
store.add({"timely":timely})
});
console.log(store.data)
/*
var arrTimely = []
Ext.Array.each(timely_list, function(timely, index, countriesItSelf) {
arrTimely.push(timely )
});
arrTimely = Ext.Array.sort()
console.log(arrTimely)
*/
me.kcb = Ext.create('Youngshine.view.teach.Kcb')
Ext.Viewport.add(me.kcb);
Ext.Viewport.setActiveItem(me.kcb);
}
},
});
},
// if 'end class' is tapped
courseItemtap: function( list, index, target, record, e, eOpts ) {
var me = this; console.log(record)
me.topic = Ext.create('Youngshine.view.teach.Topic')
me.topic.setParentRecord(record);
//me.topic.down('toolbar').setTitle(record.data.studentName)
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
// 预先加载的数据
var obj = {
"courseNo" : record.data.courseNo, //当前课时的练习题集
"subjectID": record.data.subjectID, //学科,题库不同学科不同表table
}
var store = Ext.getStore('Topic');
store.getProxy().setUrl(me.getApplication().dataUrl +
'readTopicListByCourse.php?data=' + JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
if (success){
console.log(records);
Ext.Viewport.setMasked(false);
Ext.Viewport.add(me.topic); //build
Ext.Viewport.setActiveItem(me.topic);
var btnTest = me.topic.down('button[action=test]'),
btnPhoto = me.topic.down('button[action=photo]')
console.log(btnPhoto)
btnTest.setHidden(records.length<10 ? true : false)
//btnPhoto.setHidden(records.length<1 ? true : false)
}else{
Ext.toast('出错',3000);
};
}
});
},
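// The request pattern used throughout this controller is
// <dataUrl><endpoint>.php?data=<JSON-encoded object>, e.g. (hypothetical values):
// readTopicListByCourse.php?data={"courseNo":"C001","subjectID":1}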
topicBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.course)
Ext.Viewport.remove(me.topic,true)
},
// PDFs are stored in Tencent Cloud COS
topicPDF: function(rec){
console.log(rec);
var me = this;
var file = 'http://teach1to1-10060757.file.myqcloud.com/teachPDF/';
if(rec.data.subjectID==1){
file = '../PDF/sx/'
}else if(rec.data.subjectID==2){
file += 'wl/'
}else if(rec.data.subjectID==3){ // chemistry (hx); the original duplicate ==2 test made this branch unreachable
file += 'hx/'
}
file += rec.data.PDF
console.log(file)
me.pdf = Ext.create('Youngshine.view.teach.PdfFile')
me.pdf.down('pdfpanel').setSrc(file); // pdf file in zsd table
Ext.Viewport.add(me.pdf)
Ext.Viewport.setActiveItem(me.pdf);
},
pdfBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.topic)
Ext.Viewport.remove(me.pdf,true)
},
// back to student selection; the store stays the same; rec is the parent course
topicPhotos: function(rec,oldView){
var me = this;
me.studyphotos = Ext.create('Youngshine.view.teach.Topic-teach-photos')
//me.studyphotos.setOldView(oldView); // oldView当前父view
me.studyphotos.setRecord(rec); // record
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在加载'});
var obj = {
"studentstudyID": rec.data.studentstudyID, //zsd & student
}
var store = Ext.getStore('Study-photos');
store.getProxy().setUrl(me.getApplication().dataUrl +
'readStudyPhotosList.php?data='+JSON.stringify(obj) );
store.load({ //异步async
callback: function(records, operation, success){
console.log(records);
Ext.Viewport.setMasked(false);
if (success){
Ext.Viewport.add(me.studyphotos); //build
Ext.Viewport.setActiveItem(me.studyphotos);
}else{
Ext.toast('服务请求失败',3000);
};
}
});
},
// fetch adaptive questions for this student by difficulty level, and add the records to store: topic-teach
topicFetch: function(obj){
var me = this;
console.log(obj);
Ext.Viewport.setMasked({xtype:'loadmask',message:'添加自适应题目'});
// adaptive question selection: fetch the first group of questions (3,4,5) from the student's level; later groups follow the grading level of completed questions
// fetch the records, save them straight into the topic-teach table, then reload the store
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'createTopicteach.php',
callbackKey: 'callback',
timeout: 9000,
params:{
data: JSON.stringify(obj)
/* data: '{"level":"' + level +
'","zsdID":"' + zsdID +
'","studentstudyID":"' + studentstudyID + '"}' */
},
success: function(result){ // server request succeeded
Ext.Viewport.setMasked(false);
if (result.success){ // the response reports success
//console.log(result.data)
// added directly to the backend table ghjy_topic-teach, newest on top
Ext.getStore('Topic').load()
//store.add(result.data).. store.insert()
//console.log(store.data)
}else{
Ext.toast(result.message,3000);
}
},
});
},
topicItemtap: function(list,index,item,record,e){
var me = this;
if(e.target.className == 'answer'){
//this.topicteach.hide(); //remove(); used when navigating back
me.topicshow = Ext.create('Youngshine.view.teach.TopicShow');
me.topicshow.setParentRecord(record); // just passes the topic id along
Ext.Viewport.add(me.topicshow) //build
Ext.Viewport.setActiveItem(me.topicshow)
}else{
this.overlay = Ext.Viewport.add({
xtype: 'panel',
modal: true,
hideOnMaskTap: true,
centered: true,
width: 550, height:550,
scrollable: true,
//layout: 'vbox',
items: [{
xtype: 'toolbar',
docked: 'top',
ui: 'light',
title: '题目',
},{
xtype: 'panel',
| icshowDelete: function(record,oldView){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在删除'});
Ext.data.JsonP.request({
// Delete the server-side record: better to flag it than really delete? Or disallow deleting expired and scheduled ones?
// Otherwise a deleted question may reappear the next time questions are added
url: me.getApplication().dataUrl + 'deleteOne2nTopic.php',
callbackKey: 'callback',
params:{
data: '{"one2ntopicID":' + record.data.one2ntopicID + '}'
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
// after the server-side delete succeeds, remove the record from the client store so the list view updates
Ext.getStore('Topic').remove(record); //.removeAt(i);
Ext.Viewport.setActiveItem(me.topic);
Ext.Viewport.remove(me.topicshow,true); // close this view
}else{
Ext.toast(result.message,3000);
}
},
failure: function(){
Ext.Viewport.setMasked(false); //unmask
Ext.toast('服务请求失败',3000);
}
});
},
// save & refresh 单个题目show.js
topicshowDone: function(done,fullDone,record,view){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在评分'});
var obj = {
"done": done,
"one2ntopicID": record.data.one2ntopicID
}
console.log(obj)
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'updateOne2nTopic.php',
callbackKey: 'callback',
params:{
data: JSON.stringify(obj)
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
//update local data with the grading result: model.set, setRecord/updateRecord
//var model = record.data ????????
record.set('done',done)
record.set('fullDone',fullDone)
}else{
Ext.toast(result.message,3000); // error modal
}
}
});
},
topicteachphotosBack: function(){
var me = this
Ext.Viewport.setActiveItem(me.topicteach)
Ext.Viewport.remove(me.topicteachphotos,true)
},
// delete a teaching photo
topicteachphotosDelete: function(rec){
var me = this;
Ext.Viewport.setMasked({xtype:'loadmask',message:'正在删除'});
Ext.data.JsonP.request({
url: me.getApplication().dataUrl + 'deleteStudyPhotos.php',
callbackKey: 'callback',
params:{
data: '{"studyphotoID":' + rec.data.studyphotoID + '}'
},
success: function(result){
Ext.Viewport.setMasked(false);
if(result.success){
Ext.getStore('Study-photos').remove(rec);
}else{
Ext.toast(result.message,3000);
}
},
failure: function(){
Ext.Viewport.setMasked(false);
Ext.toast('服务请求失败');
}
});
},
/* if the user is logged in, the controller's launch loads the related stores */
launch: function(){
this.callParent(arguments);
},
init: function(){
this.callParent(arguments);
console.log('teach controller init');
}
});
| html: record.data.content,
itemId: 'topicContent',
styleHtmlContent: true
}],
})
this.overlay.show()
}
},
topicshowBack: function(oldView){
var me = this;
Ext.Viewport.setActiveItem(me.topic)
Ext.Viewport.remove(me.topicshow,true)
},
top | conditional_block |
networkBuilder.go | package builders
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io"
"math/rand"
"net"
"os"
"strings"
"github.com/emicklei/dot"
"github.com/pkg/errors"
"github.com/threefoldtech/tfexplorer/client"
"github.com/threefoldtech/tfexplorer/models/generated/workloads"
"github.com/threefoldtech/tfexplorer/schema"
"github.com/threefoldtech/zos/pkg"
"github.com/threefoldtech/zos/pkg/crypto"
"github.com/threefoldtech/zos/pkg/network/types"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// NetworkBuilder is a struct that can build networks
type NetworkBuilder struct {
workloads.Network
NodeID string
explorer *client.Client
AccessPoints []AccessPoint `json:"access_points,omitempty"`
// NetResources field override
NetResources []NetResource `json:"net_resources"`
}
// NetResource is the description of a part of a network local to a specific node
type NetResource struct {
workloads.NetworkNetResource
// Public endpoints
PubEndpoints []net.IP `json:"pub_endpoints"`
}
// AccessPoint info for a network, defining a node which will act as the AP, and
// the subnet which will be routed through it
type AccessPoint struct {
// NodeID of the access point in the network
NodeID string `json:"node_id"`
// Subnet to be routed through this access point
Subnet schema.IPRange `json:"subnet"`
WGPublicKey string `json:"wg_public_key"`
IP4 bool `json:"ip4"`
}
// NewNetworkBuilder creates a new network builder
func NewNetworkBuilder(name string, iprange schema.IPRange, explorer *client.Client) *NetworkBuilder {
return &NetworkBuilder{
Network: workloads.Network{
Name: name,
Iprange: iprange,
NetworkResources: []workloads.NetworkNetResource{},
},
explorer: explorer,
}
}
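// Usage sketch (hypothetical values; constructing the explorer client is elided):
//
//	_, ipNet, _ := net.ParseCIDR("10.1.0.0/16")
//	nb := NewNetworkBuilder("demo-net", schema.IPRange{*ipNet}, explorerClient)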
// LoadNetworkBuilder loads a network builder based on a file path
func LoadNetworkBuilder(reader io.Reader, explorer *client.Client) (*NetworkBuilder, error) {
network := workloads.Network{}
err := json.NewDecoder(reader).Decode(&network)
if err != nil {
return &NetworkBuilder{}, err
}
networkBuilder := &NetworkBuilder{
Network: network,
explorer: explorer,
}
if err = networkBuilder.setPubEndpoints(); err != nil {
return nil, err
}
networkBuilder.extractAccessPoints()
return networkBuilder, nil
}
// Save saves the network builder to an IO.Writer
func (n *NetworkBuilder) Save(writer io.Writer) error {
return json.NewEncoder(writer).Encode(n.Network)
}
// Build returns the network
func (n *NetworkBuilder) Build() workloads.Network {
return n.Network
}
// WithName sets the name to the network
func (n *NetworkBuilder) WithName(name string) *NetworkBuilder {
n.Network.Name = name
return n
}
// WithIPRange sets the ip range to the network
func (n *NetworkBuilder) WithIPRange(ipRange schema.IPRange) *NetworkBuilder |
// WithStatsAggregator sets the stats aggregators to the network
func (n *NetworkBuilder) WithStatsAggregator(aggregators []workloads.StatsAggregator) *NetworkBuilder {
n.Network.StatsAggregator = aggregators
return n
}
// WithNetworkResources sets the network resources to the network
func (n *NetworkBuilder) WithNetworkResources(netResources []workloads.NetworkNetResource) *NetworkBuilder {
n.Network.NetworkResources = netResources
return n
}
// AddNode adds a node to the network
// the subnet will be added as network resource to the node
// forceHidden will set no public endpoints to the node
func (n *NetworkBuilder) AddNode(nodeID string, subnet string, port uint, forceHidden bool) (*NetworkBuilder, error) {
n.NodeID = nodeID
if subnet == "" {
return n, fmt.Errorf("subnet cannot be empty")
}
ipnet, err := types.ParseIPNet(subnet)
if err != nil {
return n, errors.Wrap(err, "invalid subnet")
}
if port == 0 {
port, err = n.pickPort()
if err != nil {
return n, errors.Wrap(err, "failed to pick wireguard port")
}
}
privateKey, err := wgtypes.GeneratePrivateKey()
if err != nil {
return n, errors.Wrap(err, "error during wireguard key generation")
}
sk := privateKey.String()
pk, err := crypto.KeyFromID(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to parse nodeID")
}
encrypted, err := crypto.Encrypt([]byte(sk), pk)
if err != nil {
return n, errors.Wrap(err, "failed to encrypt private key")
}
pubSubnets, err := n.getEndPointAddrs(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to get node public endpoints")
}
var endpoints []net.IP
if !forceHidden {
for _, sn := range pubSubnets {
endpoints = append(endpoints, sn.IP)
}
}
nr := NetResource{
NetworkNetResource: workloads.NetworkNetResource{
NodeId: nodeID,
Iprange: schema.IPRange{ipnet.IPNet},
WireguardListenPort: int64(port),
WireguardPublicKey: privateKey.PublicKey().String(),
WireguardPrivateKeyEncrypted: hex.EncodeToString(encrypted),
},
PubEndpoints: endpoints,
}
n.NetResources = append(n.NetResources, nr)
if err = n.generatePeers(); err != nil {
return n, errors.Wrap(err, "failed to generate peers")
}
return n, nil
}
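// Usage sketch (hypothetical node ID): passing port 0 lets AddNode pick a free
// wireguard port, and forceHidden=false keeps any public endpoints the node has:
//
//	nb, err := nb.AddNode("A1B2C3", "10.1.1.0/24", 0, false)
//	if err != nil {
//		// handle error
//	}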
// AddAccess adds access to a node in the network
// the subnet will be routed through the accesspoint of the node
func (n *NetworkBuilder) AddAccess(nodeID string, subnet schema.IPRange, wgPubKey string, ip4 bool) (*NetworkBuilder, string, error) {
if nodeID == "" {
return n, "", fmt.Errorf("nodeID cannot be empty")
}
var nodeExists bool
var node NetResource
for _, nr := range n.NetResources {
if nr.NodeId == nodeID {
node = nr
nodeExists = true
break
}
}
if !nodeExists {
return n, "", errors.New("can not add access through a node which is not in the network")
}
if len(node.PubEndpoints) == 0 {
return n, "", errors.New("access node must have at least 1 public endpoint")
}
var endpoint string
for _, ep := range node.PubEndpoints {
if ep.To4() != nil {
// ipv4 address
if ip4 {
endpoint = fmt.Sprintf("%s:%d", ep.String(), node.WireguardListenPort)
break
}
// we want ipv6 so use the next address
continue
}
if ep.To16() != nil {
// due to the previous branch this can now only be an ipv6 address
if !ip4 {
endpoint = fmt.Sprintf("[%s]:%d", node.PubEndpoints[0].String(), node.WireguardListenPort)
break
}
// we want ipv4 so use next address
continue
}
}
if endpoint == "" {
return n, "", errors.New("access node has no public endpoint of the requested type")
}
var privateKey wgtypes.Key
if wgPubKey == "" {
// assign with = rather than :=, otherwise the generated key shadows the outer
// privateKey and genWGQuick below would render a zero-value private key
var err error
privateKey, err = wgtypes.GeneratePrivateKey()
if err != nil {
return n, "", errors.Wrap(err, "error during wireguard key generation")
}
wgPubKey = privateKey.PublicKey().String()
}
ap := AccessPoint{
NodeID: nodeID,
Subnet: subnet,
WGPublicKey: wgPubKey,
IP4: ip4,
}
n.AccessPoints = append(n.AccessPoints, ap)
if err := n.generatePeers(); err != nil {
return n, "", errors.Wrap(err, "failed to generate peers")
}
wgConf, err := genWGQuick(privateKey.String(), subnet, node.WireguardPublicKey, n.Network.Iprange, endpoint)
if err != nil {
return n, "", err
}
return n, wgConf, nil
}
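// The returned wgConf is a ready-to-use wg-quick configuration. A sketch of a
// typical follow-up (hypothetical IDs and paths):
//
//	_, subnet, _ := net.ParseCIDR("10.1.100.0/24")
//	nb, conf, err := nb.AddAccess("A1B2C3", schema.IPRange{*subnet}, "", true)
//	if err != nil {
//		// handle error
//	}
//	_ = ioutil.WriteFile("access.conf", []byte(conf), 0600) // then: wg-quick up ./access.conf
//
// Note that when the caller supplies its own wgPubKey, the builder never sees the
// matching private key, so the rendered config keeps a zero-value PrivateKey placeholder.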
// RemoveNode removes a node
func (n *NetworkBuilder) RemoveNode(schema string, nodeID string) error {
for i, nr := range n.NetResources {
if nr.NodeId == nodeID {
n.NetResources = append(n.NetResources[:i], n.NetResources[i+1:]...)
break
}
}
// os.Create (not os.Open) so the updated schema is actually written back
f, err := os.Create(schema)
if err != nil {
return errors.Wrap(err, "failed to open network schema")
}
defer f.Close()
return n.Save(f)
}
func (n *NetworkBuilder) setPubEndpoints() error {
for i := range n.NetResources {
pep, err := n.getEndPointAddrs(pkg.StrIdentifier(n.NetResources[i].NodeId))
if err != nil {
return err
}
var endpoints []net.IP
for _, sn := range pep {
endpoints = append(endpoints, sn.IP)
}
n.NetResources[i].PubEndpoints = endpoints
}
// remove the pub endpoints from nodes which we assume have been marked
// as force hidden
hiddenNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) > 0 {
for _, peer := range nr.Peers {
if peer.Endpoint == "" {
hiddenNodes[peer.PublicKey] = struct{}{}
}
}
}
}
for i := range n.NetResources {
if _, exists := hiddenNodes[n.NetResources[i].WireguardPublicKey]; exists {
n.NetResources[i].PubEndpoints = nil
}
}
return nil
}
func (n *NetworkBuilder) pickPort() (uint, error) {
node, err := n.explorer.Directory.NodeGet(n.NodeID, false)
if err != nil {
return 0, err
}
p := uint(rand.Intn(6000) + 2000)
for isIn(node.WgPorts, p) {
p = uint(rand.Intn(6000) + 2000)
}
return p, nil
}
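// Note: pickPort draws candidates from [2000, 8000) and retries until one is not
// already in the node's reported WgPorts, so it terminates quickly unless the
// node has nearly exhausted that range.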
func isIn(l []int64, i uint) bool {
for _, x := range l {
if int64(i) == x {
return true
}
}
return false
}
func hasIPv4(n NetResource) bool {
for _, pep := range n.PubEndpoints {
if pep.To4() != nil {
return true
}
}
return false
}
// This function assumes:
// - that a hidden node has functioning IPv4
// - that a public node ALWAYS has public IPv6, and OPTIONALLY public IPv4
// - that any public endpoint on any node is actually reachable (i.e. no firewall
// blocking incoming traffic)
func (n *NetworkBuilder) generatePeers() error {
// Find public node, which will be used to connect all hidden nodes.
// In case there are hidden nodes, the public node needs IPv4 support as well.
var hasHiddenNodes bool
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hasHiddenNodes = true
break
}
}
// Look for a public node to connect hidden nodes. This is only needed
// in case there are hidden nodes.
var pubNr string
if hasHiddenNodes {
for _, nr := range n.NetResources {
if hasIPv4(nr) {
pubNr = nr.NodeId // the inspected resource's node, not the builder's last-added NodeID
break
}
}
if pubNr == "" {
return errors.New("Network has hidden nodes but no public IPv4 node exists")
}
}
// We also need to inform nodes how to route the external access subnets.
// Working with the knowledge that these external subnets come in through
// the network through a single access point, which is part of the network
// and thus already routed, we can map the external subnets to the subnet
// of the access point, and add these external subnets to all peers who also
// have the associated internal subnet.
//
// Map the network subnets to their respective node ids first for easy access later
internalSubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
internalSubnets[nr.NodeId] = nr.Iprange
}
externalSubnets := make(map[string][]schema.IPRange) // go does not like `types.IPNet` as key
for _, ap := range n.AccessPoints {
externalSubnets[internalSubnets[ap.NodeID].String()] = append(externalSubnets[internalSubnets[ap.NodeID].String()], ap.Subnet)
}
// Maintain a mapping of access point nodes to the subnet and wg key they give access
// to, as these need to be added as peers as well for these nodes
accessPoints := make(map[string][]AccessPoint)
for _, ap := range n.AccessPoints {
accessPoints[ap.NodeID] = append(accessPoints[ap.NodeID], ap)
}
// Find all hidden nodes, and collect their subnets. Also collect the subnets
// of public IPv6 only nodes, since hidden nodes need IPv4 to connect.
hiddenSubnets := make(map[string]schema.IPRange)
// also maintain subnets from nodes who have only IPv6 since this will also
// need to be routed for hidden nodes
ipv6OnlySubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hiddenSubnets[nr.NodeId] = nr.Iprange
continue
}
if !hasIPv4(nr) {
ipv6OnlySubnets[nr.NodeId] = nr.Iprange
}
}
for i := range n.NetResources {
// Note: we need to loop by index and manually assign nr, doing
// for _, nr := range ... causes nr to be copied, meaning we can't modify
// it in place
nr := &n.NetResources[i]
nr.Peers = []workloads.WireguardPeer{}
for _, onr := range n.NetResources {
if nr.NodeId == onr.NodeId { // skip ourselves: compare this resource's node, not the builder's NodeID field
continue
}
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = onr.Iprange
allowedIPs[1] = *wgIP(&onr.Iprange)
var endpoint string
if len(nr.PubEndpoints) == 0 {
// If node is hidden, set only public peers (with IPv4), and set first public peer to
// contain all hidden subnets, except for the one owned by the node
if !hasIPv4(onr) {
continue
}
// Also add all other subnets if this is the pub node
if onr.NodeId == pubNr {
for owner, subnet := range hiddenSubnets {
// Do not add our own subnet
if owner == nr.NodeId {
continue
}
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
for _, subnet := range ipv6OnlySubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Endpoint must be IPv4
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
} else if len(onr.PubEndpoints) == 0 && hasIPv4(*nr) {
// if the peer is hidden but we have IPv4, we can connect to it, but we don't know
// an endpoint.
endpoint = ""
} else {
// if we are not hidden, we add all other nodes, unless we don't
// have IPv4, because then we also can't connect to hidden nodes.
// Ignore hidden nodes if we don't have IPv4
if !hasIPv4(*nr) && len(onr.PubEndpoints) == 0 {
continue
}
// both nodes are public therefore we can connect over IPv6
// if this is the selected pubNr - also need to add allowedIPs
// for the hidden nodes
if onr.NodeId == pubNr {
for _, subnet := range hiddenSubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Since the node is not hidden, we know that it MUST have at least
// 1 IPv6 address
for _, pep := range onr.PubEndpoints {
if pep.To4() == nil && pep.To16() != nil {
endpoint = fmt.Sprintf("[%s]:%d", pep.String(), onr.WireguardListenPort)
break
}
}
// as a fallback assign IPv4
if endpoint == "" {
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
}
}
// Add subnets for external access
for i := 0; i < len(allowedIPs); i++ {
for _, subnet := range externalSubnets[allowedIPs[i].String()] {
allowedIPs = append(allowedIPs, schema.IPRange{subnet.IPNet})
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: onr.WireguardPublicKey,
Iprange: onr.Iprange,
AllowedIprange: allowedIPs,
Endpoint: endpoint,
})
}
// Add configured external access peers
for _, ea := range accessPoints[nr.NodeId] {
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = schema.IPRange{ea.Subnet.IPNet}
allowedIPs[1] = *wgIP(&schema.IPRange{ea.Subnet.IPNet})
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: ea.WGPublicKey,
Iprange: schema.IPRange{ea.Subnet.IPNet},
AllowedIprange: allowedIPs,
Endpoint: "",
})
}
}
return nil
}
func isIPv4Subnet(n schema.IPRange) bool {
ones, bits := n.IPNet.Mask.Size()
if bits != 32 {
return false
}
return ones <= 30
}
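// Examples: 192.168.1.0/24 -> true; /31 and /32 -> false (no room for hosts);
// any IPv6 range reports bits != 32 and is rejected.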
func genWGQuick(wgPrivateKey string, localSubnet schema.IPRange, peerWgPubKey string, allowedSubnet schema.IPRange, peerEndpoint string) (string, error) {
type data struct {
PrivateKey string
Address string
PeerWgPubKey string
AllowedSubnet string
PeerEndpoint string
}
if !isIPv4Subnet(localSubnet) {
return "", errors.New("local subnet is not a valid IPv4 subnet")
}
tmpl, err := template.New("wg").Parse(wgTmpl)
if err != nil {
return "", err
}
buf := &bytes.Buffer{}
if err := tmpl.Execute(buf, data{
PrivateKey: wgPrivateKey,
Address: wgIP(&schema.IPRange{localSubnet.IPNet}).String(),
PeerWgPubKey: peerWgPubKey,
AllowedSubnet: strings.Join([]string{allowedSubnet.String(), types.NewIPNet(wgSubnet(&allowedSubnet.IPNet)).String()}, ","),
PeerEndpoint: peerEndpoint,
}); err != nil {
return "", err
}
return buf.String(), nil
}
var wgTmpl = `
[Interface]
PrivateKey = {{.PrivateKey}}
Address = {{.Address}}
[Peer]
PublicKey = {{.PeerWgPubKey}}
AllowedIPs = {{.AllowedSubnet}}
PersistentKeepalive = 20
{{if .PeerEndpoint}}Endpoint = {{.PeerEndpoint}}{{end}}
`
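// A rendered config looks roughly like this (hypothetical keys and endpoint,
// assuming local subnet 10.1.100.0/24 inside network 10.1.0.0/16):
//
//	[Interface]
//	PrivateKey = yAnz5TF+lXXJte14tji3zlMNq+hd2rYUIgJBgB3fBmk=
//	Address = 100.64.1.100/32
//	[Peer]
//	PublicKey = xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=
//	AllowedIPs = 10.1.0.0/16,100.64.1.0/24
//	PersistentKeepalive = 20
//	Endpoint = 185.69.166.1:7777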
// NetworkGraph creates a networkgraph for a network
func (n *NetworkBuilder) NetworkGraph(w io.Writer) error {
nodes := make(map[string]dot.Node)
nodesByID := make(map[string]dot.Node)
graph := dot.NewGraph(dot.Directed)
for _, nr := range n.NetResources {
node := graph.Node(strings.Join([]string{nr.NodeId, nr.Iprange.String()}, "\n")).Box()
// set special style for "hidden" nodes
if len(nr.PubEndpoints) == 0 {
node.Attr("style", "dashed")
node.Attr("color", "blue")
graph.AddToSameRank("hidden nodes", node)
}
nodes[nr.WireguardPublicKey] = node
nodesByID[nr.NodeId] = node
}
// add external access
for _, ea := range n.AccessPoints {
node := graph.Node(strings.Join([]string{"External network", ea.Subnet.String()}, "\n")).Box()
// set style for hidden nodes
node.Attr("style", "dashed")
node.Attr("color", "green")
graph.AddToSameRank("external access", node)
// add link to access point
edge := graph.Edge(node, nodesByID[ea.NodeID], n.Iprange.String())
if ea.IP4 {
edge.Attr("color", "blue")
}
nodes[ea.WGPublicKey] = node
}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
allowedIPs := make([]string, 0, len(peer.AllowedIprange)/2)
for _, aip := range peer.AllowedIprange {
if !isCGN(aip) {
allowedIPs = append(allowedIPs, aip.String())
}
}
edge := graph.Edge(nodes[nr.WireguardPublicKey], nodes[peer.PublicKey], strings.Join(allowedIPs, "\n"))
if peer.Endpoint == "" {
// connections to this peer are IPv4 -> blue, and can not be initiated by this node -> dashed
edge.Attr("color", "blue").Attr("style", "dashed")
continue
}
// use SplitHostPort so bracketed IPv6 endpoints such as
// "[2001:db8::1]:51820" are stripped correctly before parsing
if host, _, err := net.SplitHostPort(peer.Endpoint); err == nil && net.ParseIP(host).To4() != nil {
// IPv4 connection -> blue
edge.Attr("color", "blue")
}
}
}
graph.Write(w)
return nil
}
func wgIP(subnet *schema.IPRange) *schema.IPRange {
// example: 10.3.1.0 -> 100.64.3.1
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
return &schema.IPRange{net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(32, 32),
}}
}
func wgSubnet(subnet *net.IPNet) *net.IPNet {
// example: 10.3.1.0/24 -> 100.64.3.1/32 (same mapping as wgIP, with the
// prefix length increased by 8 bits)
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
ones, _ := subnet.Mask.Size()
return &net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(ones+8, 32),
}
}
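// exampleWgMapping is an illustrative sketch added by the editor (not part
// of the original file; the 10.3.1.0/24 value is made up). It shows how the
// two helpers above map a subnet into the 100.64.0.0/10 carrier-grade NAT
// range: 0x64 is 100 and 0x40 is 64.
func exampleWgMapping() {
_, ipnet, err := net.ParseCIDR("10.3.1.0/24")
if err != nil {
panic(err)
}
// the second and third octets carry over: 10.3.1.0 -> 100.64.3.1
fmt.Println(wgIP(&schema.IPRange{*ipnet})) // 100.64.3.1/32
fmt.Println(wgSubnet(ipnet))               // 100.64.3.1/32 (/24 plus 8 mask bits)
}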
func isPrivateIP(ip net.IP) bool {
privateIPBlocks := []*net.IPNet{}
for _, cidr := range []string{
"127.0.0.0/8", // IPv4 loopback
"10.0.0.0/8", // RFC1918
"172.16.0.0/12", // RFC1918
"192.168.0.0/16", // RFC1918
"169.254.0.0/16", // RFC3927 link-local
"::1/128", // IPv6 loopback
"fe80::/10", // IPv6 link-local
"fc00::/7", // IPv6 unique local addr
} {
_, block, err := net.ParseCIDR(cidr)
if err != nil {
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
}
privateIPBlocks = append(privateIPBlocks, block)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true
}
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return true
}
}
return false
}
func isCGN(subnet schema.IPRange) bool {
_, block, err := net.ParseCIDR("100.64.0.0/10")
if err != nil {
panic(err)
}
return block.Contains(subnet.IP)
}
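// Editor's note (illustrative): isCGN reports whether a range falls in the
// 100.64.0.0/10 carrier-grade NAT block that wgIP and wgSubnet map into,
// e.g. true for 100.64.3.1/32 and false for 10.3.1.0/24. NetworkGraph uses
// it to keep these wireguard-internal routes out of its edge labels.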
func (n *NetworkBuilder) extractAccessPoints() {
// gather all actual nodes, using their wg pubkey as key in the map (NodeID
// can't be seen in the actual peer struct)
actualNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
actualNodes[nr.WireguardPublicKey] = struct{}{}
}
aps := []AccessPoint{}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
if _, exists := actualNodes[peer.PublicKey]; !exists {
// peer is not a node so it must be external
aps = append(aps, AccessPoint{
NodeID: nr.NodeId,
Subnet: peer.Iprange,
WGPublicKey: peer.PublicKey,
// we can't infer if we use IPv6 or IPv4
})
}
}
}
n.AccessPoints = aps
}
// getEndPointAddrs returns the public addresses of a node: the node either
// has a public namespace with IPv4 and/or IPv6 addresses, or one of its
// interfaces has received a SLAAC addr which has been registered in BCDB
func (n *NetworkBuilder) getEndPointAddrs(nodeID pkg.Identifier) ([]types.IPNet, error) {
schemaNode, err := n.explorer.Directory.NodeGet(nodeID.Identity(), false)
if err != nil {
return nil, err
}
node := types.NewNodeFromSchema(schemaNode)
var endpoints []types.IPNet
if node.PublicConfig != nil {
if node.PublicConfig.IPv4.IP != nil {
ip := node.PublicConfig.IPv4.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv4)
}
}
if node.PublicConfig.IPv6.IP != nil {
ip := node.PublicConfig.IPv6.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv6)
}
}
} else {
for _, iface := range node.Ifaces {
for _, ip := range iface.Addrs {
if !ip.IP.IsGlobalUnicast() || isPrivateIP(ip.IP) {
continue
}
endpoints = append(endpoints, ip)
}
}
}
// If the length is 0, then it's a hidden node
return endpoints, nil
}
| {
n.Network.Iprange = ipRange
return n
} | identifier_body |
networkBuilder.go | package builders
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io"
"math/rand"
"net"
"os"
"strings"
"github.com/emicklei/dot"
"github.com/pkg/errors"
"github.com/threefoldtech/tfexplorer/client"
"github.com/threefoldtech/tfexplorer/models/generated/workloads"
"github.com/threefoldtech/tfexplorer/schema"
"github.com/threefoldtech/zos/pkg"
"github.com/threefoldtech/zos/pkg/crypto"
"github.com/threefoldtech/zos/pkg/network/types"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// NetworkBuilder is a struct that can build networks
type NetworkBuilder struct {
workloads.Network
NodeID string
explorer *client.Client
AccessPoints []AccessPoint `json:"access_points,omitempty"`
// NetResources field override
NetResources []NetResource `json:"net_resources"`
}
// NetResource is the description of a part of a network local to a specific node
type NetResource struct {
workloads.NetworkNetResource
// Public endpoints
PubEndpoints []net.IP `json:"pub_endpoints"`
}
// AccessPoint info for a network, defining a node which will act as the AP, and
// the subnet which will be routed through it
type AccessPoint struct {
// NodeID of the access point in the network
NodeID string `json:"node_id"`
// Subnet to be routed through this access point
Subnet schema.IPRange `json:"subnet"`
WGPublicKey string `json:"wg_public_key"`
IP4 bool `json:"ip4"`
}
// NewNetworkBuilder creates a new network builder
func NewNetworkBuilder(name string, iprange schema.IPRange, explorer *client.Client) *NetworkBuilder {
return &NetworkBuilder{
Network: workloads.Network{
Name: name,
Iprange: iprange,
NetworkResources: []workloads.NetworkNetResource{},
},
explorer: explorer,
}
}
// LoadNetworkBuilder loads a network builder from a reader
func LoadNetworkBuilder(reader io.Reader, explorer *client.Client) (*NetworkBuilder, error) {
network := workloads.Network{}
err := json.NewDecoder(reader).Decode(&network)
if err != nil {
return &NetworkBuilder{}, err
}
networkBuilder := &NetworkBuilder{
Network: network,
explorer: explorer,
}
if err = networkBuilder.setPubEndpoints(); err != nil {
return nil, err
}
networkBuilder.extractAccessPoints()
return networkBuilder, nil
}
// Save writes the network of the builder to an io.Writer
func (n *NetworkBuilder) Save(writer io.Writer) error {
return json.NewEncoder(writer).Encode(n.Network)
}
// Build returns the network
func (n *NetworkBuilder) Build() workloads.Network {
return n.Network
}
// WithName sets the name on the network
func (n *NetworkBuilder) WithName(name string) *NetworkBuilder {
n.Network.Name = name
return n
}
// WithIPRange sets the IP range on the network
func (n *NetworkBuilder) WithIPRange(ipRange schema.IPRange) *NetworkBuilder {
n.Network.Iprange = ipRange
return n
} | func (n *NetworkBuilder) WithStatsAggregator(aggregators []workloads.StatsAggregator) *NetworkBuilder {
n.Network.StatsAggregator = aggregators
return n
}
// WithNetworkResources sets the network resources on the network
func (n *NetworkBuilder) WithNetworkResources(netResources []workloads.NetworkNetResource) *NetworkBuilder {
n.Network.NetworkResources = netResources
return n
}
// AddNode adds a node to the network
// the subnet will be added as a network resource on the node
// forceHidden marks the node as hidden by giving it no public endpoints
func (n *NetworkBuilder) AddNode(nodeID string, subnet string, port uint, forceHidden bool) (*NetworkBuilder, error) {
n.NodeID = nodeID
if subnet == "" {
return n, fmt.Errorf("subnet cannot be empty")
}
ipnet, err := types.ParseIPNet(subnet)
if err != nil {
return n, errors.Wrap(err, "invalid subnet")
}
if port == 0 {
port, err = n.pickPort()
if err != nil {
return n, errors.Wrap(err, "failed to pick wireguard port")
}
}
privateKey, err := wgtypes.GeneratePrivateKey()
if err != nil {
return n, errors.Wrap(err, "error during wireguard key generation")
}
sk := privateKey.String()
pk, err := crypto.KeyFromID(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to parse nodeID")
}
encrypted, err := crypto.Encrypt([]byte(sk), pk)
if err != nil {
return n, errors.Wrap(err, "failed to encrypt private key")
}
pubSubnets, err := n.getEndPointAddrs(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to get node public endpoints")
}
var endpoints []net.IP
if !forceHidden {
for _, sn := range pubSubnets {
endpoints = append(endpoints, sn.IP)
}
}
nr := NetResource{
NetworkNetResource: workloads.NetworkNetResource{
NodeId: nodeID,
Iprange: schema.IPRange{ipnet.IPNet},
WireguardListenPort: int64(port),
WireguardPublicKey: privateKey.PublicKey().String(),
WireguardPrivateKeyEncrypted: hex.EncodeToString(encrypted),
},
PubEndpoints: endpoints,
}
n.NetResources = append(n.NetResources, nr)
if err = n.generatePeers(); err != nil {
return n, errors.Wrap(err, "failed to generate peers")
}
return n, nil
}
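// Editor's usage sketch (hypothetical names, not part of the original
// file); passing port 0 makes the builder pick a free wireguard port:
//
//	nb := NewNetworkBuilder("demo-net", ipRange, explorerClient)
//	nb, err := nb.AddNode("node-id-1", "10.3.1.0/24", 0, false)
//	if err != nil {
//		// handle error
//	}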
// AddAccess adds external access to the network through the given node
// the subnet will be routed through the access point on that node
func (n *NetworkBuilder) AddAccess(nodeID string, subnet schema.IPRange, wgPubKey string, ip4 bool) (*NetworkBuilder, string, error) {
if nodeID == "" {
return n, "", fmt.Errorf("nodeID cannot be empty")
}
var nodeExists bool
var node NetResource
for _, nr := range n.NetResources {
if nr.NodeId == nodeID {
node = nr
nodeExists = true
break
}
}
if !nodeExists {
return n, "", errors.New("can not add access through a node which is not in the network")
}
if len(node.PubEndpoints) == 0 {
return n, "", errors.New("access node must have at least 1 public endpoint")
}
var endpoint string
for _, ep := range node.PubEndpoints {
if ep.To4() != nil {
// ipv4 address
if ip4 {
endpoint = fmt.Sprintf("%s:%d", ep.String(), node.WireguardListenPort)
break
}
// we want ipv6 so use the next address
continue
}
if ep.To16() != nil {
// due to the previous branch this can now only be an ipv6 address
if !ip4 {
endpoint = fmt.Sprintf("[%s]:%d", node.PubEndpoints[0].String(), node.WireguardListenPort)
break
}
// we want ipv4 so use next address
continue
}
}
if endpoint == "" {
return n, "", errors.New("access node has no public endpoint of the requested type")
}
var privateKey wgtypes.Key
if wgPubKey == "" {
// assign with = rather than :=; := would shadow the outer privateKey and
// genWGQuick below would be handed a zero-value key
var err error
privateKey, err = wgtypes.GeneratePrivateKey()
if err != nil {
return n, "", errors.Wrap(err, "error during wireguard key generation")
}
wgPubKey = privateKey.PublicKey().String()
}
ap := AccessPoint{
NodeID: nodeID,
Subnet: subnet,
WGPublicKey: wgPubKey,
IP4: ip4,
}
n.AccessPoints = append(n.AccessPoints, ap)
if err := n.generatePeers(); err != nil {
return n, "", errors.Wrap(err, "failed to generate peers")
}
wgConf, err := genWGQuick(privateKey.String(), subnet, node.WireguardPublicKey, n.Network.Iprange, endpoint)
if err != nil {
return n, "", err
}
return n, wgConf, nil
}
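// Editor's usage sketch (hypothetical values): an empty wgPubKey makes
// AddAccess generate a fresh keypair, and the returned string is a ready
// wg-quick config for the external machine:
//
//	nb, wgConf, err := nb.AddAccess("node-id-1", subnet, "", true) // true: IPv4 endpoint
//	if err != nil {
//		// handle error
//	}
//	fmt.Print(wgConf)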
// RemoveNode removes a node
func (n *NetworkBuilder) RemoveNode(schemaPath string, nodeID string) error {
for i, nr := range n.NetResources {
if nr.NodeId == nodeID {
n.NetResources = append(n.NetResources[:i], n.NetResources[i+1:]...)
break
}
}
// os.Create rather than os.Open: the schema file is written to below, and
// os.Open returns a read-only handle (the parameter is also renamed so it
// no longer shadows the imported schema package)
f, err := os.Create(schemaPath)
if err != nil {
return errors.Wrap(err, "failed to open network schema")
}
defer f.Close()
return n.Save(f)
}
func (n *NetworkBuilder) setPubEndpoints() error {
for i := range n.NetResources {
pep, err := n.getEndPointAddrs(pkg.StrIdentifier(n.NetResources[i].NodeId))
if err != nil {
return err
}
var endpoints []net.IP
for _, sn := range pep {
endpoints = append(endpoints, sn.IP)
}
n.NetResources[i].PubEndpoints = endpoints
}
// remove the pub endpoints from nodes which we assume have been marked
// as force hidden
hiddenNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) > 0 {
for _, peer := range nr.Peers {
if peer.Endpoint == "" {
hiddenNodes[peer.PublicKey] = struct{}{}
}
}
}
}
for i := range n.NetResources {
if _, exists := hiddenNodes[n.NetResources[i].WireguardPublicKey]; exists {
n.NetResources[i].PubEndpoints = nil
}
}
return nil
}
func (n *NetworkBuilder) pickPort() (uint, error) {
node, err := n.explorer.Directory.NodeGet(n.NodeID, false)
if err != nil {
return 0, err
}
p := uint(rand.Intn(6000) + 2000)
for isIn(node.WgPorts, p) {
p = uint(rand.Intn(6000) + 2000)
}
return p, nil
}
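// Editor's note: pickPort draws from [2000, 8000) and re-rolls until the
// candidate is not in the node's reported WgPorts. math/rand is used
// unseeded here, so unless the surrounding program seeds it (for example
// rand.Seed(time.Now().UnixNano()) on Go versions before 1.20) the
// sequence of candidates repeats across runs.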
func isIn(l []int64, i uint) bool {
for _, x := range l {
if int64(i) == x {
return true
}
}
return false
}
func hasIPv4(n NetResource) bool {
for _, pep := range n.PubEndpoints {
if pep.To4() != nil {
return true
}
}
return false
}
// This function assumes:
// - that a hidden node has functioning IPv4
// - that a public node ALWAYS has public IPv6, and OPTIONALLY public IPv4
// - that any public endpoint on any node is actually reachable (i.e. no firewall
// blocking incoming traffic)
func (n *NetworkBuilder) generatePeers() error {
// Find public node, which will be used to connect all hidden nodes.
// In case there are hidden nodes, the public node needs IPv4 support as well.
var hasHiddenNodes bool
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hasHiddenNodes = true
break
}
}
// Look for a public node to connect hidden nodes. This is only needed
// in case there are hidden nodes.
var pubNr string
if hasHiddenNodes {
for _, nr := range n.NetResources {
if hasIPv4(nr) {
pubNr = nr.NodeId
break
}
}
if pubNr == "" {
return errors.New("Network has hidden nodes but no public IPv4 node exists")
}
}
// We also need to inform nodes how to route the external access subnets.
// Working with the knowledge that these external subnets come in through
// the network through a single access point, which is part of the network
// and thus already routed, we can map the external subnets to the subnet
// of the access point, and add these external subnets to all peers who also
// have the associated internal subnet.
//
// Map the network subnets to their respective node ids first for easy access later
internalSubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
internalSubnets[nr.NodeId] = nr.Iprange
}
externalSubnets := make(map[string][]schema.IPRange) // go does not like `types.IPNet` as key
for _, ap := range n.AccessPoints {
externalSubnets[internalSubnets[ap.NodeID].String()] = append(externalSubnets[internalSubnets[ap.NodeID].String()], ap.Subnet)
}
// Maintain a mapping of access point nodes to the subnet and wg key they give access
// to, as these need to be added as peers as well for these nodes
accessPoints := make(map[string][]AccessPoint)
for _, ap := range n.AccessPoints {
accessPoints[ap.NodeID] = append(accessPoints[ap.NodeID], ap)
}
// Find all hidden nodes, and collect their subnets. Also collect the subnets
// of public IPv6 only nodes, since hidden nodes need IPv4 to connect.
hiddenSubnets := make(map[string]schema.IPRange)
// also maintain subnets from nodes who have only IPv6 since this will also
// need to be routed for hidden nodes
ipv6OnlySubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hiddenSubnets[nr.NodeId] = nr.Iprange
continue
}
if !hasIPv4(nr) {
ipv6OnlySubnets[nr.NodeId] = nr.Iprange
}
}
for i := range n.NetResources {
// Note: we need to loop by index and manually assign nr, doing
// for _, nr := range ... causes nr to be copied, meaning we can't modify
// it in place
nr := &n.NetResources[i]
nr.Peers = []workloads.WireguardPeer{}
for _, onr := range n.NetResources {
if nr.NodeId == onr.NodeId { // skip the node itself
continue
}
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = onr.Iprange
allowedIPs[1] = *wgIP(&onr.Iprange)
var endpoint string
if len(nr.PubEndpoints) == 0 {
// If node is hidden, set only public peers (with IPv4), and set first public peer to
// contain all hidden subnets, except for the one owned by the node
if !hasIPv4(onr) {
continue
}
// Also add all other subnets if this is the pub node
if onr.NodeId == pubNr {
for owner, subnet := range hiddenSubnets {
// Do not add our own subnet
if owner == nr.NodeId {
continue
}
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
for _, subnet := range ipv6OnlySubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Endpoint must be IPv4
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
} else if len(onr.PubEndpoints) == 0 && hasIPv4(*nr) {
// if the peer is hidden but we have IPv4, it can still reach us, so we add
// it as a peer with an empty endpoint (we don't know one for it)
endpoint = ""
} else {
// if we are not hidden, we add all other nodes, unless we don't
// have IPv4, because then we can't connect to hidden nodes either
if !hasIPv4(*nr) && len(onr.PubEndpoints) == 0 {
continue
}
// both nodes are public therefore we can connect over IPv6
// if this is the selected pubNr - also need to add allowedIPs
// for the hidden nodes
if onr.NodeId == pubNr {
for _, subnet := range hiddenSubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Since the node is not hidden, we know that it MUST have at least
// 1 IPv6 address
for _, pep := range onr.PubEndpoints {
if pep.To4() == nil && pep.To16() != nil {
endpoint = fmt.Sprintf("[%s]:%d", pep.String(), onr.WireguardListenPort)
break
}
}
// as a fallback assign IPv4
if endpoint == "" {
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
}
}
// Add subnets for external access. Note that len(allowedIPs) is
// re-evaluated every iteration, so ranges appended inside the loop are
// themselves checked against the external subnets as well
for i := 0; i < len(allowedIPs); i++ {
for _, subnet := range externalSubnets[allowedIPs[i].String()] {
allowedIPs = append(allowedIPs, schema.IPRange{subnet.IPNet})
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: onr.WireguardPublicKey,
Iprange: onr.Iprange,
AllowedIprange: allowedIPs,
Endpoint: endpoint,
})
}
// Add configured external access peers
for _, ea := range accessPoints[nr.NodeId] {
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = schema.IPRange{ea.Subnet.IPNet}
allowedIPs[1] = *wgIP(&schema.IPRange{ea.Subnet.IPNet})
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: ea.WGPublicKey,
Iprange: schema.IPRange{ea.Subnet.IPNet},
AllowedIprange: allowedIPs,
Endpoint: "",
})
}
}
return nil
}
func isIPv4Subnet(n schema.IPRange) bool {
ones, bits := n.IPNet.Mask.Size()
if bits != 32 {
return false
}
return ones <= 30
}
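// Editor's examples (illustrative): isIPv4Subnet accepts 10.3.1.0/24 but
// rejects 2001:db8::/64 (bits != 32) and 10.3.1.1/31 or /32 (ones > 30,
// i.e. no room for usable host addresses).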
func genWGQuick(wgPrivateKey string, localSubnet schema.IPRange, peerWgPubKey string, allowedSubnet schema.IPRange, peerEndpoint string) (string, error) {
type data struct {
PrivateKey string
Address string
PeerWgPubKey string
AllowedSubnet string
PeerEndpoint string
}
if !isIPv4Subnet(localSubnet) {
return "", errors.New("local subnet is not a valid IPv4 subnet")
}
tmpl, err := template.New("wg").Parse(wgTmpl)
if err != nil {
return "", err
}
buf := &bytes.Buffer{}
if err := tmpl.Execute(buf, data{
PrivateKey: wgPrivateKey,
Address: wgIP(&schema.IPRange{localSubnet.IPNet}).String(),
PeerWgPubKey: peerWgPubKey,
AllowedSubnet: strings.Join([]string{allowedSubnet.String(), types.NewIPNet(wgSubnet(&allowedSubnet.IPNet)).String()}, ","),
PeerEndpoint: peerEndpoint,
}); err != nil {
return "", err
}
return buf.String(), nil
}
var wgTmpl = `
[Interface]
PrivateKey = {{.PrivateKey}}
Address = {{.Address}}
[Peer]
PublicKey = {{.PeerWgPubKey}}
AllowedIPs = {{.AllowedSubnet}}
PersistentKeepalive = 20
{{if .PeerEndpoint}}Endpoint = {{.PeerEndpoint}}{{end}}
`
// NetworkGraph creates a network graph for a network
func (n *NetworkBuilder) NetworkGraph(w io.Writer) error {
nodes := make(map[string]dot.Node)
nodesByID := make(map[string]dot.Node)
graph := dot.NewGraph(dot.Directed)
for _, nr := range n.NetResources {
node := graph.Node(strings.Join([]string{nr.NodeId, nr.Iprange.String()}, "\n")).Box()
// set special style for "hidden" nodes
if len(nr.PubEndpoints) == 0 {
node.Attr("style", "dashed")
node.Attr("color", "blue")
graph.AddToSameRank("hidden nodes", node)
}
nodes[nr.WireguardPublicKey] = node
nodesByID[nr.NodeId] = node
}
// add external access
for _, ea := range n.AccessPoints {
node := graph.Node(strings.Join([]string{"External network", ea.Subnet.String()}, "\n")).Box()
// set style for hidden nodes
node.Attr("style", "dashed")
node.Attr("color", "green")
graph.AddToSameRank("external access", node)
// add link to access point
edge := graph.Edge(node, nodesByID[ea.NodeID], n.Iprange.String())
if ea.IP4 {
edge.Attr("color", "blue")
}
nodes[ea.WGPublicKey] = node
}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
allowedIPs := make([]string, 0, len(peer.AllowedIprange)/2)
for _, aip := range peer.AllowedIprange {
if !isCGN(aip) {
allowedIPs = append(allowedIPs, aip.String())
}
}
edge := graph.Edge(nodes[nr.WireguardPublicKey], nodes[peer.PublicKey], strings.Join(allowedIPs, "\n"))
if peer.Endpoint == "" {
// connections to this peer are IPv4 -> blue, and can not be initiated by this node -> dashed
edge.Attr("color", "blue").Attr("style", "dashed")
continue
}
// use SplitHostPort so bracketed IPv6 endpoints such as
// "[2001:db8::1]:51820" are stripped correctly before parsing
if host, _, err := net.SplitHostPort(peer.Endpoint); err == nil && net.ParseIP(host).To4() != nil {
// IPv4 connection -> blue
edge.Attr("color", "blue")
}
}
}
graph.Write(w)
return nil
}
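// Editor's usage sketch (hypothetical file name): the graph is emitted in
// graphviz dot format and can be rendered with, for example,
// "dot -Tpng network.dot -o network.png":
//
//	f, err := os.Create("network.dot")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	_ = nb.NetworkGraph(f)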
func wgIP(subnet *schema.IPRange) *schema.IPRange {
// example: 10.3.1.0 -> 100.64.3.1
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
return &schema.IPRange{net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(32, 32),
}}
}
func wgSubnet(subnet *net.IPNet) *net.IPNet {
// example: 10.3.1.0/24 -> 100.64.3.1/32 (same mapping as wgIP, with the
// prefix length increased by 8 bits)
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
ones, _ := subnet.Mask.Size()
return &net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(ones+8, 32),
}
}
func isPrivateIP(ip net.IP) bool {
privateIPBlocks := []*net.IPNet{}
for _, cidr := range []string{
"127.0.0.0/8", // IPv4 loopback
"10.0.0.0/8", // RFC1918
"172.16.0.0/12", // RFC1918
"192.168.0.0/16", // RFC1918
"169.254.0.0/16", // RFC3927 link-local
"::1/128", // IPv6 loopback
"fe80::/10", // IPv6 link-local
"fc00::/7", // IPv6 unique local addr
} {
_, block, err := net.ParseCIDR(cidr)
if err != nil {
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
}
privateIPBlocks = append(privateIPBlocks, block)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true
}
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return true
}
}
return false
}
func isCGN(subnet schema.IPRange) bool {
_, block, err := net.ParseCIDR("100.64.0.0/10")
if err != nil {
panic(err)
}
return block.Contains(subnet.IP)
}
func (n *NetworkBuilder) extractAccessPoints() {
// gather all actual nodes, using their wg pubkey as key in the map (NodeID
// can't be seen in the actual peer struct)
actualNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
actualNodes[nr.WireguardPublicKey] = struct{}{}
}
aps := []AccessPoint{}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
if _, exists := actualNodes[peer.PublicKey]; !exists {
// peer is not a node so it must be external
aps = append(aps, AccessPoint{
NodeID: nr.NodeId,
Subnet: peer.Iprange,
WGPublicKey: peer.PublicKey,
// we can't infer if we use IPv6 or IPv4
})
}
}
}
n.AccessPoints = aps
}
// getEndPointAddrs returns the public addresses of a node: the node either
// has a public namespace with IPv4 and/or IPv6 addresses, or one of its
// interfaces has received a SLAAC addr which has been registered in BCDB
func (n *NetworkBuilder) getEndPointAddrs(nodeID pkg.Identifier) ([]types.IPNet, error) {
schemaNode, err := n.explorer.Directory.NodeGet(nodeID.Identity(), false)
if err != nil {
return nil, err
}
node := types.NewNodeFromSchema(schemaNode)
var endpoints []types.IPNet
if node.PublicConfig != nil {
if node.PublicConfig.IPv4.IP != nil {
ip := node.PublicConfig.IPv4.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv4)
}
}
if node.PublicConfig.IPv6.IP != nil {
ip := node.PublicConfig.IPv6.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv6)
}
}
} else {
for _, iface := range node.Ifaces {
for _, ip := range iface.Addrs {
if !ip.IP.IsGlobalUnicast() || isPrivateIP(ip.IP) {
continue
}
endpoints = append(endpoints, ip)
}
}
}
// If the length is 0, then it's a hidden node
return endpoints, nil
} |
// WithStatsAggregator sets the stats aggregators to the network | random_line_split |
networkBuilder.go | package builders
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io"
"math/rand"
"net"
"os"
"strings"
"github.com/emicklei/dot"
"github.com/pkg/errors"
"github.com/threefoldtech/tfexplorer/client"
"github.com/threefoldtech/tfexplorer/models/generated/workloads"
"github.com/threefoldtech/tfexplorer/schema"
"github.com/threefoldtech/zos/pkg"
"github.com/threefoldtech/zos/pkg/crypto"
"github.com/threefoldtech/zos/pkg/network/types"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// NetworkBuilder is a struct that can build networks
type NetworkBuilder struct {
workloads.Network
NodeID string
explorer *client.Client
AccessPoints []AccessPoint `json:"access_points,omitempty"`
// NetResources field override
NetResources []NetResource `json:"net_resources"`
}
// NetResource is the description of a part of a network local to a specific node
type NetResource struct {
workloads.NetworkNetResource
// Public endpoints
PubEndpoints []net.IP `json:"pub_endpoints"`
}
// AccessPoint info for a network, defining a node which will act as the AP, and
// the subnet which will be routed through it
type AccessPoint struct {
// NodeID of the access point in the network
NodeID string `json:"node_id"`
// Subnet to be routed through this access point
Subnet schema.IPRange `json:"subnet"`
WGPublicKey string `json:"wg_public_key"`
IP4 bool `json:"ip4"`
}
// NewNetworkBuilder creates a new network builder
func NewNetworkBuilder(name string, iprange schema.IPRange, explorer *client.Client) *NetworkBuilder {
return &NetworkBuilder{
Network: workloads.Network{
Name: name,
Iprange: iprange,
NetworkResources: []workloads.NetworkNetResource{},
},
explorer: explorer,
}
}
// LoadNetworkBuilder loads a network builder from a reader
func LoadNetworkBuilder(reader io.Reader, explorer *client.Client) (*NetworkBuilder, error) {
network := workloads.Network{}
err := json.NewDecoder(reader).Decode(&network)
if err != nil {
return &NetworkBuilder{}, err
}
networkBuilder := &NetworkBuilder{
Network: network,
explorer: explorer,
}
if err = networkBuilder.setPubEndpoints(); err != nil {
return nil, err
}
networkBuilder.extractAccessPoints()
return networkBuilder, nil
}
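// Editor's usage sketch (hypothetical path): any io.Reader works, a file
// being the common case:
//
//	f, err := os.Open("network.json")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	nb, err := LoadNetworkBuilder(f, explorerClient)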
// Save writes the network of the builder to an io.Writer
func (n *NetworkBuilder) Save(writer io.Writer) error {
return json.NewEncoder(writer).Encode(n.Network)
}
// Build returns the network
func (n *NetworkBuilder) Build() workloads.Network {
return n.Network
}
// WithName sets the name on the network
func (n *NetworkBuilder) WithName(name string) *NetworkBuilder {
n.Network.Name = name
return n
}
// WithIPRange sets the IP range on the network
func (n *NetworkBuilder) WithIPRange(ipRange schema.IPRange) *NetworkBuilder {
n.Network.Iprange = ipRange
return n
}
// WithStatsAggregator sets the stats aggregators on the network
func (n *NetworkBuilder) WithStatsAggregator(aggregators []workloads.StatsAggregator) *NetworkBuilder {
n.Network.StatsAggregator = aggregators
return n
}
// WithNetworkResources sets the network resources on the network
func (n *NetworkBuilder) WithNetworkResources(netResources []workloads.NetworkNetResource) *NetworkBuilder {
n.Network.NetworkResources = netResources
return n
}
// AddNode adds a node to the network
// the subnet will be added as a network resource on the node
// forceHidden marks the node as hidden by giving it no public endpoints
func (n *NetworkBuilder) AddNode(nodeID string, subnet string, port uint, forceHidden bool) (*NetworkBuilder, error) {
n.NodeID = nodeID
if subnet == "" {
return n, fmt.Errorf("subnet cannot be empty")
}
ipnet, err := types.ParseIPNet(subnet)
if err != nil {
return n, errors.Wrap(err, "invalid subnet")
}
if port == 0 {
port, err = n.pickPort()
if err != nil {
return n, errors.Wrap(err, "failed to pick wireguard port")
}
}
privateKey, err := wgtypes.GeneratePrivateKey()
if err != nil {
return n, errors.Wrap(err, "error during wireguard key generation")
}
sk := privateKey.String()
pk, err := crypto.KeyFromID(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to parse nodeID")
}
encrypted, err := crypto.Encrypt([]byte(sk), pk)
if err != nil {
return n, errors.Wrap(err, "failed to encrypt private key")
}
pubSubnets, err := n.getEndPointAddrs(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to get node public endpoints")
}
var endpoints []net.IP
if !forceHidden {
for _, sn := range pubSubnets {
endpoints = append(endpoints, sn.IP)
}
}
nr := NetResource{
NetworkNetResource: workloads.NetworkNetResource{
NodeId: nodeID,
Iprange: schema.IPRange{ipnet.IPNet},
WireguardListenPort: int64(port),
WireguardPublicKey: privateKey.PublicKey().String(),
WireguardPrivateKeyEncrypted: hex.EncodeToString(encrypted),
},
PubEndpoints: endpoints,
}
n.NetResources = append(n.NetResources, nr)
if err = n.generatePeers(); err != nil {
return n, errors.Wrap(err, "failed to generate peers")
}
return n, nil
}
// AddAccess adds external access to the network through the given node
// the subnet will be routed through the access point on that node
func (n *NetworkBuilder) AddAccess(nodeID string, subnet schema.IPRange, wgPubKey string, ip4 bool) (*NetworkBuilder, string, error) {
if nodeID == "" {
return n, "", fmt.Errorf("nodeID cannot be empty")
}
var nodeExists bool
var node NetResource
for _, nr := range n.NetResources {
if nr.NodeId == nodeID {
node = nr
nodeExists = true
break
}
}
if !nodeExists {
return n, "", errors.New("can not add access through a node which is not in the network")
}
if len(node.PubEndpoints) == 0 {
return n, "", errors.New("access node must have at least 1 public endpoint")
}
var endpoint string
for _, ep := range node.PubEndpoints {
if ep.To4() != nil {
// ipv4 address
if ip4 {
endpoint = fmt.Sprintf("%s:%d", ep.String(), node.WireguardListenPort)
break
}
// we want ipv6 so use the next address
continue
}
if ep.To16() != nil {
// due to the previous branch this can now only be an ipv6 address
if !ip4 {
endpoint = fmt.Sprintf("[%s]:%d", node.PubEndpoints[0].String(), node.WireguardListenPort)
break
}
// we want ipv4 so use next address
continue
}
}
if endpoint == "" {
return n, "", errors.New("access node has no public endpoint of the requested type")
}
var privateKey wgtypes.Key
if wgPubKey == "" {
// assign with = rather than :=; := would shadow the outer privateKey and
// genWGQuick below would be handed a zero-value key
var err error
privateKey, err = wgtypes.GeneratePrivateKey()
if err != nil {
return n, "", errors.Wrap(err, "error during wireguard key generation")
}
wgPubKey = privateKey.PublicKey().String()
}
ap := AccessPoint{
NodeID: nodeID,
Subnet: subnet,
WGPublicKey: wgPubKey,
IP4: ip4,
}
n.AccessPoints = append(n.AccessPoints, ap)
if err := n.generatePeers(); err != nil {
return n, "", errors.Wrap(err, "failed to generate peers")
}
wgConf, err := genWGQuick(privateKey.String(), subnet, node.WireguardPublicKey, n.Network.Iprange, endpoint)
if err != nil {
return n, "", err
}
return n, wgConf, nil
}
// RemoveNode removes a node
func (n *NetworkBuilder) RemoveNode(schemaPath string, nodeID string) error {
for i, nr := range n.NetResources {
if nr.NodeId == nodeID {
n.NetResources = append(n.NetResources[:i], n.NetResources[i+1:]...)
break
}
}
// os.Create rather than os.Open: the schema file is written to below, and
// os.Open returns a read-only handle (the parameter is also renamed so it
// no longer shadows the imported schema package)
f, err := os.Create(schemaPath)
if err != nil {
return errors.Wrap(err, "failed to open network schema")
}
defer f.Close()
return n.Save(f)
}
func (n *NetworkBuilder) setPubEndpoints() error {
for i := range n.NetResources {
pep, err := n.getEndPointAddrs(pkg.StrIdentifier(n.NetResources[i].NodeId))
if err != nil {
return err
}
var endpoints []net.IP
for _, sn := range pep {
endpoints = append(endpoints, sn.IP)
}
n.NetResources[i].PubEndpoints = endpoints
}
// remove the pub endpoints from nodes which we assume have been marked
// as force hidden
hiddenNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) > 0 {
for _, peer := range nr.Peers {
if peer.Endpoint == "" {
hiddenNodes[peer.PublicKey] = struct{}{}
}
}
}
}
for i := range n.NetResources {
if _, exists := hiddenNodes[n.NetResources[i].WireguardPublicKey]; exists {
n.NetResources[i].PubEndpoints = nil
}
}
return nil
}
func (n *NetworkBuilder) pickPort() (uint, error) {
node, err := n.explorer.Directory.NodeGet(n.NodeID, false)
if err != nil {
return 0, err
}
p := uint(rand.Intn(6000) + 2000)
for isIn(node.WgPorts, p) {
p = uint(rand.Intn(6000) + 2000)
}
return p, nil
}
func isIn(l []int64, i uint) bool {
for _, x := range l {
if int64(i) == x {
return true
}
}
return false
}
func hasIPv4(n NetResource) bool {
for _, pep := range n.PubEndpoints {
if pep.To4() != nil {
return true
}
}
return false
}
// This function assumes:
// - that a hidden node has functioning IPv4
// - that a public node ALWAYS has public IPv6, and OPTIONALLY public IPv4
// - that any public endpoint on any node is actually reachable (i.e. no firewall
// blocking incoming traffic)
func (n *NetworkBuilder) generatePeers() error {
// Find public node, which will be used to connect all hidden nodes.
// In case there are hidden nodes, the public node needs IPv4 support as well.
var hasHiddenNodes bool
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hasHiddenNodes = true
break
}
}
// Look for a public node to connect hidden nodes. This is only needed
// in case there are hidden nodes.
var pubNr string
if hasHiddenNodes {
for _, nr := range n.NetResources {
if hasIPv4(nr) {
pubNr = nr.NodeId
break
}
}
if pubNr == "" {
return errors.New("Network has hidden nodes but no public IPv4 node exists")
}
}
// We also need to inform nodes how to route the external access subnets.
// Working with the knowledge that these external subnets come in through
// the network through a single access point, which is part of the network
// and thus already routed, we can map the external subnets to the subnet
// of the access point, and add these external subnets to all peers who also
// have the associated internal subnet.
//
// Map the network subnets to their respective node ids first for easy access later
internalSubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
internalSubnets[nr.NodeId] = nr.Iprange
}
externalSubnets := make(map[string][]schema.IPRange) // go does not like `types.IPNet` as key
for _, ap := range n.AccessPoints {
externalSubnets[internalSubnets[ap.NodeID].String()] = append(externalSubnets[internalSubnets[ap.NodeID].String()], ap.Subnet)
}
// Maintain a mapping of access point nodes to the subnet and wg key they give access
// to, as these need to be added as peers as well for these nodes
accessPoints := make(map[string][]AccessPoint)
for _, ap := range n.AccessPoints {
accessPoints[ap.NodeID] = append(accessPoints[ap.NodeID], ap)
}
// Find all hidden nodes, and collect their subnets. Also collect the subnets
// of public IPv6 only nodes, since hidden nodes need IPv4 to connect.
hiddenSubnets := make(map[string]schema.IPRange)
// also maintain subnets from nodes who have only IPv6 since this will also
// need to be routed for hidden nodes
ipv6OnlySubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hiddenSubnets[nr.NodeId] = nr.Iprange
continue
}
if !hasIPv4(nr) {
ipv6OnlySubnets[nr.NodeId] = nr.Iprange
}
}
for i := range n.NetResources {
// Note: we need to loop by index and manually assign nr, doing
// for _, nr := range ... causes nr to be copied, meaning we can't modify
// it in place
nr := &n.NetResources[i]
nr.Peers = []workloads.WireguardPeer{}
for _, onr := range n.NetResources {
if nr.NodeId == onr.NodeId { // skip the node itself
continue
}
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = onr.Iprange
allowedIPs[1] = *wgIP(&onr.Iprange)
var endpoint string
if len(nr.PubEndpoints) == 0 {
// If node is hidden, set only public peers (with IPv4), and set first public peer to
// contain all hidden subnets, except for the one owned by the node
if !hasIPv4(onr) {
continue
}
// Also add all other subnets if this is the pub node
if onr.NodeId == pubNr {
for owner, subnet := range hiddenSubnets {
// Do not add our own subnet
if owner == nr.NodeId {
continue
}
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
for _, subnet := range ipv6OnlySubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Endpoint must be IPv4
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
} else if len(onr.PubEndpoints) == 0 && hasIPv4(*nr) {
// if the peer is hidden but we have IPv4, it can still reach us, so we add
// it as a peer with an empty endpoint (we don't know one for it)
endpoint = ""
} else {
// if we are not hidden, we add all other nodes, unless we don't
// have IPv4, because then we can't connect to hidden nodes either
if !hasIPv4(*nr) && len(onr.PubEndpoints) == 0 {
continue
}
// both nodes are public therefore we can connect over IPv6
// if this is the selected pubNr - also need to add allowedIPs
// for the hidden nodes
if onr.NodeId == pubNr {
for _, subnet := range hiddenSubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Since the node is not hidden, we know that it MUST have at least
// 1 IPv6 address
for _, pep := range onr.PubEndpoints {
if pep.To4() == nil && pep.To16() != nil {
endpoint = fmt.Sprintf("[%s]:%d", pep.String(), onr.WireguardListenPort)
break
}
}
// as a fallback assign IPv4
if endpoint == "" {
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
}
}
// Add subnets for external access. Note that len(allowedIPs) is
// re-evaluated every iteration, so ranges appended inside the loop are
// themselves checked against the external subnets as well
for i := 0; i < len(allowedIPs); i++ {
for _, subnet := range externalSubnets[allowedIPs[i].String()] {
allowedIPs = append(allowedIPs, schema.IPRange{subnet.IPNet})
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: onr.WireguardPublicKey,
Iprange: onr.Iprange,
AllowedIprange: allowedIPs,
Endpoint: endpoint,
})
}
// Add configured external access peers
for _, ea := range accessPoints[nr.NodeId] {
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = schema.IPRange{ea.Subnet.IPNet}
allowedIPs[1] = *wgIP(&schema.IPRange{ea.Subnet.IPNet})
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: ea.WGPublicKey,
Iprange: schema.IPRange{ea.Subnet.IPNet},
AllowedIprange: allowedIPs,
Endpoint: "",
})
}
}
return nil
}
func isIPv4Subnet(n schema.IPRange) bool {
ones, bits := n.IPNet.Mask.Size()
if bits != 32 {
return false
}
return ones <= 30
}
func genWGQuick(wgPrivateKey string, localSubnet schema.IPRange, peerWgPubKey string, allowedSubnet schema.IPRange, peerEndpoint string) (string, error) {
type data struct {
PrivateKey string
Address string
PeerWgPubKey string
AllowedSubnet string
PeerEndpoint string
}
if !isIPv4Subnet(localSubnet) {
return "", errors.New("local subnet is not a valid IPv4 subnet")
}
tmpl, err := template.New("wg").Parse(wgTmpl)
if err != nil {
return "", err
}
buf := &bytes.Buffer{}
if err := tmpl.Execute(buf, data{
PrivateKey: wgPrivateKey,
Address: wgIP(&schema.IPRange{localSubnet.IPNet}).String(),
PeerWgPubKey: peerWgPubKey,
AllowedSubnet: strings.Join([]string{allowedSubnet.String(), types.NewIPNet(wgSubnet(&allowedSubnet.IPNet)).String()}, ","),
PeerEndpoint: peerEndpoint,
}); err != nil {
return "", err
}
return buf.String(), nil
}
var wgTmpl = `
[Interface]
PrivateKey = {{.PrivateKey}}
Address = {{.Address}}
[Peer]
PublicKey = {{.PeerWgPubKey}}
AllowedIPs = {{.AllowedSubnet}}
PersistentKeepalive = 20
{{if .PeerEndpoint}}Endpoint = {{.PeerEndpoint}}{{end}}
`
// NetworkGraph creates a network graph for a network
func (n *NetworkBuilder) NetworkGraph(w io.Writer) error {
nodes := make(map[string]dot.Node)
nodesByID := make(map[string]dot.Node)
graph := dot.NewGraph(dot.Directed)
for _, nr := range n.NetResources {
node := graph.Node(strings.Join([]string{nr.NodeId, nr.Iprange.String()}, "\n")).Box()
// set special style for "hidden" nodes
if len(nr.PubEndpoints) == 0 {
node.Attr("style", "dashed")
node.Attr("color", "blue")
graph.AddToSameRank("hidden nodes", node)
}
nodes[nr.WireguardPublicKey] = node
nodesByID[nr.NodeId] = node
}
// add external access
for _, ea := range n.AccessPoints {
node := graph.Node(strings.Join([]string{"External network", ea.Subnet.String()}, "\n")).Box()
// set style for hidden nodes
node.Attr("style", "dashed")
node.Attr("color", "green")
graph.AddToSameRank("external access", node)
// add link to access point
edge := graph.Edge(node, nodesByID[ea.NodeID], n.Iprange.String())
if ea.IP4 {
edge.Attr("color", "blue")
}
nodes[ea.WGPublicKey] = node
}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
allowedIPs := make([]string, 0, len(peer.AllowedIprange)/2)
for _, aip := range peer.AllowedIprange {
if !isCGN(aip) {
allowedIPs = append(allowedIPs, aip.String())
}
}
edge := graph.Edge(nodes[nr.WireguardPublicKey], nodes[peer.PublicKey], strings.Join(allowedIPs, "\n"))
if peer.Endpoint == "" {
// connections to this peer are IPv4 -> blue, and can not be initiated by this node -> dashed
edge.Attr("color", "blue").Attr("style", "dashed")
continue
}
// use SplitHostPort so bracketed IPv6 endpoints such as
// "[2001:db8::1]:51820" are stripped correctly before parsing
if host, _, err := net.SplitHostPort(peer.Endpoint); err == nil && net.ParseIP(host).To4() != nil {
// IPv4 connection -> blue
edge.Attr("color", "blue")
}
}
}
graph.Write(w)
return nil
}
func wgIP(subnet *schema.IPRange) *schema.IPRange {
// example: 10.3.1.0 -> 100.64.3.1
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
return &schema.IPRange{net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(32, 32),
}}
}
func wgSubnet(subnet *net.IPNet) *net.IPNet {
// example: 10.3.1.0/24 -> 100.64.3.1/32 (same mapping as wgIP, with the
// prefix length increased by 8 bits)
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
ones, _ := subnet.Mask.Size()
return &net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(ones+8, 32),
}
}
func isPrivateIP(ip net.IP) bool {
privateIPBlocks := []*net.IPNet{}
for _, cidr := range []string{
"127.0.0.0/8", // IPv4 loopback
"10.0.0.0/8", // RFC1918
"172.16.0.0/12", // RFC1918
"192.168.0.0/16", // RFC1918
"169.254.0.0/16", // RFC3927 link-local
"::1/128", // IPv6 loopback
"fe80::/10", // IPv6 link-local
"fc00::/7", // IPv6 unique local addr
} {
_, block, err := net.ParseCIDR(cidr)
if err != nil {
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
}
privateIPBlocks = append(privateIPBlocks, block)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true
}
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return true
}
}
return false
}
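// Editor's examples (illustrative): isPrivateIP(net.ParseIP("192.168.1.10"))
// and isPrivateIP(net.ParseIP("fe80::1")) are true, while a global address
// such as 2001:db8::1 falls through to false. Note the block list is
// rebuilt on every call; hoisting it to a package-level variable would
// avoid the repeated parsing.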
func | (subnet schema.IPRange) bool {
_, block, err := net.ParseCIDR("100.64.0.0/10")
if err != nil {
panic(err)
}
return block.Contains(subnet.IP)
}
func (n *NetworkBuilder) extractAccessPoints() {
// gather all actual nodes, using their wg pubkey as key in the map (NodeID
// can't be seen in the actual peer struct)
actualNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
actualNodes[nr.WireguardPublicKey] = struct{}{}
}
aps := []AccessPoint{}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
if _, exists := actualNodes[peer.PublicKey]; !exists {
// peer is not a node so it must be external
aps = append(aps, AccessPoint{
NodeID: nr.NodeId,
Subnet: peer.Iprange,
WGPublicKey: peer.PublicKey,
// we can't infer if we use IPv6 or IPv4
})
}
}
}
n.AccessPoints = aps
}
// getEndPointAddrs returns the public addresses of a node: the node either
// has a public namespace with IPv4 and/or IPv6 addresses, or one of its
// interfaces has received a SLAAC addr which has been registered in BCDB
func (n *NetworkBuilder) getEndPointAddrs(nodeID pkg.Identifier) ([]types.IPNet, error) {
schemaNode, err := n.explorer.Directory.NodeGet(nodeID.Identity(), false)
if err != nil {
return nil, err
}
node := types.NewNodeFromSchema(schemaNode)
var endpoints []types.IPNet
if node.PublicConfig != nil {
if node.PublicConfig.IPv4.IP != nil {
ip := node.PublicConfig.IPv4.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv4)
}
}
if node.PublicConfig.IPv6.IP != nil {
ip := node.PublicConfig.IPv6.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv6)
}
}
} else {
for _, iface := range node.Ifaces {
for _, ip := range iface.Addrs {
if !ip.IP.IsGlobalUnicast() || isPrivateIP(ip.IP) {
continue
}
endpoints = append(endpoints, ip)
}
}
}
// If the length is 0, then it's a hidden node
return endpoints, nil
}
| isCGN | identifier_name |
networkBuilder.go | package builders
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io"
"math/rand"
"net"
"os"
"strings"
"github.com/emicklei/dot"
"github.com/pkg/errors"
"github.com/threefoldtech/tfexplorer/client"
"github.com/threefoldtech/tfexplorer/models/generated/workloads"
"github.com/threefoldtech/tfexplorer/schema"
"github.com/threefoldtech/zos/pkg"
"github.com/threefoldtech/zos/pkg/crypto"
"github.com/threefoldtech/zos/pkg/network/types"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// NetworkBuilder is a struct that can build networks
type NetworkBuilder struct {
workloads.Network
NodeID string
explorer *client.Client
AccessPoints []AccessPoint `json:"access_points,omitempty"`
// NetResources field override
NetResources []NetResource `json:"net_resources"`
}
// NetResource is the description of a part of a network local to a specific node
type NetResource struct {
workloads.NetworkNetResource
// Public endpoints
PubEndpoints []net.IP `json:"pub_endpoints"`
}
// AccessPoint info for a network, defining a node which will act as the AP, and
// the subnet which will be routed through it
type AccessPoint struct {
// NodeID of the access point in the network
NodeID string `json:"node_id"`
// Subnet to be routed through this access point
Subnet schema.IPRange `json:"subnet"`
WGPublicKey string `json:"wg_public_key"`
IP4 bool `json:"ip4"`
}
// NewNetworkBuilder creates a new network builder
func NewNetworkBuilder(name string, iprange schema.IPRange, explorer *client.Client) *NetworkBuilder {
return &NetworkBuilder{
Network: workloads.Network{
Name: name,
Iprange: iprange,
NetworkResources: []workloads.NetworkNetResource{},
},
explorer: explorer,
}
}
// LoadNetworkBuilder loads a network builder from a reader
func LoadNetworkBuilder(reader io.Reader, explorer *client.Client) (*NetworkBuilder, error) {
network := workloads.Network{}
err := json.NewDecoder(reader).Decode(&network)
if err != nil {
return &NetworkBuilder{}, err
}
networkBuilder := &NetworkBuilder{
Network: network,
explorer: explorer,
}
if err = networkBuilder.setPubEndpoints(); err != nil {
return nil, err
}
networkBuilder.extractAccessPoints()
return networkBuilder, nil
}
// Save writes the network of the builder to an io.Writer
func (n *NetworkBuilder) Save(writer io.Writer) error {
return json.NewEncoder(writer).Encode(n.Network)
}
// Build returns the network
func (n *NetworkBuilder) Build() workloads.Network {
return n.Network
}
// WithName sets the name on the network
func (n *NetworkBuilder) WithName(name string) *NetworkBuilder {
n.Network.Name = name
return n
}
// WithIPRange sets the IP range on the network
func (n *NetworkBuilder) WithIPRange(ipRange schema.IPRange) *NetworkBuilder {
n.Network.Iprange = ipRange
return n
}
// WithStatsAggregator sets the stats aggregators on the network
func (n *NetworkBuilder) WithStatsAggregator(aggregators []workloads.StatsAggregator) *NetworkBuilder {
n.Network.StatsAggregator = aggregators
return n
}
// WithNetworkResources sets the network resources on the network
func (n *NetworkBuilder) WithNetworkResources(netResources []workloads.NetworkNetResource) *NetworkBuilder {
n.Network.NetworkResources = netResources
return n
}
// AddNode adds a node to the network
// the subnet will be added as a network resource on the node
// forceHidden marks the node as hidden by giving it no public endpoints
func (n *NetworkBuilder) AddNode(nodeID string, subnet string, port uint, forceHidden bool) (*NetworkBuilder, error) {
n.NodeID = nodeID
if subnet == "" {
return n, fmt.Errorf("subnet cannot be empty")
}
ipnet, err := types.ParseIPNet(subnet)
if err != nil {
return n, errors.Wrap(err, "invalid subnet")
}
if port == 0 {
port, err = n.pickPort()
if err != nil {
return n, errors.Wrap(err, "failed to pick wireguard port")
}
}
privateKey, err := wgtypes.GeneratePrivateKey()
if err != nil {
return n, errors.Wrap(err, "error during wireguard key generation")
}
sk := privateKey.String()
pk, err := crypto.KeyFromID(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to parse nodeID")
}
encrypted, err := crypto.Encrypt([]byte(sk), pk)
if err != nil {
return n, errors.Wrap(err, "failed to encrypt private key")
}
pubSubnets, err := n.getEndPointAddrs(pkg.StrIdentifier(nodeID))
if err != nil {
return n, errors.Wrap(err, "failed to get node public endpoints")
}
var endpoints []net.IP
if !forceHidden {
for _, sn := range pubSubnets {
endpoints = append(endpoints, sn.IP)
}
}
nr := NetResource{
NetworkNetResource: workloads.NetworkNetResource{
NodeId: nodeID,
Iprange: schema.IPRange{ipnet.IPNet},
WireguardListenPort: int64(port),
WireguardPublicKey: privateKey.PublicKey().String(),
WireguardPrivateKeyEncrypted: hex.EncodeToString(encrypted),
},
PubEndpoints: endpoints,
}
n.NetResources = append(n.NetResources, nr)
if err = n.generatePeers(); err != nil {
return n, errors.Wrap(err, "failed to generate peers")
}
return n, nil
}
// AddAccess adds external access to the network through the given node
// the subnet will be routed through the access point on that node
func (n *NetworkBuilder) AddAccess(nodeID string, subnet schema.IPRange, wgPubKey string, ip4 bool) (*NetworkBuilder, string, error) {
if nodeID == "" {
return n, "", fmt.Errorf("nodeID cannot be empty")
}
var nodeExists bool
var node NetResource
for _, nr := range n.NetResources {
if nr.NodeId == nodeID {
node = nr
nodeExists = true
break
}
}
if !nodeExists {
return n, "", errors.New("can not add access through a node which is not in the network")
}
if len(node.PubEndpoints) == 0 {
return n, "", errors.New("access node must have at least 1 public endpoint")
}
var endpoint string
for _, ep := range node.PubEndpoints {
if ep.To4() != nil {
// ipv4 address
if ip4 {
endpoint = fmt.Sprintf("%s:%d", ep.String(), node.WireguardListenPort)
break
}
// we want ipv6 so use the next address
continue
}
if ep.To16() != nil {
// due to the previous branch this can now only be an ipv6 address
if !ip4 {
endpoint = fmt.Sprintf("[%s]:%d", node.PubEndpoints[0].String(), node.WireguardListenPort)
break
}
// we want ipv4 so use next address
continue
}
}
if endpoint == "" {
return n, "", errors.New("access node has no public endpoint of the requested type")
}
var privateKey wgtypes.Key
if wgPubKey == "" {
// assign with = rather than :=; := would shadow the outer privateKey and
// genWGQuick below would be handed a zero-value key
var err error
privateKey, err = wgtypes.GeneratePrivateKey()
if err != nil {
return n, "", errors.Wrap(err, "error during wireguard key generation")
}
wgPubKey = privateKey.PublicKey().String()
}
ap := AccessPoint{
NodeID: nodeID,
Subnet: subnet,
WGPublicKey: wgPubKey,
IP4: ip4,
}
n.AccessPoints = append(n.AccessPoints, ap)
if err := n.generatePeers(); err != nil {
return n, "", errors.Wrap(err, "failed to generate peers")
}
wgConf, err := genWGQuick(privateKey.String(), subnet, node.WireguardPublicKey, n.Network.Iprange, endpoint)
if err != nil {
return n, "", err
}
return n, wgConf, nil
}
// RemoveNode removes a node
func (n *NetworkBuilder) RemoveNode(schemaPath string, nodeID string) error {
for i, nr := range n.NetResources {
if nr.NodeId == nodeID {
n.NetResources = append(n.NetResources[:i], n.NetResources[i+1:]...)
break
}
}
// os.Create rather than os.Open: the schema file is written to below, and
// os.Open returns a read-only handle (the parameter is also renamed so it
// no longer shadows the imported schema package)
f, err := os.Create(schemaPath)
if err != nil {
return errors.Wrap(err, "failed to open network schema")
}
defer f.Close()
return n.Save(f)
}
func (n *NetworkBuilder) setPubEndpoints() error {
for i := range n.NetResources {
pep, err := n.getEndPointAddrs(pkg.StrIdentifier(n.NetResources[i].NodeId))
if err != nil {
return err
}
var endpoints []net.IP
for _, sn := range pep {
endpoints = append(endpoints, sn.IP)
}
n.NetResources[i].PubEndpoints = endpoints
}
// remove the pub endpoints from nodes which we assume have been marked
// as force hidden
hiddenNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) > 0 {
for _, peer := range nr.Peers {
if peer.Endpoint == "" {
hiddenNodes[peer.PublicKey] = struct{}{}
}
}
}
}
for i := range n.NetResources {
if _, exists := hiddenNodes[n.NetResources[i].WireguardPublicKey]; exists {
n.NetResources[i].PubEndpoints = nil
}
}
return nil
}
func (n *NetworkBuilder) pickPort() (uint, error) {
node, err := n.explorer.Directory.NodeGet(n.NodeID, false)
if err != nil {
return 0, err
}
p := uint(rand.Intn(6000) + 2000)
for isIn(node.WgPorts, p) {
p = uint(rand.Intn(6000) + 2000)
}
return p, nil
}
func isIn(l []int64, i uint) bool {
for _, x := range l {
if int64(i) == x {
return true
}
}
return false
}
func hasIPv4(n NetResource) bool {
for _, pep := range n.PubEndpoints {
if pep.To4() != nil {
return true
}
}
return false
}
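// Editor's note: hasIPv4 only inspects the already-collected public
// endpoints, so a node that merely has private IPv4 behind NAT still
// counts as "no IPv4" here; getEndPointAddrs filters private ranges out
// before this point.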
// This function assumes:
// - that a hidden node has functioning IPv4
// - that a public node ALWAYS has public IPv6, and OPTIONALLY public IPv4
// - that any public endpoint on any node is actually reachable (i.e. no firewall
// blocking incoming traffic)
func (n *NetworkBuilder) generatePeers() error {
// Find public node, which will be used to connect all hidden nodes.
// In case there are hidden nodes, the public node needs IPv4 support as well.
var hasHiddenNodes bool
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hasHiddenNodes = true
break
}
}
// Look for a public node to connect hidden nodes. This is only needed
// in case there are hidden nodes.
var pubNr string
if hasHiddenNodes {
for _, nr := range n.NetResources {
if hasIPv4(nr) {
pubNr = nr.NodeId
break
}
}
if pubNr == "" {
return errors.New("Network has hidden nodes but no public IPv4 node exists")
}
}
// We also need to inform nodes how to route the external access subnets.
// Working with the knowledge that these external subnets come in through
// the network through a single access point, which is part of the network
// and thus already routed, we can map the external subnets to the subnet
// of the access point, and add these external subnets to all peers who also
// have the associated internal subnet.
//
// Map the network subnets to their respective node ids first for easy access later
internalSubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
internalSubnets[nr.NodeId] = nr.Iprange
}
externalSubnets := make(map[string][]schema.IPRange) // go does not like `types.IPNet` as key
for _, ap := range n.AccessPoints {
externalSubnets[internalSubnets[ap.NodeID].String()] = append(externalSubnets[internalSubnets[ap.NodeID].String()], ap.Subnet)
}
// Maintain a mapping of access point nodes to the subnet and wg key they give access
// to, as these need to be added as peers as well for these nodes
accessPoints := make(map[string][]AccessPoint)
for _, ap := range n.AccessPoints {
accessPoints[ap.NodeID] = append(accessPoints[ap.NodeID], ap)
}
// Find all hidden nodes, and collect their subnets. Also collect the subnets
// of public IPv6 only nodes, since hidden nodes need IPv4 to connect.
hiddenSubnets := make(map[string]schema.IPRange)
// also maintain subnets from nodes who have only IPv6 since this will also
// need to be routed for hidden nodes
ipv6OnlySubnets := make(map[string]schema.IPRange)
for _, nr := range n.NetResources {
if len(nr.PubEndpoints) == 0 {
hiddenSubnets[nr.NodeId] = nr.Iprange
continue
}
if !hasIPv4(nr) {
ipv6OnlySubnets[nr.NodeId] = nr.Iprange
}
}
for i := range n.NetResources {
// Note: we need to loop by index and manually assign nr, doing
// for _, nr := range ... causes nr to be copied, meaning we can't modify
// it in place
nr := &n.NetResources[i]
nr.Peers = []workloads.WireguardPeer{}
for _, onr := range n.NetResources {
if nr.NodeId == onr.NodeId {
continue
}
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = onr.Iprange
allowedIPs[1] = *wgIP(&onr.Iprange)
var endpoint string
if len(nr.PubEndpoints) == 0 {
// If node is hidden, set only public peers (with IPv4), and set first public peer to
// contain all hidden subnets, except for the one owned by the node
if !hasIPv4(onr) {
continue
}
// Also add all other subnets if this is the pub node
if onr.NodeId == pubNr {
for owner, subnet := range hiddenSubnets {
// Do not add our own subnet
if owner == nr.NodeId {
continue
}
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
for _, subnet := range ipv6OnlySubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Endpoint must be IPv4
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
} else if len(onr.PubEndpoints) == 0 && hasIPv4(*nr) {
// if the peer is hidden but we have IPv4, we can connect to it, but we don't know
// an endpoint.
endpoint = ""
} else {
// if we are not hidden, we add all other nodes, unless we don't
// have IPv4, because then we also can't connect to hidden nodes.
// Ignore hidden nodes if we don't have IPv4
if !hasIPv4(*nr) && len(onr.PubEndpoints) == 0 {
continue
}
// both nodes are public therefore we can connect over IPv6
// if this is the selected pubNr - also need to add allowedIPs
// for the hidden nodes
if onr.NodeId == pubNr {
for _, subnet := range hiddenSubnets {
allowedIPs = append(allowedIPs, subnet)
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
// Since the node is not hidden, we know that it MUST have at least
// 1 IPv6 address
for _, pep := range onr.PubEndpoints {
if pep.To4() == nil && pep.To16() != nil {
endpoint = fmt.Sprintf("[%s]:%d", pep.String(), onr.WireguardListenPort)
break
}
}
// as a fallback assign IPv4
if endpoint == "" {
for _, pep := range onr.PubEndpoints {
if pep.To4() != nil {
endpoint = fmt.Sprintf("%s:%d", pep.String(), onr.WireguardListenPort)
break
}
}
}
}
// Add subnets for external access
for i := 0; i < len(allowedIPs); i++ {
for _, subnet := range externalSubnets[allowedIPs[i].String()] {
allowedIPs = append(allowedIPs, schema.IPRange{subnet.IPNet})
allowedIPs = append(allowedIPs, *wgIP(&schema.IPRange{subnet.IPNet}))
}
}
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: onr.WireguardPublicKey,
Iprange: onr.Iprange,
AllowedIprange: allowedIPs,
Endpoint: endpoint,
})
}
// Add configured external access peers
for _, ea := range accessPoints[nr.NodeId] {
allowedIPs := make([]schema.IPRange, 2)
allowedIPs[0] = schema.IPRange{ea.Subnet.IPNet}
allowedIPs[1] = *wgIP(&schema.IPRange{ea.Subnet.IPNet})
nr.Peers = append(nr.Peers, workloads.WireguardPeer{
PublicKey: ea.WGPublicKey,
Iprange: schema.IPRange{ea.Subnet.IPNet},
AllowedIprange: allowedIPs,
Endpoint: "",
})
}
}
return nil
}
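// isIPv4Subnet reports whether n is an IPv4 subnet with a prefix length of at most 30 bits.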
func isIPv4Subnet(n schema.IPRange) bool {
ones, bits := n.IPNet.Mask.Size()
if bits != 32 {
return false
}
return ones <= 30
}
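// genWGQuick renders a wg-quick configuration file for an external peer from
// the given private key, local subnet, peer public key, allowed subnet, and
// optional peer endpoint.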
func genWGQuick(wgPrivateKey string, localSubnet schema.IPRange, peerWgPubKey string, allowedSubnet schema.IPRange, peerEndpoint string) (string, error) {
type data struct {
PrivateKey string
Address string
PeerWgPubKey string
AllowedSubnet string
PeerEndpoint string
}
if !isIPv4Subnet(localSubnet) {
return "", errors.New("local subnet is not a valid IPv4 subnet")
}
tmpl, err := template.New("wg").Parse(wgTmpl)
if err != nil {
return "", err
}
buf := &bytes.Buffer{}
if err := tmpl.Execute(buf, data{
PrivateKey: wgPrivateKey,
Address: wgIP(&schema.IPRange{localSubnet.IPNet}).String(),
PeerWgPubKey: peerWgPubKey,
AllowedSubnet: strings.Join([]string{allowedSubnet.String(), types.NewIPNet(wgSubnet(&allowedSubnet.IPNet)).String()}, ","),
PeerEndpoint: peerEndpoint,
}); err != nil {
return "", err
}
return buf.String(), nil
}
var wgTmpl = `
[Interface]
PrivateKey = {{.PrivateKey}}
Address = {{.Address}}
[Peer]
PublicKey = {{.PeerWgPubKey}}
AllowedIPs = {{.AllowedSubnet}}
PersistentKeepalive = 20
{{if .PeerEndpoint}}Endpoint = {{.PeerEndpoint}}{{end}}
`
// NetworkGraph creates a networkgraph for a network
func (n *NetworkBuilder) NetworkGraph(w io.Writer) error {
nodes := make(map[string]dot.Node)
nodesByID := make(map[string]dot.Node)
graph := dot.NewGraph(dot.Directed)
for _, nr := range n.NetResources {
node := graph.Node(strings.Join([]string{nr.NodeId, nr.Iprange.String()}, "\n")).Box()
// set special style for "hidden" nodes
if len(nr.PubEndpoints) == 0 {
node.Attr("style", "dashed")
node.Attr("color", "blue")
graph.AddToSameRank("hidden nodes", node)
}
nodes[nr.WireguardPublicKey] = node
nodesByID[nr.NodeId] = node
}
// add external access
for _, ea := range n.AccessPoints {
node := graph.Node(strings.Join([]string{"External network", ea.Subnet.String()}, "\n")).Box()
// set style for hidden nodes
node.Attr("style", "dashed")
node.Attr("color", "green")
graph.AddToSameRank("external access", node)
// add link to access point
edge := graph.Edge(node, nodesByID[ea.NodeID], n.Iprange.String())
if ea.IP4 {
edge.Attr("color", "blue")
}
nodes[ea.WGPublicKey] = node
}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
allowedIPs := make([]string, 0, len(peer.AllowedIprange)/2)
for _, aip := range peer.AllowedIprange {
if !isCGN(aip) {
allowedIPs = append(allowedIPs, aip.String())
}
}
edge := graph.Edge(nodes[nr.WireguardPublicKey], nodes[peer.PublicKey], strings.Join(allowedIPs, "\n"))
if peer.Endpoint == "" {
// connections to this peer are IPv4 -> blue, and cannot be initiated by this node -> dashed
edge.Attr("color", "blue").Attr("style", "dashed")
continue
}
if net.ParseIP(peer.Endpoint[:strings.LastIndex(peer.Endpoint, ":")]).To4() != nil {
// IPv4 connection -> blue
edge.Attr("color", "blue")
}
}
}
graph.Write(w)
return nil
}
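// wgIP maps a subnet to its single wireguard address (/32) inside the
// 100.64.0.0/10 range.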
func wgIP(subnet *schema.IPRange) *schema.IPRange {
// example: 10.3.1.0 -> 100.64.3.1
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
return &schema.IPRange{net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(32, 32),
}}
}
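// wgSubnet maps a subnet to its wireguard subnet inside the 100.64.0.0/10
// range, widening the original mask by 8 bits.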
func wgSubnet(subnet *net.IPNet) *net.IPNet {
// example: 10.3.1.0 -> 100.64.3.1
a := subnet.IP[len(subnet.IP)-3]
b := subnet.IP[len(subnet.IP)-2]
ones, _ := subnet.Mask.Size()
return &net.IPNet{
IP: net.IPv4(0x64, 0x40, a, b),
Mask: net.CIDRMask(ones+8, 32),
}
}
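// isPrivateIP reports whether ip is a loopback, link-local, RFC 1918 private,
// or IPv6 unique-local address.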
func isPrivateIP(ip net.IP) bool {
privateIPBlocks := []*net.IPNet{}
for _, cidr := range []string{
"127.0.0.0/8", // IPv4 loopback
"10.0.0.0/8", // RFC1918
"172.16.0.0/12", // RFC1918
"192.168.0.0/16", // RFC1918
"169.254.0.0/16", // RFC3927 link-local
"::1/128", // IPv6 loopback
"fe80::/10", // IPv6 link-local
"fc00::/7", // IPv6 unique local addr
} {
_, block, err := net.ParseCIDR(cidr)
if err != nil {
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
}
privateIPBlocks = append(privateIPBlocks, block)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true
}
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return true
}
}
return false
}
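// isCGN reports whether the subnet lies in the 100.64.0.0/10 carrier-grade
// NAT range used for wireguard addresses.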
func isCGN(subnet schema.IPRange) bool {
_, block, err := net.ParseCIDR("100.64.0.0/10")
if err != nil {
panic(err)
}
return block.Contains(subnet.IP)
}
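// extractAccessPoints rebuilds the AccessPoints list from peers that do not
// correspond to an actual network resource.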
func (n *NetworkBuilder) extractAccessPoints() {
// gather all actual nodes, using their wg pubkey as key in the map (NodeID
// can't be seen in the actual peer struct)
actualNodes := make(map[string]struct{})
for _, nr := range n.NetResources {
actualNodes[nr.WireguardPublicKey] = struct{}{}
}
aps := []AccessPoint{}
for _, nr := range n.NetResources {
for _, peer := range nr.Peers {
if _, exists := actualNodes[peer.PublicKey]; !exists {
// peer is not a node so it must be external
aps = append(aps, AccessPoint{
NodeID: nr.NodeId,
Subnet: peer.Iprange,
WGPublicKey: peer.PublicKey,
// we can't infer if we use IPv6 or IPv4
})
}
}
}
n.AccessPoints = aps
}
// getEndPointAddrs returns the public endpoint addresses of a node: a node
// has either a public namespace with []ipv4 and/or []ipv6, or some interface
// has received a SLAAC addr which has been registered in BCDB
func (n *NetworkBuilder) getEndPointAddrs(nodeID pkg.Identifier) ([]types.IPNet, error) {
schemaNode, err := n.explorer.Directory.NodeGet(nodeID.Identity(), false)
if err != nil {
return nil, err
}
node := types.NewNodeFromSchema(schemaNode)
var endpoints []types.IPNet
if node.PublicConfig != nil {
if node.PublicConfig.IPv4.IP != nil {
ip := node.PublicConfig.IPv4.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv4)
}
}
if node.PublicConfig.IPv6.IP != nil {
ip := node.PublicConfig.IPv6.IP
if ip.IsGlobalUnicast() && !isPrivateIP(ip) {
endpoints = append(endpoints, node.PublicConfig.IPv6)
}
}
} else {
for _, iface := range node.Ifaces |
}
// If the length is 0, then it's a hidden node
return endpoints, nil
}
| {
for _, ip := range iface.Addrs {
if !ip.IP.IsGlobalUnicast() || isPrivateIP(ip.IP) {
continue
}
endpoints = append(endpoints, ip)
}
} | conditional_block |
mf6_data_tutorial06.py | # ---
# jupyter:
# jupytext:
# text_representation: | # format_version: "1.5"
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# metadata:
# section: mf6
# ---
# # MODFLOW 6: Working with MODFLOW List Data.
#
# This tutorial shows how to view, access, and change the underlying data
# variables for MODFLOW 6 objects in FloPy. Interaction with a FloPy
# MODFLOW 6 model is different from other models, such as MODFLOW-2005,
# MT3D, and SEAWAT, for example.
#
# FloPy stores model data in data objects (`MFDataArray`, `MFDataList`,
# `MFDataScalar` objects) that are accessible from packages. Data can be
# added to a package by using the appropriate parameters when the package is
# constructed and through package attributes.
#
# The MODFLOW 6 simulation structure is arranged in the following
# generalized way:
#
# > Simulation --> Package --> DATA
# >
# > Simulation --> Model --> Package (--> Package) --> DATA
#
#
# This tutorial focuses on MODFLOW Data from the `PackageData`,
# `ConnectionData`, `StressPeriodData`, and other similar blocks. These
# blocks contain data with columns, data that fits into a numpy recarray,
# pandas data frame, or a spreadsheet with column headers. These data are
# stored by FloPy in an `MFList` or `MFTransientList` object and are referred
# to as MODFLOW list data.
# ## Introduction to MODFLOW List Data
#
# MODFLOW contains list data that can be conveniently stored in a numpy
# recarray or a pandas dataframe. These data consist of a single row or
# multiple rows, with each column containing the same data type.
#
# Some MODFLOW list data only contains a single row, like the `OC` package's
# `head print_format` option and the `NPF` package's `rewet_record`. Other
# MODFLOW list data can contain multiple rows, like the `MAW` package's
# `packagedata` and `connectiondata`. FloPy stores both single row and
# multiple row list data in `MFList` objects.
#
# MODFLOW stress period data can contain lists of data for one or more stress
# periods. FloPy stores stress period list data in `MFTransientList` objects.
# Note that not all MODFLOW stress period data is "list" data that fits neatly
# in a recarray or a pandas dataframe. Some packages including `RCH` and
# `EVT` have a `READASARRAYS` option that allows stress period data to be
# input as an array. When `READASARRAYS` is selected, FloPy stores stress
# period array data in an `MFTransientArray` object (see tutorial 8).
#
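# For example, when `READASARRAYS` is active, recharge could be supplied to
# the array-based `RCHA` package as
# `rcha = flopy.mf6.ModflowGwfrcha(gwf, recharge=0.001)` (a minimal sketch;
# the scalar recharge value here is illustrative only).
#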
# Examples of using FloPy to store, update, and retrieve different types of
# MODFLOW list data are given below. The examples start by first creating a
# simulation (`MFSimulation`) and a model (`MFModel`) object in FloPy.
# package import
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import numpy as np
import flopy
# set up where simulation workspace will be stored
temp_dir = TemporaryDirectory()
workspace = temp_dir.name
name = "tutorial06_mf6_data"
# create the Flopy simulation and tdis objects
sim = flopy.mf6.MFSimulation(
sim_name=name, exe_name="mf6", version="mf6", sim_ws=workspace
)
tdis = flopy.mf6.modflow.mftdis.ModflowTdis(
sim,
pname="tdis",
time_units="DAYS",
nper=2,
perioddata=[(1.0, 1, 1.0), (1.0, 1, 1.0)],
)
# create the Flopy groundwater flow (gwf) model object
model_nam_file = f"{name}.nam"
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, model_nam_file=model_nam_file)
# create the flopy iterative model solver (ims) package object
ims = flopy.mf6.modflow.mfims.ModflowIms(sim, pname="ims", complexity="SIMPLE")
# create the discretization package
bot = np.linspace(-50.0 / 3.0, -3.0, 3)
delrow = delcol = 4.0
dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis(
gwf,
pname="dis",
nogrb=True,
nlay=3,
nrow=10,
ncol=10,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
)
# ## Adding MODFLOW Package Data, Connection Data, and Option Lists
#
# MODFLOW Package data, connection data, and option lists are stored by FloPy
# as numpy recarrays. FloPy accepts numpy recarrays as input, but also
# supports the other formats discussed below.
#
# MODFLOW option lists that contain only a single row of data can be
# specified either by:
#
# 1. Specifying a string containing the entire line as it would be displayed
# in the package file (`rewet_record="REWET WETFCT 1.0 IWETIT 1 IHDWET 0"`)
# 2. Specifying the data in a tuple within a list
# (`rewet_record=[("WETFCT", 1.0, "IWETIT", 1, "IHDWET", 0)]`)
#
# In the example below the npf package is created setting the `rewet_record`
# option to a string of text as would be typed into the package file.
npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf(
gwf,
rewet_record="REWET WETFCT 1.0 IWETIT 1 IHDWET 0",
pname="npf",
icelltype=1,
k=1.0,
save_flows=True,
xt3doptions="xt3d rhs",
)
# `rewet_record` is then set using the npf package's `rewet_record` property.
# This time `rewet_record` is defined using a tuple within a list.
npf.rewet_record = [("WETFCT", 1.1, "IWETIT", 0, "IHDWET", 1)]
# MODFLOW multirow lists, like package data and connection data, can be
# specified:
#
# 1. As a list of tuples where each tuple represents a row in the list
# (`stress_period_data = [((1, 2, 3), 20.0), ((1, 7, 3), 25.0)]`)
# 2. As a numpy recarray. Building a numpy recarray is more complicated and
# largely beyond the scope of this guide, but a minimal sketch is shown below.
#
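# As a minimal sketch (assuming the CHD package's dtype of a zero-based
# `cellid` tuple plus a `head` float), such a recarray could be built as:
#
# spd_dtype = np.dtype([("cellid", object), ("head", np.float64)])
# spd_recarray = np.rec.fromrecords(
#     [((1, 2, 3), 20.0), ((1, 7, 3), 25.0)], dtype=spd_dtype
# )
#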
# In the example below the chd package is created, setting `stress_period_data`
# as a list of tuples.
# We build the chd package using a list of tuples for `stress_period_data`
# stress_period_data = [(first_chd_cell, head), (second_chd_cell, head), ...]
# Note that the cellid information (layer, row, column) is encapsulated in
# a tuple.
stress_period_data = [((1, 10, 10), 100.0), ((1, 10, 11), 105.0)]
# build chd package
chd = flopy.mf6.modflow.mfgwfchd.ModflowGwfchd(
gwf,
pname="chd",
maxbound=len(stress_period_data),
stress_period_data=stress_period_data,
save_flows=True,
)
# ## Adding Stress Period List Data
#
# MODFLOW stress period data is stored by FloPy as a dictionary of numpy
# recarrays, where each dictionary key is a zero-based stress period and each
# dictionary value is a recarray containing the stress period data for that
# stress period. FloPy keeps this stress period data in a `MFTransientList`
# object and this data type is referred to as a transient list.
#
# FloPy accepts stress period data as a dictionary of numpy recarrays, but also
# supports replacing the recarrays with lists of tuples discussed above.
# Stress period data spanning multiple stress periods must be specified as a
# dictionary of lists where the dictionary key is the stress period expressed
# as a zero-based integer.
#
# The example below creates `stress_period_data` for the wel package with the
# first stress period containing a single well and the second stress period
# empty. When empty stress period data is entered, FloPy writes an empty
# stress period block to the package file.
# First we create wel package with stress_period_data dictionary
# keys as zero-based integers so key "0" is stress period 1
stress_period_data = {
0: [((2, 3, 1), -25.0)], # stress period 1 well data
1: [],
} # stress period 2 well data is empty
# Then, using the dictionary created above, we build the wel package.
wel = flopy.mf6.ModflowGwfwel(
gwf,
print_input=True,
print_flows=True,
stress_period_data=stress_period_data,
save_flows=False,
pname="WEL-1",
)
# ## Retrieving MODFLOW Package Data, Connection Data, and Option Lists
#
# MODFLOW package data, connection data, and option lists can be retrieved
# with `get_data`, `array`, `repr`/`str`,
# or `get_file_entry`.
#
# | Retrieval Method | Description |
# | :--- | :---- |
# | get_data | Returns recarray |
# | array | Returns recarray |
# | repr/str | Returns string with storage information followed by recarray's repr/str |
# | get_file_entry | Returns string containing data formatted for the MODFLOW-6 package file. Certain zero-based numbers, like layer, row, column, are converted to one-based numbers. |
# The `NPF` package's `rewet_record` is printed below using the different data
# retrieval methods highlighted above.
# First we use the `get_data` method to get the rewet_record as a recarray.
print(npf.rewet_record.get_data())
# Next we use the `array` method, which also returns a recarray.
print(npf.rewet_record.array)
# Then we use `repr` to print a string representation of `rewet_record`.
print(repr(npf.rewet_record))
# Using `str` prints a similar string representation of `rewet_record`.
print(str(npf.rewet_record))
# Last, using the `get_file_entry` method the data is printed as it would
# appear in a MODFLOW 6 file.
print(npf.rewet_record.get_file_entry())
# ## Retrieving MODFLOW Stress Period List Data
# Stress period data can be retrieved with `get_data`, `array`, `repr`/`str`,
# or `get_file_entry`.
#
# | Retrieval Method | Description |
# | :--- | :---- |
# | get_data | Returns dictionary of recarrays |
# | array | Returns a single recarray for all stress periods |
# | repr/str | Returns string with storage information followed by recarray repr/str for each recarray |
# | get_file_entry(key) | Returns string containing data formatted for the MODFLOW-6 package file for the stress period specified by key |
# The `WEL` package's `stress_period_data` is printed below using the
# different data retrieval methods highlighted above.
# First we use the `get_data` method to get the stress period data as a
# dictionary of recarrays.
print(wel.stress_period_data.get_data())
# Next we use the `array` attribute to get the stress period data as a single
# recarray.
print(wel.stress_period_data.array)
# `repr` can be used to generate a string representation of stress period data.
print(repr(wel.stress_period_data))
# `str` produces a similar string representation of stress period data.
print(str(wel.stress_period_data))
# The `get_file_entry` method prints the stress period data as it would
# appear in a MODFLOW 6 file.
print(wel.stress_period_data.get_file_entry(0))
try:
temp_dir.cleanup()
except PermissionError:
# can occur on windows: https://docs.python.org/3/library/tempfile.html#tempfile.TemporaryDirectory
pass | # extension: .py
# format_name: light | random_line_split |
business-export-import.component.ts | //Import library
import { Component, ViewChild, ElementRef, OnInit, AfterViewInit } from '@angular/core';
import * as XLSX from 'xlsx';
import { MatTableDataSource } from '@angular/material/table';
import { MatPaginator } from '@angular/material/paginator';
import { Router } from '@angular/router';
import { tap, startWith, map } from 'rxjs/operators';
import { MatTableFilter } from 'mat-table-filter';
import { Observable } from 'rxjs';
import { FormControl } from '@angular/forms';
//Import service
import { MarketService } from '../../../../_services/APIService/market.service';
import { PaginationService } from '../../../../_services/PaginationService';
import { PagerService } from 'src/app/_services/pagination.service';
//Import model
import { CompanyDetailModel, ProductModel, ImportExportValueModel } from '../../../../_models/APIModel/domestic-market.model';
import { CareerModel, DistrictModel } from 'src/app/_models/APIModel/domestic-market.model';
import { formatDate } from '@angular/common';
import { LoginService } from 'src/app/_services/APIService/login.service';
//Interface
interface HashTableNumber<T> {
[key: string]: T;
}
export class filterModel {
ten_doanh_nghiep: string = '';
ten_quan_huyen: string = '';
ten_nganh_nghe: string = '';
}
@Component({
selector: 'business-export-import',
templateUrl: './business-export-import.component.html',
styleUrls: ['../../manager_layout.scss'],
})
export class BusinessExportImportComponent implements OnInit {
//Declare variable for CONSTANT
public readonly SEPERATE_FILTER = ";";
public readonly FORMAT = 'dd/MM/yyyy';
public readonly LOCALE = 'en-GB';
public readonly DEFAULT_IMAGE: string = '../../../../assets/img/brandlogo/company_ph01.jpg';
public readonly DEFAULT_PERIOD = "6 Tháng";
//Declare variable for TS & HTML
public filterEntity;
public tempFilter;
public filterType: MatTableFilter;
public dataSource: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public dataSourceImport: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public selectedCategory: string = "Tất cả";
public selectedAddress: string = "Tất cả";
public selectedName: string;
public selected_Career: string = "";
public selectedType: string = "Dạng bảng";
public typeShow: number = 1;
public displayedColumns: string[] = ['index', 'ten_doanh_nghiep', 'mst', 'san_luong', 'gia_tri', 'chi_tiet_doanh_nghiep'];
public filteredCareerList: Observable<CareerModel[]>;
public addresses: Array<any> = [null];
public loading: boolean = false;
public types = ['Dạng thẻ', 'Dạng bảng'];
public page: number = 1;
public pager: any = {};
public selectedPeriod: string = "";
public periods = ["Tháng", "Quý", "6 Tháng", "Năm"];
public months: number[] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
public quarters: any[] = [
{ ma_so: 1, ma_chu: "I" },
{ ma_so: 2, ma_chu: "II" },
{ ma_so: 3, ma_chu: "III" },
{ ma_so: 4, ma_chu: "IV" },
];
public selectedHalf: number = 1;
public selectedMonth: number = 1;
public selectedQuarter: number = 0;
public selectedYear: number = 2020;
public years: Array<number> = [];
public halfs: number[] = [1];
//Declare variable for ONLY TS
public control = new FormControl();
public _marketService: MarketService;
public errorMessage: any;
public careerList: Array<CareerModel> = new Array<CareerModel>();
public districtList: Array<DistrictModel> = new Array<DistrictModel>();
public categories = [null];//['Tất cả', 'Hạt điều', 'Hạt tiêu', 'Hạt cà phê', 'Cao su'];
public pagedItems: any[];
public productList: any;
public isSCT: boolean = false;
//Viewchild
@ViewChild('TABLE1', { static: false }) table: ElementRef;
@ViewChild('scheduledOrdersPaginator', { static: true }) paginator: MatPaginator;
@ViewChild('scheduledOrdersPaginator1', { static: true }) paginator1: MatPaginator;
@ViewChild('selected_Career', { static: false }) careerEle: ElementRef;
constructor(
public marketService: MarketService,
public paginationService: PaginationService,
public _loginService: LoginService,
public router: Router,
) {
this._marketService = marketService;
}
ngOnInit(): void {
this.filterEntity = new CompanyDetailModel();
this.tempFilter = new CompanyDetailModel();
this.filterType = MatTableFilter.ANYWHERE;
this.selectedPeriod = this.DEFAULT_PERIOD;
this.selectedYear = this.getCurrentYear();
this.selectedMonth = this.getCurrentMonth();
this.years = this.initialYears();
this.isSCT = this._loginService.userValue.user_role < 3;
console.log(this._loginService.userValue);
this.getAllCompanyExport();
this.getAllCompanyImport();
this.getAllDistrict();
}
// ngAfterViewInit(): void {
// if (this.typeShow == 1)
// this.paginator.page
// .pipe(
// tap(() => this.loadLessonsPage())
// )
// .subscribe();
// }
//Function for PROCESS-FLOW -------------------------------------------------------------------------------
public getAllDistrict() {
console.log("+ Function: GetAllDistrict()");
this._marketService.GetAllDistrict().subscribe(
allrecords => {
this.districtList = allrecords.data as DistrictModel[];
this.districtList.forEach(element => this.addresses.push(element.ten_quan_huyen));
});
}
public getAllNganhNghe() {
console.log("+ Function: GetAllNganhNghe()");
this._marketService.GetAllCareer().subscribe(
allrecords => {
this.careerList = allrecords.data as CareerModel[];
this.careerList.forEach(element => element.ma_nganh_nghe.length > 3 ? this.categories.push(element.ten_kem_ma) : 0);
});
this.filteredCareerList = this.control.valueChanges.pipe(
startWith(''),
map(value => this._filter(value))
);
}
public getAllCompanyImport() {
console.log("+ Function: getAllCompanyImport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyImport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSourceImport = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSourceImport.data.length) {
this.dataSourceImport.paginator = this.paginator1;
this.paginator1._intl.itemsPerPageLabel = "Số hàng";
this.paginator1._intl.firstPageLabel = "Trang Đầu";
this.paginator1._intl.lastPageLabel = "Trang Cuối";
this.paginator1._intl.previousPageLabel = "Trang Trước";
this.paginator1._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllCompanyExport() {
console.log("+ Function: getAllCompanyExport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyExport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSource = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSource.data.length) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllProduct(allrecords) {
console.log("+ Function: GetAllProduct");
this.productList = allrecords.data as Array<ProductModel>;
if (this.typeShow == 1) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
//Function for EVENT HTML -------------------------------------------------------------------------------
public timKiem() {
this.getAllCompanyExport();
this.getAllCompanyImport();
}
//Export to Excel
public exportToExcel(filename: string, sheetname: string, is_export: boolean) {
let excelFileName: string;
let newArray: any[] = [];
//Format the name of the Excel file to be exported
sheetname = sheetname.replace('/', '_');
excelFileName = filename + '.xlsx';
//Alias column name
let data;
if (is_export)
data = Object.values(this.dataSource.data);
else
data = Object.values(this.dataSourceImport.data);
Object.keys(data).forEach((key, index) => {
newArray.push({
'Tên doanh nghiệp': this.formatString(data[key].ten_doanh_nghiep),
// 'Điện thoại': this.formatString(data[key].dien_thoai),
'Mã số thuế': data[key].mst,
'Sản lượng': data[key].tong_san_luong,
'Trị giá': data[key].tong_tri_gia
});
});
const ws: XLSX.WorkSheet = XLSX.utils.json_to_sheet(newArray);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
/* save to file */
XLSX.utils.book_append_sheet(wb, ws, sheetname);
XLSX.writeFile(wb, excelFileName);
}
public _filter(value: string): CareerModel[] {
const filterValue = this._normalizeValue(value);
return this.careerList.filter(career => this._normalizeValue(career.ten_kem_ma).includes(filterValue));
}
public openDetailCompany(mst: string) {
let url = this.router.serializeUrl(
this.router.createUrlTree([encodeURI('#') + 'manager/business/search/' + mst]));
window.open(url.replace('%23', '#'), "_blank");
}
public changeType() {
if (this.selectedType == this.types[0]) {
this.typeShow = 0;
}
else {
this.typeShow = 1;
//this.ngAfterViewInit();
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
public filter() {
this.filterEntity = { ...this.tempFilter }
}
public cancel() {
this.tempFilter = new filterModel();
this.filterEntity = { ...this.tempFilter };
}
changePeriod() {
switch (this.selectedPeriod) {
case "Tháng":
this.selectedMonth = this.getCurrentMonth();
this.selectedYear = this.getCurrentYear();
break;
case "Quý":
this.selectedQuarter = this.getCurrentQuarter();
this.selectedYear = this.getCurrentYear();
break;
case "Năm":
this.selectedYear = this.getCurrentYear();
break;
case "6 Tháng":
this.selectedYear = this.getCurrentYear();
this.selectedHalf = 1;
break;
default:
break;
}
}
//Function for EXTENSION -------------------------------------------------------------------------------
public loadLessonsPage() {
// this.dataSource;
// this.setPage(1);
}
public unicodeToAZ(str: string) {
str = str.replace(/à|á|ạ|ả|ã|â|ầ|ấ|ậ|ẩ|ẫ|ă|ằ|ắ|ặ|ẳ|ẵ/g, "a");
str = str.replace(/è|é|ẹ|ẻ|ẽ|ê|ề|ế|ệ|ể|ễ/g, "e");
str = str.replace(/ì|í|ị|ỉ|ĩ/g, "i");
str = str.replace(/ò|ó|ọ|ỏ|õ|ô|ồ|ố|ộ|ổ|ỗ|ơ|ờ|ớ|ợ|ở|ỡ/g, "o");
str = str.replace(/ù|ú|ụ|ủ|ũ|ư|ừ|ứ|ự|ử|ữ/g, "u");
str = str.replace(/ỳ|ý|ỵ|ỷ|ỹ/g, "y");
str = str.replace(/đ/g, "d");
return str;
}
public _normalizeValue(value: string): string {
return value.toLowerCase().replace(/\s/g, '');
}
public formatDateFromString(date: string) {
if (!date) {
return '';
}
return formatDate(date, this.FORMAT, this.LOCALE);
}
public formatString(value: string) {
if (!value) {
return '';
}
else if (value.trim().toLowerCase() === 'null') {
retu | urrentDate = new Date();
return currentDate.getMonth() + 1;
}
public getCurrentYear() {
var currentDate = new Date();
return currentDate.getFullYear();
}
public getCurrentQuarter() {
let currentDate = new Date();
let month = currentDate.getMonth() + 1;
return month <= 3 ? 1 : month <= 6 ? 2 : month <= 9 ? 3 : 4;
}
public initialYears() {
let returnYear: Array<any> = [];
let currentDate = new Date();
let nextYear = currentDate.getFullYear() + 1;
for (let index = 0; index < 11; index++) {
returnYear.push(nextYear - index);
}
return returnYear;
}
// applyFilter(type: string, filterValue: string) {
// let newFilter = "";
// let checkAdded = false;
// if (this._currentFilter.length > 0) {
// let param = this._currentFilter.split(this.SEPERATE_FILTER);
// param.forEach(element => {
// if (element.length > 0) {
// let newValueFilter = "";
// let key = element.split("|")[0];
// if (type == key) {
// newValueFilter = key + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// checkAdded = true;
// }
// else {
// if (newFilter.length > 0) newFilter += ";" + element;
// else newFilter = element;
// }
// }
// });
// }
// if (!checkAdded) {
// let newValueFilter = type + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// }
// this._currentFilter = newFilter;
// // filterValue = type + '|' + filterValue;
// console.log(this._currentFilter);
// this.dataSource.filter = this._currentFilter;
// }
// removecompany(key: string) {
// console.log(key);
// }
// addFavourite(company: Company) {
// console.log(company);
// }
// addToCart(company: Company) {
// console.log(company);
// }
}
| rn '';
}
else {
return value.trim();
}
}
public getCurrentMonth() {
var c | identifier_body |
business-export-import.component.ts | //Import library
import { Component, ViewChild, ElementRef, OnInit, AfterViewInit } from '@angular/core';
import * as XLSX from 'xlsx';
import { MatTableDataSource } from '@angular/material/table';
import { MatPaginator } from '@angular/material/paginator';
import { Router } from '@angular/router';
import { tap, startWith, map } from 'rxjs/operators';
import { MatTableFilter } from 'mat-table-filter';
import { Observable } from 'rxjs';
import { FormControl } from '@angular/forms';
//Import service
import { MarketService } from '../../../../_services/APIService/market.service';
import { PaginationService } from '../../../../_services/PaginationService';
import { PagerService } from 'src/app/_services/pagination.service';
//Import model
import { CompanyDetailModel, ProductModel, ImportExportValueModel } from '../../../../_models/APIModel/domestic-market.model';
import { CareerModel, DistrictModel } from 'src/app/_models/APIModel/domestic-market.model';
import { formatDate } from '@angular/common';
import { LoginService } from 'src/app/_services/APIService/login.service';
//Interface
interface HashTableNumber<T> {
[key: string]: T;
}
export class filterModel {
ten_doanh_nghiep: string = '';
ten_quan_huyen: string = '';
ten_nganh_nghe: string = '';
}
@Component({
selector: 'business-export-import',
templateUrl: './business-export-import.component.html',
styleUrls: ['../../manager_layout.scss'],
})
export class BusinessExportImportComponent implements OnInit {
//Declare variable for CONSTANT
public readonly SEPERATE_FILTER = ";";
public readonly FORMAT = 'dd/MM/yyyy';
public readonly LOCALE = 'en-GB';
public readonly DEFAULT_IMAGE: string = '../../../../assets/img/brandlogo/company_ph01.jpg';
public readonly DEFAULT_PERIOD = "6 Tháng";
//Declare variable for TS & HTML
public filterEntity;
public tempFilter;
public filterType: MatTableFilter;
public dataSource: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public dataSourceImport: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public selectedCategory: string = "Tất cả";
public selectedAddress: string = "Tất cả";
public selectedName: string;
public selected_Career: string = "";
public selectedType: string = "Dạng bảng";
public typeShow: number = 1;
public displayedColumns: string[] = ['index', 'ten_doanh_nghiep', 'mst', 'san_luong', 'gia_tri', 'chi_tiet_doanh_nghiep'];
public filteredCareerList: Observable<CareerModel[]>;
public addresses: Array<any> = [null];
public loading: boolean = false;
public types = ['Dạng thẻ', 'Dạng bảng'];
public page: number = 1;
public pager: any = {};
public selectedPeriod: string = "";
public periods = ["Tháng", "Quý", "6 Tháng", "Năm"];
public months: number[] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
public quarters: any[] = [
{ ma_so: 1, ma_chu: "I" },
{ ma_so: 2, ma_chu: "II" },
{ ma_so: 3, ma_chu: "III" },
{ ma_so: 4, ma_chu: "IV" },
];
public selectedHalf: number = 1;
public selectedMonth: number = 1;
public selectedQuarter: number = 0;
public selectedYear: number = 2020;
public years: Array<number> = [];
public halfs: number[] = [1];
//Declare variable for ONLY TS
public control = new FormControl();
public _marketService: MarketService;
public errorMessage: any;
public careerList: Array<CareerModel> = new Array<CareerModel>();
public districtList: Array<DistrictModel> = new Array<DistrictModel>();
public categories = [null];//['Tất cả', 'Hạt điều', 'Hạt tiêu', 'Hạt cà phê', 'Cao su'];
public pagedItems: any[];
public productList: any;
public isSCT: boolean = false;
//Viewchild
@ViewChild('TABLE1', { static: false }) table: ElementRef;
@ViewChild('scheduledOrdersPaginator', { static: true }) paginator: MatPaginator;
@ViewChild('scheduledOrdersPaginator1', { static: true }) paginator1: MatPaginator;
@ViewChild('selected_Career', { static: false }) careerEle: ElementRef;
constructor(
public marketService: MarketService,
public paginationService: PaginationService,
public _loginService: LoginService,
public router: Router,
) {
this._marketService = marketService;
}
ngOnInit(): void {
this.filterEntity = new CompanyDetailModel();
this.tempFilter = new CompanyDetailModel();
this.filterType = MatTableFilter.ANYWHERE;
this.selectedPeriod = this.DEFAULT_PERIOD;
this.selectedYear = this.getCurrentYear();
this.selectedMonth = this.getCurrentMonth();
this.years = this.initialYears();
this.isSCT = this._loginService.userValue.user_role < 3;
console.log(this._loginService.userValue);
this.getAllCompanyExport();
this.getAllCompanyImport();
this.getAllDistrict();
}
// ngAfterViewInit(): void {
// if (this.typeShow == 1)
// this.paginator.page
// .pipe(
// tap(() => this.loadLessonsPage())
// )
// .subscribe();
// }
//Function for PROCESS-FLOW -------------------------------------------------------------------------------
public getAllDistrict() {
console.log("+ Function: GetAllDistrict()");
this._marketService.GetAllDistrict().subscribe(
allrecords => {
this.districtList = allrecords.data as DistrictModel[];
this.districtList.forEach(element => this.addresses.push(element.ten_quan_huyen));
});
}
public getAllNganhNghe() {
console.log("+ Function: GetAllNganhNghe()");
this._marketService.GetAllCareer().subscribe(
allrecords => {
this.careerList = allrecords.data as CareerModel[];
this.careerList.forEach(element => element.ma_nganh_nghe.length > 3 ? this.categories.push(element.ten_kem_ma) : 0);
});
this.filteredCareerList = this.control.valueChanges.pipe(
startWith(''),
map(value => this._filter(value))
);
}
public getAllCompanyImport() {
console.log("+ Function: getAllCompanyImport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyImport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSourceImport = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSourceImport.data.length) {
this.dataSourceImport.paginator = this.paginator1;
this.paginator1._intl.itemsPerPageLabel = "Số hàng";
this.paginator1._intl.firstPageLabel = "Trang Đầu";
this.paginator1._intl.lastPageLabel = "Trang Cuối";
this.paginator1._intl.previousPageLabel = "Trang Trước";
this.paginator1._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllCompanyExport() {
console.log("+ Function: getAllCompanyExport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
| valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyExport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSource = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSource.data.length) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllProduct(allrecords) {
console.log("+ Function: GetAllProduct");
this.productList = allrecords.data as Array<ProductModel>;
if (this.typeShow == 1) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
//Function for EVENT HTML -------------------------------------------------------------------------------
public timKiem() {
this.getAllCompanyExport();
this.getAllCompanyImport();
}
//Export to Excel
public exportToExcel(filename: string, sheetname: string, is_export: boolean) {
let excelFileName: string;
let newArray: any[] = [];
//Format the name of the Excel file to be exported
sheetname = sheetname.replace('/', '_');
excelFileName = filename + '.xlsx';
//Alias column name
let data;
if (is_export)
data = Object.values(this.dataSource.data);
else
data = Object.values(this.dataSourceImport.data);
Object.keys(data).forEach((key, index) => {
newArray.push({
'Tên doanh nghiệp': this.formatString(data[key].ten_doanh_nghiep),
// 'Điện thoại': this.formatString(data[key].dien_thoai),
'Mã số thuế': data[key].mst,
'Sản lượng': data[key].tong_san_luong,
'Trị giá': data[key].tong_tri_gia
});
});
const ws: XLSX.WorkSheet = XLSX.utils.json_to_sheet(newArray);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
/* save to file */
XLSX.utils.book_append_sheet(wb, ws, sheetname);
XLSX.writeFile(wb, excelFileName);
}
public _filter(value: string): CareerModel[] {
const filterValue = this._normalizeValue(value);
return this.careerList.filter(career => this._normalizeValue(career.ten_kem_ma).includes(filterValue));
}
public openDetailCompany(mst: string) {
let url = this.router.serializeUrl(
this.router.createUrlTree([encodeURI('#') + 'manager/business/search/' + mst]));
window.open(url.replace('%23', '#'), "_blank");
}
public changeType() {
if (this.selectedType == this.types[0]) {
this.typeShow = 0;
}
else {
this.typeShow = 1;
//this.ngAfterViewInit();
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
public filter() {
this.filterEntity = { ...this.tempFilter }
}
public cancel() {
this.tempFilter = new filterModel();
this.filterEntity = { ...this.tempFilter };
}
changePeriod() {
switch (this.selectedPeriod) {
case "Tháng":
this.selectedMonth = this.getCurrentMonth();
this.selectedYear = this.getCurrentYear();
break;
case "Quý":
this.selectedQuarter = this.getCurrentQuarter();
this.selectedYear = this.getCurrentYear();
break;
case "Năm":
this.selectedYear = this.getCurrentYear();
break;
case "6 Tháng":
this.selectedYear = this.getCurrentYear();
this.selectedHalf = 1;
break;
default:
break;
}
}
//Function for EXTENTION -------------------------------------------------------------------------------
public loadLessonsPage() {
// this.dataSource;
// this.setPage(1);
}
public unicodeToAZ(str: string) {
str = str.replace(/à|á|ạ|ả|ã|â|ầ|ấ|ậ|ẩ|ẫ|ă|ằ|ắ|ặ|ẳ|ẵ/g, "a");
str = str.replace(/è|é|ẹ|ẻ|ẽ|ê|ề|ế|ệ|ể|ễ/g, "e");
str = str.replace(/ì|í|ị|ỉ|ĩ/g, "i");
str = str.replace(/ò|ó|ọ|ỏ|õ|ô|ồ|ố|ộ|ổ|ỗ|ơ|ờ|ớ|ợ|ở|ỡ/g, "o");
str = str.replace(/ù|ú|ụ|ủ|ũ|ư|ừ|ứ|ự|ử|ữ/g, "u");
str = str.replace(/ỳ|ý|ỵ|ỷ|ỹ/g, "y");
str = str.replace(/đ/g, "d");
return str;
}
public _normalizeValue(value: string): string {
return value.toLowerCase().replace(/\s/g, '');
}
public formatDateFromString(date: string) {
if (!date) {
return '';
}
return formatDate(date, this.FORMAT, this.LOCALE);
}
public formatString(value: string) {
if (!value) {
return '';
}
else if (value.trim().toLowerCase() === 'null') {
return '';
}
else {
return value.trim();
}
}
public getCurrentMonth() {
var currentDate = new Date();
return currentDate.getMonth() + 1;
}
public getCurrentYear() {
var currentDate = new Date();
return currentDate.getFullYear();
}
public getCurrentQuarter() {
let currentDate = new Date();
let month = currentDate.getMonth() + 1;
return month <= 3 ? 1 : month <= 6 ? 2 : month <= 9 ? 3 : 4;
}
public initialYears() {
let returnYear: Array<any> = [];
let currentDate = new Date();
let nextYear = currentDate.getFullYear() + 1;
for (let index = 0; index < 11; index++) {
returnYear.push(nextYear - index);
}
return returnYear;
}
// applyFilter(type: string, filterValue: string) {
// let newFilter = "";
// let checkAdded = false;
// if (this._currentFilter.length > 0) {
// let param = this._currentFilter.split(this.SEPERATE_FILTER);
// param.forEach(element => {
// if (element.length > 0) {
// let newValueFilter = "";
// let key = element.split("|")[0];
// if (type == key) {
// newValueFilter = key + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// checkAdded = true;
// }
// else {
// if (newFilter.length > 0) newFilter += ";" + element;
// else newFilter = element;
// }
// }
// });
// }
// if (!checkAdded) {
// let newValueFilter = type + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// }
// this._currentFilter = newFilter;
// // filterValue = type + '|' + filterValue;
// console.log(this._currentFilter);
// this.dataSource.filter = this._currentFilter;
// }
// removecompany(key: string) {
// console.log(key);
// }
// addFavourite(company: Company) {
// console.log(company);
// }
// addToCart(company: Company) {
// console.log(company);
// }
}
| valueOfYear = this.selectedYear;
console.log(valueOfPeriod, | conditional_block |
business-export-import.component.ts | //Import library
import { Component, ViewChild, ElementRef, OnInit, AfterViewInit } from '@angular/core';
import * as XLSX from 'xlsx';
import { MatTableDataSource } from '@angular/material/table';
import { MatPaginator } from '@angular/material/paginator';
import { Router } from '@angular/router';
import { tap, startWith, map } from 'rxjs/operators';
import { MatTableFilter } from 'mat-table-filter';
import { Observable } from 'rxjs';
import { FormControl } from '@angular/forms';
//Import service
import { MarketService } from '../../../../_services/APIService/market.service';
import { PaginationService } from '../../../../_services/PaginationService';
import { PagerService } from 'src/app/_services/pagination.service';
//Import model
import { CompanyDetailModel, ProductModel, ImportExportValueModel } from '../../../../_models/APIModel/domestic-market.model';
import { CareerModel, DistrictModel } from 'src/app/_models/APIModel/domestic-market.model';
import { formatDate } from '@angular/common';
import { LoginService } from 'src/app/_services/APIService/login.service';
//Interface
interface HashTableNumber<T> {
[key: string]: T;
}
export class filterModel {
ten_doanh_nghiep: string = '';
ten_quan_huyen: string = '';
ten_nganh_nghe: string = '';
}
@Component({
selector: 'business-export-import',
templateUrl: './business-export-import.component.html',
styleUrls: ['../../manager_layout.scss'],
})
export class BusinessExportImportComponent implements OnInit {
//Declare variable for CONSTANT
public readonly SEPERATE_FILTER = ";";
public readonly FORMAT = 'dd/MM/yyyy';
public readonly LOCALE = 'en-GB';
public readonly DEFAULT_IMAGE: string = '../../../../assets/img/brandlogo/company_ph01.jpg';
public readonly DEFAULT_PERIOD = "6 Tháng";
//Declare variable for TS & HTML
public filterEntity;
public tempFilter;
public filterType: MatTableFilter;
public dataSource: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public dataSourceImport: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public selectedCategory: string = "Tất cả";
public selectedAddress: string = "Tất cả";
public selectedName: string;
public selected_Career: string = "";
public selectedType: string = "Dạng bảng";
public typeShow: number = 1;
public displayedColumns: string[] = ['index', 'ten_doanh_nghiep', 'mst', 'san_luong', 'gia_tri', 'chi_tiet_doanh_nghiep'];
public filteredCareerList: Observable<CareerModel[]>;
public addresses: Array<any> = [null];
public loading: boolean = false;
public types = ['Dạng thẻ', 'Dạng bảng'];
public page: number = 1;
public pager: any = {};
public selectedPeriod: string = "";
public periods = ["Tháng", "Quý", "6 Tháng", "Năm"];
public months: number[] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
public quarters: any[] = [
{ ma_so: 1, ma_chu: "I" },
{ ma_so: 2, ma_chu: "II" },
{ ma_so: 3, ma_chu: "III" },
{ ma_so: 4, ma_chu: "IV" },
];
public selectedHalf: number = 1;
public selectedMonth: number = 1;
public selectedQuarter: number = 0;
public selectedYear: number = 2020;
public years: Array<number> = [];
public halfs: number[] = [1];
//Declare variable for ONLY TS
public control = new FormControl();
public _marketService: MarketService;
public errorMessage: any;
public careerList: Array<CareerModel> = new Array<CareerModel>();
public districtList: Array<DistrictModel> = new Array<DistrictModel>();
public categories = [null];//['Tất cả', 'Hạt điều', 'Hạt tiêu', 'Hạt cà phê', 'Cao su'];
public pagedItems: any[];
public productList: any;
public isSCT: boolean = false;
//Viewchild
@ViewChild('TABLE1', { static: false }) table: ElementRef;
@ViewChild('scheduledOrdersPaginator', { static: true }) paginator: MatPaginator;
@ViewChild('scheduledOrdersPaginator1', { static: true }) paginator1: MatPaginator;
@ViewChild('selected_Career', { static: false }) careerEle: ElementRef;
constructor(
public marketService: MarketService,
public paginationService: PaginationService,
public _loginService: LoginService,
public router: Router,
) {
this._marketService = marketService;
}
ngOnInit(): void {
this.filterEntity = new CompanyDetailModel();
this.tempFilter = new CompanyDetailModel();
this.filterType = MatTableFilter.ANYWHERE;
this.selectedPeriod = this.DEFAULT_PERIOD;
this.selectedYear = this.getCurrentYear();
this.selectedMonth = this.getCurrentMonth();
this.years = this.initialYears();
this.isSCT = this._loginService.userValue.user_role < 3;
console.log(this._loginService.userValue);
this.getAllCompanyExport();
this.getAllCompanyImport();
this.getAllDistrict();
}
// ngAfterViewInit(): void {
// if (this.typeShow == 1)
// this.paginator.page
// .pipe(
// tap(() => this.loadLessonsPage())
// )
// .subscribe();
// }
//Function for PROCESS-FLOW -------------------------------------------------------------------------------
public getAllDistrict() {
console.log("+ Fun | istrict()");
this._marketService.GetAllDistrict().subscribe(
allrecords => {
this.districtList = allrecords.data as DistrictModel[];
this.districtList.forEach(element => this.addresses.push(element.ten_quan_huyen));
});
}
public getAllNganhNghe() {
console.log("+ Function: GetAllNganhNghe()");
this._marketService.GetAllCareer().subscribe(
allrecords => {
this.careerList = allrecords.data as CareerModel[];
this.careerList.forEach(element => element.ma_nganh_nghe.length > 3 ? this.categories.push(element.ten_kem_ma) : 0);
});
this.filteredCareerList = this.control.valueChanges.pipe(
startWith(''),
map(value => this._filter(value))
);
}
public getAllCompanyImport() {
console.log("+ Function: getAllCompanyImport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyImport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSourceImport = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSourceImport.data.length) {
this.dataSourceImport.paginator = this.paginator1;
this.paginator1._intl.itemsPerPageLabel = "Số hàng";
this.paginator1._intl.firstPageLabel = "Trang Đầu";
this.paginator1._intl.lastPageLabel = "Trang Cuối";
this.paginator1._intl.previousPageLabel = "Trang Trước";
this.paginator1._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllCompanyExport() {
console.log("+ Function: getAllCompanyExport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyExport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSource = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSource.data.length) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllProduct(allrecords) {
console.log("+ Function: GetAllProduct");
this.productList = allrecords.data as Array<ProductModel>;
if (this.typeShow == 1) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
//Function for EVENT HTML -------------------------------------------------------------------------------
public timKiem() {
this.getAllCompanyExport();
this.getAllCompanyImport();
}
//Export to Excel
public exportToExcel(filename: string, sheetname: string, is_export: boolean) {
let excelFileName: string;
let newArray: any[] = [];
//Format the name of the Excel file to be exported
sheetname = sheetname.replace('/', '_');
excelFileName = filename + '.xlsx';
//Alias column name
let data;
if (is_export)
data = Object.values(this.dataSource.data);
else
data = Object.values(this.dataSourceImport.data);
Object.keys(data).forEach((key, index) => {
newArray.push({
'Tên doanh nghiệp': this.formatString(data[key].ten_doanh_nghiep),
// 'Điện thoại': this.formatString(data[key].dien_thoai),
'Mã số thuế': data[key].mst,
'Sản lượng': data[key].tong_san_luong,
'Trị giá': data[key].tong_tri_gia
});
});
const ws: XLSX.WorkSheet = XLSX.utils.json_to_sheet(newArray);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
/* save to file */
XLSX.utils.book_append_sheet(wb, ws, sheetname);
XLSX.writeFile(wb, excelFileName);
}
public _filter(value: string): CareerModel[] {
const filterValue = this._normalizeValue(value);
return this.careerList.filter(career => this._normalizeValue(career.ten_kem_ma).includes(filterValue));
}
public openDetailCompany(mst: string) {
let url = this.router.serializeUrl(
this.router.createUrlTree([encodeURI('#') + 'manager/business/search/' + mst]));
window.open(url.replace('%23', '#'), "_blank");
}
public changeType() {
if (this.selectedType == this.types[0]) {
this.typeShow = 0;
}
else {
this.typeShow = 1;
//this.ngAfterViewInit();
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
public filter() {
this.filterEntity = { ...this.tempFilter };
}
public cancel() {
this.tempFilter = new filterModel();
this.filterEntity = { ...this.tempFilter };
}
changePeriod() {
switch (this.selectedPeriod) {
case "Tháng":
this.selectedMonth = this.getCurrentMonth();
this.selectedYear = this.getCurrentYear();
break;
case "Quý":
this.selectedQuarter = this.getCurrentQuarter();
this.selectedYear = this.getCurrentYear();
break;
case "Năm":
this.selectedYear = this.getCurrentYear();
break;
case "6 Tháng":
this.selectedYear = this.getCurrentYear();
this.selectedHalf = 1;
break;
default:
break;
}
}
//Function for EXTENSION -------------------------------------------------------------------------------
public loadLessonsPage() {
// this.dataSource;
// this.setPage(1);
}
public unicodeToAZ(str: string) {
str = str.replace(/à|á|ạ|ả|ã|â|ầ|ấ|ậ|ẩ|ẫ|ă|ằ|ắ|ặ|ẳ|ẵ/g, "a");
str = str.replace(/è|é|ẹ|ẻ|ẽ|ê|ề|ế|ệ|ể|ễ/g, "e");
str = str.replace(/ì|í|ị|ỉ|ĩ/g, "i");
str = str.replace(/ò|ó|ọ|ỏ|õ|ô|ồ|ố|ộ|ổ|ỗ|ơ|ờ|ớ|ợ|ở|ỡ/g, "o");
str = str.replace(/ù|ú|ụ|ủ|ũ|ư|ừ|ứ|ự|ử|ữ/g, "u");
str = str.replace(/ỳ|ý|ỵ|ỷ|ỹ/g, "y");
str = str.replace(/đ/g, "d");
return str;
}
public _normalizeValue(value: string): string {
return value.toLowerCase().replace(/\s/g, '');
}
public formatDateFromString(date: string) {
if (!date) {
return '';
}
return formatDate(date, this.FORMAT, this.LOCALE);
}
public formatString(value: string) {
if (!value) {
return '';
}
else if (value.trim().toLowerCase() === 'null') {
return '';
}
else {
return value.trim();
}
}
public getCurrentMonth() {
var currentDate = new Date();
return currentDate.getMonth() + 1;
}
public getCurrentYear() {
var currentDate = new Date();
return currentDate.getFullYear();
}
public getCurrentQuarter() {
let currentDate = new Date();
let month = currentDate.getMonth() + 1;
return month <= 3 ? 1 : month <= 6 ? 2 : month <= 9 ? 3 : 4;
}
public initialYears() {
let returnYear: Array<any> = [];
let currentDate = new Date();
let nextYear = currentDate.getFullYear() + 1;
for (let index = 0; index < 11; index++) {
returnYear.push(nextYear - index);
}
return returnYear;
}
// applyFilter(type: string, filterValue: string) {
// let newFilter = "";
// let checkAdded = false;
// if (this._currentFilter.length > 0) {
// let param = this._currentFilter.split(this.SEPERATE_FILTER);
// param.forEach(element => {
// if (element.length > 0) {
// let newValueFilter = "";
// let key = element.split("|")[0];
// if (type == key) {
// newValueFilter = key + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// checkAdded = true;
// }
// else {
// if (newFilter.length > 0) newFilter += ";" + element;
// else newFilter = element;
// }
// }
// });
// }
// if (!checkAdded) {
// let newValueFilter = type + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// }
// this._currentFilter = newFilter;
// // filterValue = type + '|' + filterValue;
// console.log(this._currentFilter);
// this.dataSource.filter = this._currentFilter;
// }
// removecompany(key: string) {
// console.log(key);
// }
// addFavourite(company: Company) {
// console.log(company);
// }
// addToCart(company: Company) {
// console.log(company);
// }
}
| ction: GetAllD | identifier_name |
business-export-import.component.ts | //Import library
import { Component, ViewChild, ElementRef, OnInit, AfterViewInit } from '@angular/core';
import * as XLSX from 'xlsx';
import { MatTableDataSource } from '@angular/material/table';
import { MatPaginator } from '@angular/material/paginator';
import { Router } from '@angular/router';
import { tap, startWith, map } from 'rxjs/operators';
import { MatTableFilter } from 'mat-table-filter';
import { Observable } from 'rxjs';
import { FormControl } from '@angular/forms';
//Import service
import { MarketService } from '../../../../_services/APIService/market.service';
import { PaginationService } from '../../../../_services/PaginationService';
import { PagerService } from 'src/app/_services/pagination.service';
//Import model
import { CompanyDetailModel, ProductModel, ImportExportValueModel } from '../../../../_models/APIModel/domestic-market.model';
import { CareerModel, DistrictModel } from 'src/app/_models/APIModel/domestic-market.model';
import { formatDate } from '@angular/common';
import { LoginService } from 'src/app/_services/APIService/login.service';
//Interface
interface HashTableNumber<T> {
[key: string]: T;
}
export class filterModel {
ten_doanh_nghiep: string = '';
ten_quan_huyen: string = '';
ten_nganh_nghe: string = '';
}
@Component({
selector: 'business-export-import',
templateUrl: './business-export-import.component.html',
styleUrls: ['../../manager_layout.scss'],
})
export class BusinessExportImportComponent implements OnInit {
//Declare variable for CONSTANT
public readonly SEPERATE_FILTER = ";";
public readonly FORMAT = 'dd/MM/yyyy';
public readonly LOCALE = 'en-GB';
public readonly DEFAULT_IMAGE: string = '../../../../assets/img/brandlogo/company_ph01.jpg';
public readonly DEFAULT_PERIOD = "6 Tháng";
//Declare variable for TS & HTML
public filterEntity;
public tempFilter;
public filterType: MatTableFilter;
public dataSource: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public dataSourceImport: MatTableDataSource<CompanyDetailModel> = new MatTableDataSource();
public selectedCategory: string = "Tất cả";
public selectedAddress: string = "Tất cả";
public selectedName: string;
public selected_Career: string = "";
public selectedType: string = "Dạng bảng";
public typeShow: number = 1;
public displayedColumns: string[] = ['index', 'ten_doanh_nghiep', 'mst', 'san_luong', 'gia_tri', 'chi_tiet_doanh_nghiep'];
public filteredCareerList: Observable<CareerModel[]>;
public addresses: Array<any> = [null];
public loading: boolean = false;
public types = ['Dạng thẻ', 'Dạng bảng'];
public page: number = 1;
public pager: any = {};
public selectedPeriod: string = "";
public periods = ["Tháng", "Quý", "6 Tháng", "Năm"];
public months: number[] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
public quarters: any[] = [
{ ma_so: 1, ma_chu: "I" },
{ ma_so: 2, ma_chu: "II" },
{ ma_so: 3, ma_chu: "III" },
{ ma_so: 4, ma_chu: "IV" },
];
public selectedHalf: number = 1;
public selectedMonth: number = 1;
public selectedQuarter: number = 0;
public selectedYear: number = 2020;
public years: Array<number> = [];
public halfs: number[] = [1];
//Declare variables used only in TS
public control = new FormControl();
public _marketService: MarketService;
public errorMessage: any;
public careerList: Array<CareerModel> = new Array<CareerModel>();
public districtList: Array<DistrictModel> = new Array<DistrictModel>();
public categories = [null];//['Tất cả', 'Hạt điều', 'Hạt tiêu', 'Hạt cà phê', 'Cao su'];
public pagedItems: any[];
public productList: any;
public isSCT: boolean = false;
//Viewchild
@ViewChild('TABLE1', { static: false }) table: ElementRef;
@ViewChild('scheduledOrdersPaginator', { static: true }) paginator: MatPaginator;
@ViewChild('scheduledOrdersPaginator1', { static: true }) paginator1: MatPaginator;
@ViewChild('selected_Career', { static: false }) careerEle: ElementRef;
constructor(
public marketService: MarketService,
public paginationService: PaginationService,
public _loginService: LoginService,
public router: Router,
) {
this._marketService = marketService;
}
ngOnInit(): void {
this.filterEntity = new CompanyDetailModel();
this.tempFilter = new CompanyDetailModel();
this.filterType = MatTableFilter.ANYWHERE;
this.selectedPeriod = this.DEFAULT_PERIOD;
this.selectedYear = this.getCurrentYear();
this.selectedMonth = this.getCurrentMonth();
this.years = this.initialYears();
this.isSCT = this._loginService.userValue.user_role < 3;
console.log(this._loginService.userValue);
this.getAllCompanyExport();
this.getAllCompanyImport();
this.getAllDistrict();
}
// ngAfterViewInit(): void {
// if (this.typeShow == 1)
// this.paginator.page
// .pipe(
// tap(() => this.loadLessonsPage())
// )
// .subscribe();
// }
//Function for PROCESS-FLOW -------------------------------------------------------------------------------
public getAllDistrict() {
console.log("+ Function: GetAllDistrict()");
this._marketService.GetAllDistrict().subscribe(
allrecords => {
this.districtList = allrecords.data as DistrictModel[];
this.districtList.forEach(element => this.addresses.push(element.ten_quan_huyen));
});
}
public getAllNganhNghe() {
console.log("+ Function: GetAllNganhNghe()");
this._marketService.GetAllCareer().subscribe(
allrecords => {
this.careerList = allrecords.data as CareerModel[];
this.careerList.forEach(element => element.ma_nganh_nghe.length > 3 ? this.categories.push(element.ten_kem_ma) : 0);
});
this.filteredCareerList = this.control.valueChanges.pipe(
startWith(''),
map(value => this._filter(value))
);
}
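// Example: with _normalizeValue() (defined below) the autocomplete match is
// case- and whitespace-insensitive but still diacritic-sensitive, so typing
// "chế biến" matches a career named "Chế biến ..." while "che bien" does not;
// combine with unicodeToAZ() if accent-free search is wanted.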
public getAllCompanyImport() {
console.log("+ Function: getAllCompanyImport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"{
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyImport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSourceImport = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSourceImport.data.length) {
this.dataSourceImport.paginator = this.paginator1;
this.paginator1._intl.itemsPerPageLabel = "Số hàng";
this.paginator1._intl.firstPageLabel = "Trang Đầu";
this.paginator1._intl.lastPageLabel = "Trang Cuối";
this.paginator1._intl.previousPageLabel = "Trang Trước";
this.paginator1._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
public getAllCompanyExport() {
console.log("+ Function: getAllCompanyExport()");
let valueOfPeriod: number = 0;
let valueOfYear: number = 0;
let valueOfPeriodDetail: number = 0;
//
if (this.selectedPeriod == "Tháng") {//"Tháng", "Quý", "6 Tháng", "Năm"{
valueOfPeriod = 1;
valueOfPeriodDetail = this.selectedMonth;
} else if (this.selectedPeriod == 'Quý') {
valueOfPeriod = 2;
valueOfPeriodDetail = this.selectedQuarter;
} else if (this.selectedPeriod == '6 Tháng') {
valueOfPeriod = 3;
valueOfPeriodDetail = this.selectedHalf;
} else {
valueOfPeriod = 4;
valueOfPeriodDetail = 1;
}
valueOfYear = this.selectedYear;
console.log(valueOfPeriod, valueOfYear, valueOfPeriodDetail);
this._marketService.GetAllCompanyExport(valueOfPeriod, valueOfYear, valueOfPeriodDetail, this.isSCT).subscribe(
allrecords => {
if (allrecords.data.length > 0) {
this.dataSource = new MatTableDataSource<CompanyDetailModel>(allrecords.data[0]);
if (this.dataSource.data.length) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
},
error => this.errorMessage = <any>error
);
}
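// The period-to-code mapping above is duplicated between getAllCompanyImport()
// and getAllCompanyExport(). A minimal sketch of how it could be factored out.
// This is a hypothetical helper, not part of the original component; the codes
// 1-4 are taken verbatim from the branches above:
private resolvePeriod(): { valueOfPeriod: number, valueOfPeriodDetail: number } {
if (this.selectedPeriod == "Tháng") return { valueOfPeriod: 1, valueOfPeriodDetail: this.selectedMonth };
if (this.selectedPeriod == 'Quý') return { valueOfPeriod: 2, valueOfPeriodDetail: this.selectedQuarter };
if (this.selectedPeriod == '6 Tháng') return { valueOfPeriod: 3, valueOfPeriodDetail: this.selectedHalf };
return { valueOfPeriod: 4, valueOfPeriodDetail: 1 };
}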
public getAllProduct(allrecords) {
console.log("+ Function: GetAllProduct");
this.productList = allrecords.data as Array<ProductModel>;
if (this.typeShow == 1) {
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
//Function for EVENT HTML -------------------------------------------------------------------------------
public timKiem() {
this.getAllCompanyExport();
this.getAllCompanyImport();
}
//Export to Excel
public exportToExcel(filename: string, sheetname: string, is_export: boolean) {
let excelFileName: string;
let newArray: any[] = [];
//Format the name of the Excel file to be exported
sheetname = sheetname.replace('/', '_');
excelFileName = filename + '.xlsx';
//Alias column name
let data;
if (is_export)
data = Object.values(this.dataSource.data);
else
data = Object.values(this.dataSourceImport.data);
Object.keys(data).forEach((key, index) => {
newArray.push({
'Tên doanh nghiệp': this.formatString(data[key].ten_doanh_nghiep),
// 'Điện thoại': this.formatString(data[key].dien_thoai),
'Mã số thuế': data[key].mst,
'Sản lượng': data[key].tong_san_luong,
'Trị giá': data[key].tong_tri_gia
});
});
const ws: XLSX.WorkSheet = XLSX.utils.json_to_sheet(newArray);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
/* save to file */
XLSX.utils.book_append_sheet(wb, ws, sheetname);
XLSX.writeFile(wb, excelFileName);
}
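// Usage sketch (hypothetical file and sheet names): the single replace() above
// only swaps the first '/' because Excel forbids '/' in sheet names, so
//   this.exportToExcel('DN_XuatKhau', '6 Tháng/2021', true);
// writes "DN_XuatKhau.xlsx" with a sheet named "6 Tháng_2021" built from
// this.dataSource.data (is_export = true selects the export table).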
public _filter(value: string): CareerModel[] {
const filterValue = this._normalizeValue(value);
return this.careerList.filter(career => this._normalizeValue(career.ten_kem_ma).includes(filterValue));
}
public openDetailCompany(mst: string) {
let url = this.router.serializeUrl(
this.router.createUrlTree([encodeURI('#') + 'manager/business/search/' + mst]));
window.open(url.replace('%23', '#'), "_blank");
}
public changeType() {
if (this.selectedType == this.types[0]) {
this.typeShow = 0;
}
else {
this.typeShow = 1;
//this.ngAfterViewInit();
this.dataSource.paginator = this.paginator;
this.paginator._intl.itemsPerPageLabel = "Số hàng";
this.paginator._intl.firstPageLabel = "Trang Đầu";
this.paginator._intl.lastPageLabel = "Trang Cuối";
this.paginator._intl.previousPageLabel = "Trang Trước";
this.paginator._intl.nextPageLabel = "Trang Tiếp";
}
}
public filter() {
this.filterEntity = { ...this.tempFilter };
}
public cancel() {
this.tempFilter = new filterModel();
this.filterEntity = { ...this.tempFilter };
}
changePeriod() {
switch (this.selectedPeriod) {
case "Tháng":
this.selectedMonth = this.getCurrentMonth();
this.selectedYear = this.getCurrentYear();
break;
case "Quý":
this.selectedQuarter = this.getCurrentQuarter(); | this.selectedYear = this.getCurrentYear();
break;
case "Năm":
this.selectedYear = this.getCurrentYear();
break;
case "6 Tháng":
this.selectedYear = this.getCurrentYear();
this.selectedHalf = 1;
break;
default:
break;
}
}
//Function for EXTENSION -------------------------------------------------------------------------------
public loadLessonsPage() {
// this.dataSource;
// this.setPage(1);
}
public unicodeToAZ(str: string) {
str = str.replace(/à|á|ạ|ả|ã|â|ầ|ấ|ậ|ẩ|ẫ|ă|ằ|ắ|ặ|ẳ|ẵ/g, "a");
str = str.replace(/è|é|ẹ|ẻ|ẽ|ê|ề|ế|ệ|ể|ễ/g, "e");
str = str.replace(/ì|í|ị|ỉ|ĩ/g, "i");
str = str.replace(/ò|ó|ọ|ỏ|õ|ô|ồ|ố|ộ|ổ|ỗ|ơ|ờ|ớ|ợ|ở|ỡ/g, "o");
str = str.replace(/ù|ú|ụ|ủ|ũ|ư|ừ|ứ|ự|ử|ữ/g, "u");
str = str.replace(/ỳ|ý|ỵ|ỷ|ỹ/g, "y");
str = str.replace(/đ/g, "d");
return str;
}
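// Illustrative example: this.unicodeToAZ("xuất nhập khẩu") returns "xuat nhap khau".
// Note the mapping only covers lowercase forms (including lowercase "đ"), so
// callers should lowercase the input first if uppercase diacritics can occur.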
public _normalizeValue(value: string): string {
return value.toLowerCase().replace(/\s/g, '');
}
public formatDateFromString(date: string) {
if (!date) {
return '';
}
return formatDate(date, this.FORMAT, this.LOCALE);
}
public formatString(value: string) {
if (!value) {
return '';
}
else if (value.trim().toLowerCase() === 'null') {
return '';
}
else {
return value.trim();
}
}
public getCurrentMonth() {
var currentDate = new Date();
return currentDate.getMonth() + 1;
}
public getCurrentYear() {
var currentDate = new Date();
return currentDate.getFullYear();
}
public getCurrentQuarter() {
let currentDate = new Date();
let month = currentDate.getMonth() + 1;
return month <= 3 ? 1 : month <= 6 ? 2 : month <= 9 ? 3 : 4;
}
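// Worked example: in July the month value is 7, which falls through the first
// two buckets (7 > 3, 7 > 6) and matches month <= 9, so getCurrentQuarter()
// returns 3.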
public initialYears() {
let returnYear: Array<any> = [];
let currentDate = new Date();
let nextYear = currentDate.getFullYear() + 1;
for (let index = 0; index < 11; index++) {
returnYear.push(nextYear - index);
}
return returnYear;
}
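// Example (assuming the current year is 2021): nextYear is 2022 and the loop
// pushes eleven entries, so initialYears() returns [2022, 2021, ..., 2012],
// next year first and then ten years back.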
// applyFilter(type: string, filterValue: string) {
// let newFilter = "";
// let checkAdded = false;
// if (this._currentFilter.length > 0) {
// let param = this._currentFilter.split(this.SEPERATE_FILTER);
// param.forEach(element => {
// if (element.length > 0) {
// let newValueFilter = "";
// let key = element.split("|")[0];
// if (type == key) {
// newValueFilter = key + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// checkAdded = true;
// }
// else {
// if (newFilter.length > 0) newFilter += ";" + element;
// else newFilter = element;
// }
// }
// });
// }
// if (!checkAdded) {
// let newValueFilter = type + "|" + filterValue;
// if (newFilter.length > 0) newFilter += ";" + newValueFilter;
// else newFilter = newValueFilter;
// }
// this._currentFilter = newFilter;
// // filterValue = type + '|' + filterValue;
// console.log(this._currentFilter);
// this.dataSource.filter = this._currentFilter;
// }
// removecompany(key: string) {
// console.log(key);
// }
// addFavourite(company: Company) {
// console.log(company);
// }
// addToCart(company: Company) {
// console.log(company);
// }
} | random_line_split | |
merge_queryable.go | package tenantfederation
import (
"context"
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/weaveworks/common/user"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util/concurrency"
)
const (
defaultTenantLabel = "__tenant_id__"
retainExistingPrefix = "original_"
originalDefaultTenantLabel = retainExistingPrefix + defaultTenantLabel
maxConcurrency = 16
)
// NewQueryable returns a queryable that iterates through all the tenant IDs
// that are part of the request and aggregates the results from each tenant's
// Querier by sending subsequent requests.
// The result contains a label tenantLabelName to identify the tenant ID that
// it originally resulted from.
// If the label tenantLabelName already exists, its value is overwritten by the
// tenant ID and the previous value is exposed through a new label prefixed
// with "original_". This behaviour is not implemented recursively.
func NewQueryable(upstream storage.Queryable) storage.Queryable {
return &mergeQueryable{
upstream: upstream,
}
}
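// Usage sketch: this assumes Cortex's convention that a federated request
// carries several tenant IDs in one org ID, separated by "|"; the wiring below
// is illustrative and not part of this file.
//
//	q := NewQueryable(upstream)
//	ctx := user.InjectOrgID(context.Background(), "team-a|team-b")
//	querier, err := q.Querier(ctx, mint, maxt) // one upstream querier per tenant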
type mergeQueryable struct {
upstream storage.Queryable
}
// Querier returns a new mergeQuerier, which aggregates results from multiple
// tenants into a single result.
func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) {
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, err
}
if len(tenantIDs) <= 1 {
return m.upstream.Querier(ctx, mint, maxt)
}
var queriers = make([]storage.Querier, len(tenantIDs))
for pos, tenantID := range tenantIDs {
q, err := m.upstream.Querier(
user.InjectOrgID(ctx, tenantID),
mint,
maxt,
)
if err != nil {
return nil, err
}
queriers[pos] = q
}
return &mergeQuerier{
ctx: ctx,
queriers: queriers,
tenantIDs: tenantIDs,
}, nil
}
// mergeQuerier aggregates the results from underlying queriers and adds a
// label tenantLabelName to identify the tenant ID that the metric resulted
// from.
// If the label tenantLabelName already exists, its value is overwritten by the
// tenant ID and the previous value is exposed through a new label prefixed
// with "original_". This behaviour is not implemented recursively.
type mergeQuerier struct {
ctx context.Context
queriers []storage.Querier
tenantIDs []string
}
// LabelValues returns all potential values for a label name.
// It is not safe to use the strings beyond the lifetime of the querier.
// For the label "tenantLabelName" it will return all the tenant IDs available.
// For the label "original_" + tenantLabelName it will return all the values
// of the underlying queriers for tenantLabelName.
func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
if name == defaultTenantLabel {
return m.tenantIDs, nil, nil
}
// ensure the name of a retained tenant id label gets handled under the
// original label name
if name == originalDefaultTenantLabel {
name = defaultTenantLabel
}
return m.mergeDistinctStringSlice(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
return q.LabelValues(name, matchers...)
})
}
// LabelNames returns all the unique label names present in the underlying
// queriers. It also adds the defaultTenantLabel and, if present in the
// original results, the originalDefaultTenantLabel.
func (m *mergeQuerier) LabelNames() ([]string, storage.Warnings, error) {
labelNames, warnings, err := m.mergeDistinctStringSlice(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
return q.LabelNames()
})
if err != nil {
return nil, nil, err
}
// check if the tenant label exists in the original result
var tenantLabelExists bool
labelPos := sort.SearchStrings(labelNames, defaultTenantLabel)
if labelPos < len(labelNames) && labelNames[labelPos] == defaultTenantLabel {
tenantLabelExists = true
}
labelToAdd := defaultTenantLabel
// if defaultTenantLabel already exists, we need to add the
// originalDefaultTenantLabel
if tenantLabelExists {
labelToAdd = originalDefaultTenantLabel
labelPos = sort.SearchStrings(labelNames, labelToAdd)
}
// insert label at the correct position
labelNames = append(labelNames, "")
copy(labelNames[labelPos+1:], labelNames[labelPos:])
labelNames[labelPos] = labelToAdd
return labelNames, warnings, nil
}
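// Worked example: if the merged, sorted label names are
// ["__tenant_id__", "instance", "job"], the tenant label already exists, so
// "original___tenant_id__" is inserted at its sorted position instead,
// yielding ["__tenant_id__", "instance", "job", "original___tenant_id__"].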
type stringSliceFunc func(context.Context, storage.Querier) ([]string, storage.Warnings, error)
type stringSliceFuncJob struct {
querier storage.Querier
tenantID string
result []string
warnings storage.Warnings
}
// mergeDistinctStringSlice aggregates the results of stringSliceFunc calls,
// one call per querier, run in parallel. It removes duplicates and sorts the
// result. It doesn't require the output of the stringSliceFunc to be sorted,
// as results of LabelValues are not sorted.
func (m *mergeQuerier) mergeDistinctStringSlice(f stringSliceFunc) ([]string, storage.Warnings, error) {
var jobs = make([]interface{}, len(m.tenantIDs))
for pos := range m.tenantIDs {
jobs[pos] = &stringSliceFuncJob{
querier: m.queriers[pos],
tenantID: m.tenantIDs[pos],
}
}
run := func(ctx context.Context, jobIntf interface{}) error {
job, ok := jobIntf.(*stringSliceFuncJob)
if !ok {
return fmt.Errorf("unexpected type %T", jobIntf)
}
var err error
job.result, job.warnings, err = f(ctx, job.querier)
if err != nil {
return errors.Wrapf(err, "error querying %s %s", rewriteLabelName(defaultTenantLabel), job.tenantID)
}
return nil
}
err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)
if err != nil {
return nil, nil, err
}
// aggregate warnings and deduplicate string results
var warnings storage.Warnings
resultMap := make(map[string]struct{})
for _, jobIntf := range jobs {
job, ok := jobIntf.(*stringSliceFuncJob)
if !ok {
return nil, nil, fmt.Errorf("unexpected type %T", jobIntf)
}
for _, e := range job.result {
resultMap[e] = struct{}{}
}
for _, w := range job.warnings {
warnings = append(warnings, errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(defaultTenantLabel), job.tenantID))
}
}
var result = make([]string, 0, len(resultMap))
for e := range resultMap {
result = append(result, e)
}
sort.Strings(result)
return result, warnings, nil
}
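// Worked example: if tenant A reports label values ["job", "instance"] and
// tenant B reports ["instance", "region"], the merged result is the sorted,
// de-duplicated slice ["instance", "job", "region"], with any per-tenant
// warnings wrapped so they name the tenant they came from.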
// Close releases the resources of the Querier.
func (m *mergeQuerier) Close() error {
errs := tsdb_errors.NewMulti()
for pos, tenantID := range m.tenantIDs {
errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for %s %s", rewriteLabelName(defaultTenantLabel), tenantID))
}
return errs.Err()
}
type selectJob struct {
pos int
querier storage.Querier
tenantID string
}
// Select returns a set of series that matches the given label matchers. If
// tenantLabelName is matched on, only the matching queriers are considered.
// The forwarded matchers do not contain those that operate on
// tenantLabelName.
func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
matchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, m.tenantIDs, matchers...)
var jobs = make([]interface{}, len(matchedTenants))
var seriesSets = make([]storage.SeriesSet, len(matchedTenants))
var jobPos int
for tenantPos := range m.tenantIDs {
if _, matched := matchedTenants[m.tenantIDs[tenantPos]]; !matched {
continue
}
jobs[jobPos] = &selectJob{
pos: jobPos,
querier: m.queriers[tenantPos],
tenantID: m.tenantIDs[tenantPos],
}
jobPos++
}
run := func(ctx context.Context, jobIntf interface{}) error {
job, ok := jobIntf.(*selectJob)
if !ok {
return fmt.Errorf("unexpected type %T", jobIntf)
}
seriesSets[job.pos] = &addLabelsSeriesSet{
upstream: job.querier.Select(sortSeries, hints, filteredMatchers...),
labels: labels.Labels{
{
Name: defaultTenantLabel,
Value: job.tenantID,
},
},
}
return nil
}
err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)
if err != nil {
return storage.ErrSeriesSet(err)
}
return storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge)
}
// filterValuesByMatchers applies matchers to the given labelName and
// labelValues. It returns a map of the matched values together with all label
// matchers that do not match on the labelName.
// In case a label matcher is set on a label conflicting with tenantLabelName,
// we need to rename this labelMatcher's name to its original name. This is
// used as part of Select in the mergeQueryable, to ensure only relevant
// queriers are considered and the forwarded matchers do not contain matchers
// on the tenantLabelName.
func filterValuesByMatchers(labelName string, labelValues []string, matchers ...*labels.Matcher) (matchedValues map[string]struct{}, unrelatedMatchers []*labels.Matcher) {
// this contains the matchers which are not related to labelName
unrelatedMatchers = make([]*labels.Matcher, 0, len(matchers))
// build map of values to consider for the matchers
matchedValues = make(map[string]struct{}, len(labelValues))
for _, value := range labelValues {
matchedValues[value] = struct{}{}
}
for _, m := range matchers {
if m.Name != labelName {
// check if it has the retained label name
if m.Name == originalDefaultTenantLabel {
// rewrite label to the original name, by copying matcher and
// replacing the label name
rewrittenM := *m
rewrittenM.Name = labelName
unrelatedMatchers = append(unrelatedMatchers, &rewrittenM)
} else {
unrelatedMatchers = append(unrelatedMatchers, m)
}
continue
}
for value := range matchedValues {
if !m.Matches(value) {
delete(matchedValues, value)
}
}
}
return matchedValues, unrelatedMatchers
}
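// Worked example: with tenants ["team-a", "team-b"] and matchers
// {__tenant_id__="team-a", job="api"}, matchedValues comes back as {"team-a"}
// and unrelatedMatchers holds only {job="api"}, so Select fans out to team-a
// alone and never forwards the tenant matcher upstream.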
type addLabelsSeriesSet struct {
upstream storage.SeriesSet
labels labels.Labels
}
func (m *addLabelsSeriesSet) Next() bool {
return m.upstream.Next()
}
// At returns the full series. Returned series should be iterable even after Next is called.
func (m *addLabelsSeriesSet) At() storage.Series {
return &addLabelsSeries{
upstream: m.upstream.At(),
labels: m.labels,
}
}
// Err returns the error that iteration has failed with.
// When an error occurs, the set cannot continue to iterate.
func (m *addLabelsSeriesSet) Err() error {
return errors.Wrapf(m.upstream.Err(), "error querying %s", labelsToString(m.labels))
}
// Warnings returns a collection of warnings for the whole set.
// Warnings can be returned even if iteration has not failed with an error.
func (m *addLabelsSeriesSet) Warnings() storage.Warnings |
// rewrite label name to be more readable in error output
func rewriteLabelName(s string) string {
return strings.TrimRight(strings.TrimLeft(s, "_"), "_")
}
// this outputs a more readable error format
func labelsToString(labels labels.Labels) string {
parts := make([]string, len(labels))
for pos, l := range labels {
parts[pos] = rewriteLabelName(l.Name) + " " + l.Value
}
return strings.Join(parts, ", ")
}
type addLabelsSeries struct {
upstream storage.Series
labels labels.Labels
}
// Labels returns the complete set of labels. For series it means all labels identifying the series.
func (a *addLabelsSeries) Labels() labels.Labels {
return setLabelsRetainExisting(a.upstream.Labels(), a.labels...)
}
// Iterator returns a new, independent iterator of the data of the series.
func (a *addLabelsSeries) Iterator() chunkenc.Iterator {
return a.upstream.Iterator()
}
// this sets a label and preserves an existing value in a new label prefixed
// with original_. It doesn't do this recursively.
func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels {
lb := labels.NewBuilder(src)
for _, additionalL := range additionalLabels {
if oldValue := src.Get(additionalL.Name); oldValue != "" {
lb.Set(
retainExistingPrefix+additionalL.Name,
oldValue,
)
}
lb.Set(additionalL.Name, additionalL.Value)
}
return lb.Labels()
}
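// Worked example: an upstream series {__tenant_id__="prod", job="api"} merged
// under tenant "team-a" comes out as
// {__tenant_id__="team-a", job="api", original___tenant_id__="prod"}: the
// tenant ID wins and the clashing original value is kept once, not recursively.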
| {
upstream := m.upstream.Warnings()
warnings := make(storage.Warnings, len(upstream))
for pos := range upstream {
warnings[pos] = errors.Wrapf(upstream[pos], "warning querying %s", labelsToString(m.labels))
}
return warnings
} | identifier_body |
merge_queryable.go | package tenantfederation
import (
"context"
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/weaveworks/common/user"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util/concurrency"
)
const (
defaultTenantLabel = "__tenant_id__"
retainExistingPrefix = "original_"
originalDefaultTenantLabel = retainExistingPrefix + defaultTenantLabel
maxConcurrency = 16
)
// NewQueryable returns a queryable that iterates through all the tenant IDs
// that are part of the request and aggregates the results from each tenant's
// Querier by sending subsequent requests.
// The result contains a label tenantLabelName to identify the tenant ID that
// it originally resulted from.
// If the label tenantLabelName already exists, its value is overwritten by the
// tenant ID and the previous value is exposed through a new label prefixed
// with "original_". This behaviour is not implemented recursively.
func NewQueryable(upstream storage.Queryable) storage.Queryable {
return &mergeQueryable{
upstream: upstream,
}
}
type mergeQueryable struct {
upstream storage.Queryable
}
// Querier returns a new mergeQuerier, which aggregates results from multiple
// tenants into a single result.
func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) {
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, err
}
if len(tenantIDs) <= 1 {
return m.upstream.Querier(ctx, mint, maxt)
}
var queriers = make([]storage.Querier, len(tenantIDs))
for pos, tenantID := range tenantIDs {
q, err := m.upstream.Querier(
user.InjectOrgID(ctx, tenantID),
mint,
maxt,
)
if err != nil {
return nil, err
}
queriers[pos] = q
}
return &mergeQuerier{
ctx: ctx,
queriers: queriers,
tenantIDs: tenantIDs,
}, nil
}
// mergeQuerier aggregates the results from underlying queriers and adds a
// label tenantLabelName to identify the tenant ID that the metric resulted
// from.
// If the label tenantLabelName already exists, its value is overwritten by the
// tenant ID and the previous value is exposed through a new label prefixed
// with "original_". This behaviour is not implemented recursively.
type mergeQuerier struct {
ctx context.Context
queriers []storage.Querier
tenantIDs []string
}
// LabelValues returns all potential values for a label name.
// It is not safe to use the strings beyond the lifetime of the querier.
// For the label "tenantLabelName" it will return all the tenant IDs available.
// For the label "original_" + tenantLabelName it will return all the values
// of the underlying queriers for tenantLabelName.
func (m *mergeQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) {
if name == defaultTenantLabel {
return m.tenantIDs, nil, nil
}
// ensure the name of a retained tenant id label gets handled under the
// original label name
if name == originalDefaultTenantLabel {
name = defaultTenantLabel
}
return m.mergeDistinctStringSlice(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
return q.LabelValues(name, matchers...)
})
}
// LabelNames returns all the unique label names present in the underlying
// queriers. It also adds the defaultTenantLabel and if present in the original
// results the originalDefaultTenantLabel
func (m *mergeQuerier) LabelNames() ([]string, storage.Warnings, error) {
labelNames, warnings, err := m.mergeDistinctStringSlice(func(ctx context.Context, q storage.Querier) ([]string, storage.Warnings, error) {
return q.LabelNames()
})
if err != nil {
return nil, nil, err
}
// check if the tenant label exists in the original result
var tenantLabelExists bool
labelPos := sort.SearchStrings(labelNames, defaultTenantLabel)
if labelPos < len(labelNames) && labelNames[labelPos] == defaultTenantLabel {
tenantLabelExists = true
}
labelToAdd := defaultTenantLabel
// if defaultTenantLabel already exists, we need to add the
// originalDefaultTenantLabel
if tenantLabelExists {
labelToAdd = originalDefaultTenantLabel
labelPos = sort.SearchStrings(labelNames, labelToAdd)
}
// insert label at the correct position
labelNames = append(labelNames, "")
copy(labelNames[labelPos+1:], labelNames[labelPos:])
labelNames[labelPos] = labelToAdd
return labelNames, warnings, nil
}
type stringSliceFunc func(context.Context, storage.Querier) ([]string, storage.Warnings, error)
type stringSliceFuncJob struct {
querier storage.Querier
tenantID string
result []string
warnings storage.Warnings
}
// mergeDistinctStringSlice aggregates results from stringSliceFunc calls run
// per querier in parallel. It removes duplicates and sorts the result. It
// doesn't require the output of the stringSliceFunc to be sorted, as results
// of LabelValues are not sorted.
func (m *mergeQuerier) mergeDistinctStringSlice(f stringSliceFunc) ([]string, storage.Warnings, error) {
var jobs = make([]interface{}, len(m.tenantIDs))
for pos := range m.tenantIDs {
jobs[pos] = &stringSliceFuncJob{
querier: m.queriers[pos],
tenantID: m.tenantIDs[pos],
}
}
run := func(ctx context.Context, jobIntf interface{}) error {
job, ok := jobIntf.(*stringSliceFuncJob)
if !ok {
return fmt.Errorf("unexpected type %T", jobIntf)
}
var err error
job.result, job.warnings, err = f(ctx, job.querier)
if err != nil {
return errors.Wrapf(err, "error querying %s %s", rewriteLabelName(defaultTenantLabel), job.tenantID)
}
return nil
}
err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)
if err != nil {
return nil, nil, err
}
// aggregate warnings and deduplicate string results
var warnings storage.Warnings
resultMap := make(map[string]struct{})
for _, jobIntf := range jobs {
job, ok := jobIntf.(*stringSliceFuncJob)
if !ok {
return nil, nil, fmt.Errorf("unexpected type %T", jobIntf)
}
for _, e := range job.result {
resultMap[e] = struct{}{}
}
for _, w := range job.warnings {
warnings = append(warnings, errors.Wrapf(w, "warning querying %s %s", rewriteLabelName(defaultTenantLabel), job.tenantID))
}
}
var result = make([]string, 0, len(resultMap))
for e := range resultMap {
result = append(result, e)
}
sort.Strings(result)
return result, warnings, nil
}
// Close releases the resources of the Querier.
func (m *mergeQuerier) Close() error {
errs := tsdb_errors.NewMulti()
for pos, tenantID := range m.tenantIDs |
return errs.Err()
}
type selectJob struct {
pos int
querier storage.Querier
tenantID string
}
// Select returns a set of series that matches the given label matchers. If
// tenantLabelName is matched on, only the matching queriers are considered. The
// forwarded label matchers do not contain those that operate on
// tenantLabelName.
func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
matchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, m.tenantIDs, matchers...)
var jobs = make([]interface{}, len(matchedTenants))
var seriesSets = make([]storage.SeriesSet, len(matchedTenants))
var jobPos int
for tenantPos := range m.tenantIDs {
if _, matched := matchedTenants[m.tenantIDs[tenantPos]]; !matched {
continue
}
jobs[jobPos] = &selectJob{
pos: jobPos,
querier: m.queriers[tenantPos],
tenantID: m.tenantIDs[tenantPos],
}
jobPos++
}
run := func(ctx context.Context, jobIntf interface{}) error {
job, ok := jobIntf.(*selectJob)
if !ok {
return fmt.Errorf("unexpected type %T", jobIntf)
}
seriesSets[job.pos] = &addLabelsSeriesSet{
upstream: job.querier.Select(sortSeries, hints, filteredMatchers...),
labels: labels.Labels{
{
Name: defaultTenantLabel,
Value: job.tenantID,
},
},
}
return nil
}
err := concurrency.ForEach(m.ctx, jobs, maxConcurrency, run)
if err != nil {
return storage.ErrSeriesSet(err)
}
return storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge)
}
// filterValuesByMatchers applies matchers to the input labelName and
// labelValues. A map of matched values is returned, along with all label
// matchers that do not match the labelName.
// In case a label matcher is set on a label conflicting with tenantLabelName,
// we need to rename this labelMatcher's name to its original name. This is
// used as part of Select in the mergeQueryable, to ensure only relevant
// queries are considered and the forwarded matchers do not contain matchers on
// the tenantLabelName.
func filterValuesByMatchers(labelName string, labelValues []string, matchers ...*labels.Matcher) (matchedValues map[string]struct{}, unrelatedMatchers []*labels.Matcher) {
// this contains the matchers which are not related to labelName
unrelatedMatchers = make([]*labels.Matcher, 0, len(matchers))
// build map of values to consider for the matchers
matchedValues = make(map[string]struct{}, len(labelValues))
for _, value := range labelValues {
matchedValues[value] = struct{}{}
}
for _, m := range matchers {
if m.Name != labelName {
// check if it has the retained label name
if m.Name == originalDefaultTenantLabel {
// rewrite label to the original name, by copying matcher and
// replacing the label name
rewrittenM := *m
rewrittenM.Name = labelName
unrelatedMatchers = append(unrelatedMatchers, &rewrittenM)
} else {
unrelatedMatchers = append(unrelatedMatchers, m)
}
continue
}
for value := range matchedValues {
if !m.Matches(value) {
delete(matchedValues, value)
}
}
}
return matchedValues, unrelatedMatchers
}
type addLabelsSeriesSet struct {
upstream storage.SeriesSet
labels labels.Labels
}
func (m *addLabelsSeriesSet) Next() bool {
return m.upstream.Next()
}
// At returns the full series. The returned series should be iterable even after Next is called.
func (m *addLabelsSeriesSet) At() storage.Series {
return &addLabelsSeries{
upstream: m.upstream.At(),
labels: m.labels,
}
}
// Err returns the error that iteration has failed with.
// When an error occurs, the set cannot continue to iterate.
func (m *addLabelsSeriesSet) Err() error {
return errors.Wrapf(m.upstream.Err(), "error querying %s", labelsToString(m.labels))
}
// A collection of warnings for the whole set.
// Warnings could be returned even if iteration has not failed with an error.
func (m *addLabelsSeriesSet) Warnings() storage.Warnings {
upstream := m.upstream.Warnings()
warnings := make(storage.Warnings, len(upstream))
for pos := range upstream {
warnings[pos] = errors.Wrapf(upstream[pos], "warning querying %s", labelsToString(m.labels))
}
return warnings
}
// rewrite label name to be more readable in error output
func rewriteLabelName(s string) string {
return strings.TrimRight(strings.TrimLeft(s, "_"), "_")
}
// labelsToString renders the labels in a more readable format for error output
func labelsToString(labels labels.Labels) string {
parts := make([]string, len(labels))
for pos, l := range labels {
parts[pos] = rewriteLabelName(l.Name) + " " + l.Value
}
return strings.Join(parts, ", ")
}
type addLabelsSeries struct {
upstream storage.Series
labels labels.Labels
}
// Labels returns the complete set of labels. For series it means all labels identifying the series.
func (a *addLabelsSeries) Labels() labels.Labels {
return setLabelsRetainExisting(a.upstream.Labels(), a.labels...)
}
// Iterator returns a new, independent iterator of the data of the series.
func (a *addLabelsSeries) Iterator() chunkenc.Iterator {
return a.upstream.Iterator()
}
// setLabelsRetainExisting sets the given labels and preserves any existing
// value in a new label prefixed with original_. It doesn't do this recursively.
func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels {
lb := labels.NewBuilder(src)
for _, additionalL := range additionalLabels {
if oldValue := src.Get(additionalL.Name); oldValue != "" {
lb.Set(
retainExistingPrefix+additionalL.Name,
oldValue,
)
}
lb.Set(additionalL.Name, additionalL.Value)
}
return lb.Labels()
}
| {
errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for %s %s", rewriteLabelName(defaultTenantLabel), tenantID))
} | conditional_block |
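# --- Illustrative aside (not part of the records above/below): a minimal Python
# sketch, assuming it mirrors the label-retention semantics documented for
# setLabelsRetainExisting: an existing value is kept under an "original_"
# prefix before being overwritten, and the rule is not applied recursively.
def set_labels_retain_existing(src, additional):
    out = dict(src)
    for name, value in additional.items():
        if out.get(name, "") != "":
            out["original_" + name] = out[name]  # retain the old value once
        out[name] = value  # overwrite with the new value, e.g. the tenant ID
    return out

# set_labels_retain_existing({"__tenant_id__": "a"}, {"__tenant_id__": "b"})
# -> {"__tenant_id__": "b", "original___tenant_id__": "a"}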
model.py | # Imports here
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from workspace_utils import active_session
from PIL import Image
import sys
import os
import json
# Define class for flower prediction model
class FlowerPredictionModel:
# Model class constructor
def __init__(self, gpu):
''' Initialise model object
'''
# Set device
if (gpu):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = "cpu"
# Function to build model
def | (self, arch, learning_rate, hidden_units):
''' Function to build model
'''
print('Building model...')
# Select & load pre-trained model
try:
arch = arch.lower()
self.model = models.__dict__[arch](pretrained=True)
self.arch = arch
except:
print("Model " + arch + " not recognised: please refer to documentation for valid model names in pytorch ie vgg16 https://pytorch.org/docs/stable/torchvision/models.html")
sys.exit()
self.hidden_units = hidden_units
self.learning_rate = learning_rate
# Freeze parameters of pre-trained model part by removing gradients
for param in self.model.parameters():
param.requires_grad = False
# Determine classifier input units for selected model
if hasattr(self.model, "classifier"):
try:
classifier_input_neurons = self.model.classifier[0].in_features
except TypeError:
classifier_input_neurons = self.model.classifier.in_features
elif hasattr(self.model, "fc"):
classifier_input_neurons = self.model.fc.in_features
else:
print("Unable to determine classifier input units number - unable to create model")
return
# Classifier architecture parameters
classifier_output_neurons = 102
classifier_dropout = 0.2
# Build new classifier for recognising flowers to work with model
self.model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(classifier_input_neurons, hidden_units)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(classifier_dropout)),
('fc2', nn.Linear(hidden_units, classifier_output_neurons)),
('output', nn.LogSoftmax(dim=1))]))
# Define model loss function
self.criterion = nn.NLLLoss()
# Define training function: only train the classifier parameters, feature parameters are frozen
self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
# Move model to current device
self.model.to(self.device)
# Function to train model
def train(self, epochs, trainloader, validloader, class_to_idx):
''' Function to train model
'''
print('Training model...')
# Set variables
self.epochs = epochs
self.training_steps = 0
training_loss = 0
print_every = 20
self.model.class_to_idx = class_to_idx
# Train network
# Ensure notebook session stays active through long runs
with active_session():
# For each training pass of whole dataset/epoch
for epoch in range(epochs):
print(f"Epoch {epoch+1}")
print("-------")
# For each training batch/step of images & labels
for inputs, labels in trainloader:
# Increment training steps count
self.training_steps += 1
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Clear gradients
self.optimizer.zero_grad()
# Do forward pass through network
logps = self.model(inputs)
# Calculate loss for whole network
loss = self.criterion(logps, labels)
# Calculate gradients for each element to be trained by network (weights & biases)
loss.backward()
# Do back-propagation step: apply negative gradients to weights & biases
self.optimizer.step()
# Accumulate training loss
training_loss += loss.item()
# Every 20 training steps, validation check & output stats
if self.training_steps % print_every == 0:
valid_loss = 0
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model validation/prediction
with torch.no_grad():
# For each validation batch of images & labels
for inputs, labels in validloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate loss for network
batch_loss = self.criterion(logps, labels)
# Accumulate validation loss
valid_loss += batch_loss.item()
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Output stats for current training step
print(f"Training step {self.training_steps}")
print(f"Training loss: {training_loss/print_every:.3f} - "
f"Validation loss: {valid_loss/len(validloader):.3f} - "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
# Validation end - reset training loss & set model back to training mode
training_loss = 0
self.model.train()
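# --- Illustrative aside (not part of the record): the accuracy bookkeeping above
# rests on a small tensor idiom; on toy values it works out as follows.
#   top_p, top_class = ps.topk(1, dim=1)                 # top_class shape: [batch, 1]
#   equals = top_class == labels.view(*top_class.shape)  # elementwise bool compare
#   torch.mean(equals.type(torch.FloatTensor))           # fraction predicted correctly
# e.g. top_class = [[2], [0]] and labels = [2, 1] give equals = [[True], [False]],
# whose float mean is 0.5, i.e. 50% batch accuracy.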
# Function to test model
def test(self, testloader):
''' Function to test model
'''
print('Testing model...')
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model testing/prediction
with torch.no_grad():
# For each test batch of images & labels
for inputs, labels in testloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
else:
# Output accuracy for entire test dataset
print(f"Test accuracy: {accuracy/len(testloader):.3f}")
# Function to save model
def save(self, save_dir):
''' Function to save model
'''
print('Saving model...')
# If save dir set
if (save_dir):
save_dir = save_dir + '/'
# If it does not exist
if (not os.path.isdir(save_dir)):
# Make dir
try:
os.mkdir(save_dir)
except OSError:
print ("Creation of the directory %s failed" % save_dir)
print ("Model was not saved")
sys.exit()
# Define checkpoint parameters
checkpoint = {'class_to_idx': self.model.class_to_idx,
'model_state_dict': self.model.state_dict(),
'arch': self.arch,
'learning_rate': self.learning_rate,
'hidden_units': self.hidden_units,
'epochs': self.epochs,
'training_steps': self.training_steps}
# Save it
torch.save(checkpoint, save_dir + 'checkpoint.pth')
# Function to load model
def load(self, save_dir):
''' Function to load model
'''
print('Loading model...')
# Load checkpoint
if torch.cuda.is_available():
checkpoint = torch.load(save_dir + 'checkpoint.pth')
else:
checkpoint = torch.load(save_dir + 'checkpoint.pth', map_location=lambda storage, loc: storage)
# Create model
self.build(checkpoint['arch'], checkpoint['learning_rate'], checkpoint['hidden_units'])
# Load classifier state values from checkpoint
self.model.load_state_dict(checkpoint['model_state_dict'])
self.model.class_to_idx = checkpoint['class_to_idx']
def predict(self, np_image, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print('Model predicting...')
# Convert image to tensor
image_tensor = torch.from_numpy(np_image)
# Add batch dimension to tensor
image_tensor = image_tensor.unsqueeze_(0)
# Convert to float tensor
image_tensor = image_tensor.float()
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model prediction
with torch.no_grad():
# Do forward pass through network
logps = self.model.forward(image_tensor)
# Get actual probabilities output from network for this image
ps = torch.exp(logps)
# Get topk probability/prediction for this image
top_p, top_class = ps.topk(topk, dim=1)
top_p = top_p.numpy()
top_class = top_class.numpy()
# Invert class map
idx_to_class = {j: i for i, j in self.model.class_to_idx.items()}
# Map indexes to get true class indexes
top_classes = [idx_to_class[index] for index in top_class[0]]
# Return probabilities and classes
return top_p[0], top_classes
def predict_image(self, image_path, np_image, top_k, category_names_json):
print('Testing model prediction...')
# Get image file parts
image_filename = image_path.split('/')[-2]
# Get prediction of image
probs, classes = self.predict(np_image, top_k)
print(" ")
# If category names set
if (category_names_json):
with open(category_names_json, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[x] for x in classes]
print("Actual flower category: " + cat_to_name[image_filename])
print("Categories predicted")
print(classes)
print("Probabilities of categories predicted")
print(probs)
| build | identifier_name |
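# --- Illustrative aside (not part of the records above/below): a hedged sketch of
# how the FlowerPredictionModel class above might be driven end to end. The data
# loaders, dataset and checkpoint directory are hypothetical; only the method
# names and signatures come from the class itself.
# model = FlowerPredictionModel(gpu=True)
# model.build(arch='vgg16', learning_rate=0.001, hidden_units=512)
# model.train(epochs=3, trainloader=trainloader, validloader=validloader,
#             class_to_idx=train_dataset.class_to_idx)
# model.test(testloader)
# model.save('checkpoints')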
model.py | # Imports here
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from workspace_utils import active_session
from PIL import Image
import sys
import os
import json
# Define class for flower prediction model
class FlowerPredictionModel:
# Model class constructor
| def __init__(self, gpu):
''' Initialise model object
'''
# Set device
if (gpu):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = "cpu"
# Function to build model
def build(self, arch, learning_rate, hidden_units):
''' Function to build model
'''
print('Building model...')
# Select & load pre-trained model
try:
arch = arch.lower()
self.model = models.__dict__[arch](pretrained=True)
self.arch = arch
except:
print("Model " + arch + " not recognised: please refer to documentation for valid model names in pytorch ie vgg16 https://pytorch.org/docs/stable/torchvision/models.html")
sys.exit()
self.hidden_units = hidden_units
self.learning_rate = learning_rate
# Freeze parameters of pre-trained model part by removing gradients
for param in self.model.parameters():
param.requires_grad = False
# Determine classifier input units for selected model
if hasattr(self.model, "classifier"):
try:
classifier_input_neurons = self.model.classifier[0].in_features
except TypeError:
classifier_input_neurons = self.model.classifier.in_features
elif hasattr(self.model, "fc"):
classifier_input_neurons = self.model.fc.in_features
else:
print("Unable to determine classifier input units number - unable to create model")
return
# Classifier architecture parameters
classifier_output_neurons = 102
classifier_dropout = 0.2
# Build new classifier for recognising flowers to work with model
self.model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(classifier_input_neurons, hidden_units)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(classifier_dropout)),
('fc2', nn.Linear(hidden_units, classifier_output_neurons)),
('output', nn.LogSoftmax(dim=1))]))
# Define model loss function
self.criterion = nn.NLLLoss()
# Define training function: only train the classifier parameters, feature parameters are frozen
self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
# Move model to current device
self.model.to(self.device)
# Function to train model
def train(self, epochs, trainloader, validloader, class_to_idx):
''' Function to train model
'''
print('Training model...')
# Set variables
self.epochs = epochs
self.training_steps = 0
training_loss = 0
print_every = 20
self.model.class_to_idx = class_to_idx
# Train network
# Ensure notebook session stays active through long runs
with active_session():
# For each training pass of whole dataset/epoch
for epoch in range(epochs):
print(f"Epoch {epoch+1}")
print("-------")
# For each training batch/step of images & labels
for inputs, labels in trainloader:
# Increment training steps count
self.training_steps += 1
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Clear gradients
self.optimizer.zero_grad()
# Do forward pass through network
logps = self.model(inputs)
# Calculate loss for whole network
loss = self.criterion(logps, labels)
# Calculate gradients for each element to be trained by network (weights & biases)
loss.backward()
# Do back-propagation step: apply negative gradients to weights & biases
self.optimizer.step()
# Accumulate training loss
training_loss += loss.item()
# Every 20 training steps, validation check & output stats
if self.training_steps % print_every == 0:
valid_loss = 0
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model validation/prediction
with torch.no_grad():
# For each validation batch of images & labels
for inputs, labels in validloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate loss for network
batch_loss = self.criterion(logps, labels)
# Accumulate validation loss
valid_loss += batch_loss.item()
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Output stats for current training step
print(f"Training step {self.training_steps}")
print(f"Training loss: {training_loss/print_every:.3f} - "
f"Validation loss: {valid_loss/len(validloader):.3f} - "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
# Validation end - reset training loss & set model back to training mode
training_loss = 0
self.model.train()
# Function to test model
def test(self, testloader):
''' Function to test model
'''
print('Testing model...')
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model testing/prediction
with torch.no_grad():
# For each test batch of images & labels
for inputs, labels in testloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
else:
# Output accuracy for entire test dataset
print(f"Test accuracy: {accuracy/len(testloader):.3f}")
# Function to save model
def save(self, save_dir):
''' Function to save model
'''
print('Saving model...')
# If save dir set
if (save_dir):
save_dir = save_dir + '/'
# If it does not exist
if (not os.path.isdir(save_dir)):
# Make dir
try:
os.mkdir(save_dir)
except OSError:
print ("Creation of the directory %s failed" % save_dir)
print ("Model was not saved")
sys.exit()
# Define checkpoint parameters
checkpoint = {'class_to_idx': self.model.class_to_idx,
'model_state_dict': self.model.state_dict(),
'arch': self.arch,
'learning_rate': self.learning_rate,
'hidden_units': self.hidden_units,
'epochs': self.epochs,
'training_steps': self.training_steps}
# Save it
torch.save(checkpoint, save_dir + 'checkpoint.pth')
# Function to load model
def load(self, save_dir):
''' Function to load model
'''
print('Loading model...')
# Load checkpoint
if torch.cuda.is_available():
checkpoint = torch.load(save_dir + 'checkpoint.pth')
else:
checkpoint = torch.load(save_dir + 'checkpoint.pth', map_location=lambda storage, loc: storage)
# Create model
self.build(checkpoint['arch'], checkpoint['learning_rate'], checkpoint['hidden_units'])
# Load classifier state values from checkpoint
self.model.load_state_dict(checkpoint['model_state_dict'])
self.model.class_to_idx = checkpoint['class_to_idx']
def predict(self, np_image, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print('Model predicting...')
# Convert image to tensor
image_tensor = torch.from_numpy(np_image)
# Add batch dimension to tensor
image_tensor = image_tensor.unsqueeze_(0)
# Convert to float tensor
image_tensor = image_tensor.float()
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model prediction
with torch.no_grad():
# Do forward pass through network
logps = self.model.forward(image_tensor)
# Get actual probabilities output from network for this image
ps = torch.exp(logps)
# Get topk probability/prediction for this image
top_p, top_class = ps.topk(topk, dim=1)
top_p = top_p.numpy()
top_class = top_class.numpy()
# Invert class map
idx_to_class = {j: i for i, j in self.model.class_to_idx.items()}
# Map indexes to get true class indexes
top_classes = [idx_to_class[index] for index in top_class[0]]
# Return probabilities and classes
return top_p[0], top_classes
def predict_image(self, image_path, np_image, top_k, category_names_json):
print('Testing model prediction...')
# Get image file parts
image_filename = image_path.split('/')[-2]
# Get prediction of image
probs, classes = self.predict(np_image, top_k)
print(" ")
# If category names set
if (category_names_json):
with open(category_names_json, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[x] for x in classes]
print("Actual flower category: " + cat_to_name[image_filename])
print("Categories predicted")
print(classes)
print("Probabilities of categories predicted")
print(probs) | identifier_body | |
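# --- Illustrative aside (not part of the records above/below): save() and load()
# above round-trip a plain dict through torch.save/torch.load. The same pattern in
# isolation (the file name is hypothetical):
import torch

checkpoint = {'arch': 'vgg16', 'hidden_units': 512, 'epochs': 3}
torch.save(checkpoint, 'checkpoint.pth')
restored = torch.load('checkpoint.pth', map_location='cpu')  # CPU-safe load
assert restored['hidden_units'] == 512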
model.py | # Imports here
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from workspace_utils import active_session
from PIL import Image
import sys
import os
import json
# Define class for flower prediction model
class FlowerPredictionModel:
# Model class constructor
def __init__(self, gpu):
''' Initialise model object
'''
# Set device
if (gpu):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = "cpu"
# Function to build model
def build(self, arch, learning_rate, hidden_units):
''' Function to build model
'''
print('Building model...')
# Select & load pre-trained model
try:
arch = arch.lower()
self.model = models.__dict__[arch](pretrained=True)
self.arch = arch
except:
print("Model " + arch + " not recognised: please refer to documentation for valid model names in pytorch ie vgg16 https://pytorch.org/docs/stable/torchvision/models.html")
sys.exit()
self.hidden_units = hidden_units
self.learning_rate = learning_rate
# Freeze parameters of pre-trained model part by removing gradients
for param in self.model.parameters():
param.requires_grad = False
# Determine classifier input units for selected model
if hasattr(self.model, "classifier"):
try:
classifier_input_neurons = self.model.classifier[0].in_features
except TypeError:
classifier_input_neurons = self.model.classifier.in_features
elif hasattr(self.model, "fc"):
classifier_input_neurons = self.model.fc.in_features
else:
print("Unable to determine classifier input units number - unable to create model")
return
# Classifier architecture parameters
classifier_output_neurons = 102
classifier_dropout = 0.2
# Build new classifier for recognising flowers to work with model
self.model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(classifier_input_neurons, hidden_units)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(classifier_dropout)),
('fc2', nn.Linear(hidden_units, classifier_output_neurons)),
('output', nn.LogSoftmax(dim=1))]))
# Define model loss function
self.criterion = nn.NLLLoss()
# Define training function: only train the classifier parameters, feature parameters are frozen
self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
# Move model to current device
self.model.to(self.device)
# Function to train model
def train(self, epochs, trainloader, validloader, class_to_idx):
''' Function to train model
'''
print('Training model...')
# Set variables
self.epochs = epochs
self.training_steps = 0
training_loss = 0
print_every = 20
self.model.class_to_idx = class_to_idx
# Train network
# Ensure notebook session stays active through long runs
with active_session():
# For each training pass of whole dataset/epoch
for epoch in range(epochs):
print(f"Epoch {epoch+1}")
print("-------")
# For each training batch/step of images & labels
for inputs, labels in trainloader:
# Increment training steps count
self.training_steps += 1
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Clear gradients
self.optimizer.zero_grad()
# Do forward pass through network
logps = self.model(inputs)
# Calculate loss for whole network
loss = self.criterion(logps, labels)
# Calculate gradients for each element to be trained by network (weights & biases)
loss.backward()
# Do back-propagation step: apply negative gradients to weights & biases
self.optimizer.step()
# Accumulate training loss
training_loss += loss.item()
# Every 20 training steps, validation check & output stats
if self.training_steps % print_every == 0:
valid_loss = 0
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model validation/prediction
with torch.no_grad():
# For each validation batch of images & labels
for inputs, labels in validloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate loss for network
batch_loss = self.criterion(logps, labels)
# Accumulate validation loss
valid_loss += batch_loss.item()
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Output stats for current training step
print(f"Training step {self.training_steps}")
print(f"Training loss: {training_loss/print_every:.3f} - "
f"Validation loss: {valid_loss/len(validloader):.3f} - "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
# Validation end - reset training loss & set model back to training mode
training_loss = 0
self.model.train()
# Function to test model
def test(self, testloader):
''' Function to test model
'''
print('Testing model...')
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model testing/prediction
with torch.no_grad():
# For each test batch of images & labels
for inputs, labels in testloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
else:
# Output accuracy for entire test dataset
print(f"Test accuracy: {accuracy/len(testloader):.3f}")
# Function to save model
def save(self, save_dir):
''' Function to save model
'''
print('Saving model...')
# If save dir set
if (save_dir):
save_dir = save_dir + '/'
# If it does not exist
if (not os.path.isdir(save_dir)):
# Make dir
try:
os.mkdir(save_dir)
except OSError:
print ("Creation of the directory %s failed" % save_dir)
print ("Model was not saved")
sys.exit()
# Define checkpoint parameters
checkpoint = {'class_to_idx': self.model.class_to_idx,
'model_state_dict': self.model.state_dict(),
'arch': self.arch,
'learning_rate': self.learning_rate,
'hidden_units': self.hidden_units,
'epochs': self.epochs,
'training_steps': self.training_steps}
# Save it
torch.save(checkpoint, save_dir + 'checkpoint.pth')
# Function to load model
def load(self, save_dir):
''' Function to load model
'''
print('Loading model...')
# Load checkpoint
if torch.cuda.is_available():
checkpoint = torch.load(save_dir + 'checkpoint.pth')
else:
checkpoint = torch.load(save_dir + 'checkpoint.pth', map_location=lambda storage, loc: storage)
# Create model
self.build(checkpoint['arch'], checkpoint['learning_rate'], checkpoint['hidden_units'])
# Load classifier state values from checkpoint
self.model.load_state_dict(checkpoint['model_state_dict'])
self.model.class_to_idx = checkpoint['class_to_idx']
def predict(self, np_image, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print('Model predicting...')
# Convert image to tensor
image_tensor = torch.from_numpy(np_image)
# Add batch dimension to tensor
image_tensor = image_tensor.unsqueeze_(0) |
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model prediction
with torch.no_grad():
# Do forward pass through network
logps = self.model.forward(image_tensor)
# Get actual probabilities output from network for this image
ps = torch.exp(logps)
# Get topk probability/prediction for this image
top_p, top_class = ps.topk(topk, dim=1)
top_p = top_p.numpy()
top_class = top_class.numpy()
# Invert class map
idx_to_class = {j: i for i, j in self.model.class_to_idx.items()}
# Map indexes to get true class indexes
top_classes = [idx_to_class[index] for index in top_class[0]]
# Return probabilities and classes
return top_p[0], top_classes
def predict_image(self, image_path, np_image, top_k, category_names_json):
print('Testing model prediction...')
# Get image file parts
image_filename = image_path.split('/')[-2]
# Get prediction of image
probs, classes = self.predict(np_image, top_k)
print(" ")
# If category names set
if (category_names_json):
with open(category_names_json, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[x] for x in classes]
print("Actual flower category: " + cat_to_name[image_filename])
print("Categories predicted")
print(classes)
print("Probabilities of categories predicted")
print(probs) | # Convert to float tensor
image_tensor = image_tensor.float() | random_line_split |
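# --- Illustrative aside (not part of the records above/below): predict() above
# turns a numpy image into a batched float32 tensor in three steps. The same steps
# in isolation, with a random array standing in for a real preprocessed image:
import numpy as np
import torch

np_image = np.random.rand(3, 224, 224)     # C x H x W float64 array
image_tensor = torch.from_numpy(np_image)  # tensor sharing the ndarray's memory
image_tensor = image_tensor.unsqueeze(0)   # add batch dim -> [1, 3, 224, 224]
image_tensor = image_tensor.float()        # cast float64 -> float32 for the model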
model.py | # Imports here
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from workspace_utils import active_session
from PIL import Image
import sys
import os
import json
# Define class for flower prediction model
class FlowerPredictionModel:
# Model class constructor
def __init__(self, gpu):
''' Initialise model object
'''
# Set device
if (gpu):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = "cpu"
# Function to build model
def build(self, arch, learning_rate, hidden_units):
''' Function to build model
'''
print('Building model...')
# Select & load pre-trained model
try:
arch = arch.lower()
self.model = models.__dict__[arch](pretrained=True)
self.arch = arch
except:
print("Model " + arch + " not recognised: please refer to documentation for valid model names in pytorch ie vgg16 https://pytorch.org/docs/stable/torchvision/models.html")
sys.exit()
self.hidden_units = hidden_units
self.learning_rate = learning_rate
# Freeze parameters of pre-trained model part by removing gradients
for param in self.model.parameters():
param.requires_grad = False
# Determine classifier input units for selected model
if hasattr(self.model, "classifier"):
try:
classifier_input_neurons = self.model.classifier[0].in_features
except TypeError:
classifier_input_neurons = self.model.classifier.in_features
elif hasattr(self.model, "fc"):
classifier_input_neurons = self.model.fc.in_features
else:
|
# Classifier architecture parameters
classifier_output_neurons = 102
classifier_dropout = 0.2
# Build new classifier for recognising flowers to work with model
self.model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(classifier_input_neurons, hidden_units)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(classifier_dropout)),
('fc2', nn.Linear(hidden_units, classifier_output_neurons)),
('output', nn.LogSoftmax(dim=1))]))
# Define model loss function
self.criterion = nn.NLLLoss()
# Define training function: only train the classifier parameters, feature parameters are frozen
self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=learning_rate)
# Move model to current device
self.model.to(self.device)
# Function to train model
def train(self, epochs, trainloader, validloader, class_to_idx):
''' Function to train model
'''
print('Training model...')
# Set variables
self.epochs = epochs
self.training_steps = 0
training_loss = 0
print_every = 20
self.model.class_to_idx = class_to_idx
# Train network
# Ensure notebook session stays active through long runs
with active_session():
# For each training pass of whole dataset/epoch
for epoch in range(epochs):
print(f"Epoch {epoch+1}")
print("-------")
# For each training batch/step of images & labels
for inputs, labels in trainloader:
# Increment training steps count
self.training_steps += 1
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Clear gradients
self.optimizer.zero_grad()
# Do forward pass through network
logps = self.model(inputs)
# Calculate loss for whole network
loss = self.criterion(logps, labels)
# Calculate gradients for each element to be trained by network (weights & biases)
loss.backward()
# Do back-propagation step: apply negative gradients to weights & biases
self.optimizer.step()
# Accumulate training loss
training_loss += loss.item()
# Every 20 training steps, validation check & output stats
if self.training_steps % print_every == 0:
valid_loss = 0
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model validation/prediction
with torch.no_grad():
# For each validation batch of images & labels
for inputs, labels in validloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate loss for network
batch_loss = self.criterion(logps, labels)
# Accumulate validation loss
valid_loss += batch_loss.item()
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Output stats for current training step
print(f"Training step {self.training_steps}")
print(f"Training loss: {training_loss/print_every:.3f} - "
f"Validation loss: {valid_loss/len(validloader):.3f} - "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
# Validation end - reset training loss & set model back to training mode
training_loss = 0
self.model.train()
# Function to test model
def test(self, testloader):
''' Function to test model
'''
print('Testing model...')
accuracy = 0
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model testing/prediction
with torch.no_grad():
# For each test batch of images & labels
for inputs, labels in testloader:
# Move data and label tensors to device
inputs, labels = inputs.to(self.device), labels.to(self.device)
# Do forward pass through network
logps = self.model.forward(inputs)
# Calculate stats
# Get actual probabilities output from network for this batch
ps = torch.exp(logps)
# Get top probability/prediction for each image in batch
top_p, top_class = ps.topk(1, dim=1)
# Check each prediction against label (accuracy)
equals = top_class == labels.view(*top_class.shape)
# Calculate mean accuracy for this batch
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
else:
# Output accuracy for entire test dataset
print(f"Test accuracy: {accuracy/len(testloader):.3f}")
# Function to save model
def save(self, save_dir):
''' Function to save model
'''
print('Saving model...')
# If save dir set
if (save_dir):
save_dir = save_dir + '/'
# If it does not exist
if (not os.path.isdir(save_dir)):
# Make dir
try:
os.mkdir(save_dir)
except OSError:
print ("Creation of the directory %s failed" % save_dir)
print ("Model was not saved")
sys.exit()
# Define checkpoint parameters
checkpoint = {'class_to_idx': self.model.class_to_idx,
'model_state_dict': self.model.state_dict(),
'arch': self.arch,
'learning_rate': self.learning_rate,
'hidden_units': self.hidden_units,
'epochs': self.epochs,
'training_steps': self.training_steps}
# Save it
torch.save(checkpoint, save_dir + 'checkpoint.pth')
# Function to load model
def load(self, save_dir):
''' Function to load model
'''
print('Loading model...')
# Load checkpoint
if torch.cuda.is_available():
checkpoint = torch.load(save_dir + 'checkpoint.pth')
else:
checkpoint = torch.load(save_dir + 'checkpoint.pth', map_location=lambda storage, loc: storage)
# Create model
self.build(checkpoint['arch'], checkpoint['learning_rate'], checkpoint['hidden_units'])
# Load classifier state values from checkpoint
self.model.load_state_dict(checkpoint['model_state_dict'])
self.model.class_to_idx = checkpoint['class_to_idx']
def predict(self, np_image, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print('Model predicting...')
# Convert image to tensor
image_tensor = torch.from_numpy(np_image)
# Add batch dimension to tensor
image_tensor = image_tensor.unsqueeze_(0)
# Convert to float tensor
image_tensor = image_tensor.float()
# Switch to evaluation mode - dropout inactive
self.model.eval()
# Disable gradients - not needed for model prediction
with torch.no_grad():
# Do forward pass through network
logps = self.model.forward(image_tensor)
# Get actual probabilities output from network for this image
ps = torch.exp(logps)
# Get topk probability/prediction for this image
top_p, top_class = ps.topk(topk, dim=1)
top_p = top_p.numpy()
top_class = top_class.numpy()
# Invert class map
idx_to_class = {j: i for i, j in self.model.class_to_idx.items()}
# Map indexes to get true class indexes
top_classes = [idx_to_class[index] for index in top_class[0]]
# Return probabilities and classes
return top_p[0], top_classes
def predict_image(self, image_path, np_image, top_k, category_names_json):
print('Testing model prediction...')
# Get image file parts
image_filename = image_path.split('/')[-2]
# Get prediction of image
probs, classes = self.predict(np_image, top_k)
print(" ")
# If category names set
if (category_names_json):
with open(category_names_json, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[x] for x in classes]
print("Actual flower category: " + cat_to_name[image_filename])
print("Categories predicted")
print(classes)
print("Probabilities of categories predicted")
print(probs)
| print("Unable to determine classifier input units number - unable to create model")
return | conditional_block |
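# --- Illustrative aside (not part of the records above/below): predict() above
# inverts class_to_idx so network output indices map back to dataset class labels.
# The same trick on a toy mapping:
class_to_idx = {'daisy': 0, 'rose': 1, 'tulip': 2}
idx_to_class = {j: i for i, j in class_to_idx.items()}  # {0: 'daisy', 1: 'rose', 2: 'tulip'}
top_class = [2, 0]  # indices as returned by ps.topk(...)
top_classes = [idx_to_class[i] for i in top_class]  # -> ['tulip', 'daisy']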
longfruit.py | #!/usr/bin/env python3
from random import choice, choices, expovariate, randint, randrange, shuffle, uniform
import configparser
import os
import re
import string
import subprocess
import sys
def georand(lmb):
# roughly geometrically distributed
return int(expovariate(lmb))
def random_id():
return ''.join(choices(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits, k=8))
def filter_asm(asm):
for line in asm.splitlines():
# remove comments
m = re.match(r'^([^#]*)#(.*)$', line)
if m:
line = m.group(1)
line = line.strip()
# skip empty lines
if len(line) == 0:
continue
# skip directives
if line.startswith('.'):
continue
# skip labels
if line.endswith(':'):
continue
yield line
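# --- Illustrative aside (not part of the record): filter_asm is a generator that
# strips comments, blank lines, assembler directives and labels, leaving only
# instructions. On a hypothetical snippet:
#   asm = ".text\nmain:\n  addi a0,a0,1 # bump\n  ret\n"
#   list(filter_asm(asm))  ->  ['addi a0,a0,1', 'ret']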
def instr_cost(line):
parts = re.split('[ \t,]', line)
instr = parts[0]
cost_load_store = 2
cost_fpu_load_store = cost_load_store
cost_alu = 1
cost_fpu = 1
cost_branch = 3
cost_mul = cost_branch
cost_div = 8 # TODO: clang generates long instr. sequences instead of div
cost_call = 20 # TODO: builtins
cost_ret = 2
cost_ebreak = 0 # should be unreachable if no undef. behavior is generated
if instr == 'li':
imm = int(parts[-1])
if imm >= -2048 and imm < 2048: # addi
return cost_alu
elif (imm & 0xFFF) == 0: # lui
return cost_alu
else: # lui + addi
return cost_alu * 2
return {
'add': cost_alu,
'addi': cost_alu,
'addiw': cost_alu,
'addw': cost_alu,
'and': cost_alu,
'andi': cost_alu,
'beq': cost_branch,
'beqz': cost_branch,
'bge': cost_branch,
'bgeu': cost_branch,
'bgez': cost_branch,
'bgt': cost_branch,
'bgtu': cost_branch,
'bgtz': cost_branch,
'ble': cost_branch,
'bleu': cost_branch,
'blez': cost_branch,
'blt': cost_branch,
'bltu': cost_branch,
'bltz': cost_branch,
'bne': cost_branch,
'bnez': cost_branch,
'call': cost_call,
'div': cost_div,
'divu': cost_div,
'divuw': cost_div,
'divw': cost_div,
'ebreak': cost_ebreak,
'fadd.d': cost_fpu,
'fadd.s': cost_fpu,
'fcvt.d.l': cost_fpu,
'fcvt.d.lu': cost_fpu,
'fcvt.d.s': cost_fpu,
'fcvt.d.w': cost_fpu,
'fcvt.d.wu': cost_fpu,
'fcvt.l.d': cost_fpu,
'fcvt.l.s': cost_fpu,
'fcvt.lu.d': cost_fpu,
'fcvt.lu.s': cost_fpu,
'fcvt.s.d': cost_fpu,
'fcvt.s.l': cost_fpu,
'fcvt.s.lu': cost_fpu,
'fcvt.s.w': cost_fpu,
'fcvt.s.wu': cost_fpu,
'fcvt.w.d': cost_fpu,
'fcvt.w.s': cost_fpu,
'fcvt.wu.d': cost_fpu,
'fcvt.wu.s': cost_fpu,
'fdiv.d': cost_fpu,
'fdiv.s': cost_fpu,
'feq.d': cost_fpu,
'feq.s': cost_fpu,
'fge.d': cost_fpu,
'fge.s': cost_fpu,
'fgt.d': cost_fpu,
'fgt.s': cost_fpu,
'fld': cost_fpu_load_store,
'fle.d': cost_fpu,
'fle.s': cost_fpu,
'flt.d': cost_fpu,
'flt.s': cost_fpu,
'flw': cost_fpu_load_store,
'fmadd.d': cost_fpu,
'fmadd.s': cost_fpu,
'fmul.d': cost_fpu,
'fmul.s': cost_fpu,
'fmv.d': cost_fpu,
'fmv.d.x': cost_fpu,
'fmv.s': cost_fpu,
'fmv.s.x': cost_fpu,
'fmv.w.x': cost_fpu,
'fmv.x.s': cost_fpu,
'fneg.d': cost_fpu,
'fneg.s': cost_fpu,
#'fnmsub.s': cost_fpu, # not found yet
'fnmsub.d': cost_fpu,
'fsd': cost_fpu_load_store,
'fsub.d': cost_fpu,
'fsub.s': cost_fpu,
'fsw': cost_fpu_load_store,
'j': cost_branch,
'jr': cost_branch,
'lb': cost_load_store,
'lbu': cost_load_store,
'ld': cost_load_store,
'lh': cost_load_store,
'lhu': cost_load_store,
'lui': cost_alu,
'lw': cost_load_store,
'lwu': cost_load_store,
'mul': cost_mul,
'mulh': cost_mul,
'mulhu': cost_mul,
'mulw': cost_mul,
'mv': cost_alu,
'neg': cost_alu,
'negw': cost_alu,
'nop': cost_alu,
'not': cost_alu,
'or': cost_alu,
'ori': cost_alu,
'rem': cost_div,
'remu': cost_div,
'remuw': cost_div,
'remw': cost_div,
'ret': cost_ret,
'sb': cost_load_store,
'sd': cost_load_store,
'seqz': cost_alu,
'sext': cost_alu,
'sext.w': cost_alu,
'sgt': cost_alu,
'sgtu': cost_alu,
'sgtz': cost_alu,
'sh': cost_load_store,
'sll': cost_alu,
'slli': cost_alu,
'slliw': cost_alu,
'sllw': cost_alu,
'slt': cost_alu,
'slti': cost_alu,
'sltiu': cost_alu,
'sltu': cost_alu,
'snez': cost_alu,
'sra': cost_alu,
'srai': cost_alu,
'sraiw': cost_alu,
'sraw': cost_alu,
'srl': cost_alu,
'srli': cost_alu,
'srliw': cost_alu,
'srlw': cost_alu,
'sub': cost_alu,
'subw': cost_alu,
'sw': cost_load_store,
'xor': cost_alu,
'xori': cost_alu,
}[instr]
def compile(compiler, arch, abi, filename):
if compiler.endswith('gcc'):
prog = 'riscv64-unknown-linux-gnu-gcc'
opts = []
elif compiler.endswith('clang'):
prog = 'clang'
opts = [
'-Wno-literal-conversion',
'-Wno-implicit-int-float-conversion'
]
if arch == 'rv64gc':
opts.append('--target=riscv64')
elif arch == 'rv32gc':
opts.append('--target=riscv32')
else:
assert False, 'unsupported arch'
else:
assert False, 'unsupported compiler'
opts = opts + [
'-Werror=implicit-int',
'-Wno-tautological-compare',
'-Wno-overflow',
'-Wno-constant-conversion',
'-Wno-unsequenced',
f'-march={arch}',
f'-mabi={abi}',
'-O2', '-S', '-o', '-', filename
]
r = subprocess.check_output([prog] + opts).decode('utf-8')
return r
def get_cost(asm):
cost_total = 0
for line in filter_asm(asm):
cost = instr_cost(line)
cost_total = cost_total + cost
return cost_total
class Context:
def __init__(self):
self.var_counter = 0
self.vars = []
def gen_var(self, loop_counter = False):
if not loop_counter:
v = f'v{self.var_counter}'
else:
v = f'i{self.var_counter}'
self.var_counter = self.var_counter + 1
self.vars.append(v)
return v
def gen_vars(self, num):
return [self.gen_var() for i in range(randint(1, num))]
def rand_var(self):
return choice(self.vars)
def copy(self):
ctx = Context()
ctx.var_counter = self.var_counter
ctx.vars = self.vars.copy()
return ctx
def gen_type():
return choice([
'char', 'short', 'int', 'long', 'long long',
'float', 'double'
])
def gen_type_integer():
signed = choice(['signed', 'unsigned'])
ty = choice(['char', 'short', 'int', 'long', 'long long'])
return f'{signed} {ty}'
def gen_cast_integer():
return f'({gen_type_integer()})'
def gen_expr_literal_int_zero():
return 0
def gen_expr_literal_int_12_bit():
return randrange(-2048, 2048)
def gen_expr_literal_int_20_bit_up():
return randrange(0, 2**20) << 12
def gen_expr_literal_int_32_bit():
return randrange(-2**31, 2**31)
def gen_expr_literal_int_64_bit():
return randrange(-2**63, 2**63)
def gen_expr_literal_float():
return uniform(-1_000_000, 1_000_000)
def gen_expr_literal(ctx = None):
v = choice([
gen_expr_literal_int_zero,
gen_expr_literal_int_12_bit,
gen_expr_literal_int_20_bit_up,
gen_expr_literal_int_32_bit,
gen_expr_literal_int_64_bit,
gen_expr_literal_float,
])()
return v
def gen_expr_var(ctx):
return ctx.rand_var()
def gen_expr_unary(ctx):
a = ctx.rand_var()
op = choice(['-', '~', '!', '++', '--'])
cast = ''
if op == '~':
# must be applied to an integer operand
cast = gen_cast_integer()
return f'{op}{cast}{a}'
def gen_expr_binary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
ops = [
'^', '&', '|', '<<', '>>',
'+', '-',
'*', '/', '%',
'==', '!=',
'<', '<=', '>', '>=',
'&&', '||'
]
op = choice(ops)
cast1 = ''
cast2 = ''
if op in ['^', '&', '|', '%', '<<', '>>']:
# must be applied to integer operands
cast1 = gen_cast_integer()
cast2 = gen_cast_integer()
return f'{cast1}{a} {op} {cast2}{b}'
def gen_expr_ternary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
c = ctx.rand_var()
return f'{a} ? {b} : {c}'
def gen_expr(ctx):
return choice([
gen_expr_var,
gen_expr_literal,
gen_expr_unary,
gen_expr_binary,
gen_expr_ternary,
])(ctx)
def gen_stmt_decl(ctx):
t = gen_type()
e = gen_expr(ctx)
v = ctx.gen_var()
s = f'{t} {v} = {e};'
return s
def gen_stmt_assign(ctx):
# avoid assigning to loop counters
while True:
v = ctx.rand_var()
if v[0] != 'i':
break
e = gen_expr(ctx)
return f'{v} = {e};'
def gen_stmt_loop(ctx):
loop_ctx = ctx.copy()
t = gen_type_integer()
i = loop_ctx.gen_var(loop_counter = True)
end = randrange(1, 127)
return (
f'for({t} {i} = 0; {i} < {end}; ++{i}) {{\n'
f'{gen_block(loop_ctx)}'
f'}}'
)
def gen_stmt(ctx):
stmt = choice([
gen_stmt_decl,
gen_stmt_assign,
gen_stmt_loop,
])(ctx)
return f'{stmt}\n'
def gen_block(ctx):
block = ''
for i in range(georand(0.5)):
block = block + gen_stmt(ctx)
return block
def gen_func_args(ctx):
n = georand(0.2) + 1
args = [f'{gen_type()} {v}' for v in ctx.gen_vars(n)]
return ', '.join(args)
def gen_func(ctx):
return (
f'{gen_type()} test({gen_func_args(ctx)}) {{\n'
f'{gen_block(ctx)}'
f'return {ctx.rand_var()};\n'
f'}}'
)
def gen_global(ctx):
g = ctx.gen_var()
return f'{gen_type()} {g} = {gen_expr_literal()};'
def gen_globals(ctx):
globals = ''
for i in range(georand(1.0)):
g = gen_global(ctx)
globals = f'{globals}{g}\n'
return globals
def gen_unit(ctx):
# for now, one function with some parameter and access to some globals
unit = gen_globals(ctx)
unit = f'{unit}{gen_func(ctx)}\n'
return unit
def gen_test(filename):
with open(filename, 'w') as f:
ctx = Context()
print(gen_unit(ctx), file=f)
def test_file(filename, arch, abi):
asm_gcc = compile('gcc', arch, abi, filename)
c1 = get_cost(asm_gcc)
asm_clang = compile('clang', arch, abi, filename)
c2 = get_cost(asm_clang)
return c1, c2, asm_gcc, asm_clang
def read_file(fn):
with open(fn) as f:
return f.read()
def write_config(filename, arch, abi, cost1, cost2):
config = configparser.ConfigParser()
config.add_section('scenario')
config['scenario']['filename'] = filename
config['scenario']['arch'] = str(arch)
config['scenario']['abi'] = str(abi)
config['scenario']['cost1'] = str(cost1)
config['scenario']['cost2'] = str(cost2)
with open('scenario.ini', 'w') as f:
config.write(f)
return config
def write_result(f, config, asm1, asm2):
config.write(f)
filename = config['scenario']['filename']
print(f'### Source:\n{read_file(filename)}', file=f)
print(f'### GCC:\n{asm1}', file=f)
print(f'### Clang:\n{asm2}', file=f)
def run_test(filename, arch, abi):
asm1 = compile('gcc', arch, abi, filename)
c1 = get_cost(asm1)
asm2 = compile('clang', arch, abi, filename)
c2 = get_cost(asm2)
return c1, c2, asm1, asm2
def reduce_case(filename):
subprocess.check_output(['creduce', 'test.py', filename])
def main():
while True:
id = random_id()
source_file = f'case-{id}.c'
case_file = f'case-{id}.txt'
gen_test(source_file)
scenarios = [
['rv32gc', 'ilp32d'],
['rv64gc', 'lp64d'],
]
shuffle(scenarios) | if c2 > c1:
passed = True
config = write_config(source_file, arch, abi, c1, c2)
print('reducing')
reduce_case(source_file)
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
write_result(sys.stdout, config, asm1, asm2)
write_result(open(case_file, 'w'), config, asm1, asm2)
break
if not passed:
os.remove(source_file)
if __name__ == '__main__':
main() | passed = False
for arch, abi in scenarios:
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
print(c1, c2) | random_line_split |
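# --- Illustrative aside (not part of the records above/below): instr_cost() above
# prices the "li" pseudo-instruction by how it expands on RISC-V: one addi when the
# immediate fits 12 signed bits, one lui when only the upper 20 bits are set,
# otherwise a lui + addi pair. The same rule in isolation:
def li_cost(imm, cost_alu=1):
    if -2048 <= imm < 2048:  # fits addi's signed 12-bit immediate
        return cost_alu
    if (imm & 0xFFF) == 0:   # low 12 bits zero -> a single lui
        return cost_alu
    return cost_alu * 2      # needs lui + addi

# li_cost(100) -> 1, li_cost(0x12000) -> 1, li_cost(0x12345) -> 2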
longfruit.py | #!/usr/bin/env python3
from random import choice, choices, expovariate, randint, randrange, shuffle, uniform
import configparser
import os
import re
import string
import subprocess
import sys
def georand(lmb):
# roughly geometrically distributed
return int(expovariate(lmb))
def random_id():
return ''.join(choices(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits, k=8))
def filter_asm(asm):
for line in asm.splitlines():
# remove comments
m = re.match(r'^([^#]*)#(.*)$', line)
if m:
line = m.group(1)
line = line.strip()
# skip empty lines
if len(line) == 0:
continue
# skip directives
if line.startswith('.'):
continue
# skip labels
if line.endswith(':'):
continue
yield line
def instr_cost(line):
parts = re.split('[ \t,]', line)
instr = parts[0]
cost_load_store = 2
cost_fpu_load_store = cost_load_store
cost_alu = 1
cost_fpu = 1
cost_branch = 3
cost_mul = cost_branch
cost_div = 8 # TODO: clang generates long instr. sequences instead of div
cost_call = 20 # TODO: builtins
cost_ret = 2
cost_ebreak = 0 # should be unreachable if no undef. behavior is generated
if instr == 'li':
imm = int(parts[-1])
if imm >= -2048 and imm < 2048: # addi
return cost_alu
elif (imm & 0xFFF) == 0: # lui
return cost_alu
else: # lui + addi
return cost_alu * 2
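# Editor's note: this mirrors how RISC-V assemblers expand `li`: one `addi`
# for signed 12-bit immediates, one `lui` when only bits 12..31 are set, and
# a `lui` plus `addi` pair for anything wider.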
return {
'add': cost_alu,
'addi': cost_alu,
'addiw': cost_alu,
'addw': cost_alu,
'and': cost_alu,
'andi': cost_alu,
'beq': cost_branch,
'beqz': cost_branch,
'bge': cost_branch,
'bgeu': cost_branch,
'bgez': cost_branch,
'bgt': cost_branch,
'bgtu': cost_branch,
'bgtz': cost_branch,
'ble': cost_branch,
'bleu': cost_branch,
'blez': cost_branch,
'blt': cost_branch,
'bltu': cost_branch,
'bltz': cost_branch,
'bne': cost_branch,
'bnez': cost_branch,
'call': cost_call,
'div': cost_div,
'divu': cost_div,
'divuw': cost_div,
'divw': cost_div,
'ebreak': cost_ebreak,
'fadd.d': cost_fpu,
'fadd.s': cost_fpu,
'fcvt.d.l': cost_fpu,
'fcvt.d.lu': cost_fpu,
'fcvt.d.s': cost_fpu,
'fcvt.d.w': cost_fpu,
'fcvt.d.wu': cost_fpu,
'fcvt.l.d': cost_fpu,
'fcvt.l.s': cost_fpu,
'fcvt.lu.d': cost_fpu,
'fcvt.lu.s': cost_fpu,
'fcvt.s.d': cost_fpu,
'fcvt.s.l': cost_fpu,
'fcvt.s.lu': cost_fpu,
'fcvt.s.w': cost_fpu,
'fcvt.s.wu': cost_fpu,
'fcvt.w.d': cost_fpu,
'fcvt.w.s': cost_fpu,
'fcvt.wu.d': cost_fpu,
'fcvt.wu.s': cost_fpu,
'fdiv.d': cost_fpu,
'fdiv.s': cost_fpu,
'feq.d': cost_fpu,
'feq.s': cost_fpu,
'fge.d': cost_fpu,
'fge.s': cost_fpu,
'fgt.d': cost_fpu,
'fgt.s': cost_fpu,
'fld': cost_fpu_load_store,
'fle.d': cost_fpu,
'fle.s': cost_fpu,
'flt.d': cost_fpu,
'flt.s': cost_fpu,
'flw': cost_fpu_load_store,
'fmadd.d': cost_fpu,
'fmadd.s': cost_fpu,
'fmul.d': cost_fpu,
'fmul.s': cost_fpu,
'fmv.d': cost_fpu,
'fmv.d.x': cost_fpu,
'fmv.s': cost_fpu,
'fmv.s.x': cost_fpu,
'fmv.w.x': cost_fpu,
'fmv.x.s': cost_fpu,
'fneg.d': cost_fpu,
'fneg.s': cost_fpu,
#'fnmsub.s': cost_fpu, # not found yet
'fnmsub.d': cost_fpu,
'fsd': cost_fpu_load_store,
'fsub.d': cost_fpu,
'fsub.s': cost_fpu,
'fsw': cost_fpu_load_store,
'j': cost_branch,
'jr': cost_branch,
'lb': cost_load_store,
'lbu': cost_load_store,
'ld': cost_load_store,
'lh': cost_load_store,
'lhu': cost_load_store,
'lui': cost_alu,
'lw': cost_load_store,
'lwu': cost_load_store,
'mul': cost_mul,
'mulh': cost_mul,
'mulhu': cost_mul,
'mulw': cost_mul,
'mv': cost_alu,
'neg': cost_alu,
'negw': cost_alu,
'nop': cost_alu,
'not': cost_alu,
'or': cost_alu,
'ori': cost_alu,
'rem': cost_div,
'remu': cost_div,
'remuw': cost_div,
'remw': cost_div,
'ret': cost_ret,
'sb': cost_load_store,
'sd': cost_load_store,
'seqz': cost_alu,
'sext': cost_alu,
'sext.w': cost_alu,
'sgt': cost_alu,
'sgtu': cost_alu,
'sgtz': cost_alu,
'sh': cost_load_store,
'sll': cost_alu,
'slli': cost_alu,
'slliw': cost_alu,
'sllw': cost_alu,
'slt': cost_alu,
'slti': cost_alu,
'sltiu': cost_alu,
'sltu': cost_alu,
'snez': cost_alu,
'sra': cost_alu,
'srai': cost_alu,
'sraiw': cost_alu,
'sraw': cost_alu,
'srl': cost_alu,
'srli': cost_alu,
'srliw': cost_alu,
'srlw': cost_alu,
'sub': cost_alu,
'subw': cost_alu,
'sw': cost_load_store,
'xor': cost_alu,
'xori': cost_alu,
}[instr]
def compile(compiler, arch, abi, filename):
if compiler.endswith('gcc'):
prog = 'riscv64-unknown-linux-gnu-gcc'
opts = []
elif compiler.endswith('clang'):
prog = 'clang'
opts = [
'-Wno-literal-conversion',
'-Wno-implicit-int-float-conversion'
]
if arch == 'rv64gc':
opts.append('--target=riscv64')
elif arch == 'rv32gc':
opts.append('--target=riscv32')
else:
assert False, 'unsupported arch'
else:
assert False, 'unsupported compiler'
opts = opts + [
'-Werror=implicit-int',
'-Wno-tautological-compare',
'-Wno-overflow',
'-Wno-constant-conversion',
'-Wno-unsequenced',
f'-march={arch}',
f'-mabi={abi}',
'-O2', '-S', '-o', '-', filename
]
r = subprocess.check_output([prog] + opts).decode('utf-8')
return r
def get_cost(asm):
cost_total = 0
for line in filter_asm(asm):
cost = instr_cost(line)
cost_total = cost_total + cost
return cost_total
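# End-to-end sketch (hypothetical snippet): labels are filtered out, each
# remaining instruction is priced, and the sum is the program cost.
#   get_cost('test:\n\tlw a0, 0(a1)\n\taddi a0, a0, 1\n\tret\n')  ->  5
#   (load 2 + alu 1 + ret 2)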
class Context:
def __init__(self):
self.var_counter = 0
self.vars = []
def gen_var(self, loop_counter = False):
if not loop_counter:
v = f'v{self.var_counter}'
else:
v = f'i{self.var_counter}'
self.var_counter = self.var_counter + 1
self.vars.append(v)
return v
def gen_vars(self, num):
return [self.gen_var() for i in range(randint(1, num))]
def rand_var(self):
return choice(self.vars)
def copy(self):
ctx = Context()
ctx.var_counter = self.var_counter
ctx.vars = self.vars.copy()
return ctx
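# Usage sketch: variable names are deterministic ('v0', 'v1', ...), and
# copying the context lets a loop body declare fresh names without leaking
# them into the enclosing scope:
#   ctx = Context(); ctx.gen_var()        ->  'v0'
#   inner = ctx.copy(); inner.gen_var()   ->  'v1'  (not visible to ctx)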
def gen_type():
return choice([
'char', 'short', 'int', 'long', 'long long',
'float', 'double'
])
def gen_type_integer():
signed = choice(['signed', 'unsigned'])
ty = choice(['char', 'short', 'int', 'long', 'long long'])
return f'{signed} {ty}'
def gen_cast_integer():
return f'({gen_type_integer()})'
def gen_expr_literal_int_zero():
return 0
def gen_expr_literal_int_12_bit():
return randrange(-2048, 2048)
def gen_expr_literal_int_20_bit_up():
return randrange(0, 2**20) << 12
def gen_expr_literal_int_32_bit():
return randrange(-2**31, 2**31)
def gen_expr_literal_int_64_bit():
return randrange(-2**63, 2**63)
def gen_expr_literal_float():
return uniform(-1_000_000, 1_000_000)
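# Editor's note: the integer buckets mirror RISC-V immediate encodings (the
# 12-bit addi range, lui-only upper bits, full 32/64-bit values), so each
# constant-materialization path in the two backends gets exercised.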
def gen_expr_literal(ctx = None):
v = choice([
gen_expr_literal_int_zero,
gen_expr_literal_int_12_bit,
gen_expr_literal_int_20_bit_up,
gen_expr_literal_int_32_bit,
gen_expr_literal_int_64_bit,
gen_expr_literal_float,
])()
return v
def gen_expr_var(ctx):
return ctx.rand_var()
def gen_expr_unary(ctx):
a = ctx.rand_var()
op = choice(['-', '~', '!', '++', '--'])
cast = ''
if op == '~':
# must be applied to an integer operand
cast = gen_cast_integer()
return f'{op}{cast}{a}'
def gen_expr_binary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
ops = [
'^', '&', '|', '<<', '>>',
'+', '-',
'*', '/', '%',
'==', '!=',
'<', '<=', '>', '>=',
'&&', '||'
]
op = choice(ops)
cast1 = ''
cast2 = ''
if op in ['^', '&', '|', '%', '<<', '>>']:
# must be applied to integer operands
cast1 = gen_cast_integer()
cast2 = gen_cast_integer()
return f'{cast1}{a} {op} {cast2}{b}'
def gen_expr_ternary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
c = ctx.rand_var()
return f'{a} ? {b} : {c}'
def gen_expr(ctx):
return choice([
gen_expr_var,
gen_expr_literal,
gen_expr_unary,
gen_expr_binary,
gen_expr_ternary,
])(ctx)
def gen_stmt_decl(ctx):
t = gen_type()
e = gen_expr(ctx)
v = ctx.gen_var()
s = f'{t} {v} = {e};'
return s
def gen_stmt_assign(ctx):
# avoid assigning to loop counters
while True:
v = ctx.rand_var()
if v[0] != 'i':
break
e = gen_expr(ctx)
return f'{v} = {e};'
def gen_stmt_loop(ctx):
loop_ctx = ctx.copy()
t = gen_type_integer()
i = loop_ctx.gen_var(loop_counter = True)
end = randrange(1, 127)
return (
f'for({t} {i} = 0; {i} < {end}; ++{i}) {{\n'
f'{gen_block(loop_ctx)}'
f'}}'
)
def gen_stmt(ctx):
stmt = choice([
gen_stmt_decl,
gen_stmt_assign,
gen_stmt_loop,
])(ctx)
return f'{stmt}\n'
def gen_block(ctx):
block = ''
for i in range(georand(0.5)):
block = block + gen_stmt(ctx)
return block
def gen_func_args(ctx):
n = georand(0.2) + 1
args = [f'{gen_type()} {v}' for v in ctx.gen_vars(n)]
return ', '.join(args)
def gen_func(ctx):
return (
f'{gen_type()} test({gen_func_args(ctx)}) {{\n'
f'{gen_block(ctx)}'
f'return {ctx.rand_var()};\n'
f'}}'
)
def gen_global(ctx):
g = ctx.gen_var()
return f'{gen_type()} {g} = {gen_expr_literal()};'
def gen_globals(ctx):
globals = ''
for i in range(georand(1.0)):
g = gen_global(ctx)
globals = f'{globals}{g}\n'
return globals
def gen_unit(ctx):
# for now, one function with some parameters and access to some globals
unit = gen_globals(ctx)
unit = f'{unit}{gen_func(ctx)}\n'
return unit
def gen_test(filename):
with open(filename, 'w') as f:
ctx = Context()
print(gen_unit(ctx), file=f)
def test_file(filename, arch, abi):
asm_gcc = compile('gcc', arch, abi, filename)
c1 = get_cost(asm_gcc)
asm_clang = compile('clang', arch, abi, filename)
c2 = get_cost(asm_clang)
return c1, c2, asm_gcc, asm_clang
def read_file(fn):
with open(fn) as f:
return f.read()
def write_config(filename, arch, abi, cost1, cost2):
config = configparser.ConfigParser()
config.add_section('scenario')
config['scenario']['filename'] = filename
config['scenario']['arch'] = str(arch)
config['scenario']['abi'] = str(abi)
config['scenario']['cost1'] = str(cost1)
config['scenario']['cost2'] = str(cost2)
with open('scenario.ini', 'w') as f:
config.write(f)
return config
def write_result(f, config, asm1, asm2):
config.write(f)
filename = config['scenario']['filename']
print(f'### Source:\n{read_file(filename)}', file=f)
print(f'### GCC:\n{asm1}', file=f)
print(f'### Clang:\n{asm2}', file=f)
def run_test(filename, arch, abi):
asm1 = compile('gcc', arch, abi, filename)
c1 = get_cost(asm1)
asm2 = compile('clang', arch, abi, filename)
c2 = get_cost(asm2)
return c1, c2, asm1, asm2
def reduce_case(filename):
subprocess.check_output(['creduce', 'test.py', filename])
def main():
while True:
id = random_id()
source_file = f'case-{id}.c'
case_file = f'case-{id}.txt'
gen_test(source_file)
scenarios = [
['rv32gc', 'ilp32d'],
['rv64gc', 'lp64d'],
]
shuffle(scenarios)
passed = False
for arch, abi in scenarios:
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
print(c1, c2)
if c2 > c1:
passed = True
config = write_config(source_file, arch, abi, c1, c2)
print('reducing')
reduce_case(source_file)
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
write_result(sys.stdout, config, asm1, asm2)
write_result(open(case_file, 'w'), config, asm1, asm2)
break
if not passed:
os.remove(source_file)
if __name__ == '__main__':
main()
fim_type: conditional_block
longfruit.py | #!/usr/bin/env python3
from random import choice, choices, expovariate, randint, randrange, shuffle, uniform
import configparser
import os
import re
import string
import subprocess
import sys
def georand(lmb):
# roughly geometrically distributed
return int(expovariate(lmb))
def random_id():
return ''.join(choices(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits, k=8))
def filter_asm(asm):
for line in asm.splitlines():
# remove comments
m = re.match(r'^([^#]*)#(.*)$', line)
if m:
line = m.group(1)
line = line.strip()
# skip empty lines
if len(line) == 0:
continue
# skip directives
if line.startswith('.'):
continue
# skip labels
if line.endswith(':'):
continue
yield line
def instr_cost(line):
parts = re.split('[ \t,]', line)
instr = parts[0]
cost_load_store = 2
cost_fpu_load_store = cost_load_store
cost_alu = 1
cost_fpu = 1
cost_branch = 3
cost_mul = cost_branch
cost_div = 8 # TODO: clang generates long instr. sequences instead of div
cost_call = 20 # TODO: builtins
cost_ret = 2
cost_ebreak = 0 # should be unreachable if no undef. behavior is generated
if instr == 'li':
imm = int(parts[-1])
if imm >= -2048 and imm < 2048: # addi
return cost_alu
elif (imm & 0xFFF) == 0: # lui
return cost_alu
else: # lui + addi
return cost_alu * 2
return {
'add': cost_alu,
'addi': cost_alu,
'addiw': cost_alu,
'addw': cost_alu,
'and': cost_alu,
'andi': cost_alu,
'beq': cost_branch,
'beqz': cost_branch,
'bge': cost_branch,
'bgeu': cost_branch,
'bgez': cost_branch,
'bgt': cost_branch,
'bgtu': cost_branch,
'bgtz': cost_branch,
'ble': cost_branch,
'bleu': cost_branch,
'blez': cost_branch,
'blt': cost_branch,
'bltu': cost_branch,
'bltz': cost_branch,
'bne': cost_branch,
'bnez': cost_branch,
'call': cost_call,
'div': cost_div,
'divu': cost_div,
'divuw': cost_div,
'divw': cost_div,
'ebreak': cost_ebreak,
'fadd.d': cost_fpu,
'fadd.s': cost_fpu,
'fcvt.d.l': cost_fpu,
'fcvt.d.lu': cost_fpu,
'fcvt.d.s': cost_fpu,
'fcvt.d.w': cost_fpu,
'fcvt.d.wu': cost_fpu,
'fcvt.l.d': cost_fpu,
'fcvt.l.s': cost_fpu,
'fcvt.lu.d': cost_fpu,
'fcvt.lu.s': cost_fpu,
'fcvt.s.d': cost_fpu,
'fcvt.s.l': cost_fpu,
'fcvt.s.lu': cost_fpu,
'fcvt.s.w': cost_fpu,
'fcvt.s.wu': cost_fpu,
'fcvt.w.d': cost_fpu,
'fcvt.w.s': cost_fpu,
'fcvt.wu.d': cost_fpu,
'fcvt.wu.s': cost_fpu,
'fdiv.d': cost_fpu,
'fdiv.s': cost_fpu,
'feq.d': cost_fpu,
'feq.s': cost_fpu,
'fge.d': cost_fpu,
'fge.s': cost_fpu,
'fgt.d': cost_fpu,
'fgt.s': cost_fpu,
'fld': cost_fpu_load_store,
'fle.d': cost_fpu,
'fle.s': cost_fpu,
'flt.d': cost_fpu,
'flt.s': cost_fpu,
'flw': cost_fpu_load_store,
'fmadd.d': cost_fpu,
'fmadd.s': cost_fpu,
'fmul.d': cost_fpu,
'fmul.s': cost_fpu,
'fmv.d': cost_fpu,
'fmv.d.x': cost_fpu,
'fmv.s': cost_fpu,
'fmv.s.x': cost_fpu,
'fmv.w.x': cost_fpu,
'fmv.x.s': cost_fpu,
'fneg.d': cost_fpu,
'fneg.s': cost_fpu,
#'fnmsub.s': cost_fpu, # not found yet
'fnmsub.d': cost_fpu,
'fsd': cost_fpu_load_store,
'fsub.d': cost_fpu,
'fsub.s': cost_fpu,
'fsw': cost_fpu_load_store,
'j': cost_branch,
'jr': cost_branch,
'lb': cost_load_store,
'lbu': cost_load_store,
'ld': cost_load_store,
'lh': cost_load_store,
'lhu': cost_load_store,
'lui': cost_alu,
'lw': cost_load_store,
'lwu': cost_load_store,
'mul': cost_mul,
'mulh': cost_mul,
'mulhu': cost_mul,
'mulw': cost_mul,
'mv': cost_alu,
'neg': cost_alu,
'negw': cost_alu,
'nop': cost_alu,
'not': cost_alu,
'or': cost_alu,
'ori': cost_alu,
'rem': cost_div,
'remu': cost_div,
'remuw': cost_div,
'remw': cost_div,
'ret': cost_ret,
'sb': cost_load_store,
'sd': cost_load_store,
'seqz': cost_alu,
'sext': cost_alu,
'sext.w': cost_alu,
'sgt': cost_alu,
'sgtu': cost_alu,
'sgtz': cost_alu,
'sh': cost_load_store,
'sll': cost_alu,
'slli': cost_alu,
'slliw': cost_alu,
'sllw': cost_alu,
'slt': cost_alu,
'slti': cost_alu,
'sltiu': cost_alu,
'sltu': cost_alu,
'snez': cost_alu,
'sra': cost_alu,
'srai': cost_alu,
'sraiw': cost_alu,
'sraw': cost_alu,
'srl': cost_alu,
'srli': cost_alu,
'srliw': cost_alu,
'srlw': cost_alu,
'sub': cost_alu,
'subw': cost_alu,
'sw': cost_load_store,
'xor': cost_alu,
'xori': cost_alu,
}[instr]
def compile(compiler, arch, abi, filename):
if compiler.endswith('gcc'):
prog = 'riscv64-unknown-linux-gnu-gcc'
opts = []
elif compiler.endswith('clang'):
prog = 'clang'
opts = [
'-Wno-literal-conversion',
'-Wno-implicit-int-float-conversion'
]
if arch == 'rv64gc':
opts.append('--target=riscv64')
elif arch == 'rv32gc':
opts.append('--target=riscv32')
else:
assert False, 'unsupported arch'
else:
assert False, 'unsupported compiler'
opts = opts + [
'-Werror=implicit-int',
'-Wno-tautological-compare',
'-Wno-overflow',
'-Wno-constant-conversion',
'-Wno-unsequenced',
f'-march={arch}',
f'-mabi={abi}',
'-O2', '-S', '-o', '-', filename
]
r = subprocess.check_output([prog] + opts).decode('utf-8')
return r
def get_cost(asm):
cost_total = 0
for line in filter_asm(asm):
cost = instr_cost(line)
cost_total = cost_total + cost
return cost_total
class Context:
def __init__(self):
self.var_counter = 0
self.vars = []
def gen_var(self, loop_counter = False):
if not loop_counter:
v = f'v{self.var_counter}'
else:
v = f'i{self.var_counter}'
self.var_counter = self.var_counter + 1
self.vars.append(v)
return v
def gen_vars(self, num):
return [self.gen_var() for i in range(randint(1, num))]
def rand_var(self):
return choice(self.vars)
def copy(self):
ctx = Context()
ctx.var_counter = self.var_counter
ctx.vars = self.vars.copy()
return ctx
def gen_type():
return choice([
'char', 'short', 'int', 'long', 'long long',
'float', 'double'
])
def gen_type_integer():
signed = choice(['signed', 'unsigned'])
ty = choice(['char', 'short', 'int', 'long', 'long long'])
return f'{signed} {ty}'
def gen_cast_integer():
return f'({gen_type_integer()})'
def gen_expr_literal_int_zero():
return 0
def gen_expr_literal_int_12_bit():
return randrange(-2048, 2048)
def gen_expr_literal_int_20_bit_up():
return randrange(0, 2**20) << 12
def gen_expr_literal_int_32_bit():
return randrange(-2**31, 2**31)
def gen_expr_literal_int_64_bit():
return randrange(-2**63, 2**63)
def gen_expr_literal_float():
return uniform(-1_000_000, 1_000_000)
def gen_expr_literal(ctx = None):
v = choice([
gen_expr_literal_int_zero,
gen_expr_literal_int_12_bit,
gen_expr_literal_int_20_bit_up,
gen_expr_literal_int_32_bit,
gen_expr_literal_int_64_bit,
gen_expr_literal_float,
])()
return v
def gen_expr_var(ctx):
return ctx.rand_var()
def gen_expr_unary(ctx):
a = ctx.rand_var()
op = choice(['-', '~', '!', '++', '--'])
cast = ''
if op == '~':
# must be applied to an integer operand
cast = gen_cast_integer()
return f'{op}{cast}{a}'
def gen_expr_binary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
ops = [
'^', '&', '|', '<<', '>>',
'+', '-',
'*', '/', '%',
'==', '!=',
'<', '<=', '>', '>=',
'&&', '||'
]
op = choice(ops)
cast1 = ''
cast2 = ''
if op in ['^', '&', '|', '%', '<<', '>>']:
# must be applied to integer operands
cast1 = gen_cast_integer()
cast2 = gen_cast_integer()
return f'{cast1}{a} {op} {cast2}{b}'
def gen_expr_ternary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
c = ctx.rand_var()
return f'{a} ? {b} : {c}'
def gen_expr(ctx):
return choice([
gen_expr_var,
gen_expr_literal,
gen_expr_unary,
gen_expr_binary,
gen_expr_ternary,
])(ctx)
def gen_stmt_decl(ctx):
t = gen_type()
e = gen_expr(ctx)
v = ctx.gen_var()
s = f'{t} {v} = {e};'
return s
def gen_stmt_assign(ctx):
# avoid assigning to loop counters
while True:
v = ctx.rand_var()
if v[0] != 'i':
break
e = gen_expr(ctx)
return f'{v} = {e};'
def gen_stmt_loop(ctx):
loop_ctx = ctx.copy()
t = gen_type_integer()
i = loop_ctx.gen_var(loop_counter = True)
end = randrange(1, 127)
return (
f'for({t} {i} = 0; {i} < {end}; ++{i}) {{\n'
f'{gen_block(loop_ctx)}'
f'}}'
)
def gen_stmt(ctx):
stmt = choice([
gen_stmt_decl,
gen_stmt_assign,
gen_stmt_loop,
])(ctx)
return f'{stmt}\n'
def gen_block(ctx):
block = ''
for i in range(georand(0.5)):
block = block + gen_stmt(ctx)
return block
def gen_func_args(ctx):
n = georand(0.2) + 1
args = [f'{gen_type()} {v}' for v in ctx.gen_vars(n)]
return ', '.join(args)
def gen_func(ctx):
return (
f'{gen_type()} test({gen_func_args(ctx)}) {{\n'
f'{gen_block(ctx)}'
f'return {ctx.rand_var()};\n'
f'}}'
)
def gen_global(ctx):
g = ctx.gen_var()
return f'{gen_type()} {g} = {gen_expr_literal()};'
def gen_globals(ctx):
globals = ''
for i in range(georand(1.0)):
g = gen_global(ctx)
globals = f'{globals}{g}\n'
return globals
def gen_unit(ctx):
# for now, one function with some parameters and access to some globals
unit = gen_globals(ctx)
unit = f'{unit}{gen_func(ctx)}\n'
return unit
def gen_test(filename):
with open(filename, 'w') as f:
ctx = Context()
print(gen_unit(ctx), file=f)
def test_file(filename, arch, abi):
asm_gcc = compile('gcc', arch, abi, filename)
c1 = get_cost(asm_gcc)
asm_clang = compile('clang', arch, abi, filename)
c2 = get_cost(asm_clang)
return c1, c2, asm_gcc, asm_clang
def read_file(fn):
with open(fn) as f:
return f.read()
def write_config(filename, arch, abi, cost1, cost2):
config = configparser.ConfigParser()
config.add_section('scenario')
config['scenario']['filename'] = filename
config['scenario']['arch'] = str(arch)
config['scenario']['abi'] = str(abi)
config['scenario']['cost1'] = str(cost1)
config['scenario']['cost2'] = str(cost2)
with open('scenario.ini', 'w') as f:
config.write(f)
return config
def write_result(f, config, asm1, asm2):
config.write(f)
filename = config['scenario']['filename']
print(f'### Source:\n{read_file(filename)}', file=f)
print(f'### GCC:\n{asm1}', file=f)
print(f'### Clang:\n{asm2}', file=f)
def run_test(filename, arch, abi):
asm1 = compile('gcc', arch, abi, filename)
c1 = get_cost(asm1)
asm2 = compile('clang', arch, abi, filename)
c2 = get_cost(asm2)
return c1, c2, asm1, asm2
def reduce_case(filename):
subprocess.check_output(['creduce', 'test.py', filename])
def main():
while True:
id = random_id()
source_file = f'case-{id}.c'
case_file = f'case-{id}.txt'
gen_test(source_file)
scenarios = [
['rv32gc', 'ilp32d'],
['rv64gc', 'lp64d'],
]
shuffle(scenarios)
passed = False
for arch, abi in scenarios:
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
print(c1, c2)
if c2 > c1:
passed = True
config = write_config(source_file, arch, abi, c1, c2)
print('reducing')
reduce_case(source_file)
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
write_result(sys.stdout, config, asm1, asm2)
write_result(open(case_file, 'w'), config, asm1, asm2)
break
if not passed:
os.remove(source_file)
if __name__ == '__main__':
main()
fim_type: identifier_body
longfruit.py | #!/usr/bin/env python3
from random import choice, choices, expovariate, randint, randrange, shuffle, uniform
import configparser
import os
import re
import string
import subprocess
import sys
def georand(lmb):
# roughly geometrically distributed
return int(expovariate(lmb))
def random_id():
return ''.join(choices(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits, k=8))
def filter_asm(asm):
for line in asm.splitlines():
# remove comments
m = re.match(r'^([^#]*)#(.*)$', line)
if m:
line = m.group(1)
line = line.strip()
# skip empty lines
if len(line) == 0:
continue
# skip directives
if line.startswith('.'):
continue
# skip labels
if line.endswith(':'):
continue
yield line
def instr_cost(line):
parts = re.split('[ \t,]', line)
instr = parts[0]
cost_load_store = 2
cost_fpu_load_store = cost_load_store
cost_alu = 1
cost_fpu = 1
cost_branch = 3
cost_mul = cost_branch
cost_div = 8 # TODO: clang generates long instr. sequences instead of div
cost_call = 20 # TODO: builtins
cost_ret = 2
cost_ebreak = 0 # should be unreachable if no undef. behavior is generated
if instr == 'li':
imm = int(parts[-1])
if imm >= -2048 and imm < 2048: # addi
return cost_alu
elif (imm & 0xFFF) == 0: # lui
return cost_alu
else: # lui + addi
return cost_alu * 2
return {
'add': cost_alu,
'addi': cost_alu,
'addiw': cost_alu,
'addw': cost_alu,
'and': cost_alu,
'andi': cost_alu,
'beq': cost_branch,
'beqz': cost_branch,
'bge': cost_branch,
'bgeu': cost_branch,
'bgez': cost_branch,
'bgt': cost_branch,
'bgtu': cost_branch,
'bgtz': cost_branch,
'ble': cost_branch,
'bleu': cost_branch,
'blez': cost_branch,
'blt': cost_branch,
'bltu': cost_branch,
'bltz': cost_branch,
'bne': cost_branch,
'bnez': cost_branch,
'call': cost_call,
'div': cost_div,
'divu': cost_div,
'divuw': cost_div,
'divw': cost_div,
'ebreak': cost_ebreak,
'fadd.d': cost_fpu,
'fadd.s': cost_fpu,
'fcvt.d.l': cost_fpu,
'fcvt.d.lu': cost_fpu,
'fcvt.d.s': cost_fpu,
'fcvt.d.w': cost_fpu,
'fcvt.d.wu': cost_fpu,
'fcvt.l.d': cost_fpu,
'fcvt.l.s': cost_fpu,
'fcvt.lu.d': cost_fpu,
'fcvt.lu.s': cost_fpu,
'fcvt.s.d': cost_fpu,
'fcvt.s.l': cost_fpu,
'fcvt.s.lu': cost_fpu,
'fcvt.s.w': cost_fpu,
'fcvt.s.wu': cost_fpu,
'fcvt.w.d': cost_fpu,
'fcvt.w.s': cost_fpu,
'fcvt.wu.d': cost_fpu,
'fcvt.wu.s': cost_fpu,
'fdiv.d': cost_fpu,
'fdiv.s': cost_fpu,
'feq.d': cost_fpu,
'feq.s': cost_fpu,
'fge.d': cost_fpu,
'fge.s': cost_fpu,
'fgt.d': cost_fpu,
'fgt.s': cost_fpu,
'fld': cost_fpu_load_store,
'fle.d': cost_fpu,
'fle.s': cost_fpu,
'flt.d': cost_fpu,
'flt.s': cost_fpu,
'flw': cost_fpu_load_store,
'fmadd.d': cost_fpu,
'fmadd.s': cost_fpu,
'fmul.d': cost_fpu,
'fmul.s': cost_fpu,
'fmv.d': cost_fpu,
'fmv.d.x': cost_fpu,
'fmv.s': cost_fpu,
'fmv.s.x': cost_fpu,
'fmv.w.x': cost_fpu,
'fmv.x.s': cost_fpu,
'fneg.d': cost_fpu,
'fneg.s': cost_fpu,
#'fnmsub.s': cost_fpu, # not found yet
'fnmsub.d': cost_fpu,
'fsd': cost_fpu_load_store,
'fsub.d': cost_fpu,
'fsub.s': cost_fpu,
'fsw': cost_fpu_load_store,
'j': cost_branch,
'jr': cost_branch,
'lb': cost_load_store,
'lbu': cost_load_store,
'ld': cost_load_store,
'lh': cost_load_store,
'lhu': cost_load_store,
'lui': cost_alu,
'lw': cost_load_store,
'lwu': cost_load_store,
'mul': cost_mul,
'mulh': cost_mul,
'mulhu': cost_mul,
'mulw': cost_mul,
'mv': cost_alu,
'neg': cost_alu,
'negw': cost_alu,
'nop': cost_alu,
'not': cost_alu,
'or': cost_alu,
'ori': cost_alu,
'rem': cost_div,
'remu': cost_div,
'remuw': cost_div,
'remw': cost_div,
'ret': cost_ret,
'sb': cost_load_store,
'sd': cost_load_store,
'seqz': cost_alu,
'sext': cost_alu,
'sext.w': cost_alu,
'sgt': cost_alu,
'sgtu': cost_alu,
'sgtz': cost_alu,
'sh': cost_load_store,
'sll': cost_alu,
'slli': cost_alu,
'slliw': cost_alu,
'sllw': cost_alu,
'slt': cost_alu,
'slti': cost_alu,
'sltiu': cost_alu,
'sltu': cost_alu,
'snez': cost_alu,
'sra': cost_alu,
'srai': cost_alu,
'sraiw': cost_alu,
'sraw': cost_alu,
'srl': cost_alu,
'srli': cost_alu,
'srliw': cost_alu,
'srlw': cost_alu,
'sub': cost_alu,
'subw': cost_alu,
'sw': cost_load_store,
'xor': cost_alu,
'xori': cost_alu,
}[instr]
def compile(compiler, arch, abi, filename):
if compiler.endswith('gcc'):
prog = 'riscv64-unknown-linux-gnu-gcc'
opts = []
elif compiler.endswith('clang'):
prog = 'clang'
opts = [
'-Wno-literal-conversion',
'-Wno-implicit-int-float-conversion'
]
if arch == 'rv64gc':
opts.append('--target=riscv64')
elif arch == 'rv32gc':
opts.append('--target=riscv32')
else:
assert False, 'unsupported arch'
else:
assert False, 'unsupported compiler'
opts = opts + [
'-Werror=implicit-int',
'-Wno-tautological-compare',
'-Wno-overflow',
'-Wno-constant-conversion',
'-Wno-unsequenced',
f'-march={arch}',
f'-mabi={abi}',
'-O2', '-S', '-o', '-', filename
]
r = subprocess.check_output([prog] + opts).decode('utf-8')
return r
def get_cost(asm):
cost_total = 0
for line in filter_asm(asm):
cost = instr_cost(line)
cost_total = cost_total + cost
return cost_total
class Context:
def __init__(self):
self.var_counter = 0
self.vars = []
def gen_var(self, loop_counter = False):
if not loop_counter:
v = f'v{self.var_counter}'
else:
v = f'i{self.var_counter}'
self.var_counter = self.var_counter + 1
self.vars.append(v)
return v
def gen_vars(self, num):
return [self.gen_var() for i in range(randint(1, num))]
def rand_var(self):
return choice(self.vars)
def copy(self):
ctx = Context()
ctx.var_counter = self.var_counter
ctx.vars = self.vars.copy()
return ctx
def gen_type():
return choice([
'char', 'short', 'int', 'long', 'long long',
'float', 'double'
])
def gen_type_integer():
signed = choice(['signed', 'unsigned'])
ty = choice(['char', 'short', 'int', 'long', 'long long'])
return f'{signed} {ty}'
def gen_cast_integer():
return f'({gen_type_integer()})'
def gen_expr_literal_int_zero():
return 0
def gen_expr_literal_int_12_bit():
return randrange(-2048, 2048)
def gen_expr_literal_int_20_bit_up():
return randrange(0, 2**20) << 12
def gen_expr_literal_int_32_bit():
return randrange(-2**31, 2**31)
def gen_expr_literal_int_64_bit():
return randrange(-2**63, 2**63)
def gen_expr_literal_float():
return uniform(-1_000_000, 1_000_000)
def gen_expr_literal(ctx = None):
v = choice([
gen_expr_literal_int_zero,
gen_expr_literal_int_12_bit,
gen_expr_literal_int_20_bit_up,
gen_expr_literal_int_32_bit,
gen_expr_literal_int_64_bit,
gen_expr_literal_float,
])()
return v
def gen_expr_var(ctx):
return ctx.rand_var()
def gen_expr_unary(ctx):
a = ctx.rand_var()
op = choice(['-', '~', '!', '++', '--'])
cast = ''
if op == '~':
# must be applied to an integer operand
cast = gen_cast_integer()
return f'{op}{cast}{a}'
def gen_expr_binary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
ops = [
'^', '&', '|', '<<', '>>',
'+', '-',
'*', '/', '%',
'==', '!=',
'<', '<=', '>', '>=',
'&&', '||'
]
op = choice(ops)
cast1 = ''
cast2 = ''
if op in ['^', '&', '|', '%', '<<', '>>']:
# must be applied to integer operands
cast1 = gen_cast_integer()
cast2 = gen_cast_integer()
return f'{cast1}{a} {op} {cast2}{b}'
def gen_expr_ternary(ctx):
a = ctx.rand_var()
b = ctx.rand_var()
c = ctx.rand_var()
return f'{a} ? {b} : {c}'
def gen_expr(ctx):
return choice([
gen_expr_var,
gen_expr_literal,
gen_expr_unary,
gen_expr_binary,
gen_expr_ternary,
])(ctx)
def gen_stmt_decl(ctx):
t = gen_type()
e = gen_expr(ctx)
v = ctx.gen_var()
s = f'{t} {v} = {e};'
return s
def gen_stmt_assign(ctx):
# avoid assigning to loop counters
while True:
v = ctx.rand_var()
if v[0] != 'i':
break
e = gen_expr(ctx)
return f'{v} = {e};'
def gen_stmt_loop(ctx):
loop_ctx = ctx.copy()
t = gen_type_integer()
i = loop_ctx.gen_var(loop_counter = True)
end = randrange(1, 127)
return (
f'for({t} {i} = 0; {i} < {end}; ++{i}) {{\n'
f'{gen_block(loop_ctx)}'
f'}}'
)
def gen_stmt(ctx):
stmt = choice([
gen_stmt_decl,
gen_stmt_assign,
gen_stmt_loop,
])(ctx)
return f'{stmt}\n'
def gen_block(ctx):
block = ''
for i in range(georand(0.5)):
block = block + gen_stmt(ctx)
return block
def gen_func_args(ctx):
n = georand(0.2) + 1
args = [f'{gen_type()} {v}' for v in ctx.gen_vars(n)]
return ', '.join(args)
def gen_func(ctx):
return (
f'{gen_type()} test({gen_func_args(ctx)}) {{\n'
f'{gen_block(ctx)}'
f'return {ctx.rand_var()};\n'
f'}}'
)
def gen_global(ctx):
g = ctx.gen_var()
return f'{gen_type()} {g} = {gen_expr_literal()};'
def gen_globals(ctx):
globals = ''
for i in range(georand(1.0)):
g = gen_global(ctx)
globals = f'{globals}{g}\n'
return globals
def gen_unit(ctx):
# for now, one function with some parameters and access to some globals
unit = gen_globals(ctx)
unit = f'{unit}{gen_func(ctx)}\n'
return unit
def gen_test(filename):
with open(filename, 'w') as f:
ctx = Context()
print(gen_unit(ctx), file=f)
def test_file(filename, arch, abi):
asm_gcc = compile('gcc', arch, abi, filename)
c1 = get_cost(asm_gcc)
asm_clang = compile('clang', arch, abi, filename)
c2 = get_cost(asm_clang)
return c1, c2, asm_gcc, asm_clang
def read_file(fn):
with open(fn) as f:
return f.read()
def write_config(filename, arch, abi, cost1, cost2):
config = configparser.ConfigParser()
config.add_section('scenario')
config['scenario']['filename'] = filename
config['scenario']['arch'] = str(arch)
config['scenario']['abi'] = str(abi)
config['scenario']['cost1'] = str(cost1)
config['scenario']['cost2'] = str(cost2)
with open('scenario.ini', 'w') as f:
config.write(f)
return config
def write_result(f, config, asm1, asm2):
config.write(f)
filename = config['scenario']['filename']
print(f'### Source:\n{read_file(filename)}', file=f)
print(f'### GCC:\n{asm1}', file=f)
print(f'### Clang:\n{asm2}', file=f)
def run_test(filename, arch, abi):
asm1 = compile('gcc', arch, abi, filename)
c1 = get_cost(asm1)
asm2 = compile('clang', arch, abi, filename)
c2 = get_cost(asm2)
return c1, c2, asm1, asm2
def reduce_case(filename):
subprocess.check_output(['creduce', 'test.py', filename])
def main():
while True:
id = random_id()
source_file = f'case-{id}.c'
case_file = f'case-{id}.txt'
gen_test(source_file)
scenarios = [
['rv32gc', 'ilp32d'],
['rv64gc', 'lp64d'],
]
shuffle(scenarios)
passed = False
for arch, abi in scenarios:
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
print(c1, c2)
if c2 > c1:
passed = True
config = write_config(source_file, arch, abi, c1, c2)
print('reducing')
reduce_case(source_file)
c1, c2, asm1, asm2 = run_test(source_file, arch, abi)
write_result(sys.stdout, config, asm1, asm2)
write_result(open(case_file, 'w'), config, asm1, asm2)
break
if not passed:
os.remove(source_file)
if __name__ == '__main__':
main()
fim_type: identifier_name
main.rs | use {
serde::Deserialize,
serde_json,
serde_repr::Deserialize_repr,
std::{collections::BTreeMap, io::Read, fs::File, process::Command},
};
fn main() -> Result<(), Failure> {
let mut file = File::open("token")?;
let mut token = String::new();
file.read_to_string(&mut token)?;
token.insert_str(0, "token=");
let output = &Command::new("curl").args(&["https://api.todoist.com/sync/v8/sync", "-d", &token, "-d", "sync_token=*", "-d", "resource_types=[\"all\"]"]).output()?.stdout;
let sync: Sync = serde_json::from_slice(&output)?;
let user = User::from(sync);
for task in user.tasks {
println!("- {:?}", task);
}
Ok(())
}
#[derive(Debug)]
enum Failure {
Io(std::io::Error),
Serde(serde_json::Error),
Utf8(std::str::Utf8Error),
}
impl From<std::io::Error> for Failure {
fn from(error: std::io::Error) -> Self {
Self::Io(error)
}
}
impl From<serde_json::Error> for Failure {
fn from(error: serde_json::Error) -> Self {
Self::Serde(error)
}
}
impl From<std::str::Utf8Error> for Failure {
fn from(error: std::str::Utf8Error) -> Self {
Self::Utf8(error)
}
}
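// Editor's sketch (not part of the original file): these From impls are what
// let `main` apply `?` across io, serde and utf8 errors, since `?` inserts a
// `From::from` conversion before returning. Minimal illustration:
//
//     fn read_token() -> Result<String, Failure> {
//         let mut s = String::new();
//         File::open("token")?.read_to_string(&mut s)?; // io::Error -> Failure::Io
//         Ok(s)
//     }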
struct User {
tasks: Vec<Task>,
}
impl From<Sync> for User {
fn from(sync: Sync) -> Self {
let mut tasks = Vec::new();
for item in sync.items {
tasks.push(item.into());
}
Self {
tasks,
}
}
}
#[derive(Debug)]
struct Task {
item: Item,
}
impl From<Item> for Task {
fn from(item: Item) -> Self {
Self { item }
}
}
#[derive(Debug, Deserialize)]
struct Sync {
/// A new synchronization token.
sync_token: String,
/// If this contains all data.
full_sync: bool,
/// A [`UserData`].
user: UserData,
/// An array of [`Project`]s.
projects: Vec<Project>,
/// An array of [`Item`]s.
items: Vec<Item>,
/// An array of [`Note`]s.
notes: Vec<Note>,
/// An array of [`ProjectNote`]s.
project_notes: Vec<ProjectNote>,
/// An array of [`Section`]s.
sections: Vec<Section>,
/// An array of [`Label`]s.
labels: Vec<Label>,
/// An array of [`Filter`]s.
filters: Vec<Filter>,
/// Maps items to their order in the daily agenda.
day_orders: BTreeMap<ItemId, Order>,
/// An array of [`Reminder`]s.
reminders: Vec<Reminder>,
/// The collaborators for all shared projects.
collaborators: Vec<Collaborator>,
/// An array of [`CollaboratorState`]s.
#[serde(default)]
collaborators_states: Vec<CollaboratorState>,
/// An array of [`LiveNotification`]s.
live_notifications: Vec<LiveNotification>,
/// The id of the last [`LiveNotification`] seen by the user.
live_notifications_last_read_id: LiveNotificationId,
/// The [`UserSettings`].
user_settings: UserSettings,
}
#[derive(Debug, Deserialize)]
struct Order(i64);
#[derive(Debug, Deserialize)]
/// A Todoist user.
struct UserData {
/// The default number of minutes for set automatic reminders.
auto_reminder: u64,
/// Link to a 195x195 image of the user's avatar.
avatar_big: String,
/// Link to a 60x60 image of the user's avatar.
avatar_medium: String,
/// Link to a 640x640 image of the user's avatar.
avatar_s640: String,
/// Link to a 35x35 image of the user's avatar.
avatar_small: String,
/// The user's [`BusinessAccountId`].
#[serde(default)]
business_account_id: Option<BusinessAccountId>,
/// The number of tasks set as the user's daily goal.
daily_goal: u64,
/// The user's desired date format.
date_format: DateFormat,
/// If smart date recognition has been disabled.
dateist_inline_disabled: bool,
/// The language expected for the date recognition.
dateist_lang: Option<Language>,
/// The days that the user is off.
days_off: Vec<Day>,
/// The default reminder for the user.
default_reminder: Reminder,
/// The user's email.
email: String,
/// Special internal features that apply to the user.
features: Features,
full_name: String,
id: UserId,
#[serde(default)]
image_id: Option<String>,
inbox_project: ProjectId,
is_biz_admin: bool,
is_premium: bool,
join_date: String,
karma: f64,
karma_trend: KarmaTrend,
lang: Language,
mobile_host: Option<String>,
mobile_number: Option<String>,
next_week: Day,
premium_until: Option<String>,
sort_order: SortOrder,
start_day: Day,
start_page: Page,
#[serde(default)]
team_inbox: Option<ProjectId>,
theme: Theme,
time_format: TimeFormat,
token: String,
tz_info: TimezoneInfo,
weekly_goal: u64,
}
#[derive(Debug, Deserialize)]
struct UserId(u64);
#[derive(Debug, Deserialize)]
struct BusinessAccountId(u64);
#[derive(Debug, Deserialize)]
struct Project {
id: ProjectId,
name: String,
color: Color,
parent_id: Option<ProjectId>,
child_order: Order,
collapsed: Flag,
shared: bool,
is_deleted: Flag,
is_archived: Flag,
is_favorite: Flag,
sync_id: Option<ProjectSyncId>,
#[serde(default)]
inbox_project: bool,
#[serde(default)]
team_inbox: bool,
}
#[derive(Debug, Deserialize)]
struct ProjectId(u64);
#[derive(Debug, Deserialize)]
struct ProjectSyncId(u64);
#[derive(Debug, Deserialize)]
struct Item {
id: ItemId,
user_id: UserId,
project_id: ProjectId,
content: String,
due: Option<Date>,
priority: Priority,
parent_id: Option<ItemId>,
child_order: Order,
section_id: Option<SectionId>,
day_order: Order,
collapsed: Flag,
labels: Vec<LabelId>,
added_by_uid: Option<UserId>,
assigned_by_uid: Option<UserId>,
responsible_uid: Option<UserId>,
checked: Flag,
in_history: Flag,
is_deleted: Flag,
sync_id: Option<ItemSyncId>,
date_completed: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct ItemSyncId(u64);
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd)]
struct ItemId(u64);
#[derive(Debug, Deserialize)]
struct Note {
id: NoteId,
posted_uid: UserId,
item_id: ItemId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct NoteId(u64);
#[derive(Debug, Deserialize)]
struct ProjectNote {
id: ProjectNoteId,
posted_uid: UserId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct ProjectNoteId(u64);
#[derive(Debug, Deserialize)]
struct Section {
id: SectionId,
name: String,
project_id: ProjectId,
section_order: Order,
collapsed: bool,
sync_id: Option<SectionSyncId>,
is_deleted: bool,
is_archived: bool,
date_archived: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct SectionId(u64);
#[derive(Debug, Deserialize)]
struct SectionSyncId(u64);
#[derive(Debug, Deserialize)]
struct Label {
id: LabelId,
name: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct LabelId(u64);
#[derive(Debug, Deserialize)]
struct Filter {
id: FilterId,
name: String,
query: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct FilterId(u64);
#[derive(Debug, Deserialize)]
struct Collaborator {
id: CollaboratorId,
email: String,
full_name: String,
timezone: String,
#[serde(default)]
image_id: Option<String>,
}
#[derive(Debug, Deserialize)]
struct CollaboratorId(u64);
#[derive(Debug, Deserialize)]
struct CollaboratorState {
project_id: ProjectId,
user_id: UserId,
state: CollaboratorStatus,
is_deleted: bool,
}
#[derive(Debug, Deserialize)]
// Note: v8 api says there should be a `seq_no` field that holds an integer.
struct LiveNotification {
id: LiveNotificationId,
// Note: v8 api says that created should be an integer that is the epoch timestamp.
created: String,
// Note: v8 api does not say from_uid is optional.
#[serde(default)]
from_uid: Option<UserId>,
notification_key: String,
notification_type: String,
is_unread: Flag,
}
#[derive(Debug, Deserialize)]
struct LiveNotificationId(u64);
#[derive(Debug, Deserialize)]
struct UserSettings {
reminder_push: bool,
#[serde(default)]
reminder_sms: bool,
reminder_desktop: bool,
reminder_email: bool,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Color {
Crimson = 30,
Red = 31,
Orange = 32,
Yellow = 33,
Olive = 34,
LightGreen = 35,
DarkGreen = 36,
SeaGreen = 37,
SteelBlue = 38,
SkyBlue = 39,
BabyBlue = 40,
Blue = 41,
RoyalPurple = 42,
Violet = 43,
Pink = 44,
Mulberry = 45,
Salmon = 46,
Gray = 47,
LightGray = 48,
Tan = 49,
}
#[derive(Debug, Deserialize)]
enum CollaboratorStatus {
Active,
Invited,
}
#[derive(Debug, Deserialize)]
struct FileAttachment {
file_type: String,
file_name: String,
file_size: u64,
file_url: String,
upload_state: UploadState,
}
#[derive(Debug, Deserialize)]
enum UploadState {
Pending,
Completed,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Priority {
Natural = 1,
High = 2,
Urgent = 3,
VeryUrgent = 4,
}
#[derive(Debug, Deserialize)]
struct Date {
date: String,
timezone: Option<String>,
string: String,
lang: Language,
is_recurring: bool,
}
#[derive(Debug, Deserialize)]
struct TimezoneInfo {
gmt_string: String,
hours: i8,
is_dst: Flag,
minutes: u8,
timezone: String,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum TimeFormat {
TwentyFour = 0,
Twelve = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Theme {
Theme0 = 0,
Theme1 = 1,
Theme2 = 2,
Theme3 = 3,
Theme4 = 4,
Theme5 = 5,
Theme6 = 6,
Theme7 = 7,
Theme8 = 8,
Theme9 = 9,
Theme10 = 10,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Page {
InfoPage,
Blank,
Query(String),
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum SortOrder {
OldestDatesFirst = 0,
OldestDatesLast = 1,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum KarmaTrend {
Up,
}
#[derive(Debug, Deserialize)]
struct Features {
/// If the user has enabled beta.
beta: Flag,
/// If inline date parsing is enabled.
dateist_inline_disabled: bool,
dateist_lang: Option<Language>,
#[serde(default)]
gold_theme: bool,
has_push_reminders: bool,
karma_disabled: bool,
karma_vacation: bool,
restriction: u64,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Flag {
False = 0,
True = 1,
}
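// Editor's sketch: Deserialize_repr decodes repr(u8) enums from JSON integers
// rather than strings, so `"is_deleted": 1` becomes `Flag::True`. Hypothetical
// round-trip, assuming serde_json:
//
//     let flag: Flag = serde_json::from_str("1").unwrap(); // Flag::True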
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Day {
Monday = 1,
Tuesday = 2,
Wednesday = 3,
Thursday = 4,
Friday = 5,
Saturday = 6,
Sunday = 7,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Reminder {
Email,
Mobile,
Push,
NoDefault,
}
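// Editor's note: with rename_all = "snake_case", the wire values "email",
// "mobile", "push" and "no_default" map onto these variants.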
/// The format of a date.
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum DateFormat {
/// dd-mm-yyyy
DayMonth = 0,
/// mm-dd-yyyy
MonthDay = 1,
}
#[derive(Debug, Deserialize)]
enum Language {
#[serde(rename = "da")]
Danish,
#[serde(rename = "ge")]
German,
#[serde(rename = "en")]
English,
#[serde(rename = "es")]
Spanish,
#[serde(rename = "fi")]
Finnish,
#[serde(rename = "fr")]
French,
#[serde(rename = "it")]
Italian,
#[serde(rename = "ja")]
Japanese,
#[serde(rename = "ko")]
Korean,
#[serde(rename = "nl")]
Dutch,
#[serde(rename = "pl")]
Polish,
#[serde(rename = "pt_Br")]
BrazilianPortuguese,
#[serde(rename = "ru")]
Russian,
#[serde(rename = "sv")]
Sweedish,
#[serde(rename = "tr")]
Turkish,
#[serde(rename = "zh_CN")]
MainlandChinese,
#[serde(rename = "zh_TW")]
TaiwanChinese,
}
fim_type: identifier_name
main.rs | use {
serde::Deserialize,
serde_json,
serde_repr::Deserialize_repr,
std::{collections::BTreeMap, io::Read, fs::File, process::Command},
};
fn main() -> Result<(), Failure> {
let mut file = File::open("token")?;
let mut token = String::new();
file.read_to_string(&mut token)?;
token.insert_str(0, "token=");
let output = &Command::new("curl").args(&["https://api.todoist.com/sync/v8/sync", "-d", &token, "-d", "sync_token=*", "-d", "resource_types=[\"all\"]"]).output()?.stdout;
let sync: Sync = serde_json::from_slice(&output)?;
let user = User::from(sync);
for task in user.tasks {
println!("- {:?}", task);
}
Ok(())
}
#[derive(Debug)]
enum Failure {
Io(std::io::Error),
Serde(serde_json::Error),
Utf8(std::str::Utf8Error),
}
impl From<std::io::Error> for Failure {
fn from(error: std::io::Error) -> Self {
Self::Io(error)
}
}
impl From<serde_json::Error> for Failure {
fn from(error: serde_json::Error) -> Self {
Self::Serde(error)
}
}
impl From<std::str::Utf8Error> for Failure {
fn from(error: std::str::Utf8Error) -> Self {
Self::Utf8(error)
}
}
struct User {
tasks: Vec<Task>,
}
impl From<Sync> for User {
fn from(sync: Sync) -> Self {
let mut tasks = Vec::new();
for item in sync.items {
tasks.push(item.into());
}
Self {
tasks,
}
}
}
#[derive(Debug)]
struct Task {
item: Item,
}
impl From<Item> for Task {
fn from(item: Item) -> Self {
Self { item }
}
}
#[derive(Debug, Deserialize)]
struct Sync {
/// A new synchronization token.
sync_token: String,
/// If this contains all data.
full_sync: bool,
/// A [`UserData`].
user: UserData,
/// An array of [`Project`]s.
projects: Vec<Project>,
/// An array of [`Item`]s.
items: Vec<Item>,
/// An array of [`Note`]s.
notes: Vec<Note>,
/// An array of [`ProjectNote`]s.
project_notes: Vec<ProjectNote>,
/// An array of [`Section`]s.
sections: Vec<Section>,
/// An array of [`Label`]s.
labels: Vec<Label>,
/// An array of [`Filter`]s.
filters: Vec<Filter>,
/// Maps items to their order in the daily agenda.
day_orders: BTreeMap<ItemId, Order>,
/// An array of [`Reminder`]s.
reminders: Vec<Reminder>,
/// The collaborators for all shared projects.
collaborators: Vec<Collaborator>,
/// An array of [`CollaboratorState`]s.
#[serde(default)]
collaborators_states: Vec<CollaboratorState>,
/// An array of [`LiveNotification`]s.
live_notifications: Vec<LiveNotification>,
/// The id of the last [`LiveNotification`] seen by the user.
live_notifications_last_read_id: LiveNotificationId,
/// The [`UserSettings`].
user_settings: UserSettings,
}
#[derive(Debug, Deserialize)]
struct Order(i64);
#[derive(Debug, Deserialize)]
/// A Todoist user.
struct UserData {
/// The default number of minutes for set automatic reminders.
auto_reminder: u64,
/// Link to a 195x195 image of the user's avatar.
avatar_big: String,
/// Link to a 60x60 image of the user's avatar.
avatar_medium: String,
/// Link to a 640x640 image of the user's avatar.
avatar_s640: String,
/// Link to a 35x35 image of the user's avatar.
avatar_small: String,
/// The user's [`BusinessAccountId`].
#[serde(default)]
business_account_id: Option<BusinessAccountId>,
/// The number of tasks set as the user's daily goal.
daily_goal: u64,
/// The user's desired date format.
date_format: DateFormat,
/// If smart date recognition has been disabled.
dateist_inline_disabled: bool,
/// The language expected for the date recognition.
dateist_lang: Option<Language>,
/// The days that the user is off.
days_off: Vec<Day>,
/// The default reminder for the user.
default_reminder: Reminder,
/// The user's email.
email: String,
/// Special internal features that apply to the user.
features: Features,
full_name: String,
id: UserId,
#[serde(default)]
image_id: Option<String>,
inbox_project: ProjectId,
is_biz_admin: bool,
is_premium: bool,
join_date: String,
karma: f64,
karma_trend: KarmaTrend,
lang: Language,
mobile_host: Option<String>,
mobile_number: Option<String>,
next_week: Day,
premium_until: Option<String>,
sort_order: SortOrder,
start_day: Day,
start_page: Page,
#[serde(default)]
team_inbox: Option<ProjectId>,
theme: Theme,
time_format: TimeFormat,
token: String,
tz_info: TimezoneInfo,
weekly_goal: u64,
}
#[derive(Debug, Deserialize)]
struct UserId(u64);
#[derive(Debug, Deserialize)]
struct BusinessAccountId(u64);
#[derive(Debug, Deserialize)]
struct Project {
id: ProjectId,
name: String,
color: Color,
parent_id: Option<ProjectId>,
child_order: Order,
collapsed: Flag,
shared: bool,
is_deleted: Flag,
is_archived: Flag,
is_favorite: Flag,
sync_id: Option<ProjectSyncId>,
#[serde(default)]
inbox_project: bool,
#[serde(default)]
team_inbox: bool,
}
#[derive(Debug, Deserialize)]
struct ProjectId(u64);
#[derive(Debug, Deserialize)]
struct ProjectSyncId(u64);
#[derive(Debug, Deserialize)]
struct Item {
id: ItemId,
user_id: UserId,
project_id: ProjectId,
content: String,
due: Option<Date>,
priority: Priority,
parent_id: Option<ItemId>,
child_order: Order,
section_id: Option<SectionId>,
day_order: Order,
collapsed: Flag,
labels: Vec<LabelId>,
added_by_uid: Option<UserId>,
assigned_by_uid: Option<UserId>,
responsible_uid: Option<UserId>,
checked: Flag,
in_history: Flag,
is_deleted: Flag,
sync_id: Option<ItemSyncId>,
date_completed: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct ItemSyncId(u64);
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd)]
struct ItemId(u64);
#[derive(Debug, Deserialize)]
struct Note {
id: NoteId,
posted_uid: UserId,
item_id: ItemId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct NoteId(u64);
#[derive(Debug, Deserialize)]
struct ProjectNote {
id: ProjectNoteId,
posted_uid: UserId,
project_id: ProjectId,
content: String,
file_attachment: FileAttachment,
uids_to_notify: Vec<UserId>,
is_deleted: Flag,
posted: String,
reactions: BTreeMap<String, Vec<UserId>>,
}
#[derive(Debug, Deserialize)]
struct ProjectNoteId(u64);
#[derive(Debug, Deserialize)]
struct Section {
id: SectionId,
name: String,
project_id: ProjectId,
section_order: Order,
collapsed: bool,
sync_id: Option<SectionSyncId>,
is_deleted: bool,
is_archived: bool,
date_archived: Option<String>,
date_added: String,
}
#[derive(Debug, Deserialize)]
struct SectionId(u64);
#[derive(Debug, Deserialize)]
struct SectionSyncId(u64);
#[derive(Debug, Deserialize)]
struct Label {
id: LabelId,
name: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct LabelId(u64);
#[derive(Debug, Deserialize)]
struct Filter {
id: FilterId,
name: String,
query: String,
color: Color,
item_order: Order,
is_deleted: Flag,
is_favorite: Flag,
}
#[derive(Debug, Deserialize)]
struct FilterId(u64);
#[derive(Debug, Deserialize)]
struct Collaborator {
id: CollaboratorId,
email: String,
full_name: String,
timezone: String,
#[serde(default)]
image_id: Option<String>,
}
#[derive(Debug, Deserialize)]
struct CollaboratorId(u64);
#[derive(Debug, Deserialize)]
struct CollaboratorState {
project_id: ProjectId,
user_id: UserId,
state: CollaboratorStatus,
is_deleted: bool,
}
#[derive(Debug, Deserialize)]
// Note: v8 api says there should be a `seq_no` field that holds an integer.
struct LiveNotification {
id: LiveNotificationId,
// Note: v8 api says that created should be an integer that is the epoch timestamp.
created: String,
// Note: v8 api does not say from_uid is optional.
#[serde(default)]
from_uid: Option<UserId>,
notification_key: String,
notification_type: String,
is_unread: Flag,
}
#[derive(Debug, Deserialize)]
struct LiveNotificationId(u64);
#[derive(Debug, Deserialize)]
struct UserSettings {
reminder_push: bool,
#[serde(default)]
reminder_sms: bool,
reminder_desktop: bool,
reminder_email: bool,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Color {
Crimson = 30,
Red = 31,
Orange = 32,
Yellow = 33,
Olive = 34,
LightGreen = 35,
DarkGreen = 36,
SeaGreen = 37,
SteelBlue = 38,
SkyBlue = 39,
BabyBlue = 40,
Blue = 41,
RoyalPurple = 42,
Violet = 43,
Pink = 44,
Mulberry = 45,
Salmon = 46,
Gray = 47,
LightGray = 48,
Tan = 49,
}
#[derive(Debug, Deserialize)]
enum CollaboratorStatus {
Active,
Invited,
}
#[derive(Debug, Deserialize)]
struct FileAttachment {
file_type: String,
file_name: String,
file_size: u64,
file_url: String,
upload_state: UploadState,
}
#[derive(Debug, Deserialize)]
enum UploadState {
Pending,
Completed,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Priority {
Natural = 1,
High = 2,
Urgent = 3,
VeryUrgent = 4,
}
#[derive(Debug, Deserialize)]
struct Date {
date: String,
timezone: Option<String>,
string: String,
lang: Language,
is_recurring: bool,
}
#[derive(Debug, Deserialize)]
struct TimezoneInfo {
gmt_string: String,
hours: i8,
is_dst: Flag,
minutes: u8,
timezone: String,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum TimeFormat {
TwentyFour = 0,
Twelve = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Theme {
Theme0 = 0,
Theme1 = 1,
Theme2 = 2,
Theme3 = 3,
Theme4 = 4,
Theme5 = 5,
Theme6 = 6,
Theme7 = 7,
Theme8 = 8,
Theme9 = 9,
Theme10 = 10,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Page {
InfoPage,
Blank,
Query(String),
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum SortOrder {
OldestDatesFirst = 0,
OldestDatesLast = 1,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum KarmaTrend {
Up,
}
#[derive(Debug, Deserialize)]
struct Features {
/// If the user has enabled beta.
beta: Flag,
/// If inline date parsing is enabled.
dateist_inline_disabled: bool,
dateist_lang: Option<Language>,
#[serde(default)]
gold_theme: bool,
has_push_reminders: bool,
karma_disabled: bool,
karma_vacation: bool,
restriction: u64,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Flag {
False = 0,
True = 1,
}
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum Day {
Monday = 1,
Tuesday = 2,
Wednesday = 3,
Thursday = 4,
Friday = 5,
Saturday = 6,
Sunday = 7,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Reminder {
Email,
Mobile,
Push,
NoDefault,
}
/// The format of a date.
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
enum DateFormat {
/// dd-mm-yyyy
DayMonth = 0,
/// mm-dd-yyyy
MonthDay = 1,
}
#[derive(Debug, Deserialize)]
enum Language {
#[serde(rename = "da")]
Danish,
#[serde(rename = "ge")]
German,
#[serde(rename = "en")]
English,
#[serde(rename = "es")]
Spanish,
#[serde(rename = "fi")]
Finnish,
#[serde(rename = "fr")]
French,
#[serde(rename = "it")]
Italian,
#[serde(rename = "ja")]
Japanese,
#[serde(rename = "ko")]
Korean,
#[serde(rename = "nl")]
Dutch,
#[serde(rename = "pl")]
Polish,
#[serde(rename = "pt_Br")]
BrazilianPortuguese,
#[serde(rename = "ru")]
Russian,
#[serde(rename = "sv")]
Sweedish,
#[serde(rename = "tr")]
Turkish,
#[serde(rename = "zh_CN")]
MainlandChinese,
#[serde(rename = "zh_TW")]
TaiwanChinese,
}
fim_type: random_line_split
secrets.go | package secret
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/mittwald/kubernetes-replicator/replicate/common"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
type Replicator struct {
*common.GenericReplicator
}
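// Editor's note: embedding *common.GenericReplicator promotes its fields and
// methods, so this file only supplies the secret-specific callbacks below.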
// NewReplicator creates a new secret replicator
func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator |
// ReplicateDataFrom takes a source object and copies over data to target object
func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error {
source := sourceObj.(*v1.Secret)
target := targetObj.(*v1.Secret)
// make sure replication is allowed
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", common.MustGetKey(target))
if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok {
return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source))
}
targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("target %s is already up-to-date", common.MustGetKey(target))
return nil
}
targetCopy := target.DeepCopy()
if targetCopy.Data == nil {
targetCopy.Data = make(map[string][]byte)
}
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
targetCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source any more", k)
delete(targetCopy.Data, k)
}
}
sort.Strings(replicatedKeys)
logger.Infof("updating target %s", common.MustGetKey(target))
targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
s, err := r.Client.CoreV1().Secrets(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
if err != nil {
err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name)
} else if err = r.Store.Update(s); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err)
}
return err
}
// ReplicateObjectTo copies the whole object to target namespace
func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error {
source := sourceObj.(*v1.Secret)
targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name)
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
targetResourceType := source.Type
targetResource, exists, err := r.Store.GetByKey(targetLocation)
if err != nil {
return errors.Wrapf(err, "Could not get %s from cache!", targetLocation)
}
logger.Infof("Checking if %s exists? %v", targetLocation, exists)
var resourceCopy *v1.Secret
if exists {
targetObject := targetResource.(*v1.Secret)
targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject))
return nil
}
targetResourceType = targetObject.Type
resourceCopy = targetObject.DeepCopy()
} else {
resourceCopy = new(v1.Secret)
}
keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences]
if ok && keepOwnerReferences == "true" {
resourceCopy.OwnerReferences = source.OwnerReferences
}
if resourceCopy.Data == nil {
resourceCopy.Data = make(map[string][]byte)
}
if resourceCopy.Annotations == nil {
resourceCopy.Annotations = make(map[string]string)
}
replicatedKeys := r.extractReplicatedKeys(source, targetLocation, resourceCopy)
sort.Strings(replicatedKeys)
labelsCopy := make(map[string]string)
stripLabels, ok := source.Annotations[common.StripLabels]
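// Note (editor): labels are copied only when the strip-labels annotation is
// entirely absent; any present value, "false" included, skips the copy
// because of the `!ok &&` guard.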
if !ok && stripLabels != "true" {
if source.Labels != nil {
for key, value := range source.Labels {
labelsCopy[key] = value
}
}
}
resourceCopy.Name = source.Name
resourceCopy.Labels = labelsCopy
resourceCopy.Type = targetResourceType
resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
var obj interface{}
if exists {
logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{})
} else {
logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{})
}
if err != nil {
err = errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name)
} else if err = r.Store.Update(obj); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy)
}
return err
}
func (r *Replicator) extractReplicatedKeys(source *v1.Secret, targetLocation string, resourceCopy *v1.Secret) []string {
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
resourceCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source secret any more", k)
delete(resourceCopy.Data, k)
}
}
return replicatedKeys
}
func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) {
dependentKey := common.MustGetKey(target)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"source": sourceKey,
"target": dependentKey,
})
targetObject, ok := target.(*v1.Secret)
if !ok {
err := errors.Errorf("bad type returned from Store: %T", target)
return nil, err
}
patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}}
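// Assuming common.JSONPatchOperation carries the standard RFC 6902 "op"/"path"
// JSON tags, the marshaled body below is: [{"op":"remove","path":"/data"}].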
patchBody, err := json.Marshal(&patch)
if err != nil {
return nil, errors.Wrapf(err, "error while building patch body for secret %s: %v", dependentKey, err)
}
logger.Debugf("clearing dependent %s %s", r.Kind, dependentKey)
logger.Tracef("patch body: %s", string(patchBody))
s, err := r.Client.CoreV1().Secrets(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err)
}
return s, nil
}
// DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
targetLocation := common.MustGetKey(targetResource)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"target": targetLocation,
})
object := targetResource.(*v1.Secret)
resourceKeys := strings.Join(common.GetKeysFromBinaryMap(object.Data), ",")
if resourceKeys == object.Annotations[common.ReplicatedKeysAnnotation] {
logger.Debugf("Deleting %s", targetLocation)
if err := r.Client.CoreV1().Secrets(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err)
}
} else {
var patch []common.JSONPatchOperation
exists := make(map[string]struct{})
for _, value := range common.GetKeysFromBinaryMap(object.Data) {
exists[value] = struct{}{}
}
for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") {
if _, ok := exists[val]; ok {
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/data/%s", val)})
}
}
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))})
patchBody, err := json.Marshal(&patch)
if err != nil {
return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err)
}
if _, err := r.Client.CoreV1().Secrets(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}); err != nil {
return errors.Wrapf(err, "error while patching secret %s", targetLocation)
}
logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation)
}
return nil
}
| {
repl := Replicator{
GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{
Kind: "Secret",
ObjType: &v1.Secret{},
AllowAll: allowAll,
ResyncPeriod: resyncPeriod,
Client: client,
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Secrets("").List(context.TODO(), lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().Secrets("").Watch(context.TODO(), lo)
},
}),
}
repl.UpdateFuncs = common.UpdateFuncs{
ReplicateDataFrom: repl.ReplicateDataFrom,
ReplicateObjectTo: repl.ReplicateObjectTo,
PatchDeleteDependent: repl.PatchDeleteDependent,
DeleteReplicatedResource: repl.DeleteReplicatedResource,
}
return &repl
} | identifier_body |
secrets.go | package secret
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/mittwald/kubernetes-replicator/replicate/common"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
type Replicator struct {
*common.GenericReplicator
}
// NewReplicator creates a new secret replicator
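// A minimal wiring sketch (hypothetical; the clientset construction and the
// run loop depend on the surrounding application):
//
//	clientset, err := kubernetes.NewForConfig(config) // config obtained elsewhere
//	if err != nil {
//		log.Fatal(err)
//	}
//	replicator := NewReplicator(clientset, 30*time.Second, false)
//	_ = replicator // e.g. start its informer loop, assuming the common package provides one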
func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator {
repl := Replicator{
GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{
Kind: "Secret",
ObjType: &v1.Secret{},
AllowAll: allowAll,
ResyncPeriod: resyncPeriod,
Client: client,
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Secrets("").List(context.TODO(), lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().Secrets("").Watch(context.TODO(), lo)
},
}),
}
repl.UpdateFuncs = common.UpdateFuncs{
ReplicateDataFrom: repl.ReplicateDataFrom,
ReplicateObjectTo: repl.ReplicateObjectTo,
PatchDeleteDependent: repl.PatchDeleteDependent,
DeleteReplicatedResource: repl.DeleteReplicatedResource,
}
return &repl
}
// ReplicateDataFrom takes a source object and copies over data to target object
func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error {
source := sourceObj.(*v1.Secret)
target := targetObj.(*v1.Secret)
// make sure replication is allowed
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", common.MustGetKey(target))
if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok {
return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source))
}
targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("target %s is already up-to-date", common.MustGetKey(target))
return nil
}
targetCopy := target.DeepCopy()
if targetCopy.Data == nil {
targetCopy.Data = make(map[string][]byte)
}
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
targetCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source any more", k)
delete(targetCopy.Data, k)
}
}
sort.Strings(replicatedKeys)
logger.Infof("updating target %s", common.MustGetKey(target))
targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
s, err := r.Client.CoreV1().Secrets(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
if err != nil {
err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name)
} else if err = r.Store.Update(s); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err)
}
return err
}
// ReplicateObjectTo copies the whole object to target namespace
func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error {
source := sourceObj.(*v1.Secret)
targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name)
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
targetResourceType := source.Type
targetResource, exists, err := r.Store.GetByKey(targetLocation)
if err != nil {
return errors.Wrapf(err, "Could not get %s from cache!", targetLocation)
}
logger.Infof("Checking if %s exists? %v", targetLocation, exists)
var resourceCopy *v1.Secret
if exists {
targetObject := targetResource.(*v1.Secret)
targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject))
return nil
}
targetResourceType = targetObject.Type
resourceCopy = targetObject.DeepCopy()
} else {
resourceCopy = new(v1.Secret)
}
keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences]
if ok && keepOwnerReferences == "true" {
resourceCopy.OwnerReferences = source.OwnerReferences
}
if resourceCopy.Data == nil {
resourceCopy.Data = make(map[string][]byte)
}
if resourceCopy.Annotations == nil {
resourceCopy.Annotations = make(map[string]string)
}
replicatedKeys := r.extractReplicatedKeys(source, targetLocation, resourceCopy)
sort.Strings(replicatedKeys)
labelsCopy := make(map[string]string)
stripLabels, ok := source.Annotations[common.StripLabels]
if !ok && stripLabels != "true" {
if source.Labels != nil {
for key, value := range source.Labels {
labelsCopy[key] = value
}
}
}
resourceCopy.Name = source.Name
resourceCopy.Labels = labelsCopy
resourceCopy.Type = targetResourceType
resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
var obj interface{}
if exists {
logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{})
} else {
logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{})
}
if err != nil {
err = errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name)
} else if err = r.Store.Update(obj); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy)
}
return err
}
func (r *Replicator) extractReplicatedKeys(source *v1.Secret, targetLocation string, resourceCopy *v1.Secret) []string {
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value) | replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source secret any more", k)
delete(resourceCopy.Data, k)
}
}
return replicatedKeys
}
func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) {
dependentKey := common.MustGetKey(target)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"source": sourceKey,
"target": dependentKey,
})
targetObject, ok := target.(*v1.Secret)
if !ok {
err := errors.Errorf("bad type returned from Store: %T", target)
return nil, err
}
patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}}
patchBody, err := json.Marshal(&patch)
if err != nil {
return nil, errors.Wrapf(err, "error while building patch body for secret %s: %v", dependentKey, err)
}
logger.Debugf("clearing dependent %s %s", r.Kind, dependentKey)
logger.Tracef("patch body: %s", string(patchBody))
s, err := r.Client.CoreV1().Secrets(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err)
}
return s, nil
}
// DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
targetLocation := common.MustGetKey(targetResource)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"target": targetLocation,
})
object := targetResource.(*v1.Secret)
resourceKeys := strings.Join(common.GetKeysFromBinaryMap(object.Data), ",")
if resourceKeys == object.Annotations[common.ReplicatedKeysAnnotation] {
logger.Debugf("Deleting %s", targetLocation)
if err := r.Client.CoreV1().Secrets(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err)
}
} else {
var patch []common.JSONPatchOperation
exists := make(map[string]struct{})
for _, value := range common.GetKeysFromBinaryMap(object.Data) {
exists[value] = struct{}{}
}
for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") {
if _, ok := exists[val]; ok {
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/data/%s", val)})
}
}
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))})
patchBody, err := json.Marshal(&patch)
if err != nil {
return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err)
}
if _, err := r.Client.CoreV1().Secrets(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}); err != nil {
return errors.Wrapf(err, "error while patching secret %s", targetLocation)
}
logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation)
}
return nil
} | resourceCopy.Data[key] = newValue
| random_line_split |
secrets.go | package secret
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/mittwald/kubernetes-replicator/replicate/common"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
type Replicator struct {
*common.GenericReplicator
}
// NewReplicator creates a new secret replicator
func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator {
repl := Replicator{
GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{
Kind: "Secret",
ObjType: &v1.Secret{},
AllowAll: allowAll,
ResyncPeriod: resyncPeriod,
Client: client,
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Secrets("").List(context.TODO(), lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().Secrets("").Watch(context.TODO(), lo)
},
}),
}
repl.UpdateFuncs = common.UpdateFuncs{
ReplicateDataFrom: repl.ReplicateDataFrom,
ReplicateObjectTo: repl.ReplicateObjectTo,
PatchDeleteDependent: repl.PatchDeleteDependent,
DeleteReplicatedResource: repl.DeleteReplicatedResource,
}
return &repl
}
// ReplicateDataFrom takes a source object and copies over data to target object
func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error {
source := sourceObj.(*v1.Secret)
target := targetObj.(*v1.Secret)
// make sure replication is allowed
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", common.MustGetKey(target))
if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok {
return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source))
}
targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("target %s is already up-to-date", common.MustGetKey(target))
return nil
}
targetCopy := target.DeepCopy()
if targetCopy.Data == nil {
targetCopy.Data = make(map[string][]byte)
}
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
targetCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source any more", k)
delete(targetCopy.Data, k)
}
}
sort.Strings(replicatedKeys)
logger.Infof("updating target %s", common.MustGetKey(target))
targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
s, err := r.Client.CoreV1().Secrets(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
if err != nil {
err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name)
} else if err = r.Store.Update(s); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err)
}
return err
}
// ReplicateObjectTo copies the whole object to target namespace
func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error {
source := sourceObj.(*v1.Secret)
targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name)
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
targetResourceType := source.Type
targetResource, exists, err := r.Store.GetByKey(targetLocation)
if err != nil {
return errors.Wrapf(err, "Could not get %s from cache!", targetLocation)
}
logger.Infof("Checking if %s exists? %v", targetLocation, exists)
var resourceCopy *v1.Secret
if exists {
targetObject := targetResource.(*v1.Secret)
targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject))
return nil
}
targetResourceType = targetObject.Type
resourceCopy = targetObject.DeepCopy()
} else {
resourceCopy = new(v1.Secret)
}
keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences]
if ok && keepOwnerReferences == "true" {
resourceCopy.OwnerReferences = source.OwnerReferences
}
if resourceCopy.Data == nil {
resourceCopy.Data = make(map[string][]byte)
}
if resourceCopy.Annotations == nil {
resourceCopy.Annotations = make(map[string]string)
}
replicatedKeys := r.extractReplicatedKeys(source, targetLocation, resourceCopy)
sort.Strings(replicatedKeys)
labelsCopy := make(map[string]string)
stripLabels, ok := source.Annotations[common.StripLabels]
if !ok && stripLabels != "true" {
if source.Labels != nil |
}
resourceCopy.Name = source.Name
resourceCopy.Labels = labelsCopy
resourceCopy.Type = targetResourceType
resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
var obj interface{}
if exists {
logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{})
} else {
logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{})
}
if err != nil {
err = errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name)
} else if err = r.Store.Update(obj); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy)
}
return err
}
func (r *Replicator) extractReplicatedKeys(source *v1.Secret, targetLocation string, resourceCopy *v1.Secret) []string {
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
resourceCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source secret any more", k)
delete(resourceCopy.Data, k)
}
}
return replicatedKeys
}
func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) {
dependentKey := common.MustGetKey(target)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"source": sourceKey,
"target": dependentKey,
})
targetObject, ok := target.(*v1.Secret)
if !ok {
err := errors.Errorf("bad type returned from Store: %T", target)
return nil, err
}
patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}}
patchBody, err := json.Marshal(&patch)
if err != nil {
return nil, errors.Wrapf(err, "error while building patch body for secret %s: %v", dependentKey, err)
}
logger.Debugf("clearing dependent %s %s", r.Kind, dependentKey)
logger.Tracef("patch body: %s", string(patchBody))
s, err := r.Client.CoreV1().Secrets(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err)
}
return s, nil
}
// DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
targetLocation := common.MustGetKey(targetResource)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"target": targetLocation,
})
object := targetResource.(*v1.Secret)
resourceKeys := strings.Join(common.GetKeysFromBinaryMap(object.Data), ",")
if resourceKeys == object.Annotations[common.ReplicatedKeysAnnotation] {
logger.Debugf("Deleting %s", targetLocation)
if err := r.Client.CoreV1().Secrets(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err)
}
} else {
var patch []common.JSONPatchOperation
exists := make(map[string]struct{})
for _, value := range common.GetKeysFromBinaryMap(object.Data) {
exists[value] = struct{}{}
}
for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") {
if _, ok := exists[val]; ok {
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/data/%s", val)})
}
}
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))})
patchBody, err := json.Marshal(&patch)
if err != nil {
return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err)
}
if _, err := r.Client.CoreV1().Secrets(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}); err != nil {
return errors.Wrapf(err, "error while patching secret %s", targetLocation)
}
logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation)
}
return nil
}
| {
for key, value := range source.Labels {
labelsCopy[key] = value
}
} | conditional_block |
secrets.go | package secret
import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
"github.com/mittwald/kubernetes-replicator/replicate/common"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
type Replicator struct {
*common.GenericReplicator
}
// NewReplicator creates a new secret replicator
func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator {
repl := Replicator{
GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{
Kind: "Secret",
ObjType: &v1.Secret{},
AllowAll: allowAll,
ResyncPeriod: resyncPeriod,
Client: client,
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Secrets("").List(context.TODO(), lo)
},
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
return client.CoreV1().Secrets("").Watch(context.TODO(), lo)
},
}),
}
repl.UpdateFuncs = common.UpdateFuncs{
ReplicateDataFrom: repl.ReplicateDataFrom,
ReplicateObjectTo: repl.ReplicateObjectTo,
PatchDeleteDependent: repl.PatchDeleteDependent,
DeleteReplicatedResource: repl.DeleteReplicatedResource,
}
return &repl
}
// ReplicateDataFrom takes a source object and copies over data to target object
func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error {
source := sourceObj.(*v1.Secret)
target := targetObj.(*v1.Secret)
// make sure replication is allowed
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", common.MustGetKey(target))
if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok {
return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source))
}
targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("target %s is already up-to-date", common.MustGetKey(target))
return nil
}
targetCopy := target.DeepCopy()
if targetCopy.Data == nil {
targetCopy.Data = make(map[string][]byte)
}
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
targetCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source any more", k)
delete(targetCopy.Data, k)
}
}
sort.Strings(replicatedKeys)
logger.Infof("updating target %s", common.MustGetKey(target))
targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
s, err := r.Client.CoreV1().Secrets(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
if err != nil {
err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name)
} else if err = r.Store.Update(s); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err)
}
return err
}
// ReplicateObjectTo copies the whole object to target namespace
func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error {
source := sourceObj.(*v1.Secret)
targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name)
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
targetResourceType := source.Type
targetResource, exists, err := r.Store.GetByKey(targetLocation)
if err != nil {
return errors.Wrapf(err, "Could not get %s from cache!", targetLocation)
}
logger.Infof("Checking if %s exists? %v", targetLocation, exists)
var resourceCopy *v1.Secret
if exists {
targetObject := targetResource.(*v1.Secret)
targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation]
sourceVersion := source.ResourceVersion
if ok && targetVersion == sourceVersion {
logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject))
return nil
}
targetResourceType = targetObject.Type
resourceCopy = targetObject.DeepCopy()
} else {
resourceCopy = new(v1.Secret)
}
keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences]
if ok && keepOwnerReferences == "true" {
resourceCopy.OwnerReferences = source.OwnerReferences
}
if resourceCopy.Data == nil {
resourceCopy.Data = make(map[string][]byte)
}
if resourceCopy.Annotations == nil {
resourceCopy.Annotations = make(map[string]string)
}
replicatedKeys := r.extractReplicatedKeys(source, targetLocation, resourceCopy)
sort.Strings(replicatedKeys)
labelsCopy := make(map[string]string)
stripLabels, ok := source.Annotations[common.StripLabels]
if !ok && stripLabels != "true" {
if source.Labels != nil {
for key, value := range source.Labels {
labelsCopy[key] = value
}
}
}
resourceCopy.Name = source.Name
resourceCopy.Labels = labelsCopy
resourceCopy.Type = targetResourceType
resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion
resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",")
var obj interface{}
if exists {
logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{})
} else {
logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name)
obj, err = r.Client.CoreV1().Secrets(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{})
}
if err != nil {
err = errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name)
} else if err = r.Store.Update(obj); err != nil {
err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy)
}
return err
}
func (r *Replicator) extractReplicatedKeys(source *v1.Secret, targetLocation string, resourceCopy *v1.Secret) []string {
logger := log.
WithField("kind", r.Kind).
WithField("source", common.MustGetKey(source)).
WithField("target", targetLocation)
prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta)
replicatedKeys := make([]string, 0)
for key, value := range source.Data {
newValue := make([]byte, len(value))
copy(newValue, value)
resourceCopy.Data[key] = newValue
replicatedKeys = append(replicatedKeys, key)
delete(prevKeys, key)
}
if hasPrevKeys {
for k := range prevKeys {
logger.Debugf("removing previously present key %s: not present in source secret any more", k)
delete(resourceCopy.Data, k)
}
}
return replicatedKeys
}
func (r *Replicator) | (sourceKey string, target interface{}) (interface{}, error) {
dependentKey := common.MustGetKey(target)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"source": sourceKey,
"target": dependentKey,
})
targetObject, ok := target.(*v1.Secret)
if !ok {
err := errors.Errorf("bad type returned from Store: %T", target)
return nil, err
}
patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}}
patchBody, err := json.Marshal(&patch)
if err != nil {
return nil, errors.Wrapf(err, "error while building patch body for secret %s: %v", dependentKey, err)
}
logger.Debugf("clearing dependent %s %s", r.Kind, dependentKey)
logger.Tracef("patch body: %s", string(patchBody))
s, err := r.Client.CoreV1().Secrets(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
if err != nil {
return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err)
}
return s, nil
}
// DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
targetLocation := common.MustGetKey(targetResource)
logger := log.WithFields(log.Fields{
"kind": r.Kind,
"target": targetLocation,
})
object := targetResource.(*v1.Secret)
resourceKeys := strings.Join(common.GetKeysFromBinaryMap(object.Data), ",")
if resourceKeys == object.Annotations[common.ReplicatedKeysAnnotation] {
logger.Debugf("Deleting %s", targetLocation)
if err := r.Client.CoreV1().Secrets(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err)
}
} else {
var patch []common.JSONPatchOperation
exists := make(map[string]struct{})
for _, value := range common.GetKeysFromBinaryMap(object.Data) {
exists[value] = struct{}{}
}
for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") {
if _, ok := exists[val]; ok {
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/data/%s", val)})
}
}
patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))})
patchBody, err := json.Marshal(&patch)
if err != nil {
return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err)
}
if _, err := r.Client.CoreV1().Secrets(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}); err != nil {
return errors.Wrapf(err, "error while patching secret %s", targetLocation)
}
logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation)
}
return nil
}
| PatchDeleteDependent | identifier_name |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
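///
/// A construction sketch (URL and destination path are hypothetical):
///
/// ```ignore
/// let urls: Vec<Box<str>> = vec!["https://example.com/file.tar.gz".into()];
/// let source = Source::new(urls, std::path::PathBuf::from("/tmp/file.tar.gz"));
/// ```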
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
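///
/// A consumption sketch (illustrative; real consumers will match more variants,
/// and `rx.next()` requires `futures::stream::StreamExt` in scope):
///
/// ```ignore
/// let (tx, mut rx) = futures::channel::mpsc::unbounded();
/// let fetcher = Fetcher::default().events(std::sync::Arc::new(tx)).into_arc();
/// // ... spawn fetcher.request(...) on an executor ...
/// while let Some((path, event)) = rx.next().await {
///     if let FetchEvent::Progress(bytes) = event {
///         println!("{}: +{} bytes", path.display(), bytes);
///     }
/// }
/// ```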
#[derive(Debug)]
pub enum FetchEvent {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
/// Reports the amount of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose between the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
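///
/// A usage sketch (URL and destination are hypothetical, error handling elided):
///
/// ```ignore
/// let fetcher = Fetcher::default().into_arc();
/// let urls: Vec<Box<str>> = vec!["https://example.com/file".into()];
/// fetcher.request(urls.into(), std::path::PathBuf::from("/tmp/file").into()).await?;
/// ```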
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
// The destination which parts will be concatenated to.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
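// Illustrative: assuming `range::generate` yields (start, end) byte offsets,
// a 5 MiB file with 2 MiB parts would produce three ranges:
// (0, 2 MiB), (2 MiB, 4 MiB), and (4 MiB, 5 MiB).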
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
// Ensure that only this many connections are happening concurrently at a time.
.buffered(concurrent as usize)
// This type exploded the stack, and therefore needs to be boxed
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
} |
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
fn validate(response: Response) -> Result<Response, Error> {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
}
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
let header = self.header("content-lenght")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
} | } | random_line_split |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
#[derive(Debug)]
pub enum | {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
/// Reports the amount of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose between the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
// The destination which parts will be concatenated to.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
// Ensure that only this many connections are happening concurrently at a
// time.
.buffered(concurrent as usize)
// This future type is too large for the stack, and therefore needs to be boxed.
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
}
}
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
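// Editorial sketch: how timed() behaves, assuming async-std's timeout. The
// one-second budget and sixty-second sleep are illustrative values only.
async fn timed_demo() {
    let slow = async_std::task::sleep(Duration::from_secs(60));
    let result = timed(Duration::from_secs(1), slow).await;
    assert!(matches!(result, Err(Error::TimedOut)));
}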
fn validate(response: Response) -> Result<Response, Error> {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
}
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
let header = self.header("content-lenght")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
}
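// Editorial sketch: the RFC 2822 parsing that last_modified() above relies
// on, using chrono only; the date literal is an example value.
fn parse_http_date_example() {
    let parsed = DateTime::parse_from_rfc2822("Wed, 21 Oct 2015 07:28:00 GMT")
        .map(|t| t.with_timezone(&Utc));
    assert!(parsed.is_ok());
}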
| FetchEvent | identifier_name |
lib.rs | #![recursion_limit = "1024"]
#[macro_use]
extern crate derive_new;
#[macro_use]
extern crate derive_setters;
#[macro_use]
extern crate log;
#[macro_use]
extern crate thiserror;
pub mod checksum;
mod range;
mod systems;
pub use self::systems::*;
use std::{
fmt::Debug,
io,
num::{NonZeroU16, NonZeroU32, NonZeroU64},
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, UNIX_EPOCH},
};
use async_std::{
fs::{self, File},
prelude::*,
};
use chrono::{DateTime, Utc};
use filetime::FileTime;
use futures::{
channel::mpsc,
stream::{self, StreamExt},
};
use http_client::native::NativeClient;
use numtoa::NumToA;
use surf::{
Client, Request, Response, StatusCode
};
pub type EventSender = mpsc::UnboundedSender<(Arc<Path>, FetchEvent)>;
pub type Output<T> = (Arc<Path>, Result<T, Error>);
/// An error from the asynchronous file fetcher.
#[derive(Debug, Error)]
pub enum Error {
#[error("task was cancelled")]
Cancelled,
#[error("http client error")]
Client(surf::Error),
#[error("unable to concatenate fetched parts")]
Concatenate(#[source] io::Error),
#[error("unable to create file")]
FileCreate(#[source] io::Error),
#[error("unable to set timestamp on {:?}", _0)]
FileTime(Arc<Path>, #[source] io::Error),
#[error("content length is an invalid range")]
InvalidRange(#[source] io::Error),
#[error("unable to remove file with bad metadata")]
MetadataRemove(#[source] io::Error),
#[error("destination has no file name")]
Nameless,
#[error("unable to open fetched part")]
OpenPart(Arc<Path>, #[source] io::Error),
#[error("destination lacks parent")]
Parentless,
#[error("connection timed out")]
TimedOut,
#[error("error writing to file")]
Write(#[source] io::Error),
#[error("failed to rename partial to destination")]
Rename(#[source] io::Error),
#[error("server responded with an error: {}", _0)]
Status(StatusCode),
}
/// Information about a source being fetched.
#[derive(Debug, Setters)]
pub struct Source {
/// URLs whereby the file can be found.
#[setters(skip)]
pub urls: Arc<[Box<str>]>,
/// Where the file shall ultimately be fetched to.
#[setters(skip)]
pub dest: Arc<Path>,
/// Optional location to store the partial file
#[setters(strip_option)]
#[setters(into)]
pub part: Option<Arc<Path>>,
}
impl Source {
pub fn new(urls: impl Into<Arc<[Box<str>]>>, dest: impl Into<Arc<Path>>) -> Self {
Self { urls: urls.into(), dest: dest.into(), part: None }
}
}
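// Editorial sketch: constructing a Source; the URL and destination path are
// placeholder values, not defaults of this crate.
fn source_example() -> Source {
    let urls: Vec<Box<str>> = vec!["https://example.com/file.tar.gz".into()];
    Source::new(urls, std::path::PathBuf::from("/tmp/file.tar.gz"))
}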
impl From<surf::Error> for Error {
fn from(e: surf::Error) -> Self { Self::Client(e) }
}
/// Events which are submitted by the fetcher.
#[derive(Debug)]
pub enum FetchEvent {
/// Signals that this file was already fetched.
AlreadyFetched,
/// States that we know the length of the file being fetched.
ContentLength(u64),
/// Notifies that the file has been fetched.
Fetched,
/// Notifies that a file is being fetched.
Fetching,
/// Reports the amount of bytes that have been read for a file.
Progress(usize),
/// Reports that a part of a file is being fetched.
PartFetching(u64),
/// Reports that a part has been fetched.
PartFetched(u64),
}
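// Editorial sketch: draining the channel that an EventSender feeds. The
// logging body is illustrative; any consumer of (path, event) pairs would do.
async fn log_events(mut rx: mpsc::UnboundedReceiver<(Arc<Path>, FetchEvent)>) {
    while let Some((path, event)) = rx.next().await {
        println!("{:?}: {:?}", path, event);
    }
}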
/// An asynchronous file fetcher for clients fetching files.
///
/// The futures generated by the fetcher are compatible with single and multi-threaded
/// runtimes, allowing you to choose between the runtime that works best for your
/// application. A single-threaded runtime is generally recommended for fetching files,
/// as your network connection is unlikely to be faster than a single CPU core.
#[derive(new, Setters)]
pub struct Fetcher {
#[setters(skip)]
client: Client,
/// When set, cancels any active operations.
#[new(default)]
#[setters(strip_option)]
cancel: Option<Arc<AtomicBool>>,
/// The number of concurrent connections to sustain per file being fetched.
#[new(default)]
connections_per_file: Option<NonZeroU16>,
/// The number of attempts to make when a request fails.
#[new(value = "unsafe { NonZeroU16::new_unchecked(3) } ")]
retries: NonZeroU16,
/// The maximum size of a part file when downloading in parts.
#[new(value = "unsafe { NonZeroU32::new_unchecked(2 * 1024 * 1024) }")]
max_part_size: NonZeroU32,
/// The time to wait between chunks before giving up.
#[new(default)]
#[setters(strip_option)]
timeout: Option<Duration>,
/// Holds a sender for submitting events to.
#[new(default)]
#[setters(into)]
#[setters(strip_option)]
events: Option<Arc<EventSender>>,
}
impl Default for Fetcher {
fn default() -> Self { Self::new(Client::with_http_client(NativeClient::default())) }
}
impl Fetcher {
/// Wraps the fetcher in an Arc.
pub fn into_arc(self) -> Arc<Self> { Arc::new(self) }
/// Request a file from one or more URIs.
///
/// At least one URI must be provided as a source for the file. Each additional URI
/// serves as a mirror for failover and load-balancing purposes.
pub async fn request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => Ok(()),
Err(mut why) => {
for _ in 1..self.retries.get() {
match self.clone().inner_request(uris.clone(), to.clone()).await {
Ok(()) => return Ok(()),
Err(cause) => why = cause,
}
}
Err(why)
}
}
}
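// Editorial note: request() above makes one initial attempt plus
// (retries - 1) further attempts. A generic sketch of the same pattern,
// illustrative and not part of the public API:
async fn with_attempts<F, Fut, T>(attempts: u16, mut operation: F) -> Result<T, Error>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, Error>>,
{
    let mut last_error = None;
    for _ in 0..attempts.max(1) {
        match operation().await {
            Ok(value) => return Ok(value),
            Err(why) => last_error = Some(why),
        }
    }
    Err(last_error.expect("at least one attempt is always made"))
}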
async fn inner_request(
self: Arc<Self>,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
) -> Result<(), Error> {
let mut modified = None;
let mut length = None;
let mut if_modified_since = None;
// If the file already exists, validate that it is the same.
if to.exists() {
if let Some(response) = head(&self.client, &*uris[0]).await? {
let content_length = response.content_length();
modified = response.last_modified();
if let (Some(content_length), Some(last_modified)) =
(content_length, modified)
{
match fs::metadata(to.as_ref()).await {
Ok(metadata) => {
let modified = metadata.modified().map_err(Error::Write)?;
let ts = modified
.duration_since(UNIX_EPOCH)
.expect("time went backwards");
if metadata.len() == content_length
&& ts.as_secs() == last_modified.timestamp() as u64
{
self.send((to, FetchEvent::AlreadyFetched));
return Ok(());
}
if_modified_since =
Some(DateTime::<Utc>::from(modified).to_rfc2822());
length = Some(content_length);
}
Err(why) => {
error!("failed to fetch metadata of {:?}: {}", to, why);
fs::remove_file(to.as_ref())
.await
.map_err(Error::MetadataRemove)?;
}
}
}
}
}
// If set, this will use multiple connections to download a file in parts.
if let Some(connections) = self.connections_per_file {
if let Some(response) = head(&self.client, &*uris[0]).await? {
modified = response.last_modified();
let length = match length {
Some(length) => Some(length),
None => response.content_length(),
};
if let Some(length) = length {
if supports_range(&self.client, &*uris[0], length).await? {
self.send((to.clone(), FetchEvent::ContentLength(length)));
return self
.get_many(length, connections.get(), uris, to, modified)
.await;
}
}
}
}
let mut request = self.client.get(&*uris[0]).header("Expect", "").build();
if let Some(modified_since) = if_modified_since {
request.set_header("if-modified-since", modified_since);
}
let path =
match self.get(&mut modified, request, to.clone(), to.clone(), None).await {
Ok(path) => path,
// Server does not support if-modified-since
Err(Error::Status(StatusCode::NotImplemented)) => {
let request = self.client.get(&*uris[0]).header("Expect", "").build();
self.get(&mut modified, request, to.clone(), to, None).await?
}
Err(why) => return Err(why),
};
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&path, filetime, filetime)
.map_err(move |why| Error::FileTime(path, why))?;
}
Ok(())
}
async fn get(
&self,
modified: &mut Option<DateTime<Utc>>,
request: Request,
to: Arc<Path>,
dest: Arc<Path>,
length: Option<u64>,
) -> Result<Arc<Path>, Error> {
let mut file = File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
if let Some(length) = length {
file.set_len(length).await.map_err(Error::Write)?;
}
let response = &mut validate(if let Some(duration) = self.timeout {
timed(duration, async { self.client.send(request).await.map_err(Error::from) }).await??
} else {
self.client.send(request).await?
})?;
if modified.is_none() {
*modified = response.last_modified();
}
if response.status() == StatusCode::NotModified {
return Ok(to);
}
let buffer = &mut [0u8; 8 * 1024];
let mut read;
loop {
if self.cancelled() {
return Err(Error::Cancelled);
}
let reader = async { response.read(buffer).await.map_err(Error::Write) };
read = match self.timeout {
Some(duration) => timed(duration, reader).await??,
None => reader.await?,
};
if read != 0 {
self.send((dest.clone(), FetchEvent::Progress(read)));
file.write_all(&buffer[..read]).await.map_err(Error::Write)?;
} else {
break;
}
}
Ok(to)
}
async fn get_many(
self: Arc<Self>,
length: u64,
concurrent: u16,
uris: Arc<[Box<str>]>,
to: Arc<Path>,
mut modified: Option<DateTime<Utc>>,
) -> Result<(), Error> {
let parent = to.parent().ok_or(Error::Parentless)?;
let filename = to.file_name().ok_or(Error::Nameless)?;
let mut buf = [0u8; 20];
// The destination which parts will be concatenated to.
let concatenated_file =
&mut File::create(to.as_ref()).await.map_err(Error::FileCreate)?;
let max_part_size =
unsafe { NonZeroU64::new_unchecked(u64::from(self.max_part_size.get())) };
let to_ = to.clone();
let parts = stream::iter(range::generate(length, max_part_size).enumerate())
// Generate a future for fetching each part that a range describes.
.map(move |(partn, (range_start, range_end))| {
let uri = uris[partn % uris.len()].clone();
let part_path = {
let mut new_filename = filename.to_os_string();
new_filename
.push(&[".part", partn.numtoa_str(10, &mut buf)].concat());
parent.join(new_filename)
};
let fetcher = self.clone();
let to = to_.clone();
async move {
let range = range::to_string(range_start, range_end);
fetcher.send((to.clone(), FetchEvent::PartFetching(partn as u64)));
let request = fetcher
.client
.get(&*uri)
.header("range", range.as_str())
.header("Expect", "")
.build();
let result = fetcher
.get(
&mut modified,
request,
part_path.into(),
to.clone(),
Some(range_end - range_start),
)
.await;
fetcher.send((to, FetchEvent::PartFetched(partn as u64)));
result
}
})
// Ensure that only this many connections are happening concurrently at a
// time.
.buffered(concurrent as usize)
// This future type is too large for the stack, and therefore needs to be boxed.
.boxed_local();
systems::concatenator(concatenated_file, parts).await?;
if let Some(modified) = modified {
let filetime = FileTime::from_unix_time(modified.timestamp(), 0);
filetime::set_file_times(&to, filetime, filetime)
.map_err(|why| Error::FileTime(to, why))?;
}
Ok(())
}
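// Editorial sketch: the ".part<N>" naming scheme used by get_many() above,
// restated with plain format! instead of numtoa (illustrative only).
fn part_name(filename: &str, partn: usize) -> String {
    format!("{}.part{}", filename, partn)
}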
fn cancelled(&self) -> bool {
self.cancel.as_ref().map_or(false, |cancel| cancel.load(Ordering::SeqCst))
}
fn send(&self, event: (Arc<Path>, FetchEvent)) {
if let Some(sender) = self.events.as_ref() {
let _ = sender.unbounded_send(event);
}
}
}
async fn head(
client: &Client,
uri: &str,
) -> Result<Option<Response>, Error> {
match validate(client.head(uri).header("Expect", "").await?).map(Some) {
result @ Ok(_) => result,
Err(Error::Status(StatusCode::NotImplemented)) => Ok(None),
Err(other) => Err(other),
}
}
async fn supports_range(
client: &Client,
uri: &str,
length: u64,
) -> Result<bool, Error> {
let response = client
.head(uri)
.header("Expect", "")
.header("range", range::to_string(0, length).as_str())
.await?;
if response.status() == StatusCode::PartialContent {
Ok(true)
} else {
validate(response).map(|_| false)
}
}
async fn timed<F, T>(duration: Duration, future: F) -> Result<T, Error>
where
F: Future<Output = T>,
{
async_std::future::timeout(duration, future).await.map_err(|_| Error::TimedOut)
}
fn validate(response: Response) -> Result<Response, Error> |
trait ResponseExt {
fn content_length(&self) -> Option<u64>;
fn last_modified(&self) -> Option<DateTime<Utc>>;
}
impl ResponseExt for Response {
fn content_length(&self) -> Option<u64> {
let header = self.header("content-lenght")?.get(0)?;
header.as_str().parse::<u64>().ok()
}
fn last_modified(&self) -> Option<DateTime<Utc>> {
let header = self.header("last-modified")?.get(0)?;
DateTime::parse_from_rfc2822(header.as_str())
.ok()
.map(|tz| tz.with_timezone(&Utc))
}
}
| {
let status = response.status();
if status.is_informational() || status.is_success() {
Ok(response)
} else {
Err(Error::Status(status))
}
} | identifier_body |
statformat.py | #!/usr/bin/env python3
# ======================================================================
#
# This file contains functions used for formatting statistical data
# returned by SPARQL queries on NIDM-Results packs.
#
# Authors: Peter Williams, Tom Maullin, Camille Maumet (10/01/18)
#
# ======================================================================
from queries.querytools import run_query
from style.pagestyling import encode_image
import numpy as np
import random
import os
import math
import matplotlib
matplotlib.use('Agg')
# This function converts obo statistic types into the corresponding statistic.
def statistic_type(stat):
if stat == "http://purl.obolibrary.org/obo/STATO_0000376":
return("Z")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000282":
return("F")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000176":
return("T")
else:
return("P")
# This function returns the cluster forming threshold type of an image.
def height_thresh_type(graph, imageType):
if run_query(graph, 'askIfOboStatistic', 'Ask'):
return(imageType)
else:
return("P")
# This function returns the statistic type of a statistic
def statistic_type_string(statImage):
if statImage == "T":
return("T")
elif statImage == "F":
return("F")
elif statImage == "Z":
return("Z (Gaussianised T/F)")
def format_cluster_stats(g, excName):
# ----------------------------------------------------------------------
# First we gather data for peaks table.
# ----------------------------------------------------------------------
# Run the peak query
peakQueryResult = run_query(g, 'selectPeakData', 'Select',
{'EXC_NAME': excName})
# Retrieve query results.
peakZstats = [float(peakQueryResult[i]) for i in list(range(0, len(
peakQueryResult), 5))]
clusterIndicesForPeaks = [int(peakQueryResult[i]) for i in list(range(
1, len(peakQueryResult), 5))]
locations = [peakQueryResult[i] for i in list(range(2, len(
peakQueryResult), 5))]
# If a corrected height threshold has been applied we should display
# corrected peak P values. Else we should use uncorrected peak P values.
try:
if run_query(g, 'askCHeightThreshold', 'Ask'):
|
peakPVals = [float(peakQueryResult[i]) for i in list(
range(4, len(peakQueryResult), 5))]
# This is a temporary bug fix due to the FSL exporter currently not
# recording corrected peak P-values.
except ValueError:
peakPVals = [math.nan for row in peakQueryResult]
# Obtain permutation used to sort the results in order of descending
# cluster index and then descending peak statistic size.
peaksSortPermutation = sorted(range(len(clusterIndicesForPeaks)),
reverse=True,
key=lambda k: (clusterIndicesForPeaks[k],
peakZstats[k]))
# Sort all peak data using this permutation.
sortedPeaksZstatsArray = [peakZstats[i] for i in peaksSortPermutation]
sortedClusIndicesForPeaks = [
clusterIndicesForPeaks[i] for i in peaksSortPermutation]
sortedPeakLocations = [locations[i] for i in peaksSortPermutation]
sortedPeakPVals = [peakPVals[i] for i in peaksSortPermutation]
# ----------------------------------------------------------------------
# Second we gather data for cluster table.
# ----------------------------------------------------------------------
# Run the cluster query
clusQueryResult = run_query(g, 'selectClusterData', 'Select',
{'EXC_NAME': excName})
clusterIndices = [
int(clusQueryResult[i]) for i in list(
range(0, len(clusQueryResult), 3))]
clusterSizes = [
int(clusQueryResult[i]) for i in list(
range(1, len(clusQueryResult), 3))]
clusterPVals = [
float(clusQueryResult[i]) for i in list(
range(2, len(clusQueryResult), 3))]
# Create an array for the highest peaks.
highestPeakZArray = [0]*len(clusterIndices)
highestPeakLocations = [0]*len(clusterIndices)
for i in list(range(0, len(peakZstats))):
if highestPeakZArray[clusterIndicesForPeaks[i]-1] < peakZstats[i]:
highestPeakZArray[clusterIndicesForPeaks[i]-1] = peakZstats[i]
highestPeakLocations[clusterIndicesForPeaks[i]-1] = locations[i]
# Obtain permutation used to sort the results in order of descending
# cluster index and then for each cluster by peak statistic size.
clusterSortPermutation = sorted(
range(len(clusterIndices)),
reverse=True,
key=lambda k: (clusterSizes[k], clusterIndices[k]))
# Sorted cluster arrays
sortedClusSizeArray = [
clusterSizes[i] for i in clusterSortPermutation]
sortedClusIndicesArray = [
clusterIndices[i] for i in clusterSortPermutation]
sortedClusPVals = [
clusterPVals[i] for i in clusterSortPermutation]
# Sort the highest peaks
sortedMaxPeakZstats = [
highestPeakZArray[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
sortedMaxPeakLocations = [
highestPeakLocations[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
# Deal with inf issues for peaks.
logPeakPVals = [0]*len(sortedPeakPVals)
for i in list(range(0, len(sortedPeakPVals))):
if sortedPeakPVals[i] == 0:
logPeakPVals[i] = math.inf
else:
logPeakPVals[i] = -math.log(sortedPeakPVals[i], 10)
# Deal with inf issues for clusters.
logClusPVals = [0]*len(sortedClusPVals)
for i in list(range(0, len(sortedClusPVals))):
if sortedClusPVals[i] == 0:
logClusPVals[i] = math.inf
else:
logClusPVals[i] = -math.log(sortedClusPVals[i], 10)
# Record the data for display.
clusterData = {}
# If a corrected cluster threshold has been applied we should display
# cluster P values.
if run_query(g, 'askCExtentThreshold', 'Ask'):
clusterData['clusterPValues'] = sortedClusPVals
clusterData['logClusterPValues'] = logClusPVals
clusterData['clusSizes'] = sortedClusSizeArray
clusterData['clusIndices'] = sortedClusIndicesArray
clusterData['clusPeakZstats'] = sortedMaxPeakZstats
clusterData['clusPeakLocations'] = sortedMaxPeakLocations
clusterData['peakZstats'] = sortedPeaksZstatsArray
clusterData['peakClusIndices'] = sortedClusIndicesForPeaks
clusterData['peakLocations'] = sortedPeakLocations
clusterData['peakPVals'] = sortedPeakPVals
clusterData['logPeakPVals'] = logPeakPVals
return(clusterData)
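# Editorial sketch: the -log10(p) convention used in format_cluster_stats,
# isolated as a helper (p == 0 maps to infinity, as in the loops above).
def _neg_log10(p):
    return math.inf if p == 0 else -math.log(p, 10)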
def contrast_vec(data, v_min, v_max):
# This import is needed only in this function.
from matplotlib import pyplot as plt
conLength = len(data)
# We invert the values so the colours appear correctly
# (i.e. 1 -> white, 0 -> black).
data = np.ones(len(data))-data
# Make the contrast vector larger so we can make an image.
data = np.kron(data, np.ones((10, 30)))
# Add border to data.
data[:, 0] = v_max*np.ones(10)
data[:, 30*conLength-1] = v_max*np.ones(10)
data[0, :] = v_max*np.ones(30*conLength)
data[10-1, :] = v_max*np.ones(30*conLength)
# Create figure.
fig = plt.figure(figsize=(len(data), 1))
# Remove axis
ax = fig.add_subplot(1, 1, 1)
plt.axis('off')
# Add contrast vector to figure
plt.imshow(data, aspect='auto', cmap='Greys', vmin=v_min, vmax=v_max)
# Compute the bounding box of the axes.
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# Save the figure, cropped to the bounding box.
tempFile = 'tempCon' + str(random.randint(0, 999999)) + '.png'
plt.savefig(tempFile, bbox_inches=extent)
# Encode the figure.
encodedIm = encode_image(tempFile)
# Remove the image.
os.remove(tempFile)
# Return the image
return('data:image/jpg;base64,' + encodedIm.decode())
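# Editorial sketch: hypothetical usage of contrast_vec; the vector and value
# bounds are example inputs.
def _contrast_vec_example():
    uri = contrast_vec(np.array([1.0, 0.0, 1.0]), 0, 1)
    assert uri.startswith('data:image/jpg;base64,')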
# This function takes in an excursion set name and a graph and generates
# an FSL-like name for the HTML output display for the excursion set.
#
# e.g. ExcursionSet_F001.nii.gz -> cluster_zfstat1_std.html
def get_clus_filename(g, excName):
# For SPM data we can't work out the filename we want from just
# the contrast name.
if run_query(g, 'askSPM', 'Ask'):
# For SPM data we must look for the statistic map to
# assert which statistic is associated to a contrast.
statisticMap = run_query(g, 'selectStatMap', 'Select',
{'EXC_NAME': (excName + '.nii.gz')})[0]
# If it's T stat string is '', if it's F stat string
# is 'f'
if statisticMap[0] == 'T':
statString = ''
else:
statString = statisticMap[0].lower()
return('cluster_z' + statString + 'stat1_std.html')
else:
# In FSL the excursion set maps are always of the form
# ExcursionSet_(stattype)00(number), unless only one T
# statistic was computed. Then the excursion set map is
# named ExcursionSet.
if '_F' in excName:
statString = 'f'
else:
statString = ''
# The last letter of the name should either be the
# excursion number or, if there is only one excursion
# set, 't'.
number = excName.replace('.nii.gz', '')[-1]
if number == 't':
number = '1'
return('cluster_z' + statString + 'stat' + number + '_std.html') | peakPVals = [float(peakQueryResult[i]) for i in list(
range(3, len(peakQueryResult), 5))]
else: | random_line_split |
statformat.py | #!/usr/bin/env python3
# ======================================================================
#
# This file contains functions used for formatting statistical data
# returned by SPARQL queries on NIDM-Results packs.
#
# Authors: Peter Williams, Tom Maullin, Camille Maumet (10/01/18)
#
# ======================================================================
from queries.querytools import run_query
from style.pagestyling import encode_image
import numpy as np
import random
import os
import math
import matplotlib
matplotlib.use('Agg')
# This function converts obo statistic types into the corresponding statistic.
def statistic_type(stat):
if stat == "http://purl.obolibrary.org/obo/STATO_0000376":
|
elif stat == "http://purl.obolibrary.org/obo/STATO_0000282":
return("F")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000176":
return("T")
else:
return("P")
# This function returns the cluster forming threshold type of an image.
def height_thresh_type(graph, imageType):
if run_query(graph, 'askIfOboStatistic', 'Ask'):
return(imageType)
else:
return("P")
# This function returns the statistic type of a statistic
def statistic_type_string(statImage):
if statImage == "T":
return("T")
elif statImage == "F":
return("F")
elif statImage == "Z":
return("Z (Gaussianised T/F)")
def format_cluster_stats(g, excName):
# ----------------------------------------------------------------------
# First we gather data for peaks table.
# ----------------------------------------------------------------------
# Run the peak query
peakQueryResult = run_query(g, 'selectPeakData', 'Select',
{'EXC_NAME': excName})
# Retrieve query results.
peakZstats = [float(peakQueryResult[i]) for i in list(range(0, len(
peakQueryResult), 5))]
clusterIndicesForPeaks = [int(peakQueryResult[i]) for i in list(range(
1, len(peakQueryResult), 5))]
locations = [peakQueryResult[i] for i in list(range(2, len(
peakQueryResult), 5))]
# If a corrected height threshold has been applied we should display
# corrected peak P values. Else we should use uncorrected peak P values.
try:
if run_query(g, 'askCHeightThreshold', 'Ask'):
peakPVals = [float(peakQueryResult[i]) for i in list(
range(3, len(peakQueryResult), 5))]
else:
peakPVals = [float(peakQueryResult[i]) for i in list(
range(4, len(peakQueryResult), 5))]
# This is a temporary bug fix due to the FSL exporter currently not
# recording corrected peak P-values.
except ValueError:
peakPVals = [math.nan for row in peakQueryResult]
# Obtain permutation used to sort the results in order of descending
# cluster index and then descending peak statistic size.
peaksSortPermutation = sorted(range(len(clusterIndicesForPeaks)),
reverse=True,
key=lambda k: (clusterIndicesForPeaks[k],
peakZstats[k]))
# Sort all peak data using this permutation.
sortedPeaksZstatsArray = [peakZstats[i] for i in peaksSortPermutation]
sortedClusIndicesForPeaks = [
clusterIndicesForPeaks[i] for i in peaksSortPermutation]
sortedPeakLocations = [locations[i] for i in peaksSortPermutation]
sortedPeakPVals = [peakPVals[i] for i in peaksSortPermutation]
# ----------------------------------------------------------------------
# Second we gather data for cluster table.
# ----------------------------------------------------------------------
# Run the cluster query
clusQueryResult = run_query(g, 'selectClusterData', 'Select',
{'EXC_NAME': excName})
clusterIndices = [
int(clusQueryResult[i]) for i in list(
range(0, len(clusQueryResult), 3))]
clusterSizes = [
int(clusQueryResult[i]) for i in list(
range(1, len(clusQueryResult), 3))]
clusterPVals = [
float(clusQueryResult[i]) for i in list(
range(2, len(clusQueryResult), 3))]
# Create an array for the highest peaks.
highestPeakZArray = [0]*len(clusterIndices)
highestPeakLocations = [0]*len(clusterIndices)
for i in list(range(0, len(peakZstats))):
if highestPeakZArray[clusterIndicesForPeaks[i]-1] < peakZstats[i]:
highestPeakZArray[clusterIndicesForPeaks[i]-1] = peakZstats[i]
highestPeakLocations[clusterIndicesForPeaks[i]-1] = locations[i]
# Obtain permutation used to sort the results in order of descending
# cluster index and then for each cluster by peak statistic size.
clusterSortPermutation = sorted(
range(len(clusterIndices)),
reverse=True,
key=lambda k: (clusterSizes[k], clusterIndices[k]))
# Sorted cluster arrays
sortedClusSizeArray = [
clusterSizes[i] for i in clusterSortPermutation]
sortedClusIndicesArray = [
clusterIndices[i] for i in clusterSortPermutation]
sortedClusPVals = [
clusterPVals[i] for i in clusterSortPermutation]
# Sort the highest peaks
sortedMaxPeakZstats = [
highestPeakZArray[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
sortedMaxPeakLocations = [
highestPeakLocations[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
# Deal with inf issues for peaks.
logPeakPVals = [0]*len(sortedPeakPVals)
for i in list(range(0, len(sortedPeakPVals))):
if sortedPeakPVals[i] == 0:
logPeakPVals[i] = math.inf
else:
logPeakPVals[i] = -math.log(sortedPeakPVals[i], 10)
# Deal with inf issues for clusters.
logClusPVals = [0]*len(sortedClusPVals)
for i in list(range(0, len(sortedClusPVals))):
if sortedClusPVals[i] == 0:
logClusPVals[i] = math.inf
else:
logClusPVals[i] = -math.log(sortedClusPVals[i], 10)
# Record the data for display.
clusterData = {}
# If a corrected cluster threshold has been applied we should display
# cluster P values.
if run_query(g, 'askCExtentThreshold', 'Ask'):
clusterData['clusterPValues'] = sortedClusPVals
clusterData['logClusterPValues'] = logClusPVals
clusterData['clusSizes'] = sortedClusSizeArray
clusterData['clusIndices'] = sortedClusIndicesArray
clusterData['clusPeakZstats'] = sortedMaxPeakZstats
clusterData['clusPeakLocations'] = sortedMaxPeakLocations
clusterData['peakZstats'] = sortedPeaksZstatsArray
clusterData['peakClusIndices'] = sortedClusIndicesForPeaks
clusterData['peakLocations'] = sortedPeakLocations
clusterData['peakPVals'] = sortedPeakPVals
clusterData['logPeakPVals'] = logPeakPVals
return(clusterData)
def contrast_vec(data, v_min, v_max):
# This import is needed only in this function.
from matplotlib import pyplot as plt
conLength = len(data)
# We invert the values so the colours appear correctly
# (i.e. 1 -> white, 0 -> black).
data = np.ones(len(data))-data
# Make the contrast vector larger so we can make an image.
data = np.kron(data, np.ones((10, 30)))
# Add border to data.
data[:, 0] = v_max*np.ones(10)
data[:, 30*conLength-1] = v_max*np.ones(10)
data[0, :] = v_max*np.ones(30*conLength)
data[10-1, :] = v_max*np.ones(30*conLength)
# Create figure.
fig = plt.figure(figsize=(len(data), 1))
# Remove axis
ax = fig.add_subplot(1, 1, 1)
plt.axis('off')
# Add contrast vector to figure
plt.imshow(data, aspect='auto', cmap='Greys', vmin=v_min, vmax=v_max)
# Compute the bounding box of the axes.
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# Save the figure, cropped to the bounding box.
tempFile = 'tempCon' + str(random.randint(0, 999999)) + '.png'
plt.savefig(tempFile, bbox_inches=extent)
# Encode the figure.
encodedIm = encode_image(tempFile)
# Remove the image.
os.remove(tempFile)
# Return the image
return('data:image/jpg;base64,' + encodedIm.decode())
# This function takes in an excursion set name and a graph and generates
# an FSL-like name for the HTML output display for the excursion set.
#
# e.g. ExcursionSet_F001.nii.gz -> cluster_zfstat1_std.html
def get_clus_filename(g, excName):
# For SPM data we can't work out the filename we want from just
# the contrast name.
if run_query(g, 'askSPM', 'Ask'):
# For SPM data we must look for the statistic map to
# assert which statistic is associated to a contrast.
statisticMap = run_query(g, 'selectStatMap', 'Select',
{'EXC_NAME': (excName + '.nii.gz')})[0]
# If it's T stat string is '', if it's F stat string
# is 'f'
if statisticMap[0] == 'T':
statString = ''
else:
statString = statisticMap[0].lower()
return('cluster_z' + statString + 'stat1_std.html')
else:
# In FSL the excursion set maps are always of the form
# ExcursionSet_(stattype)00(number), unless only one T
# statistic was computed. Then the excursion set map is
# named ExcursionSet.
if '_F' in excName:
statString = 'f'
else:
statString = ''
# The last letter of the name should either be the
# excursion number or, if there is only one excursion
# set, 't'.
number = excName.replace('.nii.gz', '')[-1]
if number == 't':
number = '1'
return('cluster_z' + statString + 'stat' + number + '_std.html')
| return("Z") | conditional_block |
statformat.py | #!/usr/bin/env python3
# ======================================================================
#
# This file contains functions used for formatting statistical data
# returned by SPARQL queries on NIDM-Results packs.
#
# Authors: Peter Williams, Tom Maullin, Camille Maumet (10/01/18)
#
# ======================================================================
from queries.querytools import run_query
from style.pagestyling import encode_image
import numpy as np
import random
import os
import math
import matplotlib
matplotlib.use('Agg')
# This function converts obo statistic types into the corresponding statistic.
def statistic_type(stat):
if stat == "http://purl.obolibrary.org/obo/STATO_0000376":
return("Z")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000282":
return("F")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000176":
return("T")
else:
return("P")
# This function returns the cluster forming threshold type of an image.
def height_thresh_type(graph, imageType):
if run_query(graph, 'askIfOboStatistic', 'Ask'):
return(imageType)
else:
return("P")
# This function returns the statistic type of a statistic
def statistic_type_string(statImage):
if statImage == "T":
return("T")
elif statImage == "F":
return("F")
elif statImage == "Z":
return("Z (Gaussianised T/F)")
def format_cluster_stats(g, excName):
# ----------------------------------------------------------------------
# First we gather data for peaks table.
# ----------------------------------------------------------------------
# Run the peak query
peakQueryResult = run_query(g, 'selectPeakData', 'Select',
{'EXC_NAME': excName})
# Retrieve query results.
peakZstats = [float(peakQueryResult[i]) for i in list(range(0, len(
peakQueryResult), 5))]
clusterIndicesForPeaks = [int(peakQueryResult[i]) for i in list(range(
1, len(peakQueryResult), 5))]
locations = [peakQueryResult[i] for i in list(range(2, len(
peakQueryResult), 5))]
# If a corrected height threshold has been applied we should display
# corrected peak P values. Else we should use uncorrected peak P values.
try:
if run_query(g, 'askCHeightThreshold', 'Ask'):
peakPVals = [float(peakQueryResult[i]) for i in list(
range(3, len(peakQueryResult), 5))]
else:
peakPVals = [float(peakQueryResult[i]) for i in list(
range(4, len(peakQueryResult), 5))]
# This is a temporary bug fix due to the FSL exporter currently not
# recording corrected peak P-values.
except ValueError:
peakPVals = [math.nan for row in peakQueryResult]
# Obtain permutation used to sort the results in order of descending
# cluster index and then descending peak statistic size.
peaksSortPermutation = sorted(range(len(clusterIndicesForPeaks)),
reverse=True,
key=lambda k: (clusterIndicesForPeaks[k],
peakZstats[k]))
# Sort all peak data using this permutation.
sortedPeaksZstatsArray = [peakZstats[i] for i in peaksSortPermutation]
sortedClusIndicesForPeaks = [
clusterIndicesForPeaks[i] for i in peaksSortPermutation]
sortedPeakLocations = [locations[i] for i in peaksSortPermutation]
sortedPeakPVals = [peakPVals[i] for i in peaksSortPermutation]
# ----------------------------------------------------------------------
# Second we gather data for cluster table.
# ----------------------------------------------------------------------
# Run the cluster query
clusQueryResult = run_query(g, 'selectClusterData', 'Select',
{'EXC_NAME': excName})
clusterIndices = [
int(clusQueryResult[i]) for i in list(
range(0, len(clusQueryResult), 3))]
clusterSizes = [
int(clusQueryResult[i]) for i in list(
range(1, len(clusQueryResult), 3))]
clusterPVals = [
float(clusQueryResult[i]) for i in list(
range(2, len(clusQueryResult), 3))]
# Create an array for the highest peaks.
highestPeakZArray = [0]*len(clusterIndices)
highestPeakLocations = [0]*len(clusterIndices)
for i in list(range(0, len(peakZstats))):
if highestPeakZArray[clusterIndicesForPeaks[i]-1] < peakZstats[i]:
highestPeakZArray[clusterIndicesForPeaks[i]-1] = peakZstats[i]
highestPeakLocations[clusterIndicesForPeaks[i]-1] = locations[i]
# Obtain permutation used to sort the results in order of descending
# cluster index and then for each cluster by peak statistic size.
clusterSortPermutation = sorted(
range(len(clusterIndices)),
reverse=True,
key=lambda k: (clusterSizes[k], clusterIndices[k]))
# Sorted cluster arrays
sortedClusSizeArray = [
clusterSizes[i] for i in clusterSortPermutation]
sortedClusIndicesArray = [
clusterIndices[i] for i in clusterSortPermutation]
sortedClusPVals = [
clusterPVals[i] for i in clusterSortPermutation]
# Sort the highest peaks
sortedMaxPeakZstats = [
highestPeakZArray[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
sortedMaxPeakLocations = [
highestPeakLocations[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
# Deal with inf issues for peaks.
logPeakPVals = [0]*len(sortedPeakPVals)
for i in list(range(0, len(sortedPeakPVals))):
if sortedPeakPVals[i] == 0:
logPeakPVals[i] = math.inf
else:
logPeakPVals[i] = -math.log(sortedPeakPVals[i], 10)
# Deal with inf issues for clusters.
logClusPVals = [0]*len(sortedClusPVals)
for i in list(range(0, len(sortedClusPVals))):
if sortedClusPVals[i] == 0:
logClusPVals[i] = math.inf
else:
logClusPVals[i] = -math.log(sortedClusPVals[i], 10)
# Record the data for display.
clusterData = {}
# If a corrected cluster threshold has been applied we should display
# cluster P values.
if run_query(g, 'askCExtentThreshold', 'Ask'):
clusterData['clusterPValues'] = sortedClusPVals
clusterData['logClusterPValues'] = logClusPVals
clusterData['clusSizes'] = sortedClusSizeArray
clusterData['clusIndices'] = sortedClusIndicesArray
clusterData['clusPeakZstats'] = sortedMaxPeakZstats
clusterData['clusPeakLocations'] = sortedMaxPeakLocations
clusterData['peakZstats'] = sortedPeaksZstatsArray
clusterData['peakClusIndices'] = sortedClusIndicesForPeaks
clusterData['peakLocations'] = sortedPeakLocations
clusterData['peakPVals'] = sortedPeakPVals
clusterData['logPeakPVals'] = logPeakPVals
return(clusterData)
def contrast_vec(data, v_min, v_max):
# This import is needed only in this function.
|
# This function takes in an excursion set name and a graph and generates
# an FSL-like name for the HTML output display for the excursion set.
#
# e.g. ExcursionSet_F001.nii.gz -> cluster_zfstat1_std.html
def get_clus_filename(g, excName):
# For SPM data we can't work out the filename we want from just
# the contrast name.
if run_query(g, 'askSPM', 'Ask'):
# For SPM data we must look for the statistic map to
# assert which statistic is associated to a contrast.
statisticMap = run_query(g, 'selectStatMap', 'Select',
{'EXC_NAME': (excName + '.nii.gz')})[0]
# If it's T stat string is '', if it's F stat string
# is 'f'
if statisticMap[0] == 'T':
statString = ''
else:
statString = statisticMap[0].lower()
return('cluster_z' + statString + 'stat1_std.html')
else:
# In FSL the excursion set maps are always of the form
# ExcursionSet_(stattype)00(number), unless only one T
# statistic was computed. Then the excursion set map is
# named ExcursionSet.
if '_F' in excName:
statString = 'f'
else:
statString = ''
# The last letter of the name should either be the
# excursion number or, if there is only one excursion
# set, 't'.
number = excName.replace('.nii.gz', '')[-1]
if number == 't':
number = '1'
return('cluster_z' + statString + 'stat' + number + '_std.html')
| from matplotlib import pyplot as plt
conLength = len(data)
# We invert the values so the colours appear correctly
# (i.e. 1 -> white, 0 -> black).
data = np.ones(len(data))-data
# Make the contrast vector larger so we can make an image.
data = np.kron(data, np.ones((10, 30)))
# Add border to data.
data[:, 0] = v_max*np.ones(10)
data[:, 30*conLength-1] = v_max*np.ones(10)
data[0, :] = v_max*np.ones(30*conLength)
data[10-1, :] = v_max*np.ones(30*conLength)
# Create figure.
fig = plt.figure(figsize=(len(data), 1))
# Remove axis
ax = fig.add_subplot(1, 1, 1)
plt.axis('off')
# Add contrast vector to figure
plt.imshow(data, aspect='auto', cmap='Greys', vmin=v_min, vmax=v_max)
# Compute the bounding box of the axes.
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# Save the figure, cropped to the bounding box.
tempFile = 'tempCon' + str(random.randint(0, 999999)) + '.png'
plt.savefig(tempFile, bbox_inches=extent)
# Encode the figure.
encodedIm = encode_image(tempFile)
# Remove the image.
os.remove(tempFile)
# Return the image
return('data:image/jpg;base64,' + encodedIm.decode()) | identifier_body |
statformat.py | #!/usr/bin/env python3
# ======================================================================
#
# This file contains functions used for formatting statistical data
# returned by SPARQL queries on NIDM-Results packs.
#
# Authors: Peter Williams, Tom Maullin, Camille Maumet (10/01/18)
#
# ======================================================================
from queries.querytools import run_query
from style.pagestyling import encode_image
import numpy as np
import random
import os
import math
import matplotlib
matplotlib.use('Agg')
# This function converts obo statistic types into the corresponding statistic.
def statistic_type(stat):
if stat == "http://purl.obolibrary.org/obo/STATO_0000376":
return("Z")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000282":
return("F")
elif stat == "http://purl.obolibrary.org/obo/STATO_0000176":
return("T")
else:
return("P")
# This function returns the cluster forming threshold type of an image.
def height_thresh_type(graph, imageType):
if run_query(graph, 'askIfOboStatistic', 'Ask'):
return(imageType)
else:
return("P")
# This function returns the statistic type of a statistic
def | (statImage):
if statImage == "T":
return("T")
elif statImage == "F":
return("F")
elif statImage == "Z":
return("Z (Gaussianised T/F)")
def format_cluster_stats(g, excName):
# ----------------------------------------------------------------------
# First we gather data for peaks table.
# ----------------------------------------------------------------------
# Run the peak query
peakQueryResult = run_query(g, 'selectPeakData', 'Select',
{'EXC_NAME': excName})
# Retrieve query results.
peakZstats = [float(peakQueryResult[i]) for i in list(range(0, len(
peakQueryResult), 5))]
clusterIndicesForPeaks = [int(peakQueryResult[i]) for i in list(range(
1, len(peakQueryResult), 5))]
locations = [peakQueryResult[i] for i in list(range(2, len(
peakQueryResult), 5))]
# If a corrected height threshold has been applied we should display
# corrected peak P values. Else we should use uncorrected peak P values.
try:
if run_query(g, 'askCHeightThreshold', 'Ask'):
peakPVals = [float(peakQueryResult[i]) for i in list(
range(3, len(peakQueryResult), 5))]
else:
peakPVals = [float(peakQueryResult[i]) for i in list(
range(4, len(peakQueryResult), 5))]
# This is a temporary bug fix due to the FSL exporter currently not
# recording corrected peak P-values.
except ValueError:
peakPVals = [math.nan for row in peakQueryResult]
# Obtain permutation used to sort the results in order of descending
# cluster index and then descending peak statistic size.
peaksSortPermutation = sorted(range(len(clusterIndicesForPeaks)),
reverse=True,
key=lambda k: (clusterIndicesForPeaks[k],
peakZstats[k]))
# Sort all peak data using this permutation.
sortedPeaksZstatsArray = [peakZstats[i] for i in peaksSortPermutation]
sortedClusIndicesForPeaks = [
clusterIndicesForPeaks[i] for i in peaksSortPermutation]
sortedPeakLocations = [locations[i] for i in peaksSortPermutation]
sortedPeakPVals = [peakPVals[i] for i in peaksSortPermutation]
# ----------------------------------------------------------------------
# Second we gather data for cluster table.
# ----------------------------------------------------------------------
# Run the cluster query
clusQueryResult = run_query(g, 'selectClusterData', 'Select',
{'EXC_NAME': excName})
clusterIndices = [
int(clusQueryResult[i]) for i in list(
range(0, len(clusQueryResult), 3))]
clusterSizes = [
int(clusQueryResult[i]) for i in list(
range(1, len(clusQueryResult), 3))]
clusterPVals = [
float(clusQueryResult[i]) for i in list(
range(2, len(clusQueryResult), 3))]
# Create an array for the highest peaks.
highestPeakZArray = [0]*len(clusterIndices)
highestPeakLocations = [0]*len(clusterIndices)
for i in list(range(0, len(peakZstats))):
if highestPeakZArray[clusterIndicesForPeaks[i]-1] < peakZstats[i]:
highestPeakZArray[clusterIndicesForPeaks[i]-1] = peakZstats[i]
highestPeakLocations[clusterIndicesForPeaks[i]-1] = locations[i]
# Obtain permutation used to sort the results in order of descending
# cluster index and then for each cluster by peak statistic size.
clusterSortPermutation = sorted(
range(len(clusterIndices)),
reverse=True,
key=lambda k: (clusterSizes[k], clusterIndices[k]))
# Sorted cluster arrays
sortedClusSizeArray = [
clusterSizes[i] for i in clusterSortPermutation]
sortedClusIndicesArray = [
clusterIndices[i] for i in clusterSortPermutation]
sortedClusPVals = [
clusterPVals[i] for i in clusterSortPermutation]
# Sort the highest peaks
sortedMaxPeakZstats = [
highestPeakZArray[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
sortedMaxPeakLocations = [
highestPeakLocations[
sortedClusIndicesArray[i]-1] for i in list(
range(0, len(clusterIndices)))]
# Deal with inf issues for peaks.
logPeakPVals = [0]*len(sortedPeakPVals)
for i in list(range(0, len(sortedPeakPVals))):
if sortedPeakPVals[i] == 0:
logPeakPVals[i] = math.inf
else:
logPeakPVals[i] = -math.log(sortedPeakPVals[i], 10)
# Deal with inf issues for clusters.
logClusPVals = [0]*len(sortedClusPVals)
for i in list(range(0, len(sortedClusPVals))):
if sortedClusPVals[i] == 0:
logClusPVals[i] = math.inf
else:
logClusPVals[i] = -math.log(sortedClusPVals[i], 10)
# Record the data for display.
clusterData = {}
# If a corrected cluster threshold has been applied we should display
# cluster P values.
if run_query(g, 'askCExtentThreshold', 'Ask'):
clusterData['clusterPValues'] = sortedClusPVals
clusterData['logClusterPValues'] = logClusPVals
clusterData['clusSizes'] = sortedClusSizeArray
clusterData['clusIndices'] = sortedClusIndicesArray
clusterData['clusPeakZstats'] = sortedMaxPeakZstats
clusterData['clusPeakLocations'] = sortedMaxPeakLocations
clusterData['peakZstats'] = sortedPeaksZstatsArray
clusterData['peakClusIndices'] = sortedClusIndicesForPeaks
clusterData['peakLocations'] = sortedPeakLocations
clusterData['peakPVals'] = sortedPeakPVals
clusterData['logPeakPVals'] = logPeakPVals
return(clusterData)
def contrast_vec(data, v_min, v_max):
# This import is needed only in this function.
from matplotlib import pyplot as plt
conLength = len(data)
# We invert the values so the colours appear correctly
# (i.e. 1 -> white, 0 -> black).
data = np.ones(len(data))-data
# Make the contrast vector larger so we can make an image.
data = np.kron(data, np.ones((10, 30)))
# Add border to data.
data[:, 0] = v_max*np.ones(10)
data[:, 30*conLength-1] = v_max*np.ones(10)
data[0, :] = v_max*np.ones(30*conLength)
data[10-1, :] = v_max*np.ones(30*conLength)
# Create figure.
fig = plt.figure(figsize=(len(data), 1))
# Remove axis
ax = fig.add_subplot(1, 1, 1)
plt.axis('off')
# Add contrast vector to figure
plt.imshow(data, aspect='auto', cmap='Greys', vmin=v_min, vmax=v_max)
# Compute the bounding box of the axes.
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# Save the figure, cropped to the bounding box.
tempFile = 'tempCon' + str(random.randint(0, 999999)) + '.png'
plt.savefig(tempFile, bbox_inches=extent)
# Encode the figure.
encodedIm = encode_image(tempFile)
# Remove the image.
os.remove(tempFile)
# Return the image
return('data:image/jpg;base64,' + encodedIm.decode())
# This function takes in an excursion set name and a graph and generates
# an FSL-like name for the HTML output display for the excursion set.
#
# e.g. ExcursionSet_F001.nii.gz -> cluster_zfstat1_std.html
def get_clus_filename(g, excName):
# For SPM data we can't work out the filename we want from just
# the contrast name.
if run_query(g, 'askSPM', 'Ask'):
# For SPM data we must look for the statistic map to
# assert which statistic is associated to a contrast.
statisticMap = run_query(g, 'selectStatMap', 'Select',
{'EXC_NAME': (excName + '.nii.gz')})[0]
# If it's T stat string is '', if it's F stat string
# is 'f'
if statisticMap[0] == 'T':
statString = ''
else:
statString = statisticMap[0].lower()
return('cluster_z' + statString + 'stat1_std.html')
else:
# In FSL the excursion set maps are always of the form
# ExcursionSet_(stattype)00(number), unless only one T
# statistic was computed. Then the excursion set map is
# named ExcursionSet.
if '_F' in excName:
statString = 'f'
else:
statString = ''
# The last letter of the name should either be the
# excursion number or, if there is only one excursion
# set, 't'.
number = excName.replace('.nii.gz', '')[-1]
if number == 't':
number = '1'
return('cluster_z' + statString + 'stat' + number + '_std.html')
| statistic_type_string | identifier_name |
message_partition.go | package store
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
var MAGIC_NUMBER = []byte{42, 249, 180, 108, 82, 75, 222, 182}
var FILE_FORMAT_VERSION = []byte{1}
var MESSAGES_PER_FILE = uint64(10000)
var INDEX_ENTRY_SIZE = 12
type fetchEntry struct {
messageId uint64
fileId uint64
offset int64
size int
}
type MessagePartition struct {
basedir string
name string
appendFile *os.File
appendIndexFile *os.File
appendFirstId uint64
appendLastId uint64
appendFileWritePosition uint64
maxMessageId uint64
mutex *sync.RWMutex
}
func NewMessagePartition(basedir string, storeName string) (*MessagePartition, error) {
p := &MessagePartition{
basedir: basedir,
name: storeName,
mutex: &sync.RWMutex{},
}
return p, p.initialize()
}
func (p *MessagePartition) initialize() error {
p.mutex.Lock()
defer p.mutex.Unlock()
fileList, err := p.scanFiles()
if err != nil {
return err
}
if len(fileList) == 0 {
p.maxMessageId = 0
} else {
var err error
p.maxMessageId, err = p.calculateMaxMessageIdFromIndex(fileList[len(fileList)-1])
if err != nil {
return err
}
}
return nil
}
// returns the max message id for a message file
func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {
stat, err := os.Stat(p.indexFilenameByMessageId(fileId))
if err != nil {
return 0, err
}
entriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))
return (entriesInIndex - 1 + fileId), nil
}
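// Editorial sketch: the index arithmetic above, isolated. With 12-byte
// entries, an index file of size s holds s/INDEX_ENTRY_SIZE entries, and the
// highest message id in the file is fileId + entries - 1 (illustrative only).
func maxIDForIndexSize(fileId uint64, size int64) uint64 {
	entries := uint64(size) / uint64(INDEX_ENTRY_SIZE)
	return fileId + entries - 1
}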
// Returns the start message ids for all available message files
// in a sorted list
func (p *MessagePartition) scanFiles() ([]uint64, error) {
result := []uint64{}
allFiles, err := ioutil.ReadDir(p.basedir)
if err != nil {
return nil, err
}
for _, fileInfo := range allFiles {
if strings.HasPrefix(fileInfo.Name(), p.name+"-") &&
strings.HasSuffix(fileInfo.Name(), ".idx") {
fileIdString := fileInfo.Name()[len(p.name)+1 : len(fileInfo.Name())-4]
if fileId, err := strconv.ParseUint(fileIdString, 10, 64); err == nil {
result = append(result, fileId)
}
}
}
return result, nil
}
func (p *MessagePartition) MaxMessageId() (uint64, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.maxMessageId, nil
}
func (p *MessagePartition) closeAppendFiles() error {
if p.appendFile != nil {
if err := p.appendFile.Close(); err != nil {
if p.appendIndexFile != nil {
defer p.appendIndexFile.Close()
}
return err
}
p.appendFile = nil
}
if p.appendIndexFile != nil {
err := p.appendIndexFile.Close()
p.appendIndexFile = nil
return err
}
return nil
}
func (p *MessagePartition) createNextAppendFiles(msgId uint64) error {
firstMessageIdForFile := p.firstMessageIdForFile(msgId)
file, err := os.OpenFile(p.filenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return err
}
// write file header on new files
if stat, _ := file.Stat(); stat.Size() == 0 {
p.appendFileWritePosition = uint64(stat.Size())
_, err = file.Write(MAGIC_NUMBER)
if err != nil {
return err
}
_, err = file.Write(FILE_FORMAT_VERSION)
if err != nil {
return err
}
}
index, errIndex := os.OpenFile(p.indexFilenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE, 0666)
if errIndex != nil {
defer file.Close()
defer os.Remove(file.Name())
return errIndex
}
p.appendFile = file
p.appendIndexFile = index
p.appendFirstId = firstMessageIdForFile
p.appendLastId = firstMessageIdForFile + MESSAGES_PER_FILE - 1
stat, err := file.Stat()
if err != nil {
	return err
}
p.appendFileWritePosition = uint64(stat.Size())
|
func (p *MessagePartition) Close() error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.closeAppendFiles()
}
func (p *MessagePartition) DoInTx(fnToExecute func(maxMessageId uint64) error) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return fnToExecute(p.maxMessageId)
}
func (p *MessagePartition) StoreTx(partition string,
callback func(msgId uint64) (msg []byte)) error {
p.mutex.Lock()
defer p.mutex.Unlock()
msgId := p.maxMessageId + 1
return p.store(msgId, callback(msgId))
}
func (p *MessagePartition) Store(msgId uint64, msg []byte) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.store(msgId, msg)
}
func (p *MessagePartition) store(msgId uint64, msg []byte) error {
if msgId != 1+p.maxMessageId {
return fmt.Errorf("Invalid message id for partition %v. Next id should be %v, but was %q", p.name, 1+p.maxMessageId, msgId)
}
if msgId > p.appendLastId ||
p.appendFile == nil ||
p.appendIndexFile == nil {
if err := p.closeAppendFiles(); err != nil {
return err
}
if err := p.createNextAppendFiles(msgId); err != nil {
return err
}
}
// write the message size and the message id 32bit and 64 bit
sizeAndId := make([]byte, 12)
binary.LittleEndian.PutUint32(sizeAndId, uint32(len(msg)))
binary.LittleEndian.PutUint64(sizeAndId[4:], msgId)
if _, err := p.appendFile.Write(sizeAndId); err != nil {
return err
}
// write the message
if _, err := p.appendFile.Write(msg); err != nil {
return err
}
// write the index entry to the index file
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (msgId % MESSAGES_PER_FILE))
messageOffset := p.appendFileWritePosition + uint64(len(sizeAndId))
messageOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
binary.LittleEndian.PutUint64(messageOffsetBuff, messageOffset)
binary.LittleEndian.PutUint32(messageOffsetBuff[8:], uint32(len(msg)))
if _, err := p.appendIndexFile.WriteAt(messageOffsetBuff, indexPosition); err != nil {
return err
}
p.appendFileWritePosition += uint64(len(sizeAndId) + len(msg))
p.maxMessageId = msgId
return nil
}
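// Editorial sketch: the on-disk record header written by store() above is a
// 4-byte little-endian payload size followed by an 8-byte little-endian
// message id. Illustrative helper, not called elsewhere.
func encodeRecordHeader(msgId uint64, msg []byte) []byte {
	header := make([]byte, 12)
	binary.LittleEndian.PutUint32(header, uint32(len(msg)))
	binary.LittleEndian.PutUint64(header[4:], msgId)
	return header
}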
// fetch a set of messages
func (p *MessagePartition) Fetch(req FetchRequest) {
go func() {
fetchList, err := p.calculateFetchList(req)
if err != nil {
req.ErrorCallback <- err
return
}
req.StartCallback <- len(fetchList)
err = p.fetchByFetchlist(fetchList, req.MessageC)
if err != nil {
req.ErrorCallback <- err
return
}
close(req.MessageC)
}()
}
// fetch the messages in the supplied fetchlist and send them to the channel
func (p *MessagePartition) fetchByFetchlist(fetchList []fetchEntry, messageC chan MessageAndId) error {
var fileId uint64
var file *os.File
var err error
var lastMsgId uint64
for _, f := range fetchList {
if lastMsgId == 0 {
lastMsgId = f.messageId - 1
}
lastMsgId = f.messageId
// ensure, that we read on the correct file
if file == nil || fileId != f.fileId {
file, err = p.checkoutMessagefile(f.fileId)
if err != nil {
return err
}
defer p.releaseMessagefile(f.fileId, file)
fileId = f.fileId
}
msg := make([]byte, f.size, f.size)
_, err = file.ReadAt(msg, f.offset)
if err != nil {
return err
}
messageC <- MessageAndId{f.messageId, msg}
}
return nil
}
// returns a list of fetchEntry records for all message in the fetch request.
func (p *MessagePartition) calculateFetchList(req FetchRequest) ([]fetchEntry, error) {
if req.Direction == 0 {
req.Direction = 1
}
nextId := req.StartId
initialCap := req.Count
if initialCap > 100 {
initialCap = 100
}
result := make([]fetchEntry, 0, initialCap)
var file *os.File
var fileId uint64
for len(result) < req.Count && nextId >= 0 {
nextFileId := p.firstMessageIdForFile(nextId)
// ensure, that we read on the correct file
if file == nil || nextFileId != fileId {
var err error
file, err = p.checkoutIndexfile(nextFileId)
if err != nil {
if os.IsNotExist(err) {
return result, nil
}
return nil, err
}
defer p.releaseIndexfile(fileId, file)
fileId = nextFileId
}
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (nextId % MESSAGES_PER_FILE))
msgOffset, msgSize, err := readIndexEntry(file, indexPosition)
if err != nil {
if err.Error() == "EOF" {
return result, nil // we reached the end of the index
} else {
return nil, err
}
}
if msgOffset != uint64(0) { // only append, if the message exists
result = append(result, fetchEntry{
messageId: nextId,
fileId: fileId,
offset: int64(msgOffset),
size: int(msgSize),
})
}
nextId += uint64(req.Direction)
}
return result, nil
}
func readIndexEntry(file *os.File, indexPosition int64) (msgOffset uint64, msgSize uint32, err error) {
msgOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
if _, err := file.ReadAt(msgOffsetBuff, indexPosition); err != nil {
return 0, 0, err
}
msgOffset = binary.LittleEndian.Uint64(msgOffsetBuff)
msgSize = binary.LittleEndian.Uint32(msgOffsetBuff[8:])
return msgOffset, msgSize, nil
}
// Return a file handle to the message file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutMessagefile(fileId uint64) (*os.File, error) {
return os.Open(p.filenameByMessageId(fileId))
}
// Release a message file handle
func (p *MessagePartition) releaseMessagefile(fileId uint64, file *os.File) {
file.Close()
}
// Return a file handle to the index file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutIndexfile(fileId uint64) (*os.File, error) {
return os.Open(p.indexFilenameByMessageId(fileId))
}
// Release an index file handle
func (p *MessagePartition) releaseIndexfile(fileId uint64, file *os.File) {
file.Close()
}
func (p *MessagePartition) firstMessageIdForFile(messageId uint64) uint64 {
return messageId - messageId%MESSAGES_PER_FILE
}
func (p *MessagePartition) filenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.msg", p.name, messageId))
}
func (p *MessagePartition) indexFilenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.idx", p.name, messageId))
} | return nil
} | random_line_split |
message_partition.go | package store
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
var MAGIC_NUMBER = []byte{42, 249, 180, 108, 82, 75, 222, 182}
var FILE_FORMAT_VERSION = []byte{1}
var MESSAGES_PER_FILE = uint64(10000)
var INDEX_ENTRY_SIZE = 12
type fetchEntry struct {
messageId uint64
fileId uint64
offset int64
size int
}
type MessagePartition struct {
basedir string
name string
appendFile *os.File
appendIndexFile *os.File
appendFirstId uint64
appendLastId uint64
appendFileWritePosition uint64
maxMessageId uint64
mutex *sync.RWMutex
}
func NewMessagePartition(basedir string, storeName string) (*MessagePartition, error) {
p := &MessagePartition{
basedir: basedir,
name: storeName,
mutex: &sync.RWMutex{},
}
return p, p.initialize()
}
func (p *MessagePartition) initialize() error {
p.mutex.Lock()
defer p.mutex.Unlock()
fileList, err := p.scanFiles()
if err != nil {
return err
}
if len(fileList) == 0 {
p.maxMessageId = 0
} else {
var err error
p.maxMessageId, err = p.calculateMaxMessageIdFromIndex(fileList[len(fileList)-1])
if err != nil {
return err
}
}
return nil
}
// returns the max message id for a message file
func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {
stat, err := os.Stat(p.indexFilenameByMessageId(fileId))
if err != nil {
return 0, err
}
entriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))
return (entriesInIndex - 1 + fileId), nil
}
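// Worked example (illustrative): an index file of 36 bytes holds
// 36 / 12 = 3 entries, so for fileId 10000 the max message id is
// 3 - 1 + 10000 = 10002. Note this assumes a non-empty index file;
// a zero-length index would make entriesInIndex - 1 underflow.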
// Returns the start message ids for all available message files
// in a sorted list
func (p *MessagePartition) scanFiles() ([]uint64, error) {
result := []uint64{}
allFiles, err := ioutil.ReadDir(p.basedir)
if err != nil {
return nil, err
}
for _, fileInfo := range allFiles {
if strings.HasPrefix(fileInfo.Name(), p.name+"-") &&
strings.HasSuffix(fileInfo.Name(), ".idx") {
fileIdString := fileInfo.Name()[len(p.name)+1 : len(fileInfo.Name())-4]
if fileId, err := strconv.ParseUint(fileIdString, 10, 64); err == nil {
result = append(result, fileId)
}
}
}
return result, nil
}
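// Example (illustrative): for p.name = "mystore", the entry
// "mystore-00000000000000010000.idx" yields fileIdString = "00000000000000010000"
// and fileId = 10000. ReadDir returns entries sorted by filename, and the
// zero-padded ids make that order numeric as well.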
func (p *MessagePartition) MaxMessageId() (uint64, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.maxMessageId, nil
}
func (p *MessagePartition) closeAppendFiles() error {
if p.appendFile != nil {
if err := p.appendFile.Close(); err != nil {
if p.appendIndexFile != nil {
defer p.appendIndexFile.Close()
}
return err
}
p.appendFile = nil
}
if p.appendIndexFile != nil {
err := p.appendIndexFile.Close()
p.appendIndexFile = nil
return err
}
return nil
}
func (p *MessagePartition) createNextAppendFiles(msgId uint64) error {
firstMessageIdForFile := p.firstMessageIdForFile(msgId)
file, err := os.OpenFile(p.filenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return err
}
// write file header on new files
if stat, _ := file.Stat(); stat.Size() == 0 {
p.appendFileWritePosition = uint64(stat.Size())
_, err = file.Write(MAGIC_NUMBER)
if err != nil {
return err
}
_, err = file.Write(FILE_FORMAT_VERSION)
if err != nil {
return err
}
}
index, errIndex := os.OpenFile(p.indexFilenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE, 0666)
if errIndex != nil {
defer file.Close()
defer os.Remove(file.Name())
return errIndex
}
p.appendFile = file
p.appendIndexFile = index
p.appendFirstId = firstMessageIdForFile
p.appendLastId = firstMessageIdForFile + MESSAGES_PER_FILE - 1
stat, err := file.Stat()
if err != nil {
return err
}
p.appendFileWritePosition = uint64(stat.Size())
return nil
}
func (p *MessagePartition) Close() error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.closeAppendFiles()
}
func (p *MessagePartition) DoInTx(fnToExecute func(maxMessageId uint64) error) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return fnToExecute(p.maxMessageId)
}
func (p *MessagePartition) StoreTx(partition string,
callback func(msgId uint64) (msg []byte)) error {
p.mutex.Lock()
defer p.mutex.Unlock()
msgId := p.maxMessageId + 1
return p.store(msgId, callback(msgId))
}
func (p *MessagePartition) Store(msgId uint64, msg []byte) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.store(msgId, msg)
}
func (p *MessagePartition) store(msgId uint64, msg []byte) error {
if msgId != 1+p.maxMessageId {
return fmt.Errorf("Invalid message id for partition %v. Next id should be %v, but was %q", p.name, 1+p.maxMessageId, msgId)
}
if msgId > p.appendLastId ||
p.appendFile == nil ||
p.appendIndexFile == nil {
if err := p.closeAppendFiles(); err != nil {
return err
}
if err := p.createNextAppendFiles(msgId); err != nil {
return err
}
}
// write the record header: message size (32-bit) and message id (64-bit), both little endian
sizeAndId := make([]byte, 12)
binary.LittleEndian.PutUint32(sizeAndId, uint32(len(msg)))
binary.LittleEndian.PutUint64(sizeAndId[4:], msgId)
if _, err := p.appendFile.Write(sizeAndId); err != nil {
return err
}
// write the message
if _, err := p.appendFile.Write(msg); err != nil {
return err
}
// write the index entry to the index file
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (msgId % MESSAGES_PER_FILE))
messageOffset := p.appendFileWritePosition + uint64(len(sizeAndId))
messageOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
binary.LittleEndian.PutUint64(messageOffsetBuff, messageOffset)
binary.LittleEndian.PutUint32(messageOffsetBuff[8:], uint32(len(msg)))
if _, err := p.appendIndexFile.WriteAt(messageOffsetBuff, indexPosition); err != nil {
return err
}
p.appendFileWritePosition += uint64(len(sizeAndId) + len(msg))
p.maxMessageId = msgId
return nil
}
// Fetch asynchronously fetches a set of messages and streams them to req.MessageC
func (p *MessagePartition) Fetch(req FetchRequest) {
go func() {
fetchList, err := p.calculateFetchList(req)
if err != nil {
req.ErrorCallback <- err
return
}
req.StartCallback <- len(fetchList)
err = p.fetchByFetchlist(fetchList, req.MessageC)
if err != nil {
req.ErrorCallback <- err
return
}
close(req.MessageC)
}()
}
// fetch the messages in the supplied fetchlist and send them to the channel
func (p *MessagePartition) fetchByFetchlist(fetchList []fetchEntry, messageC chan MessageAndId) error {
var fileId uint64
var file *os.File
var err error
var lastMsgId uint64
for _, f := range fetchList {
if lastMsgId == 0 {
lastMsgId = f.messageId - 1
}
lastMsgId = f.messageId
// ensure that we read from the correct file
if file == nil || fileId != f.fileId {
file, err = p.checkoutMessagefile(f.fileId)
if err != nil {
return err
}
defer p.releaseMessagefile(f.fileId, file)
fileId = f.fileId
}
msg := make([]byte, f.size, f.size)
_, err = file.ReadAt(msg, f.offset)
if err != nil {
return err
}
messageC <- MessageAndId{f.messageId, msg}
}
return nil
}
// returns a list of fetchEntry records for all messages in the fetch request.
func (p *MessagePartition) calculateFetchList(req FetchRequest) ([]fetchEntry, error) {
if req.Direction == 0 {
req.Direction = 1
}
nextId := req.StartId
initialCap := req.Count
if initialCap > 100 {
initialCap = 100
}
result := make([]fetchEntry, 0, initialCap)
var file *os.File
var fileId uint64
for len(result) < req.Count && nextId >= 0 {
nextFileId := p.firstMessageIdForFile(nextId)
// ensure that we read from the correct file
if file == nil || nextFileId != fileId {
var err error
file, err = p.checkoutIndexfile(nextFileId)
if err != nil {
if os.IsNotExist(err) {
return result, nil
}
return nil, err
}
defer p.releaseIndexfile(nextFileId, file)
fileId = nextFileId
}
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (nextId % MESSAGES_PER_FILE))
msgOffset, msgSize, err := readIndexEntry(file, indexPosition)
if err != nil {
if err.Error() == "EOF" {
return result, nil // we reached the end of the index
} else {
return nil, err
}
}
if msgOffset != uint64(0) { // only append, if the message exists
result = append(result, fetchEntry{
messageId: nextId,
fileId: fileId,
offset: int64(msgOffset),
size: int(msgSize),
})
}
nextId += uint64(req.Direction)
}
return result, nil
}
func | (file *os.File, indexPosition int64) (msgOffset uint64, msgSize uint32, err error) {
msgOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
if _, err := file.ReadAt(msgOffsetBuff, indexPosition); err != nil {
return 0, 0, err
}
msgOffset = binary.LittleEndian.Uint64(msgOffsetBuff)
msgSize = binary.LittleEndian.Uint32(msgOffsetBuff[8:])
return msgOffset, msgSize, nil
}
// Return a file handle to the message file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutMessagefile(fileId uint64) (*os.File, error) {
return os.Open(p.filenameByMessageId(fileId))
}
// Release a message file handle
func (p *MessagePartition) releaseMessagefile(fileId uint64, file *os.File) {
file.Close()
}
// Return a file handle to the index file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutIndexfile(fileId uint64) (*os.File, error) {
return os.Open(p.indexFilenameByMessageId(fileId))
}
// Release an index file handle
func (p *MessagePartition) releaseIndexfile(fileId uint64, file *os.File) {
file.Close()
}
func (p *MessagePartition) firstMessageIdForFile(messageId uint64) uint64 {
return messageId - messageId%MESSAGES_PER_FILE
}
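// Example (illustrative): with MESSAGES_PER_FILE = 10000,
// firstMessageIdForFile(12345) = 12345 - 12345%10000 = 10000, so message
// 12345 is stored in the file named for start id 10000.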
func (p *MessagePartition) filenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.msg", p.name, messageId))
}
func (p *MessagePartition) indexFilenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.idx", p.name, messageId))
}
| readIndexEntry | identifier_name |
message_partition.go | package store
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
var MAGIC_NUMBER = []byte{42, 249, 180, 108, 82, 75, 222, 182}
var FILE_FORMAT_VERSION = []byte{1}
var MESSAGES_PER_FILE = uint64(10000)
var INDEX_ENTRY_SIZE = 12
type fetchEntry struct {
messageId uint64
fileId uint64
offset int64
size int
}
type MessagePartition struct {
basedir string
name string
appendFile *os.File
appendIndexFile *os.File
appendFirstId uint64
appendLastId uint64
appendFileWritePosition uint64
maxMessageId uint64
mutex *sync.RWMutex
}
func NewMessagePartition(basedir string, storeName string) (*MessagePartition, error) {
p := &MessagePartition{
basedir: basedir,
name: storeName,
mutex: &sync.RWMutex{},
}
return p, p.initialize()
}
func (p *MessagePartition) initialize() error {
p.mutex.Lock()
defer p.mutex.Unlock()
fileList, err := p.scanFiles()
if err != nil {
return err
}
if len(fileList) == 0 {
p.maxMessageId = 0
} else {
var err error
p.maxMessageId, err = p.calculateMaxMessageIdFromIndex(fileList[len(fileList)-1])
if err != nil {
return err
}
}
return nil
}
// returns the max message id for a message file
func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {
stat, err := os.Stat(p.indexFilenameByMessageId(fileId))
if err != nil {
return 0, err
}
entriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))
return (entriesInIndex - 1 + fileId), nil
}
// Returns the start message ids for all available message files
// in a sorted list
func (p *MessagePartition) scanFiles() ([]uint64, error) {
result := []uint64{}
allFiles, err := ioutil.ReadDir(p.basedir)
if err != nil {
return nil, err
}
for _, fileInfo := range allFiles {
if strings.HasPrefix(fileInfo.Name(), p.name+"-") &&
strings.HasSuffix(fileInfo.Name(), ".idx") {
fileIdString := fileInfo.Name()[len(p.name)+1 : len(fileInfo.Name())-4]
if fileId, err := strconv.ParseUint(fileIdString, 10, 64); err == nil {
result = append(result, fileId)
}
}
}
return result, nil
}
func (p *MessagePartition) MaxMessageId() (uint64, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.maxMessageId, nil
}
func (p *MessagePartition) closeAppendFiles() error {
if p.appendFile != nil {
if err := p.appendFile.Close(); err != nil {
if p.appendIndexFile != nil {
defer p.appendIndexFile.Close()
}
return err
}
p.appendFile = nil
}
if p.appendIndexFile != nil {
err := p.appendIndexFile.Close()
p.appendIndexFile = nil
return err
}
return nil
}
func (p *MessagePartition) createNextAppendFiles(msgId uint64) error {
firstMessageIdForFile := p.firstMessageIdForFile(msgId)
file, err := os.OpenFile(p.filenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return err
}
// write file header on new files
if stat, _ := file.Stat(); stat.Size() == 0 |
index, errIndex := os.OpenFile(p.indexFilenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE, 0666)
if errIndex != nil {
defer file.Close()
defer os.Remove(file.Name())
return errIndex
}
p.appendFile = file
p.appendIndexFile = index
p.appendFirstId = firstMessageIdForFile
p.appendLastId = firstMessageIdForFile + MESSAGES_PER_FILE - 1
stat, err := file.Stat()
if err != nil {
return err
}
p.appendFileWritePosition = uint64(stat.Size())
return nil
}
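// Layout sketch of a fresh message file as written above (sizes taken from the
// constants in this file): bytes 0-7 hold MAGIC_NUMBER, byte 8 holds
// FILE_FORMAT_VERSION, so the first record starts at write position 9.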
func (p *MessagePartition) Close() error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.closeAppendFiles()
}
func (p *MessagePartition) DoInTx(fnToExecute func(maxMessageId uint64) error) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return fnToExecute(p.maxMessageId)
}
func (p *MessagePartition) StoreTx(partition string,
callback func(msgId uint64) (msg []byte)) error {
p.mutex.Lock()
defer p.mutex.Unlock()
msgId := p.maxMessageId + 1
return p.store(msgId, callback(msgId))
}
func (p *MessagePartition) Store(msgId uint64, msg []byte) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.store(msgId, msg)
}
func (p *MessagePartition) store(msgId uint64, msg []byte) error {
if msgId != 1+p.maxMessageId {
return fmt.Errorf("Invalid message id for partition %v. Next id should be %v, but was %q", p.name, 1+p.maxMessageId, msgId)
}
if msgId > p.appendLastId ||
p.appendFile == nil ||
p.appendIndexFile == nil {
if err := p.closeAppendFiles(); err != nil {
return err
}
if err := p.createNextAppendFiles(msgId); err != nil {
return err
}
}
// write the record header: message size (32-bit) and message id (64-bit), both little endian
sizeAndId := make([]byte, 12)
binary.LittleEndian.PutUint32(sizeAndId, uint32(len(msg)))
binary.LittleEndian.PutUint64(sizeAndId[4:], msgId)
if _, err := p.appendFile.Write(sizeAndId); err != nil {
return err
}
// write the message
if _, err := p.appendFile.Write(msg); err != nil {
return err
}
// write the index entry to the index file
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (msgId % MESSAGES_PER_FILE))
messageOffset := p.appendFileWritePosition + uint64(len(sizeAndId))
messageOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
binary.LittleEndian.PutUint64(messageOffsetBuff, messageOffset)
binary.LittleEndian.PutUint32(messageOffsetBuff[8:], uint32(len(msg)))
if _, err := p.appendIndexFile.WriteAt(messageOffsetBuff, indexPosition); err != nil {
return err
}
p.appendFileWritePosition += uint64(len(sizeAndId) + len(msg))
p.maxMessageId = msgId
return nil
}
// Fetch asynchronously fetches a set of messages and streams them to req.MessageC
func (p *MessagePartition) Fetch(req FetchRequest) {
go func() {
fetchList, err := p.calculateFetchList(req)
if err != nil {
req.ErrorCallback <- err
return
}
req.StartCallback <- len(fetchList)
err = p.fetchByFetchlist(fetchList, req.MessageC)
if err != nil {
req.ErrorCallback <- err
return
}
close(req.MessageC)
}()
}
// fetch the messages in the supplied fetchlist and send them to the channel
func (p *MessagePartition) fetchByFetchlist(fetchList []fetchEntry, messageC chan MessageAndId) error {
var fileId uint64
var file *os.File
var err error
var lastMsgId uint64
for _, f := range fetchList {
if lastMsgId == 0 {
lastMsgId = f.messageId - 1
}
lastMsgId = f.messageId
// ensure that we read from the correct file
if file == nil || fileId != f.fileId {
file, err = p.checkoutMessagefile(f.fileId)
if err != nil {
return err
}
defer p.releaseMessagefile(f.fileId, file)
fileId = f.fileId
}
msg := make([]byte, f.size, f.size)
_, err = file.ReadAt(msg, f.offset)
if err != nil {
return err
}
messageC <- MessageAndId{f.messageId, msg}
}
return nil
}
// returns a list of fetchEntry records for all messages in the fetch request.
func (p *MessagePartition) calculateFetchList(req FetchRequest) ([]fetchEntry, error) {
if req.Direction == 0 {
req.Direction = 1
}
nextId := req.StartId
initialCap := req.Count
if initialCap > 100 {
initialCap = 100
}
result := make([]fetchEntry, 0, initialCap)
var file *os.File
var fileId uint64
for len(result) < req.Count && nextId >= 0 {
nextFileId := p.firstMessageIdForFile(nextId)
// ensure that we read from the correct file
if file == nil || nextFileId != fileId {
var err error
file, err = p.checkoutIndexfile(nextFileId)
if err != nil {
if os.IsNotExist(err) {
return result, nil
}
return nil, err
}
defer p.releaseIndexfile(nextFileId, file)
fileId = nextFileId
}
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (nextId % MESSAGES_PER_FILE))
msgOffset, msgSize, err := readIndexEntry(file, indexPosition)
if err != nil {
if err.Error() == "EOF" {
return result, nil // we reached the end of the index
} else {
return nil, err
}
}
if msgOffset != uint64(0) { // only append, if the message exists
result = append(result, fetchEntry{
messageId: nextId,
fileId: fileId,
offset: int64(msgOffset),
size: int(msgSize),
})
}
nextId += uint64(req.Direction)
}
return result, nil
}
func readIndexEntry(file *os.File, indexPosition int64) (msgOffset uint64, msgSize uint32, err error) {
msgOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
if _, err := file.ReadAt(msgOffsetBuff, indexPosition); err != nil {
return 0, 0, err
}
msgOffset = binary.LittleEndian.Uint64(msgOffsetBuff)
msgSize = binary.LittleEndian.Uint32(msgOffsetBuff[8:])
return msgOffset, msgSize, nil
}
// Return a file handle to the message file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutMessagefile(fileId uint64) (*os.File, error) {
return os.Open(p.filenameByMessageId(fileId))
}
// Release a message file handle
func (p *MessagePartition) releaseMessagefile(fileId uint64, file *os.File) {
file.Close()
}
// Return a file handle to the index file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutIndexfile(fileId uint64) (*os.File, error) {
return os.Open(p.indexFilenameByMessageId(fileId))
}
// Release an index file handle
func (p *MessagePartition) releaseIndexfile(fileId uint64, file *os.File) {
file.Close()
}
func (p *MessagePartition) firstMessageIdForFile(messageId uint64) uint64 {
return messageId - messageId%MESSAGES_PER_FILE
}
func (p *MessagePartition) filenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.msg", p.name, messageId))
}
func (p *MessagePartition) indexFilenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.idx", p.name, messageId))
}
| {
p.appendFileWritePosition = uint64(stat.Size())
_, err = file.Write(MAGIC_NUMBER)
if err != nil {
return err
}
_, err = file.Write(FILE_FORMAT_VERSION)
if err != nil {
return err
}
} | conditional_block |
message_partition.go | package store
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
var MAGIC_NUMBER = []byte{42, 249, 180, 108, 82, 75, 222, 182}
var FILE_FORMAT_VERSION = []byte{1}
var MESSAGES_PER_FILE = uint64(10000)
var INDEX_ENTRY_SIZE = 12
type fetchEntry struct {
messageId uint64
fileId uint64
offset int64
size int
}
type MessagePartition struct {
basedir string
name string
appendFile *os.File
appendIndexFile *os.File
appendFirstId uint64
appendLastId uint64
appendFileWritePosition uint64
maxMessageId uint64
mutex *sync.RWMutex
}
func NewMessagePartition(basedir string, storeName string) (*MessagePartition, error) {
p := &MessagePartition{
basedir: basedir,
name: storeName,
mutex: &sync.RWMutex{},
}
return p, p.initialize()
}
func (p *MessagePartition) initialize() error {
p.mutex.Lock()
defer p.mutex.Unlock()
fileList, err := p.scanFiles()
if err != nil {
return err
}
if len(fileList) == 0 {
p.maxMessageId = 0
} else {
var err error
p.maxMessageId, err = p.calculateMaxMessageIdFromIndex(fileList[len(fileList)-1])
if err != nil {
return err
}
}
return nil
}
// returns the max message id for a message file
func (p *MessagePartition) calculateMaxMessageIdFromIndex(fileId uint64) (uint64, error) {
stat, err := os.Stat(p.indexFilenameByMessageId(fileId))
if err != nil {
return 0, err
}
entriesInIndex := uint64(stat.Size() / int64(INDEX_ENTRY_SIZE))
return (entriesInIndex - 1 + fileId), nil
}
// Returns the start message ids for all available message files
// in a sorted list
func (p *MessagePartition) scanFiles() ([]uint64, error) {
result := []uint64{}
allFiles, err := ioutil.ReadDir(p.basedir)
if err != nil {
return nil, err
}
for _, fileInfo := range allFiles {
if strings.HasPrefix(fileInfo.Name(), p.name+"-") &&
strings.HasSuffix(fileInfo.Name(), ".idx") {
fileIdString := fileInfo.Name()[len(p.name)+1 : len(fileInfo.Name())-4]
if fileId, err := strconv.ParseUint(fileIdString, 10, 64); err == nil {
result = append(result, fileId)
}
}
}
return result, nil
}
func (p *MessagePartition) MaxMessageId() (uint64, error) {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.maxMessageId, nil
}
func (p *MessagePartition) closeAppendFiles() error {
if p.appendFile != nil {
if err := p.appendFile.Close(); err != nil {
if p.appendIndexFile != nil {
defer p.appendIndexFile.Close()
}
return err
}
p.appendFile = nil
}
if p.appendIndexFile != nil {
err := p.appendIndexFile.Close()
p.appendIndexFile = nil
return err
}
return nil
}
func (p *MessagePartition) createNextAppendFiles(msgId uint64) error {
firstMessageIdForFile := p.firstMessageIdForFile(msgId)
file, err := os.OpenFile(p.filenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return err
}
// write file header on new files
if stat, _ := file.Stat(); stat.Size() == 0 {
p.appendFileWritePosition = uint64(stat.Size())
_, err = file.Write(MAGIC_NUMBER)
if err != nil {
return err
}
_, err = file.Write(FILE_FORMAT_VERSION)
if err != nil {
return err
}
}
index, errIndex := os.OpenFile(p.indexFilenameByMessageId(firstMessageIdForFile), os.O_RDWR|os.O_CREATE, 0666)
if errIndex != nil {
defer file.Close()
defer os.Remove(file.Name())
return errIndex
}
p.appendFile = file
p.appendIndexFile = index
p.appendFirstId = firstMessageIdForFile
p.appendLastId = firstMessageIdForFile + MESSAGES_PER_FILE - 1
stat, err := file.Stat()
if err != nil {
return err
}
p.appendFileWritePosition = uint64(stat.Size())
return nil
}
func (p *MessagePartition) Close() error {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.closeAppendFiles()
}
func (p *MessagePartition) DoInTx(fnToExecute func(maxMessageId uint64) error) error {
p.mutex.Lock()
defer p.mutex.Unlock()
return fnToExecute(p.maxMessageId)
}
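// Usage sketch (illustrative; the callback body is an assumption, not part of this file):
//
// err := p.DoInTx(func(maxMessageId uint64) error {
//     // read a consistent view while the partition mutex is held
//     fmt.Printf("highest stored id: %d\n", maxMessageId)
//     return nil
// })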
func (p *MessagePartition) StoreTx(partition string,
callback func(msgId uint64) (msg []byte)) error {
p.mutex.Lock()
defer p.mutex.Unlock()
msgId := p.maxMessageId + 1
return p.store(msgId, callback(msgId))
}
func (p *MessagePartition) Store(msgId uint64, msg []byte) error |
func (p *MessagePartition) store(msgId uint64, msg []byte) error {
if msgId != 1+p.maxMessageId {
return fmt.Errorf("Invalid message id for partition %v. Next id should be %v, but was %q", p.name, 1+p.maxMessageId, msgId)
}
if msgId > p.appendLastId ||
p.appendFile == nil ||
p.appendIndexFile == nil {
if err := p.closeAppendFiles(); err != nil {
return err
}
if err := p.createNextAppendFiles(msgId); err != nil {
return err
}
}
// write the record header: message size (32-bit) and message id (64-bit), both little endian
sizeAndId := make([]byte, 12)
binary.LittleEndian.PutUint32(sizeAndId, uint32(len(msg)))
binary.LittleEndian.PutUint64(sizeAndId[4:], msgId)
if _, err := p.appendFile.Write(sizeAndId); err != nil {
return err
}
// write the message
if _, err := p.appendFile.Write(msg); err != nil {
return err
}
// write the index entry to the index file
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (msgId % MESSAGES_PER_FILE))
messageOffset := p.appendFileWritePosition + uint64(len(sizeAndId))
messageOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
binary.LittleEndian.PutUint64(messageOffsetBuff, messageOffset)
binary.LittleEndian.PutUint32(messageOffsetBuff[8:], uint32(len(msg)))
if _, err := p.appendIndexFile.WriteAt(messageOffsetBuff, indexPosition); err != nil {
return err
}
p.appendFileWritePosition += uint64(len(sizeAndId) + len(msg))
p.maxMessageId = msgId
return nil
}
// Fetch asynchronously fetches a set of messages and streams them to req.MessageC
func (p *MessagePartition) Fetch(req FetchRequest) {
go func() {
fetchList, err := p.calculateFetchList(req)
if err != nil {
req.ErrorCallback <- err
return
}
req.StartCallback <- len(fetchList)
err = p.fetchByFetchlist(fetchList, req.MessageC)
if err != nil {
req.ErrorCallback <- err
return
}
close(req.MessageC)
}()
}
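// Usage sketch (illustrative; assumes FetchRequest carries the fields used above):
//
// req := FetchRequest{StartId: 42, Count: 10, Direction: 1,
//     MessageC: make(chan MessageAndId), ErrorCallback: make(chan error, 1),
//     StartCallback: make(chan int, 1)}
// p.Fetch(req)
// n := <-req.StartCallback      // number of messages that will be streamed
// for m := range req.MessageC { // closed by Fetch on success
//     _ = m                     // consume MessageAndId values
// }
// On failure, the error arrives on req.ErrorCallback instead and MessageC stays open.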
// fetch the messages in the supplied fetchlist and send them to the channel
func (p *MessagePartition) fetchByFetchlist(fetchList []fetchEntry, messageC chan MessageAndId) error {
var fileId uint64
var file *os.File
var err error
var lastMsgId uint64
for _, f := range fetchList {
if lastMsgId == 0 {
lastMsgId = f.messageId - 1
}
lastMsgId = f.messageId
// ensure that we read from the correct file
if file == nil || fileId != f.fileId {
file, err = p.checkoutMessagefile(f.fileId)
if err != nil {
return err
}
defer p.releaseMessagefile(f.fileId, file)
fileId = f.fileId
}
msg := make([]byte, f.size, f.size)
_, err = file.ReadAt(msg, f.offset)
if err != nil {
return err
}
messageC <- MessageAndId{f.messageId, msg}
}
return nil
}
// returns a list of fetchEntry records for all messages in the fetch request.
func (p *MessagePartition) calculateFetchList(req FetchRequest) ([]fetchEntry, error) {
if req.Direction == 0 {
req.Direction = 1
}
nextId := req.StartId
initialCap := req.Count
if initialCap > 100 {
initialCap = 100
}
result := make([]fetchEntry, 0, initialCap)
var file *os.File
var fileId uint64
for len(result) < req.Count && nextId >= 0 {
nextFileId := p.firstMessageIdForFile(nextId)
// ensure that we read from the correct file
if file == nil || nextFileId != fileId {
var err error
file, err = p.checkoutIndexfile(nextFileId)
if err != nil {
if os.IsNotExist(err) {
return result, nil
}
return nil, err
}
defer p.releaseIndexfile(nextFileId, file)
fileId = nextFileId
}
indexPosition := int64(uint64(INDEX_ENTRY_SIZE) * (nextId % MESSAGES_PER_FILE))
msgOffset, msgSize, err := readIndexEntry(file, indexPosition)
if err != nil {
if err.Error() == "EOF" {
return result, nil // we reached the end of the index
} else {
return nil, err
}
}
if msgOffset != uint64(0) { // only append, if the message exists
result = append(result, fetchEntry{
messageId: nextId,
fileId: fileId,
offset: int64(msgOffset),
size: int(msgSize),
})
}
nextId += uint64(req.Direction)
}
return result, nil
}
func readIndexEntry(file *os.File, indexPosition int64) (msgOffset uint64, msgSize uint32, err error) {
msgOffsetBuff := make([]byte, INDEX_ENTRY_SIZE)
if _, err := file.ReadAt(msgOffsetBuff, indexPosition); err != nil {
return 0, 0, err
}
msgOffset = binary.LittleEndian.Uint64(msgOffsetBuff)
msgSize = binary.LittleEndian.Uint32(msgOffsetBuff[8:])
return msgOffset, msgSize, nil
}
// Return a file handle to the message file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutMessagefile(fileId uint64) (*os.File, error) {
return os.Open(p.filenameByMessageId(fileId))
}
// Release a message file handle
func (p *MessagePartition) releaseMessagefile(fileId uint64, file *os.File) {
file.Close()
}
// Return a file handle to the index file with the supplied file id.
// The returned file handle may be shared by multiple goroutines.
func (p *MessagePartition) checkoutIndexfile(fileId uint64) (*os.File, error) {
return os.Open(p.indexFilenameByMessageId(fileId))
}
// Release an index file handle
func (p *MessagePartition) releaseIndexfile(fileId uint64, file *os.File) {
file.Close()
}
func (p *MessagePartition) firstMessageIdForFile(messageId uint64) uint64 {
return messageId - messageId%MESSAGES_PER_FILE
}
func (p *MessagePartition) filenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.msg", p.name, messageId))
}
func (p *MessagePartition) indexFilenameByMessageId(messageId uint64) string {
return filepath.Join(p.basedir, fmt.Sprintf("%s-%020d.idx", p.name, messageId))
}
| {
p.mutex.Lock()
defer p.mutex.Unlock()
return p.store(msgId, msg)
} | identifier_body |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
/// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification.
#[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// Note: this used to be 92 bytes; it is now 96 or 97, so 128 leaves headroom.
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Likely comparison based on timestamp would be good enough?
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
// Existing keyhandles can still be decoded
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
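// Round-trip sketch (illustrative; `trussed`, `authnr`, `rp_id_hash` and `kek`
// are assumed to be set up elsewhere, with `kek` being the authenticator's
// key-encryption key):
//
// let id = credential.id(&mut trussed, kek, None)?;
// let recovered = Credential::try_from_bytes(&mut authnr, &rp_id_hash, &id.0)?;
// assert_eq!(recovered, credential.strip()); // PartialEq compares key + creation_time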
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
}
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn | <UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
}
| try_from_bytes | identifier_name |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
| #[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// Note: this used to be 92 bytes; it is now 96 or 97, so 128 leaves headroom.
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Likely comparison based on timestamp would be good enough?
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
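// Example (illustrative): since ordering is by creation_time, a
// Vec<Credential> can be sorted oldest-first with `creds.sort_unstable()`,
// after which the most recently created credential is `creds.last()`.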
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
// Existing keyhandles can still be decoded
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
}
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn try_from_bytes<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
} | /// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification. | random_line_split |
credential.rs | //! Internal `Credential` and external `CredentialId` ("keyhandle").
use core::cmp::Ordering;
use trussed::{client, syscall, try_syscall, types::KeyId};
pub(crate) use ctap_types::{
// authenticator::{ctap1, ctap2, Error, Request, Response},
ctap2::credential_management::CredentialProtectionPolicy,
sizes::*,
webauthn::PublicKeyCredentialDescriptor,
Bytes,
String,
};
use crate::{Authenticator, Error, Result, UserPresence};
/// As signaled in `get_info`.
///
/// Eventual goal is full support for the CTAP2.1 specification.
#[derive(Copy, Clone, Debug, serde::Deserialize, serde::Serialize)]
pub enum CtapVersion {
U2fV2,
Fido20,
Fido21Pre,
}
/// External ID of a credential, commonly known as "keyhandle".
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct CredentialId(pub Bytes<MAX_CREDENTIAL_ID_LENGTH>);
// TODO: how to determine necessary size?
// pub type SerializedCredential = Bytes<512>;
// pub type SerializedCredential = Bytes<256>;
pub(crate) type SerializedCredential = trussed::types::Message;
#[derive(Clone, Debug)]
struct EncryptedSerializedCredential(pub trussed::api::reply::Encrypt);
impl TryFrom<EncryptedSerializedCredential> for CredentialId {
type Error = Error;
fn try_from(esc: EncryptedSerializedCredential) -> Result<CredentialId> {
Ok(CredentialId(
trussed::cbor_serialize_bytes(&esc.0).map_err(|_| Error::Other)?,
))
}
}
impl TryFrom<CredentialId> for EncryptedSerializedCredential {
// tag = 16B
// nonce = 12B
type Error = Error;
fn try_from(cid: CredentialId) -> Result<EncryptedSerializedCredential> {
let encrypted_serialized_credential = EncryptedSerializedCredential(
ctap_types::serde::cbor_deserialize(&cid.0).map_err(|_| Error::InvalidCredential)?,
);
Ok(encrypted_serialized_credential)
}
}
/// Credential keys can either be "discoverable" or not.
///
/// The FIDO Alliance likes to refer to "resident keys" as "(client-side) discoverable public key
/// credential sources" now ;)
#[derive(Clone, Debug, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Key {
ResidentKey(KeyId),
// THIS USED TO BE 92 NOW IT'S 96 or 97 or so... waddup?
WrappedKey(Bytes<128>),
}
/// The main content of a `Credential`.
#[derive(
Clone, Debug, PartialEq, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed,
)]
pub struct CredentialData {
// id, name, url
pub rp: ctap_types::webauthn::PublicKeyCredentialRpEntity,
// id, icon, name, display_name
pub user: ctap_types::webauthn::PublicKeyCredentialUserEntity,
// can be just a counter, need to be able to determine "latest"
pub creation_time: u32,
// for stateless deterministic keys, it seems CTAP2 (but not CTAP1) makes signature counters optional
use_counter: bool,
// P256 or Ed25519
pub algorithm: i32,
// for RK in non-deterministic mode: refers to actual key
// TODO(implement enums in cbor-deser): for all others, is a wrapped key
// --> use above Key enum
// #[serde(skip_serializing_if = "Option::is_none")]
// key_id: Option<KeyId>,
pub key: Key,
// extensions
#[serde(skip_serializing_if = "Option::is_none")]
pub hmac_secret: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cred_protect: Option<CredentialProtectionPolicy>,
// TODO: add `sig_counter: Option<CounterId>`,
// and grant RKs a per-credential sig-counter.
}
// TODO: figure out sizes
// We may or may not follow https://github.com/satoshilabs/slips/blob/master/slip-0022.md
/// The core structure this authenticator creates and uses.
#[derive(Clone, Debug, serde_indexed::DeserializeIndexed, serde_indexed::SerializeIndexed)]
pub struct Credential {
ctap: CtapVersion,
pub data: CredentialData,
nonce: Bytes<12>,
}
// Alas... it would be more symmetrical to have Credential { meta, data },
// but let's not break binary compatibility for this.
//
// struct Metadata {
// ctap: CtapVersion,
// nonce: Bytes<12>,
// }
impl core::ops::Deref for Credential {
type Target = CredentialData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
/// Compare credentials based on key + timestamp.
///
/// Likely a comparison based on timestamp alone would be good enough?
impl PartialEq for Credential {
fn eq(&self, other: &Self) -> bool {
(self.creation_time == other.creation_time) && (self.key == other.key)
}
}
impl PartialEq<&Credential> for Credential {
fn eq(&self, other: &&Self) -> bool {
self == *other
}
}
impl Eq for Credential {}
impl Ord for Credential {
fn cmp(&self, other: &Self) -> Ordering {
self.data.creation_time.cmp(&other.data.creation_time)
}
}
/// Order by timestamp of creation.
impl PartialOrd for Credential {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<&Credential> for Credential {
fn partial_cmp(&self, other: &&Self) -> Option<Ordering> {
Some(self.cmp(*other))
}
}
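// Editorial sketch (not in the original source): since `Ord` delegates to
// `creation_time`, the most recently created credential in a slice is just a
// `max` over an iterator. Assuming a hypothetical `creds: &[Credential]`:
//
// let newest: Option<&Credential> = creds.iter().max();
//
// Ties on `creation_time` resolve to the last matching element, per `Iterator::max`.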
// Bad idea - huge stack
// pub(crate) type CredentialList = Vec<Credential, {ctap_types::sizes::MAX_CREDENTIAL_COUNT_IN_LIST}>;
impl From<CredentialId> for PublicKeyCredentialDescriptor {
fn from(id: CredentialId) -> PublicKeyCredentialDescriptor {
PublicKeyCredentialDescriptor {
id: id.0,
key_type: {
let mut key_type = String::new();
key_type.push_str("public-key").unwrap();
key_type
},
}
}
}
impl Credential {
#[allow(clippy::too_many_arguments)]
pub fn new(
ctap: CtapVersion,
// parameters: &ctap2::make_credential::Parameters,
rp: &ctap_types::webauthn::PublicKeyCredentialRpEntity,
user: &ctap_types::webauthn::PublicKeyCredentialUserEntity,
algorithm: i32,
key: Key,
timestamp: u32,
hmac_secret: Option<bool>,
cred_protect: Option<CredentialProtectionPolicy>,
nonce: [u8; 12],
) -> Self {
info!("credential for algorithm {}", algorithm);
let data = CredentialData {
rp: rp.clone(),
user: user.clone(),
creation_time: timestamp,
use_counter: true,
algorithm,
key,
hmac_secret,
cred_protect,
};
Credential {
ctap,
data,
nonce: Bytes::from_slice(&nonce).unwrap(),
}
}
// ID (or "keyhandle") for the credential.
//
// Originally, the entire data was serialized, and its encryption
// (binding RP as associated data) used as a keyhandle.
//
// However, this leads to problems with relying parties. According to the old U2F
// spec, the length of a keyhandle is encoded as one byte, whereas this procedure would
// generate keyhandles of length ~320 bytes.
//
// Therefore, inessential metadata is stripped before serialization, ensuring
// the ID will stay below 255 bytes.
//
// Existing keyhandles can still be decoded.
pub fn id<T: client::Chacha8Poly1305 + client::Sha256>(
&self,
trussed: &mut T,
key_encryption_key: KeyId,
rp_id_hash: Option<&Bytes<32>>,
) -> Result<CredentialId> {
let serialized_credential = self.strip().serialize()?;
let message = &serialized_credential;
// info!("serialized cred = {:?}", message).ok();
let rp_id_hash: Bytes<32> = if let Some(hash) = rp_id_hash {
hash.clone()
} else {
syscall!(trussed.hash_sha256(self.rp.id.as_ref()))
.hash
.to_bytes()
.map_err(|_| Error::Other)?
};
let associated_data = &rp_id_hash[..];
let nonce: [u8; 12] = self.nonce.as_slice().try_into().unwrap();
let encrypted_serialized_credential = EncryptedSerializedCredential(syscall!(trussed
.encrypt_chacha8poly1305(key_encryption_key, message, associated_data, Some(&nonce))));
let credential_id: CredentialId = encrypted_serialized_credential
.try_into()
.map_err(|_| Error::RequestTooLarge)?;
Ok(credential_id)
}
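// Editorial note (not in the original source): end to end, the keyhandle
// pipeline above is:
//
// strip() -> CBOR-serialize CredentialData
// -> ChaCha8-Poly1305 encrypt (AAD = SHA-256 of rp.id, nonce from self.nonce)
// -> CBOR-serialize the {ciphertext, nonce, tag} reply
// -> CredentialId bytes
//
// Decoding in `try_from_bytes` below runs the same steps in reverse.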
pub fn serialize(&self) -> Result<SerializedCredential> {
trussed::cbor_serialize_bytes(self).map_err(|_| Error::Other)
}
pub fn deserialize(bytes: &SerializedCredential) -> Result<Self> {
match ctap_types::serde::cbor_deserialize(bytes) {
Ok(s) => Ok(s),
Err(_) => |
}
}
pub fn try_from<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
descriptor: &PublicKeyCredentialDescriptor,
) -> Result<Self> {
Self::try_from_bytes(authnr, rp_id_hash, &descriptor.id)
}
pub fn try_from_bytes<UP: UserPresence, T: client::Client + client::Chacha8Poly1305>(
authnr: &mut Authenticator<UP, T>,
rp_id_hash: &Bytes<32>,
id: &[u8],
) -> Result<Self> {
let mut cred: Bytes<MAX_CREDENTIAL_ID_LENGTH> = Bytes::new();
cred.extend_from_slice(id)
.map_err(|_| Error::InvalidCredential)?;
let encrypted_serialized = EncryptedSerializedCredential::try_from(CredentialId(cred))?;
let kek = authnr
.state
.persistent
.key_encryption_key(&mut authnr.trussed)?;
let serialized = try_syscall!(authnr.trussed.decrypt_chacha8poly1305(
// TODO: use RpId as associated data here?
kek,
&encrypted_serialized.0.ciphertext,
&rp_id_hash[..],
&encrypted_serialized.0.nonce,
&encrypted_serialized.0.tag,
))
.map_err(|_| Error::InvalidCredential)?
.plaintext
.ok_or(Error::InvalidCredential)?;
let credential =
Credential::deserialize(&serialized).map_err(|_| Error::InvalidCredential)?;
Ok(credential)
}
// Remove inessential metadata from credential.
//
// Called by the `id` method, see its documentation.
pub fn strip(&self) -> Self {
info_now!(":: stripping ID");
let mut stripped = self.clone();
let data = &mut stripped.data;
data.rp.name = None;
data.rp.icon = None;
data.user.icon = None;
data.user.name = None;
data.user.display_name = None;
// data.hmac_secret = None;
// data.cred_protect = None;
stripped
}
}
#[cfg(test)]
mod test {
use super::*;
fn credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: String::from("John Doe"),
name: None,
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: Bytes::from_slice(&[1, 2, 3]).unwrap(),
icon: None,
name: None,
display_name: None,
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(Bytes::from_slice(&[1, 2, 3]).unwrap()),
hmac_secret: Some(false),
cred_protect: None,
}
}
fn random_bytes<const N: usize>() -> Bytes<N> {
use rand::{
distributions::{Distribution, Uniform},
rngs::OsRng,
RngCore,
};
let mut bytes = Bytes::default();
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
bytes.resize_default(n).unwrap();
OsRng.fill_bytes(&mut bytes);
bytes
}
#[allow(dead_code)]
fn maybe_random_bytes<const N: usize>() -> Option<Bytes<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_bytes())
} else {
None
}
}
fn random_string<const N: usize>() -> String<N> {
use rand::{
distributions::{Alphanumeric, Distribution, Uniform},
rngs::OsRng,
Rng,
};
use std::str::FromStr;
let between = Uniform::from(0..(N + 1));
let n = between.sample(&mut OsRng);
let std_string: std::string::String = OsRng
.sample_iter(&Alphanumeric)
.take(n)
.map(char::from)
.collect();
String::from_str(&std_string).unwrap()
}
fn maybe_random_string<const N: usize>() -> Option<String<N>> {
use rand::{rngs::OsRng, RngCore};
if OsRng.next_u32() & 1 != 0 {
Some(random_string())
} else {
None
}
}
fn random_credential_data() -> CredentialData {
use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
CredentialData {
rp: PublicKeyCredentialRpEntity {
id: random_string(),
name: maybe_random_string(),
icon: None,
},
user: PublicKeyCredentialUserEntity {
id: random_bytes(), //Bytes::from_slice(&[1,2,3]).unwrap(),
icon: maybe_random_string(),
name: maybe_random_string(),
display_name: maybe_random_string(),
},
creation_time: 123,
use_counter: false,
algorithm: -7,
key: Key::WrappedKey(random_bytes()),
hmac_secret: Some(false),
cred_protect: None,
}
}
#[test]
fn skip_credential_data_options() {
use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
let credential_data = credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
let credential_data = random_credential_data();
let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
let deserialized: CredentialData = deserialize(&serialization).unwrap();
assert_eq!(credential_data, deserialized);
}
// use quickcheck::TestResult;
// quickcheck::quickcheck! {
// fn prop(
// rp_id: std::string::String,
// rp_name: Option<std::string::String>,
// rp_url: Option<std::string::String>,
// user_id: std::vec::Vec<u8>,
// user_name: Option<std::string::String>,
// creation_time: u32,
// use_counter: bool,
// algorithm: i32
// ) -> TestResult {
// use std::str::FromStr;
// use ctap_types::webauthn::{PublicKeyCredentialRpEntity, PublicKeyCredentialUserEntity};
// use trussed::{cbor_deserialize as deserialize, cbor_serialize_bytes as serialize};
// let rp_name = &rp_name.as_ref().map(|string| string.as_str());
// let rp_url = &rp_url.as_ref().map(|string| string.as_str());
// let user_name = &user_name.as_ref().map(|string| string.as_str());
// let discard = [
// rp_id.len() > 256,
// rp_name.unwrap_or(&"").len() > 64,
// rp_url.unwrap_or(&"").len() > 64,
// user_id.len() > 64,
// user_name.unwrap_or(&"").len() > 64,
// ];
// if discard.iter().any(|&x| x) {
// return TestResult::discard();
// }
// let credential_data = CredentialData {
// rp: PublicKeyCredentialRpEntity {
// id: String::from_str(&rp_id).unwrap(),
// name: rp_name.map(|rp_name| String::from_str(rp_name).unwrap()),
// url: rp_url.map(|rp_url| String::from_str(rp_url).unwrap()),
// },
// user: PublicKeyCredentialUserEntity {
// id: Bytes::from_slice(&user_id).unwrap(),
// icon: maybe_random_string(),
// name: user_name.map(|user_name| String::from_str(user_name).unwrap()),
// display_name: maybe_random_string(),
// },
// creation_time,
// use_counter,
// algorithm,
// key: Key::WrappedKey(random_bytes()),
// hmac_secret: Some(false),
// cred_protect: None,
// };
// let serialization: Bytes<1024> = serialize(&credential_data).unwrap();
// let deserialized: CredentialData = deserialize(&serialization).unwrap();
// TestResult::from_bool(credential_data == deserialized)
// }
// }
}
| {
info_now!("could not deserialize {:?}", bytes);
Err(Error::Other)
} | conditional_block |
dns.go | package dns
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"unsafe"
"k8s.io/klog/v2"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
"github.com/kubeedge/kubeedge/edgemesh/pkg/common"
"github.com/kubeedge/kubeedge/edgemesh/pkg/config"
"github.com/kubeedge/kubeedge/edgemesh/pkg/listener"
)
type Event int
var (
// default docker0
ifi = "docker0"
// QR: 0 represents query, 1 represents response
dnsQR = uint16(0x8000)
oneByteSize = uint16(1)
twoByteSize = uint16(2)
ttl = uint32(64)
)
const (
// 1 for ipv4
aRecord = 1
bufSize = 1024
errNotImplemented = uint16(0x0004)
errRefused = uint16(0x0005)
eventNothing = Event(0)
eventUpstream = Event(1)
eventNxDomain = Event(2)
)
type dnsHeader struct {
id uint16
flags uint16
qdCount uint16
anCount uint16
nsCount uint16
arCount uint16
}
type dnsQuestion struct {
from *net.UDPAddr
head *dnsHeader
name []byte
queByte []byte
qType uint16
qClass uint16
queNum uint16
event Event
}
type dnsAnswer struct {
name []byte
qType uint16
qClass uint16
ttl uint32
dataLen uint16
addr []byte
}
// metaClient is a query client
var metaClient client.CoreInterface
// dnsConn saves DNS protocol
var dnsConn *net.UDPConn
// Start is the entry point for external callers
func Start() {
startDNS()
}
// startDNS starts edgemesh dns server
func startDNS() {
// init meta client
metaClient = client.New()
// get dns listen ip
lip, err := common.GetInterfaceIP(ifi)
if err != nil {
klog.Errorf("[EdgeMesh] get dns listen ip err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: lip,
Port: 53,
}
udpConn, err := net.ListenUDP("udp", laddr)
if err != nil {
klog.Errorf("[EdgeMesh] dns server listen on %v error: %v", laddr, err)
return
}
defer udpConn.Close()
dnsConn = udpConn
for {
req := make([]byte, bufSize)
n, from, err := dnsConn.ReadFromUDP(req)
if err != nil || n <= 0 {
klog.Errorf("[EdgeMesh] dns server read from udp error: %v", err)
continue
}
que, err := parseDNSQuery(req[:n])
if err != nil {
continue
}
que.from = from
rsp := make([]byte, 0)
rsp, err = recordHandle(que, req[:n])
if err != nil {
klog.Warningf("[EdgeMesh] failed to resolve dns: %v", err)
continue
}
if _, err = dnsConn.WriteTo(rsp, from); err != nil {
klog.Warningf("[EdgeMesh] failed to write: %v", err)
}
}
}
// recordHandle returns the answer for the dns question
func recordHandle(que *dnsQuestion, req []byte) (rsp []byte, err error) {
var exist bool
var ip string
// qType should be 1 for ipv4
if que.name != nil && que.qType == aRecord {
domainName := string(que.name)
exist, ip = lookupFromMetaManager(domainName)
}
if !exist || que.event == eventUpstream {
// if this service doesn't belong to this cluster
go getFromRealDNS(req, que.from)
return rsp, fmt.Errorf("get from real dns")
}
address := net.ParseIP(ip).To4()
if address == nil {
que.event = eventNxDomain
}
// generate the response prefix (header + echoed question)
pre := modifyRspPrefix(que)
rsp = append(rsp, pre...)
if que.event != eventNothing {
return rsp, nil
}
// create a deceptive resp, if no error
dnsAns := &dnsAnswer{
name: que.name,
qType: que.qType,
qClass: que.qClass,
ttl: ttl,
dataLen: uint16(len(address)),
addr: address,
}
ans := dnsAns.getAnswer()
rsp = append(rsp, ans...)
return rsp, nil
}
// parseDNSQuery converts bytes to *dnsQuestion
func parseDNSQuery(req []byte) (que *dnsQuestion, err error) {
head := &dnsHeader{}
head.getHeader(req)
if !head.isAQuery() {
return nil, errors.New("not a dns query, ignore")
}
que = &dnsQuestion{
event: eventNothing,
}
// Generally, when the recursive DNS server requests upward, it may
// initiate a resolution request for multiple aliases/domain names
// at once; Edge DNS does not need to process a message that carries
// multiple questions at a time.
if head.qdCount != 1 |
offset := uint16(unsafe.Sizeof(dnsHeader{}))
// DNS NS <ROOT> operation
if req[offset] == 0x0 {
que.event = eventUpstream
return
}
que.getQuestion(req, offset, head)
err = nil
return
}
// isAQuery reports whether the dns pkg is a query
func (h *dnsHeader) isAQuery() bool {
return h.flags&dnsQR != dnsQR
}
// getHeader gets dns pkg head
func (h *dnsHeader) getHeader(req []byte) {
h.id = binary.BigEndian.Uint16(req[0:2])
h.flags = binary.BigEndian.Uint16(req[2:4])
h.qdCount = binary.BigEndian.Uint16(req[4:6])
h.anCount = binary.BigEndian.Uint16(req[6:8])
h.nsCount = binary.BigEndian.Uint16(req[8:10])
h.arCount = binary.BigEndian.Uint16(req[10:12])
}
// getQuestion gets a dns question
func (q *dnsQuestion) getQuestion(req []byte, offset uint16, head *dnsHeader) {
ost := offset
tmp := ost
ost = q.getQName(req, ost)
q.qType = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.qClass = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.head = head
q.queByte = req[tmp:ost]
}
// getAnswer generates answer for the dns question
func (da *dnsAnswer) getAnswer() (answer []byte) {
answer = make([]byte, 0)
if da.qType == aRecord {
answer = append(answer, 0xc0)
answer = append(answer, 0x0c)
tmp16 := make([]byte, 2)
tmp32 := make([]byte, 4)
binary.BigEndian.PutUint16(tmp16, da.qType)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint16(tmp16, da.qClass)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint32(tmp32, da.ttl)
answer = append(answer, tmp32...)
binary.BigEndian.PutUint16(tmp16, da.dataLen)
answer = append(answer, tmp16...)
answer = append(answer, da.addr...)
}
return answer
}
// getQName gets dns question qName
func (q *dnsQuestion) getQName(req []byte, offset uint16) uint16 {
ost := offset
for {
// one byte to suggest length
qbyte := uint16(req[ost])
// qName ends with 0x00, and 0x00 should not be included
if qbyte == 0x00 {
q.name = q.name[:uint16(len(q.name))-oneByteSize]
return ost + oneByteSize
}
// step forward one more byte and get the real stuff
ost += oneByteSize
q.name = append(q.name, req[ost:ost+qbyte]...)
// add "." symbol
q.name = append(q.name, 0x2e)
ost += qbyte
}
}
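// Editorial example (not in the original source): a qName is a run of
// length-prefixed labels terminated by 0x00. For "www.example.com" the
// request bytes are:
//
// 0x03 'w' 'w' 'w' 0x07 'e' 'x' 'a' 'm' 'p' 'l' 'e' 0x03 'c' 'o' 'm' 0x00
//
// getQName appends a '.' (0x2e) after every label, then trims the trailing
// '.' when it reaches the 0x00 terminator.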
// lookupFromMetaManager confirms if the service exists
func lookupFromMetaManager(serviceURL string) (exist bool, ip string) {
name, namespace := common.SplitServiceKey(serviceURL)
s, _ := metaClient.Services(namespace).Get(name)
if s != nil {
svcName := namespace + "." + name
ip := listener.GetServiceServer(svcName)
klog.Infof("[EdgeMesh] dns server parse %s ip %s", serviceURL, ip)
return true, ip
}
klog.Errorf("[EdgeMesh] service %s is not found in this cluster", serviceURL)
return false, ""
}
// getFromRealDNS returns a dns response from real dns servers
func getFromRealDNS(req []byte, from *net.UDPAddr) {
rsp := make([]byte, 0)
ips, err := parseNameServer()
if err != nil {
klog.Errorf("[EdgeMesh] parse nameserver err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: net.IPv4zero,
Port: 0,
}
// get from real dns servers
for _, ip := range ips {
raddr := &net.UDPAddr{
IP: ip,
Port: 53,
}
conn, err := net.DialUDP("udp", laddr, raddr)
if err != nil {
continue
}
defer conn.Close()
_, err = conn.Write(req)
if err != nil {
continue
}
if err = conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
continue
}
var n int
buf := make([]byte, bufSize)
n, err = conn.Read(buf)
if err != nil {
continue
}
if n > 0 {
rsp = append(rsp, buf[:n]...)
if _, err = dnsConn.WriteToUDP(rsp, from); err != nil {
klog.Errorf("[EdgeMesh] failed to wirte to udp, err: %v", err)
continue
}
break
}
}
}
// parseNameServer gets all real nameservers from the resolv.conf
func parseNameServer() ([]net.IP, error) {
file, err := os.Open("/etc/resolv.conf")
if err != nil {
return nil, fmt.Errorf("error opening /etc/resolv.conf: %v", err)
}
defer file.Close()
scan := bufio.NewScanner(file)
scan.Split(bufio.ScanLines)
ip := make([]net.IP, 0)
for scan.Scan() {
serverString := scan.Text()
if strings.Contains(serverString, "nameserver") {
tmpString := strings.Replace(serverString, "nameserver", "", 1)
nameserver := strings.TrimSpace(tmpString)
sip := net.ParseIP(nameserver)
if sip != nil && !sip.Equal(config.Config.ListenIP) {
ip = append(ip, sip)
}
}
}
if len(ip) == 0 {
return nil, fmt.Errorf("there is no nameserver in /etc/resolv.conf")
}
return ip, nil
}
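// Editorial example (not in the original source): given an /etc/resolv.conf
// containing
//
// nameserver 10.96.0.10
// nameserver 8.8.8.8
//
// parseNameServer returns both addresses, skipping any entry equal to
// config.Config.ListenIP so the edge DNS server never forwards to itself.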
// modifyRspPrefix generates a dns response head
func modifyRspPrefix(que *dnsQuestion) (pre []byte) {
if que == nil {
return
}
// use head in que
rspHead := que.head
rspHead.convertQueryRsp(true)
if que.qType == aRecord {
rspHead.setAnswerNum(1)
} else {
rspHead.setAnswerNum(0)
}
rspHead.setRspRCode(que)
pre = rspHead.getByteFromDNSHeader()
pre = append(pre, que.queByte...)
return
}
// convertQueryRsp converts a dns question head to a response head
func (h *dnsHeader) convertQueryRsp(isRsp bool) {
if isRsp {
h.flags |= dnsQR
}
}
// setAnswerNum sets the answer num for dns head
func (h *dnsHeader) setAnswerNum(num uint16) {
h.anCount = num
}
// setRspRCode sets dns response return code
func (h *dnsHeader) setRspRCode(que *dnsQuestion) {
if que.qType != aRecord {
h.flags &= (^errNotImplemented)
h.flags |= errNotImplemented
} else if que.event == eventNxDomain {
h.flags &= (^errRefused)
h.flags |= errRefused
}
}
// getByteFromDNSHeader converts dnsHeader to bytes
func (h *dnsHeader) getByteFromDNSHeader() (rspHead []byte) {
rspHead = make([]byte, unsafe.Sizeof(*h))
idxTransactionID := unsafe.Sizeof(h.id)
idxFlags := unsafe.Sizeof(h.flags) + idxTransactionID
idxQDCount := unsafe.Sizeof(h.qdCount) + idxFlags
idxANCount := unsafe.Sizeof(h.anCount) + idxQDCount
idxNSCount := unsafe.Sizeof(h.nsCount) + idxANCount
idxARCount := unsafe.Sizeof(h.arCount) + idxNSCount
binary.BigEndian.PutUint16(rspHead[:idxTransactionID], h.id)
binary.BigEndian.PutUint16(rspHead[idxTransactionID:idxFlags], h.flags)
binary.BigEndian.PutUint16(rspHead[idxFlags:idxQDCount], h.qdCount)
binary.BigEndian.PutUint16(rspHead[idxQDCount:idxANCount], h.anCount)
binary.BigEndian.PutUint16(rspHead[idxANCount:idxNSCount], h.nsCount)
binary.BigEndian.PutUint16(rspHead[idxNSCount:idxARCount], h.arCount)
return
}
| {
que.event = eventUpstream
return
} | conditional_block |
dns.go | package dns
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"unsafe"
"k8s.io/klog/v2"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
"github.com/kubeedge/kubeedge/edgemesh/pkg/common"
"github.com/kubeedge/kubeedge/edgemesh/pkg/config"
"github.com/kubeedge/kubeedge/edgemesh/pkg/listener"
)
type Event int
var (
// default docker0
ifi = "docker0"
// QR: 0 represents query, 1 represents response
dnsQR = uint16(0x8000)
oneByteSize = uint16(1)
twoByteSize = uint16(2)
ttl = uint32(64)
)
const (
// 1 for ipv4
aRecord = 1
bufSize = 1024
errNotImplemented = uint16(0x0004)
errRefused = uint16(0x0005)
eventNothing = Event(0)
eventUpstream = Event(1)
eventNxDomain = Event(2)
)
type dnsHeader struct {
id uint16
flags uint16
qdCount uint16
anCount uint16
nsCount uint16
arCount uint16
}
type dnsQuestion struct {
from *net.UDPAddr
head *dnsHeader
name []byte
queByte []byte
qType uint16
qClass uint16
queNum uint16
event Event
}
type dnsAnswer struct {
name []byte
qType uint16
qClass uint16
ttl uint32
dataLen uint16
addr []byte
}
// metaClient is a query client
var metaClient client.CoreInterface
// dnsConn saves DNS protocol
var dnsConn *net.UDPConn
// Start is the entry point for external callers
func Start() {
startDNS()
}
// startDNS starts edgemesh dns server
func startDNS() {
// init meta client
metaClient = client.New()
// get dns listen ip
lip, err := common.GetInterfaceIP(ifi)
if err != nil {
klog.Errorf("[EdgeMesh] get dns listen ip err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: lip,
Port: 53,
}
udpConn, err := net.ListenUDP("udp", laddr)
if err != nil {
klog.Errorf("[EdgeMesh] dns server listen on %v error: %v", laddr, err)
return
}
defer udpConn.Close()
dnsConn = udpConn
for {
req := make([]byte, bufSize)
n, from, err := dnsConn.ReadFromUDP(req)
if err != nil || n <= 0 {
klog.Errorf("[EdgeMesh] dns server read from udp error: %v", err)
continue
}
que, err := parseDNSQuery(req[:n])
if err != nil {
continue
}
que.from = from
rsp := make([]byte, 0)
rsp, err = recordHandle(que, req[:n])
if err != nil {
klog.Warningf("[EdgeMesh] failed to resolve dns: %v", err)
continue
}
if _, err = dnsConn.WriteTo(rsp, from); err != nil {
klog.Warningf("[EdgeMesh] failed to write: %v", err)
}
}
}
// recordHandle returns the answer for the dns question
func | (que *dnsQuestion, req []byte) (rsp []byte, err error) {
var exist bool
var ip string
// qType should be 1 for ipv4
if que.name != nil && que.qType == aRecord {
domainName := string(que.name)
exist, ip = lookupFromMetaManager(domainName)
}
if !exist || que.event == eventUpstream {
// if this service doesn't belong to this cluster
go getFromRealDNS(req, que.from)
return rsp, fmt.Errorf("get from real dns")
}
address := net.ParseIP(ip).To4()
if address == nil {
que.event = eventNxDomain
}
// generate the response prefix (header + echoed question)
pre := modifyRspPrefix(que)
rsp = append(rsp, pre...)
if que.event != eventNothing {
return rsp, nil
}
// create a deceptive resp, if no error
dnsAns := &dnsAnswer{
name: que.name,
qType: que.qType,
qClass: que.qClass,
ttl: ttl,
dataLen: uint16(len(address)),
addr: address,
}
ans := dnsAns.getAnswer()
rsp = append(rsp, ans...)
return rsp, nil
}
// parseDNSQuery converts bytes to *dnsQuestion
func parseDNSQuery(req []byte) (que *dnsQuestion, err error) {
head := &dnsHeader{}
head.getHeader(req)
if !head.isAQuery() {
return nil, errors.New("not a dns query, ignore")
}
que = &dnsQuestion{
event: eventNothing,
}
// Generally, when the recursive DNS server requests upward, it may
// initiate a resolution request for multiple aliases/domain names
// at once; Edge DNS does not need to process a message that carries
// multiple questions at a time.
if head.qdCount != 1 {
que.event = eventUpstream
return
}
offset := uint16(unsafe.Sizeof(dnsHeader{}))
// DNS NS <ROOT> operation
if req[offset] == 0x0 {
que.event = eventUpstream
return
}
que.getQuestion(req, offset, head)
err = nil
return
}
// isAQuery reports whether the dns pkg is a query
func (h *dnsHeader) isAQuery() bool {
return h.flags&dnsQR != dnsQR
}
// getHeader gets dns pkg head
func (h *dnsHeader) getHeader(req []byte) {
h.id = binary.BigEndian.Uint16(req[0:2])
h.flags = binary.BigEndian.Uint16(req[2:4])
h.qdCount = binary.BigEndian.Uint16(req[4:6])
h.anCount = binary.BigEndian.Uint16(req[6:8])
h.nsCount = binary.BigEndian.Uint16(req[8:10])
h.arCount = binary.BigEndian.Uint16(req[10:12])
}
// getQuestion gets a dns question
func (q *dnsQuestion) getQuestion(req []byte, offset uint16, head *dnsHeader) {
ost := offset
tmp := ost
ost = q.getQName(req, ost)
q.qType = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.qClass = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.head = head
q.queByte = req[tmp:ost]
}
// getAnswer generates answer for the dns question
func (da *dnsAnswer) getAnswer() (answer []byte) {
answer = make([]byte, 0)
if da.qType == aRecord {
answer = append(answer, 0xc0)
answer = append(answer, 0x0c)
tmp16 := make([]byte, 2)
tmp32 := make([]byte, 4)
binary.BigEndian.PutUint16(tmp16, da.qType)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint16(tmp16, da.qClass)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint32(tmp32, da.ttl)
answer = append(answer, tmp32...)
binary.BigEndian.PutUint16(tmp16, da.dataLen)
answer = append(answer, tmp16...)
answer = append(answer, da.addr...)
}
return answer
}
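// Editorial note (not in the original source): the leading bytes 0xc0 0x0c
// form a DNS name compression pointer (top two bits set, offset 12). Offset
// 12 is the question name immediately after the 12-byte header, so the
// answer record reuses that name instead of repeating it.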
// getQName gets dns question qName
func (q *dnsQuestion) getQName(req []byte, offset uint16) uint16 {
ost := offset
for {
// one byte to suggest length
qbyte := uint16(req[ost])
// qName ends with 0x00, and 0x00 should not be included
if qbyte == 0x00 {
q.name = q.name[:uint16(len(q.name))-oneByteSize]
return ost + oneByteSize
}
// step forward one more byte and get the real stuff
ost += oneByteSize
q.name = append(q.name, req[ost:ost+qbyte]...)
// add "." symbol
q.name = append(q.name, 0x2e)
ost += qbyte
}
}
// lookupFromMetaManager confirms if the service exists
func lookupFromMetaManager(serviceURL string) (exist bool, ip string) {
name, namespace := common.SplitServiceKey(serviceURL)
s, _ := metaClient.Services(namespace).Get(name)
if s != nil {
svcName := namespace + "." + name
ip := listener.GetServiceServer(svcName)
klog.Infof("[EdgeMesh] dns server parse %s ip %s", serviceURL, ip)
return true, ip
}
klog.Errorf("[EdgeMesh] service %s is not found in this cluster", serviceURL)
return false, ""
}
// getFromRealDNS returns a dns response from real dns servers
func getFromRealDNS(req []byte, from *net.UDPAddr) {
rsp := make([]byte, 0)
ips, err := parseNameServer()
if err != nil {
klog.Errorf("[EdgeMesh] parse nameserver err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: net.IPv4zero,
Port: 0,
}
// get from real dns servers
for _, ip := range ips {
raddr := &net.UDPAddr{
IP: ip,
Port: 53,
}
conn, err := net.DialUDP("udp", laddr, raddr)
if err != nil {
continue
}
defer conn.Close()
_, err = conn.Write(req)
if err != nil {
continue
}
if err = conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
continue
}
var n int
buf := make([]byte, bufSize)
n, err = conn.Read(buf)
if err != nil {
continue
}
if n > 0 {
rsp = append(rsp, buf[:n]...)
if _, err = dnsConn.WriteToUDP(rsp, from); err != nil {
klog.Errorf("[EdgeMesh] failed to wirte to udp, err: %v", err)
continue
}
break
}
}
}
// parseNameServer gets all real nameservers from the resolv.conf
func parseNameServer() ([]net.IP, error) {
file, err := os.Open("/etc/resolv.conf")
if err != nil {
return nil, fmt.Errorf("error opening /etc/resolv.conf: %v", err)
}
defer file.Close()
scan := bufio.NewScanner(file)
scan.Split(bufio.ScanLines)
ip := make([]net.IP, 0)
for scan.Scan() {
serverString := scan.Text()
if strings.Contains(serverString, "nameserver") {
tmpString := strings.Replace(serverString, "nameserver", "", 1)
nameserver := strings.TrimSpace(tmpString)
sip := net.ParseIP(nameserver)
if sip != nil && !sip.Equal(config.Config.ListenIP) {
ip = append(ip, sip)
}
}
}
if len(ip) == 0 {
return nil, fmt.Errorf("there is no nameserver in /etc/resolv.conf")
}
return ip, nil
}
// modifyRspPrefix generates a dns response head
func modifyRspPrefix(que *dnsQuestion) (pre []byte) {
if que == nil {
return
}
// use head in que
rspHead := que.head
rspHead.convertQueryRsp(true)
if que.qType == aRecord {
rspHead.setAnswerNum(1)
} else {
rspHead.setAnswerNum(0)
}
rspHead.setRspRCode(que)
pre = rspHead.getByteFromDNSHeader()
pre = append(pre, que.queByte...)
return
}
// convertQueryRsp converts a dns question head to a response head
func (h *dnsHeader) convertQueryRsp(isRsp bool) {
if isRsp {
h.flags |= dnsQR
}
}
// setAnswerNum sets the answer num for dns head
func (h *dnsHeader) setAnswerNum(num uint16) {
h.anCount = num
}
// setRspRCode sets dns response return code
func (h *dnsHeader) setRspRCode(que *dnsQuestion) {
if que.qType != aRecord {
h.flags &= (^errNotImplemented)
h.flags |= errNotImplemented
} else if que.event == eventNxDomain {
h.flags &= (^errRefused)
h.flags |= errRefused
}
}
// getByteFromDNSHeader converts dnsHeader to bytes
func (h *dnsHeader) getByteFromDNSHeader() (rspHead []byte) {
rspHead = make([]byte, unsafe.Sizeof(*h))
idxTransactionID := unsafe.Sizeof(h.id)
idxFlags := unsafe.Sizeof(h.flags) + idxTransactionID
idxQDCount := unsafe.Sizeof(h.qdCount) + idxFlags
idxANCount := unsafe.Sizeof(h.anCount) + idxQDCount
idxNSCount := unsafe.Sizeof(h.nsCount) + idxANCount
idxARCount := unsafe.Sizeof(h.arCount) + idxNSCount
binary.BigEndian.PutUint16(rspHead[:idxTransactionID], h.id)
binary.BigEndian.PutUint16(rspHead[idxTransactionID:idxFlags], h.flags)
binary.BigEndian.PutUint16(rspHead[idxFlags:idxQDCount], h.qdCount)
binary.BigEndian.PutUint16(rspHead[idxQDCount:idxANCount], h.anCount)
binary.BigEndian.PutUint16(rspHead[idxANCount:idxNSCount], h.nsCount)
binary.BigEndian.PutUint16(rspHead[idxNSCount:idxARCount], h.arCount)
return
}
| recordHandle | identifier_name |
dns.go | package dns | "bufio"
"encoding/binary"
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"unsafe"
"k8s.io/klog/v2"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
"github.com/kubeedge/kubeedge/edgemesh/pkg/common"
"github.com/kubeedge/kubeedge/edgemesh/pkg/config"
"github.com/kubeedge/kubeedge/edgemesh/pkg/listener"
)
type Event int
var (
// default docker0
ifi = "docker0"
// QR: 0 represents query, 1 represents response
dnsQR = uint16(0x8000)
oneByteSize = uint16(1)
twoByteSize = uint16(2)
ttl = uint32(64)
)
const (
// 1 for ipv4
aRecord = 1
bufSize = 1024
errNotImplemented = uint16(0x0004)
errRefused = uint16(0x0005)
eventNothing = Event(0)
eventUpstream = Event(1)
eventNxDomain = Event(2)
)
type dnsHeader struct {
id uint16
flags uint16
qdCount uint16
anCount uint16
nsCount uint16
arCount uint16
}
type dnsQuestion struct {
from *net.UDPAddr
head *dnsHeader
name []byte
queByte []byte
qType uint16
qClass uint16
queNum uint16
event Event
}
type dnsAnswer struct {
name []byte
qType uint16
qClass uint16
ttl uint32
dataLen uint16
addr []byte
}
// metaClient is a query client
var metaClient client.CoreInterface
// dnsConn saves DNS protocol
var dnsConn *net.UDPConn
// Start is the entry point for external callers
func Start() {
startDNS()
}
// startDNS starts edgemesh dns server
func startDNS() {
// init meta client
metaClient = client.New()
// get dns listen ip
lip, err := common.GetInterfaceIP(ifi)
if err != nil {
klog.Errorf("[EdgeMesh] get dns listen ip err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: lip,
Port: 53,
}
udpConn, err := net.ListenUDP("udp", laddr)
if err != nil {
klog.Errorf("[EdgeMesh] dns server listen on %v error: %v", laddr, err)
return
}
defer udpConn.Close()
dnsConn = udpConn
for {
req := make([]byte, bufSize)
n, from, err := dnsConn.ReadFromUDP(req)
if err != nil || n <= 0 {
klog.Errorf("[EdgeMesh] dns server read from udp error: %v", err)
continue
}
que, err := parseDNSQuery(req[:n])
if err != nil {
continue
}
que.from = from
rsp := make([]byte, 0)
rsp, err = recordHandle(que, req[:n])
if err != nil {
klog.Warningf("[EdgeMesh] failed to resolve dns: %v", err)
continue
}
if _, err = dnsConn.WriteTo(rsp, from); err != nil {
klog.Warningf("[EdgeMesh] failed to write: %v", err)
}
}
}
// recordHandle returns the answer for the dns question
func recordHandle(que *dnsQuestion, req []byte) (rsp []byte, err error) {
var exist bool
var ip string
// qType should be 1 for ipv4
if que.name != nil && que.qType == aRecord {
domainName := string(que.name)
exist, ip = lookupFromMetaManager(domainName)
}
if !exist || que.event == eventUpstream {
// if this service doesn't belong to this cluster
go getFromRealDNS(req, que.from)
return rsp, fmt.Errorf("get from real dns")
}
address := net.ParseIP(ip).To4()
if address == nil {
que.event = eventNxDomain
}
// generate the response prefix (header + echoed question)
pre := modifyRspPrefix(que)
rsp = append(rsp, pre...)
if que.event != eventNothing {
return rsp, nil
}
// create a deceptive resp, if no error
dnsAns := &dnsAnswer{
name: que.name,
qType: que.qType,
qClass: que.qClass,
ttl: ttl,
dataLen: uint16(len(address)),
addr: address,
}
ans := dnsAns.getAnswer()
rsp = append(rsp, ans...)
return rsp, nil
}
// parseDNSQuery converts bytes to *dnsQuestion
func parseDNSQuery(req []byte) (que *dnsQuestion, err error) {
head := &dnsHeader{}
head.getHeader(req)
if !head.isAQuery() {
return nil, errors.New("not a dns query, ignore")
}
que = &dnsQuestion{
event: eventNothing,
}
// Generally, when the recursive DNS server requests upward, it may
// initiate a resolution request for multiple aliases/domain names
// at once; Edge DNS does not need to process a message that carries
// multiple questions at a time.
if head.qdCount != 1 {
que.event = eventUpstream
return
}
offset := uint16(unsafe.Sizeof(dnsHeader{}))
// DNS NS <ROOT> operation
if req[offset] == 0x0 {
que.event = eventUpstream
return
}
que.getQuestion(req, offset, head)
err = nil
return
}
// isAQuery reports whether the dns pkg is a query
func (h *dnsHeader) isAQuery() bool {
return h.flags&dnsQR != dnsQR
}
// getHeader gets dns pkg head
func (h *dnsHeader) getHeader(req []byte) {
h.id = binary.BigEndian.Uint16(req[0:2])
h.flags = binary.BigEndian.Uint16(req[2:4])
h.qdCount = binary.BigEndian.Uint16(req[4:6])
h.anCount = binary.BigEndian.Uint16(req[6:8])
h.nsCount = binary.BigEndian.Uint16(req[8:10])
h.arCount = binary.BigEndian.Uint16(req[10:12])
}
// getQuestion gets a dns question
func (q *dnsQuestion) getQuestion(req []byte, offset uint16, head *dnsHeader) {
ost := offset
tmp := ost
ost = q.getQName(req, ost)
q.qType = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.qClass = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.head = head
q.queByte = req[tmp:ost]
}
// getAnswer generates answer for the dns question
func (da *dnsAnswer) getAnswer() (answer []byte) {
answer = make([]byte, 0)
if da.qType == aRecord {
answer = append(answer, 0xc0)
answer = append(answer, 0x0c)
tmp16 := make([]byte, 2)
tmp32 := make([]byte, 4)
binary.BigEndian.PutUint16(tmp16, da.qType)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint16(tmp16, da.qClass)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint32(tmp32, da.ttl)
answer = append(answer, tmp32...)
binary.BigEndian.PutUint16(tmp16, da.dataLen)
answer = append(answer, tmp16...)
answer = append(answer, da.addr...)
}
return answer
}
// getQName gets dns question qName
func (q *dnsQuestion) getQName(req []byte, offset uint16) uint16 {
ost := offset
for {
// one byte to suggest length
qbyte := uint16(req[ost])
// qName ends with 0x00, and 0x00 should not be included
if qbyte == 0x00 {
q.name = q.name[:uint16(len(q.name))-oneByteSize]
return ost + oneByteSize
}
// step forward one more byte and get the real stuff
ost += oneByteSize
q.name = append(q.name, req[ost:ost+qbyte]...)
// add "." symbol
q.name = append(q.name, 0x2e)
ost += qbyte
}
}
// lookupFromMetaManager confirms if the service exists
func lookupFromMetaManager(serviceURL string) (exist bool, ip string) {
name, namespace := common.SplitServiceKey(serviceURL)
s, _ := metaClient.Services(namespace).Get(name)
if s != nil {
svcName := namespace + "." + name
ip := listener.GetServiceServer(svcName)
klog.Infof("[EdgeMesh] dns server parse %s ip %s", serviceURL, ip)
return true, ip
}
klog.Errorf("[EdgeMesh] service %s is not found in this cluster", serviceURL)
return false, ""
}
// getFromRealDNS returns a dns response from real dns servers
func getFromRealDNS(req []byte, from *net.UDPAddr) {
rsp := make([]byte, 0)
ips, err := parseNameServer()
if err != nil {
klog.Errorf("[EdgeMesh] parse nameserver err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: net.IPv4zero,
Port: 0,
}
// get from real dns servers
for _, ip := range ips {
raddr := &net.UDPAddr{
IP: ip,
Port: 53,
}
conn, err := net.DialUDP("udp", laddr, raddr)
if err != nil {
continue
}
defer conn.Close()
_, err = conn.Write(req)
if err != nil {
continue
}
if err = conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
continue
}
var n int
buf := make([]byte, bufSize)
n, err = conn.Read(buf)
if err != nil {
continue
}
if n > 0 {
rsp = append(rsp, buf[:n]...)
if _, err = dnsConn.WriteToUDP(rsp, from); err != nil {
klog.Errorf("[EdgeMesh] failed to wirte to udp, err: %v", err)
continue
}
break
}
}
}
// parseNameServer gets all real nameservers from the resolv.conf
func parseNameServer() ([]net.IP, error) {
file, err := os.Open("/etc/resolv.conf")
if err != nil {
return nil, fmt.Errorf("error opening /etc/resolv.conf: %v", err)
}
defer file.Close()
scan := bufio.NewScanner(file)
scan.Split(bufio.ScanLines)
ip := make([]net.IP, 0)
for scan.Scan() {
serverString := scan.Text()
if strings.Contains(serverString, "nameserver") {
tmpString := strings.Replace(serverString, "nameserver", "", 1)
nameserver := strings.TrimSpace(tmpString)
sip := net.ParseIP(nameserver)
if sip != nil && !sip.Equal(config.Config.ListenIP) {
ip = append(ip, sip)
}
}
}
if len(ip) == 0 {
return nil, fmt.Errorf("there is no nameserver in /etc/resolv.conf")
}
return ip, nil
}
// modifyRspPrefix generates a dns response head
func modifyRspPrefix(que *dnsQuestion) (pre []byte) {
if que == nil {
return
}
// use head in que
rspHead := que.head
rspHead.convertQueryRsp(true)
if que.qType == aRecord {
rspHead.setAnswerNum(1)
} else {
rspHead.setAnswerNum(0)
}
rspHead.setRspRCode(que)
pre = rspHead.getByteFromDNSHeader()
pre = append(pre, que.queByte...)
return
}
// convertQueryRsp converts a dns question head to a response head
func (h *dnsHeader) convertQueryRsp(isRsp bool) {
if isRsp {
h.flags |= dnsQR
}
}
// setAnswerNum sets the answer num for dns head
func (h *dnsHeader) setAnswerNum(num uint16) {
h.anCount = num
}
// setRspRCode sets dns response return code
func (h *dnsHeader) setRspRCode(que *dnsQuestion) {
if que.qType != aRecord {
h.flags &= (^errNotImplemented)
h.flags |= errNotImplemented
} else if que.event == eventNxDomain {
h.flags &= (^errRefused)
h.flags |= errRefused
}
}
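// Editorial note (not in the original source): errNotImplemented (4, NOTIMP)
// and errRefused (5, REFUSED) are standard RCODE values carried in the low
// four bits of the flags word; clearing those bits before OR-ing them back
// in keeps the update idempotent. A minimal check of the result:
//
// rcode := h.flags & 0x000f // 4 => NOTIMP, 5 => REFUSED, 0 => NOERROR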
// getByteFromDNSHeader converts dnsHeader to bytes
func (h *dnsHeader) getByteFromDNSHeader() (rspHead []byte) {
rspHead = make([]byte, unsafe.Sizeof(*h))
idxTransactionID := unsafe.Sizeof(h.id)
idxFlags := unsafe.Sizeof(h.flags) + idxTransactionID
idxQDCount := unsafe.Sizeof(h.qdCount) + idxFlags
idxANCount := unsafe.Sizeof(h.anCount) + idxQDCount
idxNSCount := unsafe.Sizeof(h.nsCount) + idxANCount
idxARCount := unsafe.Sizeof(h.arCount) + idxNSCount
binary.BigEndian.PutUint16(rspHead[:idxTransactionID], h.id)
binary.BigEndian.PutUint16(rspHead[idxTransactionID:idxFlags], h.flags)
binary.BigEndian.PutUint16(rspHead[idxFlags:idxQDCount], h.qdCount)
binary.BigEndian.PutUint16(rspHead[idxQDCount:idxANCount], h.anCount)
binary.BigEndian.PutUint16(rspHead[idxANCount:idxNSCount], h.nsCount)
binary.BigEndian.PutUint16(rspHead[idxNSCount:idxARCount], h.arCount)
return
} |
import ( | random_line_split |
dns.go | package dns
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"unsafe"
"k8s.io/klog/v2"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
"github.com/kubeedge/kubeedge/edgemesh/pkg/common"
"github.com/kubeedge/kubeedge/edgemesh/pkg/config"
"github.com/kubeedge/kubeedge/edgemesh/pkg/listener"
)
type Event int
var (
// default docker0
ifi = "docker0"
// QR: 0 represents query, 1 represents response
dnsQR = uint16(0x8000)
oneByteSize = uint16(1)
twoByteSize = uint16(2)
ttl = uint32(64)
)
const (
// 1 for ipv4
aRecord = 1
bufSize = 1024
errNotImplemented = uint16(0x0004)
errRefused = uint16(0x0005)
eventNothing = Event(0)
eventUpstream = Event(1)
eventNxDomain = Event(2)
)
type dnsHeader struct {
id uint16
flags uint16
qdCount uint16
anCount uint16
nsCount uint16
arCount uint16
}
type dnsQuestion struct {
from *net.UDPAddr
head *dnsHeader
name []byte
queByte []byte
qType uint16
qClass uint16
queNum uint16
event Event
}
type dnsAnswer struct {
name []byte
qType uint16
qClass uint16
ttl uint32
dataLen uint16
addr []byte
}
// metaClient is a query client
var metaClient client.CoreInterface
// dnsConn saves DNS protocol
var dnsConn *net.UDPConn
// Start is the entry point for external callers
func Start() {
startDNS()
}
// startDNS starts edgemesh dns server
func startDNS() {
// init meta client
metaClient = client.New()
// get dns listen ip
lip, err := common.GetInterfaceIP(ifi)
if err != nil {
klog.Errorf("[EdgeMesh] get dns listen ip err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: lip,
Port: 53,
}
udpConn, err := net.ListenUDP("udp", laddr)
if err != nil {
klog.Errorf("[EdgeMesh] dns server listen on %v error: %v", laddr, err)
return
}
defer udpConn.Close()
dnsConn = udpConn
for {
req := make([]byte, bufSize)
n, from, err := dnsConn.ReadFromUDP(req)
if err != nil || n <= 0 {
klog.Errorf("[EdgeMesh] dns server read from udp error: %v", err)
continue
}
que, err := parseDNSQuery(req[:n])
if err != nil {
continue
}
que.from = from
rsp := make([]byte, 0)
rsp, err = recordHandle(que, req[:n])
if err != nil {
klog.Warningf("[EdgeMesh] failed to resolve dns: %v", err)
continue
}
if _, err = dnsConn.WriteTo(rsp, from); err != nil {
klog.Warningf("[EdgeMesh] failed to write: %v", err)
}
}
}
// recordHandle returns the answer for the dns question
func recordHandle(que *dnsQuestion, req []byte) (rsp []byte, err error) {
var exist bool
var ip string
// qType should be 1 for ipv4
if que.name != nil && que.qType == aRecord {
domainName := string(que.name)
exist, ip = lookupFromMetaManager(domainName)
}
if !exist || que.event == eventUpstream {
// if this service doesn't belong to this cluster
go getFromRealDNS(req, que.from)
return rsp, fmt.Errorf("get from real dns")
}
address := net.ParseIP(ip).To4()
if address == nil {
que.event = eventNxDomain
}
// generate the response prefix (header + echoed question)
pre := modifyRspPrefix(que)
rsp = append(rsp, pre...)
if que.event != eventNothing {
return rsp, nil
}
// create a deceptive resp, if no error
dnsAns := &dnsAnswer{
name: que.name,
qType: que.qType,
qClass: que.qClass,
ttl: ttl,
dataLen: uint16(len(address)),
addr: address,
}
ans := dnsAns.getAnswer()
rsp = append(rsp, ans...)
return rsp, nil
}
// parseDNSQuery converts bytes to *dnsQuestion
func parseDNSQuery(req []byte) (que *dnsQuestion, err error) {
head := &dnsHeader{}
head.getHeader(req)
if !head.isAQuery() {
return nil, errors.New("not a dns query, ignore")
}
que = &dnsQuestion{
event: eventNothing,
}
// Generally, when the recursive DNS server requests upward, it may
// initiate a resolution request for multiple aliases/domain names
// at once; Edge DNS does not need to process a message that carries
// multiple questions at a time.
if head.qdCount != 1 {
que.event = eventUpstream
return
}
offset := uint16(unsafe.Sizeof(dnsHeader{}))
// DNS NS <ROOT> operation
if req[offset] == 0x0 {
que.event = eventUpstream
return
}
que.getQuestion(req, offset, head)
err = nil
return
}
// isAQuery reports whether the dns pkg is a query
func (h *dnsHeader) isAQuery() bool {
return h.flags&dnsQR != dnsQR
}
// getHeader gets dns pkg head
func (h *dnsHeader) getHeader(req []byte) {
h.id = binary.BigEndian.Uint16(req[0:2])
h.flags = binary.BigEndian.Uint16(req[2:4])
h.qdCount = binary.BigEndian.Uint16(req[4:6])
h.anCount = binary.BigEndian.Uint16(req[6:8])
h.nsCount = binary.BigEndian.Uint16(req[8:10])
h.arCount = binary.BigEndian.Uint16(req[10:12])
}
// getQuestion gets a dns question
func (q *dnsQuestion) getQuestion(req []byte, offset uint16, head *dnsHeader) {
ost := offset
tmp := ost
ost = q.getQName(req, ost)
q.qType = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.qClass = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
ost += twoByteSize
q.head = head
q.queByte = req[tmp:ost]
}
// getAnswer generates answer for the dns question
func (da *dnsAnswer) getAnswer() (answer []byte) {
answer = make([]byte, 0)
if da.qType == aRecord {
answer = append(answer, 0xc0)
answer = append(answer, 0x0c)
tmp16 := make([]byte, 2)
tmp32 := make([]byte, 4)
binary.BigEndian.PutUint16(tmp16, da.qType)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint16(tmp16, da.qClass)
answer = append(answer, tmp16...)
binary.BigEndian.PutUint32(tmp32, da.ttl)
answer = append(answer, tmp32...)
binary.BigEndian.PutUint16(tmp16, da.dataLen)
answer = append(answer, tmp16...)
answer = append(answer, da.addr...)
}
return answer
}
// getQName gets dns question qName
func (q *dnsQuestion) getQName(req []byte, offset uint16) uint16 {
ost := offset
for {
// one byte to suggest length
qbyte := uint16(req[ost])
// qName ends with 0x00, and 0x00 should not be included
if qbyte == 0x00 {
q.name = q.name[:uint16(len(q.name))-oneByteSize]
return ost + oneByteSize
}
// step forward one more byte and get the real stuff
ost += oneByteSize
q.name = append(q.name, req[ost:ost+qbyte]...)
// add "." symbol
q.name = append(q.name, 0x2e)
ost += qbyte
}
}
// lookupFromMetaManager confirms if the service exists
func lookupFromMetaManager(serviceURL string) (exist bool, ip string) {
name, namespace := common.SplitServiceKey(serviceURL)
s, _ := metaClient.Services(namespace).Get(name)
if s != nil {
svcName := namespace + "." + name
ip := listener.GetServiceServer(svcName)
klog.Infof("[EdgeMesh] dns server parse %s ip %s", serviceURL, ip)
return true, ip
}
klog.Errorf("[EdgeMesh] service %s is not found in this cluster", serviceURL)
return false, ""
}
// getFromRealDNS returns a dns response from real dns servers
func getFromRealDNS(req []byte, from *net.UDPAddr) {
rsp := make([]byte, 0)
ips, err := parseNameServer()
if err != nil {
klog.Errorf("[EdgeMesh] parse nameserver err: %v", err)
return
}
laddr := &net.UDPAddr{
IP: net.IPv4zero,
Port: 0,
}
// get from real dns servers
for _, ip := range ips {
raddr := &net.UDPAddr{
IP: ip,
Port: 53,
}
conn, err := net.DialUDP("udp", laddr, raddr)
if err != nil {
continue
}
defer conn.Close()
_, err = conn.Write(req)
if err != nil {
continue
}
if err = conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
continue
}
var n int
buf := make([]byte, bufSize)
n, err = conn.Read(buf)
if err != nil {
continue
}
if n > 0 {
rsp = append(rsp, buf[:n]...)
if _, err = dnsConn.WriteToUDP(rsp, from); err != nil {
klog.Errorf("[EdgeMesh] failed to wirte to udp, err: %v", err)
continue
}
break
}
}
}
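// Editorial note (not in the original source): the deferred conn.Close()
// calls above run only when getFromRealDNS returns, so every dialed socket
// stays open for the whole loop. A sketch that releases each one eagerly:
//
// conn, err := net.DialUDP("udp", laddr, raddr)
// if err != nil {
//     continue
// }
// // ... write the request, read the response ...
// conn.Close() // close before dialing the next nameserver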
// parseNameServer gets all real nameservers from the resolv.conf
func parseNameServer() ([]net.IP, error) {
file, err := os.Open("/etc/resolv.conf")
if err != nil {
return nil, fmt.Errorf("error opening /etc/resolv.conf: %v", err)
}
defer file.Close()
scan := bufio.NewScanner(file)
scan.Split(bufio.ScanLines)
ip := make([]net.IP, 0)
for scan.Scan() {
serverString := scan.Text()
if strings.Contains(serverString, "nameserver") {
tmpString := strings.Replace(serverString, "nameserver", "", 1)
nameserver := strings.TrimSpace(tmpString)
sip := net.ParseIP(nameserver)
if sip != nil && !sip.Equal(config.Config.ListenIP) {
ip = append(ip, sip)
}
}
}
if len(ip) == 0 {
return nil, fmt.Errorf("there is no nameserver in /etc/resolv.conf")
}
return ip, nil
}
// modifyRspPrefix generates a dns response head
func modifyRspPrefix(que *dnsQuestion) (pre []byte) {
if que == nil {
return
}
// use head in que
rspHead := que.head
rspHead.convertQueryRsp(true)
if que.qType == aRecord {
rspHead.setAnswerNum(1)
} else {
rspHead.setAnswerNum(0)
}
rspHead.setRspRCode(que)
pre = rspHead.getByteFromDNSHeader()
pre = append(pre, que.queByte...)
return
}
// convertQueryRsp converts a dns question head to a response head
func (h *dnsHeader) convertQueryRsp(isRsp bool) {
if isRsp {
h.flags |= dnsQR
}
}
// setAnswerNum sets the answer num for dns head
func (h *dnsHeader) setAnswerNum(num uint16) |
// setRspRCode sets dns response return code
func (h *dnsHeader) setRspRCode(que *dnsQuestion) {
if que.qType != aRecord {
h.flags &= (^errNotImplemented)
h.flags |= errNotImplemented
} else if que.event == eventNxDomain {
h.flags &= (^errRefused)
h.flags |= errRefused
}
}
// getByteFromDNSHeader converts dnsHeader to bytes
func (h *dnsHeader) getByteFromDNSHeader() (rspHead []byte) {
rspHead = make([]byte, unsafe.Sizeof(*h))
idxTransactionID := unsafe.Sizeof(h.id)
idxFlags := unsafe.Sizeof(h.flags) + idxTransactionID
idxQDCount := unsafe.Sizeof(h.qdCount) + idxFlags
idxANCount := unsafe.Sizeof(h.anCount) + idxQDCount
idxNSCount := unsafe.Sizeof(h.nsCount) + idxANCount
idxARCount := unsafe.Sizeof(h.arCount) + idxNSCount
binary.BigEndian.PutUint16(rspHead[:idxTransactionID], h.id)
binary.BigEndian.PutUint16(rspHead[idxTransactionID:idxFlags], h.flags)
binary.BigEndian.PutUint16(rspHead[idxFlags:idxQDCount], h.qdCount)
binary.BigEndian.PutUint16(rspHead[idxQDCount:idxANCount], h.anCount)
binary.BigEndian.PutUint16(rspHead[idxANCount:idxNSCount], h.nsCount)
binary.BigEndian.PutUint16(rspHead[idxNSCount:idxARCount], h.arCount)
return
}
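// Editorial sketch (not in the original source): the same big-endian layout
// can be produced with encoding/binary and a bytes.Buffer (this would need
// the bytes import):
//
// var buf bytes.Buffer
// for _, v := range []uint16{h.id, h.flags, h.qdCount, h.anCount, h.nsCount, h.arCount} {
//     binary.Write(&buf, binary.BigEndian, v)
// }
// rspHead = buf.Bytes()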
| {
h.anCount = num
} | identifier_body |
getprimers.py | import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
def __init__(self, excel_file, db):
self.excel_file = excel_file
self.db = db
global con, curs
con = lite.connect(self.db) # Creates a database if it doesn't already exist.
curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
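# Editorial example (not in the original source): typical usage, assuming a
# hypothetical workbook "primers.xlsx" whose latest sheet is named
# "... Current primers":
#
# gp = GetPrimers('primers.xlsx', 'primers.db')
# sheet = gp.get_sheet_name()
# df_primers_dups, df_primers = gp.get_primers(sheet)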
def get_primers(self, sheetname):
"""Extracts primer data from sheet.
Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
duplicated rows). The df_primers data frame will go on to be used in the virtual PCR, so irrelevant columns
are dropped and any duplicate rows are removed. | df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
'action_to_take', 'check_by'],
sheetname=sheetname, index_col=None)
to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN
df_primers = df_primers_dups.drop(to_drop, axis=1)
df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_primers = df_primers.reset_index(drop=True)
return df_primers_dups, df_primers
def run_pcr(self, csv):
"""Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.
:param csv: a csv file is need as an input with format "name, forward, reverse".
:return bedfile: with results of virtual PCR if there is a match.
"""
print "Running virtual PCR..."
chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
for chr in chromosomes:
os.system(
"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
%s %s.tmp.psl" % (chr, csv, chr[:-5]))
pslfile = "%s.tmp.psl" % chr[:-5]
bedfile = "%s.tmp.bed" % chr[:-5]
# Only converts a non-empty psl file to a bed file, and removes all psl files in the folder.
# Note: the loop returns on the first non-empty psl, so later chromosomes are not searched,
# and the function implicitly returns None when no chromosome gives a hit.
if os.path.getsize(pslfile) != 0:
os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
os.system("rm %s" % pslfile)
return bedfile
else:
os.system("rm %s" % pslfile)
def get_coords(self, df_primers):
"""Generates csv file for virtual PCR and imports results into a pandas data frame.
:param df_primers: data frame of primer data.
:return df_coords: data frame with chromosome, start and end coordinates, and a name
(format "Gene_ExonDirection") for each primer.
"""
primer_list = []
names_dup = []
names = []
exons = []
dirs = []
start_coords = []
end_coords = []
chroms = []
seq_position = 0
list_position = 0
primer_seqs = pd.DataFrame([])
csv = '%s.csv' % self.excel_file[:-5]
csv = csv.replace(" ", "")
# (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
for row_index, row in df_primers.iterrows():
primer_list.append(str(row['Primer_seq']))
names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
exons.append(str(row['Exon']))
dirs.append(str(row['Direction']))
for item in names_dup:
if item not in names:
names.append(item)
forwards = primer_list[::2]
reverses = primer_list[1::2]
while list_position < len(forwards):
ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
primer_seqs = primer_seqs.append(ser, ignore_index=True)
list_position += 1
primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
# (2) Runs virtual PCR on generated csv.
bedfile = self.run_pcr(csv)
tool = BedTool(bedfile)
# (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
for row in tool:
chroms.append(row.chrom)
start_coords.append(row.start)
end_coords.append(row.start + len(primer_list[seq_position]))
chroms.append(row.chrom)
end_coords.append(row.end)
start_coords.append(row.end - len(primer_list[seq_position + 1]))
seq_position += 1
df_coords = pd.DataFrame([])
df_coords.insert(0, 'chrom', chroms)
df_coords.insert(1, 'start', start_coords)
df_coords.insert(2, 'end', end_coords)
df_coords.insert(3, 'name', names)
# (4) Generates a bed file from df_coords (not currently used in application).
bed = os.path.splitext(bedfile)[0]
df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t') # cannot directly convert to bed.
csv_file = BedTool('%s.csv' % bed)
csv_file.saveas('%s.bed' % bed)
df_coords.insert(4, 'Exon', exons) # not needed in the bed file, so added after.
df_coords.insert(5, 'Direction', dirs)
# Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
bed)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: a row of the data frame.
:return: the row's Exon value as a string.
"""
return str(row['Exon'])
def combine_coords_primers(self, df_coords, df_primers_dups):
"""Adds primer coordinates to original df_primers_dups data frame.
:param df_primers_dups: data frame with primer data from excel.
:param df_coords: data frame with chrom, start, end, name, exon, direction.
:return df_combined: data frame of merge between df_coords and df_primers_dups.
:return gene_name: this will be added to the Genes table and used to check if already in database.
"""
df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
# Merge based on Exon and Direction columns
df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
# There is already a Chromosome column in df_primers_dups
cols_to_drop = ['chrom']
df_combined = df_combined.drop(cols_to_drop, axis=1)
gene_name = df_combined.get_value(0, 'Gene')
return df_combined, gene_name
def check_in_db(self, gene):
"""Queries the database to check if data for a particular gene is already present.
:param gene: a gene name to check against the database.
:return result: query result which will be a gene if already in database and None if not.
"""
curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE '%s'" % gene)
result = curs.fetchone()
return result
def to_db(self, df_combined, gene_name):
"""Creates tables and adds data into the database.
Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and
performs data checks. If data for a particular gene is already in the database, it is overwritten and the
previous data is saved to an excel document (archived_files).
The commented out section should only be used for the first file to initially set up the tables.
:param gene_name: gene to check against database.
:param df_combined: data frame to be inserted into database.
:return info: description of action performed (for audit log).
:return archived_filename: filename the previous data is saved under (for audit log).
"""
# (1) Creates database schema
curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
# (2) Drops unnecessary columns to make two tables and removes duplicates.
primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
# (3) Performs data checks using CheckPrimers and CheckSNPs classes.
check = CheckPrimers(df_primertable, df_snptable)
total_errors, error_details = check.check_all()
# (4) Checks if gene data already in database.
uni_gene = '(u\'%s\',)' % gene_name
gene = self.check_in_db(gene_name) # this outputs a unicode string
# (5) Adds to database if no errors. Overwrites data if already present.
archived_filename = None
if total_errors == 0:
if str(uni_gene) == str(gene):
# Add query to data frame then save to excel.
get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
gene_name
today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
df_sql = pd.read_sql_query(get_old_query, con=con)
archived_filename = '%s_%s' % (gene_name, today_date)
writer = ExcelWriter('%s.xlsx' % archived_filename)
df_sql.to_excel(writer, '%s' % today_date, index=False)
writer.save()
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
info = "Data updated."
else:
info = "New gene added."
# Insert new data into SQL tables.
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
df_primertable.to_sql('Primers', con, if_exists='append', index=False)
df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
print "Primers successfully added to database."
else:
info = error_details
con.commit()
return info, archived_filename
def all(self):
"""Combines all methods"""
sheetname = self.get_sheet_name()
df_primers_dups, df_primers = self.get_primers(sheetname)
df_coords = self.get_coords(df_primers)
df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)
info, archived_filename = self.to_db(df_combined, gene)
return info, archived_filename | :param sheetname: sheet data to be extracted from
:return df_primers_dups: data frame containing extracted data which may include duplicates.
:return df_primers: data frame containing only data necessary to get genome coordinates.
""" | random_line_split |
getprimers.py | import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
def __init__(self, excel_file, db):
self.excel_file = excel_file
self.db = db
global con, curs
con = lite.connect(self.db) # Creates a database if it doesn't already exist.
curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
def get_primers(self, sheetname):
"""Extracts primer data from sheet.
Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
duplicated rows). The df_primers data frame will go on to be used in the virtual PCR, so irrelevant columns
are dropped and any duplicate rows are removed.
:param sheetname: sheet data to be extracted from
:return df_primers_dups: data frame containing extracted data which may include duplicates.
:return df_primers: data frame containing only data necessary to get genome coordinates.
"""
df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
'action_to_take', 'check_by'],
sheetname=sheetname, index_col=None)
to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN
df_primers = df_primers_dups.drop(to_drop, axis=1)
df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_primers = df_primers.reset_index(drop=True)
return df_primers_dups, df_primers
def | (self, csv):
"""Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.
:param csv: a csv file is needed as input, with the format "name, forward, reverse".
:return bedfile: with results of virtual PCR if there is a match.
"""
print "Running virtual PCR..."
chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
for chr in chromosomes:
os.system(
"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
%s %s.tmp.psl" % (chr, csv, chr[:-5]))
pslfile = "%s.tmp.psl" % chr[:-5]
bedfile = "%s.tmp.bed" % chr[:-5]
# Only converts a non-empty psl file to a bed file, and removes all psl files in the folder.
# Note: the loop returns on the first non-empty psl, so later chromosomes are not searched,
# and the function implicitly returns None when no chromosome gives a hit.
if os.path.getsize(pslfile) != 0:
os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
os.system("rm %s" % pslfile)
return bedfile
else:
os.system("rm %s" % pslfile)
def get_coords(self, df_primers):
"""Generates csv file for virtual PCR and imports results into a pandas data frame.
:param df_primers: data frame of primer data.
:return df_coords: data frame with chromosome, start and end coordinates, and a name
(format "Gene_ExonDirection") for each primer.
"""
primer_list = []
names_dup = []
names = []
exons = []
dirs = []
start_coords = []
end_coords = []
chroms = []
seq_position = 0
list_position = 0
primer_seqs = pd.DataFrame([])
csv = '%s.csv' % self.excel_file[:-5]
csv = csv.replace(" ", "")
# (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
for row_index, row in df_primers.iterrows():
primer_list.append(str(row['Primer_seq']))
names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
exons.append(str(row['Exon']))
dirs.append(str(row['Direction']))
for item in names_dup:
if item not in names:
names.append(item)
forwards = primer_list[::2]
reverses = primer_list[1::2]
while list_position < len(forwards):
ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
primer_seqs = primer_seqs.append(ser, ignore_index=True)
list_position += 1
primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
# (2) Runs virtual PCR on generated csv.
bedfile = self.run_pcr(csv)
tool = BedTool(bedfile)
# (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
for row in tool:
chroms.append(row.chrom)
start_coords.append(row.start)
end_coords.append(row.start + len(primer_list[seq_position]))
chroms.append(row.chrom)
end_coords.append(row.end)
start_coords.append(row.end - len(primer_list[seq_position + 1]))
seq_position += 1
df_coords = pd.DataFrame([])
df_coords.insert(0, 'chrom', chroms)
df_coords.insert(1, 'start', start_coords)
df_coords.insert(2, 'end', end_coords)
df_coords.insert(3, 'name', names)
# (4) Generates a bed file from df_coords (not currently used in application).
bed = os.path.splitext(bedfile)[0]
df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t') # cannot directly convert to bed.
csv_file = BedTool('%s.csv' % bed)
csv_file.saveas('%s.bed' % bed)
df_coords.insert(4, 'Exon', exons) # not needed in the bed file, so added after.
df_coords.insert(5, 'Direction', dirs)
# Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
bed)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: a row of the data frame.
:return: the row's Exon value as a string.
"""
return str(row['Exon'])
def combine_coords_primers(self, df_coords, df_primers_dups):
"""Adds primer coordinates to original df_primers_dups data frame.
:param df_primers_dups: data frame with primer data from excel.
:param df_coords: data frame with chrom, start, end, name, exon, direction.
:return df_combined: data frame of merge between df_coords and df_primers_dups.
:return gene_name: this will be added to the Genes table and used to check if already in database.
"""
df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
# Merge based on Exon and Direction columns
df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
# There is already a Chromosome column in df_primers_dups
cols_to_drop = ['chrom']
df_combined = df_combined.drop(cols_to_drop, axis=1)
gene_name = df_combined.get_value(0, 'Gene')
return df_combined, gene_name
def check_in_db(self, gene):
"""Queries the database to check if data for a particular gene is already present.
:param gene: a gene name to check against the database.
:return result: query result which will be a gene if already in database and None if not.
"""
curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE '%s'" % gene)
result = curs.fetchone()
return result
def to_db(self, df_combined, gene_name):
"""Creates tables and adds data into the database.
Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and
performs data checks. If data for a particular gene is already in the database, it is overwritten and the
previous data is saved to an excel document (archived_files).
The commented out section should only be used for the first file to initially set up the tables.
:param gene_name: gene to check against database.
:param df_combined: data frame to be inserted into database.
:return info: description of action performed (for audit log).
:return archived_filename: filename the previous data is saved under (for audit log).
"""
# (1) Creates database schema
curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
# (2) Drops unnecessary columns to make two tables and removes duplicates.
primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
# (3) Performs data checks using CheckPrimers and CheckSNPs classes.
check = CheckPrimers(df_primertable, df_snptable)
total_errors, error_details = check.check_all()
# (4) Checks if gene data already in database.
uni_gene = '(u\'%s\',)' % gene_name
gene = self.check_in_db(gene_name) # this outputs a unicode string
# (5) Adds to database if no errors. Overwrites data if already present.
archived_filename = None
if total_errors == 0:
if str(uni_gene) == str(gene):
# Add query to data frame then save to excel.
get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
gene_name
today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
df_sql = pd.read_sql_query(get_old_query, con=con)
archived_filename = '%s_%s' % (gene_name, today_date)
writer = ExcelWriter('%s.xlsx' % archived_filename)
df_sql.to_excel(writer, '%s' % today_date, index=False)
writer.save()
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
info = "Data updated."
else:
info = "New gene added."
# Insert new data into SQL tables.
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
df_primertable.to_sql('Primers', con, if_exists='append', index=False)
df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
print "Primers successfully added to database."
else:
info = error_details
con.commit()
return info, archived_filename
def all(self):
"""Combines all methods"""
sheetname = self.get_sheet_name()
df_primers_dups, df_primers = self.get_primers(sheetname)
df_coords = self.get_coords(df_primers)
df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)
info, archived_filename = self.to_db(df_combined, gene)
return info, archived_filename
| run_pcr | identifier_name |
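get_coords assumes primers alternate forward/reverse down the sheet, so stride slicing splits them into pairs. A minimal illustration with made-up sequences:

primer_list = ["AAAA", "TTTT", "GGGG", "CCCC"]  # F, R, F, R (hypothetical)
forwards = primer_list[::2]   # ["AAAA", "GGGG"]
reverses = primer_list[1::2]  # ["TTTT", "CCCC"]
assert list(zip(forwards, reverses)) == [("AAAA", "TTTT"), ("GGGG", "CCCC")]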
getprimers.py | import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
def __init__(self, excel_file, db):
self.excel_file = excel_file
self.db = db
global con, curs
con = lite.connect(self.db) # Creates a database if it doesn't already exist.
curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
def get_primers(self, sheetname):
"""Extracts primer data from sheet.
Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
duplicated rows). The df_primers data frame will go on to be used in the virtual PCR, so irrelevant columns
are dropped and any duplicate rows are removed.
:param sheetname: sheet data to be extracted from
:return df_primers_dups: data frame containing extracted data which may include duplicates.
:return df_primers: data frame containing only data necessary to get genome coordinates.
"""
df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
'action_to_take', 'check_by'],
sheetname=sheetname, index_col=None)
to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN
df_primers = df_primers_dups.drop(to_drop, axis=1)
df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_primers = df_primers.reset_index(drop=True)
return df_primers_dups, df_primers
def run_pcr(self, csv):
"""Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.
:param csv: a csv file is needed as input, with the format "name, forward, reverse".
:return bedfile: with results of virtual PCR if there is a match.
"""
print "Running virtual PCR..."
chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
for chr in chromosomes:
os.system(
"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
%s %s.tmp.psl" % (chr, csv, chr[:-5]))
pslfile = "%s.tmp.psl" % chr[:-5]
bedfile = "%s.tmp.bed" % chr[:-5]
# Only converts a non-empty psl file to a bed file, and removes all psl files in the folder.
# Note: the loop returns on the first non-empty psl, so later chromosomes are not searched,
# and the function implicitly returns None when no chromosome gives a hit.
if os.path.getsize(pslfile) != 0:
os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
os.system("rm %s" % pslfile)
return bedfile
else:
os.system("rm %s" % pslfile)
def get_coords(self, df_primers):
"""Generates csv file for virtual PCR and imports results into a pandas data frame.
:param df_primers: data frame of primer data.
:return df_coords: data frame with chromosome, start and end coordinates, and a name
(format "Gene_ExonDirection") for each primer.
"""
primer_list = []
names_dup = []
names = []
exons = []
dirs = []
start_coords = []
end_coords = []
chroms = []
seq_position = 0
list_position = 0
primer_seqs = pd.DataFrame([])
csv = '%s.csv' % self.excel_file[:-5]
csv = csv.replace(" ", "")
# (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
for row_index, row in df_primers.iterrows():
primer_list.append(str(row['Primer_seq']))
names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
exons.append(str(row['Exon']))
dirs.append(str(row['Direction']))
for item in names_dup:
if item not in names:
|
forwards = primer_list[::2]
reverses = primer_list[1::2]
while list_position < len(forwards):
ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
primer_seqs = primer_seqs.append(ser, ignore_index=True)
list_position += 1
primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
# (2) Runs virtual PCR on generated csv.
bedfile = self.run_pcr(csv)
tool = BedTool(bedfile)
# (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
for row in tool:
chroms.append(row.chrom)
start_coords.append(row.start)
end_coords.append(row.start + len(primer_list[seq_position]))
chroms.append(row.chrom)
end_coords.append(row.end)
start_coords.append(row.end - len(primer_list[seq_position + 1]))
seq_position += 1
df_coords = pd.DataFrame([])
df_coords.insert(0, 'chrom', chroms)
df_coords.insert(1, 'start', start_coords)
df_coords.insert(2, 'end', end_coords)
df_coords.insert(3, 'name', names)
# (4) Generates a bed file from df_coords (not currently used in application).
bed = os.path.splitext(bedfile)[0]
df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t') # cannot directly convert to bed.
csv_file = BedTool('%s.csv' % bed)
csv_file.saveas('%s.bed' % bed)
df_coords.insert(4, 'Exon', exons) # not needed in the bed file, so added after.
df_coords.insert(5, 'Direction', dirs)
# Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
bed)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: a row of the data frame.
:return: the row's Exon value as a string.
"""
return str(row['Exon'])
def combine_coords_primers(self, df_coords, df_primers_dups):
"""Adds primer coordinates to original df_primers_dups data frame.
:param df_primers_dups: data frame with primer data from excel.
:param df_coords: data frame with chrom, start, end, name, exon, direction.
:return df_combined: data frame of merge between df_coords and df_primers_dups.
:return gene_name: this will be added to the Genes table and used to check if already in database.
"""
df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
# Merge based on Exon and Direction columns
df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
# There is already a Chromosome column in df_primers_dups
cols_to_drop = ['chrom']
df_combined = df_combined.drop(cols_to_drop, axis=1)
gene_name = df_combined.get_value(0, 'Gene')
return df_combined, gene_name
def check_in_db(self, gene):
"""Queries the database to check if data for a particular gene is already present.
:param gene: a gene name to check against the database.
:return result: query result which will be a gene if already in database and None if not.
"""
curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE '%s'" % gene)
result = curs.fetchone()
return result
def to_db(self, df_combined, gene_name):
"""Creates tables and adds data into the database.
Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and
performs data checks. If data for a particular gene is already in the database, it is overwritten and the
previous data is saved to an excel document (archived_files).
The commented out section should only be used for the first file to initially set up the tables.
:param gene_name: gene to check against database.
:param df_combined: data frame to be inserted into database.
:return info: description of action performed (for audit log).
:return archived_filename: filename the previous data is saved under (for audit log).
"""
# (1) Creates database schema
curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
# (2) Drops unnecessary columns to make two tables and removes duplicates.
primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
# (3) Performs data checks using CheckPrimers and CheckSNPs classes.
check = CheckPrimers(df_primertable, df_snptable)
total_errors, error_details = check.check_all()
# (4) Checks if gene data already in database.
uni_gene = '(u\'%s\',)' % gene_name
gene = self.check_in_db(gene_name) # this outputs a unicode string
# (5) Adds to database if no errors. Overwrites data if already present.
archived_filename = None
if total_errors == 0:
if str(uni_gene) == str(gene):
# Add query to data frame then save to excel.
get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
gene_name
today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
df_sql = pd.read_sql_query(get_old_query, con=con)
archived_filename = '%s_%s' % (gene_name, today_date)
writer = ExcelWriter('%s.xlsx' % archived_filename)
df_sql.to_excel(writer, '%s' % today_date, index=False)
writer.save()
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
info = "Data updated."
else:
info = "New gene added."
# Insert new data into SQL tables.
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
df_primertable.to_sql('Primers', con, if_exists='append', index=False)
df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
print "Primers successfully added to database."
else:
info = error_details
con.commit()
return info, archived_filename
def all(self):
"""Combines all methods"""
sheetname = self.get_sheet_name()
df_primers_dups, df_primers = self.get_primers(sheetname)
df_coords = self.get_coords(df_primers)
df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)
info, archived_filename = self.to_db(df_combined, gene)
return info, archived_filename
| names.append(item) | conditional_block |
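Each isPcr hit spans the whole PCR product, so step (3) of get_coords derives each primer's own interval from the product ends and the primer lengths. A sketch of that arithmetic with hypothetical coordinates:

product_start, product_end = 1000, 1250  # hypothetical product hit
fwd_len, rev_len = 20, 22                # hypothetical primer lengths
forward_interval = (product_start, product_start + fwd_len)
reverse_interval = (product_end - rev_len, product_end)
assert forward_interval == (1000, 1020) and reverse_interval == (1228, 1250)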
getprimers.py | import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
def __init__(self, excel_file, db):
self.excel_file = excel_file
self.db = db
global con, curs
con = lite.connect(self.db) # Creates a database if it doesn't already exist.
curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
def get_primers(self, sheetname):
"""Extracts primer data from sheet.
Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
duplicated rows). The df_primers data frame will go on to be used in the virtual PCR, so irrelevant columns
are dropped and any duplicate rows are removed.
:param sheetname: sheet data to be extracted from
:return df_primers_dups: data frame containing extracted data which may include duplicates.
:return df_primers: data frame containing only data necessary to get genome coordinates.
"""
df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
'action_to_take', 'check_by'],
sheetname=sheetname, index_col=None)
to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN
df_primers = df_primers_dups.drop(to_drop, axis=1)
df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_primers = df_primers.reset_index(drop=True)
return df_primers_dups, df_primers
def run_pcr(self, csv):
"""Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.
:param csv: a csv file is needed as input, with the format "name, forward, reverse".
:return bedfile: with results of virtual PCR if there is a match.
"""
print "Running virtual PCR..."
chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
for chr in chromosomes:
os.system(
"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
%s %s.tmp.psl" % (chr, csv, chr[:-5]))
pslfile = "%s.tmp.psl" % chr[:-5]
bedfile = "%s.tmp.bed" % chr[:-5]
# Only converts a non-empty psl file to a bed file, and removes all psl files in the folder.
# Note: the loop returns on the first non-empty psl, so later chromosomes are not searched,
# and the function implicitly returns None when no chromosome gives a hit.
if os.path.getsize(pslfile) != 0:
os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
os.system("rm %s" % pslfile)
return bedfile
else:
os.system("rm %s" % pslfile)
def get_coords(self, df_primers):
"""Generates csv file for virtual PCR and imports results into a pandas data frame.
:param df_primers: data frame of primer data.
:return df_coords: data frame with chromosome, start and end coordinates, and a name
(format "Gene_ExonDirection") for each primer.
"""
primer_list = []
names_dup = []
names = []
exons = []
dirs = []
start_coords = []
end_coords = []
chroms = []
seq_position = 0
list_position = 0
primer_seqs = pd.DataFrame([])
csv = '%s.csv' % self.excel_file[:-5]
csv = csv.replace(" ", "")
# (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
for row_index, row in df_primers.iterrows():
primer_list.append(str(row['Primer_seq']))
names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
exons.append(str(row['Exon']))
dirs.append(str(row['Direction']))
for item in names_dup:
if item not in names:
names.append(item)
forwards = primer_list[::2]
reverses = primer_list[1::2]
while list_position < len(forwards):
ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
primer_seqs = primer_seqs.append(ser, ignore_index=True)
list_position += 1
primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
# (2) Runs virtual PCR on generated csv.
bedfile = self.run_pcr(csv)
tool = BedTool(bedfile)
# (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
for row in tool:
chroms.append(row.chrom)
start_coords.append(row.start)
end_coords.append(row.start + len(primer_list[seq_position]))
chroms.append(row.chrom)
end_coords.append(row.end)
start_coords.append(row.end - len(primer_list[seq_position + 1]))
seq_position += 1
df_coords = pd.DataFrame([])
df_coords.insert(0, 'chrom', chroms)
df_coords.insert(1, 'start', start_coords)
df_coords.insert(2, 'end', end_coords)
df_coords.insert(3, 'name', names)
# (4) Generates a bed file from df_coords (not currently used in application).
bed = os.path.splitext(bedfile)[0]
df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t') # cannot directly convert to bed.
csv_file = BedTool('%s.csv' % bed)
csv_file.saveas('%s.bed' % bed)
df_coords.insert(4, 'Exon', exons) # not needed in the bed file, so added after.
df_coords.insert(5, 'Direction', dirs)
# Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
bed)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: a row of the data frame.
:return: the row's Exon value as a string.
"""
return str(row['Exon'])
def combine_coords_primers(self, df_coords, df_primers_dups):
"""Adds primer coordinates to original df_primers_dups data frame.
:param df_primers_dups: data frame with primer data from excel.
:param df_coords: data frame with chrom, start, end, name, exon, direction.
:return df_combined: data frame of merge between df_coords and df_primers_dups.
:return gene_name: this will be added to the Genes table and used to check if already in database.
"""
df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
# Merge based on Exon and Direction columns
df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
# There is already a Chromosome column in df_primers_dups
cols_to_drop = ['chrom']
df_combined = df_combined.drop(cols_to_drop, axis=1)
gene_name = df_combined.get_value(0, 'Gene')
return df_combined, gene_name
def check_in_db(self, gene):
"""Queries the database to check if data for a particular gene is already present.
:param gene: a gene name to check against the database.
:return result: query result which will be a gene if already in database and None if not.
"""
curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE '%s'" % gene)
result = curs.fetchone()
return result
def to_db(self, df_combined, gene_name):
|
def all(self):
"""Combines all methods"""
sheetname = self.get_sheet_name()
df_primers_dups, df_primers = self.get_primers(sheetname)
df_coords = self.get_coords(df_primers)
df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)
info, archived_filename = self.to_db(df_combined, gene)
return info, archived_filename
| """Creates tables and adds data into the database.
Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and
performs data checks. If data for a particular gene is already in the database, it is overwritten and the
previous data is saved to an excel document (archived_files).
The commented out section should only be used for the first file to initially set up the tables.
:param gene_name: gene to check against database.
:param df_combined: data frame to be inserted into database.
:return info: description of action performed (for audit log).
:return archived_filename: filename the previous data is saved under (for audit log).
"""
# (1) Creates database schema
curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
# (2) Drops unnecessary columns to make two tables and removes duplicates.
primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
# (3) Performs data checks using CheckPrimers and CheckSNPs classes.
check = CheckPrimers(df_primertable, df_snptable)
total_errors, error_details = check.check_all()
# (4) Checks if gene data already in database.
uni_gene = '(u\'%s\',)' % gene_name
gene = self.check_in_db(gene_name) # this outputs a unicode string
# (5) Adds to database if no errors. Overwrites data if already present.
archived_filename = None
if total_errors == 0:
if str(uni_gene) == str(gene):
# Add query to data frame then save to excel.
get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
gene_name
today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
df_sql = pd.read_sql_query(get_old_query, con=con)
archived_filename = '%s_%s' % (gene_name, today_date)
writer = ExcelWriter('%s.xlsx' % archived_filename)
df_sql.to_excel(writer, '%s' % today_date, index=False)
writer.save()
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
info = "Data updated."
else:
info = "New gene added."
# Insert new data into SQL tables.
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
df_primertable.to_sql('Primers', con, if_exists='append', index=False)
df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
print "Primers successfully added to database."
else:
info = error_details
con.commit()
return info, archived_filename | identifier_body |
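to_db interpolates gene names directly into SQL strings, which is fragile if a name ever contains a quote; sqlite3 placeholders avoid that. A self-contained sketch of the delete-then-insert pattern with parameterized queries (the table and gene name here are illustrative, not the app's schema):

import sqlite3

con = sqlite3.connect(":memory:")
curs = con.cursor()
curs.execute("CREATE TABLE Genes(Gene TEXT)")
gene_name = "BRCA1"  # hypothetical gene
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
curs.execute("DELETE FROM Genes WHERE Gene = ?", (gene_name,))
con.commit()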
lib.rs | /*
* Copyright 2015-2017 Two Pore Guys, Inc.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
extern crate libc;
extern crate block;
use std::fmt;
use std::ffi::{CString, CStr};
use std::collections::hash_map::HashMap;
use std::os::raw::{c_char, c_void};
use std::ptr::{null, null_mut};
use std::mem::transmute;
use std::cell::RefCell;
use std::rc::Weak;
use libc::free;
use block::{Block, ConcreteBlock};
macro_rules! to_cstr {
($e:expr) => (CString::new($e).unwrap())
}
macro_rules! null_block {
() => (transmute::<*mut c_void, _>(null_mut()))
}
#[repr(C)]
#[derive(Debug)]
pub enum RawType
{
Null,
Bool,
Uint64,
Int64,
Double,
Date,
String,
Binary,
Fd,
Dictionary,
Array,
Error,
}
#[repr(C)]
#[derive(Debug)]
pub enum CallStatus
{
InProgress,
MoreAvailable,
Done,
Error,
Aborted,
Ended
}
pub enum RawObject {}
pub enum RawConnection {}
pub enum RawClient {}
pub enum RawCall {}
pub struct Object
{
value: *mut RawObject,
}
pub struct Connection
{
value: *mut RawConnection
}
pub struct Client
{
value: *mut RawClient,
connection: Connection
}
pub struct Call<'a>
{
connection: &'a Connection,
value: *mut RawCall
}
pub struct Instance<'a>
{
connection: &'a Connection,
path: String
}
pub struct Interface<'a>
{
instance: &'a Instance<'a>,
name: String
}
#[derive(Clone, Debug)]
pub enum Value
{
Null,
Bool(bool),
Uint64(u64),
Int64(i64),
Double(f64),
Date(u64),
String(String),
Binary(Vec<u8>),
Array(Vec<Value>),
Dictionary(HashMap<String, Value>),
Object(Object),
Fd(i32),
Error(Error)
}
#[derive(Clone, Debug)]
pub struct |
{
code: u32,
message: String,
stack_trace: Box<Value>,
extra: Box<Value>
}
#[link(name = "rpc")]
extern {
/* rpc/object.h */
pub fn rpc_get_type(value: *mut RawObject) -> RawType;
pub fn rpc_hash(value: *mut RawObject) -> u32;
pub fn rpc_null_create() -> *mut RawObject;
pub fn rpc_bool_create(value: bool) -> *mut RawObject;
pub fn rpc_bool_get_value(value: *mut RawObject) -> bool;
pub fn rpc_uint64_create(value: u64) -> *mut RawObject;
pub fn rpc_uint64_get_value(value: *mut RawObject) -> u64;
pub fn rpc_int64_create(value: i64) -> *mut RawObject;
pub fn rpc_int64_get_value(value: *mut RawObject) -> i64;
pub fn rpc_double_create(value: f64) -> *mut RawObject;
pub fn rpc_double_get_value(value: *mut RawObject) -> f64;
pub fn rpc_date_create(value: u64) -> *mut RawObject;
pub fn rpc_date_get_value(obj: *mut RawObject) -> u64;
pub fn rpc_string_create(value: *const c_char) -> *mut RawObject;
pub fn rpc_string_get_string_ptr(value: *mut RawObject) -> *const c_char;
pub fn rpc_data_create(ptr: *const u8, len: usize, dtor: *const c_void) -> *mut RawObject;
pub fn rpc_array_create() -> *mut RawObject;
pub fn rpc_dictionary_create() -> *mut RawObject;
pub fn rpc_array_append_value(obj: *mut RawObject, value: *mut RawObject);
pub fn rpc_dictionary_set_value(obj: *mut RawObject, key: *const c_char, value: *mut RawObject);
pub fn rpc_fd_create(value: i32) -> *mut RawObject;
pub fn rpc_fd_get_value(obj: *mut RawObject) -> i32;
pub fn rpc_copy_description(value: *mut RawObject) -> *mut c_char;
pub fn rpc_retain(value: *mut RawObject) -> *mut RawObject;
pub fn rpc_release_impl(value: *mut RawObject);
/* rpc/connection.h */
pub fn rpc_connection_call(conn: *mut RawConnection, path: *const c_char,
interface: *const c_char, name: *const c_char,
args: *const RawObject,
callback: &Block<(*mut RawCall,), bool>) -> *mut RawCall;
pub fn rpc_call_status(call: *mut RawCall) -> CallStatus;
pub fn rpc_call_result(call: *mut RawCall) -> *mut RawObject;
pub fn rpc_call_continue(call: *mut RawCall);
pub fn rpc_call_abort(call: *mut RawCall);
pub fn rpc_call_wait(call: *mut RawCall);
/* rpc/client.h */
pub fn rpc_client_create(uri: *const c_char, params: *const RawObject) -> *mut RawClient;
pub fn rpc_client_get_connection(client: *mut RawClient) -> *mut RawConnection;
}
pub trait Create<T> {
fn create(value: T) -> Object;
}
impl Clone for Object {
fn clone(&self) -> Object {
unsafe {
return Object { value: rpc_retain(self.value) }
}
}
}
impl Drop for Object {
fn drop(&mut self) {
unsafe {
rpc_release_impl(self.value)
}
}
}
impl<T> Create<T> for Object where Value: std::convert::From<T> {
fn create(value: T) -> Object {
Object::new(Value::from(value))
}
}
impl From<bool> for Value {
fn from(value: bool) -> Value {
Value::Bool(value)
}
}
impl From<u64> for Value {
fn from(value: u64) -> Value {
Value::Uint64(value)
}
}
impl From<i64> for Value {
fn from(value: i64) -> Value {
Value::Int64(value)
}
}
impl From<f64> for Value {
fn from(value: f64) -> Value {
Value::Double(value)
}
}
impl<'a> From<&'a str> for Value {
fn from(value: &str) -> Value {
Value::String(String::from(value))
}
}
impl From<String> for Value {
fn from(value: String) -> Value {
Value::String(value)
}
}
impl From<Vec<u8>> for Value {
fn from(value: Vec<u8>) -> Value {
Value::Binary(value)
}
}
impl<'a> From<&'a [Value]> for Value {
fn from(value: &[Value]) -> Value {
Value::Array(value.to_vec())
}
}
impl From<Vec<Value>> for Value {
fn from(value: Vec<Value>) -> Value {
Value::Array(value)
}
}
impl<'a> From<HashMap<&'a str, Value>> for Value {
fn from(value: HashMap<&str, Value>) -> Value {
Value::Dictionary(value.iter().map( | ( & k, v) |
(String::from(k), v.clone())
).collect())
}
}
impl From<HashMap<String, Value>> for Value {
fn from(value: HashMap<String, Value>) -> Value {
Value::Dictionary(value)
}
}
impl Object {
pub fn new(value: Value) -> Object {
unsafe {
let obj = match value {
Value::Null => rpc_null_create(),
Value::Bool(val) => rpc_bool_create(val),
Value::Uint64(val) => rpc_uint64_create(val),
Value::Int64(val) => rpc_int64_create(val),
Value::Double(val) => rpc_double_create(val),
Value::Date(val) => rpc_date_create(val),
Value::Fd(val) => rpc_fd_create(val),
Value::Binary(ref val) => rpc_data_create(val.as_ptr(), val.len(), null()),
Value::Object(ref val) => rpc_retain(val.value),
Value::String(ref val) => {
let c_val = to_cstr!(val.as_str());
rpc_string_create(c_val.as_ptr())
},
Value::Array(val) => {
let arr = rpc_array_create();
for i in val {
rpc_array_append_value(arr, Object::new(i).value);
}
arr
},
Value::Dictionary(val) => {
let dict = rpc_dictionary_create();
for (k, v) in val {
let c_key = to_cstr!(k.as_str());
rpc_dictionary_set_value(dict, c_key.as_ptr(), Object::new(v).value);
}
dict
},
Value::Error(_) => {
    // Serializing errors is not implemented yet; emit a null placeholder.
    rpc_null_create()
}
};
return Object { value: obj };
}
}
pub fn get_raw_type(&self) -> RawType {
unsafe {
rpc_get_type(self.value)
}
}
pub fn unpack(&self) -> Value {
unsafe {
match self.get_raw_type() {
RawType::Null => Value::Null,
RawType::Bool => Value::Bool(rpc_bool_get_value(self.value)),
RawType::Uint64 => Value::Uint64(rpc_uint64_get_value(self.value)),
RawType::Int64 => Value::Int64(rpc_int64_get_value(self.value)),
RawType::Double => Value::Double(rpc_double_get_value(self.value)),
RawType::String => Value::String(String::from(CStr::from_ptr(
rpc_string_get_string_ptr(self.value)).to_str().unwrap())),
RawType::Date => Value::Date(rpc_date_get_value(self.value)),
RawType::Binary => Value::Null, // binary unpacking not implemented yet
RawType::Fd => Value::Fd(rpc_fd_get_value(self.value)),
RawType::Array => Value::Null, // array unpacking not implemented yet
RawType::Dictionary => Value::Null, // dictionary unpacking not implemented yet
RawType::Error => Value::Null, // error unpacking not implemented yet
}
}
}
}
impl std::hash::Hash for Object {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
    // Delegate to librpc's own hash of the underlying object.
    unsafe {
        state.write_u32(rpc_hash(self.value));
    }
}
}
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let descr = rpc_copy_description(self.value);
let str = CString::from_raw(descr);
let result = f.write_str(str.to_str().unwrap());
free(descr as *mut libc::c_void);
result
}
}
}
impl<'a> Call<'a> {
pub fn result(&self) -> Option<Value> {
unsafe {
let result = rpc_call_result(self.value);
match result.is_null() {
true => Option::None,
false => Option::Some(Object { value: result }.unpack())
}
}
}
pub fn status(&self) -> CallStatus {
unsafe {
rpc_call_status(self.value)
}
}
pub fn abort(&mut self) {
unsafe {
rpc_call_abort(self.value);
}
}
pub fn resume(&mut self) {
    unsafe {
        rpc_call_continue(self.value);
    }
}
pub fn wait(&mut self) {
unsafe {
rpc_call_wait(self.value);
}
}
}
impl Connection {
pub fn call(&self, name: &str, path: &str, interface: &str, args: &[Value]) -> Call {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let call = rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, null_block!()
);
Call { value: call, connection: self }
}
}
pub fn call_sync(&self, name: &str, path: &str, interface: &str,
args: &[Value]) -> Option<Value> {
let mut c = self.call(name, path, interface, args);
c.wait();
c.result()
}
pub fn call_async(&self, name: &str, path: &str, interface: &str, args: &[Value],
callback: Box<Fn(&Call) -> bool>) {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let block = ConcreteBlock::new(move |raw_call| {
let call = Call { connection: self, value: raw_call };
callback(&call)
});
rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, &block
);
}
}
}
impl Client {
pub fn connect(uri: &str) -> Client {
unsafe {
let c_uri = to_cstr!(uri);
let client = rpc_client_create(c_uri.as_ptr(), null());
Client {
value: client,
connection: Connection { value: rpc_client_get_connection(client)}
}
}
}
pub fn connection(&self) -> &Connection {
&self.connection
}
pub fn instance(&self, path: &str) -> Instance {
Instance { connection: self.connection(), path: String::from(path) }
}
}
impl<'a> Instance<'a> {
pub fn interfaces(&self) -> HashMap<String, Interface> {
// NOTE: call_sync yields an Option<Value>; converting that Value into
// a HashMap<String, Interface> is still unimplemented, so this body
// does not type-check as written.
self.connection.call_sync(
"get_interfaces",
self.path.as_str(),
"com.twoporeguys.librpc.Introspectable",
&[][..]
).unwrap()
}
pub fn interface(&self, name: &str) -> Interface {
Interface { instance: self, name: String::from(name) }
}
}
// These high-level wrappers are still stubs in the original source; the
// signatures below add the missing receivers and error types so the
// block at least type-checks.
impl<'a> Interface<'a> {
pub fn call(&self, method: &str, args: &[&Value]) -> Call {
unimplemented!()
}
pub fn call_sync(&self, method: &str, args: &[&Value]) -> Result<Value, Error> {
unimplemented!()
}
pub fn get(&self, property: &str) -> Result<Value, Error> {
unimplemented!()
}
pub fn set(&self, property: &str, value: &Value) -> Result<(), Error> {
unimplemented!()
}
}
| Error | identifier_name |
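For orientation, a minimal, hypothetical usage sketch of the bindings in this row; the crate name `librpc` and the availability of the linked C library are assumptions, not shown in the original source.

```rust
// Hypothetical round-trip through Object::new / Object::unpack above.
extern crate librpc; // assumed crate name

use librpc::{Object, Value};

fn main() {
    // Pack a Rust value into a librpc object, then read it back out.
    let obj = Object::new(Value::Int64(-42));
    match obj.unpack() {
        Value::Int64(n) => println!("round-tripped: {}", n),
        other => println!("unexpected variant: {:?}", other),
    }
}
```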
lib.rs | /*
* Copyright 2015-2017 Two Pore Guys, Inc.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
extern crate libc;
extern crate block;
use std::fmt;
use std::ffi::{CString, CStr};
use std::collections::hash_map::HashMap;
use std::os::raw::{c_char, c_void};
use std::ptr::{null, null_mut};
use std::mem::transmute;
use std::cell::RefCell;
use std::rc::Weak;
use libc::free;
use block::{Block, ConcreteBlock};
macro_rules! to_cstr {
($e:expr) => (CString::new($e).unwrap())
}
macro_rules! null_block {
() => (transmute::<*mut c_void, _>(null_mut()))
}
#[repr(C)]
#[derive(Debug)]
pub enum RawType
{
Null,
Bool, | Uint64,
Int64,
Double,
Date,
String,
Binary,
Fd,
Dictionary,
Array,
Error,
}
#[repr(C)]
#[derive(Debug)]
pub enum CallStatus
{
InProgress,
MoreAvailable,
Done,
Error,
Aborted,
Ended
}
pub enum RawObject {}
pub enum RawConnection {}
pub enum RawClient {}
pub enum RawCall {}
pub struct Object
{
value: *mut RawObject,
}
pub struct Connection
{
value: *mut RawConnection
}
pub struct Client
{
value: *mut RawClient,
connection: Connection
}
pub struct Call<'a>
{
connection: &'a Connection,
value: *mut RawCall
}
pub struct Instance<'a>
{
connection: &'a Connection,
path: String
}
pub struct Interface<'a>
{
instance: &'a Instance<'a>,
name: String
}
#[derive(Clone, Debug)]
pub enum Value
{
Null,
Bool(bool),
Uint64(u64),
Int64(i64),
Double(f64),
Date(u64),
String(String),
Binary(Vec<u8>),
Array(Vec<Value>),
Dictionary(HashMap<String, Value>),
Object(Object),
Fd(i32),
Error(Error)
}
#[derive(Clone, Debug)]
pub struct Error
{
code: u32,
message: String,
stack_trace: Box<Value>,
extra: Box<Value>
}
#[link(name = "rpc")]
extern {
/* rpc/object.h */
pub fn rpc_get_type(value: *mut RawObject) -> RawType;
pub fn rpc_hash(value: *mut RawObject) -> u32;
pub fn rpc_null_create() -> *mut RawObject;
pub fn rpc_bool_create(value: bool) -> *mut RawObject;
pub fn rpc_bool_get_value(value: *mut RawObject) -> bool;
pub fn rpc_uint64_create(value: u64) -> *mut RawObject;
pub fn rpc_uint64_get_value(value: *mut RawObject) -> u64;
pub fn rpc_int64_create(value: i64) -> *mut RawObject;
pub fn rpc_int64_get_value(value: *mut RawObject) -> i64;
pub fn rpc_double_create(value: f64) -> *mut RawObject;
pub fn rpc_double_get_value(value: *mut RawObject) -> f64;
pub fn rpc_date_create(value: u64) -> *mut RawObject;
pub fn rpc_date_get_value(obj: *mut RawObject) -> u64;
pub fn rpc_string_create(value: *const c_char) -> *mut RawObject;
pub fn rpc_string_get_string_ptr(value: *mut RawObject) -> *const c_char;
pub fn rpc_data_create(ptr: *const u8, len: usize, dtor: *const c_void) -> *mut RawObject;
pub fn rpc_array_create() -> *mut RawObject;
pub fn rpc_dictionary_create() -> *mut RawObject;
pub fn rpc_array_append_value(obj: *mut RawObject, value: *mut RawObject);
pub fn rpc_dictionary_set_value(obj: *mut RawObject, key: *const c_char, value: *mut RawObject);
pub fn rpc_fd_create(value: i32) -> *mut RawObject;
pub fn rpc_fd_get_value(obj: *mut RawObject) -> i32;
pub fn rpc_copy_description(value: *mut RawObject) -> *mut c_char;
pub fn rpc_retain(value: *mut RawObject) -> *mut RawObject;
pub fn rpc_release_impl(value: *mut RawObject);
/* rpc/connection.h */
pub fn rpc_connection_call(conn: *mut RawConnection, path: *const c_char,
interface: *const c_char, name: *const c_char,
args: *const RawObject,
callback: &Block<(*mut RawCall,), bool>) -> *mut RawCall;
pub fn rpc_call_status(call: *mut RawCall) -> CallStatus;
pub fn rpc_call_result(call: *mut RawCall) -> *mut RawObject;
pub fn rpc_call_continue(call: *mut RawCall);
pub fn rpc_call_abort(call: *mut RawCall);
pub fn rpc_call_wait(call: *mut RawCall);
/* rpc/client.h */
pub fn rpc_client_create(uri: *const c_char, params: *const RawObject) -> *mut RawClient;
pub fn rpc_client_get_connection(client: *mut RawClient) -> *mut RawConnection;
}
pub trait Create<T> {
fn create(value: T) -> Object;
}
impl Clone for Object {
fn clone(&self) -> Object {
unsafe {
return Object { value: rpc_retain(self.value) }
}
}
}
impl Drop for Object {
fn drop(&mut self) {
unsafe {
rpc_release_impl(self.value)
}
}
}
impl<T> Create<T> for Object where Value: std::convert::From<T> {
fn create(value: T) -> Object {
Object::new(Value::from(value))
}
}
impl From<bool> for Value {
fn from(value: bool) -> Value {
Value::Bool(value)
}
}
impl From<u64> for Value {
fn from(value: u64) -> Value {
Value::Uint64(value)
}
}
impl From<i64> for Value {
fn from(value: i64) -> Value {
Value::Int64(value)
}
}
impl From<f64> for Value {
fn from(value: f64) -> Value {
Value::Double(value)
}
}
impl<'a> From<&'a str> for Value {
fn from(value: &str) -> Value {
Value::String(String::from(value))
}
}
impl From<String> for Value {
fn from(value: String) -> Value {
Value::String(value)
}
}
impl From<Vec<u8>> for Value {
fn from(value: Vec<u8>) -> Value {
Value::Binary(value)
}
}
impl<'a> From<&'a [Value]> for Value {
fn from(value: &[Value]) -> Value {
Value::Array(value.to_vec())
}
}
impl From<Vec<Value>> for Value {
fn from(value: Vec<Value>) -> Value {
Value::Array(value)
}
}
impl<'a> From<HashMap<&'a str, Value>> for Value {
fn from(value: HashMap<&str, Value>) -> Value {
Value::Dictionary(value.iter().map(|(&k, v)|
(String::from(k), v.clone())
).collect())
}
}
impl From<HashMap<String, Value>> for Value {
fn from(value: HashMap<String, Value>) -> Value {
Value::Dictionary(value)
}
}
impl Object {
pub fn new(value: Value) -> Object {
unsafe {
let obj = match value {
Value::Null => rpc_null_create(),
Value::Bool(val) => rpc_bool_create(val),
Value::Uint64(val) => rpc_uint64_create(val),
Value::Int64(val) => rpc_int64_create(val),
Value::Double(val) => rpc_double_create(val),
Value::Date(val) => rpc_date_create(val),
Value::Fd(val) => rpc_fd_create(val),
Value::Binary(ref val) => rpc_data_create(val.as_ptr(), val.len(), null()),
Value::Object(ref val) => rpc_retain(val.value),
Value::String(ref val) => {
let c_val = to_cstr!(val.as_str());
rpc_string_create(c_val.as_ptr())
},
Value::Array(val) => {
let arr = rpc_array_create();
for i in val {
rpc_array_append_value(arr, Object::new(i).value);
}
arr
},
Value::Dictionary(val) => {
let dict = rpc_dictionary_create();
for (k, v) in val {
let c_key = to_cstr!(k.as_str());
rpc_dictionary_set_value(dict, c_key.as_ptr(), Object::new(v).value);
}
dict
},
Value::Error(_) => {
// Error serialization is not implemented yet; emit a null placeholder.
rpc_null_create()
}
};
return Object { value: obj };
}
}
pub fn get_raw_type(&self) -> RawType {
unsafe {
rpc_get_type(self.value)
}
}
pub fn unpack(&self) -> Value {
unsafe {
match self.get_raw_type() {
RawType::Null => Value::Null,
RawType::Bool => Value::Bool(rpc_bool_get_value(self.value)),
RawType::Uint64 => Value::Uint64(rpc_uint64_get_value(self.value)),
RawType::Int64 => Value::Int64(rpc_int64_get_value(self.value)),
RawType::Double => Value::Double(rpc_double_get_value(self.value)),
RawType::String => Value::String(String::from(CStr::from_ptr(
rpc_string_get_string_ptr(self.value)).to_str().unwrap())),
RawType::Date => Value::Date(rpc_date_get_value(self.value)),
// Binary, Array, Dictionary and Error unpacking is not implemented
// yet; those types fall back to Value::Null for now.
RawType::Binary => Value::Null,
RawType::Fd => Value::Fd(rpc_fd_get_value(self.value)),
RawType::Array => Value::Null,
RawType::Dictionary => Value::Null,
RawType::Error => Value::Null,
}
}
}
}
impl std::hash::Hash for Object {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
// Delegate to librpc's own hash of the underlying object.
state.write_u32(unsafe { rpc_hash(self.value) });
}
}
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let descr = rpc_copy_description(self.value);
// Borrow with CStr::from_ptr instead of taking ownership via
// CString::from_raw: the buffer is malloc'ed by C, so it must be
// released exactly once with free(), not also by Rust's allocator.
let result = f.write_str(CStr::from_ptr(descr).to_str().unwrap());
free(descr as *mut libc::c_void);
result
}
}
}
impl<'a> Call<'a> {
pub fn result(&self) -> Option<Value> {
unsafe {
let result = rpc_call_result(self.value);
match result.is_null() {
true => Option::None,
false => Option::Some(Object { value: result }.unpack())
}
}
}
pub fn status(&self) -> CallStatus {
unsafe {
rpc_call_status(self.value)
}
}
pub fn abort(&mut self) {
unsafe {
rpc_call_abort(self.value);
}
}
pub fn resume(&mut self) {
unsafe {
// rpc_call_continue resumes delivery of streaming call results.
rpc_call_continue(self.value);
}
}
pub fn wait(&mut self) {
unsafe {
rpc_call_wait(self.value);
}
}
}
impl Connection {
pub fn call(&self, name: &str, path: &str, interface: &str, args: &[Value]) -> Call {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let call = rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, null_block!()
);
Call { value: call, connection: self }
}
}
pub fn call_sync(&self, name: &str, path: &str, interface: &str,
args: &[Value]) -> Option<Value> {
let mut c = self.call(name, path, interface, args);
c.wait();
c.result()
}
pub fn call_async(&self, name: &str, path: &str, interface: &str, args: &[Value],
callback: Box<Fn(&Call) -> bool>) {
unsafe {
let c_path = to_cstr!(path);
let c_interface = to_cstr!(interface);
let c_name = to_cstr!(name);
let block = ConcreteBlock::new(move |raw_call| {
let call = Call { connection: self, value: raw_call };
callback(&call)
});
rpc_connection_call(
self.value, c_path.as_ptr(), c_interface.as_ptr(), c_name.as_ptr(),
Object::create(args).value, &block
);
}
}
}
impl Client {
pub fn connect(uri: &str) -> Client {
unsafe {
let c_uri = to_cstr!(uri);
let client = rpc_client_create(c_uri.as_ptr(), null());
Client {
value: client,
connection: Connection { value: rpc_client_get_connection(client)}
}
}
}
pub fn connection(&self) -> &Connection {
&self.connection
}
pub fn instance(&self, path: &str) -> Instance {
Instance { connection: self.connection(), path: String::from(path) }
}
}
impl<'a> Instance<'a> {
pub fn interfaces(&self) -> HashMap<String, Interface> {
// NOTE: call_sync yields an Option<Value>; converting that Value into
// a HashMap<String, Interface> is still unimplemented, so this body
// does not type-check as written.
self.connection.call_sync(
"get_interfaces",
self.path.as_str(),
"com.twoporeguys.librpc.Introspectable",
&[][..]
).unwrap()
}
pub fn interface(&self, name: &str) -> Interface {
Interface { instance: self, name: String::from(name) }
}
}
// These high-level wrappers are still stubs in the original source; the
// signatures below add the missing receivers and error types so the
// block at least type-checks.
impl<'a> Interface<'a> {
pub fn call(&self, method: &str, args: &[&Value]) -> Call {
unimplemented!()
}
pub fn call_sync(&self, method: &str, args: &[&Value]) -> Result<Value, Error> {
unimplemented!()
}
pub fn get(&self, property: &str) -> Result<Value, Error> {
unimplemented!()
}
pub fn set(&self, property: &str, value: &Value) -> Result<(), Error> {
unimplemented!()
}
} | random_line_split | |
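A hedged end-to-end sketch of the `Client`/`Connection` layer defined above; the URI, object path, and interface string are placeholders, and a librpc server must actually be listening for the call to succeed.

```rust
use librpc::{Client, Value}; // crate name is an assumption

fn main() {
    let client = Client::connect("ws://127.0.0.1:5002/ws"); // placeholder URI
    let reply = client.connection().call_sync(
        "ping",                          // method name (placeholder)
        "/server",                       // object path (placeholder)
        "com.twoporeguys.librpc.Server", // interface (assumed)
        &[Value::Null],
    );
    println!("reply: {:?}", reply);
}
```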
pirep.py | """Pilot Reports (PIREP)
This module attempts to process and store atomic data from PIREPs. These are
encoded products that look like so:
UBUS01 KMSC 221700
EAU UA /OV EAU360030/TM 1715/FL350/TP B737/TB CONT LGT-MOD CHOP =
EHY UA /OV MBW253036 /TM 1729 /FL105 /TP C206 /SK FEW250 /TA M06
/TB NEG /RM SMTH=
Unfortunately, there is not much documentation of this format and the feed of
this data contains a bunch of formatting errors.
"""
from enum import Enum
import datetime
import re
import math
from pydantic import BaseModel
import pyiem.nws.product as product
from pyiem.datatypes import distance
from pyiem.util import html_escape, LOG
OV_LATLON = re.compile(
(
r"\s?(?P<lat>[0-9]{3,4})(?P<latsign>[NS])"
r"\s?(?P<lon>[0-9]{3,5})(?P<lonsign>[EW])"
)
)
OV_LOCDIR = re.compile(
r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_TWOLOC = re.compile(
r"(?P<loc1>[A-Z0-9]{3,4})\s?-\s?(?P<loc2>[A-Z0-9]{3,4})"
)
OV_OFFSET = re.compile(
(
r"(?P<dist>[0-9]{1,3})\s?"
"(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)
)
DRCT2DIR = {
"N": 0,
"NNE": 22.5,
"NE": 45,
"ENE": 67.5,
"E": 90,
"ESE": 112.5,
"SE": 135,
"SSE": 157.5,
"S": 180,
"SSW": 202.5,
"SW": 225,
"WSW": 247.5,
"W": 270,
"WNW": 292.5,
"NW": 305,
"NNW": 327.5,
"NORTH": 0,
"EAST": 90,
"SOUTH": 180,
"WEST": 270,
}
class Priority(str, Enum):
"""Types of reports."""
def __str__(self):
"""When we want the str repr."""
return str(self.value)
UA = "UA"
UUA = "UUA"
class | (BaseModel):
""" A Pilot Report. """
base_loc: str = None
text: str = None
priority: Priority = None
latitude: float = None
longitude: float = None
valid: datetime.datetime = None
cwsu: str = None
aircraft_type: str = None
is_duplicate: bool = False
class Pirep(product.TextProduct):
""" Class for parsing and representing Space Wx Products. """
def __init__(
self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
):
""" constructor """
product.TextProduct.__init__(
self,
text,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
self.reports = []
self.parse_reports()
def parse_reports(self):
"""Actually do the parsing of the product that generates the reports
stored within the self.reports list"""
txt = (
self.unixtext
if self.unixtext[:2] != "\001\n"
else self.unixtext[2:]
)
lines = txt.split("\n")
# There may be an AWIPSID in line 3 or silly aviation control char
pos = 3 if len(lines[2]) < 10 or lines[2].startswith("\x1e") else 2
meat = "".join(lines[pos:])
for report in meat.split("="):
if report.strip() == "":
continue
res = self.process_pirep(" ".join(report.strip().split()))
if res is not None:
self.reports.append(res)
def process_pirep(self, report):
""" Convert this report text into an actual PIREP object """
_pr = PilotReport()
_pr.text = report
for i, token in enumerate(report.split("/")):
token = token.strip()
# First token is always priority
if i == 0:
if len(token) > 10:
LOG.info("Aborting as not-PIREP? |%s|", report)
return
if token.find(" UUA") > 0:
_pr.priority = Priority.UUA
else:
_pr.priority = Priority.UA
parts = token.split()
if len(parts) == 2:
_pr.base_loc = parts[0]
if len(_pr.base_loc) == 4 and _pr.base_loc[0] == "K":
_pr.base_loc = _pr.base_loc[1:]
continue
# Aircraft Type
if token.startswith("TP "):
_pr.aircraft_type = token[3:]
# Location
if token.startswith("OV "):
dist = 0
bearing = 0
therest = token[3:]
if len(therest) == 3:
loc = therest
elif therest.startswith("FINAL RWY"):
loc = report[:8].split()[0]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
elif len(therest) == 4:
if therest[0] == "K":
loc = therest[1:]
else:
loc = therest
elif re.match(OV_OFFSET, therest):
d = re.match(OV_OFFSET, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
dist = int(d["dist"])
bearing = DRCT2DIR[d["dir"]]
elif re.match(OV_LOCDIR, therest):
# KFAR330008
d = re.match(OV_LOCDIR, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
bearing = int(d["dir"])
dist = int(d["dist"])
elif re.match(OV_LATLON, therest):
# 2500N07000W
# FMH-12 says this is in degrees and minutes!
d = re.match(OV_LATLON, therest).groupdict()
_pr.latitude = float(
"%s.%i"
% (
d["lat"][:-2],
int(float(d["lat"][-2:]) / 60.0 * 10000.0),
)
)
if d["latsign"] == "S":
_pr.latitude = 0 - _pr.latitude
_pr.longitude = float(
"%s.%i"
% (
d["lon"][:-2],
int(float(d["lon"][-2:]) / 60.0 * 10000.0),
)
)
if d["lonsign"] == "W":
_pr.longitude = 0 - _pr.longitude
continue
elif therest == "O":
# Use the first part of the report in this case
loc = report[:3]
elif therest.find("-") > 0 and re.match(OV_TWOLOC, therest):
d = re.match(OV_TWOLOC, therest).groupdict()
numbers = re.findall("[0-9]{6}", therest)
if numbers:
bearing = int(numbers[0][:3])
dist = int(numbers[0][3:])
loc = d["loc2"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
else:
# Split the distance between the two points
lats = []
lons = []
for loc in [d["loc1"], d["loc2"]]:
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
if loc not in self.nwsli_provider:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
lats.append(self.nwsli_provider[loc]["lat"])
lons.append(self.nwsli_provider[loc]["lon"])
_pr.latitude = sum(lats) / 2.0
_pr.longitude = sum(lons) / 2.0
continue
else:
loc = therest[:3]
if loc not in self.nwsli_provider:
if _pr.base_loc is None:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
loc = _pr.base_loc
if loc not in self.nwsli_provider:
self.warnings.append(
f"Double-unknown location: {report}"
)
return None
# So we discard the offset when we go back to the base
dist = 0
bearing = 0
_pr.longitude, _pr.latitude = self.compute_loc(
loc, dist, bearing
)
continue
# Time
if token.startswith("TM "):
numbers = re.findall("[0-9]{4}", token)
if len(numbers) != 1:
self.warnings.append("TM parse failed %s" % (report,))
return None
hour = int(numbers[0][:2])
minute = int(numbers[0][2:])
_pr.valid = self.compute_pirep_valid(hour, minute)
continue
return _pr if _pr.latitude is not None else None
def compute_loc(self, loc, dist, bearing):
""" Figure out the lon/lat for this location """
lat = self.nwsli_provider[loc]["lat"]
lon = self.nwsli_provider[loc]["lon"]
# shortcut
if dist == 0:
return lon, lat
meters = distance(float(dist), "MI").value("M")
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (
meters
* math.sin(math.radians(bearing))
/ math.cos(math.radians(lat))
/ 111111.0
)
return lon + easting, lat + northing
def compute_pirep_valid(self, hour, minute):
""" Based on what utcnow is set to, compute when this is valid """
res = self.utcnow.replace(
hour=hour, minute=minute, second=0, microsecond=0
)
if hour > self.utcnow.hour:
res -= datetime.timedelta(hours=24)
return res
def sql(self, txn):
""" Save the reports to the database via the transaction """
for report in self.reports:
if report.is_duplicate:
continue
txn.execute(
"INSERT into pireps(valid, geom, is_urgent, "
"aircraft_type, report) VALUES (%s, "
"ST_GeographyFromText('SRID=4326;POINT(%s %s)'),%s,%s,%s)",
(
report.valid,
report.longitude,
report.latitude,
report.priority == Priority.UUA,
report.aircraft_type,
report.text,
),
)
def assign_cwsu(self, txn):
""" Use this transaction object to assign CWSUs for the pireps """
for report in self.reports:
txn.execute(
"select distinct id from cwsu WHERE "
"st_contains(geom, geomFromEWKT('SRID=4326;POINT(%s %s)'))",
(report.longitude, report.latitude),
)
if txn.rowcount == 0:
# self.warnings.append("Find CWSU failed %.3f %.3f %s" % (
# report.longitude, report.latitude, report.text))
continue
row = txn.fetchone()
report.cwsu = row["id"]
def get_jabbers(self, _uri, _uri2=None):
""" get jabber messages """
res = []
for report in self.reports:
if report.is_duplicate or report.valid is None:
continue
jmsg = {
"priority": "Urgent"
if report.priority == Priority.UUA
else "Routine",
"ts": report.valid.strftime("%H%M"),
"report": html_escape(report.text),
"color": (
"#ff0000" if report.priority == Priority.UUA else "#00ff00"
),
}
plain = "%(priority)s pilot report at %(ts)sZ: %(report)s" % jmsg
html = (
"<span style='color:%(color)s;'>%(priority)s pilot "
"report</span> at %(ts)sZ: %(report)s"
) % jmsg
xtra = {
"channels": (
f"{report.priority}.{report.cwsu},{report.priority}.PIREP"
),
"geometry": "POINT(%s %s)"
% (report.longitude, report.latitude),
"ptype": report.priority,
"category": "PIREP",
"twitter": plain[:140],
"valid": report.valid.strftime("%Y%m%dT%H:%M:00"),
}
res.append([plain, html, xtra])
return res
def parser(buf, utcnow=None, ugc_provider=None, nwsli_provider=None):
""" A parser implementation """
return Pirep(
buf,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
| PilotReport | identifier_name |
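A hedged sketch of feeding a product through `parser()` above; the import path, the DSM station entry, and the report text are illustrative assumptions, and the exact WMO framing `TextProduct` expects may need adjusting.

```python
import datetime
from pyiem.nws.products.pirep import parser  # assumed import path

utcnow = datetime.datetime(2021, 3, 1, 17, 30)
nwsli = {"DSM": {"lat": 41.53, "lon": -93.65}}  # made-up station table
text = (
    "000 \r\r\n"
    "UBUS01 KMSC 011700\r\r\n"
    "DSM UA /OV DSM180020 /TM 1715 /FL350 /TP B737 /TB LGT=\r\r\n"
)
prod = parser(text, utcnow=utcnow, nwsli_provider=nwsli)
for rpt in prod.reports:
    print(rpt.valid, rpt.latitude, rpt.longitude, rpt.priority)
```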
pirep.py | """Pilot Reports (PIREP)
This module attempts to process and store atomic data from PIREPs. These are
encoded products that look like so:
UBUS01 KMSC 221700
EAU UA /OV EAU360030/TM 1715/FL350/TP B737/TB CONT LGT-MOD CHOP =
EHY UA /OV MBW253036 /TM 1729 /FL105 /TP C206 /SK FEW250 /TA M06
/TB NEG /RM SMTH=
Unfortunately, there is not much documentation of this format and the feed of
this data contains a bunch of formatting errors.
"""
from enum import Enum
import datetime
import re
import math
from pydantic import BaseModel
import pyiem.nws.product as product
from pyiem.datatypes import distance
from pyiem.util import html_escape, LOG
OV_LATLON = re.compile(
(
r"\s?(?P<lat>[0-9]{3,4})(?P<latsign>[NS])"
r"\s?(?P<lon>[0-9]{3,5})(?P<lonsign>[EW])"
)
)
OV_LOCDIR = re.compile(
r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_TWOLOC = re.compile(
r"(?P<loc1>[A-Z0-9]{3,4})\s?-\s?(?P<loc2>[A-Z0-9]{3,4})"
)
OV_OFFSET = re.compile(
(
r"(?P<dist>[0-9]{1,3})\s?"
"(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)
)
DRCT2DIR = {
"N": 0,
"NNE": 22.5,
"NE": 45,
"ENE": 67.5,
"E": 90,
"ESE": 112.5,
"SE": 135,
"SSE": 157.5,
"S": 180,
"SSW": 202.5,
"SW": 225,
"WSW": 247.5,
"W": 270,
"WNW": 292.5,
"NW": 305,
"NNW": 327.5,
"NORTH": 0,
"EAST": 90,
"SOUTH": 180,
"WEST": 270,
}
class Priority(str, Enum):
"""Types of reports."""
def __str__(self):
"""When we want the str repr."""
return str(self.value)
UA = "UA"
UUA = "UUA"
class PilotReport(BaseModel):
""" A Pilot Report. """
base_loc: str = None
text: str = None
priority: Priority = None
latitude: float = None
longitude: float = None
valid: datetime.datetime = None
cwsu: str = None
aircraft_type: str = None
is_duplicate: bool = False
class Pirep(product.TextProduct):
""" Class for parsing and representing Space Wx Products. """
def __init__(
self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
):
""" constructor """
product.TextProduct.__init__(
self,
text,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
self.reports = []
self.parse_reports()
def parse_reports(self):
"""Actually do the parsing of the product that generates the reports
stored within the self.reports list"""
txt = (
self.unixtext
if self.unixtext[:2] != "\001\n"
else self.unixtext[2:]
)
lines = txt.split("\n")
# There may be an AWIPSID in line 3 or silly aviation control char
pos = 3 if len(lines[2]) < 10 or lines[2].startswith("\x1e") else 2
meat = "".join(lines[pos:])
for report in meat.split("="):
if report.strip() == "":
continue
res = self.process_pirep(" ".join(report.strip().split()))
if res is not None:
self.reports.append(res)
def process_pirep(self, report):
""" Convert this report text into an actual PIREP object """
_pr = PilotReport()
_pr.text = report
for i, token in enumerate(report.split("/")):
token = token.strip()
# First token is always priority
if i == 0:
if len(token) > 10:
LOG.info("Aborting as not-PIREP? |%s|", report)
return
if token.find(" UUA") > 0:
_pr.priority = Priority.UUA
else:
_pr.priority = Priority.UA
parts = token.split()
if len(parts) == 2:
_pr.base_loc = parts[0]
if len(_pr.base_loc) == 4 and _pr.base_loc[0] == "K":
_pr.base_loc = _pr.base_loc[1:]
continue
# Aircraft Type
if token.startswith("TP "):
_pr.aircraft_type = token[3:]
# Location
if token.startswith("OV "):
dist = 0
bearing = 0
therest = token[3:]
if len(therest) == 3:
loc = therest
elif therest.startswith("FINAL RWY"):
loc = report[:8].split()[0]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
elif len(therest) == 4:
if therest[0] == "K":
loc = therest[1:]
else:
loc = therest
elif re.match(OV_OFFSET, therest):
d = re.match(OV_OFFSET, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
dist = int(d["dist"])
bearing = DRCT2DIR[d["dir"]]
elif re.match(OV_LOCDIR, therest):
# KFAR330008
d = re.match(OV_LOCDIR, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
bearing = int(d["dir"])
dist = int(d["dist"])
elif re.match(OV_LATLON, therest):
# 2500N07000W
# FMH-12 says this is in degrees and minutes!
d = re.match(OV_LATLON, therest).groupdict()
_pr.latitude = float(
"%s.%i"
% (
d["lat"][:-2],
int(float(d["lat"][-2:]) / 60.0 * 10000.0),
)
)
if d["latsign"] == "S":
_pr.latitude = 0 - _pr.latitude
_pr.longitude = float(
"%s.%i"
% (
d["lon"][:-2],
int(float(d["lon"][-2:]) / 60.0 * 10000.0),
)
)
if d["lonsign"] == "W":
_pr.longitude = 0 - _pr.longitude
continue
elif therest == "O":
# Use the first part of the report in this case
loc = report[:3]
elif therest.find("-") > 0 and re.match(OV_TWOLOC, therest):
d = re.match(OV_TWOLOC, therest).groupdict()
numbers = re.findall("[0-9]{6}", therest)
if numbers:
bearing = int(numbers[0][:3])
dist = int(numbers[0][3:])
loc = d["loc2"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
else:
# Split the distance between the two points
lats = []
lons = []
for loc in [d["loc1"], d["loc2"]]:
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
if loc not in self.nwsli_provider:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
lats.append(self.nwsli_provider[loc]["lat"])
lons.append(self.nwsli_provider[loc]["lon"])
_pr.latitude = sum(lats) / 2.0
_pr.longitude = sum(lons) / 2.0
continue
else:
loc = therest[:3]
if loc not in self.nwsli_provider:
if _pr.base_loc is None:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
loc = _pr.base_loc
if loc not in self.nwsli_provider:
self.warnings.append(
f"Double-unknown location: {report}"
)
return None
# So we discard the offset when we go back to the base
dist = 0
bearing = 0
_pr.longitude, _pr.latitude = self.compute_loc(
loc, dist, bearing
)
continue
# Time
if token.startswith("TM "):
numbers = re.findall("[0-9]{4}", token)
if len(numbers) != 1:
self.warnings.append("TM parse failed %s" % (report,))
return None
hour = int(numbers[0][:2])
minute = int(numbers[0][2:])
_pr.valid = self.compute_pirep_valid(hour, minute)
continue
return _pr if _pr.latitude is not None else None
def compute_loc(self, loc, dist, bearing):
""" Figure out the lon/lat for this location """
lat = self.nwsli_provider[loc]["lat"]
lon = self.nwsli_provider[loc]["lon"]
# shortcut
if dist == 0:
return lon, lat
meters = distance(float(dist), "MI").value("M")
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (
meters
* math.sin(math.radians(bearing))
/ math.cos(math.radians(lat))
/ 111111.0
)
return lon + easting, lat + northing
def compute_pirep_valid(self, hour, minute):
""" Based on what utcnow is set to, compute when this is valid """
res = self.utcnow.replace(
hour=hour, minute=minute, second=0, microsecond=0
)
if hour > self.utcnow.hour:
|
return res
def sql(self, txn):
""" Save the reports to the database via the transaction """
for report in self.reports:
if report.is_duplicate:
continue
txn.execute(
"INSERT into pireps(valid, geom, is_urgent, "
"aircraft_type, report) VALUES (%s, "
"ST_GeographyFromText('SRID=4326;POINT(%s %s)'),%s,%s,%s)",
(
report.valid,
report.longitude,
report.latitude,
report.priority == Priority.UUA,
report.aircraft_type,
report.text,
),
)
def assign_cwsu(self, txn):
""" Use this transaction object to assign CWSUs for the pireps """
for report in self.reports:
txn.execute(
"select distinct id from cwsu WHERE "
"st_contains(geom, geomFromEWKT('SRID=4326;POINT(%s %s)'))",
(report.longitude, report.latitude),
)
if txn.rowcount == 0:
# self.warnings.append("Find CWSU failed %.3f %.3f %s" % (
# report.longitude, report.latitude, report.text))
continue
row = txn.fetchone()
report.cwsu = row["id"]
def get_jabbers(self, _uri, _uri2=None):
""" get jabber messages """
res = []
for report in self.reports:
if report.is_duplicate or report.valid is None:
continue
jmsg = {
"priority": "Urgent"
if report.priority == Priority.UUA
else "Routine",
"ts": report.valid.strftime("%H%M"),
"report": html_escape(report.text),
"color": (
"#ff0000" if report.priority == Priority.UUA else "#00ff00"
),
}
plain = "%(priority)s pilot report at %(ts)sZ: %(report)s" % jmsg
html = (
"<span style='color:%(color)s;'>%(priority)s pilot "
"report</span> at %(ts)sZ: %(report)s"
) % jmsg
xtra = {
"channels": (
f"{report.priority}.{report.cwsu},{report.priority}.PIREP"
),
"geometry": "POINT(%s %s)"
% (report.longitude, report.latitude),
"ptype": report.priority,
"category": "PIREP",
"twitter": plain[:140],
"valid": report.valid.strftime("%Y%m%dT%H:%M:00"),
}
res.append([plain, html, xtra])
return res
def parser(buf, utcnow=None, ugc_provider=None, nwsli_provider=None):
""" A parser implementation """
return Pirep(
buf,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
| res -= datetime.timedelta(hours=24) | conditional_block |
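The masked conditional in this row is the day-rollover branch of `compute_pirep_valid`: a report stamped later in the day than `utcnow` must belong to the previous UTC day. A standalone check of that rule:

```python
import datetime

# Product processed at 0010Z, report stamped 2330Z -> previous day.
utcnow = datetime.datetime(2021, 3, 2, 0, 10)
hour, minute = 23, 30
res = utcnow.replace(hour=hour, minute=minute, second=0, microsecond=0)
if hour > utcnow.hour:
    res -= datetime.timedelta(hours=24)
print(res)  # 2021-03-01 23:30:00
```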
pirep.py | """Pilot Reports (PIREP)
This module attempts to process and store atomic data from PIREPs. These are
encoded products that look like so:
UBUS01 KMSC 221700
EAU UA /OV EAU360030/TM 1715/FL350/TP B737/TB CONT LGT-MOD CHOP =
EHY UA /OV MBW253036 /TM 1729 /FL105 /TP C206 /SK FEW250 /TA M06
/TB NEG /RM SMTH=
Unfortunately, there is not much documentation of this format and the feed of
this data contains a bunch of formatting errors.
"""
from enum import Enum
import datetime
import re
import math
from pydantic import BaseModel
import pyiem.nws.product as product
from pyiem.datatypes import distance
from pyiem.util import html_escape, LOG
OV_LATLON = re.compile(
(
r"\s?(?P<lat>[0-9]{3,4})(?P<latsign>[NS])"
r"\s?(?P<lon>[0-9]{3,5})(?P<lonsign>[EW])"
)
)
OV_LOCDIR = re.compile(
r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_TWOLOC = re.compile(
r"(?P<loc1>[A-Z0-9]{3,4})\s?-\s?(?P<loc2>[A-Z0-9]{3,4})"
)
OV_OFFSET = re.compile(
(
r"(?P<dist>[0-9]{1,3})\s?"
"(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)
)
DRCT2DIR = {
"N": 0,
"NNE": 22.5,
"NE": 45,
"ENE": 67.5,
"E": 90,
"ESE": 112.5,
"SE": 135,
"SSE": 157.5,
"S": 180,
"SSW": 202.5,
"SW": 225,
"WSW": 247.5,
"W": 270,
"WNW": 292.5,
"NW": 305,
"NNW": 327.5,
"NORTH": 0,
"EAST": 90,
"SOUTH": 180,
"WEST": 270,
}
class Priority(str, Enum):
"""Types of reports."""
def __str__(self):
"""When we want the str repr."""
return str(self.value)
UA = "UA"
UUA = "UUA"
class PilotReport(BaseModel):
""" A Pilot Report. """
base_loc: str = None
text: str = None
priority: Priority = None
latitude: float = None
longitude: float = None
valid: datetime.datetime = None
cwsu: str = None
aircraft_type: str = None
is_duplicate: bool = False
class Pirep(product.TextProduct):
""" Class for parsing and representing Space Wx Products. """
def __init__(
self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
):
""" constructor """
product.TextProduct.__init__(
self,
text,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
self.reports = []
self.parse_reports()
def parse_reports(self):
"""Actually do the parsing of the product that generates the reports
stored within the self.reports list"""
txt = (
self.unixtext
if self.unixtext[:2] != "\001\n"
else self.unixtext[2:]
)
lines = txt.split("\n")
# There may be an AWIPSID in line 3 or silly aviation control char
pos = 3 if len(lines[2]) < 10 or lines[2].startswith("\x1e") else 2
meat = "".join(lines[pos:])
for report in meat.split("="):
if report.strip() == "":
continue
res = self.process_pirep(" ".join(report.strip().split()))
if res is not None:
self.reports.append(res)
def process_pirep(self, report):
""" Convert this report text into an actual PIREP object """
_pr = PilotReport()
_pr.text = report
for i, token in enumerate(report.split("/")):
token = token.strip()
# First token is always priority
if i == 0:
if len(token) > 10:
LOG.info("Aborting as not-PIREP? |%s|", report)
return
if token.find(" UUA") > 0:
_pr.priority = Priority.UUA
else:
_pr.priority = Priority.UA
parts = token.split()
if len(parts) == 2:
_pr.base_loc = parts[0]
if len(_pr.base_loc) == 4 and _pr.base_loc[0] == "K":
_pr.base_loc = _pr.base_loc[1:]
continue
# Aircraft Type
if token.startswith("TP "):
_pr.aircraft_type = token[3:]
# Location
if token.startswith("OV "):
dist = 0
bearing = 0
therest = token[3:]
if len(therest) == 3:
loc = therest
elif therest.startswith("FINAL RWY"):
loc = report[:8].split()[0]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
elif len(therest) == 4:
if therest[0] == "K":
loc = therest[1:]
else:
loc = therest
elif re.match(OV_OFFSET, therest):
d = re.match(OV_OFFSET, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
dist = int(d["dist"])
bearing = DRCT2DIR[d["dir"]]
elif re.match(OV_LOCDIR, therest):
# KFAR330008
d = re.match(OV_LOCDIR, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
bearing = int(d["dir"])
dist = int(d["dist"])
elif re.match(OV_LATLON, therest):
# 2500N07000W
# FMH-12 says this is in degrees and minutes!
d = re.match(OV_LATLON, therest).groupdict()
_pr.latitude = float(
"%s.%i"
% (
d["lat"][:-2],
int(float(d["lat"][-2:]) / 60.0 * 10000.0),
)
)
if d["latsign"] == "S":
_pr.latitude = 0 - _pr.latitude
_pr.longitude = float(
"%s.%i"
% (
d["lon"][:-2],
int(float(d["lon"][-2:]) / 60.0 * 10000.0),
)
)
if d["lonsign"] == "W":
_pr.longitude = 0 - _pr.longitude
continue
elif therest == "O":
# Use the first part of the report in this case
loc = report[:3]
elif therest.find("-") > 0 and re.match(OV_TWOLOC, therest):
d = re.match(OV_TWOLOC, therest).groupdict()
numbers = re.findall("[0-9]{6}", therest)
if numbers:
bearing = int(numbers[0][:3])
dist = int(numbers[0][3:])
loc = d["loc2"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
else:
# Split the distance between the two points
lats = []
lons = []
for loc in [d["loc1"], d["loc2"]]:
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
if loc not in self.nwsli_provider:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
lats.append(self.nwsli_provider[loc]["lat"])
lons.append(self.nwsli_provider[loc]["lon"])
_pr.latitude = sum(lats) / 2.0
_pr.longitude = sum(lons) / 2.0
continue
else:
loc = therest[:3]
if loc not in self.nwsli_provider:
if _pr.base_loc is None:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
loc = _pr.base_loc
if loc not in self.nwsli_provider:
self.warnings.append(
f"Double-unknown location: {report}"
)
return None
# So we discard the offset when we go back to the base
dist = 0
bearing = 0
_pr.longitude, _pr.latitude = self.compute_loc(
loc, dist, bearing
)
continue
# Time
if token.startswith("TM "):
numbers = re.findall("[0-9]{4}", token)
if len(numbers) != 1:
self.warnings.append("TM parse failed %s" % (report,))
return None
hour = int(numbers[0][:2])
minute = int(numbers[0][2:])
_pr.valid = self.compute_pirep_valid(hour, minute)
continue
return _pr if _pr.latitude is not None else None
def compute_loc(self, loc, dist, bearing):
|
def compute_pirep_valid(self, hour, minute):
""" Based on what utcnow is set to, compute when this is valid """
res = self.utcnow.replace(
hour=hour, minute=minute, second=0, microsecond=0
)
if hour > self.utcnow.hour:
res -= datetime.timedelta(hours=24)
return res
def sql(self, txn):
""" Save the reports to the database via the transaction """
for report in self.reports:
if report.is_duplicate:
continue
txn.execute(
"INSERT into pireps(valid, geom, is_urgent, "
"aircraft_type, report) VALUES (%s, "
"ST_GeographyFromText('SRID=4326;POINT(%s %s)'),%s,%s,%s)",
(
report.valid,
report.longitude,
report.latitude,
report.priority == Priority.UUA,
report.aircraft_type,
report.text,
),
)
def assign_cwsu(self, txn):
""" Use this transaction object to assign CWSUs for the pireps """
for report in self.reports:
txn.execute(
"select distinct id from cwsu WHERE "
"st_contains(geom, geomFromEWKT('SRID=4326;POINT(%s %s)'))",
(report.longitude, report.latitude),
)
if txn.rowcount == 0:
# self.warnings.append("Find CWSU failed %.3f %.3f %s" % (
# report.longitude, report.latitude, report.text))
continue
row = txn.fetchone()
report.cwsu = row["id"]
def get_jabbers(self, _uri, _uri2=None):
""" get jabber messages """
res = []
for report in self.reports:
if report.is_duplicate or report.valid is None:
continue
jmsg = {
"priority": "Urgent"
if report.priority == Priority.UUA
else "Routine",
"ts": report.valid.strftime("%H%M"),
"report": html_escape(report.text),
"color": (
"#ff0000" if report.priority == Priority.UUA else "#00ff00"
),
}
plain = "%(priority)s pilot report at %(ts)sZ: %(report)s" % jmsg
html = (
"<span style='color:%(color)s;'>%(priority)s pilot "
"report</span> at %(ts)sZ: %(report)s"
) % jmsg
xtra = {
"channels": (
f"{report.priority}.{report.cwsu},{report.priority}.PIREP"
),
"geometry": "POINT(%s %s)"
% (report.longitude, report.latitude),
"ptype": report.priority,
"category": "PIREP",
"twitter": plain[:140],
"valid": report.valid.strftime("%Y%m%dT%H:%M:00"),
}
res.append([plain, html, xtra])
return res
def parser(buf, utcnow=None, ugc_provider=None, nwsli_provider=None):
""" A parser implementation """
return Pirep(
buf,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
| """ Figure out the lon/lat for this location """
lat = self.nwsli_provider[loc]["lat"]
lon = self.nwsli_provider[loc]["lon"]
# shortcut
if dist == 0:
return lon, lat
meters = distance(float(dist), "MI").value("M")
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (
meters
* math.sin(math.radians(bearing))
/ math.cos(math.radians(lat))
/ 111111.0
)
return lon + easting, lat + northing | identifier_body |
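A quick numeric check of the `compute_loc` offset math shown as this row's masked body: 20 statute miles due north of a station should raise latitude by roughly 0.29 degrees and leave longitude unchanged.

```python
import math

lat, lon, dist, bearing = 41.53, -93.65, 20, 0  # made-up station
meters = dist * 1609.344  # what distance(dist, "MI").value("M") returns
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (meters * math.sin(math.radians(bearing))
           / math.cos(math.radians(lat)) / 111111.0)
print(lon + easting, lat + northing)  # ~(-93.65, 41.82)
```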
pirep.py | """Pilot Reports (PIREP)
This module attempts to process and store atomic data from PIREPs. These are
encoded products that look like so:
UBUS01 KMSC 221700
EAU UA /OV EAU360030/TM 1715/FL350/TP B737/TB CONT LGT-MOD CHOP =
EHY UA /OV MBW253036 /TM 1729 /FL105 /TP C206 /SK FEW250 /TA M06
/TB NEG /RM SMTH=
Unfortunately, there is not much documentation of this format and the feed of
this data contains a bunch of formatting errors.
"""
from enum import Enum
import datetime
import re
import math
from pydantic import BaseModel
import pyiem.nws.product as product
from pyiem.datatypes import distance
from pyiem.util import html_escape, LOG
OV_LATLON = re.compile(
(
r"\s?(?P<lat>[0-9]{3,4})(?P<latsign>[NS])"
r"\s?(?P<lon>[0-9]{3,5})(?P<lonsign>[EW])"
)
)
OV_LOCDIR = re.compile(
r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_TWOLOC = re.compile(
r"(?P<loc1>[A-Z0-9]{3,4})\s?-\s?(?P<loc2>[A-Z0-9]{3,4})"
)
OV_OFFSET = re.compile(
(
r"(?P<dist>[0-9]{1,3})\s?"
"(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)
)
DRCT2DIR = {
"N": 0,
"NNE": 22.5,
"NE": 45,
"ENE": 67.5,
"E": 90,
"ESE": 112.5,
"SE": 135,
"SSE": 157.5,
"S": 180,
"SSW": 202.5,
"SW": 225,
"WSW": 247.5,
"W": 270,
"WNW": 292.5,
"NW": 305,
"NNW": 327.5,
"NORTH": 0,
"EAST": 90,
"SOUTH": 180,
"WEST": 270,
}
class Priority(str, Enum):
"""Types of reports."""
def __str__(self):
"""When we want the str repr."""
return str(self.value)
UA = "UA"
UUA = "UUA"
class PilotReport(BaseModel):
""" A Pilot Report. """
base_loc: str = None
text: str = None
priority: Priority = None
latitude: float = None
longitude: float = None
valid: datetime.datetime = None
cwsu: str = None
aircraft_type: str = None
is_duplicate: bool = False
class Pirep(product.TextProduct):
""" Class for parsing and representing Space Wx Products. """
def __init__(
self, text, utcnow=None, ugc_provider=None, nwsli_provider=None
):
""" constructor """
product.TextProduct.__init__(
self,
text,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
)
self.reports = []
self.parse_reports()
def parse_reports(self):
"""Actually do the parsing of the product that generates the reports
stored within the self.reports list"""
txt = (
self.unixtext
if self.unixtext[:2] != "\001\n"
else self.unixtext[2:]
)
lines = txt.split("\n")
# There may be an AWIPSID in line 3 or silly aviation control char
pos = 3 if len(lines[2]) < 10 or lines[2].startswith("\x1e") else 2
meat = "".join(lines[pos:])
for report in meat.split("="):
if report.strip() == "":
continue
res = self.process_pirep(" ".join(report.strip().split()))
if res is not None:
self.reports.append(res)
def process_pirep(self, report):
""" Convert this report text into an actual PIREP object """
_pr = PilotReport()
_pr.text = report
for i, token in enumerate(report.split("/")):
token = token.strip()
# First token is always priority
if i == 0:
if len(token) > 10:
LOG.info("Aborting as not-PIREP? |%s|", report)
return
if token.find(" UUA") > 0:
_pr.priority = Priority.UUA
else:
_pr.priority = Priority.UA
parts = token.split()
if len(parts) == 2:
_pr.base_loc = parts[0]
if len(_pr.base_loc) == 4 and _pr.base_loc[0] == "K":
_pr.base_loc = _pr.base_loc[1:]
continue
# Aircraft Type
if token.startswith("TP "):
_pr.aircraft_type = token[3:]
# Location
if token.startswith("OV "):
dist = 0
bearing = 0
therest = token[3:]
if len(therest) == 3:
loc = therest
elif therest.startswith("FINAL RWY"):
loc = report[:8].split()[0]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
elif len(therest) == 4:
if therest[0] == "K":
loc = therest[1:]
else:
loc = therest
elif re.match(OV_OFFSET, therest):
d = re.match(OV_OFFSET, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
dist = int(d["dist"])
bearing = DRCT2DIR[d["dir"]]
elif re.match(OV_LOCDIR, therest):
# KFAR330008
d = re.match(OV_LOCDIR, therest).groupdict()
loc = d["loc"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
bearing = int(d["dir"])
dist = int(d["dist"])
elif re.match(OV_LATLON, therest):
# 2500N07000W
# FMH-12 says this is in degrees and minutes!
d = re.match(OV_LATLON, therest).groupdict()
_pr.latitude = float(
"%s.%i"
% (
d["lat"][:-2],
int(float(d["lat"][-2:]) / 60.0 * 10000.0),
)
)
if d["latsign"] == "S":
_pr.latitude = 0 - _pr.latitude
_pr.longitude = float(
"%s.%i"
% (
d["lon"][:-2],
int(float(d["lon"][-2:]) / 60.0 * 10000.0),
)
)
if d["lonsign"] == "W":
_pr.longitude = 0 - _pr.longitude
continue
elif therest == "O":
# Use the first part of the report in this case
loc = report[:3]
elif therest.find("-") > 0 and re.match(OV_TWOLOC, therest):
d = re.match(OV_TWOLOC, therest).groupdict()
numbers = re.findall("[0-9]{6}", therest)
if numbers:
bearing = int(numbers[0][:3])
dist = int(numbers[0][3:])
loc = d["loc2"]
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
else:
# Split the distance between the two points
lats = []
lons = []
for loc in [d["loc1"], d["loc2"]]:
if len(loc) == 4 and loc[0] == "K":
loc = loc[1:]
if loc not in self.nwsli_provider:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
lats.append(self.nwsli_provider[loc]["lat"])
lons.append(self.nwsli_provider[loc]["lon"])
_pr.latitude = sum(lats) / 2.0
_pr.longitude = sum(lons) / 2.0
continue
else:
loc = therest[:3]
if loc not in self.nwsli_provider:
if _pr.base_loc is None:
self.warnings.append(
f"Unknown location: {loc} '{report}'"
)
return None
loc = _pr.base_loc
if loc not in self.nwsli_provider:
self.warnings.append(
f"Double-unknown location: {report}"
)
return None
# So we discard the offset when we go back to the base
dist = 0
bearing = 0
_pr.longitude, _pr.latitude = self.compute_loc(
loc, dist, bearing
)
continue
# Time
if token.startswith("TM "):
numbers = re.findall("[0-9]{4}", token)
if len(numbers) != 1:
self.warnings.append("TM parse failed %s" % (report,))
return None
hour = int(numbers[0][:2])
minute = int(numbers[0][2:])
_pr.valid = self.compute_pirep_valid(hour, minute)
continue
return _pr if _pr.latitude is not None else None
def compute_loc(self, loc, dist, bearing):
""" Figure out the lon/lat for this location """
lat = self.nwsli_provider[loc]["lat"]
lon = self.nwsli_provider[loc]["lon"]
# shortcut
if dist == 0:
return lon, lat
meters = distance(float(dist), "MI").value("M")
northing = meters * math.cos(math.radians(bearing)) / 111111.0
easting = (
meters
* math.sin(math.radians(bearing))
/ math.cos(math.radians(lat))
/ 111111.0
)
return lon + easting, lat + northing
def compute_pirep_valid(self, hour, minute):
""" Based on what utcnow is set to, compute when this is valid """
res = self.utcnow.replace(
hour=hour, minute=minute, second=0, microsecond=0
)
if hour > self.utcnow.hour:
res -= datetime.timedelta(hours=24)
return res
def sql(self, txn):
""" Save the reports to the database via the transaction """
for report in self.reports:
if report.is_duplicate:
continue
txn.execute(
"INSERT into pireps(valid, geom, is_urgent, "
"aircraft_type, report) VALUES (%s, "
"ST_GeographyFromText('SRID=4326;POINT(%s %s)'),%s,%s,%s)",
(
report.valid,
report.longitude,
report.latitude,
report.priority == Priority.UUA, | report.text,
),
)
def assign_cwsu(self, txn):
""" Use this transaction object to assign CWSUs for the pireps """
for report in self.reports:
txn.execute(
"select distinct id from cwsu WHERE "
"st_contains(geom, geomFromEWKT('SRID=4326;POINT(%s %s)'))",
(report.longitude, report.latitude),
)
if txn.rowcount == 0:
# self.warnings.append("Find CWSU failed %.3f %.3f %s" % (
# report.longitude, report.latitude, report.text))
continue
row = txn.fetchone()
report.cwsu = row["id"]
def get_jabbers(self, _uri, _uri2=None):
""" get jabber messages """
res = []
for report in self.reports:
if report.is_duplicate or report.valid is None:
continue
jmsg = {
"priority": "Urgent"
if report.priority == Priority.UUA
else "Routine",
"ts": report.valid.strftime("%H%M"),
"report": html_escape(report.text),
"color": (
"#ff0000" if report.priority == Priority.UUA else "#00ff00"
),
}
plain = "%(priority)s pilot report at %(ts)sZ: %(report)s" % jmsg
html = (
"<span style='color:%(color)s;'>%(priority)s pilot "
"report</span> at %(ts)sZ: %(report)s"
) % jmsg
xtra = {
"channels": (
f"{report.priority}.{report.cwsu},{report.priority}.PIREP"
),
"geometry": "POINT(%s %s)"
% (report.longitude, report.latitude),
"ptype": report.priority,
"category": "PIREP",
"twitter": plain[:140],
"valid": report.valid.strftime("%Y%m%dT%H:%M:00"),
}
res.append([plain, html, xtra])
return res
def parser(buf, utcnow=None, ugc_provider=None, nwsli_provider=None):
""" A parser implementation """
return Pirep(
buf,
utcnow=utcnow,
ugc_provider=ugc_provider,
nwsli_provider=nwsli_provider,
) | report.aircraft_type, | random_line_split |
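The `/OV` location regexes near the top of the module target two distinct shorthands; a standalone check of both, using patterns copied verbatim from the source:

```python
import re

OV_LOCDIR = re.compile(
    r".*?(?P<loc>[A-Z0-9]{3,4})\s?(?P<dir>[0-9]{3})(?P<dist>[0-9]{3})"
)
OV_OFFSET = re.compile(
    r"(?P<dist>[0-9]{1,3})\s?"
    "(?P<dir>NORTH|EAST|SOUTH|WEST|N|NNE|NE|ENE|E|ESE|"
    r"SE|SSE|S|SSW|SW|WSW|W|WNW|NW|NNW)\s+(OF )?(?P<loc>[A-Z0-9]{3,4})"
)

print(OV_LOCDIR.match("KFAR330008").groupdict())
# {'loc': 'KFAR', 'dir': '330', 'dist': '008'}
print(OV_OFFSET.match("25 NW OF MBW").groupdict())
# {'dist': '25', 'dir': 'NW', 'loc': 'MBW'}
```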
requester.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package requester provides commands to run load tests and display results.
package requester
import (
"bytes"
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
"net/http/httptrace"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/mohae/deepcopy"
"golang.org/x/net/http2"
)
const megSenderUA = "meg/0.0.1"
type result struct {
err error
statusCode int
duration time.Duration
connDuration time.Duration // connection setup (DNS lookup + dial) duration
dnsDuration time.Duration // dns lookup duration
reqDuration time.Duration // request "write" duration
resDuration time.Duration // response "read" duration
delayDuration time.Duration // delay between response and request
contentLength int64
}
type Work struct {
// Request is the request to be made.
Request *http.Request
//RequestBody []byte
RequestParamSlice *RequestParamSlice
DataType string
DisableOutput bool
// N is the total number of requests to make.
N int
// C is the concurrency level, the number of concurrent workers to run.
C int
// H2 is an option to make HTTP/2 requests
H2 bool
// Timeout in seconds.
SingleRequestTimeout time.Duration
// Timeout in seconds
PerformanceTimeout time.Duration
// Qps is the rate limit.
QPS int
// DisableCompression is an option to disable compression in response
DisableCompression bool
// DisableKeepAlives is an option that prevents re-use of TCP connections between different HTTP requests
DisableKeepAlives bool
// DisableRedirects is an option to prevent the following of HTTP redirects
DisableRedirects bool
// RandomInput is an option to pick input rows at random when the input file has multiple rows
RandomInput bool
// Async sends requests asynchronously within each worker instead of serially
Async bool
// Output represents the output type. If "csv" is provided, the
// output will be dumped as a csv stream.
Output string
// ProxyAddr is the address of the HTTP proxy server in the format "host:port".
// Optional.
ProxyAddr *url.URL
// Writer is where results will be written. If nil, results are written to stdout.
Writer io.Writer
results chan *result
stopCh chan struct{}
startTime time.Time
report *report
}
func (b *Work) writer() io.Writer {
if b.Writer == nil {
return os.Stdout
}
return b.Writer
}
// Run makes all the requests, prints the summary. It blocks until
// all work is done.
func (b *Work) Run() {
// append hey's user agent
ua := b.Request.UserAgent()
if ua == "" {
ua = megSenderUA
} else {
ua += " " + megSenderUA
}
b.results = make(chan *result)
b.stopCh = make(chan struct{}, b.C)
b.startTime = time.Now()
b.report = newReport(b.writer(), b.results, b.Output)
b.report.start()
b.runWorkers()
b.Finish()
}
func (b *Work) Finish() {
for i := 0; i < b.C; i++ {
b.stopCh <- struct{}{}
}
close(b.results)
b.results = nil
b.report.stop()
}
func (b *Work) makeRequest(c *http.Client, p *RequestParam) {
s := time.Now()
var size int64
var code int
var dnsStart, connStart, resStart, reqStart, delayStart time.Time
var dnsDuration, connDuration, resDuration, reqDuration, delayDuration time.Duration
//req := cloneRequest(b.Request, b.RequestBody)
req := cloneRequest(b.Request, p, b.DataType)
trace := &httptrace.ClientTrace{
DNSStart: func(info httptrace.DNSStartInfo) {
dnsStart = time.Now()
},
DNSDone: func(dnsInfo httptrace.DNSDoneInfo) {
dnsDuration = time.Now().Sub(dnsStart)
},
GetConn: func(h string) {
connStart = time.Now()
},
GotConn: func(connInfo httptrace.GotConnInfo) {
connDuration = time.Now().Sub(connStart)
reqStart = time.Now()
},
WroteRequest: func(w httptrace.WroteRequestInfo) {
reqDuration = time.Now().Sub(reqStart)
delayStart = time.Now()
},
GotFirstResponseByte: func() {
delayDuration = time.Now().Sub(delayStart)
resStart = time.Now()
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err == nil {
size = resp.ContentLength
code = resp.StatusCode
body := &bytes.Buffer{}
if b.DisableOutput == false {
_, err := body.ReadFrom(resp.Body)
if err == nil {
Info.Printf("%s\t%d\t%s\n", strings.TrimSpace(string(p.Content)), code, strings.TrimSpace(body.String()))
} else {
Error.Println(err)
return
}
}
io.Copy(ioutil.Discard, resp.Body)
} else {
Error.Println(err)
return
}
t := time.Now()
resDuration = t.Sub(resStart)
finish := t.Sub(s)
select {
case b.results <- &result{
statusCode: code,
duration: finish,
err: err,
contentLength: size,
connDuration: connDuration,
dnsDuration: dnsDuration,
reqDuration: reqDuration,
resDuration: resDuration,
delayDuration: delayDuration,
}:
default:
}
}
// runWorker issues n requests; widx is this worker's index.
func (b *Work) runWorker(n int, widx int) {
var throttle <-chan time.Time
if b.QPS > 0 {
throttle = time.Tick(time.Duration((1e6/(b.QPS))*b.C) * time.Microsecond)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: b.DisableCompression,
DisableKeepAlives: b.DisableKeepAlives,
Proxy: http.ProxyURL(b.ProxyAddr),
}
if b.H2 {
http2.ConfigureTransport(tr)
} else {
tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
}
client := &http.Client{Transport: tr, Timeout: b.SingleRequestTimeout}
if b.DisableRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
if b.Async {
// async
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.asyncSend(throttle, cliObj)
} else {
b.asyncSendN(widx, n, throttle, cliObj)
}
}
} else {
// sync
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.syncSend(throttle, cliObj)
} else {
b.syncSendN(widx, n, throttle, cliObj)
}
}
}
}
// sync send n
func (b *Work) syncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
for i := 0; i < n; i++ |
}
// sync send
func (b *Work) syncSend(throttle <-chan time.Time, client http.Client) {
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
}
}
// async send by count
func (b *Work) asyncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
// async send by time
func (b *Work) asyncSend(throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
wg.Add(1)
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
func (b *Work) getRequestParam(idx int) RequestParam {
length := len(b.RequestParamSlice.RequestParams)
if length > 0 {
if b.RandomInput {
return b.RequestParamSlice.RequestParams[rand.Intn(length)]
} else {
return b.RequestParamSlice.RequestParams[(idx)%length]
}
} else {
return RequestParam{
Content: []byte(""),
}
}
}
func (b *Work) runWorkers() {
var wg sync.WaitGroup
wg.Add(b.C)
for i := 0; i < b.C; i++ {
go func(i int) {
b.runWorker(b.N/(b.C), i)
defer wg.Done()
}(i)
}
wg.Wait()
}
/**
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request, body []byte) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if len(body) > 0 {
r2.Body = ioutil.NopCloser(bytes.NewReader(body))
}
return r2
}
*/
func cloneRequest(r *http.Request, p *RequestParam, t string) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if strings.ToUpper(t) == "JSON" {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
} else if strings.ToUpper(t) == "FORM" {
var obj map[string]string
err := json.Unmarshal(p.Content, &obj)
if err != nil {
Error.Println(err)
return nil
}
filesMap := make(map[string]string)
dataMap := make(map[string]string)
for key, val := range obj {
startWithAt := strings.HasPrefix(val, "@")
if startWithAt == true {
filesMap[key] = val[1:]
} else {
dataMap[key] = val
}
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
if len(filesMap) != 0 {
for key, path := range filesMap {
file, err := os.Open(path)
if err != nil {
Error.Println(err)
continue
}
defer file.Close()
part, err := writer.CreateFormFile(key, path)
if err != nil {
Error.Println(err)
continue
}
_, err = io.Copy(part, file)
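// NOTE: the err from io.Copy above is never checked, and the deferred
// file.Close() calls accumulate until cloneRequest returns instead of
// closing each file at the end of its loop iteration.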
}
}
if len(dataMap) != 0 {
for key, val := range dataMap {
_ = writer.WriteField(key, val)
}
}
writer.Close()
/**
req, err := http.NewRequest("POST", r.URL.String(), body)
if err != nil {
log.Fatal(err.Error())
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req
*/
//r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.ContentLength = int64(len(body.Bytes()))
r2.Header.Set("Content-Type", writer.FormDataContentType())
} else {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
}
return r2
}
func init() {
}
| {
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
} | conditional_block |
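A Go pitfall worth flagging in the send loops above (and in the full copies of requester.go below): a plain break inside a select exits only the select statement, not the enclosing for loop, so the case <-b.stopCh arms never actually terminate the senders; each loop simply proceeds to its next iteration. A minimal, self-contained sketch of the behavior and of the labeled-break fix (identifiers here are illustrative, not taken from the source):

package main

import "fmt"

func main() {
	// A buffered stop channel that has already fired.
	stop := make(chan struct{}, 1)
	stop <- struct{}{}
	plain := 0
	for i := 0; i < 5; i++ {
		select {
		case <-stop:
			break // exits only the select; the for loop keeps going
		default:
			plain++
		}
	}
	fmt.Println(plain) // 4: only the first iteration was skipped

	stop2 := make(chan struct{}, 1)
	stop2 <- struct{}{}
	labeled := 0
loop:
	for i := 0; i < 5; i++ {
		select {
		case <-stop2:
			break loop // terminates the loop itself
		default:
			labeled++
		}
	}
	fmt.Println(labeled) // 0: the loop stopped on its first iteration
}

Applied to the snippet, the stop arms would need a labeled break on the surrounding for (or a return) for stopCh to take effect.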
requester.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package requester provides commands to run load tests and display results.
package requester
import (
"bytes"
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
"net/http/httptrace"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/mohae/deepcopy"
"golang.org/x/net/http2"
)
const megSenderUA = "meg/0.0.1"
type result struct {
err error
statusCode int
duration time.Duration
connDuration time.Duration // connection setup(DNS lookup + Dial up) duration
dnsDuration time.Duration // dns lookup duration
reqDuration time.Duration // request "write" duration
resDuration time.Duration // response "read" duration
delayDuration time.Duration // delay between writing the request and receiving the first response byte
contentLength int64
}
type Work struct {
// Request is the request to be made.
Request *http.Request
//RequestBody []byte
RequestParamSlice *RequestParamSlice
DataType string
DisableOutput bool
// N is the total number of requests to make.
N int
// C is the concurrency level, the number of concurrent workers to run.
C int
// H2 is an option to make HTTP/2 requests
H2 bool
// SingleRequestTimeout is the timeout for a single request.
SingleRequestTimeout time.Duration
// PerformanceTimeout is the total duration to run the load test for.
PerformanceTimeout time.Duration
// QPS is the rate limit.
QPS int
// DisableCompression is an option to disable compression in responses
DisableCompression bool
// DisableKeepAlives is an option to prevent re-use of TCP connections between different HTTP requests
DisableKeepAlives bool
// DisableRedirects is an option to prevent the following of HTTP redirects
DisableRedirects bool
// RandomInput is an option to pick input rows at random when the input file has multiple rows
RandomInput bool
// Async is an option to send requests asynchronously within a single worker
Async bool
// Output represents the output type. If "csv" is provided, the
// output will be dumped as a csv stream.
Output string
// ProxyAddr is the address of HTTP proxy server in the format of "host:port".
// Optional.
ProxyAddr *url.URL
// Writer is where results will be written. If nil, results are written to stdout.
Writer io.Writer
results chan *result
stopCh chan struct{}
startTime time.Time
report *report
}
| return os.Stdout
}
return b.Writer
}
// Run makes all the requests, prints the summary. It blocks until
// all work is done.
func (b *Work) Run() {
// append meg's user agent
ua := b.Request.UserAgent()
if ua == "" {
ua = megSenderUA
} else {
ua += " " + megSenderUA
}
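// NOTE: ua is assembled above but never written back to b.Request.Header,
// so the meg user agent is not actually attached to outgoing requests.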
b.results = make(chan *result)
b.stopCh = make(chan struct{}, b.C)
b.startTime = time.Now()
b.report = newReport(b.writer(), b.results, b.Output)
b.report.start()
b.runWorkers()
b.Finish()
}
func (b *Work) Finish() {
for i := 0; i < b.C; i++ {
b.stopCh <- struct{}{}
}
close(b.results)
b.results = nil
b.report.stop()
}
func (b *Work) makeRequest(c *http.Client, p *RequestParam) {
s := time.Now()
var size int64
var code int
var dnsStart, connStart, resStart, reqStart, delayStart time.Time
var dnsDuration, connDuration, resDuration, reqDuration, delayDuration time.Duration
//req := cloneRequest(b.Request, b.RequestBody)
req := cloneRequest(b.Request, p, b.DataType)
trace := &httptrace.ClientTrace{
DNSStart: func(info httptrace.DNSStartInfo) {
dnsStart = time.Now()
},
DNSDone: func(dnsInfo httptrace.DNSDoneInfo) {
dnsDuration = time.Now().Sub(dnsStart)
},
GetConn: func(h string) {
connStart = time.Now()
},
GotConn: func(connInfo httptrace.GotConnInfo) {
connDuration = time.Now().Sub(connStart)
reqStart = time.Now()
},
WroteRequest: func(w httptrace.WroteRequestInfo) {
reqDuration = time.Now().Sub(reqStart)
delayStart = time.Now()
},
GotFirstResponseByte: func() {
delayDuration = time.Now().Sub(delayStart)
resStart = time.Now()
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err == nil {
size = resp.ContentLength
code = resp.StatusCode
body := &bytes.Buffer{}
if b.DisableOutput == false {
_, err := body.ReadFrom(resp.Body)
if err == nil {
Info.Printf("%s\t%d\t%s\n", strings.TrimSpace(string(p.Content)), code, strings.TrimSpace(body.String()))
} else {
Error.Println(err)
return
}
}
io.Copy(ioutil.Discard, resp.Body)
} else {
Error.Println(err)
return
}
t := time.Now()
resDuration = t.Sub(resStart)
finish := t.Sub(s)
select {
case b.results <- &result{
statusCode: code,
duration: finish,
err: err,
contentLength: size,
connDuration: connDuration,
dnsDuration: dnsDuration,
reqDuration: reqDuration,
resDuration: resDuration,
delayDuration: delayDuration,
}:
default:
}
}
// @param n count to send
func (b *Work) runWorker(n int, widx int) {
var throttle <-chan time.Time
if b.QPS > 0 {
throttle = time.Tick(time.Duration((1e6/(b.QPS))*b.C) * time.Microsecond)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: b.DisableCompression,
DisableKeepAlives: b.DisableKeepAlives,
Proxy: http.ProxyURL(b.ProxyAddr),
}
if b.H2 {
http2.ConfigureTransport(tr)
} else {
tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
}
client := &http.Client{Transport: tr, Timeout: b.SingleRequestTimeout}
if b.DisableRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
if b.Async {
// async
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.asyncSend(throttle, cliObj)
} else {
b.asyncSendN(widx, n, throttle, cliObj)
}
}
} else {
// sync
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.syncSend(throttle, cliObj)
} else {
b.syncSendN(widx, n, throttle, cliObj)
}
}
}
}
// sync send n
func (b *Work) syncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
}
}
// sync send
func (b *Work) syncSend(throttle <-chan time.Time, client http.Client) {
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
}
}
// async send by count
func (b *Work) asyncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
// async send by time
func (b *Work) asyncSend(throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
wg.Add(1)
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
func (b *Work) getRequestParam(idx int) RequestParam {
length := len(b.RequestParamSlice.RequestParams)
if length > 0 {
if b.RandomInput {
return b.RequestParamSlice.RequestParams[rand.Intn(length)]
} else {
return b.RequestParamSlice.RequestParams[(idx)%length]
}
} else {
return RequestParam{
Content: []byte(""),
}
}
}
func (b *Work) runWorkers() {
var wg sync.WaitGroup
wg.Add(b.C)
for i := 0; i < b.C; i++ {
go func(i int) {
b.runWorker(b.N/(b.C), i)
defer wg.Done()
}(i)
}
wg.Wait()
}
/**
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request, body []byte) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if len(body) > 0 {
r2.Body = ioutil.NopCloser(bytes.NewReader(body))
}
return r2
}
*/
func cloneRequest(r *http.Request, p *RequestParam, t string) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if strings.ToUpper(t) == "JSON" {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
} else if strings.ToUpper(t) == "FORM" {
var obj map[string]string
err := json.Unmarshal([]byte(p.Content), &obj)
if err != nil {
Error.Println(err)
return nil
}
filesMap := make(map[string]string)
dataMap := make(map[string]string)
for key, val := range obj {
startWithAt := strings.HasPrefix(val, "@")
if startWithAt == true {
filesMap[key] = val[1:]
} else {
dataMap[key] = val
}
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
if len(filesMap) != 0 {
for key, path := range filesMap {
file, err := os.Open(path)
if err != nil {
Error.Println(err)
continue
}
defer file.Close()
part, err := writer.CreateFormFile(key, path)
if err != nil {
Error.Println(err)
continue
}
_, err = io.Copy(part, file)
}
}
if len(dataMap) != 0 {
for key, val := range dataMap {
_ = writer.WriteField(key, val)
}
}
writer.Close()
/**
req, err := http.NewRequest("POST", r.URL.String(), body)
if err != nil {
log.Fatal(err.Error())
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req
*/
//r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.ContentLength = int64(len(body.Bytes()))
r2.Header.Set("Content-Type", writer.FormDataContentType())
} else {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
}
return r2
}
func init() {
} | func (b *Work) writer() io.Writer {
if b.Writer == nil { | random_line_split |
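A related pitfall in asyncSendN and asyncSend above: the goroutines close over the loop variable i directly. Before Go 1.22 all iterations share a single variable, so the spawned goroutines race on it and can observe late values of i. Passing the index as an argument pins a per-goroutine copy; a minimal sketch (illustrative names, not from the source):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	results := make([]int, 3)
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) { // the parameter shadows the loop variable
			defer wg.Done()
			results[i] = i // always sees its own copy, on any Go version
		}(i)
	}
	wg.Wait()
	fmt.Println(results) // [0 1 2]
}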
requester.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package requester provides commands to run load tests and display results.
package requester
import (
"bytes"
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
"net/http/httptrace"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/mohae/deepcopy"
"golang.org/x/net/http2"
)
const megSenderUA = "meg/0.0.1"
type result struct {
err error
statusCode int
duration time.Duration
connDuration time.Duration // connection setup(DNS lookup + Dial up) duration
dnsDuration time.Duration // dns lookup duration
reqDuration time.Duration // request "write" duration
resDuration time.Duration // response "read" duration
delayDuration time.Duration // delay between writing the request and receiving the first response byte
contentLength int64
}
type Work struct {
// Request is the request to be made.
Request *http.Request
//RequestBody []byte
RequestParamSlice *RequestParamSlice
DataType string
DisableOutput bool
// N is the total number of requests to make.
N int
// C is the concurrency level, the number of concurrent workers to run.
C int
// H2 is an option to make HTTP/2 requests
H2 bool
// SingleRequestTimeout is the timeout for a single request.
SingleRequestTimeout time.Duration
// PerformanceTimeout is the total duration to run the load test for.
PerformanceTimeout time.Duration
// QPS is the rate limit.
QPS int
// DisableCompression is an option to disable compression in responses
DisableCompression bool
// DisableKeepAlives is an option to prevent re-use of TCP connections between different HTTP requests
DisableKeepAlives bool
// DisableRedirects is an option to prevent the following of HTTP redirects
DisableRedirects bool
// RandomInput is an option to pick input rows at random when the input file has multiple rows
RandomInput bool
// Async is an option to send requests asynchronously within a single worker
Async bool
// Output represents the output type. If "csv" is provided, the
// output will be dumped as a csv stream.
Output string
// ProxyAddr is the address of HTTP proxy server in the format of "host:port".
// Optional.
ProxyAddr *url.URL
// Writer is where results will be written. If nil, results are written to stdout.
Writer io.Writer
results chan *result
stopCh chan struct{}
startTime time.Time
report *report
}
func (b *Work) | () io.Writer {
if b.Writer == nil {
return os.Stdout
}
return b.Writer
}
// Run makes all the requests, prints the summary. It blocks until
// all work is done.
func (b *Work) Run() {
// append meg's user agent
ua := b.Request.UserAgent()
if ua == "" {
ua = megSenderUA
} else {
ua += " " + megSenderUA
}
b.results = make(chan *result)
b.stopCh = make(chan struct{}, b.C)
b.startTime = time.Now()
b.report = newReport(b.writer(), b.results, b.Output)
b.report.start()
b.runWorkers()
b.Finish()
}
func (b *Work) Finish() {
for i := 0; i < b.C; i++ {
b.stopCh <- struct{}{}
}
close(b.results)
b.results = nil
b.report.stop()
}
func (b *Work) makeRequest(c *http.Client, p *RequestParam) {
s := time.Now()
var size int64
var code int
var dnsStart, connStart, resStart, reqStart, delayStart time.Time
var dnsDuration, connDuration, resDuration, reqDuration, delayDuration time.Duration
//req := cloneRequest(b.Request, b.RequestBody)
req := cloneRequest(b.Request, p, b.DataType)
trace := &httptrace.ClientTrace{
DNSStart: func(info httptrace.DNSStartInfo) {
dnsStart = time.Now()
},
DNSDone: func(dnsInfo httptrace.DNSDoneInfo) {
dnsDuration = time.Now().Sub(dnsStart)
},
GetConn: func(h string) {
connStart = time.Now()
},
GotConn: func(connInfo httptrace.GotConnInfo) {
connDuration = time.Now().Sub(connStart)
reqStart = time.Now()
},
WroteRequest: func(w httptrace.WroteRequestInfo) {
reqDuration = time.Now().Sub(reqStart)
delayStart = time.Now()
},
GotFirstResponseByte: func() {
delayDuration = time.Now().Sub(delayStart)
resStart = time.Now()
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err == nil {
size = resp.ContentLength
code = resp.StatusCode
body := &bytes.Buffer{}
if b.DisableOutput == false {
_, err := body.ReadFrom(resp.Body)
if err == nil {
Info.Printf("%s\t%d\t%s\n", strings.TrimSpace(string(p.Content)), code, strings.TrimSpace(body.String()))
} else {
Error.Println(err)
return
}
}
io.Copy(ioutil.Discard, resp.Body)
} else {
Error.Println(err)
return
}
t := time.Now()
resDuration = t.Sub(resStart)
finish := t.Sub(s)
select {
case b.results <- &result{
statusCode: code,
duration: finish,
err: err,
contentLength: size,
connDuration: connDuration,
dnsDuration: dnsDuration,
reqDuration: reqDuration,
resDuration: resDuration,
delayDuration: delayDuration,
}:
default:
}
}
// @param n count to send
func (b *Work) runWorker(n int, widx int) {
var throttle <-chan time.Time
if b.QPS > 0 {
throttle = time.Tick(time.Duration((1e6/(b.QPS))*b.C) * time.Microsecond)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: b.DisableCompression,
DisableKeepAlives: b.DisableKeepAlives,
Proxy: http.ProxyURL(b.ProxyAddr),
}
if b.H2 {
http2.ConfigureTransport(tr)
} else {
tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
}
client := &http.Client{Transport: tr, Timeout: b.SingleRequestTimeout}
if b.DisableRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
if b.Async {
// async
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.asyncSend(throttle, cliObj)
} else {
b.asyncSendN(widx, n, throttle, cliObj)
}
}
} else {
// sync
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.syncSend(throttle, cliObj)
} else {
b.syncSendN(widx, n, throttle, cliObj)
}
}
}
}
// sync send n
func (b *Work) syncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
}
}
// sync send
func (b *Work) syncSend(throttle <-chan time.Time, client http.Client) {
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
}
}
// async send by count
func (b *Work) asyncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
// async send by time
func (b *Work) asyncSend(throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
wg.Add(1)
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
func (b *Work) getRequestParam(idx int) RequestParam {
length := len(b.RequestParamSlice.RequestParams)
if length > 0 {
if b.RandomInput {
return b.RequestParamSlice.RequestParams[rand.Intn(length)]
} else {
return b.RequestParamSlice.RequestParams[(idx)%length]
}
} else {
return RequestParam{
Content: []byte(""),
}
}
}
func (b *Work) runWorkers() {
var wg sync.WaitGroup
wg.Add(b.C)
for i := 0; i < b.C; i++ {
go func(i int) {
b.runWorker(b.N/(b.C), i)
defer wg.Done()
}(i)
}
wg.Wait()
}
/**
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request, body []byte) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if len(body) > 0 {
r2.Body = ioutil.NopCloser(bytes.NewReader(body))
}
return r2
}
*/
func cloneRequest(r *http.Request, p *RequestParam, t string) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if strings.ToUpper(t) == "JSON" {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
} else if strings.ToUpper(t) == "FORM" {
var obj map[string]string
err := json.Unmarshal([]byte(p.Content), &obj)
if err != nil {
Error.Println(err)
return nil
}
filesMap := make(map[string]string)
dataMap := make(map[string]string)
for key, val := range obj {
startWithAt := strings.HasPrefix(val, "@")
if startWithAt == true {
filesMap[key] = val[1:]
} else {
dataMap[key] = val
}
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
if len(filesMap) != 0 {
for key, path := range filesMap {
file, err := os.Open(path)
if err != nil {
Error.Println(err)
continue
}
defer file.Close()
part, err := writer.CreateFormFile(key, path)
if err != nil {
Error.Println(err)
continue
}
_, err = io.Copy(part, file)
}
}
if len(dataMap) != 0 {
for key, val := range dataMap {
_ = writer.WriteField(key, val)
}
}
writer.Close()
/**
req, err := http.NewRequest("POST", r.URL.String(), body)
if err != nil {
log.Fatal(err.Error())
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req
*/
//r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.ContentLength = int64(len(body.Bytes()))
r2.Header.Set("Content-Type", writer.FormDataContentType())
} else {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
}
return r2
}
func init() {
}
| writer | identifier_name |
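The throttle arithmetic in runWorker deserves a note: 1e6/b.QPS is the aggregate inter-request gap in microseconds, and multiplying by b.C stretches it so that each of the C workers ticks at QPS/C requests per second, giving roughly QPS in aggregate. Since 1e6/b.QPS is evaluated as integer division, very large QPS values truncate the interval toward zero. A small runnable check of the formula (the helper name is assumed, not from the source):

package main

import (
	"fmt"
	"time"
)

// throttleInterval mirrors the expression used in runWorker:
// time.Duration((1e6/qps)*c) * time.Microsecond
func throttleInterval(qps, c int) time.Duration {
	return time.Duration((1e6/qps)*c) * time.Microsecond
}

func main() {
	// 200 QPS spread over 4 workers: each worker ticks every 20ms,
	// i.e. 50 requests per second per worker, 200 per second in aggregate.
	fmt.Println(throttleInterval(200, 4)) // 20ms
}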
requester.go | // Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package requester provides commands to run load tests and display results.
package requester
import (
"bytes"
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
"net/http/httptrace"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/mohae/deepcopy"
"golang.org/x/net/http2"
)
const megSenderUA = "meg/0.0.1"
type result struct {
err error
statusCode int
duration time.Duration
connDuration time.Duration // connection setup(DNS lookup + Dial up) duration
dnsDuration time.Duration // dns lookup duration
reqDuration time.Duration // request "write" duration
resDuration time.Duration // response "read" duration
delayDuration time.Duration // delay between writing the request and receiving the first response byte
contentLength int64
}
type Work struct {
// Request is the request to be made.
Request *http.Request
//RequestBody []byte
RequestParamSlice *RequestParamSlice
DataType string
DisableOutput bool
// N is the total number of requests to make.
N int
// C is the concurrency level, the number of concurrent workers to run.
C int
// H2 is an option to make HTTP/2 requests
H2 bool
// SingleRequestTimeout is the timeout for a single request.
SingleRequestTimeout time.Duration
// PerformanceTimeout is the total duration to run the load test for.
PerformanceTimeout time.Duration
// QPS is the rate limit.
QPS int
// DisableCompression is an option to disable compression in responses
DisableCompression bool
// DisableKeepAlives is an option to prevent re-use of TCP connections between different HTTP requests
DisableKeepAlives bool
// DisableRedirects is an option to prevent the following of HTTP redirects
DisableRedirects bool
// RandomInput is an option to pick input rows at random when the input file has multiple rows
RandomInput bool
// Async is an option to send requests asynchronously within a single worker
Async bool
// Output represents the output type. If "csv" is provided, the
// output will be dumped as a csv stream.
Output string
// ProxyAddr is the address of HTTP proxy server in the format of "host:port".
// Optional.
ProxyAddr *url.URL
// Writer is where results will be written. If nil, results are written to stdout.
Writer io.Writer
results chan *result
stopCh chan struct{}
startTime time.Time
report *report
}
func (b *Work) writer() io.Writer {
if b.Writer == nil {
return os.Stdout
}
return b.Writer
}
// Run makes all the requests, prints the summary. It blocks until
// all work is done.
func (b *Work) Run() {
// append meg's user agent
ua := b.Request.UserAgent()
if ua == "" {
ua = megSenderUA
} else {
ua += " " + megSenderUA
}
b.results = make(chan *result)
b.stopCh = make(chan struct{}, b.C)
b.startTime = time.Now()
b.report = newReport(b.writer(), b.results, b.Output)
b.report.start()
b.runWorkers()
b.Finish()
}
func (b *Work) Finish() {
for i := 0; i < b.C; i++ {
b.stopCh <- struct{}{}
}
close(b.results)
b.results = nil
b.report.stop()
}
func (b *Work) makeRequest(c *http.Client, p *RequestParam) {
s := time.Now()
var size int64
var code int
var dnsStart, connStart, resStart, reqStart, delayStart time.Time
var dnsDuration, connDuration, resDuration, reqDuration, delayDuration time.Duration
//req := cloneRequest(b.Request, b.RequestBody)
req := cloneRequest(b.Request, p, b.DataType)
trace := &httptrace.ClientTrace{
DNSStart: func(info httptrace.DNSStartInfo) {
dnsStart = time.Now()
},
DNSDone: func(dnsInfo httptrace.DNSDoneInfo) {
dnsDuration = time.Now().Sub(dnsStart)
},
GetConn: func(h string) {
connStart = time.Now()
},
GotConn: func(connInfo httptrace.GotConnInfo) {
connDuration = time.Now().Sub(connStart)
reqStart = time.Now()
},
WroteRequest: func(w httptrace.WroteRequestInfo) {
reqDuration = time.Now().Sub(reqStart)
delayStart = time.Now()
},
GotFirstResponseByte: func() {
delayDuration = time.Now().Sub(delayStart)
resStart = time.Now()
},
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
resp, err := c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err == nil {
size = resp.ContentLength
code = resp.StatusCode
body := &bytes.Buffer{}
if b.DisableOutput == false {
_, err := body.ReadFrom(resp.Body)
if err == nil {
Info.Printf("%s\t%d\t%s\n", strings.TrimSpace(string(p.Content)), code, strings.TrimSpace(body.String()))
} else {
Error.Println(err)
return
}
}
io.Copy(ioutil.Discard, resp.Body)
} else {
Error.Println(err)
return
}
t := time.Now()
resDuration = t.Sub(resStart)
finish := t.Sub(s)
select {
case b.results <- &result{
statusCode: code,
duration: finish,
err: err,
contentLength: size,
connDuration: connDuration,
dnsDuration: dnsDuration,
reqDuration: reqDuration,
resDuration: resDuration,
delayDuration: delayDuration,
}:
default:
}
}
// @param n count to send
func (b *Work) runWorker(n int, widx int) {
var throttle <-chan time.Time
if b.QPS > 0 {
throttle = time.Tick(time.Duration((1e6/(b.QPS))*b.C) * time.Microsecond)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: b.DisableCompression,
DisableKeepAlives: b.DisableKeepAlives,
Proxy: http.ProxyURL(b.ProxyAddr),
}
if b.H2 {
http2.ConfigureTransport(tr)
} else {
tr.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
}
client := &http.Client{Transport: tr, Timeout: b.SingleRequestTimeout}
if b.DisableRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
if b.Async {
// async
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.asyncSend(throttle, cliObj)
} else {
b.asyncSendN(widx, n, throttle, cliObj)
}
}
} else {
// sync
cli := deepcopy.Copy(*client)
cliObj, ok := cli.(http.Client)
if ok {
if b.PerformanceTimeout > 0 {
b.syncSend(throttle, cliObj)
} else {
b.syncSendN(widx, n, throttle, cliObj)
}
}
}
}
// sync send n
func (b *Work) syncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
}
}
// sync send
func (b *Work) syncSend(throttle <-chan time.Time, client http.Client) {
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
if b.QPS > 0 {
<-throttle
}
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
}
}
// async send by count
func (b *Work) asyncSendN(widx int, n int, throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i*b.C + widx)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
// async send by time
func (b *Work) asyncSend(throttle <-chan time.Time, client http.Client) {
var wg sync.WaitGroup
for i := 0; ; i++ {
if time.Now().Sub(b.startTime) > b.PerformanceTimeout {
break
}
wg.Add(1)
if b.QPS > 0 {
<-throttle
}
go func() {
select {
case <-b.stopCh:
break
default:
requestParam := b.getRequestParam(i)
b.makeRequest(&client, &requestParam)
}
wg.Done()
}()
}
wg.Wait()
}
func (b *Work) getRequestParam(idx int) RequestParam {
length := len(b.RequestParamSlice.RequestParams)
if length > 0 {
if b.RandomInput {
return b.RequestParamSlice.RequestParams[rand.Intn(length)]
} else {
return b.RequestParamSlice.RequestParams[(idx)%length]
}
} else {
return RequestParam{
Content: []byte(""),
}
}
}
func (b *Work) runWorkers() |
/**
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request, body []byte) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if len(body) > 0 {
r2.Body = ioutil.NopCloser(bytes.NewReader(body))
}
return r2
}
*/
func cloneRequest(r *http.Request, p *RequestParam, t string) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
if strings.ToUpper(t) == "JSON" {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
} else if strings.ToUpper(t) == "FORM" {
var obj map[string]string
err := json.Unmarshal([]byte(p.Content), &obj)
if err != nil {
Error.Println(err)
return nil
}
filesMap := make(map[string]string)
dataMap := make(map[string]string)
for key, val := range obj {
startWithAt := strings.HasPrefix(val, "@")
if startWithAt == true {
filesMap[key] = val[1:]
} else {
dataMap[key] = val
}
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
if len(filesMap) != 0 {
for key, path := range filesMap {
file, err := os.Open(path)
if err != nil {
Error.Println(err)
continue
}
defer file.Close()
part, err := writer.CreateFormFile(key, path)
if err != nil {
Error.Println(err)
continue
}
_, err = io.Copy(part, file)
}
}
if len(dataMap) != 0 {
for key, val := range dataMap {
_ = writer.WriteField(key, val)
}
}
writer.Close()
/**
req, err := http.NewRequest("POST", r.URL.String(), body)
if err != nil {
log.Fatal(err.Error())
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req
*/
//r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r2.ContentLength = int64(len(body.Bytes()))
r2.Header.Set("Content-Type", writer.FormDataContentType())
} else {
r2.Body = ioutil.NopCloser(bytes.NewReader(p.Content))
}
return r2
}
func init() {
}
| {
var wg sync.WaitGroup
wg.Add(b.C)
for i := 0; i < b.C; i++ {
go func(i int) {
b.runWorker(b.N/(b.C), i)
defer wg.Done()
}(i)
}
wg.Wait()
} | identifier_body |
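The deepcopy.Copy round-trip in runWorker (copy the dereferenced client, then type-assert back to http.Client) hands each send path its own Client value. Because runWorker already builds a fresh *http.Transport per worker, a plain value copy would plausibly serve as well, with the caveat that a value copy shares the Transport pointer, as this standalone sketch demonstrates:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	base := &http.Client{Transport: &http.Transport{}}
	cli := *base // copies the Client struct itself...
	// ...but Transport is an interface value, so both clients still point
	// at the same *http.Transport and share its connection pool.
	fmt.Println(cli.Transport == base.Transport) // true
}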
type_check.rs | use std::collections::HashMap;
use std::rc::Rc;
use parsing::{AST, Statement, Declaration, Signature, Expression, ExpressionType, Operation, Variant, TypeName, TypeSingletonName};
// from Niko's talk
/* fn type_check(expression, expected_ty) -> Ty {
let ty = bare_type_check(expression, expected_type);
if ty is incompatible with expected_ty {
try_coerce(expression, ty, expected_ty)
} else {
ty
}
}
fn bare_type_check(expression, expected_type) -> Ty { ... }
*/
/* H-M ALGO NOTES
from https://www.youtube.com/watch?v=il3gD7XMdmA
(also check out http://dev.stephendiehl.com/fun/006_hindley_milner.html)
typeInfer :: Expr a -> Matching (Type a)
unify :: Type a -> Type b -> Matching (Type c)
(Matching a) is a monad in which unification is done
ex:
typeInfer (If e1 e2 e3) = do
t1 <- typeInfer e1
t2 <- typeInfer e2
t3 <- typeInfer e3
_ <- unify t1 BoolType
unify t2 t3 -- b/c t2 and t3 have to be the same type
typeInfer (Const (ConstInt _)) = IntType -- same for other literals
--function application
typeInfer (Apply f x) = do
tf <- typeInfer f
tx <- typeInfer x
case tf of
FunctionType t1 t2 -> do
_ <- unify t1 tx
return t2
_ -> fail "Not a function"
--type annotation
typeInfer (Typed x t) = do
tx <- typeInfer x
unify tx t
--variable and let expressions - need to pass around a map of variable names to types here
typeInfer :: [ (Var, Type Var) ] -> Expr Var -> Matching (Type Var)
typeInfer ctx (Var x) = case (lookup x ctx) of
Just t -> return t
Nothing -> fail "Unknown variable"
--let x = e1 in e2
typeInfer ctx (Let x e1 e2) = do
t1 <- typeInfer ctx e1
typeInfer ((x, t1) :: ctx) e2
--lambdas are complicated (this represents ʎx.e)
typeInfer ctx (Lambda x e) = do
t1 <- allocExistentialVariable
t2 <- typeInfer ((x, t1) :: ctx) e
return $ FunctionType t1 t2 -- ie. t1 -> t2
--to solve the problem of map :: (a -> b) -> [a] -> [b]
when we use a variable whose type has universal tvars, convert those universal
tvars to existential ones
-and each distinct universal tvar needs to map to the same existential type
-so we change typeinfer:
typeInfer ctx (Var x) = do
case (lookup x ctx) of
Nothing -> ...
Just t -> do
let uvars = nub (toList t) -- nub removes duplicates, so this gets unique universally quantified variables
evars <- mapM (const allocExistentialVariable) uvars
let varMap = zip uvars evars
let fixVar varMap v = fromJust $ lookup v varMap
return (fmap (fixVar varMap) t)
--how do we define unify??
-recall, type signature is:
unify :: Type a -> Type b -> Matching (Type c)
unify BoolType BoolType = BoolType --easy, same for all constants
unify (FunctionType t1 t2) (FunctionType t3 t4) = do
t5 <- unify t1 t3
t6 <- unify t2 t4
return $ FunctionType t5 t6
unify (TVar a) (TVar b) = if a == b then TVar a else fail
--existential types can be assigned another type at most once
--some complicated stuff about handling existential types
--everything else is a type error
unify a b = fail
SKOLEMIZATION - how you prevent an unassigned existential type variable from leaking!
-before a type gets to global scope, replace all unassigned existential vars w/ new unique universal
type variables
*/
#[derive(Debug, PartialEq, Clone)]
pub enum Type {
TVar(TypeVar),
TConst(TypeConst),
TFunc(Box<Type>, Box<Type>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeVar {
Univ(Rc<String>),
Exist(u64),
}
impl TypeVar {
fn univ(label: &str) -> TypeVar {
TypeVar::Univ(Rc::new(label.to_string()))
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeConst {
UserT(Rc<String>),
Integer,
Float,
StringT,
Boolean,
Unit,
Bottom,
}
type TypeCheckResult = Result<Type, String>;
#[derive(Debug, PartialEq, Eq, Hash)]
struct PathSpecifier(Rc<String>);
#[derive(Debug, PartialEq, Clone)]
struct TypeContextEntry {
ty: Type,
constant: bool
}
pub struct T | {
symbol_table: HashMap<PathSpecifier, TypeContextEntry>,
evar_table: HashMap<u64, Type>,
existential_type_label_count: u64
}
impl TypeContext {
pub fn new() -> TypeContext {
TypeContext {
symbol_table: HashMap::new(),
evar_table: HashMap::new(),
existential_type_label_count: 0,
}
}
pub fn add_symbols(&mut self, ast: &AST) {
use self::Declaration::*;
use self::Type::*;
use self::TypeConst::*;
for statement in ast.0.iter() {
match *statement {
Statement::ExpressionStatement(_) => (),
Statement::Declaration(ref decl) => match *decl {
FuncSig(_) => (),
Impl { .. } => (),
TypeDecl(ref type_constructor, ref body) => {
for variant in body.0.iter() {
let (spec, ty) = match variant {
&Variant::UnitStruct(ref data_constructor) => {
let spec = PathSpecifier(data_constructor.clone());
let ty = TConst(UserT(type_constructor.name.clone()));
(spec, ty)
},
&Variant::TupleStruct(ref data_constructor, ref args) => {
//TODO fix
let arg = args.get(0).unwrap();
let type_arg = self.from_anno(arg);
let spec = PathSpecifier(data_constructor.clone());
let ty = TFunc(Box::new(type_arg), Box::new(TConst(UserT(type_constructor.name.clone()))));
(spec, ty)
},
&Variant::Record(_, _) => unimplemented!(),
};
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
}
},
TypeAlias { .. } => (),
Binding {ref name, ref constant, ref expr} => {
let spec = PathSpecifier(name.clone());
let ty = expr.1.as_ref()
.map(|ty| self.from_anno(ty))
.unwrap_or_else(|| { self.alloc_existential_type() }); // this call to alloc_existential is OK b/c a binding only ever has one type, so if the annotation is absent, it's fine to just make one de novo
let entry = TypeContextEntry { ty, constant: *constant };
self.symbol_table.insert(spec, entry);
},
FuncDecl(ref signature, _) => {
let spec = PathSpecifier(signature.name.clone());
let ty = self.from_signature(signature);
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
},
}
}
}
}
fn lookup(&mut self, binding: &Rc<String>) -> Option<TypeContextEntry> {
let key = PathSpecifier(binding.clone());
self.symbol_table.get(&key).map(|entry| entry.clone())
}
pub fn debug_symbol_table(&self) -> String {
format!("Symbol table:\n {:?}\nEvar table:\n{:?}", self.symbol_table, self.evar_table)
}
fn alloc_existential_type(&mut self) -> Type {
let ret = Type::TVar(TypeVar::Exist(self.existential_type_label_count));
self.existential_type_label_count += 1;
ret
}
fn from_anno(&mut self, anno: &TypeName) -> Type {
use self::Type::*;
use self::TypeConst::*;
match anno {
&TypeName::Singleton(TypeSingletonName { ref name, .. }) => {
match name.as_ref().as_ref() {
"Int" => TConst(Integer),
"Float" => TConst(Float),
"Bool" => TConst(Boolean),
"String" => TConst(StringT),
s => TVar(TypeVar::Univ(Rc::new(format!("{}",s)))),
}
},
&TypeName::Tuple(ref items) => {
if items.len() == 1 {
TConst(Unit)
} else {
TConst(Bottom)
}
}
}
}
fn from_signature(&mut self, sig: &Signature) -> Type {
use self::Type::*;
use self::TypeConst::*;
//TODO this won't work properly until you make sure that all (universal) type vars in the function have the same existential type var
// actually this should never even put existential types into the symbol table at all
//this will crash if more than 5 arg function is used
let names = vec!["a", "b", "c", "d", "e", "f"];
let mut idx = 0;
let mut get_type = || { let q = TVar(TypeVar::Univ(Rc::new(format!("{}", names.get(idx).unwrap())))); idx += 1; q };
let return_type = sig.type_anno.as_ref().map(|anno| self.from_anno(&anno)).unwrap_or_else(|| { get_type() });
if sig.params.len() == 0 {
TFunc(Box::new(TConst(Unit)), Box::new(return_type))
} else {
let mut output_type = return_type;
for p in sig.params.iter() {
let p_type = p.1.as_ref().map(|anno| self.from_anno(anno)).unwrap_or_else(|| { get_type() });
output_type = TFunc(Box::new(p_type), Box::new(output_type));
}
output_type
}
}
pub fn type_check(&mut self, ast: &AST) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
let mut last = TConst(Unit);
for statement in ast.0.iter() {
match statement {
&Statement::Declaration(ref _decl) => {
//return Err(format!("Declarations not supported"));
},
&Statement::ExpressionStatement(ref expr) => {
last = self.infer(expr)?;
}
}
}
Ok(last)
}
fn infer(&mut self, expr: &Expression) -> TypeCheckResult {
match (&expr.0, &expr.1) {
(exprtype, &Some(ref anno)) => {
let tx = self.infer_no_anno(exprtype)?;
let ty = self.from_anno(anno);
self.unify(tx, ty)
},
(exprtype, &None) => self.infer_no_anno(exprtype),
}
}
fn infer_no_anno(&mut self, ex: &ExpressionType) -> TypeCheckResult {
use self::ExpressionType::*;
use self::Type::*;
use self::TypeConst::*;
Ok(match ex {
&IntLiteral(_) => TConst(Integer),
&FloatLiteral(_) => TConst(Float),
&StringLiteral(_) => TConst(StringT),
&BoolLiteral(_) => TConst(Boolean),
&Value(ref name, _) => {
self.lookup(name)
.map(|entry| entry.ty)
.ok_or(format!("Couldn't find {}", name))?
},
&BinExp(ref op, ref lhs, ref rhs) => {
let t_lhs = self.infer(lhs)?;
match self.infer_op(op)? {
TFunc(t1, t2) => {
let _ = self.unify(t_lhs, *t1)?;
let t_rhs = self.infer(rhs)?;
let x = *t2;
match x {
TFunc(t3, t4) => {
let _ = self.unify(t_rhs, *t3)?;
*t4
},
_ => return Err(format!("Not a function type either")),
}
},
_ => return Err(format!("Op {:?} is not a function type", op)),
}
},
&Call { ref f, ref arguments } => {
let tf = self.infer(f)?;
let targ = self.infer(arguments.get(0).unwrap())?;
match tf {
TFunc(box t1, box t2) => {
let _ = self.unify(t1, targ)?;
t2
},
_ => return Err(format!("Not a function!")),
}
},
_ => TConst(Bottom),
})
}
fn infer_op(&mut self, op: &Operation) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
macro_rules! binoptype {
($lhs:expr, $rhs:expr, $out:expr) => { TFunc(Box::new($lhs), Box::new(TFunc(Box::new($rhs), Box::new($out)))) };
}
Ok(match (*op.0).as_ref() {
"+" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"++" => binoptype!(TConst(StringT), TConst(StringT), TConst(StringT)),
"-" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"*" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"/" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"%" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
_ => TConst(Bottom)
})
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeCheckResult {
use self::Type::*;
use self::TypeVar::*;
println!("Calling unify with `{:?}` and `{:?}`", t1, t2);
match (&t1, &t2) {
(&TConst(ref c1), &TConst(ref c2)) if c1 == c2 => Ok(TConst(c1.clone())),
(&TFunc(ref t1, ref t2), &TFunc(ref t3, ref t4)) => {
let t5 = self.unify(*t1.clone().clone(), *t3.clone().clone())?;
let t6 = self.unify(*t2.clone().clone(), *t4.clone().clone())?;
Ok(TFunc(Box::new(t5), Box::new(t6)))
},
(&TVar(Univ(ref a)), &TVar(Univ(ref b))) => {
if a == b {
Ok(TVar(Univ(a.clone())))
} else {
Err(format!("Couldn't unify universal types {} and {}", a, b))
}
},
//the interesting case!!
(&TVar(Exist(ref a)), ref t2) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t1) => self.unify(t1.clone().clone(), t2.clone().clone()),
None => {
self.evar_table.insert(*a, t2.clone().clone());
Ok(t2.clone().clone())
}
}
},
(ref t1, &TVar(Exist(ref a))) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t2) => self.unify(t2.clone().clone(), t1.clone().clone()),
None => {
self.evar_table.insert(*a, t1.clone().clone());
Ok(t1.clone().clone())
}
}
},
_ => Err(format!("Types {:?} and {:?} don't unify", t1, t2))
}
}
}
#[cfg(test)]
mod tests {
use super::{Type, TypeVar, TypeConst, TypeContext};
use super::Type::*;
use super::TypeConst::*;
use schala_lang::parsing::{parse, tokenize};
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let mut tc = TypeContext::new();
let ast = parse(tokenize($input)).0.unwrap() ;
tc.add_symbols(&ast);
assert_eq!($correct, tc.type_check(&ast).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", TConst(Integer));
type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
}
| ypeContext | identifier_name |
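The heart of unify above is the pair of existential cases: an unbound Exist(a) gets bound in evar_table to whatever it is unified against, while a bound one defers to its recorded type; this is what lets one annotation propagate through an expression. A minimal standalone sketch of that lookup-or-bind step, written in Go to match the rest of this collection (types here are illustrative stand-ins, not the Schala implementation):

package main

import "fmt"

type Type interface{ String() string }

type TConst string
type TExist uint64

func (t TConst) String() string { return string(t) }
func (t TExist) String() string { return fmt.Sprintf("∃%d", uint64(t)) }

type Ctx struct{ evars map[uint64]Type }

// unify mirrors the evar_table cases above: an unbound existential is bound
// to the other side; a bound one defers to its recorded binding.
func (c *Ctx) unify(a, b Type) (Type, error) {
	switch x := a.(type) {
	case TExist:
		if bound, ok := c.evars[uint64(x)]; ok {
			return c.unify(bound, b)
		}
		c.evars[uint64(x)] = b
		return b, nil
	case TConst:
		if y, ok := b.(TConst); ok && x == y {
			return x, nil
		}
		if _, ok := b.(TExist); ok {
			return c.unify(b, a) // flip to reuse the existential case
		}
	}
	return nil, fmt.Errorf("types %v and %v don't unify", a, b)
}

func main() {
	c := &Ctx{evars: map[uint64]Type{}}
	t, _ := c.unify(TExist(0), TConst("Int")) // binds ∃0 := Int
	fmt.Println(t)                            // Int
	_, err := c.unify(TExist(0), TConst("Bool"))
	fmt.Println(err) // ∃0 is already bound to Int, so this fails
}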
type_check.rs | use std::collections::HashMap;
use std::rc::Rc;
use parsing::{AST, Statement, Declaration, Signature, Expression, ExpressionType, Operation, Variant, TypeName, TypeSingletonName};
// from Niko's talk
/* fn type_check(expression, expected_ty) -> Ty {
let ty = bare_type_check(expression, expected_type);
if ty is incompatible with expected_ty {
try_coerce(expression, ty, expected_ty)
} else {
ty
}
}
fn bare_type_check(expression, expected_type) -> Ty { ... }
*/
/* H-M ALGO NOTES
from https://www.youtube.com/watch?v=il3gD7XMdmA
(also check out http://dev.stephendiehl.com/fun/006_hindley_milner.html)
typeInfer :: Expr a -> Matching (Type a)
unify :: Type a -> Type b -> Matching (Type c)
(Matching a) is a monad in which unification is done
ex:
typeInfer (If e1 e2 e3) = do
t1 <- typeInfer e1
t2 <- typeInfer e2
t3 <- typeInfer e3
_ <- unify t1 BoolType
unify t2 t3 -- b/c t2 and t3 have to be the same type
typeInfer (Const (ConstInt _)) = IntType -- same for other literals
--function application
typeInfer (Apply f x) = do
tf <- typeInfer f
tx <- typeInfer x
case tf of
FunctionType t1 t2 -> do
_ <- unify t1 tx
return t2
_ -> fail "Not a function"
--type annotation
typeInfer (Typed x t) = do
tx <- typeInfer x
unify tx t
--variable and let expressions - need to pass around a map of variable names to types here
typeInfer :: [ (Var, Type Var) ] -> Expr Var -> Matching (Type Var)
typeInfer ctx (Var x) = case (lookup x ctx) of
Just t -> return t
Nothing -> fail "Unknown variable"
--let x = e1 in e2
typeInfer ctx (Let x e1 e2) = do
t1 <- typeInfer ctx e1
typeInfer ((x, t1) :: ctx) e2
--lambdas are complicated (this represents ʎx.e)
typeInfer ctx (Lambda x e) = do
t1 <- allocExistentialVariable
t2 <- typeInfer ((x, t1) :: ctx) e
return $ FunctionType t1 t2 -- ie. t1 -> t2
--to solve the problem of map :: (a -> b) -> [a] -> [b]
when we use a variable whose type has universal tvars, convert those universal
tvars to existential ones
-and each distinct universal tvar needs to map to the same existential type
-so we change typeinfer:
typeInfer ctx (Var x) = do
case (lookup x ctx) of
Nothing -> ...
Just t -> do
let uvars = nub (toList t) -- nub removes duplicates, so this gets unique universally quantified variables
evars <- mapM (const allocExistentialVariable) uvars
let varMap = zip uvars evars
let fixVar varMap v = fromJust $ lookup v varMap
return (fmap (fixVar varMap) t)
--how do we define unify??
-recall, type signature is:
unify :: Type a -> Type b -> Matching (Type c)
unify BoolType BoolType = BoolType --easy, same for all constants
unify (FunctionType t1 t2) (FunctionType t3 t4) = do
t5 <- unify t1 t3
t6 <- unify t2 t4
return $ FunctionType t5 t6
unify (TVar a) (TVar b) = if a == b then TVar a else fail
--existential types can be assigned another type at most once
--some complicated stuff about handling existential types
--everything else is a type error
unify a b = fail
SKOLEMIZATION - how you prevent an unassigned existential type variable from leaking!
-before a type gets to global scope, replace all unassigned existential vars w/ new unique universal
type variables
*/
#[derive(Debug, PartialEq, Clone)]
pub enum Type {
TVar(TypeVar),
TConst(TypeConst),
TFunc(Box<Type>, Box<Type>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeVar {
Univ(Rc<String>),
Exist(u64),
}
impl TypeVar {
fn univ(label: &str) -> TypeVar {
TypeVar::Univ(Rc::new(label.to_string()))
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum TypeConst {
UserT(Rc<String>),
Integer,
Float,
StringT,
Boolean,
Unit,
Bottom,
}
type TypeCheckResult = Result<Type, String>;
#[derive(Debug, PartialEq, Eq, Hash)]
struct PathSpecifier(Rc<String>);
#[derive(Debug, PartialEq, Clone)]
struct TypeContextEntry {
ty: Type,
constant: bool
}
pub struct TypeContext {
symbol_table: HashMap<PathSpecifier, TypeContextEntry>,
evar_table: HashMap<u64, Type>,
existential_type_label_count: u64
}
impl TypeContext {
pub fn new() -> TypeContext {
TypeContext {
symbol_table: HashMap::new(),
evar_table: HashMap::new(),
existential_type_label_count: 0,
}
}
pub fn add_symbols(&mut self, ast: &AST) {
use self::Declaration::*;
use self::Type::*;
use self::TypeConst::*;
for statement in ast.0.iter() {
match *statement {
Statement::ExpressionStatement(_) => (),
Statement::Declaration(ref decl) => match *decl {
FuncSig(_) => (),
Impl { .. } => (),
TypeDecl(ref type_constructor, ref body) => {
for variant in body.0.iter() {
let (spec, ty) = match variant {
&Variant::UnitStruct(ref data_constructor) => {
let spec = PathSpecifier(data_constructor.clone());
let ty = TConst(UserT(type_constructor.name.clone()));
(spec, ty)
},
&Variant::TupleStruct(ref data_constructor, ref args) => {
//TODO fix
let arg = args.get(0).unwrap();
let type_arg = self.from_anno(arg);
let spec = PathSpecifier(data_constructor.clone());
let ty = TFunc(Box::new(type_arg), Box::new(TConst(UserT(type_constructor.name.clone()))));
(spec, ty)
},
&Variant::Record(_, _) => unimplemented!(),
};
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
}
},
TypeAlias { .. } => (),
Binding {ref name, ref constant, ref expr} => {
let spec = PathSpecifier(name.clone());
let ty = expr.1.as_ref()
.map(|ty| self.from_anno(ty))
.unwrap_or_else(|| { self.alloc_existential_type() }); // this call to alloc_existential is OK b/c a binding only ever has one type, so if the annotation is absent, it's fine to just make one de novo
let entry = TypeContextEntry { ty, constant: *constant };
self.symbol_table.insert(spec, entry);
},
FuncDecl(ref signature, _) => {
let spec = PathSpecifier(signature.name.clone());
let ty = self.from_signature(signature);
let entry = TypeContextEntry { ty, constant: true };
self.symbol_table.insert(spec, entry);
},
}
}
}
}
fn lookup(&mut self, binding: &Rc<String>) -> Option<TypeContextEntry> {
let key = PathSpecifier(binding.clone());
self.symbol_table.get(&key).map(|entry| entry.clone())
}
pub fn debug_symbol_table(&self) -> String {
format!("Symbol table:\n {:?}\nEvar table:\n{:?}", self.symbol_table, self.evar_table)
}
fn alloc_existential_type(&mut self) -> Type {
let ret = Type::TVar(TypeVar::Exist(self.existential_type_label_count));
self.existential_type_label_count += 1;
ret
}
fn from_anno(&mut self, anno: &TypeName) -> Type {
use self::Type::*;
use self::TypeConst::*;
match anno {
&TypeName::Singleton(TypeSingletonName { ref name, .. }) => {
match name.as_ref().as_ref() {
"Int" => TConst(Integer),
"Float" => TConst(Float),
"Bool" => TConst(Boolean),
"String" => TConst(StringT),
s => TVar(TypeVar::Univ(Rc::new(format!("{}",s)))),
}
},
&TypeName::Tuple(ref items) => {
if items.len() == 1 {
TConst(Unit)
} else {
TConst(Bottom)
}
}
}
}
fn from_signature(&mut self, sig: &Signature) -> Type {
use self::Type::*;
use self::TypeConst::*;
//TODO this won't work properly until you make sure that all (universal) type vars in the function have the same existential type var
// actually this should never even put existential types into the symbol table at all
//this will crash if more than 5 arg function is used
let names = vec!["a", "b", "c", "d", "e", "f"];
let mut idx = 0;
let mut get_type = || { let q = TVar(TypeVar::Univ(Rc::new(format!("{}", names.get(idx).unwrap())))); idx += 1; q };
let return_type = sig.type_anno.as_ref().map(|anno| self.from_anno(&anno)).unwrap_or_else(|| { get_type() });
if sig.params.len() == 0 {
TFunc(Box::new(TConst(Unit)), Box::new(return_type))
} else {
let mut output_type = return_type;
for p in sig.params.iter() {
let p_type = p.1.as_ref().map(|anno| self.from_anno(anno)).unwrap_or_else(|| { get_type() });
output_type = TFunc(Box::new(p_type), Box::new(output_type));
}
output_type
}
}
pub fn type_check(&mut self, ast: &AST) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
let mut last = TConst(Unit);
for statement in ast.0.iter() {
match statement {
&Statement::Declaration(ref _decl) => {
//return Err(format!("Declarations not supported"));
},
&Statement::ExpressionStatement(ref expr) => {
last = self.infer(expr)?;
}
}
}
Ok(last)
}
fn infer(&mut self, expr: &Expression) -> TypeCheckResult {
match (&expr.0, &expr.1) {
(exprtype, &Some(ref anno)) => {
let tx = self.infer_no_anno(exprtype)?;
let ty = self.from_anno(anno);
self.unify(tx, ty)
},
(exprtype, &None) => self.infer_no_anno(exprtype),
}
}
fn infer_no_anno(&mut self, ex: &ExpressionType) -> TypeCheckResult {
use self::ExpressionType::*;
use self::Type::*;
use self::TypeConst::*;
Ok(match ex {
&IntLiteral(_) => TConst(Integer),
&FloatLiteral(_) => TConst(Float),
&StringLiteral(_) => TConst(StringT),
&BoolLiteral(_) => TConst(Boolean),
&Value(ref name, _) => {
self.lookup(name)
.map(|entry| entry.ty)
.ok_or(format!("Couldn't find {}", name))?
},
&BinExp(ref op, ref lhs, ref rhs) => {
let t_lhs = self.infer(lhs)?;
match self.infer_op(op)? {
TFunc(t1, t2) => {
let _ = self.unify(t_lhs, *t1)?;
let t_rhs = self.infer(rhs)?;
let x = *t2;
match x {
TFunc(t3, t4) => {
let _ = self.unify(t_rhs, *t3)?;
*t4
},
_ => return Err(format!("Not a function type either")),
}
},
_ => return Err(format!("Op {:?} is not a function type", op)),
}
},
&Call { ref f, ref arguments } => {
let tf = self.infer(f)?;
let targ = self.infer(arguments.get(0).unwrap())?;
match tf {
TFunc(box t1, box t2) => {
let _ = self.unify(t1, targ)?;
t2
},
_ => return Err(format!("Not a function!")),
}
},
_ => TConst(Bottom),
})
}
fn infer_op(&mut self, op: &Operation) -> TypeCheckResult {
use self::Type::*;
use self::TypeConst::*;
macro_rules! binoptype {
($lhs:expr, $rhs:expr, $out:expr) => { TFunc(Box::new($lhs), Box::new(TFunc(Box::new($rhs), Box::new($out)))) };
}
Ok(match (*op.0).as_ref() {
"+" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"++" => binoptype!(TConst(StringT), TConst(StringT), TConst(StringT)),
"-" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"*" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"/" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
"%" => binoptype!(TConst(Integer), TConst(Integer), TConst(Integer)),
_ => TConst(Bottom)
})
}
fn unify(&mut self, t1: Type, t2: Type) -> TypeCheckResult {
use self::Type::*;
use self::TypeVar::*;
println!("Calling unify with `{:?}` and `{:?}`", t1, t2);
match (&t1, &t2) {
(&TConst(ref c1), &TConst(ref c2)) if c1 == c2 => Ok(TConst(c1.clone())),
(&TFunc(ref t1, ref t2), &TFunc(ref t3, ref t4)) => {
let t5 = self.unify(*t1.clone().clone(), *t3.clone().clone())?;
let t6 = self.unify(*t2.clone().clone(), *t4.clone().clone())?;
Ok(TFunc(Box::new(t5), Box::new(t6)))
},
(&TVar(Univ(ref a)), &TVar(Univ(ref b))) => {
if a == b {
Ok(TVar(Univ(a.clone())))
} else {
Err(format!("Couldn't unify universal types {} and {}", a, b))
}
},
//the interesting case!!
(&TVar(Exist(ref a)), ref t2) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t1) => self.unify(t1.clone().clone(), t2.clone().clone()),
None => {
self.evar_table.insert(*a, t2.clone().clone());
Ok(t2.clone().clone())
} | },
(ref t1, &TVar(Exist(ref a))) => {
let x = self.evar_table.get(a).map(|x| x.clone());
match x {
Some(ref t2) => self.unify(t2.clone().clone(), t1.clone().clone()),
None => {
self.evar_table.insert(*a, t1.clone().clone());
Ok(t1.clone().clone())
}
}
},
_ => Err(format!("Types {:?} and {:?} don't unify", t1, t2))
}
}
}
#[cfg(test)]
mod tests {
use super::{Type, TypeVar, TypeConst, TypeContext};
use super::Type::*;
use super::TypeConst::*;
use schala_lang::parsing::{parse, tokenize};
macro_rules! type_test {
($input:expr, $correct:expr) => {
{
let mut tc = TypeContext::new();
let ast = parse(tokenize($input)).0.unwrap() ;
tc.add_symbols(&ast);
assert_eq!($correct, tc.type_check(&ast).unwrap())
}
}
}
#[test]
fn basic_inference() {
type_test!("30", TConst(Integer));
type_test!("fn x(a: Int): Bool {}; x(1)", TConst(Boolean));
}
} | } | random_line_split |
chinpnr_account_pull.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import pymysql
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='bankbyjiajia', charset="utf8",
use_unicode=True)
def insert(data):
# Get an operation cursor with the cursor() method
cursor = db.cursor()
# SQL INSERT statement
sql = "INSERT INTO tb_chinapnr_account_detail(create_time, \
serial_number, usr_id, user_name, acct_type,debit_or_credit_mark,tran_amount,free_amount,acct_amount,in_usr_id,buss_type,des_note) \
VALUES ('%s', '%s', '%s', '%s', '%s','%s','%s','%s','%s','%s','%s','%s')" % \
(data[0], data[1], data[2], data[3], data[4], data[5], data[6].replace(',', ''), data[7].replace(',', ''),
data[8].replace(',', ''), data[9], data[10], data[11])
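# NOTE: interpolating values into the SQL string with % breaks on data that
# contains quotes and is open to SQL injection; the safer pattern is
# cursor.execute with %s placeholders and a parameter tuple, e.g.
# cursor.execute(sql_with_placeholders, tuple(data)).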
# try:
# 执行sql语句
cursor.execute(sql)
# 提交到数据库执行
db.commit()
# except:
# 发生错误时回滚
# db.rollback()
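# 下面是一个假设性的参数化替代写法 (仅为示意, 未被调用): 用占位符代替字符串拼接, 避免 SQL 注入。
# 函数名 insert_safe 为示意, 列顺序沿用上面的 insert()。
def insert_safe(data):
    cursor = db.cursor()
    sql = ("INSERT INTO tb_chinapnr_account_detail(create_time, serial_number, usr_id, user_name, "
           "acct_type, debit_or_credit_mark, tran_amount, free_amount, acct_amount, in_usr_id, "
           "buss_type, des_note) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    row = list(data)
    # 金额字段去掉千分位逗号后再入库
    for k in (6, 7, 8):
        row[k] = row[k].replace(',', '')
    cursor.execute(sql, row)
    db.commit()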
usrids = ['6000060269448244', '6000060269448119', '6000060269453923', '6000060269456948', '6000060269455093',
'6000060269455994', '6000060269459071', '6000060269455869', '6000060269456118', '6000060269456261',
'6000060269457616', '6000060269462708', '6000060269461736', '6000060269463618', '6000060269652085',
'6000060269469033', '6000060269480234', '6000060269480225', '6000060269488245', '6000060269554011',
'6000060269572475', '6000060269586521', '6000060269591533', '6000060269593773', '6000060269693585',
'6000060269697796', '6000060269696840', '6000060269699222', '6000060269764954', '6000060269743638',
'6000060269785094', '6000060269786761', '6000060269810654', '6000060269826745', '6000060269918708',
'6000060269935173', '6000060269938241', '6000060269943716', '6000060269968627', '6000060269971043',
'6000060269974255', '6000060269991218', '6000060269996017', '6000060270007753', '6000060269998792',
'6000060270012462', '6000060270016379', '6000060273437908', '6000060270080487', '6000060270084474',
'6000060270088461', '6000060270122389', '6000060270120452', '6000060270130888', '6000060270167777',
'6000060270178319', '6000060270182545', '6000060270187327', '6000060270193123', '6000060270194961',
'6000060270196585', '6000060270197334', '6000060270207724', '6000060270245666', '6000060270250454',
'6000060270290465', '6000060270326597', '6000060270329727', '6000060270457918', '6000060270711545',
'6000060270777172', '6000060270787884', '6000060270839686', '6000060270979347', '6000060271138986',
'6000060271413848', '6000060271429555', '6000060271431891', '6000060271441461', '6000060271607309',
'6000060271765842', '6000060272137958', '6000060272216676', '6000060272235138', '6000060272451778',
'6000060272579695', '6000060272620195', '6000060272734740', '6000060272779728', '6000060272799975',
'6000060272947208', '6000060273017933', '6000060273037378', '6000060273060715', '6000060273155882',
'6000060273210947', '6000060273239329', '6000060273302928', '6000060273307834', '6000060273329213',
'6000060273503970', '6000060273545952', '6000060273554273', '6000060273573797', '6000060273581476',
'6000060273617081', '6000060274035680', '6000060274038213', '6000060274239176', '6000060274334081',
'6000060274348254', '6000060274510041', '6000060274544880', '6000060274532900', '6000060274624847',
'6000060274628594', '6000060274654234', '6000060274684899', '6000060274750102', '6000060274750219',
'6000060274857284', '6000060274897106', '6000060274974772', '6000060281109604', '6000060275043945',
'6000060275068712', '6000060275074965', '6000060275083919', '6000060275148351', '6000060275157082',
'6000060275207206', '6000060275277050', '6000060275367596', '6000060275397705', '6000060275426041',
'6000060275440999', '6000060275465481', '6000060275543272', '6000060275603634', '6000060275839738',
'6000060275852561', '6000060275977669', '6000060276008278', '6000060276077924', '6000060276088912',
'6000060276095165', '6000060276333693', '6000060276564979', '6000060276506649', '6000060276541076',
'6000060276697942', '6000060276699325', '6000060276705498', '6000060276803541', '6000060276890992',
'6000060277250182', '6000060277256024', '6000060277353197', '6000060277439532', '6000060277397532',
'6000060277416897', '6000060277551776', '6000060278336455', '6000060278760851', '6000060280233099',
'6000060280355359', '6000060280459078', '6000060280462643', '6000060280616069', '6000060280638063',
'6000060280807932', '6000060280816352', '6000060280827876', '6000060281093176', '6000060281190999',
'6000060281237413', '6000060281349695', '6000060281474013', '6000060281607816', '6000060281871398',
'6000060281894104', '6000060281940983', '6000060281943249', '6000060281948468', '6000060281951239',
'6000060282077968', '6000060282137644', '6000060282182522', '6000060282311535', '6000060282710505',
'6000060282717438', '6000060282742464', '6000060282989858', '6000060283127582', '6000060283130426',
'6000060283534963', '6000060283674197', '6000060283766962', '6000060283907622', '6000060284026975',
'6000060284270807', '6000060284481606', '6000060284562617', '6000060284596038', '6000060284630143',
'6000060284785850', '6000060284935948', '6000060284839400', '6000060284931568', '6000060285006663',
'6000060285020576', '6000060285032126', '6000060285046175', '6000060285140278', '6000060285234195',
'6000060285338298', '6000060285405642', '6000060285407551', '6000060285416934', '6000060285740076',
'6000060285780870', '6000060285804863', '6000060285887505', '6000060285912666', '6000060285968865',
'6000060285986006', '6000060286048091', '6000060286091596', '6000060286117523', '6000060286187430',
'6000060286197465', '6000060286349444', '6000060286369920', '6000060286403660', '6000060286413551',
'6000060286417968', '6000060286426949', '6000060286533431', '6000060286556059', '6000060286595864',
'6000060286670372', '6000060286823653', '6000060286954351', '6000060286957811', '6000060286982865',
'6000060287011733', '6000060287029653', '6000060287235449', '6000060287231096', '6000060287352204',
'6000060287417868', '6000060287772938', '6000060287798974', '6000060287808918', '6000060287904635',
'6000060287977744', '6000060288128045', '6000060288346997', '6000060288530136', '6000060288602727',
'6000060288963837', '6000060289368328', '6000060289386415', '6000060289482294', '6000060289911927',
'6000060290281302', '6000060290300265', '6000060290550360', '6000060290686839', '6000060291077380',
'6000060291243806', '6000060291340862', '6000060291431540', '6000060291483681', '6000060291553294',
'6000060291706959', '6000060292219430', '6000060292566439', '6000060292741605', '6000060293019252',
'6000060293381519', '6000060293320112', '6000060293384730', '6000060293387871', '6000060293694236',
'6000060293457750', '6000060293507340', '6000060293611888', '6000060293621190', '6000060295043630',
'6000060295245128', '6000060295298124', '6000060295371605', '6000060295427413', '6000060295767894',
'6000060295876437']
# usrids = ['6000060269448244', '6000060269448119', '6000060269652085']
import time
import requests
from lxml import html
browser = webdriver.Chrome()
browser.get("https://wealth.cloudpnr.com/p2padmin/")
# 避免用 input 作变量名遮蔽内建函数 input(), 否则下面的人工输入验证码会出错
login_input = browser.find_element_by_id("login_operator_id")
login_input.send_keys("mayanbin0302@163.com")
# login_input.send_keys(Keys.ENTER)
password_input = browser.find_element_by_id("login_password")
password_input.send_keys("972506")
# password_input.send_keys(Keys.ENTER)
# 人工查看浏览器中显示的验证码并输入
str1 = input("Enter your input: ")
captcha_input = browser.find_element_by_id("captcha")
captcha_input.send_keys(str1)
captcha_input.send_keys(Keys.ENTER)
# 等待时间
wait = WebDriverWait(browser, 10)
wait.until(EC.presence_of_element_located((By.CLASS_NAME, "header-infos")))
# print(browser.current_url)
# print(browser.get_cookies())
# print(browser.page_source)
# time.sleep(10)
browser.get('https://wealth.cloudpnr.com/p2padmin/report/index/report/id/500005')
# tree = html.fromstring(browser.page_source)
# data = ''.join(tree.xpath('//span[contains(text(),"客户账户明细查询")]/text()'))
wait.until(EC.presence_of_element_located((By.CLASS_NAME, "main-content-title")))
# 点击查询
query = browser.find_element_by_xpath('//a[@class="btn btn-primary ajax-get-data"]')
# 查询开始时间
date_start_input = browser.find_element_by_name('date_from')
# 查询结束时间
date_end_input = browser.find_element_by_name('date_to')
# 客户号
cust_input = browser.find_element_by_name('custId')
for usrid in usrids:
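# 对每个客户分别查询两个日期区间; num 统计该客户累计入库的行数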
num = 0
cust_input.clear()
cust_input.send_keys(usrid)
queryDate = [['2018-03-12', '2018-06-10'], ['2018-06-11', '2018-07-31']]
for dateq in queryDate:
date_start_input.clear()
date_start_input.send_keys(dateq[0])
date_end_input.clear()
date_end_input.send_keys(dateq[1])
query.click()
# "btn ajax-get-data btn-disabled"
wait.until(EC.presence_of_element_located((By.XPATH, '//a[@class="btn ajax-get-data btn-primary"]')))
# 获取总页数
size = browser.find_element_by_xpath('//p[@class="page"]/span/strong').text
# print(size)
if int(size) > 0:
# 数据总页数
total = browser.find_element_by_xpath('//p[@class="page"]/input[@max]').get_attribute('max')
for i in range(1, int(total) + 1):
if i != 1:
next_btn = browser.find_element_by_xpath('//p[@class="page"]/a[@title="Next"]')
next_btn.click()
wait.until(EC.presence_of_element_located(
(By.XPATH, '//p[@class="page"]/a[@class="current" and @title="' + str(i) + '"]')))
# pageindex = browser.find_element_by_xpath('//div[@class="table dis"]/table/tbody/tr')
trs = browser.find_elements_by_xpath('//div[@class="table dis"]/table/tbody/tr')
for tr in trs:
one = list()
tds = tr.find_elements_by_xpath('.//td')
for td in tds[1:]:  # 跳过第一列
one.append(td.text)
insert(one)
# print(e.text)
num += 1
# pageindex = browser.find_element_by_xpath('//p[@class="page"]/a[@class="current"]').text
# if i<total:
print(usrid + ':' + str(num))
# print(browser.page_source)
browser.close()
# 关闭数据库连接
db.close()
viewer.js | "use strict";
var Viewer = {
// The arguments received
args: {},
// Registered game loaders
supportedGames: {},
// Current loaded game
game: null,
speed: 3.0, // Turns per second
// msPerFrame: 16, // Milliseconds between updates
lastTick: 0,
lastRender: 0,
nextTick: null,
playing: true,
// Current time (round)
time: 0.0,
// HTML nodes (for fast access)
overlay: null,
infoDivs: [],
divRound: null,
divNarration: null,
btnPlayPause: null,
btnStop: null,
// Canvas & contexts
canvas: null,
ctx: null,
gl: null,
// Audio
music: null,
// Number of assets to be loaded
loading: 0,
mousePressed: false,
lastMouse: [0.0, 0.0],
registerGame: function(name, gameClass) {
this.supportedGames[name] = gameClass;
},
init: function(args) {
this.canvas = document.getElementById("Canvas");
window.addEventListener("resize", function(ev) { Viewer.resized(ev); }, false);
this.divRound = $("Round");
this.divRound.childNodes[2].addEventListener("click", function(ev) {
Viewer.setTime(ev.layerX / ev.target.clientWidth);
}, false);
this.divNarration = $("Narration");
this.btnPlayPause = $("PlayPause");
this.btnPlayPause.onclick = function() { Viewer.togglePlay(); };
this.btnStop = $("Stop");
this.btnStop.onclick = function() { Viewer.stop(); };
if (true) { // 3D
try {
this.gl = this.canvas.getContext("webgl") || this.canvas.getContext("experimental-webgl");
}
catch (ex) {}
if (!this.gl) {
this.showError("No WebGL! :(",
'If you\'re using chrome, try to go to <u class="selectable">chrome://flags</u> and enable WebGL.\n'+
'Otherwise, try upgrading your browser.'
);
return;
}
}
else { // 2D
try {
this.ctx = this.canvas.getContext("2d");
}
catch (ex) {}
if (!this.ctx) { this.showError("Canvas not supported! :(", "Try upgrading your browser."); return; }
}
this.args = getArguments();
this.playing = true;
if (this.args.wait || this.args.nostart || (this.args.start && this.args.start.toLowerCase() == "no")) {
this.playing = false;
}
this.updateButtons();
if (this.args.speed) {
var speed = parseFloat(this.args.speed);
if (!isNaN(speed)) this.speed = speed;
}
// Load game, if passed by url
var game = null;
if (this.args.web) { // Request game with ajax
game = "index.php?game="+this.args.web+"&p="+this.args.p+"&u=1";
}
else if (this.args.sub) { // Arguments passed by Jutge
if (location.host) game = location.protocol + "//" + location.host + "/";
else game = "https://battle-royale-eda.jutge.org/";
if (this.args.nbr) {
game += "?cmd=lliuraments&sub="+this.args.sub+"&nbr="+this.args.nbr+"&download=partida";
}
else game += "?cmd=partida&sub="+this.args.sub+"&download=partida";
}
else game = this.args.game;
if (!game) {
$("File").addEventListener("change", function(ev) {
if (ev.target.files.length > 0) {
Viewer.loadFromFile(ev.target.files[0]);
}
}, false);
this.showOverlay("Upload");
}
else this.loadFromURL(game);
},
showOverlay: function(id) {
var ov = $("Overlay");
for (var i = 0; i < ov.childNodes.length; ++i) {
var c = ov.childNodes.item(i);
if (c.nodeValue === null) {
c.style.display = (c.getAttribute("id") == id)? "block" : "none";
}
}
},
showError: function(msg, desc) {
var div = $("Error");
div.childNodes[1].textContent = msg;
if (desc) div.childNodes[5].innerHTML = desc;
this.showOverlay("Error");
},
loadFromFile: function(file) {
if (!file) return;
this.loadStart();
var reader = new FileReader();
reader.onload = function(ev) { Viewer.gameLoaded(ev.target.result); };
reader.readAsText(file);
},
loadFromURL: function(url) {
this.loadStart();
try {
ajaxGet(url, function(s) { Viewer.gameLoaded(s); });
} catch (e) {
this.showError("UNABLE TO LOAD GAME", e);
}
},
loadStart: function() {
this.loading = 1;
this.showOverlay("Loading");
},
gameLoaded: function(input) {
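// The first whitespace-separated token of the game file selects the loader registered via registerGame().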
try {
var lines = input.replace("\r", "").split("\n");
if (!input || lines.length == 1) throw "404: Game Not Found!";
var v = splitWhitespace(lines[0]);
var gameClass = this.supportedGames[v[0]];
if (!gameClass) throw "Unsupported game: \""+lines[0]+"\"!";
this.args.version = v;
this.game = new gameClass(this.args, lines);
this.theme = this.game.renderer; //game.getTheme(this.args);
}
catch (ex) {
this.showError("Game load error", ex);
return;
}
this.theme.preload(this);
this.loaded();
},
loaded: function() {
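// Called once per finished asset; setup proceeds only after the last pending load completes.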
if (--this.loading > 0) return;
this.renderer = new Renderer(this.gl, this.canvas);
this.theme.init(this.renderer, this.game);
this.resized();
document.body.addEventListener("keydown", function(ev) { Viewer.keyPressed(ev); }, false);
this.canvas.addEventListener("mousedown", function(ev) { Viewer.mouseDown(ev); }, false);
this.canvas.addEventListener("mouseup", function(ev) { Viewer.mouseUp(ev); }, false);
this.canvas.addEventListener("mousemove", function(ev) { Viewer.mouseMove(ev); }, false);
this.canvas.addEventListener("mouseout", function(ev) { Viewer.mouseOut(ev); }, false);
this.canvas.addEventListener("mousewheel", function(ev) { Viewer.mouseWheel(ev); }, false);
var root = $("Players");
for (var i = 0; i < this.game.nPlayers; ++i) {
var div = document.createElement("div");
div.className = "player";
var s =
'<div><span class="color"></span><span class="name"></span></div>'+
'<p>Score: <span class="score"></span></p>';
for (var j = 0; j < this.game.playerInfo.length; ++j) {
s += '<p>' + this.game.playerInfo[j][0] + ': <span></span></p>';
}
div.innerHTML = s;
root.appendChild(div);
this.infoDivs.push(div);
div.childNodes[0].childNodes[1].textContent = this.game.names[i];
if (this.game.names[i] == "Tonto") div.childNodes[0].childNodes[1].className += " bot";
var c = this.theme.playerColors[i];
div.childNodes[0].childNodes[0].style.backgroundColor = "rgb("+Math.floor(c[0]*255.0)+", "+Math.floor(c[1]*255.0)+", "+Math.floor(c[2]*255.0)+")";
}
// Ready
$("Overlay").style.display = "none";
if (this.music) this.music.play();
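// Kick off the render/update loop.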
this.tick();
},
tickCallback: function(t) { Viewer.tick(t); },
tick: function(t) {
var dt = this.lastTick? t - this.lastTick : 0;
if (isNaN(t)) this.lastTick = 0;
else this.lastTick = t;
// Update info
var r = Math.max(0, Math.min(this.game.nRounds, Math.floor(this.time)));
this.divRound.childNodes[1].textContent = r + " / " + this.game.nRounds;
this.divRound.childNodes[0].style.width = (100 * Math.max(0, Math.min(1, this.time / this.game.nRounds))) + "%";
var round = this.game.rounds[Math.min(this.game.nRounds - 1, r)];
var winning = []; var maxScore = -1;
for (var i = 0; i < this.game.nPlayers; ++i) {
var div = this.infoDivs[i];
div.childNodes[1].childNodes[1].textContent = round.score[i];
for (var j = 0; j < this.game.playerInfo.length; ++j) {
div.childNodes[2 + j].childNodes[1].textContent = this.game.playerInfo[j][1](round, i);
}
if (round.score[i] > maxScore) { maxScore = round.score[i]; winning = [i]; }
else if (round.score[i] == maxScore) { winning.push(i); }
}
var msg = "";
if (r > 0) {
if (winning.length == 1) msg = this.game.names[winning[0]];
else {
msg = this.game.names[winning[0]];
for (var i = 1; i < winning.length; ++i) msg += " & " + this.game.names[winning[i]];
}
if (r >= this.game.nRounds) {
if (winning.length == 1) msg += " won!";
else msg += " tied!";
}
else {
if (winning.length == 1) msg += " is winning!";
else msg += " are tied!";
}
}
this.divNarration.textContent = msg;
this.theme.W = this.canvas.width;
this.theme.H = this.canvas.height;
this.theme.render(this.game, this.time);
if (this.playing) this.time += dt * this.speed / 1000;
// Schedule next tick
this.nextTick = window.requestAnimationFrame(this.tickCallback);
},
resized: function() {
this.canvas.width = window.innerWidth - 4;
this.canvas.height = window.innerHeight - 4;
},
keyPressed: function(ev) {
var code = ((ev.keyCode !== undefined)? ev.keyCode : ev.which);
switch (code) {
case 32: // Space
this.togglePlay(); break;
case 33: // Page Up
this.advance(-10); break;
case 34: // Page Down
this.advance(10); break;
case 35: // End
this.time = this.game.nRounds; break;
case 36: // Start
this.time = 0; break;
case 37: // Left
case 38: // Up
this.advance(-1); break;
case 39: // Right
case 40: // Down
this.advance(1); break;
case 72: // 'h'
this.showHelp(); break;
case 76: // 'l'
var newSpeed = prompt("New speed? (turns per second)");
if (newSpeed) {
newSpeed = parseFloat(newSpeed);
if (!isNaN(newSpeed)) this.speed = Math.max(0.1, Math.min(10.0, newSpeed));
}
break;
case 80: // 'p'
this.toggleFullscreen();
break;
default:
if (this.theme && this.theme.keyPressed) this.theme.keyPressed(code);
}
},
mouseDown: function(ev) {
this.lastMouse[0] = ev.clientX;
this.lastMouse[1] = ev.clientY;
this.mousePressed = true;
},
mouseMove: function(ev) {
if (this.mousePressed && this.theme && this.theme.mouseMoved) {
var dx = ev.clientX - this.lastMouse[0];
var dy = ev.clientY - this.lastMouse[1];
this.lastMouse[0] = ev.clientX;
this.lastMouse[1] = ev.clientY;
this.theme.mouseMoved(dx, dy);
}
},
mouseUp: function(ev) {
this.mousePressed = false;
},
mouseOut: function(ev) {
this.mousePressed = false;
},
mouseWheel: function(ev) {
if (this.theme && this.theme.mouseWheel) this.theme.mouseWheel(ev.wheelDelta);
},
updateButtons: function() {
this.btnPlayPause.textContent = this.playing? "Pause" : "Play";
},
stop: function() {
this.pause();
this.time = 0;
this.updateButtons();
},
pause: function() {
this.playing = false;
this.updateButtons();
},
play: function() {
this.playing = true;
this.updateButtons();
},
togglePlay: function() {
if (this.playing) this.pause();
else this.play();
},
advance: function(t) {
this.pause();
this.time = Math.max(0, Math.min(this.game.nRounds, Math.round(this.time) + t));
},
setTime: function(t) {
var wasPlaying = this.playing;
this.pause();
this.time = this.game.nRounds * t;
if (wasPlaying) this.play();
},
inFullscreen: function() {
return !(!document.fullscreenElement && !document.mozFullScreenElement && !document.webkitFullscreenElement);
},
enterFullscreen: function() {
var el = document.body;
if (el.webkitRequestFullScreen) {
el.webkitRequestFullScreen(Element.ALLOW_KEYBOARD_INPUT);
}
else if (el.mozRequestFullScreen) {
el.mozRequestFullScreen();
}
},
exitFullscreen: function() {
if (document.cancelFullScreen) document.cancelFullScreen();
else if (document.mozCancelFullScreen) document.mozCancelFullScreen();
else if (document.webkitCancelFullScreen) document.webkitCancelFullScreen();
},
toggleFullscreen: function() {
if (this.inFullscreen()) this.exitFullscreen();
else this.enterFullscreen();
},
loadFile: function(url) {
++this.loading;
var file = { content: "" };
try {
ajaxGet(url, function(s) {
file.content = s;
Viewer.loaded();
});
} catch (e) {
this.showError("UNABLE TO LOAD FILE", "Could not load file \""+url+"\"<br/>"+e);
}
return file;
},
loadImage: function(url) {
++this.loading;
var img = new Image();
img.onload = function() { Viewer.loaded(); };
img.src = url;
return img;
},
loadModel: function(url) {
++this.loading;
var mdl = new OBJ();
try {
ajaxGet(url, function(s) {
mdl.load(s);
Viewer.loaded();
});
} catch (e) {
this.showError("UNABLE TO LOAD MODEL", "Could not load model \""+url+"\"<br/>"+e);
}
return mdl;
},
loadMusic: function(url) {
++this.loading;
this.stopMusic();
this.music = new Audio();
this.music.loop = true;
this.music.addEventListener("canplay", function() { Viewer.loaded(); }, false);
this.music.src = url;
},
stopMusic: function() {
if (this.music != null) {
this.music.pause();
this.music = null;
}
},
showHelp: function() {
var w = 544, h = 448;
var x = (screen.width - w)/2;
var y = (screen.height - h)/2;
var win = window.open(undefined, "_blank",
"height="+h+", width="+w+", top="+y+", left="+x+
", location=0, menubar=0, status=0, scrollbars=1", false);
var html = '<!DOCTYPE html><head><title>Help</title></head><link rel="stylesheet" type="text/css" href="viewer3D.css"><body class="help">'+
'<h1>Help</h1><ul>'+
'<li><b>Space</b>: Play/Pause</li>'+
'<li><b>Start</b>: Go to the first turn</li>'+
'<li><b>End</b>: Go to the last turn</li>'+
'<li><b>Left</b>, <b>Up</b>: Previous turn</li>'+
'<li><b>Right</b>, <b>Down</b>: Next turn</li>'+
'<li><b>Page Up</b>: Go back 10 turns</li>'+
'<li><b>Page Down</b>: Advance 10 turns</li>'+
'<li><b>h</b>: Show this help</li>'+
'<li><b>l</b>: Change speed</li>'+
'<li><b>0</b>-<b>9</b>: Change between cams</li>'+
'<li><b>q</b>: Reset current cam</li>'+
'</ul><h2>Cam Controls</h2><ul>'+
'<li><b>Cam 0</b> (inspection): Maintain LMB and move the mouse to rotate the scene. Mouse wheel to zoom in/out.</li>'+
'<li><b>Cam 9</b> (free movement): Move with WASD and RF, change the viewing direction with the mouse while pressing LMB.</li>'+
'</ul>'+
'</body>';
win.document.write(html);
win.document.close();
if (win.focus) win.focus();
}
};
window.onload = function() { Viewer.init(); };
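// A minimal sketch of the loader contract implied above (the name DemoGame and the empty
// bodies are illustrative assumptions, not part of the original file): the first token of
// the game file selects the class registered here, and its `renderer` must expose the
// playerColors/preload/init/render members that Viewer uses.
function DemoGame(args, lines) {
  this.nPlayers = 0;    // number of players parsed from `lines`
  this.nRounds = 0;     // number of rounds parsed from `lines`
  this.names = [];      // player names
  this.playerInfo = []; // extra stats: [label, function(round, player) -> value]
  this.rounds = [];     // per-round state; each entry needs a `score` array
  this.renderer = {
    playerColors: [],                 // one [r, g, b] triple (0..1) per player
    preload: function(viewer) {},     // queue assets via viewer.loadImage/loadModel/...
    init: function(renderer, game) {},
    render: function(game, time) {}   // draw the state at fractional round `time`
  };
}
Viewer.registerGame("Demo", DemoGame);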
viewer.js | "use strict";
var Viewer = {
// The arguments received
args: {},
// Registered game loaders
supportedGames: {},
// Current loaded game
game: null,
speed: 3.0, // Turns per second
// msPerFrame: 16, // Milliseconds between updates
lastTick: 0,
lastRender: 0,
nextTick: null,
playing: true,
// Current time (round)
time: 0.0,
// HTML nodes (for fast access)
overlay: null,
infoDivs: [],
divRound: null,
divNarration: null,
btnPlayPause: null,
btnStop: null,
// Canvas & contexts
canvas: null,
ctx: null,
gl: null,
// Audio
music: null,
// Number of assets to be loaded
loading: 0,
mousePressed: false,
lastMouse: [0.0, 0.0],
registerGame: function(name, gameClass) {
this.supportedGames[name] = gameClass;
},
init: function(args) {
this.canvas = document.getElementById("Canvas");
window.addEventListener("resize", function(ev) { Viewer.resized(ev); }, false);
this.divRound = $("Round");
this.divRound.childNodes[2].addEventListener("click", function(ev) {
Viewer.setTime(ev.layerX / ev.target.clientWidth);
}, false);
this.divNarration = $("Narration");
this.btnPlayPause = $("PlayPause");
this.btnPlayPause.onclick = function() { Viewer.togglePlay(); };
this.btnStop = $("Stop");
this.btnStop.onclick = function() { Viewer.stop(); };
if (true) { // 3D
try {
this.gl = this.canvas.getContext("webgl") || this.canvas.getContext("experimental-webgl");
}
catch (ex) {}
if (!this.gl) {
this.showError("No WebGL! :(",
'If you\'re using chrome, try to go to <u class="selectable">chrome://flags</u> and enable WebGL.\n'+
'Otherwise, try upgrading your browser.'
);
return;
}
}
else { // 2D
try {
this.ctx = this.canvas.getContext("2d");
}
catch (ex) {}
if (!this.ctx) { this.showError("Canvas not supported! :(", "Try upgrading your browser."); return; }
}
this.args = getArguments();
this.playing = true;
if (this.args.wait || this.args.nostart || (this.args.start && this.args.start.toLowerCase() == "no")) {
this.playing = false;
}
this.updateButtons();
if (this.args.speed) {
var speed = parseFloat(this.args.speed);
if (!isNaN(speed)) this.speed = speed;
}
// Load game, if passed by url
var game = null;
if (this.args.web) { // Request game with ajax
game = "index.php?game="+this.args.web+"&p="+this.args.p+"&u=1";
}
else if (this.args.sub) { // Arguments passed by Jutge
if (location.host) game = location.protocol + "//" + location.host + "/";
else game = "https://battle-royale-eda.jutge.org/";
if (this.args.nbr) {
game += "?cmd=lliuraments&sub="+this.args.sub+"&nbr="+this.args.nbr+"&download=partida";
}
else game += "?cmd=partida&sub="+this.args.sub+"&download=partida";
}
else game = this.args.game;
if (!game) {
$("File").addEventListener("change", function(ev) {
if (ev.target.files.length > 0) {
Viewer.loadFromFile(ev.target.files[0]);
}
}, false);
this.showOverlay("Upload");
}
else this.loadFromURL(game);
},
showOverlay: function(id) {
var ov = $("Overlay");
for (var i = 0; i < ov.childNodes.length; ++i) {
var c = ov.childNodes.item(i);
if (c.nodeValue === null) {
c.style.display = (c.getAttribute("id") == id)? "block" : "none";
}
}
},
showError: function(msg, desc) {
var div = $("Error");
div.childNodes[1].textContent = msg;
if (desc) div.childNodes[5].innerHTML = desc;
this.showOverlay("Error");
},
loadFromFile: function(file) {
if (!file) return;
this.loadStart();
var reader = new FileReader();
reader.onload = function(ev) { Viewer.gameLoaded(ev.target.result); };
reader.readAsText(file);
},
loadFromURL: function(url) {
this.loadStart();
try {
ajaxGet(url, function(s) { Viewer.gameLoaded(s); });
} catch (e) {
this.showError("UNABLE TO LOAD GAME", e);
}
},
loadStart: function() {
this.loading = 1;
this.showOverlay("Loading");
},
gameLoaded: function(input) {
try {
var lines = input.replace("\r", "").split("\n");
if (!input || lines.length == 1) throw "404: Game Not Found!";
var v = splitWhitespace(lines[0]);
var gameClass = this.supportedGames[v[0]];
if (!gameClass) throw "Unsupported game: \""+lines[0]+"\"!";
this.args.version = v;
this.game = new gameClass(this.args, lines);
this.theme = this.game.renderer; //game.getTheme(this.args);
}
catch (ex) {
this.showError("Game load error", ex);
return;
}
this.theme.preload(this);
this.loaded();
},
loaded: function() {
if (--this.loading > 0) return;
this.renderer = new Renderer(this.gl, this.canvas);
this.theme.init(this.renderer, this.game);
this.resized();
document.body.addEventListener("keydown", function(ev) { Viewer.keyPressed(ev); }, false);
this.canvas.addEventListener("mousedown", function(ev) { Viewer.mouseDown(ev); }, false);
this.canvas.addEventListener("mouseup", function(ev) { Viewer.mouseUp(ev); }, false);
this.canvas.addEventListener("mousemove", function(ev) { Viewer.mouseMove(ev); }, false);
this.canvas.addEventListener("mouseout", function(ev) { Viewer.mouseOut(ev); }, false);
this.canvas.addEventListener("mousewheel", function(ev) { Viewer.mouseWheel(ev); }, false);
var root = $("Players");
for (var i = 0; i < this.game.nPlayers; ++i) {
var div = document.createElement("div");
div.className = "player";
var s =
'<div><span class="color"></span><span class="name"></span></div>'+
'<p>Score: <span class="score"></span></p>';
for (var j = 0; j < this.game.playerInfo.length; ++j) {
s += '<p>' + this.game.playerInfo[j][0] + ': <span></span></p>';
}
div.innerHTML = s;
root.appendChild(div);
this.infoDivs.push(div);
div.childNodes[0].childNodes[1].textContent = this.game.names[i];
if (this.game.names[i] == "Tonto") div.childNodes[0].childNodes[1].className += " bot";
var c = this.theme.playerColors[i];
div.childNodes[0].childNodes[0].style.backgroundColor = "rgb("+Math.floor(c[0]*255.0)+", "+Math.floor(c[1]*255.0)+", "+Math.floor(c[2]*255.0)+")";
}
// Ready
$("Overlay").style.display = "none";
if (this.music) this.music.play();
this.tick();
},
tickCallback: function(t) { Viewer.tick(t); },
tick: function(t) {
var dt = this.lastTick? t - this.lastTick : 0;
if (isNaN(t)) this.lastTick = 0;
else this.lastTick = t;
// Update info
var r = Math.max(0, Math.min(this.game.nRounds, Math.floor(this.time)));
this.divRound.childNodes[1].textContent = r + " / " + this.game.nRounds;
this.divRound.childNodes[0].style.width = (100 * Math.max(0, Math.min(1, this.time / this.game.nRounds))) + "%";
var round = this.game.rounds[Math.min(this.game.nRounds - 1, r)];
var winning = []; var maxScore = -1;
for (var i = 0; i < this.game.nPlayers; ++i) {
var div = this.infoDivs[i];
div.childNodes[1].childNodes[1].textContent = round.score[i];
for (var j = 0; j < this.game.playerInfo.length; ++j) {
div.childNodes[2 + j].childNodes[1].textContent = this.game.playerInfo[j][1](round, i);
}
if (round.score[i] > maxScore) { maxScore = round.score[i]; winning = [i]; }
else if (round.score[i] == maxScore) { winning.push(i); }
}
var msg = "";
if (r > 0) {
if (winning.length == 1) msg = this.game.names[winning[0]];
else {
msg = this.game.names[winning[0]];
for (var i = 1; i < winning.length; ++i) msg += " & " + this.game.names[winning[i]];
}
if (r >= this.game.nRounds) {
if (winning.length == 1) msg += " won!";
else msg += " tied!";
}
else {
if (winning.length == 1) msg += " is winning!";
else msg += " are tied!";
}
}
this.divNarration.textContent = msg;
this.theme.W = this.canvas.width;
this.theme.H = this.canvas.height;
this.theme.render(this.game, this.time);
if (this.playing) this.time += dt * this.speed / 1000;
// Schedule next tick
this.nextTick = window.requestAnimationFrame(this.tickCallback);
},
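// Timing sketch: requestAnimationFrame supplies a millisecond timestamp, so with
// speed = 2 turns/second and a 16.7 ms frame, time advances by 2 * 16.7 / 1000 ≈ 0.033 turns.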
resized: function() {
this.canvas.width = window.innerWidth - 4;
this.canvas.height = window.innerHeight - 4;
},
keyPressed: function(ev) {
var code = ((ev.keyCode !== undefined)? ev.keyCode : ev.which);
switch (code) {
case 32: // Space
this.togglePlay(); break;
case 33: // Page Up
this.advance(10); break;
case 34: // Page Down
this.advance(-10); break;
case 35: // End
this.time = this.game.nRounds; break;
case 36: // Home (listed as "Start" in the help)
this.time = 0; break;
case 37: // Left
case 38: // Up
this.advance(-1); break;
case 39: // Right
case 40: // Down
this.advance(1); break;
case 72: // 'h'
this.showHelp(); break;
case 76: // 'l'
var newSpeed = prompt("New speed? (turns per second)");
if (newSpeed) {
newSpeed = parseFloat(newSpeed);
if (!isNaN(newSpeed)) this.speed = Math.max(0.1, Math.min(10.0, newSpeed));
}
break;
case 80: // 'p'
this.toggleFullscreen();
break;
default:
if (this.theme && this.theme.keyPressed) this.theme.keyPressed(code);
}
},
mouseDown: function(ev) {
this.lastMouse[0] = ev.clientX;
this.lastMouse[1] = ev.clientY;
this.mousePressed = true;
},
mouseMove: function(ev) {
if (this.mousePressed && this.theme && this.theme.mouseMoved) {
var dx = ev.clientX - this.lastMouse[0];
var dy = ev.clientY - this.lastMouse[1];
this.lastMouse[0] = ev.clientX;
this.lastMouse[1] = ev.clientY;
this.theme.mouseMoved(dx, dy);
}
},
mouseUp: function(ev) {
this.mousePressed = false;
},
mouseOut: function(ev) {
this.mousePressed = false;
},
mouseWheel: function(ev) {
if (this.theme && this.theme.mouseWheel) this.theme.mouseWheel(ev.wheelDelta);
},
updateButtons: function() {
this.btnPlayPause.textContent = this.playing? "Pause" : "Play";
},
stop: function() {
this.pause();
this.time = 0;
this.updateButtons();
},
pause: function() {
this.playing = false;
this.updateButtons();
},
play: function() {
this.playing = true;
this.updateButtons();
},
togglePlay: function() {
if (this.playing) this.pause();
else this.play();
},
advance: function(t) {
this.pause();
this.time = Math.max(0, Math.min(this.game.nRounds, Math.round(this.time) + t));
},
setTime: function(t) {
var wasPlaying = this.playing;
this.pause();
this.time = this.game.nRounds * t;
if (wasPlaying) this.play();
},
inFullscreen: function() {
return !(!document.fullscreenElement && !document.mozFullScreenElement && !document.webkitFullscreenElement);
},
enterFullscreen: function() {
var el = document.body;
if (el.webkitRequestFullScreen) {
el.webkitRequestFullScreen(Element.ALLOW_KEYBOARD_INPUT);
}
else if (el.mozRequestFullScreen) {
el.mozRequestFullScreen();
}
},
exitFullscreen: function() {
if (document.cancelFullScreen) document.cancelFullScreen();
else if (document.mozCancelFullScreen) document.mozCancelFullScreen();
else if (document.webkitCancelFullScreen) document.webkitCancelFullScreen();
},
toggleFullscreen: function() {
if (this.inFullscreen()) this.exitFullscreen();
else this.enterFullscreen();
},
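// Note: only the legacy prefixed fullscreen APIs are handled above; modern browsers
// expose the unprefixed element.requestFullscreen() and document.exitFullscreen().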
loadFile: function(url) {
++this.loading;
var file = { content: "" };
try {
ajaxGet(url, function(s) {
file.content = s;
Viewer.loaded();
});
} catch (e) {
this.showError("UNABLE TO LOAD FILE ", url+"Could not load file \""+url+"\"<br/>"+e);
}
return file;
},
loadImage: function(url) {
++this.loading;
var img = new Image();
img.onload = function() { Viewer.loaded(); };
img.src = url;
return img;
},
loadModel: function(url) {
++this.loading;
var mdl = new OBJ();
try {
ajaxGet(url, function(s) {
mdl.load(s);
Viewer.loaded();
});
} catch (e) {
this.showError("UNABLE TO LOAD MODEL", "Could not load model \""+url+"\"<br/>"+e);
}
return mdl;
}, | this.music = new Audio();
this.music.loop = true;
this.music.addEventListener("canplay", function() { Viewer.loaded(); }, false);
this.music.src = url;
},
stopMusic: function() {
if (this.music != null) {
this.music.pause();
this.music = null;
}
},
showHelp: function() {
var w = 544, h = 448;
var x = (screen.width - w)/2;
var y = (screen.height - h)/2;
var win = window.open(undefined, "_blank",
"height="+h+", width="+w+", top="+y+", left="+x+
", location=0, menubar=0, status=0, scrollbars=1", false);
var html = '<!DOCTYPE html><html><head><title>Help</title><link rel="stylesheet" type="text/css" href="viewer3D.css"></head><body class="help">'+
'<h1>Help</h1><ul>'+
'<li><b>Space</b>: Play/Pause</li>'+
'<li><b>Start</b>: Go to the first turn</li>'+
'<li><b>End</b>: Go to the last turn</li>'+
'<li><b>Left</b>, <b>Up</b>: Previous turn</li>'+
'<li><b>Right</b>, <b>Down</b>: Next turn</li>'+
'<li><b>Page Up</b>: Advance 10 turns</li>'+
'<li><b>Page Down</b>: Go back 10 turns</li>'+
'<li><b>h</b>: Show this help</li>'+
'<li><b>l</b>: Change speed</li>'+
'<li><b>0</b>-<b>9</b>: Change between cams</li>'+
'<li><b>q</b>: Reset current cam</li>'+
'</ul><h2>Cam Controls</h2><ul>'+
'<li><b>Cam 0</b> (inspection): Maintain LMB and move the mouse to rotate the scene. Mouse wheel to zoom in/out.</li>'+
'<li><b>Cam 9</b> (free movement): Move with WASD and RF, change the viewing direction with the mouse while pressing LMB.</li>'+
'</ul>'+
'</body></html>';
win.document.write(html);
win.document.close();
if (win.focus) win.focus();
}
};
window.onload = function() { Viewer.init(); }; |
loadMusic: function(url) {
++this.loading;
this.stopMusic(); | random_line_split |
keyboard.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn code_to_key(code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
}
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
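// NumLock XOR Shift selects the digit: NumLock on with Shift up (or NumLock off
// with Shift held) yields e.g. "8"; otherwise the navigation key (e.g. ArrowUp) is used.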
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6, | 0x004D => Code::NumLock,
0x004E => Code::ScrollLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
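// Minimal sanity-check sketch (not part of the original druid/baseview sources):
// exercises key_mods with a hypothetical Shift+Control state bitmask.
#[cfg(test)]
mod key_mods_tests {
use super::*;
#[test]
fn decodes_shift_and_control() {
let state = (xproto::MOD_MASK_SHIFT | xproto::MOD_MASK_CONTROL) as u16;
let mods = key_mods(state);
assert!(mods.contains(Modifiers::SHIFT));
assert!(mods.contains(Modifiers::CONTROL));
assert!(!mods.contains(Modifiers::ALT));
}
}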
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
} | 0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10, | random_line_split |
keyboard.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn | (code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
}
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6,
0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10,
0x004D => Code::NumLock,
0x004E => Code::ScrollLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
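// On typical Linux/X11 setups these keycodes are the kernel evdev scancodes
// offset by 8, e.g. evdev KEY_ESC = 1 -> 0x0009 and KEY_A = 30 -> 0x0026.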
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
| code_to_key | identifier_name |
keyboard.rs | // Copyright 2020 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Baseview modifications to druid code:
// - collect functions from various files
// - update imports, paths etc
//! X11 keyboard handling
use xcb::xproto;
use keyboard_types::*;
use crate::keyboard::code_to_location;
/// Convert a hardware scan code to a key.
///
/// Note: this is a hardcoded layout. We need to detect the user's
/// layout from the system and apply it.
fn code_to_key(code: Code, m: Modifiers) -> Key {
fn a(s: &str) -> Key {
Key::Character(s.into())
}
fn s(mods: Modifiers, base: &str, shifted: &str) -> Key |
fn n(mods: Modifiers, base: Key, num: &str) -> Key {
if mods.contains(Modifiers::NUM_LOCK) != mods.contains(Modifiers::SHIFT) {
Key::Character(num.into())
} else {
base
}
}
match code {
Code::KeyA => s(m, "a", "A"),
Code::KeyB => s(m, "b", "B"),
Code::KeyC => s(m, "c", "C"),
Code::KeyD => s(m, "d", "D"),
Code::KeyE => s(m, "e", "E"),
Code::KeyF => s(m, "f", "F"),
Code::KeyG => s(m, "g", "G"),
Code::KeyH => s(m, "h", "H"),
Code::KeyI => s(m, "i", "I"),
Code::KeyJ => s(m, "j", "J"),
Code::KeyK => s(m, "k", "K"),
Code::KeyL => s(m, "l", "L"),
Code::KeyM => s(m, "m", "M"),
Code::KeyN => s(m, "n", "N"),
Code::KeyO => s(m, "o", "O"),
Code::KeyP => s(m, "p", "P"),
Code::KeyQ => s(m, "q", "Q"),
Code::KeyR => s(m, "r", "R"),
Code::KeyS => s(m, "s", "S"),
Code::KeyT => s(m, "t", "T"),
Code::KeyU => s(m, "u", "U"),
Code::KeyV => s(m, "v", "V"),
Code::KeyW => s(m, "w", "W"),
Code::KeyX => s(m, "x", "X"),
Code::KeyY => s(m, "y", "Y"),
Code::KeyZ => s(m, "z", "Z"),
Code::Digit0 => s(m, "0", ")"),
Code::Digit1 => s(m, "1", "!"),
Code::Digit2 => s(m, "2", "@"),
Code::Digit3 => s(m, "3", "#"),
Code::Digit4 => s(m, "4", "$"),
Code::Digit5 => s(m, "5", "%"),
Code::Digit6 => s(m, "6", "^"),
Code::Digit7 => s(m, "7", "&"),
Code::Digit8 => s(m, "8", "*"),
Code::Digit9 => s(m, "9", "("),
Code::Backquote => s(m, "`", "~"),
Code::Minus => s(m, "-", "_"),
Code::Equal => s(m, "=", "+"),
Code::BracketLeft => s(m, "[", "{"),
Code::BracketRight => s(m, "]", "}"),
Code::Backslash => s(m, "\\", "|"),
Code::Semicolon => s(m, ";", ":"),
Code::Quote => s(m, "'", "\""),
Code::Comma => s(m, ",", "<"),
Code::Period => s(m, ".", ">"),
Code::Slash => s(m, "/", "?"),
Code::Space => a(" "),
Code::Escape => Key::Escape,
Code::Backspace => Key::Backspace,
Code::Tab => Key::Tab,
Code::Enter => Key::Enter,
Code::ControlLeft => Key::Control,
Code::ShiftLeft => Key::Shift,
Code::ShiftRight => Key::Shift,
Code::NumpadMultiply => a("*"),
Code::AltLeft => Key::Alt,
Code::CapsLock => Key::CapsLock,
Code::F1 => Key::F1,
Code::F2 => Key::F2,
Code::F3 => Key::F3,
Code::F4 => Key::F4,
Code::F5 => Key::F5,
Code::F6 => Key::F6,
Code::F7 => Key::F7,
Code::F8 => Key::F8,
Code::F9 => Key::F9,
Code::F10 => Key::F10,
Code::NumLock => Key::NumLock,
Code::ScrollLock => Key::ScrollLock,
Code::Numpad0 => n(m, Key::Insert, "0"),
Code::Numpad1 => n(m, Key::End, "1"),
Code::Numpad2 => n(m, Key::ArrowDown, "2"),
Code::Numpad3 => n(m, Key::PageDown, "3"),
Code::Numpad4 => n(m, Key::ArrowLeft, "4"),
Code::Numpad5 => n(m, Key::Clear, "5"),
Code::Numpad6 => n(m, Key::ArrowRight, "6"),
Code::Numpad7 => n(m, Key::Home, "7"),
Code::Numpad8 => n(m, Key::ArrowUp, "8"),
Code::Numpad9 => n(m, Key::PageUp, "9"),
Code::NumpadSubtract => a("-"),
Code::NumpadAdd => a("+"),
Code::NumpadDecimal => n(m, Key::Delete, "."),
Code::IntlBackslash => s(m, "\\", "|"),
Code::F11 => Key::F11,
Code::F12 => Key::F12,
// This mapping is based on the picture in the w3c spec.
Code::IntlRo => a("\\"),
Code::Convert => Key::Convert,
Code::KanaMode => Key::KanaMode,
Code::NonConvert => Key::NonConvert,
Code::NumpadEnter => Key::Enter,
Code::ControlRight => Key::Control,
Code::NumpadDivide => a("/"),
Code::PrintScreen => Key::PrintScreen,
Code::AltRight => Key::Alt,
Code::Home => Key::Home,
Code::ArrowUp => Key::ArrowUp,
Code::PageUp => Key::PageUp,
Code::ArrowLeft => Key::ArrowLeft,
Code::ArrowRight => Key::ArrowRight,
Code::End => Key::End,
Code::ArrowDown => Key::ArrowDown,
Code::PageDown => Key::PageDown,
Code::Insert => Key::Insert,
Code::Delete => Key::Delete,
Code::AudioVolumeMute => Key::AudioVolumeMute,
Code::AudioVolumeDown => Key::AudioVolumeDown,
Code::AudioVolumeUp => Key::AudioVolumeUp,
Code::NumpadEqual => a("="),
Code::Pause => Key::Pause,
Code::NumpadComma => a(","),
Code::Lang1 => Key::HangulMode,
Code::Lang2 => Key::HanjaMode,
Code::IntlYen => a("¥"),
Code::MetaLeft => Key::Meta,
Code::MetaRight => Key::Meta,
Code::ContextMenu => Key::ContextMenu,
Code::BrowserStop => Key::BrowserStop,
Code::Again => Key::Again,
Code::Props => Key::Props,
Code::Undo => Key::Undo,
Code::Select => Key::Select,
Code::Copy => Key::Copy,
Code::Open => Key::Open,
Code::Paste => Key::Paste,
Code::Find => Key::Find,
Code::Cut => Key::Cut,
Code::Help => Key::Help,
Code::LaunchApp2 => Key::LaunchApplication2,
Code::WakeUp => Key::WakeUp,
Code::LaunchApp1 => Key::LaunchApplication1,
Code::LaunchMail => Key::LaunchMail,
Code::BrowserFavorites => Key::BrowserFavorites,
Code::BrowserBack => Key::BrowserBack,
Code::BrowserForward => Key::BrowserForward,
Code::Eject => Key::Eject,
Code::MediaTrackNext => Key::MediaTrackNext,
Code::MediaPlayPause => Key::MediaPlayPause,
Code::MediaTrackPrevious => Key::MediaTrackPrevious,
Code::MediaStop => Key::MediaStop,
Code::MediaSelect => Key::LaunchMediaPlayer,
Code::BrowserHome => Key::BrowserHome,
Code::BrowserRefresh => Key::BrowserRefresh,
Code::BrowserSearch => Key::BrowserSearch,
_ => Key::Unidentified,
}
}
#[cfg(target_os = "linux")]
/// Map hardware keycode to code.
///
/// In theory, the hardware keycode is device dependent, but in
/// practice it's probably pretty reliable.
///
/// The logic is based on NativeKeyToDOMCodeName.h in Mozilla.
fn hardware_keycode_to_code(hw_keycode: u16) -> Code {
match hw_keycode {
0x0009 => Code::Escape,
0x000A => Code::Digit1,
0x000B => Code::Digit2,
0x000C => Code::Digit3,
0x000D => Code::Digit4,
0x000E => Code::Digit5,
0x000F => Code::Digit6,
0x0010 => Code::Digit7,
0x0011 => Code::Digit8,
0x0012 => Code::Digit9,
0x0013 => Code::Digit0,
0x0014 => Code::Minus,
0x0015 => Code::Equal,
0x0016 => Code::Backspace,
0x0017 => Code::Tab,
0x0018 => Code::KeyQ,
0x0019 => Code::KeyW,
0x001A => Code::KeyE,
0x001B => Code::KeyR,
0x001C => Code::KeyT,
0x001D => Code::KeyY,
0x001E => Code::KeyU,
0x001F => Code::KeyI,
0x0020 => Code::KeyO,
0x0021 => Code::KeyP,
0x0022 => Code::BracketLeft,
0x0023 => Code::BracketRight,
0x0024 => Code::Enter,
0x0025 => Code::ControlLeft,
0x0026 => Code::KeyA,
0x0027 => Code::KeyS,
0x0028 => Code::KeyD,
0x0029 => Code::KeyF,
0x002A => Code::KeyG,
0x002B => Code::KeyH,
0x002C => Code::KeyJ,
0x002D => Code::KeyK,
0x002E => Code::KeyL,
0x002F => Code::Semicolon,
0x0030 => Code::Quote,
0x0031 => Code::Backquote,
0x0032 => Code::ShiftLeft,
0x0033 => Code::Backslash,
0x0034 => Code::KeyZ,
0x0035 => Code::KeyX,
0x0036 => Code::KeyC,
0x0037 => Code::KeyV,
0x0038 => Code::KeyB,
0x0039 => Code::KeyN,
0x003A => Code::KeyM,
0x003B => Code::Comma,
0x003C => Code::Period,
0x003D => Code::Slash,
0x003E => Code::ShiftRight,
0x003F => Code::NumpadMultiply,
0x0040 => Code::AltLeft,
0x0041 => Code::Space,
0x0042 => Code::CapsLock,
0x0043 => Code::F1,
0x0044 => Code::F2,
0x0045 => Code::F3,
0x0046 => Code::F4,
0x0047 => Code::F5,
0x0048 => Code::F6,
0x0049 => Code::F7,
0x004A => Code::F8,
0x004B => Code::F9,
0x004C => Code::F10,
0x004D => Code::NumLock,
0x004E => Code::ScrollLock,
0x004F => Code::Numpad7,
0x0050 => Code::Numpad8,
0x0051 => Code::Numpad9,
0x0052 => Code::NumpadSubtract,
0x0053 => Code::Numpad4,
0x0054 => Code::Numpad5,
0x0055 => Code::Numpad6,
0x0056 => Code::NumpadAdd,
0x0057 => Code::Numpad1,
0x0058 => Code::Numpad2,
0x0059 => Code::Numpad3,
0x005A => Code::Numpad0,
0x005B => Code::NumpadDecimal,
0x005E => Code::IntlBackslash,
0x005F => Code::F11,
0x0060 => Code::F12,
0x0061 => Code::IntlRo,
0x0064 => Code::Convert,
0x0065 => Code::KanaMode,
0x0066 => Code::NonConvert,
0x0068 => Code::NumpadEnter,
0x0069 => Code::ControlRight,
0x006A => Code::NumpadDivide,
0x006B => Code::PrintScreen,
0x006C => Code::AltRight,
0x006E => Code::Home,
0x006F => Code::ArrowUp,
0x0070 => Code::PageUp,
0x0071 => Code::ArrowLeft,
0x0072 => Code::ArrowRight,
0x0073 => Code::End,
0x0074 => Code::ArrowDown,
0x0075 => Code::PageDown,
0x0076 => Code::Insert,
0x0077 => Code::Delete,
0x0079 => Code::AudioVolumeMute,
0x007A => Code::AudioVolumeDown,
0x007B => Code::AudioVolumeUp,
0x007D => Code::NumpadEqual,
0x007F => Code::Pause,
0x0081 => Code::NumpadComma,
0x0082 => Code::Lang1,
0x0083 => Code::Lang2,
0x0084 => Code::IntlYen,
0x0085 => Code::MetaLeft,
0x0086 => Code::MetaRight,
0x0087 => Code::ContextMenu,
0x0088 => Code::BrowserStop,
0x0089 => Code::Again,
0x008A => Code::Props,
0x008B => Code::Undo,
0x008C => Code::Select,
0x008D => Code::Copy,
0x008E => Code::Open,
0x008F => Code::Paste,
0x0090 => Code::Find,
0x0091 => Code::Cut,
0x0092 => Code::Help,
0x0094 => Code::LaunchApp2,
0x0097 => Code::WakeUp,
0x0098 => Code::LaunchApp1,
// key to right of volume controls on T430s produces 0x9C
// but no documentation of what it should map to :/
0x00A3 => Code::LaunchMail,
0x00A4 => Code::BrowserFavorites,
0x00A6 => Code::BrowserBack,
0x00A7 => Code::BrowserForward,
0x00A9 => Code::Eject,
0x00AB => Code::MediaTrackNext,
0x00AC => Code::MediaPlayPause,
0x00AD => Code::MediaTrackPrevious,
0x00AE => Code::MediaStop,
0x00B3 => Code::MediaSelect,
0x00B4 => Code::BrowserHome,
0x00B5 => Code::BrowserRefresh,
0x00E1 => Code::BrowserSearch,
_ => Code::Unidentified,
}
}
// Extracts the keyboard modifiers from, e.g., the `state` field of
// `xcb::xproto::ButtonPressEvent`
fn key_mods(mods: u16) -> Modifiers {
let mut ret = Modifiers::default();
let mut key_masks = [
(xproto::MOD_MASK_SHIFT, Modifiers::SHIFT),
(xproto::MOD_MASK_CONTROL, Modifiers::CONTROL),
// X11's mod keys are configurable, but this seems
// like a reasonable default for US keyboards, at least,
// where the "windows" key seems to be MOD_MASK_4.
(xproto::MOD_MASK_1, Modifiers::ALT),
(xproto::MOD_MASK_2, Modifiers::NUM_LOCK),
(xproto::MOD_MASK_4, Modifiers::META),
(xproto::MOD_MASK_LOCK, Modifiers::CAPS_LOCK),
];
for (mask, modifiers) in &mut key_masks {
if mods & (*mask as u16) != 0 {
ret |= *modifiers;
}
}
ret
}
pub(super) fn convert_key_press_event(key_press: &xcb::KeyPressEvent) -> KeyboardEvent {
let hw_keycode = key_press.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_press.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Down;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
pub(super) fn convert_key_release_event(key_release: &xcb::KeyReleaseEvent) -> KeyboardEvent {
let hw_keycode = key_release.detail();
let code = hardware_keycode_to_code(hw_keycode.into());
let modifiers = key_mods(key_release.state());
let key = code_to_key(code, modifiers);
let location = code_to_location(code);
let state = KeyState::Up;
KeyboardEvent { code, key, modifiers, location, state, repeat: false, is_composing: false }
}
| {
if mods.contains(Modifiers::SHIFT) {
Key::Character(shifted.into())
} else {
Key::Character(base.into())
}
} | identifier_body |
motion-export-dialog.component.ts | import { Component, OnInit, ViewChild, ViewEncapsulation } from '@angular/core';
import { UntypedFormBuilder, UntypedFormGroup } from '@angular/forms';
import { MatButtonToggle, MatButtonToggleChange } from '@angular/material/button-toggle';
import { MatDialogRef } from '@angular/material/dialog';
import { auditTime, Observable } from 'rxjs';
import { Permission } from 'src/app/domain/definitions/permission';
import {
ChangeRecoMode,
LineNumberingMode,
MOTION_PDF_OPTIONS,
PERSONAL_NOTE_ID
} from 'src/app/domain/models/motions/motions.constants';
import { StorageService } from 'src/app/gateways/storage.service';
import { ViewMotionCommentSection } from 'src/app/site/pages/meetings/pages/motions';
import { MeetingSettingsService } from 'src/app/site/pages/meetings/services/meeting-settings.service';
import { BaseUiComponent } from 'src/app/ui/base/base-ui-component';
import { MotionCommentSectionControllerService } from '../../../../modules/comments/services/motion-comment-section-controller.service';
import { ExportFileFormat, motionImportExportHeaderOrder, noMetaData } from '../../../../services/export/definitions';
import { MotionExportInfo } from '../../../../services/export/motion-export.service/motion-export.service';
@Component({
selector: `os-motion-export-dialog`,
templateUrl: `./motion-export-dialog.component.html`,
styleUrls: [`./motion-export-dialog.component.scss`],
encapsulation: ViewEncapsulation.None
})
export class MotionExportDialogComponent extends BaseUiComponent implements OnInit {
/**
* import PERSONAL_NOTE_ID for use in template
*/
public PERSONAL_NOTE_ID = PERSONAL_NOTE_ID;
public readonly permission = Permission;
/**
* For using the enum constants from the template.
*/
public lnMode = LineNumberingMode;
/**
* For using the enum constants from the template.
*/
public crMode = ChangeRecoMode;
/**
* to use the format in the template
*/
public fileFormat = ExportFileFormat;
/**
* The form that contains the export information.
*/
public exportForm!: UntypedFormGroup;
/**
* Store the subject to the ViewMotionCommentSection
*/
private commentsSubject: Observable<ViewMotionCommentSection[]>;
/**
* The default export values in contrast to the restored values
*/
private defaults: MotionExportInfo = {
format: ExportFileFormat.PDF,
content: [`text`, `reason`],
pdfOptions: [
MOTION_PDF_OPTIONS.Toc,
MOTION_PDF_OPTIONS.Header,
MOTION_PDF_OPTIONS.Page,
MOTION_PDF_OPTIONS.AddBreaks
],
metaInfo: [`submitters`, `state`, `recommendation`, `category`, `tags`, `block`, `polls`, `referring_motions`]
};
/**
* Determine the export order of the meta data
*/
public metaInfoExportOrder: string[];
/**
* @returns a list of available commentSections
*/
public get commentsToExport(): ViewMotionCommentSection[] {
return this.commentRepo.getViewModelList();
}
/**
* To deactivate the export-as-diff button
*/
@ViewChild(`diffVersionButton`, { static: true })
public diffVersionButton!: MatButtonToggle;
/**
* To deactivate the voting result button
*/
@ViewChild(`votingResultButton`, { static: true })
public votingResultButton!: MatButtonToggle;
/**
* To deactivate the referring motions button
*/
@ViewChild(`referringMotionsButton`, { static: true })
public referringMotionsButton!: MatButtonToggle;
/**
* To deactivate the speakers button.
*/
@ViewChild(`speakersButton`)
public speakersButton!: MatButtonToggle;
/**
* To deactivate the toc button.
*/
@ViewChild(MOTION_PDF_OPTIONS.Toc)
public tocButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.AddBreaks)
public addBreaksButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.ContinuousText)
public continuousTextButton!: MatButtonToggle;
/**
* Constructor
* Sets the default values for the lineNumberingMode and changeRecoMode and creates the form.
* This uses "instant" over observables to prevent on-fly-changes by auto update while
* the dialog is open.
*/
public constructor(
public formBuilder: UntypedFormBuilder,
public dialogRef: MatDialogRef<MotionExportDialogComponent>,
public meetingSettingsService: MeetingSettingsService,
public commentRepo: MotionCommentSectionControllerService,
private store: StorageService
) {
super();
this.defaults.lnMode = this.meetingSettingsService.instant(`motions_default_line_numbering`)!;
this.defaults.crMode = this.meetingSettingsService.instant(`motions_recommendation_text_mode`)!;
this.commentsSubject = this.commentRepo.getViewModelListObservable();
if (this.meetingSettingsService.instant(`motions_show_sequential_number`)) {
this.defaults.metaInfo!.push(`id`);
}
// Get the export order, exclude everything that does not count as meta-data
this.metaInfoExportOrder = motionImportExportHeaderOrder.filter(
metaData => !noMetaData.some(noMeta => metaData === noMeta)
);
this.createForm();
}
/**
* Init.
* Observes the form for changes to react dynamically
*/
public ngOnInit(): void {
this.subscriptions.push(
this.exportForm.valueChanges.pipe(auditTime(500)).subscribe((value: MotionExportInfo) => {
this.store.set(`motion_export_selection`, value);
}),
this.exportForm
.get(`format`)!
.valueChanges.subscribe((value: ExportFileFormat) => this.onFormatChange(value))
);
}
/**
* React to changes on the file format
* @param format
*/
private onFormatChange(format: ExportFileFormat): void {
// XLSX cannot have "content"
if (format === ExportFileFormat.XLSX) {
this.disableControl(`content`);
this.changeStateOfButton(this.speakersButton, false);
} else {
this.enableControl(`content`);
this.changeStateOfButton(this.speakersButton, true);
}
if (format === ExportFileFormat.CSV || format === ExportFileFormat.XLSX) {
this.disableControl(`lnMode`);
this.disableControl(`crMode`);
this.disableControl(`pdfOptions`);
// remove the selection of "votingResult"
if (format === ExportFileFormat.CSV) {
this.disableMetaInfoControl(`polls`, `speakers`);
} else {
this.disableMetaInfoControl(`polls`);
}
this.votingResultButton.disabled = true;
this.referringMotionsButton.disabled = true;
}
if (format === ExportFileFormat.PDF) {
this.enableControl(`lnMode`);
this.enableControl(`crMode`);
this.enableControl(`pdfOptions`);
this.votingResultButton.disabled = false;
this.referringMotionsButton.disabled = false;
}
}
public onChange(event: MatButtonToggleChange): void |
/**
* Function to change the state of the property `disabled` of a given button.
*
* Ensures, that the button exists.
*
* @param button The button whose state will change.
* @param nextState The next state the button will assume.
*/
private changeStateOfButton(button: MatButtonToggle, nextState: boolean): void {
if (button) {
button.disabled = nextState;
}
}
/**
* Helper function to easier enable a control
* @param name
*/
private enableControl(name: string): void {
this.exportForm.get(name)!.enable();
}
/**
* Helper function to easier disable a control
*
* @param name
*/
private disableControl(name: string): void {
this.exportForm.get(name)!.disable();
this.exportForm.get(name)!.setValue(this.getOffState(name));
}
/**
* Determine what "off means in certain states"
*
* @param control
*/
private getOffState(control: string): string | null {
switch (control) {
case `lnMode`:
return this.lnMode.None;
case `crMode`:
return this.crMode.Original;
default:
return null;
}
}
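// Example: switching to CSV disables `lnMode`, so the control is reset to
// LineNumberingMode.None; `pdfOptions` has no meaningful "off" value and clears to null.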
/**
* Function to deactivate at least one field of the meta-info.
*
* @param fields All fields to deactivate.
*/
private disableMetaInfoControl(...fields: string[]): void {
let metaInfoVal: string[] = this.exportForm.get(`metaInfo`)!.value;
if (metaInfoVal) {
metaInfoVal = metaInfoVal.filter(info => !fields.includes(info));
this.exportForm.get(`metaInfo`)!.setValue(metaInfoVal);
}
}
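// Example: a CSV export calls disableMetaInfoControl(`polls`, `speakers`), stripping
// both from the current metaInfo selection while the other checked entries remain.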
/**
* Creates the form with default values
*/
public createForm(): void {
this.exportForm = this.formBuilder.group({
format: [],
lnMode: [],
crMode: [],
content: [],
metaInfo: [],
pdfOptions: [],
comments: []
});
// restore selection or set default
this.store.get<MotionExportInfo>(`motion_export_selection`).then(restored => {
if (restored) {
this.exportForm.patchValue(restored);
} else {
this.exportForm.patchValue(this.defaults);
}
});
}
/**
* Just close the dialog
*/
public onCloseClick(): void {
this.dialogRef.close();
}
/**
* Gets the untranslated label for metaData
*/
public getLabelForMetadata(metaDataName: string): string {
switch (metaDataName) {
case `polls`: {
return `Voting result`;
}
case `id`: {
return `Sequential number`;
}
case `block`: {
return `Motion block`;
}
default: {
return metaDataName.charAt(0).toUpperCase() + metaDataName.slice(1).replace(`_`, ` `);
}
}
}
}
| {
if (event.value.includes(MOTION_PDF_OPTIONS.ContinuousText)) {
this.tocButton.checked = false;
this.addBreaksButton.checked = false;
}
} | identifier_body |
motion-export-dialog.component.ts | import { Component, OnInit, ViewChild, ViewEncapsulation } from '@angular/core';
import { UntypedFormBuilder, UntypedFormGroup } from '@angular/forms';
import { MatButtonToggle, MatButtonToggleChange } from '@angular/material/button-toggle';
import { MatDialogRef } from '@angular/material/dialog';
import { auditTime, Observable } from 'rxjs';
import { Permission } from 'src/app/domain/definitions/permission';
import {
ChangeRecoMode,
LineNumberingMode,
MOTION_PDF_OPTIONS,
PERSONAL_NOTE_ID
} from 'src/app/domain/models/motions/motions.constants';
import { StorageService } from 'src/app/gateways/storage.service';
import { ViewMotionCommentSection } from 'src/app/site/pages/meetings/pages/motions';
import { MeetingSettingsService } from 'src/app/site/pages/meetings/services/meeting-settings.service';
import { BaseUiComponent } from 'src/app/ui/base/base-ui-component';
import { MotionCommentSectionControllerService } from '../../../../modules/comments/services/motion-comment-section-controller.service';
import { ExportFileFormat, motionImportExportHeaderOrder, noMetaData } from '../../../../services/export/definitions';
import { MotionExportInfo } from '../../../../services/export/motion-export.service/motion-export.service';
@Component({
selector: `os-motion-export-dialog`,
templateUrl: `./motion-export-dialog.component.html`,
styleUrls: [`./motion-export-dialog.component.scss`],
encapsulation: ViewEncapsulation.None
})
export class MotionExportDialogComponent extends BaseUiComponent implements OnInit {
/**
* import PERSONAL_NOTE_ID for use in template
*/
public PERSONAL_NOTE_ID = PERSONAL_NOTE_ID;
public readonly permission = Permission;
/**
* For using the enum constants from the template.
*/
public lnMode = LineNumberingMode;
/**
* For using the enum constants from the template.
*/
public crMode = ChangeRecoMode;
/**
* to use the format in the template
*/
public fileFormat = ExportFileFormat;
/**
* The form that contains the export information.
*/
public exportForm!: UntypedFormGroup;
/**
* Store the subject to the ViewMotionCommentSection
*/
private commentsSubject: Observable<ViewMotionCommentSection[]>;
/**
* The default export values in contrast to the restored values
*/
private defaults: MotionExportInfo = {
format: ExportFileFormat.PDF,
content: [`text`, `reason`],
pdfOptions: [
MOTION_PDF_OPTIONS.Toc,
MOTION_PDF_OPTIONS.Header,
MOTION_PDF_OPTIONS.Page,
MOTION_PDF_OPTIONS.AddBreaks
],
metaInfo: [`submitters`, `state`, `recommendation`, `category`, `tags`, `block`, `polls`, `referring_motions`]
};
/**
* Determine the export order of the meta data
*/
public metaInfoExportOrder: string[];
/**
* @returns a list of available commentSections
*/
public get commentsToExport(): ViewMotionCommentSection[] {
return this.commentRepo.getViewModelList();
}
/**
* To deactivate the export-as-diff button
*/
@ViewChild(`diffVersionButton`, { static: true })
public diffVersionButton!: MatButtonToggle;
/**
* To deactivate the voting result button
*/
@ViewChild(`votingResultButton`, { static: true })
public votingResultButton!: MatButtonToggle;
/**
* To deactivate the referring motions button
*/
@ViewChild(`referringMotionsButton`, { static: true })
public referringMotionsButton!: MatButtonToggle;
/**
* To deactivate the speakers button.
*/
@ViewChild(`speakersButton`)
public speakersButton!: MatButtonToggle;
/**
* To deactivate the toc button.
*/
@ViewChild(MOTION_PDF_OPTIONS.Toc)
public tocButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.AddBreaks)
public addBreaksButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.ContinuousText)
public continuousTextButton!: MatButtonToggle;
/**
* Constructor
* Sets the default values for the lineNumberingMode and changeRecoMode and creates the form.
* This uses "instant" over observables to prevent on-fly-changes by auto update while
* the dialog is open.
*/
public constructor(
public formBuilder: UntypedFormBuilder,
public dialogRef: MatDialogRef<MotionExportDialogComponent>,
public meetingSettingsService: MeetingSettingsService,
public commentRepo: MotionCommentSectionControllerService,
private store: StorageService
) {
super();
this.defaults.lnMode = this.meetingSettingsService.instant(`motions_default_line_numbering`)!;
this.defaults.crMode = this.meetingSettingsService.instant(`motions_recommendation_text_mode`)!;
this.commentsSubject = this.commentRepo.getViewModelListObservable();
if (this.meetingSettingsService.instant(`motions_show_sequential_number`)) {
this.defaults.metaInfo!.push(`id`);
}
// Get the export order, exclude everything that does not count as meta-data
this.metaInfoExportOrder = motionImportExportHeaderOrder.filter(
metaData => !noMetaData.some(noMeta => metaData === noMeta)
);
this.createForm();
}
/**
* Init.
* Observes the form for changes to react dynamically
*/
public ngOnInit(): void {
this.subscriptions.push(
this.exportForm.valueChanges.pipe(auditTime(500)).subscribe((value: MotionExportInfo) => {
this.store.set(`motion_export_selection`, value);
}),
this.exportForm
.get(`format`)!
.valueChanges.subscribe((value: ExportFileFormat) => this.onFormatChange(value))
);
}
/**
* React to changes on the file format
* @param format
*/
private onFormatChange(format: ExportFileFormat): void {
// XLSX cannot have "content"
if (format === ExportFileFormat.XLSX) | else {
this.enableControl(`content`);
this.changeStateOfButton(this.speakersButton, true);
}
if (format === ExportFileFormat.CSV || format === ExportFileFormat.XLSX) {
this.disableControl(`lnMode`);
this.disableControl(`crMode`);
this.disableControl(`pdfOptions`);
// remove the selection of "votingResult"
if (format === ExportFileFormat.CSV) {
this.disableMetaInfoControl(`polls`, `speakers`);
} else {
this.disableMetaInfoControl(`polls`);
}
this.votingResultButton.disabled = true;
this.referringMotionsButton.disabled = true;
}
if (format === ExportFileFormat.PDF) {
this.enableControl(`lnMode`);
this.enableControl(`crMode`);
this.enableControl(`pdfOptions`);
this.votingResultButton.disabled = false;
this.referringMotionsButton.disabled = false;
}
}
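// Derived summary (added, not part of the original source) of the control
// states this handler produces per format:
//   format | content | lnMode/crMode/pdfOptions | metaInfo removed | speakersButton | votingResult+referringMotions
//   XLSX   | off     | off                      | polls            | enabled        | disabled
//   CSV    | on      | off                      | polls, speakers  | disabled       | disabled
//   PDF    | on      | on                       | none             | disabled       | enabled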
public onChange(event: MatButtonToggleChange): void {
if (event.value.includes(MOTION_PDF_OPTIONS.ContinuousText)) {
this.tocButton.checked = false;
this.addBreaksButton.checked = false;
}
}
/**
* Function to change the state of the property `disabled` of a given button.
*
* Ensures that the button exists.
*
* @param button The button whose state will change.
* @param nextState The next state the button will assume.
*/
private changeStateOfButton(button: MatButtonToggle, nextState: boolean): void {
if (button) {
button.disabled = nextState;
}
}
/**
* Helper function to more easily enable a control
* @param name
*/
private enableControl(name: string): void {
this.exportForm.get(name)!.enable();
}
/**
* Helper function to more easily disable a control
*
* @param name
*/
private disableControl(name: string): void {
this.exportForm.get(name)!.disable();
this.exportForm.get(name)!.setValue(this.getOffState(name));
}
/**
* Determine what "off" means in certain states
*
* @param control
*/
private getOffState(control: string): string | null {
switch (control) {
case `lnMode`:
return this.lnMode.None;
case `crMode`:
return this.crMode.Original;
default:
return null;
}
}
/**
* Function to deactivate one or more fields of the meta-info.
*
* @param fields All fields to deactivate.
*/
private disableMetaInfoControl(...fields: string[]): void {
let metaInfoVal: string[] = this.exportForm.get(`metaInfo`)!.value;
if (metaInfoVal) {
metaInfoVal = metaInfoVal.filter(info => !fields.includes(info));
this.exportForm.get(`metaInfo`)!.setValue(metaInfoVal);
}
}
/**
* Creates the form with default values
*/
public createForm(): void {
this.exportForm = this.formBuilder.group({
format: [],
lnMode: [],
crMode: [],
content: [],
metaInfo: [],
pdfOptions: [],
comments: []
});
// restore selection or set default
this.store.get<MotionExportInfo>(`motion_export_selection`).then(restored => {
if (restored) {
this.exportForm.patchValue(restored);
} else {
this.exportForm.patchValue(this.defaults);
}
});
}
/**
* Just close the dialog
*/
public onCloseClick(): void {
this.dialogRef.close();
}
/**
* Gets the untranslated label for metaData
*/
public getLabelForMetadata(metaDataName: string): string {
switch (metaDataName) {
case `polls`: {
return `Voting result`;
}
case `id`: {
return `Sequential number`;
}
case `block`: {
return `Motion block`;
}
default: {
return metaDataName.charAt(0).toUpperCase() + metaDataName.slice(1).replace(`_`, ` `);
}
}
}
}
| {
this.disableControl(`content`);
this.changeStateOfButton(this.speakersButton, false);
} | conditional_block |
motion-export-dialog.component.ts | import { Component, OnInit, ViewChild, ViewEncapsulation } from '@angular/core';
import { UntypedFormBuilder, UntypedFormGroup } from '@angular/forms';
import { MatButtonToggle, MatButtonToggleChange } from '@angular/material/button-toggle';
import { MatDialogRef } from '@angular/material/dialog';
import { auditTime, Observable } from 'rxjs';
import { Permission } from 'src/app/domain/definitions/permission';
import {
ChangeRecoMode,
LineNumberingMode,
MOTION_PDF_OPTIONS,
PERSONAL_NOTE_ID
} from 'src/app/domain/models/motions/motions.constants';
import { StorageService } from 'src/app/gateways/storage.service';
import { ViewMotionCommentSection } from 'src/app/site/pages/meetings/pages/motions';
import { MeetingSettingsService } from 'src/app/site/pages/meetings/services/meeting-settings.service';
import { BaseUiComponent } from 'src/app/ui/base/base-ui-component';
import { MotionCommentSectionControllerService } from '../../../../modules/comments/services/motion-comment-section-controller.service';
import { ExportFileFormat, motionImportExportHeaderOrder, noMetaData } from '../../../../services/export/definitions';
import { MotionExportInfo } from '../../../../services/export/motion-export.service/motion-export.service'; | @Component({
selector: `os-motion-export-dialog`,
templateUrl: `./motion-export-dialog.component.html`,
styleUrls: [`./motion-export-dialog.component.scss`],
encapsulation: ViewEncapsulation.None
})
export class MotionExportDialogComponent extends BaseUiComponent implements OnInit {
/**
* import PERSONAL_NOTE_ID for use in template
*/
public PERSONAL_NOTE_ID = PERSONAL_NOTE_ID;
public readonly permission = Permission;
/**
* For using the enum constants from the template.
*/
public lnMode = LineNumberingMode;
/**
* For using the enum constants from the template.
*/
public crMode = ChangeRecoMode;
/**
* to use the format in the template
*/
public fileFormat = ExportFileFormat;
/**
* The form that contains the export information.
*/
public exportForm!: UntypedFormGroup;
/**
* Store the subject to the ViewMotionCommentSection
*/
private commentsSubject: Observable<ViewMotionCommentSection[]>;
/**
* The default export values in contrast to the restored values
*/
private defaults: MotionExportInfo = {
format: ExportFileFormat.PDF,
content: [`text`, `reason`],
pdfOptions: [
MOTION_PDF_OPTIONS.Toc,
MOTION_PDF_OPTIONS.Header,
MOTION_PDF_OPTIONS.Page,
MOTION_PDF_OPTIONS.AddBreaks
],
metaInfo: [`submitters`, `state`, `recommendation`, `category`, `tags`, `block`, `polls`, `referring_motions`]
};
/**
* Determine the export order of the meta data
*/
public metaInfoExportOrder: string[];
/**
* @returns a list of available commentSections
*/
public get commentsToExport(): ViewMotionCommentSection[] {
return this.commentRepo.getViewModelList();
}
/**
* To deactivate the export-as-diff button
*/
@ViewChild(`diffVersionButton`, { static: true })
public diffVersionButton!: MatButtonToggle;
/**
* To deactivate the voting result button
*/
@ViewChild(`votingResultButton`, { static: true })
public votingResultButton!: MatButtonToggle;
/**
* To deactivate the referring motions button
*/
@ViewChild(`referringMotionsButton`, { static: true })
public referringMotionsButton!: MatButtonToggle;
/**
* To deactivate the speakers button.
*/
@ViewChild(`speakersButton`)
public speakersButton!: MatButtonToggle;
/**
* To deactivate the toc button.
*/
@ViewChild(MOTION_PDF_OPTIONS.Toc)
public tocButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.AddBreaks)
public addBreaksButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.ContinuousText)
public continuousTextButton!: MatButtonToggle;
/**
* Constructor
* Sets the default values for the lineNumberingMode and changeRecoMode and creates the form.
* This uses "instant" instead of observables to prevent on-the-fly changes
* from auto-updates while the dialog is open.
*/
public constructor(
public formBuilder: UntypedFormBuilder,
public dialogRef: MatDialogRef<MotionExportDialogComponent>,
public meetingSettingsService: MeetingSettingsService,
public commentRepo: MotionCommentSectionControllerService,
private store: StorageService
) {
super();
this.defaults.lnMode = this.meetingSettingsService.instant(`motions_default_line_numbering`)!;
this.defaults.crMode = this.meetingSettingsService.instant(`motions_recommendation_text_mode`)!;
this.commentsSubject = this.commentRepo.getViewModelListObservable();
if (this.meetingSettingsService.instant(`motions_show_sequential_number`)) {
this.defaults.metaInfo!.push(`id`);
}
// Get the export order, exclude everything that does not count as meta-data
this.metaInfoExportOrder = motionImportExportHeaderOrder.filter(
metaData => !noMetaData.some(noMeta => metaData === noMeta)
);
this.createForm();
}
/**
* Init.
* Observes the form for changes to react dynamically
*/
public ngOnInit(): void {
this.subscriptions.push(
this.exportForm.valueChanges.pipe(auditTime(500)).subscribe((value: MotionExportInfo) => {
this.store.set(`motion_export_selection`, value);
}),
this.exportForm
.get(`format`)!
.valueChanges.subscribe((value: ExportFileFormat) => this.onFormatChange(value))
);
}
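// Note (added): `auditTime(500)` rate-limits persistence; after a burst of
// form changes only the latest value in each 500 ms window reaches
// `StorageService.set`, so rapid toggling does not flood the store.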
/**
* React to changes on the file format
* @param format
*/
private onFormatChange(format: ExportFileFormat): void {
// XLSX cannot have "content"
if (format === ExportFileFormat.XLSX) {
this.disableControl(`content`);
this.changeStateOfButton(this.speakersButton, false);
} else {
this.enableControl(`content`);
this.changeStateOfButton(this.speakersButton, true);
}
if (format === ExportFileFormat.CSV || format === ExportFileFormat.XLSX) {
this.disableControl(`lnMode`);
this.disableControl(`crMode`);
this.disableControl(`pdfOptions`);
// remove the selection of "votingResult"
if (format === ExportFileFormat.CSV) {
this.disableMetaInfoControl(`polls`, `speakers`);
} else {
this.disableMetaInfoControl(`polls`);
}
this.votingResultButton.disabled = true;
this.referringMotionsButton.disabled = true;
}
if (format === ExportFileFormat.PDF) {
this.enableControl(`lnMode`);
this.enableControl(`crMode`);
this.enableControl(`pdfOptions`);
this.votingResultButton.disabled = false;
this.referringMotionsButton.disabled = false;
}
}
public onChange(event: MatButtonToggleChange): void {
if (event.value.includes(MOTION_PDF_OPTIONS.ContinuousText)) {
this.tocButton.checked = false;
this.addBreaksButton.checked = false;
}
}
/**
* Function to change the state of the property `disabled` of a given button.
*
* Ensures that the button exists.
*
* @param button The button whose state will change.
* @param nextState The next state the button will assume.
*/
private changeStateOfButton(button: MatButtonToggle, nextState: boolean): void {
if (button) {
button.disabled = nextState;
}
}
/**
* Helper function to more easily enable a control
* @param name
*/
private enableControl(name: string): void {
this.exportForm.get(name)!.enable();
}
/**
* Helper function to more easily disable a control
*
* @param name
*/
private disableControl(name: string): void {
this.exportForm.get(name)!.disable();
this.exportForm.get(name)!.setValue(this.getOffState(name));
}
/**
* Determine what "off" means in certain states
*
* @param control
*/
private getOffState(control: string): string | null {
switch (control) {
case `lnMode`:
return this.lnMode.None;
case `crMode`:
return this.crMode.Original;
default:
return null;
}
}
/**
* Function to deactivate one or more fields of the meta-info.
*
* @param fields All fields to deactivate.
*/
private disableMetaInfoControl(...fields: string[]): void {
let metaInfoVal: string[] = this.exportForm.get(`metaInfo`)!.value;
if (metaInfoVal) {
metaInfoVal = metaInfoVal.filter(info => !fields.includes(info));
this.exportForm.get(`metaInfo`)!.setValue(metaInfoVal);
}
}
/**
* Creates the form with default values
*/
public createForm(): void {
this.exportForm = this.formBuilder.group({
format: [],
lnMode: [],
crMode: [],
content: [],
metaInfo: [],
pdfOptions: [],
comments: []
});
// restore selection or set default
this.store.get<MotionExportInfo>(`motion_export_selection`).then(restored => {
if (restored) {
this.exportForm.patchValue(restored);
} else {
this.exportForm.patchValue(this.defaults);
}
});
}
/**
* Just close the dialog
*/
public onCloseClick(): void {
this.dialogRef.close();
}
/**
* Gets the untranslated label for metaData
*/
public getLabelForMetadata(metaDataName: string): string {
switch (metaDataName) {
case `polls`: {
return `Voting result`;
}
case `id`: {
return `Sequential number`;
}
case `block`: {
return `Motion block`;
}
default: {
return metaDataName.charAt(0).toUpperCase() + metaDataName.slice(1).replace(`_`, ` `);
}
}
}
} | random_line_split | |
motion-export-dialog.component.ts | import { Component, OnInit, ViewChild, ViewEncapsulation } from '@angular/core';
import { UntypedFormBuilder, UntypedFormGroup } from '@angular/forms';
import { MatButtonToggle, MatButtonToggleChange } from '@angular/material/button-toggle';
import { MatDialogRef } from '@angular/material/dialog';
import { auditTime, Observable } from 'rxjs';
import { Permission } from 'src/app/domain/definitions/permission';
import {
ChangeRecoMode,
LineNumberingMode,
MOTION_PDF_OPTIONS,
PERSONAL_NOTE_ID
} from 'src/app/domain/models/motions/motions.constants';
import { StorageService } from 'src/app/gateways/storage.service';
import { ViewMotionCommentSection } from 'src/app/site/pages/meetings/pages/motions';
import { MeetingSettingsService } from 'src/app/site/pages/meetings/services/meeting-settings.service';
import { BaseUiComponent } from 'src/app/ui/base/base-ui-component';
import { MotionCommentSectionControllerService } from '../../../../modules/comments/services/motion-comment-section-controller.service';
import { ExportFileFormat, motionImportExportHeaderOrder, noMetaData } from '../../../../services/export/definitions';
import { MotionExportInfo } from '../../../../services/export/motion-export.service/motion-export.service';
@Component({
selector: `os-motion-export-dialog`,
templateUrl: `./motion-export-dialog.component.html`,
styleUrls: [`./motion-export-dialog.component.scss`],
encapsulation: ViewEncapsulation.None
})
export class MotionExportDialogComponent extends BaseUiComponent implements OnInit {
/**
* import PERSONAL_NOTE_ID for use in template
*/
public PERSONAL_NOTE_ID = PERSONAL_NOTE_ID;
public readonly permission = Permission;
/**
* For using the enum constants from the template.
*/
public lnMode = LineNumberingMode;
/**
* For using the enum constants from the template.
*/
public crMode = ChangeRecoMode;
/**
* to use the format in the template
*/
public fileFormat = ExportFileFormat;
/**
* The form that contains the export information.
*/
public exportForm!: UntypedFormGroup;
/**
* Store the subject to the ViewMotionCommentSection
*/
private commentsSubject: Observable<ViewMotionCommentSection[]>;
/**
* The default export values in contrast to the restored values
*/
private defaults: MotionExportInfo = {
format: ExportFileFormat.PDF,
content: [`text`, `reason`],
pdfOptions: [
MOTION_PDF_OPTIONS.Toc,
MOTION_PDF_OPTIONS.Header,
MOTION_PDF_OPTIONS.Page,
MOTION_PDF_OPTIONS.AddBreaks
],
metaInfo: [`submitters`, `state`, `recommendation`, `category`, `tags`, `block`, `polls`, `referring_motions`]
};
/**
* Determine the export order of the meta data
*/
public metaInfoExportOrder: string[];
/**
* @returns a list of available commentSections
*/
public get commentsToExport(): ViewMotionCommentSection[] {
return this.commentRepo.getViewModelList();
}
/**
* To deactivate the export-as-diff button
*/
@ViewChild(`diffVersionButton`, { static: true })
public diffVersionButton!: MatButtonToggle;
/**
* To deactivate the voting result button
*/
@ViewChild(`votingResultButton`, { static: true })
public votingResultButton!: MatButtonToggle;
/**
* To deactivate the referring motions button
*/
@ViewChild(`referringMotionsButton`, { static: true })
public referringMotionsButton!: MatButtonToggle;
/**
* To deactivate the speakers button.
*/
@ViewChild(`speakersButton`)
public speakersButton!: MatButtonToggle;
/**
* To deactivate the toc button.
*/
@ViewChild(MOTION_PDF_OPTIONS.Toc)
public tocButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.AddBreaks)
public addBreaksButton!: MatButtonToggle;
@ViewChild(MOTION_PDF_OPTIONS.ContinuousText)
public continuousTextButton!: MatButtonToggle;
/**
* Constructor
* Sets the default values for the lineNumberingMode and changeRecoMode and creates the form.
* This uses "instant" instead of observables to prevent on-the-fly changes
* from auto-updates while the dialog is open.
*/
public constructor(
public formBuilder: UntypedFormBuilder,
public dialogRef: MatDialogRef<MotionExportDialogComponent>,
public meetingSettingsService: MeetingSettingsService,
public commentRepo: MotionCommentSectionControllerService,
private store: StorageService
) {
super();
this.defaults.lnMode = this.meetingSettingsService.instant(`motions_default_line_numbering`)!;
this.defaults.crMode = this.meetingSettingsService.instant(`motions_recommendation_text_mode`)!;
this.commentsSubject = this.commentRepo.getViewModelListObservable();
if (this.meetingSettingsService.instant(`motions_show_sequential_number`)) {
this.defaults.metaInfo!.push(`id`);
}
// Get the export order, exclude everything that does not count as meta-data
this.metaInfoExportOrder = motionImportExportHeaderOrder.filter(
metaData => !noMetaData.some(noMeta => metaData === noMeta)
);
this.createForm();
}
/**
* Init.
* Observes the form for changes to react dynamically
*/
public ngOnInit(): void {
this.subscriptions.push(
this.exportForm.valueChanges.pipe(auditTime(500)).subscribe((value: MotionExportInfo) => {
this.store.set(`motion_export_selection`, value);
}),
this.exportForm
.get(`format`)!
.valueChanges.subscribe((value: ExportFileFormat) => this.onFormatChange(value))
);
}
/**
* React to changes on the file format
* @param format
*/
private onFormatChange(format: ExportFileFormat): void {
// XLSX cannot have "content"
if (format === ExportFileFormat.XLSX) {
this.disableControl(`content`);
this.changeStateOfButton(this.speakersButton, false);
} else {
this.enableControl(`content`);
this.changeStateOfButton(this.speakersButton, true);
}
if (format === ExportFileFormat.CSV || format === ExportFileFormat.XLSX) {
this.disableControl(`lnMode`);
this.disableControl(`crMode`);
this.disableControl(`pdfOptions`);
// remove the selection of "votingResult"
if (format === ExportFileFormat.CSV) {
this.disableMetaInfoControl(`polls`, `speakers`);
} else {
this.disableMetaInfoControl(`polls`);
}
this.votingResultButton.disabled = true;
this.referringMotionsButton.disabled = true;
}
if (format === ExportFileFormat.PDF) {
this.enableControl(`lnMode`);
this.enableControl(`crMode`);
this.enableControl(`pdfOptions`);
this.votingResultButton.disabled = false;
this.referringMotionsButton.disabled = false;
}
}
public onChange(event: MatButtonToggleChange): void {
if (event.value.includes(MOTION_PDF_OPTIONS.ContinuousText)) {
this.tocButton.checked = false;
this.addBreaksButton.checked = false;
}
}
/**
* Function to change the state of the property `disabled` of a given button.
*
* Ensures that the button exists.
*
* @param button The button whose state will change.
* @param nextState The next state the button will assume.
*/
private changeStateOfButton(button: MatButtonToggle, nextState: boolean): void {
if (button) {
button.disabled = nextState;
}
}
/**
* Helper function to more easily enable a control
* @param name
*/
private enableControl(name: string): void {
this.exportForm.get(name)!.enable();
}
/**
* Helper function to more easily disable a control
*
* @param name
*/
private disableControl(name: string): void {
this.exportForm.get(name)!.disable();
this.exportForm.get(name)!.setValue(this.getOffState(name));
}
/**
* Determine what "off" means in certain states
*
* @param control
*/
private getOffState(control: string): string | null {
switch (control) {
case `lnMode`:
return this.lnMode.None;
case `crMode`:
return this.crMode.Original;
default:
return null;
}
}
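// Example (added): when the format switches to CSV or XLSX, `onFormatChange`
// calls `disableControl(`lnMode`)`, which resets the control's value to
// `LineNumberingMode.None` via the mapping above, so a disabled control never
// carries a stale selection into the export.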
/**
* Function to deactivate one or more fields of the meta-info.
*
* @param fields All fields to deactivate.
*/
private disableMetaInfoControl(...fields: string[]): void {
let metaInfoVal: string[] = this.exportForm.get(`metaInfo`)!.value;
if (metaInfoVal) {
metaInfoVal = metaInfoVal.filter(info => !fields.includes(info));
this.exportForm.get(`metaInfo`)!.setValue(metaInfoVal);
}
}
/**
* Creates the form with default values
*/
public createForm(): void {
this.exportForm = this.formBuilder.group({
format: [],
lnMode: [],
crMode: [],
content: [],
metaInfo: [],
pdfOptions: [],
comments: []
});
// restore selection or set default
this.store.get<MotionExportInfo>(`motion_export_selection`).then(restored => {
if (restored) {
this.exportForm.patchValue(restored);
} else {
this.exportForm.patchValue(this.defaults);
}
});
}
/**
* Just close the dialog
*/
public | (): void {
this.dialogRef.close();
}
/**
* Gets the untranslated label for metaData
*/
public getLabelForMetadata(metaDataName: string): string {
switch (metaDataName) {
case `polls`: {
return `Voting result`;
}
case `id`: {
return `Sequential number`;
}
case `block`: {
return `Motion block`;
}
default: {
return metaDataName.charAt(0).toUpperCase() + metaDataName.slice(1).replace(`_`, ` `);
}
}
}
}
| onCloseClick | identifier_name |
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `video_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
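// Worked example (added): "tt2934286:1:5:5:eJyTZwAAAEAAIA==" splits on ':' into
// ["tt2934286", "1", "5", "5", "eJyTZwAAAEAAIA=="]; the first pop yields the
// bitfield buffer "eJyTZwAAAEAAIA==", the second pop yields anchor_length 5,
// and the remaining components are re-joined into the anchor video id
// "tt2934286:1:5", e.g.:
//     let wf: WatchedField = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse()?;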
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previously empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also wanna assert that the bitfield.length for the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self |
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| {
watched.bitfield
} | identifier_body |
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `video_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
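// Worked example (added): if the serialized anchor_length is 5 (old anchor
// index 4) but the anchor video now sits at index 6, offset = 5 - 6 - 1 = -2,
// i.e. two videos were prepended and every old bit must shift two positions
// towards higher indices in the resized buffer below.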
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previously empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also wanna assert that the bitfield.length for the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn | () {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| deserialize_empty | identifier_name |
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `video_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previously empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset
for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also wanna assert that the bitfield.length for the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else |
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
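// Note (added): in JSON the field round-trips as its plain string form, e.g.:
//     let wf: WatchedField = serde_json::from_str(r#""tt7767422:3:8:24:eJz7//8/AAX9Av4=""#)?;
//     assert_eq!(serde_json::to_string(&wf)?, r#""tt7767422:3:8:24:eJz7//8/AAX9Av4=""#);
// (see the serde round-trip test below)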
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
}
| {
false
} | conditional_block |
watched_bitfield.rs | use crate::{BitField8, Error};
use std::{
fmt::{self, Display},
str::FromStr,
};
/// (De)Serializable field that tracks which videos have been watched
/// and the latest one watched.
///
/// This is a [`WatchedBitField`] compatible field, (de)serialized
/// without the knowledge of `video_ids`.
///
/// `{anchor:video_id}:{anchor_length}:{bitfield8}`
///
/// # Examples
///
/// ```
/// use stremio_watched_bitfield::WatchedField;
///
/// // `tt2934286:1:5` - anchor video id
/// // `5` - anchor video length
/// // `eJyTZwAAAEAAIA==` - BitField8
///
/// let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==".parse::<WatchedField>().expect("Should parse");
/// ```
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedField {
/// The anchor video id
///
/// Indicates which is the last watched video id.
anchor_video: String,
/// The length from the beginning of the `BitField8` to the last
/// watched video.
anchor_length: usize,
bitfield: BitField8,
}
impl Display for WatchedField {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}:{}:{}",
self.anchor_video, self.anchor_length, self.bitfield
)
}
}
impl From<WatchedBitField> for WatchedField {
fn from(watched_bit_field: WatchedBitField) -> Self {
let last_id = watched_bit_field.bitfield.last_index_of(true).unwrap_or(0);
let last_video_id = watched_bit_field
.video_ids
.get(last_id)
.map_or_else(|| "undefined".to_string(), |id| id.clone());
Self {
anchor_video: last_video_id,
anchor_length: last_id + 1,
bitfield: watched_bit_field.bitfield,
}
}
}
impl FromStr for WatchedField {
type Err = Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
// serialized is formed by {id}:{len}:{serializedBuf}, but since {id} might contain : we have to pop gradually and then keep the rest
let mut components = string.split(':').collect::<Vec<&str>>();
if components.len() < 3 {
return Err(Error("Not enough components".to_string()));
}
let bitfield_buf = components
.pop()
.ok_or("Cannot obtain the serialized data")?
.to_string();
let anchor_length = components
.pop()
.ok_or("Cannot obtain the length field")?
.parse::<usize>()?;
let anchor_video_id = components.join(":");
let bitfield = BitField8::try_from((bitfield_buf, None))?;
Ok(Self {
bitfield,
anchor_video: anchor_video_id,
anchor_length,
})
}
}
/// Tracks which videos have been watched.
///
/// Serialized in the format `{id}:{len}:{serializedBuf}` but since `{id}`
/// might contain `:` we pop gradually and then keep the rest.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchedBitField {
bitfield: BitField8,
video_ids: Vec<String>,
}
impl WatchedBitField {
pub fn construct_from_array(arr: Vec<bool>, video_ids: Vec<String>) -> WatchedBitField {
let mut bitfield = BitField8::new(video_ids.len());
for (i, val) in arr.iter().enumerate() {
bitfield.set(i, *val);
}
WatchedBitField {
bitfield,
video_ids,
}
}
pub fn new(bitfield: BitField8, video_ids: Vec<String>) -> WatchedBitField {
Self {
bitfield,
video_ids,
}
}
pub fn construct_with_videos(
watched_field: WatchedField,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// We can shift the bitmap in any direction, as long as we can find the anchor video
if let Some(anchor_video_idx) = video_ids
.iter()
.position(|s| s == &watched_field.anchor_video)
{
// TODO: replace with `usize` and `checked_sub` when more tests are added for negative ids
let offset = watched_field.anchor_length as i32 - anchor_video_idx as i32 - 1;
let bitfield =
BitField8::new_with_values(watched_field.bitfield.values, Some(video_ids.len()));
// in case of a previously empty array, this will be 0
if offset != 0 {
// Resize the buffer
let mut resized_wbf = WatchedBitField { | for i in 0..video_ids.len() {
// TODO: Check what will happen if we change it to `usize`
let id_in_prev = i as i32 + offset;
if id_in_prev >= 0 && (id_in_prev as usize) < bitfield.length {
resized_wbf.set(i, bitfield.get(id_in_prev as usize));
}
}
Ok(resized_wbf)
} else {
Ok(WatchedBitField {
bitfield,
video_ids,
})
}
} else {
// videoId could not be found, return a totally blank buf
Ok(WatchedBitField {
bitfield: BitField8::new(video_ids.len()),
video_ids,
})
}
}
pub fn construct_and_resize(
serialized: &str,
video_ids: Vec<String>,
) -> Result<WatchedBitField, Error> {
// note: videoIds.length can only be >= the serialized lastLength
// should we assert?
// we might also wanna assert that the bitfield.length for the returned wb is the same as videoIds.length
let watched_field = serialized.parse()?;
Self::construct_with_videos(watched_field, video_ids)
}
pub fn get(&self, idx: usize) -> bool {
self.bitfield.get(idx)
}
pub fn get_video(&self, video_id: &str) -> bool {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.get(pos)
} else {
false
}
}
pub fn set(&mut self, idx: usize, v: bool) {
self.bitfield.set(idx, v);
}
pub fn set_video(&mut self, video_id: &str, v: bool) {
if let Some(pos) = self.video_ids.iter().position(|s| *s == video_id) {
self.bitfield.set(pos, v);
}
}
}
impl fmt::Display for WatchedBitField {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let packed = String::try_from(&self.bitfield).expect("bitfield failed to compress");
let last_id = self.bitfield.last_index_of(true).unwrap_or(0);
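// Note (added): `last_id + 1` below is the serialized anchor_length, the
// 1-based count of videos up to and including the last watched one; if no
// bit is set, last_id defaults to 0 and the field serializes with length 1
// (cf. the `to_string_empty` test).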
let last_video_id = self
.video_ids
.get(last_id)
.map_or("undefined", |id| id.as_str());
write!(f, "{}:{}:{}", last_video_id, last_id + 1, packed)
}
}
impl From<WatchedBitField> for BitField8 {
fn from(watched: WatchedBitField) -> Self {
watched.bitfield
}
}
/// Module containing all the impls of the `serde` feature
#[cfg(feature = "serde")]
mod serde {
use std::str::FromStr;
use serde::{de, Serialize};
use super::WatchedField;
impl<'de> serde::Deserialize<'de> for WatchedField {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let serialized = String::deserialize(deserializer)?;
WatchedField::from_str(&serialized).map_err(de::Error::custom)
}
}
impl Serialize for WatchedField {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
}
#[cfg(test)]
mod tests {
use crate::{BitField8, WatchedBitField, WatchedField};
#[test]
fn parse_and_modify() {
let videos = [
"tt2934286:1:1",
"tt2934286:1:2",
"tt2934286:1:3",
"tt2934286:1:4",
"tt2934286:1:5",
"tt2934286:1:6",
"tt2934286:1:7",
"tt2934286:1:8",
"tt2934286:1:9",
];
let watched = "tt2934286:1:5:5:eJyTZwAAAEAAIA==";
let mut wb = WatchedBitField::construct_and_resize(
watched,
videos.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
assert!(wb.get_video("tt2934286:1:5"));
assert!(!wb.get_video("tt2934286:1:6"));
assert_eq!(watched, wb.to_string());
wb.set_video("tt2934286:1:6", true);
assert!(wb.get_video("tt2934286:1:6"));
}
#[test]
fn construct_from_array() {
let arr = vec![false; 500];
let mut video_ids = vec![];
for i in 1..500 {
video_ids.push(format!("tt2934286:1:{}", i));
}
let mut wb = WatchedBitField::construct_from_array(arr, video_ids.clone());
// All should be false
for (i, val) in video_ids.iter().enumerate() {
assert!(!wb.get(i));
assert!(!wb.get_video(val));
}
// Set half to true
for (i, _val) in video_ids.iter().enumerate() {
wb.set(i, i % 2 == 0);
}
// Serialize and deserialize to new structure
let watched = wb.to_string();
let wb2 = WatchedBitField::construct_and_resize(
&watched,
video_ids.iter().map(|v| v.to_string()).collect(),
)
.unwrap();
// Half should still be true
for (i, val) in video_ids.iter().enumerate() {
assert_eq!(wb2.get(i), i % 2 == 0);
assert_eq!(wb2.get_video(val), i % 2 == 0);
}
}
#[test]
fn to_string_empty() {
let watched = WatchedBitField::construct_from_array(vec![], vec![]);
let serialized = watched.to_string();
assert_eq!(serialized, "undefined:1:eJwDAAAAAAE=");
}
#[test]
#[cfg(feature = "serde")]
fn test_watched_field_de_serialize() {
let string = "tt7767422:3:8:24:eJz7//8/AAX9Av4=";
let json_value = serde_json::json!(string);
let expected = string.parse::<WatchedField>().expect("Should parse field");
let actual_from_json = serde_json::from_value::<WatchedField>(json_value.clone())
.expect("Should deserialize ");
assert_eq!(expected, actual_from_json);
assert_eq!("eJz7//8/AAX9Av4=", &actual_from_json.bitfield.to_string());
assert_eq!(24, actual_from_json.anchor_length);
assert_eq!("tt7767422:3:8", actual_from_json.anchor_video);
let actual_to_json = serde_json::to_value(&expected).expect("Should serialize");
assert_eq!(json_value, actual_to_json);
}
#[test]
fn deserialize_empty() {
let watched = WatchedBitField::construct_and_resize("undefined:1:eJwDAAAAAAE=", vec![]);
assert_eq!(
watched,
Ok(WatchedBitField {
bitfield: BitField8::new(0),
video_ids: vec![]
})
);
}
} | bitfield: BitField8::new(video_ids.len()),
video_ids: video_ids.clone(),
};
// rewrite the old buf into the new one, applying the offset | random_line_split |
pisco_redsequence.py | import sys
import os
import pandas as pd
import numpy as np
import subprocess
import shlex
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib
import extra_program as ex
import ezgal
from rsz import RSModel
##----
def make_images(field,ax=None):
dir='final/'
ax.imshow(image.imread(dir+"img%s_2.eps" % field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
return None
def red_seq_color_plot(color,df,mags,ax=None):
if ax is None:
ax = plt.gca()
#https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
n=1000 #repeat number for sampling
slope_fit,i_band0,color_err,rs_models,band_1,band_2=color_sloan(color, mags)
ysample=df[df_color[band_1]]-df[df_color[band_2]]
ysample_err=np.sqrt(df[df_colorerr[band_1]]**2+df[df_colorerr[band_2]]**2)
total=[]
for i in ysample.index:
total.append(np.random.normal(loc=ysample[i],scale=ysample_err[i],size=n))
#total.append(0.1)
total=np.array(total)
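# note (added): each galaxy's colour is resampled n=1000 times from a Gaussian
# with its measured error, so the red-sequence membership counts below fold in
# the photometric uncertainty instead of using point estimates alone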
band_x='sloan_i'
all_x=np.repeat(df[df_color[band_x]],n)
total=np.reshape(total, len(all_x))
bp=ax.errorbar(df[df_color[band_x]],ysample,yerr=ysample_err,fmt='.',alpha=0.5)
#bp=ax.errorbar(df[df_color[band_x]],ysample,fmt='.',alpha=0.5)
red_band=np.arange(16,25,0.01) #just for the line plot in the 3rd plot
redshift_range=np.arange(0.10,0.8,0.05) #for the actual data
number=[]
if color=='sloan_g-sloan_r':
redshift_range=np.arange(0.10,0.36,0.05)
elif color=='sloan_r-sloan_i':
redshift_range=np.arange(0.10,0.71,0.05)
for redshift in redshift_range:
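# apparent-magnitude cut from the absolute pivot i_band0 via the distance
# modulus m = M + 5*log10(d_L/10pc); ex.d_L appears to return Mpc, hence 1e6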
if color=='sloan_g-sloan_r':
# i_band_cut=20.5
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
elif color=='sloan_r-sloan_i':
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
aa=red_band<i_band_cut
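# count sampled points brighter than the cut whose color lies between the
# model red-sequence lines at z-0.025 and z+0.025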
loc=[(all_x<i_band_cut)&\
(total < rs_models[color][round(redshift+0.025,2)].rs_color(all_x))&\
(total > rs_models[color][round(redshift-0.025,2)].rs_color(all_x))][0]
number.append(np.sum(loc))
ax.plot(red_band[aa],rs_models[color][round(redshift,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls='-')
ax.plot(red_band[aa],rs_models[color][round(redshift+0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.plot(red_band[aa],rs_models[color][round(redshift-0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.set_xlim(16,25)
if color == 'sloan_g-sloan_i':
ax.set_ylim(0,4)
elif color == 'sloan_g-sloan_r':
ax.set_ylim(0.0,2.5)
else:
ax.set_ylim(-0.5,1.75)
ax.set_xlabel(band_x)
ax.set_ylabel(color)
return np.array(redshift_range),np.array(number)
def color_sloan(color, mags):
|
# slopes for the different color sets of interest (fitted in 01_rsz_test, fit_gr_ri01.ipynb)
def blue_model(color,mags,redshift,red_mag):
#g-r
if color=='sloan_g-sloan_r':
blue_mag=(0.787302458781+2.9352*redshift)+red_mag
elif color=='sloan_r-sloan_i':
if redshift <= 0.36:
blue_mag=(0.348871987852+0.75340856*redshift)+red_mag
else:
blue_mag=(-0.210727367027+2.2836974*redshift)+red_mag
else:
raise ValueError('This color has not been implemented.')
return blue_mag
def histogram_plot(xranf,numberf,df,ax=None,line=False,cbar=False):
l2=6
ax.set_xlim(0,0.8)
ic2,ic3=0,0
numbers=numberf[:6]
numbers2=numberf[l2:]
ax.bar(xranf[:6],numbers,width=0.05,color='red',alpha=0.5,align='center')
ax.bar(xranf[l2:],numbers2,width=0.05,alpha=0.5,align='center')
if cbar:
cbar=fig.colorbar(s_m, ax=ax)
cbar.set_label("redshift")
if line:
if dff_sdss.loc[ind].redshift!=-1:
ax.axvline(dff_sdss.redshift[ind],ls='--',color='#66cc00',lw=2.,label='qso z=%.2f'%dff_sdss.redshift[ind])
ax.axvline(xranf[:6][ic2],ls='--',color='black',lw=2.,label='red_seq g-r z=%.2f'%xranf[:6][ic2])
ax.axvline(xranf[l2:][ic3],ls='--',color='purple',lw=2.,label='red_seq r-i z=%.2f'%xranf[l2:][ic3])
ax.legend(loc='best',frameon=False)
sigma,sigma2,sigma3=0.,0.,0.
if line:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3,dff_sdss.redshift[ind],sigma])
else:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3])
def save_rgb_image_extra(field, f026):
cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_2.eps -exit" % \
(f026.RA0.values[0], f026.DEC0.values[0], field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
cmd = "ds9 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_3.eps -exit" % \
(field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
print 'finished saving final/img%s.eps' % field
def find_offset(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
band=[x.split(' ')[0][-1] for x in content[5:-1]]
corr=[float(x.split(' ')[1]) for x in content[5:-1]]
ecorr=[float(x.split(' ')[3]) for x in content[5:-1]]
return zip(band,corr,ecorr), corr
def find_num(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
num_2mass=content[0].split(' ')[3]
num_star=content[3].split(' ')[1]
chisq=content[2].split(' ')[1]
return num_2mass,num_star,chisq
##--------
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
filters=['sloan_r','sloan_i','sloan_z','sloan_g']
zfs = np.arange(1.0, 6.001, 0.05)
zf = 3.0 #formation redshift
spacing=0.01 #spacing of redshift for resolution (0.01 is high_res, 0.05 low_res)
zs = np.arange(0.05, 2.500001, spacing)
new_model = ezgal.model("pisco_pipeline/pisco_exp_chab_evolved.model")
new_model.set_normalization(filter='ks', mag=10.9, apparent=True, vega=True,z=0.023) ##normalize to Coma
new_mags = new_model.get_apparent_mags(zf, filters=filters, zs=zs, ab=True)
df_color=dict()
df_color['sloan_g']='MAG_g'
df_color['sloan_r']='MAG_r'
df_color['sloan_i']='MAG_i'
df_color['sloan_z']='MAG_z'
df_colorerr=dict()
df_colorerr['sloan_g']='MAGERR_g'
df_colorerr['sloan_r']='MAGERR_r'
df_colorerr['sloan_i']='MAGERR_i'
df_colorerr['sloan_z']='MAGERR_z'
zss=zs[0:80:5]
norm = matplotlib.colors.Normalize(vmin=np.min(zss),vmax=np.max(zss))
c_m = matplotlib.cm.RdYlBu
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
# Entry point: red-sequence diagnostics for one field of reduced PISCO data
#dir = str(sys.argv[1])
field = str(sys.argv[1])
slrdir = 'slr_output'
# field = 'Field054'
df_all = pd.read_csv("/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs_list_new.csv")
f026 = df_all[df_all["name"]==field]
redshift=f026.redshift.values[0]
priority=f026.priority.values[0]
seeing=Table.read('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/PISCO_Jan17_seeing.csv')
see=seeing[seeing['Field']==int(field[-3:])]['Seeing'][0]
offset=find_offset('slr_output/star_%s.fits.offsets.list' % field)
num_2mass,num_star,chisq=find_num('../pisco_code/slr_output/star_%s.fits.offsets.list' % field)
#save_rgb_image_extra(field, f026)
df = pd.read_csv(os.path.join(slrdir,'ntotal_%s.csv' % field),index_col=0)
c5 = SkyCoord(ra=df['XWIN_WORLD'].values*u.degree, dec=df['YWIN_WORLD'].values*u.degree)
c0 = SkyCoord(ra=f026.RA0*u.degree, dec=f026.DEC0*u.degree)
sep = c5.separation(c0)
cut=df[(sep.arcmin<ex.rad_A(redshift,dist=1.5)) & (df["CLASS_STAR"]<0.75)] #CLASS_STAR < 0.75
#ncut=df[(sep.arcmin>2.5) & (df["CLASS_STAR"]<0.8)]
print see
print offset[1]
fig,ax=plt.subplots(1,4,figsize=(20,5));
fig.suptitle(field+', Redshift='+str(redshift)+', Priority='+priority+', Seeing='+str(see)+', Offset(r,i,g,z)='+str(offset[1])+', #2mass='+str(num_2mass)+', #stars='+str(num_star)+', chisq='+str(chisq))
make_images(field,ax[0])
xran,numbers_gr=red_seq_color_plot('sloan_g-sloan_r',cut,new_mags,ax[1])
xran2,numbers_ri=red_seq_color_plot('sloan_r-sloan_i',cut,new_mags,ax[2])
total_sigma=histogram_plot(np.append(xran,xran2),np.append(numbers_gr,numbers_ri),cut,ax[3])
ax[3].axvline(redshift, color='green')
fig.tight_layout()
fig.savefig('plots/plot_%s.png' % (field), dpi=200)
| if color=='sloan_r-sloan_z':
slope_r_m_i=-0.0192138872893
slope_r_m_z=(1.584 * slope_r_m_i)
slope_fit=[slope_r_m_z, 0]
i_band0=-20.
elif color=='sloan_g-sloan_i':
slope_v_m_i=-0.029
slope_g_m_i=(1.481 * slope_v_m_i)
slope_fit=[slope_g_m_i, 0]
i_band0=-20.
elif color=='sloan_r-sloan_i':
slope_rc_m_ic=-0.0192138872893
slope_r_m_i=(1.007 * slope_rc_m_ic)
slope_fit=[slope_r_m_i, 0]
i_band0=-20.5
color_err=0.18
elif color=='sloan_g-sloan_r':
slope_v_m_r=-0.0133824600874
slope_g_m_r=(1.646 * slope_v_m_r)
slope_fit=[slope_g_m_r, 0]
i_band0=-20.5
color_err=0.15
band_1, band_2 = color.split("-")
band_1_idx=filters.index(band_1)
band_2_idx=filters.index(band_2)
rs_models=dict()
rs_models[color]=dict()
for z, m in zip(zs,mags):
#mag_1=m[band_1_idx]
mag_2=m[band_2_idx]
mag_1=blue_model(color,mags,z,mag_2)
this_model=RSModel(z, mag_1, mag_2, slope_fit)
rs_models[color][this_model.z]=this_model
return slope_fit,i_band0,color_err,rs_models,band_1,band_2 | identifier_body |
pisco_redsequence.py | import sys
import os
import pandas as pd
import numpy as np
import subprocess
import shlex
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib
import extra_program as ex
import ezgal
from rsz import RSModel
##----
def make_images(field,ax=None):
dir='final/'
ax.imshow(image.imread(dir+"img%s_2.eps" % field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
return None
def red_seq_color_plot(color,df,mags,ax=None):
if ax is None:
ax = plt.gca()
#https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
n=1000 # number of random color samples drawn per source
slope_fit,i_band0,color_err,rs_models,band_1,band_2=color_sloan(color, mags)
ysample=df[df_color[band_1]]-df[df_color[band_2]]
ysample_err=np.sqrt(df[df_colorerr[band_1]]**2+df[df_colorerr[band_2]]**2)
total=[]
for i in ysample.index:
total.append(np.random.normal(loc=ysample[i],scale=ysample_err[i],size=n))
#total.append(0.1)
total=np.array(total)
band_x='sloan_i'
all_x=np.repeat(df[df_color[band_x]],n)
total=np.reshape(total, len(all_x))
bp=ax.errorbar(df[df_color[band_x]],ysample,yerr=ysample_err,fmt='.',alpha=0.5)
#bp=ax.errorbar(df[df_color[band_x]],ysample,fmt='.',alpha=0.5)
red_band=np.arange(16,25,0.01) #just for the line plot in the 3rd plot
redshift_range=np.arange(0.10,0.8,0.05) #for the actual data
number=[]
if color=='sloan_g-sloan_r':
redshift_range=np.arange(0.10,0.36,0.05)
elif color=='sloan_r-sloan_i':
redshift_range=np.arange(0.10,0.71,0.05)
for redshift in redshift_range:
if color=='sloan_g-sloan_r':
# i_band_cut=20.5
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
elif color=='sloan_r-sloan_i':
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
aa=red_band<i_band_cut
loc=[(all_x<i_band_cut)&\
(total < rs_models[color][round(redshift+0.025,2)].rs_color(all_x))&\
(total > rs_models[color][round(redshift-0.025,2)].rs_color(all_x))][0]
number.append(np.sum(loc))
ax.plot(red_band[aa],rs_models[color][round(redshift,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls='-')
ax.plot(red_band[aa],rs_models[color][round(redshift+0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.plot(red_band[aa],rs_models[color][round(redshift-0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.set_xlim(16,25)
if color == 'sloan_g-sloan_i':
ax.set_ylim(0,4)
elif color == 'sloan_g-sloan_r':
ax.set_ylim(0.0,2.5)
else:
ax.set_ylim(-0.5,1.75)
ax.set_xlabel(band_x)
ax.set_ylabel(color)
return np.array(redshift_range),np.array(number)
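# Return the red-sequence slope, absolute i-band pivot, intrinsic color
# scatter, and an RSModel per redshift for the requested color; slopes come
# from the Jordi et al. (2006) SDSS transformations linked above. Note that
# color_err is only assigned in the r-i and g-r branches.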
def color_sloan(color, mags):
if color=='sloan_r-sloan_z':
slope_r_m_i=-0.0192138872893
slope_r_m_z=(1.584 * slope_r_m_i)
slope_fit=[slope_r_m_z, 0]
i_band0=-20.
elif color=='sloan_g-sloan_i':
slope_v_m_i=-0.029
slope_g_m_i=(1.481 * slope_v_m_i)
slope_fit=[slope_g_m_i, 0]
i_band0=-20.
elif color=='sloan_r-sloan_i':
slope_rc_m_ic=-0.0192138872893
slope_r_m_i=(1.007 * slope_rc_m_ic)
slope_fit=[slope_r_m_i, 0]
i_band0=-20.5
color_err=0.18
elif color=='sloan_g-sloan_r':
slope_v_m_r=-0.0133824600874
slope_g_m_r=(1.646 * slope_v_m_r)
slope_fit=[slope_g_m_r, 0]
i_band0=-20.5
color_err=0.15
band_1, band_2 = color.split("-")
band_1_idx=filters.index(band_1)
band_2_idx=filters.index(band_2)
rs_models=dict()
rs_models[color]=dict()
for z, m in zip(zs,mags):
#mag_1=m[band_1_idx]
mag_2=m[band_2_idx]
mag_1=blue_model(color,mags,z,mag_2)
this_model=RSModel(z, mag_1, mag_2, slope_fit)
rs_models[color][this_model.z]=this_model
return slope_fit,i_band0,color_err,rs_models,band_1,band_2
# slopes for the different color sets of interest (fitted in 01_rsz_test, fit_gr_ri01.ipynb)
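# Blue-cloud magnitude relative to the red-sequence magnitude: a linear
# offset in redshift for g-r and a piecewise-linear one for r-i.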
def blue_model(color,mags,redshift,red_mag):
#g-r
if color=='sloan_g-sloan_r':
blue_mag=(0.787302458781+2.9352*redshift)+red_mag
elif color=='sloan_r-sloan_i':
if redshift <= 0.36:
blue_mag=(0.348871987852+0.75340856*redshift)+red_mag
else:
blue_mag=(-0.210727367027+2.2836974*redshift)+red_mag
else:
raise ValueError('This color has not been implemented.')
return blue_mag
def histogram_plot(xranf,numberf,df,ax=None,line=False,cbar=False):
l2=6
ax.set_xlim(0,0.8)
ic2,ic3=0,0
numbers=numberf[:6]
numbers2=numberf[l2:]
ax.bar(xranf[:6],numbers,width=0.05,color='red',alpha=0.5,align='center')
ax.bar(xranf[l2:],numbers2,width=0.05,alpha=0.5,align='center')
if cbar:
cbar=fig.colorbar(s_m, ax=ax)
cbar.set_label("redshift")
if line:
if dff_sdss.loc[ind].redshift!=-1:
ax.axvline(dff_sdss.redshift[ind],ls='--',color='#66cc00',lw=2.,label='qso z=%.2f'%dff_sdss.redshift[ind])
ax.axvline(xranf[:6][ic2],ls='--',color='black',lw=2.,label='red_seq g-r z=%.2f'%xranf[:6][ic2])
ax.axvline(xranf[l2:][ic3],ls='--',color='purple',lw=2.,label='red_seq r-i z=%.2f'%xranf[l2:][ic3])
ax.legend(loc='best',frameon=False)
sigma,sigma2,sigma3=0.,0.,0.
if line:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3,dff_sdss.redshift[ind],sigma])
else:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3])
def save_rgb_image_extra(field, f026):
cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_2.eps -exit" % \
(f026.RA0.values[0], f026.DEC0.values[0], field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
cmd = "ds9 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_3.eps -exit" % \
(field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
print 'finished saving final/img%s.eps' % field
def | (fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
band=[x.split(' ')[0][-1] for x in content[5:-1]]
corr=[float(x.split(' ')[1]) for x in content[5:-1]]
ecorr=[float(x.split(' ')[3]) for x in content[5:-1]]
return zip(band,corr,ecorr), corr
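# Pull the number of 2MASS matches, the number of stars used, and the fit
# chi-square out of the SLR offsets file.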
def find_num(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
num_2mass=content[0].split(' ')[3]
num_star=content[3].split(' ')[1]
chisq=content[2].split(' ')[1]
return num_2mass,num_star,chisq
##--------
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
filters=['sloan_r','sloan_i','sloan_z','sloan_g']
zfs = np.arange(1.0, 6.001, 0.05)
zf = 3.0 #formation redshift
spacing=0.01 #spacing of redshift for resolution (0.01 is high_res, 0.05 low_res)
zs = np.arange(0.05, 2.500001, spacing)
new_model = ezgal.model("pisco_pipeline/pisco_exp_chab_evolved.model")
new_model.set_normalization(filter='ks', mag=10.9, apparent=True, vega=True,z=0.023) ##normalize to Coma
new_mags = new_model.get_apparent_mags(zf, filters=filters, zs=zs, ab=True)
df_color=dict()
df_color['sloan_g']='MAG_g'
df_color['sloan_r']='MAG_r'
df_color['sloan_i']='MAG_i'
df_color['sloan_z']='MAG_z'
df_colorerr=dict()
df_colorerr['sloan_g']='MAGERR_g'
df_colorerr['sloan_r']='MAGERR_r'
df_colorerr['sloan_i']='MAGERR_i'
df_colorerr['sloan_z']='MAGERR_z'
zss=zs[0:80:5]
norm = matplotlib.colors.Normalize(vmin=np.min(zss),vmax=np.max(zss))
c_m = matplotlib.cm.RdYlBu
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
# Entry point: red-sequence diagnostics for one field of reduced PISCO data
#dir = str(sys.argv[1])
field = str(sys.argv[1])
slrdir = 'slr_output'
# field = 'Field054'
df_all = pd.read_csv("/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs_list_new.csv")
f026 = df_all[df_all["name"]==field]
redshift=f026.redshift.values[0]
priority=f026.priority.values[0]
seeing=Table.read('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/PISCO_Jan17_seeing.csv')
see=seeing[seeing['Field']==int(field[-3:])]['Seeing'][0]
offset=find_offset('slr_output/star_%s.fits.offsets.list' % field)
num_2mass,num_star,chisq=find_num('../pisco_code/slr_output/star_%s.fits.offsets.list' % field)
#save_rgb_image_extra(field, f026)
df = pd.read_csv(os.path.join(slrdir,'ntotal_%s.csv' % field),index_col=0)
c5 = SkyCoord(ra=df['XWIN_WORLD'].values*u.degree, dec=df['YWIN_WORLD'].values*u.degree)
c0 = SkyCoord(ra=f026.RA0*u.degree, dec=f026.DEC0*u.degree)
sep = c5.separation(c0)
cut=df[(sep.arcmin<ex.rad_A(redshift,dist=1.5)) & (df["CLASS_STAR"]<0.75)] #CLASS_STAR < 0.75
#ncut=df[(sep.arcmin>2.5) & (df["CLASS_STAR"]<0.8)]
print see
print offset[1]
fig,ax=plt.subplots(1,4,figsize=(20,5));
fig.suptitle(field+', Redshift='+str(redshift)+', Priority='+priority+', Seeing='+str(see)+', Offset(r,i,g,z)='+str(offset[1])+', #2mass='+str(num_2mass)+', #stars='+str(num_star)+', chisq='+str(chisq))
make_images(field,ax[0])
xran,numbers_gr=red_seq_color_plot('sloan_g-sloan_r',cut,new_mags,ax[1])
xran2,numbers_ri=red_seq_color_plot('sloan_r-sloan_i',cut,new_mags,ax[2])
total_sigma=histogram_plot(np.append(xran,xran2),np.append(numbers_gr,numbers_ri),cut,ax[3])
ax[3].axvline(redshift, color='green')
fig.tight_layout()
fig.savefig('plots/plot_%s.png' % (field), dpi=200)
| find_offset | identifier_name |
pisco_redsequence.py | import sys
import os
import pandas as pd
import numpy as np
import subprocess
import shlex
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib
import extra_program as ex
import ezgal
from rsz import RSModel
##----
def make_images(field,ax=None):
dir='final/'
ax.imshow(image.imread(dir+"img%s_2.eps" % field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
return None
def red_seq_color_plot(color,df,mags,ax=None):
if ax is None:
ax = plt.gca()
#https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
n=1000 # number of random color samples drawn per source
slope_fit,i_band0,color_err,rs_models,band_1,band_2=color_sloan(color, mags)
ysample=df[df_color[band_1]]-df[df_color[band_2]]
ysample_err=np.sqrt(df[df_colorerr[band_1]]**2+df[df_colorerr[band_2]]**2)
total=[]
for i in ysample.index:
total.append(np.random.normal(loc=ysample[i],scale=ysample_err[i],size=n))
#total.append(0.1)
total=np.array(total)
band_x='sloan_i'
all_x=np.repeat(df[df_color[band_x]],n)
total=np.reshape(total, len(all_x))
bp=ax.errorbar(df[df_color[band_x]],ysample,yerr=ysample_err,fmt='.',alpha=0.5)
#bp=ax.errorbar(df[df_color[band_x]],ysample,fmt='.',alpha=0.5)
red_band=np.arange(16,25,0.01) #just for the line plot in the 3rd plot
redshift_range=np.arange(0.10,0.8,0.05) #for the actual data
number=[]
if color=='sloan_g-sloan_r':
redshift_range=np.arange(0.10,0.36,0.05)
elif color=='sloan_r-sloan_i':
redshift_range=np.arange(0.10,0.71,0.05)
for redshift in redshift_range:
if color=='sloan_g-sloan_r':
# i_band_cut=20.5
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
elif color=='sloan_r-sloan_i':
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
aa=red_band<i_band_cut
loc=[(all_x<i_band_cut)&\
(total < rs_models[color][round(redshift+0.025,2)].rs_color(all_x))&\
(total > rs_models[color][round(redshift-0.025,2)].rs_color(all_x))][0]
number.append(np.sum(loc))
ax.plot(red_band[aa],rs_models[color][round(redshift,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls='-')
ax.plot(red_band[aa],rs_models[color][round(redshift+0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.plot(red_band[aa],rs_models[color][round(redshift-0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.set_xlim(16,25)
if color == 'sloan_g-sloan_i':
ax.set_ylim(0,4)
elif color == 'sloan_g-sloan_r':
ax.set_ylim(0.0,2.5)
else:
ax.set_ylim(-0.5,1.75)
ax.set_xlabel(band_x)
ax.set_ylabel(color)
return np.array(redshift_range),np.array(number)
def color_sloan(color, mags):
if color=='sloan_r-sloan_z':
slope_r_m_i=-0.0192138872893
slope_r_m_z=(1.584 * slope_r_m_i)
slope_fit=[slope_r_m_z, 0]
i_band0=-20.
elif color=='sloan_g-sloan_i':
slope_v_m_i=-0.029
slope_g_m_i=(1.481 * slope_v_m_i)
slope_fit=[slope_g_m_i, 0]
i_band0=-20.
elif color=='sloan_r-sloan_i':
slope_rc_m_ic=-0.0192138872893
slope_r_m_i=(1.007 * slope_rc_m_ic)
slope_fit=[slope_r_m_i, 0]
i_band0=-20.5
color_err=0.18
elif color=='sloan_g-sloan_r':
slope_v_m_r=-0.0133824600874
slope_g_m_r=(1.646 * slope_v_m_r)
slope_fit=[slope_g_m_r, 0]
i_band0=-20.5
color_err=0.15
band_1, band_2 = color.split("-")
band_1_idx=filters.index(band_1)
band_2_idx=filters.index(band_2)
rs_models=dict()
rs_models[color]=dict()
for z, m in zip(zs,mags):
#mag_1=m[band_1_idx]
mag_2=m[band_2_idx]
mag_1=blue_model(color,mags,z,mag_2)
this_model=RSModel(z, mag_1, mag_2, slope_fit)
rs_models[color][this_model.z]=this_model
return slope_fit,i_band0,color_err,rs_models,band_1,band_2
# slopes for the different color sets of interest (fitted in 01_rsz_test, fit_gr_ri01.ipynb)
def blue_model(color,mags,redshift,red_mag):
#g-r
if color=='sloan_g-sloan_r':
|
elif color=='sloan_r-sloan_i':
if redshift <= 0.36:
blue_mag=(0.348871987852+0.75340856*redshift)+red_mag
else:
blue_mag=(-0.210727367027+2.2836974*redshift)+red_mag
else:
raise ValueError('This color has not been implemented.')
return blue_mag
def histogram_plot(xranf,numberf,df,ax=None,line=False,cbar=False):
l2=6
ax.set_xlim(0,0.8)
ic2,ic3=0,0
numbers=numberf[:6]
numbers2=numberf[l2:]
ax.bar(xranf[:6],numbers,width=0.05,color='red',alpha=0.5,align='center')
ax.bar(xranf[l2:],numbers2,width=0.05,alpha=0.5,align='center')
if cbar:
cbar=fig.colorbar(s_m, ax=ax)
cbar.set_label("redshift")
if line:
if dff_sdss.loc[ind].redshift!=-1:
ax.axvline(dff_sdss.redshift[ind],ls='--',color='#66cc00',lw=2.,label='qso z=%.2f'%dff_sdss.redshift[ind])
ax.axvline(xranf[:6][ic2],ls='--',color='black',lw=2.,label='red_seq g-r z=%.2f'%xranf[:6][ic2])
ax.axvline(xranf[l2:][ic3],ls='--',color='purple',lw=2.,label='red_seq r-i z=%.2f'%xranf[l2:][ic3])
ax.legend(loc='best',frameon=False)
sigma,sigma2,sigma3=0.,0.,0.
if line:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3,dff_sdss.redshift[ind],sigma])
else:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3])
def save_rgb_image_extra(field, f026):
cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_2.eps -exit" % \
(f026.RA0.values[0], f026.DEC0.values[0], field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
cmd = "ds9 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_3.eps -exit" % \
(field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
print 'finished saving final/img%s.eps' % field
def find_offset(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
band=[x.split(' ')[0][-1] for x in content[5:-1]]
corr=[float(x.split(' ')[1]) for x in content[5:-1]]
ecorr=[float(x.split(' ')[3]) for x in content[5:-1]]
return zip(band,corr,ecorr), corr
def find_num(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
num_2mass=content[0].split(' ')[3]
num_star=content[3].split(' ')[1]
chisq=content[2].split(' ')[1]
return num_2mass,num_star,chisq
##--------
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
filters=['sloan_r','sloan_i','sloan_z','sloan_g']
zfs = np.arange(1.0, 6.001, 0.05)
zf = 3.0 #formation redshift
spacing=0.01 #spacing of redshift for resolution (0.01 is high_res, 0.05 low_res)
zs = np.arange(0.05, 2.500001, spacing)
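# passively evolving stellar population model (exponential SFH and Chabrier
# IMF, judging by the model file name) used to predict apparent magnitudes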
new_model = ezgal.model("pisco_pipeline/pisco_exp_chab_evolved.model")
new_model.set_normalization(filter='ks', mag=10.9, apparent=True, vega=True,z=0.023) ##normalize to Coma
new_mags = new_model.get_apparent_mags(zf, filters=filters, zs=zs, ab=True)
df_color=dict()
df_color['sloan_g']='MAG_g'
df_color['sloan_r']='MAG_r'
df_color['sloan_i']='MAG_i'
df_color['sloan_z']='MAG_z'
df_colorerr=dict()
df_colorerr['sloan_g']='MAGERR_g'
df_colorerr['sloan_r']='MAGERR_r'
df_colorerr['sloan_i']='MAGERR_i'
df_colorerr['sloan_z']='MAGERR_z'
zss=zs[0:80:5]
norm = matplotlib.colors.Normalize(vmin=np.min(zss),vmax=np.max(zss))
c_m = matplotlib.cm.RdYlBu
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
# Entry point: red-sequence diagnostics for one field of reduced PISCO data
#dir = str(sys.argv[1])
field = str(sys.argv[1])
slrdir = 'slr_output'
# field = 'Field054'
df_all = pd.read_csv("/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs_list_new.csv")
f026 = df_all[df_all["name"]==field]
redshift=f026.redshift.values[0]
priority=f026.priority.values[0]
seeing=Table.read('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/PISCO_Jan17_seeing.csv')
see=seeing[seeing['Field']==int(field[-3:])]['Seeing'][0]
offset=find_offset('slr_output/star_%s.fits.offsets.list' % field)
num_2mass,num_star,chisq=find_num('../pisco_code/slr_output/star_%s.fits.offsets.list' % field)
#save_rgb_image_extra(field, f026)
df = pd.read_csv(os.path.join(slrdir,'ntotal_%s.csv' % field),index_col=0)
c5 = SkyCoord(ra=df['XWIN_WORLD'].values*u.degree, dec=df['YWIN_WORLD'].values*u.degree)
c0 = SkyCoord(ra=f026.RA0*u.degree, dec=f026.DEC0*u.degree)
sep = c5.separation(c0)
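# likely members: within a projected rad_A(z, dist=1.5) arcmin of the cluster
# center (dist presumably in Mpc) and not star-like (CLASS_STAR < 0.75)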
cut=df[(sep.arcmin<ex.rad_A(redshift,dist=1.5)) & (df["CLASS_STAR"]<0.75)] #CLASS_STAR < 0.75
#ncut=df[(sep.arcmin>2.5) & (df["CLASS_STAR"]<0.8)]
print see
print offset[1]
fig,ax=plt.subplots(1,4,figsize=(20,5));
fig.suptitle(field+', Redshift='+str(redshift)+', Priority='+priority+', Seeing='+str(see)+', Offset(r,i,g,z)='+str(offset[1])+', #2mass='+str(num_2mass)+', #stars='+str(num_star)+', chisq='+str(chisq))
make_images(field,ax[0])
xran,numbers_gr=red_seq_color_plot('sloan_g-sloan_r',cut,new_mags,ax[1])
xran2,numbers_ri=red_seq_color_plot('sloan_r-sloan_i',cut,new_mags,ax[2])
total_sigma=histogram_plot(np.append(xran,xran2),np.append(numbers_gr,numbers_ri),cut,ax[3])
ax[3].axvline(redshift, color='green')
fig.tight_layout()
fig.savefig('plots/plot_%s.png' % (field), dpi=200)
| blue_mag=(0.787302458781+2.9352*redshift)+red_mag | conditional_block |
pisco_redsequence.py | import sys
import os
import pandas as pd
import numpy as np
import subprocess
import shlex
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib
import extra_program as ex
import ezgal
from rsz import RSModel
##----
def make_images(field,ax=None):
dir='final/'
ax.imshow(image.imread(dir+"img%s_2.eps" % field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
return None
def red_seq_color_plot(color,df,mags,ax=None):
if ax is None:
ax = plt.gca()
#https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
n=1000 # number of random color samples drawn per source
slope_fit,i_band0,color_err,rs_models,band_1,band_2=color_sloan(color, mags)
ysample=df[df_color[band_1]]-df[df_color[band_2]]
ysample_err=np.sqrt(df[df_colorerr[band_1]]**2+df[df_colorerr[band_2]]**2)
total=[]
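# Monte Carlo the colors: draw n samples per source from a normal centered on
# its measured color, with its measured color error as the scatter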
for i in ysample.index:
total.append(np.random.normal(loc=ysample[i],scale=ysample_err[i],size=n))
#total.append(0.1)
total=np.array(total)
band_x='sloan_i'
all_x=np.repeat(df[df_color[band_x]],n)
total=np.reshape(total, len(all_x))
bp=ax.errorbar(df[df_color[band_x]],ysample,yerr=ysample_err,fmt='.',alpha=0.5)
#bp=ax.errorbar(df[df_color[band_x]],ysample,fmt='.',alpha=0.5)
red_band=np.arange(16,25,0.01) #just for the line plot in the 3rd plot
redshift_range=np.arange(0.10,0.8,0.05) #for the actual data
number=[]
if color=='sloan_g-sloan_r':
redshift_range=np.arange(0.10,0.36,0.05)
elif color=='sloan_r-sloan_i':
redshift_range=np.arange(0.10,0.71,0.05)
for redshift in redshift_range:
if color=='sloan_g-sloan_r':
# i_band_cut=20.5
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
elif color=='sloan_r-sloan_i':
i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
aa=red_band<i_band_cut
loc=[(all_x<i_band_cut)&\
(total < rs_models[color][round(redshift+0.025,2)].rs_color(all_x))&\
(total > rs_models[color][round(redshift-0.025,2)].rs_color(all_x))][0]
number.append(np.sum(loc))
ax.plot(red_band[aa],rs_models[color][round(redshift,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls='-')
ax.plot(red_band[aa],rs_models[color][round(redshift+0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.plot(red_band[aa],rs_models[color][round(redshift-0.025,2)].rs_color(red_band[aa]),\
color=s_m.to_rgba(round(redshift,2)),ls=':')
ax.set_xlim(16,25)
if color == 'sloan_g-sloan_i':
ax.set_ylim(0,4)
elif color == 'sloan_g-sloan_r':
ax.set_ylim(0.0,2.5)
else:
ax.set_ylim(-0.5,1.75)
ax.set_xlabel(band_x)
ax.set_ylabel(color)
return np.array(redshift_range),np.array(number)
def color_sloan(color, mags):
if color=='sloan_r-sloan_z':
slope_r_m_i=-0.0192138872893
slope_r_m_z=(1.584 * slope_r_m_i)
slope_fit=[slope_r_m_z, 0]
i_band0=-20.
elif color=='sloan_g-sloan_i':
slope_v_m_i=-0.029
slope_g_m_i=(1.481 * slope_v_m_i)
slope_fit=[slope_g_m_i, 0]
i_band0=-20.
elif color=='sloan_r-sloan_i':
slope_rc_m_ic=-0.0192138872893
slope_r_m_i=(1.007 * slope_rc_m_ic)
slope_fit=[slope_r_m_i, 0]
i_band0=-20.5
color_err=0.18
elif color=='sloan_g-sloan_r':
slope_v_m_r=-0.0133824600874
slope_g_m_r=(1.646 * slope_v_m_r)
slope_fit=[slope_g_m_r, 0]
i_band0=-20.5
color_err=0.15
band_1, band_2 = color.split("-")
band_1_idx=filters.index(band_1)
band_2_idx=filters.index(band_2)
rs_models=dict()
rs_models[color]=dict()
for z, m in zip(zs,mags):
#mag_1=m[band_1_idx]
mag_2=m[band_2_idx]
mag_1=blue_model(color,mags,z,mag_2)
this_model=RSModel(z, mag_1, mag_2, slope_fit)
rs_models[color][this_model.z]=this_model
return slope_fit,i_band0,color_err,rs_models,band_1,band_2
# slopes for the different color sets of interest (fitted in 01_rsz_test, fit_gr_ri01.ipynb)
def blue_model(color,mags,redshift,red_mag):
#g-r
if color=='sloan_g-sloan_r':
blue_mag=(0.787302458781+2.9352*redshift)+red_mag
elif color=='sloan_r-sloan_i':
if redshift <= 0.36:
blue_mag=(0.348871987852+0.75340856*redshift)+red_mag | print 'This color has not been implemented.'
return blue_mag
def histogram_plot(xranf,numberf,df,ax=None,line=False,cbar=False):
l2=6
ax.set_xlim(0,0.8)
ic2,ic3=0,0
numbers=numberf[:6]
numbers2=numberf[l2:]
ax.bar(xranf[:6],numbers,width=0.05,color='red',alpha=0.5,align='center')
ax.bar(xranf[l2:],numbers2,width=0.05,alpha=0.5,align='center')
if cbar:
cbar=fig.colorbar(s_m, ax=ax)
cbar.set_label("redshift")
if line:
if dff_sdss.loc[ind].redshift!=-1:
ax.axvline(dff_sdss.redshift[ind],ls='--',color='#66cc00',lw=2.,label='qso z=%.2f'%dff_sdss.redshift[ind])
ax.axvline(xranf[:6][ic2],ls='--',color='black',lw=2.,label='red_seq g-r z=%.2f'%xranf[:6][ic2])
ax.axvline(xranf[l2:][ic3],ls='--',color='purple',lw=2.,label='red_seq r-i z=%.2f'%xranf[l2:][ic3])
ax.legend(loc='best',frameon=False)
sigma,sigma2,sigma3=0.,0.,0.
if line:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3,dff_sdss.redshift[ind],sigma])
else:
return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3])
def save_rgb_image_extra(field, f026):
cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_2.eps -exit" % \
(f026.RA0.values[0], f026.DEC0.values[0], field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
cmd = "ds9 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_3.eps -exit" % \
(field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
print 'finished saving final/img%s.eps' % field
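# Parse the per-band zero-point corrections and their errors from the SLR
# offsets file.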
def find_offset(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
band=[x.split(' ')[0][-1] for x in content[5:-1]]
corr=[float(x.split(' ')[1]) for x in content[5:-1]]
ecorr=[float(x.split(' ')[3]) for x in content[5:-1]]
return zip(band,corr,ecorr), corr
def find_num(fname):
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
num_2mass=content[0].split(' ')[3]
num_star=content[3].split(' ')[1]
chisq=content[2].split(' ')[1]
return num_2mass,num_star,chisq
##--------
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
filters=['sloan_r','sloan_i','sloan_z','sloan_g']
zfs = np.arange(1.0, 6.001, 0.05)
zf = 3.0 #formation redshift
spacing=0.01 #spacing of redshift for resolution (0.01 is high_res, 0.05 low_res)
zs = np.arange(0.05, 2.500001, spacing)
new_model = ezgal.model("pisco_pipeline/pisco_exp_chab_evolved.model")
new_model.set_normalization(filter='ks', mag=10.9, apparent=True, vega=True,z=0.023) ##normalize to Coma
new_mags = new_model.get_apparent_mags(zf, filters=filters, zs=zs, ab=True)
df_color=dict()
df_color['sloan_g']='MAG_g'
df_color['sloan_r']='MAG_r'
df_color['sloan_i']='MAG_i'
df_color['sloan_z']='MAG_z'
df_colorerr=dict()
df_colorerr['sloan_g']='MAGERR_g'
df_colorerr['sloan_r']='MAGERR_r'
df_colorerr['sloan_i']='MAGERR_i'
df_colorerr['sloan_z']='MAGERR_z'
zss=zs[0:80:5]
norm = matplotlib.colors.Normalize(vmin=np.min(zss),vmax=np.max(zss))
c_m = matplotlib.cm.RdYlBu
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
# Entry point: red-sequence diagnostics for one field of reduced PISCO data
#dir = str(sys.argv[1])
field = str(sys.argv[1])
slrdir = 'slr_output'
# field = 'Field054'
df_all = pd.read_csv("/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs_list_new.csv")
f026 = df_all[df_all["name"]==field]
redshift=f026.redshift.values[0]
priority=f026.priority.values[0]
seeing=Table.read('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/PISCO_Jan17_seeing.csv')
see=seeing[seeing['Field']==int(field[-3:])]['Seeing'][0]
offset=find_offset('slr_output/star_%s.fits.offsets.list' % field)
num_2mass,num_star,chisq=find_num('../pisco_code/slr_output/star_%s.fits.offsets.list' % field)
#save_rgb_image_extra(field, f026)
df = pd.read_csv(os.path.join(slrdir,'ntotal_%s.csv' % field),index_col=0)
c5 = SkyCoord(ra=df['XWIN_WORLD'].values*u.degree, dec=df['YWIN_WORLD'].values*u.degree)
c0 = SkyCoord(ra=f026.RA0*u.degree, dec=f026.DEC0*u.degree)
sep = c5.separation(c0)
cut=df[(sep.arcmin<ex.rad_A(redshift,dist=1.5)) & (df["CLASS_STAR"]<0.75)] #CLASS_STAR < 0.75
#ncut=df[(sep.arcmin>2.5) & (df["CLASS_STAR"]<0.8)]
print see
print offset[1]
fig,ax=plt.subplots(1,4,figsize=(20,5));
fig.suptitle(field+', Redshift='+str(redshift)+', Priority='+priority+', Seeing='+str(see)+', Offset(r,i,g,z)='+str(offset[1])+', #2mass='+str(num_2mass)+', #stars='+str(num_star)+', chisq='+str(chisq))
make_images(field,ax[0])
xran,numbers_gr=red_seq_color_plot('sloan_g-sloan_r',cut,new_mags,ax[1])
xran2,numbers_ri=red_seq_color_plot('sloan_r-sloan_i',cut,new_mags,ax[2])
total_sigma=histogram_plot(np.append(xran,xran2),np.append(numbers_gr,numbers_ri),cut,ax[3])
ax[3].axvline(redshift, color='green')
fig.tight_layout()
fig.savefig('plots/plot_%s.png' % (field), dpi=200) | else:
blue_mag=(-0.210727367027+2.2836974*redshift)+red_mag
else: | random_line_split |
main.rs | use std::{fmt::Display, ops::Index, str::FromStr};
use anyhow::{bail, Error};
use intcode::Computer;
static INPUT: &str = include_str!("input.txt");
struct View {
view: Vec<u8>,
width: usize,
height: usize,
}
impl FromStr for View {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut view = vec![];
let mut width = 0;
let mut computer: Computer = s.parse()?;
while let Some(o) = computer.wait_until_output() {
let o: u8 = o.try_into()?;
if o == 10 {
if width == 0 {
width = view.len();
}
} else {
view.push(o);
}
}
let height = view.len() / width;
Ok(View {
view,
width,
height,
})
}
}
impl Index<(usize, usize)> for View {
type Output = u8;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
&self.view[x + self.width * y]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Orientation {
Up,
Down,
Left,
Right,
}
impl TryFrom<u8> for Orientation {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
b'^' => Ok(Orientation::Up),
b'v' => Ok(Orientation::Down),
b'<' => Ok(Orientation::Left),
b'>' => Ok(Orientation::Right),
_ => bail!("unknown orientation {value}"),
}
}
}
impl Orientation {
fn delta(&self) -> (isize, isize) {
match self {
Orientation::Up => (0, -1),
Orientation::Down => (0, 1),
Orientation::Left => (-1, 0),
Orientation::Right => (1, 0),
}
}
fn angle(&self) -> isize {
match self {
Orientation::Up => 90,
Orientation::Down => 270,
Orientation::Left => 180,
Orientation::Right => 0,
}
}
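// Turning commands that rotate from `self` to `to`: normalize the angle
// difference into [0, 360) and map 90/180/270 degrees to one left turn,
// two left turns, or one right turn.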
fn orient(&self, to: Orientation) -> Vec<Command> {
let diff_angle = to.angle() - self.angle();
let diff_angle = if diff_angle < 0 {
diff_angle + 360
} else {
diff_angle
};
match diff_angle {
0 => vec![],
90 => vec![Command::Left],
180 => vec![Command::Left, Command::Left],
270 => vec![Command::Right],
_ => panic!("unrecognized angle: {diff_angle}"),
}
}
fn is_opposite(&self, to: Orientation) -> bool {
match self {
Orientation::Up => to == Orientation::Down,
Orientation::Down => to == Orientation::Up,
Orientation::Left => to == Orientation::Right,
Orientation::Right => to == Orientation::Left,
}
}
}
impl View {
fn neighbours(
&self,
pos: (usize, usize),
) -> impl Iterator<Item = (Orientation, (usize, usize))> {
let width = self.width;
let height = self.height;
[
Orientation::Up,
Orientation::Down,
Orientation::Left,
Orientation::Right,
]
.into_iter()
.filter_map(move |o| {
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (o, (x, y))))
})
.filter(move |(_, (x, y))| *x < width && *y < height)
}
fn forward(&self, pos: (usize, usize), o: Orientation) -> Option<(usize, usize)> {
let width = self.width;
let height = self.height;
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (x, y)))
.filter(move |(x, y)| *x < width && *y < height)
}
fn compute_route(&self) -> Result<Route, Error> {
let mut route = vec![];
let pos = self
.view
.iter()
.position(|c| *c != b'.' && *c != b'#')
.unwrap();
let mut cur_pos = (pos % self.width, pos / self.width);
let mut o = Orientation::try_from(self[cur_pos])?;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' {
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
loop {
// move forward until either None or self[pos] != b'#'
let mut steps = 0;
while let Some(p) = self.forward(cur_pos, o) {
if self[p] != b'#' {
break;
}
steps += 1;
cur_pos = p;
}
route.push(Command::Forward(steps));
let mut found_turn = false;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' && !o.is_opposite(n.0) { | o = n.0;
break;
}
}
if !found_turn {
break;
}
}
let route = Route(route);
Ok(route)
}
}
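// Part 1: an intersection is a scaffold cell ('#') with scaffold on all four
// sides; its alignment parameter is x * y, summed over every intersection.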
fn part_01(input: &str) -> Result<usize, Error> {
let mut alignment_parameters = 0;
let view: View = input.parse()?;
for y in 1..view.height - 1 {
for x in 1..view.width - 1 {
if view[(x, y)] == b'#'
&& view[(x - 1, y)] == b'#'
&& view[(x + 1, y)] == b'#'
&& view[(x, y - 1)] == b'#'
&& view[(x, y + 1)] == b'#'
{
alignment_parameters += x * y;
}
}
}
Ok(alignment_parameters)
}
impl Display for View {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (idx, c) in self.view.iter().enumerate() {
if idx > 0 && idx % self.width == 0 {
writeln!(f)?;
}
write!(f, "{}", *c as char)?;
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Command {
Right,
Left,
Forward(usize),
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Right => write!(f, "R"),
Command::Left => write!(f, "L"),
Command::Forward(d) => write!(f, "{d}"),
}
}
}
struct Route(Vec<Command>);
impl Display for Route {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(c) = self.0.first() {
write!(f, "{c}")?;
}
for c in self.0.iter().skip(1) {
write!(f, ",{c}")?;
}
Ok(())
}
}
fn strip_commas(mut input: &str) -> &str {
input = if let Some(i) = input.strip_prefix(',') {
i
} else {
input
};
if let Some(i) = input.strip_suffix(',') {
i
} else {
input
}
}
impl Route {
// return (main, A, B, C)
fn compress_route(&self) -> (String, String, String, String) {
// We need a pattern at the beginning, one at the end, and the rest in
// between. For A, scan comma-joined prefixes from the start; then split the
// string at A and do the same for B on the remaining fragments. If whatever
// is left over is identical everywhere, call that C, and if the main program
// compresses down to 20 characters or fewer, return it. The extra fiddling
// is due to commas.
let complete = format!("{self}");
for a in complete
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = complete
.split(&a)
.map(strip_commas)
.filter(|f| !f.is_empty())
.collect();
// looking for a prefix in the first fragment:
if let Some(first) = fragments.first() {
for b in first
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = fragments
.iter()
.flat_map(|f| f.split(&b))
.map(strip_commas)
.filter(|f| !f.is_empty() && f != &",")
.collect();
if let Some(&c) = fragments.first() {
let c = c.to_owned();
if fragments
.iter()
.all(|f| f.split(&c).all(|f| f == "," || f.is_empty()))
{
let prog = complete.replace(&a, "A").replace(&b, "B").replace(&c, "C");
if prog.len() <= 20 {
return (prog, a, b, c);
}
}
}
}
}
}
panic!("compression not found")
}
}
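// Part 2: compress the route into a main routine plus movement functions
// A/B/C, feed them to the intcode robot as ASCII lines, decline the video
// feed with 'n', and read the final non-ASCII output as the dust count.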
fn part_02(input: &str) -> Result<i64, Error> {
let view: View = input.parse()?;
let route = view.compute_route()?;
let (prog, a, b, c) = route.compress_route();
// let data = format!("{route}");
// L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6
let mut computer: Computer = input.parse()?;
// switch to command mode
computer.set_at(0, 2);
for b in prog.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in a.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in b.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in c.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
computer.add_input(b'n' as i64);
computer.add_input(10);
let mut dust = 0;
while let Some(o) = computer.wait_until_output() {
if let Ok(b) = u8::try_from(o) {
print!("{}", b as char);
} else {
dust = o;
break;
}
}
Ok(dust)
}
fn main() -> Result<(), Error> {
println!("part 1: {}", part_01(INPUT)?);
println!("part 2: {}", part_02(INPUT)?);
Ok(())
}
#[cfg(test)]
mod tests {} | found_turn = true;
route.append(&mut o.orient(n.0)); | random_line_split |
main.rs | use std::{fmt::Display, ops::Index, str::FromStr};
use anyhow::{bail, Error};
use intcode::Computer;
static INPUT: &str = include_str!("input.txt");
struct View {
view: Vec<u8>,
width: usize,
height: usize,
}
impl FromStr for View {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut view = vec![];
let mut width = 0;
let mut computer: Computer = s.parse()?;
while let Some(o) = computer.wait_until_output() {
let o: u8 = o.try_into()?;
if o == 10 {
if width == 0 {
width = view.len();
}
} else {
view.push(o);
}
}
let height = view.len() / width;
Ok(View {
view,
width,
height,
})
}
}
impl Index<(usize, usize)> for View {
type Output = u8;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
&self.view[x + self.width * y]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Orientation {
Up,
Down,
Left,
Right,
}
impl TryFrom<u8> for Orientation {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
b'^' => Ok(Orientation::Up),
b'v' => Ok(Orientation::Down),
b'<' => Ok(Orientation::Left),
b'>' => Ok(Orientation::Right),
_ => bail!("unknown orientation {value}"),
}
}
}
impl Orientation {
fn delta(&self) -> (isize, isize) {
match self {
Orientation::Up => (0, -1),
Orientation::Down => (0, 1),
Orientation::Left => (-1, 0),
Orientation::Right => (1, 0),
}
}
fn angle(&self) -> isize {
match self {
Orientation::Up => 90,
Orientation::Down => 270,
Orientation::Left => 180,
Orientation::Right => 0,
}
}
fn orient(&self, to: Orientation) -> Vec<Command> {
let diff_angle = to.angle() - self.angle();
let diff_angle = if diff_angle < 0 {
diff_angle + 360
} else {
diff_angle
};
match diff_angle {
0 => vec![],
90 => vec![Command::Left],
180 => vec![Command::Left, Command::Left],
270 => vec![Command::Right],
_ => panic!("unrecognized angle: {diff_angle}"),
}
}
fn is_opposite(&self, to: Orientation) -> bool {
match self {
Orientation::Up => to == Orientation::Down,
Orientation::Down => to == Orientation::Up,
Orientation::Left => to == Orientation::Right,
Orientation::Right => to == Orientation::Left,
}
}
}
impl View {
fn neighbours(
&self,
pos: (usize, usize),
) -> impl Iterator<Item = (Orientation, (usize, usize))> |
fn forward(&self, pos: (usize, usize), o: Orientation) -> Option<(usize, usize)> {
let width = self.width;
let height = self.height;
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (x, y)))
.filter(move |(x, y)| *x < width && *y < height)
}
fn compute_route(&self) -> Result<Route, Error> {
let mut route = vec![];
let pos = self
.view
.iter()
.position(|c| *c != b'.' && *c != b'#')
.unwrap();
let mut cur_pos = (pos % self.width, pos / self.width);
let mut o = Orientation::try_from(self[cur_pos])?;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' {
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
loop {
// move forward until either None or self[pos] != b'#'
let mut steps = 0;
while let Some(p) = self.forward(cur_pos, o) {
if self[p] != b'#' {
break;
}
steps += 1;
cur_pos = p;
}
route.push(Command::Forward(steps));
let mut found_turn = false;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' && !o.is_opposite(n.0) {
found_turn = true;
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
if !found_turn {
break;
}
}
let route = Route(route);
Ok(route)
}
}
fn part_01(input: &str) -> Result<usize, Error> {
let mut alignment_parameters = 0;
let view: View = input.parse()?;
for y in 1..view.height - 1 {
for x in 1..view.width - 1 {
if view[(x, y)] == b'#'
&& view[(x - 1, y)] == b'#'
&& view[(x + 1, y)] == b'#'
&& view[(x, y - 1)] == b'#'
&& view[(x, y + 1)] == b'#'
{
alignment_parameters += x * y;
}
}
}
Ok(alignment_parameters)
}
impl Display for View {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (idx, c) in self.view.iter().enumerate() {
if idx > 0 && idx % self.width == 0 {
writeln!(f)?;
}
write!(f, "{}", *c as char)?;
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Command {
Right,
Left,
Forward(usize),
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Right => write!(f, "R"),
Command::Left => write!(f, "L"),
Command::Forward(d) => write!(f, "{d}"),
}
}
}
struct Route(Vec<Command>);
impl Display for Route {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(c) = self.0.first() {
write!(f, "{c}")?;
}
for c in self.0.iter().skip(1) {
write!(f, ",{c}")?;
}
Ok(())
}
}
fn strip_commas(mut input: &str) -> &str {
input = if let Some(i) = input.strip_prefix(',') {
i
} else {
input
};
if let Some(i) = input.strip_suffix(',') {
i
} else {
input
}
}
impl Route {
// return (main, A, B, C)
fn compress_route(&self) -> (String, String, String, String) {
// We need a pattern at the beginning, one at the end, and the rest in
// between. For A, scan comma-joined prefixes from the start; then split the
// string at A and do the same for B on the remaining fragments. If whatever
// is left over is identical everywhere, call that C, and if the main program
// compresses down to 20 characters or fewer, return it. The extra fiddling
// is due to commas.
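// For instance (hypothetical split): a main routine "A,B,A,C" with
// A = "L,6,R,12" -- every piece, commas included, must fit in 20 characters.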
let complete = format!("{self}");
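// scan() accumulates comma-joined prefixes of the route ("L", "L,6",
// "L,6,R", ...); take_while keeps only candidates short enough to be a
// movement function.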
for a in complete
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = complete
.split(&a)
.map(strip_commas)
.filter(|f| !f.is_empty())
.collect();
// looking for a prefix in the first fragment:
if let Some(first) = fragments.first() {
for b in first
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = fragments
.iter()
.flat_map(|f| f.split(&b))
.map(strip_commas)
.filter(|f| !f.is_empty() && f != &",")
.collect();
if let Some(&c) = fragments.first() {
let c = c.to_owned();
if fragments
.iter()
.all(|f| f.split(&c).all(|f| f == "," || f.is_empty()))
{
let prog = complete.replace(&a, "A").replace(&b, "B").replace(&c, "C");
if prog.len() <= 20 {
return (prog, a, b, c);
}
}
}
}
}
}
panic!("compression not found")
}
}
fn part_02(input: &str) -> Result<i64, Error> {
let view: View = input.parse()?;
let route = view.compute_route()?;
let (prog, a, b, c) = route.compress_route();
// let data = format!("{route}");
// L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6
let mut computer: Computer = input.parse()?;
// switch to command mode
computer.set_at(0, 2);
for b in prog.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in a.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in b.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in c.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
computer.add_input(b'n' as i64);
computer.add_input(10);
let mut dust = 0;
while let Some(o) = computer.wait_until_output() {
if let Ok(b) = u8::try_from(o) {
print!("{}", b as char);
} else {
dust = o;
break;
}
}
Ok(dust)
}
fn main() -> Result<(), Error> {
println!("part 1: {}", part_01(INPUT)?);
println!("part 2: {}", part_02(INPUT)?);
Ok(())
}
#[cfg(test)]
mod tests {}
| {
let width = self.width;
let height = self.height;
[
Orientation::Up,
Orientation::Down,
Orientation::Left,
Orientation::Right,
]
.into_iter()
.filter_map(move |o| {
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (o, (x, y))))
})
.filter(move |(_, (x, y))| *x < width && *y < height)
} | identifier_body |
main.rs | use std::{fmt::Display, ops::Index, str::FromStr};
use anyhow::{bail, Error};
use intcode::Computer;
static INPUT: &str = include_str!("input.txt");
struct View {
view: Vec<u8>,
width: usize,
height: usize,
}
impl FromStr for View {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut view = vec![];
let mut width = 0;
let mut computer: Computer = s.parse()?;
while let Some(o) = computer.wait_until_output() {
let o: u8 = o.try_into()?;
if o == 10 {
if width == 0 {
width = view.len();
}
} else {
view.push(o);
}
}
let height = view.len() / width;
Ok(View {
view,
width,
height,
})
}
}
impl Index<(usize, usize)> for View {
type Output = u8;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
&self.view[x + self.width * y]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Orientation {
Up,
Down,
Left,
Right,
}
impl TryFrom<u8> for Orientation {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
b'^' => Ok(Orientation::Up),
b'v' => Ok(Orientation::Down),
b'<' => Ok(Orientation::Left),
b'>' => Ok(Orientation::Right),
_ => bail!("unknown orientation {value}"),
}
}
}
impl Orientation {
fn delta(&self) -> (isize, isize) {
match self {
Orientation::Up => (0, -1),
Orientation::Down => (0, 1),
Orientation::Left => (-1, 0),
Orientation::Right => (1, 0),
}
}
fn angle(&self) -> isize {
match self {
Orientation::Up => 90,
Orientation::Down => 270,
Orientation::Left => 180,
Orientation::Right => 0,
}
}
fn orient(&self, to: Orientation) -> Vec<Command> {
let diff_angle = to.angle() - self.angle();
let diff_angle = if diff_angle < 0 {
diff_angle + 360
} else {
diff_angle
};
match diff_angle {
0 => vec![],
90 => vec![Command::Left],
180 => vec![Command::Left, Command::Left],
270 => vec![Command::Right],
_ => panic!("unrecognized angle: {diff_angle}"),
}
}
fn is_opposite(&self, to: Orientation) -> bool {
match self {
Orientation::Up => to == Orientation::Down,
Orientation::Down => to == Orientation::Up,
Orientation::Left => to == Orientation::Right,
Orientation::Right => to == Orientation::Left,
}
}
}
impl View {
fn neighbours(
&self,
pos: (usize, usize),
) -> impl Iterator<Item = (Orientation, (usize, usize))> {
let width = self.width;
let height = self.height;
[
Orientation::Up,
Orientation::Down,
Orientation::Left,
Orientation::Right,
]
.into_iter()
.filter_map(move |o| {
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (o, (x, y))))
})
.filter(move |(_, (x, y))| *x < width && *y < height)
}
fn | (&self, pos: (usize, usize), o: Orientation) -> Option<(usize, usize)> {
let width = self.width;
let height = self.height;
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (x, y)))
.filter(move |(x, y)| *x < width && *y < height)
}
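// Greedy scaffold walk: turn toward an adjacent '#' (never doubling back),
// run forward while still on scaffold, and stop once no turn remains.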
fn compute_route(&self) -> Result<Route, Error> {
let mut route = vec![];
let pos = self
.view
.iter()
.position(|c| *c != b'.' && *c != b'#')
.unwrap();
let mut cur_pos = (pos % self.width, pos / self.width);
let mut o = Orientation::try_from(self[cur_pos])?;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' {
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
loop {
// move forward until either None or self[pos] != b'#'
let mut steps = 0;
while let Some(p) = self.forward(cur_pos, o) {
if self[p] != b'#' {
break;
}
steps += 1;
cur_pos = p;
}
route.push(Command::Forward(steps));
let mut found_turn = false;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' && !o.is_opposite(n.0) {
found_turn = true;
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
if !found_turn {
break;
}
}
let route = Route(route);
Ok(route)
}
}
fn part_01(input: &str) -> Result<usize, Error> {
let mut alignment_parameters = 0;
let view: View = input.parse()?;
for y in 1..view.height - 1 {
for x in 1..view.width - 1 {
if view[(x, y)] == b'#'
&& view[(x - 1, y)] == b'#'
&& view[(x + 1, y)] == b'#'
&& view[(x, y - 1)] == b'#'
&& view[(x, y + 1)] == b'#'
{
alignment_parameters += x * y;
}
}
}
Ok(alignment_parameters)
}
impl Display for View {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (idx, c) in self.view.iter().enumerate() {
if idx > 0 && idx % self.width == 0 {
writeln!(f)?;
}
write!(f, "{}", *c as char)?;
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Command {
Right,
Left,
Forward(usize),
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Right => write!(f, "R"),
Command::Left => write!(f, "L"),
Command::Forward(d) => write!(f, "{d}"),
}
}
}
struct Route(Vec<Command>);
impl Display for Route {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(c) = self.0.first() {
write!(f, "{c}")?;
}
for c in self.0.iter().skip(1) {
write!(f, ",{c}")?;
}
Ok(())
}
}
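// Trim a single leading/trailing comma left behind when the route string is
// split on a sub-pattern.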
fn strip_commas(mut input: &str) -> &str {
input = if let Some(i) = input.strip_prefix(',') {
i
} else {
input
};
if let Some(i) = input.strip_suffix(',') {
i
} else {
input
}
}
impl Route {
// return (main, A, B, C)
fn compress_route(&self) -> (String, String, String, String) {
// We need a pattern at the beginning, one at the end, and the rest in
// between. For A, scan comma-joined prefixes from the start; then split the
// string at A and do the same for B on the remaining fragments. If whatever
// is left over is identical everywhere, call that C, and if the main program
// compresses down to 20 characters or fewer, return it. The extra fiddling
// is due to commas.
let complete = format!("{self}");
for a in complete
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = complete
.split(&a)
.map(strip_commas)
.filter(|f| !f.is_empty())
.collect();
// looking for a prefix in the first fragment:
if let Some(first) = fragments.first() {
for b in first
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = fragments
.iter()
.flat_map(|f| f.split(&b))
.map(strip_commas)
.filter(|f| !f.is_empty() && f != &",")
.collect();
if let Some(&c) = fragments.first() {
let c = c.to_owned();
if fragments
.iter()
.all(|f| f.split(&c).all(|f| f == "," || f.is_empty()))
{
let prog = complete.replace(&a, "A").replace(&b, "B").replace(&c, "C");
if prog.len() <= 20 {
return (prog, a, b, c);
}
}
}
}
}
}
panic!("compression not found")
}
}
fn part_02(input: &str) -> Result<i64, Error> {
let view: View = input.parse()?;
let route = view.compute_route()?;
let (prog, a, b, c) = route.compress_route();
// let data = format!("{route}");
// L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6
let mut computer: Computer = input.parse()?;
// switch to command mode
computer.set_at(0, 2);
for b in prog.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in a.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in b.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in c.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
computer.add_input(b'n' as i64);
computer.add_input(10);
let mut dust = 0;
while let Some(o) = computer.wait_until_output() {
if let Ok(b) = u8::try_from(o) {
print!("{}", b as char);
} else {
dust = o;
break;
}
}
Ok(dust)
}
fn main() -> Result<(), Error> {
println!("part 1: {}", part_01(INPUT)?);
println!("part 2: {}", part_02(INPUT)?);
Ok(())
}
#[cfg(test)]
mod tests {}
| forward | identifier_name |
main.rs | use std::{fmt::Display, ops::Index, str::FromStr};
use anyhow::{bail, Error};
use intcode::Computer;
static INPUT: &str = include_str!("input.txt");
struct View {
view: Vec<u8>,
width: usize,
height: usize,
}
impl FromStr for View {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut view = vec![];
let mut width = 0;
let mut computer: Computer = s.parse()?;
while let Some(o) = computer.wait_until_output() {
let o: u8 = o.try_into()?;
if o == 10 {
if width == 0 |
} else {
view.push(o);
}
}
let height = view.len() / width;
Ok(View {
view,
width,
height,
})
}
}
impl Index<(usize, usize)> for View {
type Output = u8;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
&self.view[x + self.width * y]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Orientation {
Up,
Down,
Left,
Right,
}
impl TryFrom<u8> for Orientation {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
b'^' => Ok(Orientation::Up),
b'v' => Ok(Orientation::Down),
b'<' => Ok(Orientation::Left),
b'>' => Ok(Orientation::Right),
_ => bail!("unknown orientation {value}"),
}
}
}
impl Orientation {
fn delta(&self) -> (isize, isize) {
match self {
Orientation::Up => (0, -1),
Orientation::Down => (0, 1),
Orientation::Left => (-1, 0),
Orientation::Right => (1, 0),
}
}
fn angle(&self) -> isize {
match self {
Orientation::Up => 90,
Orientation::Down => 270,
Orientation::Left => 180,
Orientation::Right => 0,
}
}
fn orient(&self, to: Orientation) -> Vec<Command> {
let diff_angle = to.angle() - self.angle();
let diff_angle = if diff_angle < 0 {
diff_angle + 360
} else {
diff_angle
};
match diff_angle {
0 => vec![],
90 => vec![Command::Left],
180 => vec![Command::Left, Command::Left],
270 => vec![Command::Right],
_ => panic!("unrecognized angle: {diff_angle}"),
}
}
fn is_opposite(&self, to: Orientation) -> bool {
match self {
Orientation::Up => to == Orientation::Down,
Orientation::Down => to == Orientation::Up,
Orientation::Left => to == Orientation::Right,
Orientation::Right => to == Orientation::Left,
}
}
}
impl View {
fn neighbours(
&self,
pos: (usize, usize),
) -> impl Iterator<Item = (Orientation, (usize, usize))> {
let width = self.width;
let height = self.height;
[
Orientation::Up,
Orientation::Down,
Orientation::Left,
Orientation::Right,
]
.into_iter()
.filter_map(move |o| {
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (o, (x, y))))
})
.filter(move |(_, (x, y))| *x < width && *y < height)
}
fn forward(&self, pos: (usize, usize), o: Orientation) -> Option<(usize, usize)> {
let width = self.width;
let height = self.height;
let d = o.delta();
pos.0
.checked_add_signed(d.0)
.and_then(|x| pos.1.checked_add_signed(d.1).map(|y| (x, y)))
.filter(move |(x, y)| *x < width && *y < height)
}
fn compute_route(&self) -> Result<Route, Error> {
let mut route = vec![];
let pos = self
.view
.iter()
.position(|c| *c != b'.' && *c != b'#')
.unwrap();
let mut cur_pos = (pos % self.width, pos / self.width);
let mut o = Orientation::try_from(self[cur_pos])?;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' {
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
loop {
// move forward until forward() returns None or the next tile is not b'#'
let mut steps = 0;
while let Some(p) = self.forward(cur_pos, o) {
if self[p] != b'#' {
break;
}
steps += 1;
cur_pos = p;
}
route.push(Command::Forward(steps));
let mut found_turn = false;
for n in self.neighbours(cur_pos) {
if self[n.1] == b'#' && !o.is_opposite(n.0) {
found_turn = true;
route.append(&mut o.orient(n.0));
o = n.0;
break;
}
}
if !found_turn {
break;
}
}
let route = Route(route);
Ok(route)
}
}
fn part_01(input: &str) -> Result<usize, Error> {
let mut alignment_parameters = 0;
let view: View = input.parse()?;
for y in 1..view.height - 1 {
for x in 1..view.width - 1 {
if view[(x, y)] == b'#'
&& view[(x - 1, y)] == b'#'
&& view[(x + 1, y)] == b'#'
&& view[(x, y - 1)] == b'#'
&& view[(x, y + 1)] == b'#'
{
alignment_parameters += x * y;
}
}
}
Ok(alignment_parameters)
}
impl Display for View {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (idx, c) in self.view.iter().enumerate() {
if idx > 0 && idx % self.width == 0 {
writeln!(f)?;
}
write!(f, "{}", *c as char)?;
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Command {
Right,
Left,
Forward(usize),
}
impl Display for Command {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Command::Right => write!(f, "R"),
Command::Left => write!(f, "L"),
Command::Forward(d) => write!(f, "{d}"),
}
}
}
struct Route(Vec<Command>);
impl Display for Route {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(c) = self.0.first() {
write!(f, "{c}")?;
}
for c in self.0.iter().skip(1) {
write!(f, ",{c}")?;
}
Ok(())
}
}
fn strip_commas(mut input: &str) -> &str {
input = if let Some(i) = input.strip_prefix(',') {
i
} else {
input
};
if let Some(i) = input.strip_suffix(',') {
i
} else {
input
}
}
impl Route {
// return (main, A, B, C)
fn compress_route(&self) -> (String, String, String, String) {
// we need a pattern at the beginning,
// one at the end, and the rest
// for a, scan from the start,
// then split the string at a, and do the same for b
// on the remaining string
// if what's left is identical, call that c; if the program
// compresses down to 20 chars or fewer, return it
// the extra complexity is due to commas
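// illustrative sketch on the well-known example route (not this puzzle's input):
//   complete = "R,8,R,8,R,4,R,4,R,8,L,6,L,2,R,4,R,4,R,8,R,8,R,8,L,6,L,2"
//   A = "R,8,R,8", B = "R,4,R,4,R,8", C = "L,6,L,2"
//   main routine = "A,B,C,B,A,C" (within the 20-char limit)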
let complete = format!("{self}");
for a in complete
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = complete
.split(&a)
.map(strip_commas)
.filter(|f| !f.is_empty())
.collect();
// looking for a prefix in the first fragment:
if let Some(first) = fragments.first() {
for b in first
.split(',')
.scan(String::new(), |s, c| {
if !s.is_empty() {
*s = format!("{s},{c}");
} else {
*s = c.to_owned();
}
Some(s.clone())
})
.take_while(|s| s.len() <= 20)
{
let fragments: Vec<_> = fragments
.iter()
.flat_map(|f| f.split(&b))
.map(strip_commas)
.filter(|f| !f.is_empty() && f != &",")
.collect();
if let Some(&c) = fragments.first() {
let c = c.to_owned();
if fragments
.iter()
.all(|f| f.split(&c).all(|f| f == "," || f.is_empty()))
{
let prog = complete.replace(&a, "A").replace(&b, "B").replace(&c, "C");
if prog.len() <= 20 {
return (prog, a, b, c);
}
}
}
}
}
}
panic!("compression not found")
}
}
fn part_02(input: &str) -> Result<i64, Error> {
let view: View = input.parse()?;
let route = view.compute_route()?;
let (prog, a, b, c) = route.compress_route();
// let data = format!("{route}");
// L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,10,L,4,L,6
let mut computer: Computer = input.parse()?;
// switch to command mode
computer.set_at(0, 2);
for b in prog.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in a.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in b.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
for b in c.bytes() {
computer.add_input(b as i64);
}
computer.add_input(10);
computer.add_input(b'n' as i64);
computer.add_input(10);
let mut dust = 0;
while let Some(o) = computer.wait_until_output() {
if let Ok(b) = u8::try_from(o) {
print!("{}", b as char);
} else {
dust = o;
break;
}
}
Ok(dust)
}
fn main() -> Result<(), Error> {
println!("part 1: {}", part_01(INPUT)?);
println!("part 2: {}", part_02(INPUT)?);
Ok(())
}
#[cfg(test)]
mod tests {}
| {
width = view.len();
} | conditional_block |
val.rs | //! A concrete implementation of `futures::Future`. It is similar in spirit to
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation of interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock-free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
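///
/// A minimal usage sketch (mirroring the tests at the bottom of this file):
///
/// ```ignore
/// let (tx, rx) = pair::<u32, ()>();
/// tx.complete(42);
/// // polling `rx` (e.g. chaining it with `then`) now yields Ok(42)
/// ```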
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send + 'static,
E: Send + 'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send + 'static,
E: Send + 'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) {
self.inner.complete(Some(Err(err)), false);
}
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn | (&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer, .. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation, .. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync + 'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send + 'static,
E: Send + 'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init { .. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init { .. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer, .. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation, .. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send + 'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123);
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
}
| poll | identifier_name |
val.rs | //! A concrete implementation of `futures::Future`. It is similar in spirit to
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation of interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock-free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
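///
/// A minimal usage sketch (mirroring the tests at the bottom of this file):
///
/// ```ignore
/// let (tx, rx) = pair::<u32, ()>();
/// tx.complete(42);
/// // polling `rx` (e.g. chaining it with `then`) now yields Ok(42)
/// ```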
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send + 'static,
E: Send + 'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send + 'static,
E: Send + 'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) {
self.inner.complete(Some(Err(err)), false);
}
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn poll(&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer, .. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation, .. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync + 'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send + 'static,
E: Send + 'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init { .. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init { .. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer, .. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation, .. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send + 'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123); | }).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
} |
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res | random_line_split |
val.rs | //! A concrete implementation of `futures::Future`. It is similar in spirit to
//! `futures::Promise`, but is better suited for use with Tokio.
use futures::{Future, Poll, Task};
use std::mem;
use std::cell::Cell;
use std::sync::{Arc, Mutex};
use self::State::*;
/// A future representing the completion of an asynchronous computation.
///
/// This is created by the `pair` function.
pub struct Val<T, E> {
inner: Arc<Inner<T, E>>,
}
/// The `Complete` half of `Val` used to send the result of an asynchronous
/// computation to the consumer of `Val`.
///
/// This is created by the `pair` function.
pub struct Complete<T, E> {
inner: Arc<Inner<T, E>>,
cancellation: Cell<bool>,
}
/// A future representing the cancellation of interest by the consumer of
/// `Val`.
///
/// If a `Val` is dropped without ever attempting to read the value, then it
/// becomes impossible to ever receive the result of the underlying
/// computation. This indicates that there is no interest in the computation
/// and it may be cancelled.
///
/// In this case, this future will be completed. The asynchronous computation
/// is able to listen for this completion and abort work early.
pub struct Cancellation {
inner: Arc<SyncFuture>,
}
// Currently implemented with a mutex, but this is only to get something
// working. This should be rewritten to use a lock-free strategy.
struct Inner<T, E> {
state: Mutex<State<T, E>>,
}
enum State<T, E> {
Init {
consumer: Option<Callback>,
cancellation: Option<Callback>
},
Completed(Option<Result<T, E>>),
Cancelled,
Consumed,
}
type Callback = Box<FnBox>;
/// Create and return a new `Complete` / `Val` pair.
///
/// `Complete` is used to send the result of an asynchronous computation to the
/// consumer of `Val`.
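///
/// A minimal usage sketch (mirroring the tests at the bottom of this file):
///
/// ```ignore
/// let (tx, rx) = pair::<u32, ()>();
/// tx.complete(42);
/// // polling `rx` (e.g. chaining it with `then`) now yields Ok(42)
/// ```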
pub fn pair<T, E>() -> (Complete<T, E>, Val<T, E>) {
let inner = Arc::new(Inner {
state: Mutex::new(State::Init {
consumer: None,
cancellation: None,
}),
});
let tx = Complete {
inner: inner.clone(),
cancellation: Cell::new(false),
};
let rx = Val {
inner: inner,
};
(tx, rx)
}
/*
*
* ===== Val =====
*
*/
impl<T, E> Future for Val<T, E>
where T: Send + 'static,
E: Send + 'static,
{
type Item = T;
type Error = E;
fn poll(&mut self, task: &mut Task) -> Poll<T, E> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
impl<T, E> Drop for Val<T, E> {
fn drop(&mut self) {
self.inner.cancel();
}
}
/*
*
* ===== Complete =====
*
*/
impl<T, E> Complete<T, E>
where T: Send + 'static,
E: Send + 'static,
{
/// Successfully complete the associated `Val` with the given value.
pub fn complete(self, val: T) {
self.inner.complete(Some(Ok(val)), false);
}
/// Complete the associated `Val` with the given error
pub fn error(self, err: E) |
/// Abort the computation. This will cause the associated `Val` to panic on
/// a call to `poll`.
pub fn abort(self) {
self.inner.complete(None, false);
}
/// Returns a `Future` representing the consuming end cancelling interest
/// in the future.
///
/// This function can only be called once.
///
/// # Panics
///
/// A second call to this function will result in a panic.
pub fn cancellation(&self) -> Cancellation {
if self.cancellation.get() {
panic!("cancellation future already obtained");
}
self.cancellation.set(true);
Cancellation { inner: self.inner.clone() }
}
}
impl<T, E> Drop for Complete<T, E> {
fn drop(&mut self) {
self.inner.complete(None, true);
}
}
/*
*
* ===== Cancellation =====
*
*/
impl Future for Cancellation {
type Item = bool;
type Error = ();
fn poll(&mut self, task: &mut Task) -> Poll<bool, ()> {
self.inner.poll(task)
}
fn schedule(&mut self, task: &mut Task) {
self.inner.schedule(task)
}
}
/*
*
* ===== Inner =====
*
*/
impl<T, E> Inner<T, E> {
/// Complete the future with the given result
fn complete(&self, res: Option<Result<T, E>>, panic: bool) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => {
if panic { panic!("failed to lock mutex") };
return;
}
};
let cb;
match state.take() {
Init { consumer, .. } => cb = consumer,
s => {
if res.is_some() {
panic!("attempting to complete already completed future");
} else {
*state = s;
return;
}
}
}
*state = Completed(res);
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Cancel interest in the future
fn cancel(&self) {
let mut state = match self.state.lock() {
Ok(s) => s,
Err(_) => return,
};
let cb;
match state.take() {
Init { cancellation, .. } => cb = cancellation,
Completed(_) | Cancelled | Consumed => {
return; // Cannot cancel from these states
}
}
*state = Cancelled;
drop(state);
if let Some(cb) = cb {
cb.call_box(); // Invoke callback
}
}
/// Poll the inner state for a value
fn poll(&self, _: &mut Task) -> Poll<T, E> {
let mut state = self.state.lock().unwrap();
if state.is_complete() {
match state.take() {
Completed(Some(Ok(v))) => Poll::Ok(v),
Completed(Some(Err(e))) => Poll::Err(e),
Completed(None) => panic!("Complete dropped without producing a value"),
Consumed => panic!("Val already consumed"),
_ => unreachable!(),
}
} else {
Poll::NotReady
}
}
/// Associate completion with the given task
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_consumer_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
// A little hacky, but implementing Future on Inner allows losing the generics
// on Cancellation
trait SyncFuture: Send + Sync + 'static {
fn poll(&self, task: &mut Task) -> Poll<bool, ()>;
fn schedule(&self, task: &mut Task);
}
impl<T, E> SyncFuture for Inner<T, E>
where T: Send + 'static,
E: Send + 'static,
{
fn poll(&self, _: &mut Task) -> Poll<bool, ()> {
let state = self.state.lock().unwrap();
match *state {
Init { .. } => Poll::NotReady,
Cancelled => Poll::Ok(true),
_ => Poll::Ok(false),
}
}
fn schedule(&self, task: &mut Task) {
let mut state = self.state.lock().unwrap();
if state.in_flight() {
let handle = task.handle().clone();
state.set_cancellation_cb(Box::new(move || handle.notify()));
} else {
task.handle().notify();
}
}
}
impl<T, E> State<T, E> {
fn in_flight(&self) -> bool {
match *self {
Init { .. } => true,
_ => false,
}
}
/// Returns true if in a completed state.
fn is_complete(&self) -> bool {
match *self {
Completed(_) | Consumed => true,
_ => false,
}
}
fn set_consumer_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut consumer, .. } => *consumer = Some(cb),
_ => panic!("unexpected state"),
}
}
fn set_cancellation_cb(&mut self, cb: Callback) {
match *self {
Init { ref mut cancellation, .. } => *cancellation = Some(cb),
_ => panic!("unexpected state"),
}
}
/// Sets the current state to Consumed and returns the original value
fn take(&mut self) -> State<T, E> {
mem::replace(self, State::Consumed)
}
}
trait FnBox: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnBox for F
where F: FnOnce() + Send + 'static
{
fn call_box(self: Box<F>) {
(*self)()
}
}
#[cfg(test)]
mod test {
use super::*;
use futures::Future;
use std::sync::mpsc;
#[test]
fn test_complete_after_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
c.complete(123);
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_complete_before_listen() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.complete(123);
val.then(move |res| {
tx.send(res.unwrap()).unwrap();
res
}).forget();
assert_eq!(123, rx.recv().unwrap());
}
#[test]
fn test_polling_aborted_future_panics() {
use std::thread;
let res = thread::spawn(|| {
let (c, val) = pair::<u32, ()>();
val.then(move |res| {
println!("WAT: {:?}", res);
res
}).forget();
c.abort();
});
assert!(res.join().is_err());
}
#[test]
fn test_cancellation_future() {
let (c, val) = pair::<u32, ()>();
let (tx, rx) = mpsc::channel();
c.cancellation().then(move |res| {
tx.send(123).unwrap();
res
}).forget();
assert!(rx.try_recv().is_err());
drop(val);
assert_eq!(123, rx.recv().unwrap());
}
}
| {
self.inner.complete(Some(Err(err)), false);
} | identifier_body |
DissertationScript.py | from subprocess import call
import os
import pandas as pd
from pandas import DataFrame
import string
import re
from urllib.request import urlopen
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common import exceptions
from selenium.webdriver.common.action_chains import ActionChains
import time
import nltk
from collections import defaultdict, Counter
from nltk.corpus import stopwords
from nltk import FreqDist, ConditionalFreqDist
import datetime
from xml.etree import ElementTree
from difflib import SequenceMatcher
import json
import difflib
#Get username and PW from text file
with open("../CSpanUserNamePW.txt","r") as f:
f = f.read()
username = f.split("\t")[0]
pw = f.split("\t")[1]
#This scrapes the Congressional Record transcripts from each committee
def gpo_scrape():
li=[]
for num in range(95,115):
for agen in ["HOUSE","JOINT","SENATE"]:
url = "http://www.gpo.gov/fdsys/browse/collection.action?collectionCode=CHRG&browsePath="+str(num)+"%2F"+agen+"&isCollapsed=false&leafLevelBrowse=false&ycord=0"
url = urlopen(url).read().decode()
reg = re.compile(str(num)+'/'+agen+'/'+'(.*?)3')
reg = re.findall(reg,url)
for a in reg:
temp = re.sub(' ','+',a)
path = str(num)+"%2F"+str(agen)+"%2F"+str(temp)
url2 = "http://www.gpo.gov/fdsys/browse/collection.action?collectionCode=CHRG&browsePath="+str(path)+"&isCollapsed=false&leafLevelBrowse=false&isDocumentResults=true&ycord=0"
#print(url2)
url2 = urlopen(url2).read().decode()
reg2 = re.compile('a href="(.*?\.htm)')
reg2 = re.findall(reg2,url2) | try:
site = urlopen(el).read()
text = open("CongressBLAHfolder/"+agen+str(num)+a+str(reg3.groups()[0])+".txt","wb")
text.write(site)
except Exception as e:
print("This didn't work: "+str(e))
#pdf.write(bytes(site))
#print(len(li))
#This cleans the scraped data from gpo_scrape
def re_clean_written():
for subdir, dirs, files in os.walk("Congressional Hearings TXT/"):
for file in files:
print("Working on file...." + str(file))
f = open(os.path.join(subdir, file), "rb").read().decode('utf-8', 'ignore').encode('cp850', 'ignore').decode('cp850')
reg2 = re.compile('[JFMASOND][A-Za-z]+? \d{1,2}. \d{4}')
try:
date = re.findall(reg2, f)[0].capitalize()
date = datetime.datetime.strptime(date, '%B %d, %Y')
date = str(date.year) + "." + str(date.month) + "." + str(date.day)
print(date)
except Exception as e:
print("NO DATE in this file: " + file)
#This is the big line that I need to check: gets only the relevant stuff from each transcript
reg = re.compile(
'\n{1,3} {4}([A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+)?\.{1} [A-Z][^\b]*?)\n{1,3} {4,}\[(?:Where|Add)')
newtext = re.findall(reg, f)
# print(newtext)
#Takes out prepared written statements, so there's just actual speech remaining
try:
newtext = re.sub(
'(\[(?:The opening|The (?:information)? follow|The prepared|Text of)[^\b]+?)(?=\n{1,3} {4}[A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+)?\.)',
'', newtext[0])
newtext = re.sub(
'((?:O[Pp][Ee][Nn][Ii][Nn][Gg] |P[Rr][Ee][Pp][Aa][Rr][Ee][Dd] )?S[Tt][Aa][Tt][Ee][Mm][Ee][Nn][Tt][Ss]?[^\b]*?\n{2})',
'', newtext)
with open('Congressional Hearings TXT (clean NEW)/' + file[:-4] + '_' + date + '.txt',
'a') as fi:
fi.write(",".join(reg3) + "\n" + newtext)
except Exception as e:
print("ERROR IN SCRAPE: " + file + "\t" + str(e))
#After running re_clean_written, this parses the data out and creates folders for each speaker
def parse_by_speakerdate():
a = defaultdict(list)
for subdir, dirs, files in os.walk("Congressional Hearings TXT (clean)/"):
for file in files:
print("Working on file...." + str(file))
f = open(os.path.join(subdir, file), "rb").read().decode('utf-8', 'ignore')
date = file.split("_")[1][:-4]
# Load original files, so I can get the names of everyone
f2 = open("Congressional Hearings TXT/" + file.split("_")[0] + ".txt", "rb").read().decode()
# Code to get name list from beginning of each file on GPO.gov (need to make sure this gets everyone)
try:
reg3 = re.compile('(?: {4,}|\r?\n?)([A-Z]+\.? [A-Z]+\.?(?: [A-Z]+)?),')
# reg3 = re.compile(' {4,}([A-Z]+ [A-Z]+?),')
reg3 = re.findall(reg3, f2)
print(reg3)
except Exception as e:
print("SOME ERROR HERE WITH COLLECTING NAMES..." + str(e))
try:
newtext = re.split(
'\r\n\r?\n? {4,}(?=[A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+| \[presiding\])?\.)', f)
# print(newtext)
print(len(newtext))
for line in newtext:
b = defaultdict(list)
name = line.split(".")[0]
# This corrects for the split on the period in Mr. or Mrs. (makes sure it gets their full name)
if len(name) < 4:
name = ".".join(line.split(".")[:2])
name = stringdist_list(name, reg3)
text = line.split(".")[1]
if len(text) < 15:
text = line.split(".", 2)[2]
if not os.path.exists('Congressional Hearings - People (new)/' + name + '/'):
os.makedirs('Congressional Hearings - People (new)/' + name + '/')
# Only download files if they are longer than 100 words (prevents need to use clean_people_dir_words in SVM Practice file)
if len(text.split(" ")) > 100:
with open('Congressional Hearings - People (new)/' + name + '/' + name + '_' + date + '.txt','a') as fi:
fi.write(text + "\n")
# print(text)
# b[date].append(text)
# a[name].append(b)
except Exception as e:
print("ERROR IN SCRAPE: " + file + "\t" + str(e))
# df_dict.tabulate()
# df_dict.plot()
# pickle.dump(df_dict,open("FDA Open Meetings Sites/Word Tokenized Dictionary.p","wb"))
#From the gpo.gov Congressional Directory, scrapes the txt file for each member of House/Senate going back to 105th Congress (1997-1998). Saves to GPO Biographies folder.
def gpo_scrape_bio():
#Pre-downloaded file that just lists state in one column and number of House representatives in the 2nd column
NumReps = pd.read_csv("Representatives by State.csv")
#Convert the csv to a dictionary for easier calling of values
repsdict = NumReps.set_index("State")["NumRep"].to_dict()
for state in NumReps.State:
for agen in ["H", "S"]:
if agen=="H":
Num = repsdict[state]
else:
Num = 2
for years in ["2016-02-12","2014-02-18","2011-12-01","2009-12-01","2008-08-01","2007-08-09","2006-09-01","2005-07-11","2004-08-01","2004-01-01","2003-11-01","2003-07-11",
"2002-10-01","2001-12-07","2000-10-01","2000-02-01","1999-06-15","1997-06-04"]:
for num in range(1,Num+1):
url = "https://www.gpo.gov/fdsys/pkg/CDIR-"+years+"/html/CDIR-"+years+"-"+state+"-"+agen+"-"+str(num)+".htm"
print(url)
try:
url = urlopen(url).read().decode()
text = open("GPO Biographies/"+agen+" "+state+" "+str(num)+" "+years+".txt","w")
text.write(url)
except Exception as e:
print("Number of representatives changed in "+state+" in"+years)
#From the files in GPO Biographies folder, pulls out the name, affiliation, education, and year of the txt files using regex (1262 people?)
def gpo_regex_get_name():
a = defaultdict(list)
for subdir,dirs,files in os.walk("GPO Biographies/"):
for file in files:
f = open(os.path.join(subdir,file),"rb").read().decode('utf-8','ignore')
name = re.compile(' {4}([A-Z\.]+? [^0-9]*?), ([A-Za-z]{4,})(?:,| |-|;)').findall(f)
year = re.compile('<title>.*?for the (.*?),').findall(f)
#reg2 = re.compile('education: ([\s\S]*?); [a-z ]*?: | ')
f2 = re.sub("\r\n","",f)
edu = re.compile('((?:[A-Z]\.){2,3},[\s\S]*?);').findall(f2)
print(name, file, year, edu)
try:
if name[0][0] not in a:
a[name[0][0]].append(name[0][1]) #name[1] is political affiliation because the txt file was organized in such a way
except Exception as e:
print("Problem with name in file: "+file)
try:
if file[0]=="H":
a[name[0][0]].append(year[0] + " - House")
if file[0] == "S":
a[name[0][0]].append(year[0] + " - Senate")
except Exception as e:
print("Problem with year: "+file)
try:
a[name[0][0]].extend(edu)
except Exception as e:
print("Problem with education: " + file)
print(a)
#Suggestion to use json for defaultdict instead of csv (http://codereview.stackexchange.com/questions/30741/writing-defaultdict-to-csv-file)
json.dump(a,open('GPO Biographies - JSON','w'))
#pd.DataFrame.from_dict(a, orient='index').to_csv("GPO Biographies - Education1.csv")
#Takes a list of names, searches them on C-SPAN, and extracts the website associated with them - website includes PersonID
def cspan_selenium_getsite():
###Create files with each Senator and their full website, including PersonID
#Load GPO Biographies - JSON and extract the key, which is names of all Congresspeople going back to 1997
names = json.load(open('GPO Biographies - JSON'))
names = list(names.keys())
#This gets rid of middle names and middle initials, so the search is better on C-SPAN
names = [name.split(" ")[0].title() + " " + name.split(" ")[-1].title() for name in names]
# Log in with Selenium
driver = webdriver.Firefox()
driver.get("http://www.c-span.org")
login = driver.find_element_by_class_name("my-cspan")
login.click()
time.sleep(1)
user = driver.find_elements_by_id("login")[1]
user.clear()
user.send_keys(username)
pw = driver.find_element_by_id("password")
pw.clear()
pw.send_keys(pw)
clicklogin = driver.find_element_by_id("submit-login")
clicklogin.click()
errorlog = []
for name in names:
try:
#Have to wait a bit of time because the website gets screwy and can't find the dropdown menu sometimes
time.sleep(10)
openfilter = driver.find_element_by_class_name('selected')
# openfilter = driver.find_element_by_xpath("//form[@class='search']/fieldset/div/span[@class='carat icon-chevron-down']")
openfilter.click()
peoplefilter = driver.find_element_by_xpath('//div[@style]/ul/li[4]')
time.sleep(0.5)
peoplefilter.click()
namesearch = driver.find_element_by_id('global-search')
namesearch.clear()
namesearch.send_keys(name)
clicker = driver.find_element_by_class_name('icon-search')
clicker.click()
time.sleep(1.5)
search = driver.find_elements_by_class_name('thumb')[0]
search.click()
source = driver.page_source
ID = re.compile('personid\[\]=(.*?)"').findall(source)
print(name,names.index(name),ID)
with open("C-SPAN PersonID1.txt","a") as f:
f.write(name+"\t"+ID[0]+"\n")
if len(ID) > 4:
errorlog.append(name)
except Exception as e:
print("COME BACKKKK AND GET THIS ONE MANUALLY!!!: ", name)
errorlog.append(name)
print(errorlog)
#This takes a tab delimited file with name and C-SPAN website, and just simplifies it so it's name and C-SPAN ID (using output from cspan_selenium_getsite
def cspan_selenium_getid():
###Create a file with just Senator's name and personID in separate column
with open("C-SPAN PersonID.txt") as f:
f = f.read().splitlines()
print(f)
for item in f:
ID = item.split("=")[-1]
name = item.split("\t")[0]
with open("C-SPAN PersonID (simplified).txt","a") as g:
g.write(name+"\t"+ID+"\n")
#Makes file names the correct case (upper, lower) when matching them in the match_name_CongressRecord() function below.
#Also turns the tab-delimited text file into a list
def file_to_list_upper(file):
with open(file,"r",encoding="ISO-8859-1") as f:
f = f.readlines()
return([x.split("\t")[0].title() for x in f])
#C-SPAN PersonID file has names and C-SPAN ID. This function takes the person's name and returns their C-SPAN ID
def dict_cspan_id(name):
with open("C-SPAN PersonID (simplified).txt","r",encoding="Latin1") as f:
df = pd.Series.from_csv(f,sep="\t",header=None).to_dict()
return(df[name])
#Match name from Congressional Record to names of spoken word text files, and then get C-SPAN ID
def match_name_CongressRecord():
allcongrnames = json.load(open('GPO Biographies - JSON'))
allcongrnameslist = list(allcongrnames.keys())
#print(file_to_list_upper("C-SPAN PersonID (simplified).txt"))
#print(len(namelist))
allcongrnameslist = [name.split(" ")[0]+" "+name.split(" ")[-1] for name in allcongrnameslist]
#print(namelist)
#The /media/jemme directory is from my orange external hard drive
for root,dirs,files in os.walk("/media/jemme/New Volume/Congressional Hearings - People (new)"):
for file in files:
name = file.split("_")[0] # Need to match name with ID
date = file.split("_")[1][:-4] # Need in this format: 2015-06-10
try:
date = datetime.datetime.strptime(date, "%Y.%m.%d").strftime("%Y-%m-%d")
#print(name, date)
except Exception as e:
print(file + " has a weird file name, I think..." + str(e))
namematch = difflib.get_close_matches(name,allcongrnameslist,cutoff=.8)
# The outer function finds the ID based on the name, using the C-SPAN PersonID file. stringdist_list_imperfect compares the name of the file in the folder
# to the list of names from the C-SPAN PersonID file and returns the name that is closest, which is fed into the outer function to find the ID
if difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8):
print(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8))
ID = dict_cspan_id(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"),cutoff=.8)[0])
#ID = dict_cspan_id(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified test).txt")))
print(name,ID)
#Several functions now add important info to the C-SPAN PersonID file - the below adds the dates each person spoke based on the transcripts
def add_dates():
with open("C-SPAN PersonID.txt",encoding="Latin1") as f:
f = f.read().splitlines()
#f = [item.split("\t")[0] for item in f]
#print(os.listdir("Congressional Hearings - People (new)"))
for item in f:
print(item)
#This first has to capitalize just the first letter of the transcript names, since they are all caps beforehand
#and, thus, can't match
transcriptnames = [name.title() for name in os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)")]
transcriptnamesmatch = difflib.get_close_matches(item,transcriptnames)
if transcriptnamesmatch:
print(transcriptnamesmatch)
#Turn the matched name back into all caps after it matches, so that it can find the actual transcript file
try:
dates = os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)/"+transcriptnamesmatch[0].upper())
except Exception as e:
print(item+" doesn't WORKKKKK!")
for date in dates:
date = date.split("_")[1][:-4].replace(".","-")
with open("C-SPAN PersonID and File Dates.txt","a") as outfile:
outfile.write(item+"\t"+transcriptnamesmatch[0]+"\t"+date+"\n")
#This is just a helper function used in add_date_month below - it converts dates into the proper format
def set_date(date):
date = datetime.datetime.strptime(date,"%Y-%m-%d")
date = datetime.datetime.strftime(date, "%Y-%m-%d")
return(date)
#Because there weren't corresponding videos for the specific dates in the Congressional Record transcripts,
#this just makes it so you can search a person on C-SPAN for that whole month, not just a specific day.
def add_date_month():
with open("C-SPAN PersonID and File Dates.txt") as f:
f = f.read().splitlines()
for item in f:
name = item.split("\t")[0]
ID = item.split("\t")[1]
nameinfile = item.split("\t")[2]
try:
date = item.split("\t")[3]
datebeg = re.sub("-\d{1,2}$", "-01", date)
datebeg = set_date(datebeg)
if date.split("-")[1] in ["9", "4", "6", "11"]:
dateend = re.sub("-\d{1,2}$", "-30", date)
dateend = set_date(dateend)
elif date.split("-")[1] in ["1", "3", "5", "7", "8", "10", "12"]:
dateend = re.sub("-\d{1,2}$", "-31", date)
dateend = set_date(dateend)
elif date.split("-")[1] == "2":
dateend = re.sub("-\d{1,2}$", "-28", date)
dateend = set_date(dateend)
with open("C-SPAN PersonID and File Dates (entire month search)1.txt", "a") as outfile:
outfile.write(item + "\t" + datebeg + "\t" + dateend + "\n")
except Exception as e:
print("DATE NOT CORRECT: ",date)
#This goes into C-SPAN and adds in the times that each person spoke in each video, so I don't have to download the entire thing
def get_vid():
driver = webdriver.Firefox()
with open("C-SPAN PersonID and File Dates (entire month search).txt") as f:
f = f.read().splitlines()
for item in f:
ID = item.split("\t")[1]
name = item.split("\t")[2]
date = item.split("\t")[3]
datebeg = item.split("\t")[4]
dateend = item.split("\t")[5]
#Use this code below to get video corresponding to each PersonID and date
driver.get("http://www.c-span.org/search/?searchtype=Videos&personid[]=" + str(ID) + "&sdate=" + str(datebeg) + "&edate=" + str(dateend))
try:
video = driver.find_elements_by_class_name("thumb")[0]
video.click()
time.sleep(6)
html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
reg = re.compile('<th>[\s\S]+?time-(.*?)"[\s\S]+?<strong>(.*?)<')
reg = re.findall(reg,html)
times = []
i = 0
while i < len(reg):
if reg[i][1].split(" ")[-1].title() in name:
times.append((reg[i][0],reg[i+1][0]))
i+=1
#reg = [tup for tup in reg if tup[1].split(" ")[-1].title() in name]
print()
print(driver.current_url,"\n",reg,"\n",times,"\n",name, "\n", ID, "\n", date)
with open("C-SPAN PersonID and File Dates (entire month search with vid website and time)1.txt","a") as outfile:
if times:
for tup in times:
outfile.write(item+"\t"+driver.current_url+"\t"+tup[0]+"\t"+tup[1]+"\t"+str(f.index(item))+"\n")
except Exception as e:
print(str(e))
# print("There is no video for "+str(name)+"on "+str(date)+".")
#This actually clicks on the video - make sure rtmpsrv is running BEFORE you even click the thumb link to
#the video, otherwise it won't work. So, essentially, go to c-span.org, then run ./rtmpsuckredirect.sh
#from the command line. Then start rtmpsrv. Rtmpsrv has to be running before you click the video thumbnail,
#not just before you click play.
flash = driver.find_element_by_id("flashPlayer")
time.sleep(5)
###Below code is not working until Selenium gets updated (https://github.com/SeleniumHQ/selenium/issues/2285)
ActionChains(driver).move_to_element(flash).click().perform()
###Trying javascript clicking
#driver.execute_script("arguments[0].click();", flash)
time.sleep(8)
#This cleans the final long list of rtmpdump commands created after running get_vid
#Then, run this file in the Linux command prompt to get all the videos
def rtmp_file():
with open("C-SPAN PersonID and File Dates (entire month search with vid website and time).txt") as f:
f = f.read()
f = re.sub("\nDuplicate request, skipping\.\n\n","\t",f)
f = f.splitlines()
f = [line for line in f if line.split(" ")[0].istitle()]
for line in reversed(f):
name = line.split("\t")[0].replace(" ","")
date = line.split("\t")[3].split("-")[:-1]
date = "-".join(date)
starttime = line.split("\t")[7]
if len(line.split("\t"))>10:
rtmp = line.split("\t")[10].split(" -o ")[0] + " -o " +name + date + "_" + starttime + ".flv"
with open("CSpanVideos/RTMPdump Cspan Commands.sh", "a") as outfile:
outfile.write(rtmp + " -A " + line.split("\t")[7] + " -B " + line.split("\t")[8] + "\n")
else:
#note: this branch reuses rtmp from a previous (longer) line; if the first line
#processed is short, rtmp is undefined here and this raises a NameError
with open("CSpanVideos/RTMPdump Cspan Commands.sh", "a") as outfile:
outfile.write(rtmp[:-4] + line.split("\t")[7] + ".flv" + " -A " + line.split("\t")[7] + " -B " + line.split("\t")[8] + "\n")
#rtmp = line.split("\t")[10][:-4]+line.split("\t")[7]+".flv"
#print(line,"\n",rtmp, " -A ", line.split("\t")[7], " -B ", line.split("\t")[8])
# if len(line.split("\t"))>10:
# print(line.split("\t"))
# print(line.split("\t"))
# print(line," -A ",line.split("\t")[7]," -B ",line.split("\t")[8])
# print(line.split("\t")[10]," -A ",line.split("\t")[7]," -B ",line.split("\t")[8])
###LASTLY, once you have the shell file created from rtmp_file, run it from Linux command line.
###First, you need to run rtmpsrv and such...
if __name__ == "__main__":
#match_name_CongressRecord()
#add_dates()
#cspan_selenium_getsite()
get_vid() | for el in reg2:
print(el)
#print(el)
#li.append(el)
reg3 = re.search('hrg(\d*?)\/',el) | random_line_split |
DissertationScript.py | from subprocess import call
import os
import pandas as pd
from pandas import DataFrame
import string
import re
from urllib.request import urlopen
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common import exceptions
from selenium.webdriver.common.action_chains import ActionChains
import time
import nltk
from collections import defaultdict, Counter
from nltk.corpus import stopwords
from nltk import FreqDist, ConditionalFreqDist
import datetime
from xml.etree import ElementTree
from difflib import SequenceMatcher
import json
import difflib
#Get username and PW from text file
with open("../CSpanUserNamePW.txt","r") as f:
f = f.read()
username = f.split("\t")[0]
pw = f.split("\t")[1]
#This scrapes the Congressional Record transcripts from each committee
def gpo_scrape():
li=[]
for num in range(95,115):
for agen in ["HOUSE","JOINT","SENATE"]:
url = "http://www.gpo.gov/fdsys/browse/collection.action?collectionCode=CHRG&browsePath="+str(num)+"%2F"+agen+"&isCollapsed=false&leafLevelBrowse=false&ycord=0"
url = urlopen(url).read().decode()
reg = re.compile(str(num)+'/'+agen+'/'+'(.*?)3')
reg = re.findall(reg,url)
for a in reg:
temp = re.sub(' ','+',a)
path = str(num)+"%2F"+str(agen)+"%2F"+str(temp)
url2 = "http://www.gpo.gov/fdsys/browse/collection.action?collectionCode=CHRG&browsePath="+str(path)+"&isCollapsed=false&leafLevelBrowse=false&isDocumentResults=true&ycord=0"
#print(url2)
url2 = urlopen(url2).read().decode()
reg2 = re.compile('a href="(.*?\.htm)')
reg2 = re.findall(reg2,url2)
for el in reg2:
print(el)
#print(el)
#li.append(el)
reg3 = re.search('hrg(\d*?)\/',el)
try:
site = urlopen(el).read()
text = open("CongressBLAHfolder/"+agen+str(num)+a+str(reg3.groups()[0])+".txt","wb")
text.write(site)
except Exception as e:
print("This didn't work: "+str(e))
#pdf.write(bytes(site))
#print(len(li))
#This cleans the scraped data from gpo_scrape
def re_clean_written():
for subdir, dirs, files in os.walk("Congressional Hearings TXT/"):
for file in files:
print("Working on file...." + str(file))
f = open(os.path.join(subdir, file), "rb").read().decode('utf-8', 'ignore').encode('cp850', 'ignore').decode('cp850')
reg2 = re.compile('[JFMASOND][A-Za-z]+? \d{1,2}. \d{4}')
try:
date = re.findall(reg2, f)[0].capitalize()
date = datetime.datetime.strptime(date, '%B %d, %Y')
date = str(date.year) + "." + str(date.month) + "." + str(date.day)
print(date)
except Exception as e:
print("NO DATE in this file: " + file)
#This is the big line that I need to check: gets only the relevant stuff from each transcript
reg = re.compile(
'\n{1,3} {4}([A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+)?\.{1} [A-Z][^\b]*?)\n{1,3} {4,}\[(?:Where|Add)')
newtext = re.findall(reg, f)
# print(newtext)
#Takes out prepared written statements, so there's just actual speech remaining
try:
newtext = re.sub(
'(\[(?:The opening|The (?:information)? follow|The prepared|Text of)[^\b]+?)(?=\n{1,3} {4}[A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+)?\.)',
'', newtext[0])
newtext = re.sub(
'((?:O[Pp][Ee][Nn][Ii][Nn][Gg] |P[Rr][Ee][Pp][Aa][Rr][Ee][Dd] )?S[Tt][Aa][Tt][Ee][Mm][Ee][Nn][Tt][Ss]?[^\b]*?\n{2})',
'', newtext)
with open('Congressional Hearings TXT (clean NEW)/' + file[:-4] + '_' + date + '.txt',
'a') as fi:
fi.write(",".join(reg3) + "\n" + newtext)
except Exception as e:
print("ERROR IN SCRAPE: " + file + "\t" + str(e))
#After running re_clean_written, this parses the data out and creates folders for each speaker
def parse_by_speakerdate():
a = defaultdict(list)
for subdir, dirs, files in os.walk("Congressional Hearings TXT (clean)/"):
for file in files:
print("Working on file...." + str(file))
f = open(os.path.join(subdir, file), "rb").read().decode('utf-8', 'ignore')
date = file.split("_")[1][:-4]
# Load original files, so I can get the names of everyone
f2 = open("Congressional Hearings TXT/" + file.split("_")[0] + ".txt", "rb").read().decode()
# Code to get name list from beginning of each file on GPO.gov (need to make sure this gets everyone)
try:
reg3 = re.compile('(?: {4,}|\r?\n?)([A-Z]+\.? [A-Z]+\.?(?: [A-Z]+)?),')
# reg3 = re.compile(' {4,}([A-Z]+ [A-Z]+?),')
reg3 = re.findall(reg3, f2)
print(reg3)
except Exception as e:
print("SOME ERROR HERE WITH COLLECTING NAMES..." + str(e))
try:
newtext = re.split(
'\r\n\r?\n? {4,}(?=[A-Z][a-z]+\.? [A-Z][a-z]+(?: [A-Z][a-z]+| \[presiding\])?\.)', f)
# print(newtext)
print(len(newtext))
for line in newtext:
b = defaultdict(list)
name = line.split(".")[0]
# This corrects for the split on the period in Mr. or Mrs. (makes sure it gets their full name)
if len(name) < 4:
name = ".".join(line.split(".")[:2])
name = stringdist_list(name, reg3)
text = line.split(".")[1]
if len(text) < 15:
text = line.split(".", 2)[2]
if not os.path.exists('Congressional Hearings - People (new)/' + name + '/'):
os.makedirs('Congressional Hearings - People (new)/' + name + '/')
# Only download files if they are longer than 100 words (prevents need to use clean_people_dir_words in SVM Practice file)
if len(text.split(" ")) > 100:
with open('Congressional Hearings - People (new)/' + name + '/' + name + '_' + date + '.txt','a') as fi:
fi.write(text + "\n")
# print(text)
# b[date].append(text)
# a[name].append(b)
except Exception as e:
print("ERROR IN SCRAPE: " + file + "\t" + str(e))
# df_dict.tabulate()
# df_dict.plot()
# pickle.dump(df_dict,open("FDA Open Meetings Sites/Word Tokenized Dictionary.p","wb"))
#From the gpo.gov Congressional Directory, scrapes the txt file for each member of House/Senate going back to 105th Congress (1997-1998). Saves to GPO Biographies folder.
def gpo_scrape_bio():
#Pre-downloaded file that just lists state in one column and number of House representatives in the 2nd column
NumReps = pd.read_csv("Representatives by State.csv")
#Convert the csv to a dictionary for easier calling of values
repsdict = NumReps.set_index("State")["NumRep"].to_dict()
for state in NumReps.State:
for agen in ["H", "S"]:
if agen=="H":
Num = repsdict[state]
else:
Num = 2
for years in ["2016-02-12","2014-02-18","2011-12-01","2009-12-01","2008-08-01","2007-08-09","2006-09-01","2005-07-11","2004-08-01","2004-01-01","2003-11-01","2003-07-11",
"2002-10-01","2001-12-07","2000-10-01","2000-02-01","1999-06-15","1997-06-04"]:
for num in range(1,Num+1):
url = "https://www.gpo.gov/fdsys/pkg/CDIR-"+years+"/html/CDIR-"+years+"-"+state+"-"+agen+"-"+str(num)+".htm"
print(url)
try:
url = urlopen(url).read().decode()
text = open("GPO Biographies/"+agen+" "+state+" "+str(num)+" "+years+".txt","w")
text.write(url)
except Exception as e:
print("Number of representatives changed in "+state+" in"+years)
#From the files in GPO Biographies folder, pulls out the name, affiliation, education, and year of the txt files using regex (1262 people?)
def gpo_regex_get_name():
a = defaultdict(list)
for subdir,dirs,files in os.walk("GPO Biographies/"):
for file in files:
f = open(os.path.join(subdir,file),"rb").read().decode('utf-8','ignore')
name = re.compile(' {4}([A-Z\.]+? [^0-9]*?), ([A-Za-z]{4,})(?:,| |-|;)').findall(f)
year = re.compile('<title>.*?for the (.*?),').findall(f)
#reg2 = re.compile('education: ([\s\S]*?); [a-z ]*?: | ')
f2 = re.sub("\r\n","",f)
edu = re.compile('((?:[A-Z]\.){2,3},[\s\S]*?);').findall(f2)
print(name, file, year, edu)
try:
if name[0][0] not in a:
a[name[0][0]].append(name[0][1]) #name[1] is political affiliation because the txt file was organized in such a way
except Exception as e:
print("Problem with name in file: "+file)
try:
if file[0]=="H":
a[name[0][0]].append(year[0] + " - House")
if file[0] == "S":
a[name[0][0]].append(year[0] + " - Senate")
except Exception as e:
print("Problem with year: "+file)
try:
a[name[0][0]].extend(edu)
except Exception as e:
print("Problem with education: " + file)
print(a)
#Suggestion to use json for defaultdict instead of csv (http://codereview.stackexchange.com/questions/30741/writing-defaultdict-to-csv-file)
json.dump(a,open('GPO Biographies - JSON','w'))
#pd.DataFrame.from_dict(a, orient='index').to_csv("GPO Biographies - Education1.csv")
#Takes a list of names, searches them on C-SPAN, and extracts the website associated with them - website includes PersonID
def cspan_selenium_getsite():
###Create files with each Senator and their full website, including PersonID
#Load GPO Biographies - JSON and extract the key, which is names of all Congresspeople going back to 1997
names = json.load(open('GPO Biographies - JSON'))
names = list(names.keys())
#This gets rid of middle names and middle initials, so the search is better on C-SPAN
names = [name.split(" ")[0].title() + " " + name.split(" ")[-1].title() for name in names]
# Log in with Selenium
driver = webdriver.Firefox()
driver.get("http://www.c-span.org")
login = driver.find_element_by_class_name("my-cspan")
login.click()
time.sleep(1)
user = driver.find_elements_by_id("login")[1]
user.clear()
user.send_keys(username)
    pw_field = driver.find_element_by_id("password")
    pw_field.clear()
    #Original bug fixed: `pw.send_keys(pw)` shadowed the password string with the element itself
    pw_field.send_keys(pw)
clicklogin = driver.find_element_by_id("submit-login")
clicklogin.click()
errorlog = []
for name in names:
try:
#Have to wait a bit of time because the website gets screwy and can't find the dropdown menu sometimes
time.sleep(10)
openfilter = driver.find_element_by_class_name('selected')
# openfilter = driver.find_element_by_xpath("//form[@class='search']/fieldset/div/span[@class='carat icon-chevron-down']")
openfilter.click()
peoplefilter = driver.find_element_by_xpath('//div[@style]/ul/li[4]')
time.sleep(0.5)
peoplefilter.click()
namesearch = driver.find_element_by_id('global-search')
namesearch.clear()
namesearch.send_keys(name)
clicker = driver.find_element_by_class_name('icon-search')
clicker.click()
time.sleep(1.5)
search = driver.find_elements_by_class_name('thumb')[0]
search.click()
source = driver.page_source
ID = re.compile('personid\[\]=(.*?)"').findall(source)
print(name,names.index(name),ID)
with open("C-SPAN PersonID1.txt","a") as f:
f.write(name+"\t"+ID[0]+"\n")
if len(ID) > 4:
errorlog.append(name)
except Exception as e:
print("COME BACKKKK AND GET THIS ONE MANUALLY!!!: ", name)
errorlog.append(name)
print(errorlog)
#This takes a tab-delimited file with name and C-SPAN website and simplifies it to just name and C-SPAN ID (using the output from cspan_selenium_getsite)
def cspan_selenium_getid():
###Create a file with just Senator's name and personID in separate column
with open("C-SPAN PersonID.txt") as f:
f = f.read().splitlines()
print(f)
for item in f:
ID = item.split("=")[-1]
name = item.split("\t")[0]
with open("C-SPAN PersonID (simplified).txt","a") as g:
g.write(name+"\t"+ID+"\n")
#Title-cases the names in a tab-delimited text file so they match in the match_name_CongressRecord()
#function below, and returns them as a list
def file_to_list_upper(file):
with open(file,"r",encoding="ISO-8859-1") as f:
f = f.readlines()
return([x.split("\t")[0].title() for x in f])
#C-SPAN PersonID file has names and C-SPAN ID. This function takes the person's name and returns their C-SPAN ID
def dict_cspan_id(name):
with open("C-SPAN PersonID (simplified).txt","r",encoding="Latin1") as f:
        #pd.Series.from_csv was removed in pandas 1.0; read_csv + squeeze is the equivalent
        df = pd.read_csv(f, sep="\t", header=None, index_col=0).squeeze("columns").to_dict()
return(df[name])
#Match name from Congressional Record to names of spoken word text files, and then get C-SPAN ID
def match_name_CongressRecord():
allcongrnames = json.load(open('GPO Biographies - JSON'))
allcongrnameslist = list(allcongrnames.keys())
#print(file_to_list_upper("C-SPAN PersonID (simplified).txt"))
#print(len(namelist))
allcongrnameslist = [name.split(" ")[0]+" "+name.split(" ")[-1] for name in allcongrnameslist]
#print(namelist)
#The /media/jemme directory is from my orange external hard drive
for root,dirs,files in os.walk("/media/jemme/New Volume/Congressional Hearings - People (new)"):
for file in files:
name = file.split("_")[0] # Need to match name with ID
date = file.split("_")[1][:-4] # Need in this format: 2015-06-10
try:
date = datetime.datetime.strptime(date, "%Y.%m.%d").strftime("%Y-%m-%d")
#print(name, date)
except Exception as e:
print(file + " has a weird file name, I think..." + str(e))
namematch = difflib.get_close_matches(name,allcongrnameslist,cutoff=.8)
            # dict_cspan_id finds the ID based on the name, using the C-SPAN PersonID file.
            # difflib.get_close_matches compares the name of the file in the folder to the list
            # of names from the C-SPAN PersonID file and returns the closest one, which is then
            # fed to dict_cspan_id to look up the ID (computed once instead of three times).
            close = difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified).txt"), cutoff=.8)
            if close:
                print(close)
                ID = dict_cspan_id(close[0])
                #ID = dict_cspan_id(difflib.get_close_matches(name.title(), file_to_list_upper("C-SPAN PersonID (simplified test).txt")))
                print(name,ID)
#Several functions now add important info to the C-SPAN PersonID file - the below adds the dates each person spoke based on the transcripts
def add_dates():
with open("C-SPAN PersonID.txt",encoding="Latin1") as f:
f = f.read().splitlines()
#f = [item.split("\t")[0] for item in f]
#print(os.listdir("Congressional Hearings - People (new)"))
for item in f:
print(item)
#This first has to capitalize just the first letter of the transcript names, since they are all caps beforehand
#and, thus, can't match
transcriptnames = [name.title() for name in os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)")]
transcriptnamesmatch = difflib.get_close_matches(item,transcriptnames)
if transcriptnamesmatch:
print(transcriptnamesmatch)
#Turn the matched name back into all caps after it matches, so that it can find the actual transcript file
try:
dates = os.listdir("/media/jemme/New Volume/Congressional Hearings - People (new)/"+transcriptnamesmatch[0].upper())
            except Exception as e:
                print(item+" doesn't WORKKKKK!")
                continue  #without this, `dates` from the previous person would be reused below
for date in dates:
date = date.split("_")[1][:-4].replace(".","-")
with open("C-SPAN PersonID and File Dates.txt","a") as outfile:
outfile.write(item+"\t"+transcriptnamesmatch[0]+"\t"+date+"\n")
#This is just a helper function used in add_date_month below - it converts dates into the proper format
def set_date(date):
date = datetime.datetime.strptime(date,"%Y-%m-%d")
date = datetime.datetime.strftime(date, "%Y-%m-%d")
return(date)
#Because there weren't corresponding videos for the specific dates in the Congressional Record transcripts,
#this just makes it so you can search a person on C-SPAN for that whole month, not just a specific day.
def add_date_month():
with open("C-SPAN PersonID and File Dates.txt") as f:
f = f.read().splitlines()
for item in f:
name = item.split("\t")[0]
ID = item.split("\t")[1]
nameinfile = item.split("\t")[2]
try:
date = item.split("\t")[3]
datebeg = re.sub("-\d{1,2}$", "-01", date)
datebeg = set_date(datebeg)
if date.split("-")[1] in ["9", "4", "6", "11"]:
dateend = re.sub("-\d{1,2}$", "-30", date)
dateend = set_date(dateend)
elif date.split("-")[1] in ["1", "3", "5", "7", "8", "10", "12"]:
dateend = re.sub("-\d{1,2}$", "-31", date)
dateend = set_date(dateend)
elif date.split("-")[1] == "2":
dateend = re.sub("-\d{1,2}$", "-28", date)
dateend = set_date(dateend)
with open("C-SPAN PersonID and File Dates (entire month search)1.txt", "a") as outfile:
outfile.write(item + "\t" + datebeg + "\t" + dateend + "\n")
except Exception as e:
print("DATE NOT CORRECT: ",date)
#This goes into C-SPAN and adds in the times that each person spoke in each video, so I don't have to download the entire thing
def get_vid():
driver = webdriver.Firefox()
with open("C-SPAN PersonID and File Dates (entire month search).txt") as f:
f = f.read().splitlines()
for item in f:
ID = item.split("\t")[1]
name = item.split("\t")[2]
date = item.split("\t")[3]
datebeg = item.split("\t")[4]
dateend = item.split("\t")[5]
#Use this code below to get video corresponding to each PersonID and date
driver.get("http://www.c-span.org/search/?searchtype=Videos&personid[]=" + str(ID) + "&sdate=" + str(datebeg) + "&edate=" + str(dateend))
try:
video = driver.find_elements_by_class_name("thumb")[0]
video.click()
time.sleep(6)
html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
reg = re.compile('<th>[\s\S]+?time-(.*?)"[\s\S]+?<strong>(.*?)<')
reg = re.findall(reg,html)
times = []
i = 0
            while i < len(reg) - 1:  #stop one row early: each start is paired with the NEXT row's timestamp (avoids IndexError on the last row)
                if reg[i][1].split(" ")[-1].title() in name:
                    times.append((reg[i][0], reg[i+1][0]))
                i += 1
#reg = [tup for tup in reg if tup[1].split(" ")[-1].title() in name]
print()
print(driver.current_url,"\n",reg,"\n",times,"\n",name, "\n", ID, "\n", date)
with open("C-SPAN PersonID and File Dates (entire month search with vid website and time)1.txt","a") as outfile:
if times:
for tup in times:
outfile.write(item+"\t"+driver.current_url+"\t"+tup[0]+"\t"+tup[1]+"\t"+str(f.index(item))+"\n")
except Exception as e:
print(str(e))
# print("There is no video for "+str(name)+"on "+str(date)+".")
        #This actually clicks on the video - make sure rtmpsrv is running BEFORE you even click
        #the thumb link to the video, otherwise it won't work. So: go to c-span.org, run
        #./rtmpsuckredirect.sh from the command line, then start rtmpsrv. rtmpsrv has to be
        #running before you click the video thumbnail, not just before you click play.
flash = driver.find_element_by_id("flashPlayer")
time.sleep(5)
###Below code is not working until Selenium gets updated (https://github.com/SeleniumHQ/selenium/issues/2285)
ActionChains(driver).move_to_element(flash).click().perform()
###Trying javascript clicking
#driver.execute_script("arguments[0].click();", flash)
time.sleep(8)
#This cleans the final long list of rtmpdump commands created after running get_vid
#Then, run this file in the Linux command prompt to get all the videos
def rtmp_file():
with open("C-SPAN PersonID and File Dates (entire month search with vid website and time).txt") as f:
f = f.read()
f = re.sub("\nDuplicate request, skipping\.\n\n","\t",f)
f = f.splitlines()
f = [line for line in f if line.split(" ")[0].istitle()]
for line in reversed(f):
name = line.split("\t")[0].replace(" ","")
date = line.split("\t")[3].split("-")[:-1]
date = "-".join(date)
starttime = line.split("\t")[7]
if len(line.split("\t"))>10:
rtmp = line.split("\t")[10].split(" -o ")[0] + " -o " +name + date + "_" + starttime + ".flv"
with open("CSpanVideos/RTMPdump Cspan Commands.sh", "a") as outfile:
outfile.write(rtmp + " -A " + line.split("\t")[7] + " -B " + line.split("\t")[8] + "\n")
        else:
            #Short lines (merged "Duplicate request, skipping" rows) have no field 10: this
            #appears to reuse the rtmpdump command captured from the adjacent row (hence the
            #reversed iteration), swapping in this row's start time as the output name
            with open("CSpanVideos/RTMPdump Cspan Commands.sh", "a") as outfile:
                outfile.write(rtmp[:-4] + line.split("\t")[7] + ".flv" + " -A " + line.split("\t")[7] + " -B " + line.split("\t")[8] + "\n")
#rtmp = line.split("\t")[10][:-4]+line.split("\t")[7]+".flv"
#print(line,"\n",rtmp, " -A ", line.split("\t")[7], " -B ", line.split("\t")[8])
# if len(line.split("\t"))>10:
# print(line.split("\t"))
# print(line.split("\t"))
# print(line," -A ",line.split("\t")[7]," -B ",line.split("\t")[8])
# print(line.split("\t")[10]," -A ",line.split("\t")[7]," -B ",line.split("\t")[8])
###LASTLY, once you have the shell file created by rtmp_file, run it from the Linux command
###line (with rtmpsuckredirect.sh and rtmpsrv already running, as described above), e.g.:
###    sh "CSpanVideos/RTMPdump Cspan Commands.sh"
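
#Optional convenience (a sketch, not part of the original workflow): launch the generated
#shell file from Python; assumes rtmpsuckredirect.sh and rtmpsrv are already running.
def run_rtmp_commands():
    import subprocess
    subprocess.run(["sh", "CSpanVideos/RTMPdump Cspan Commands.sh"], check=True)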
if __name__ == "__main__":
#match_name_CongressRecord()
#add_dates()
#cspan_selenium_getsite()
get_vid()